Diffstat (limited to 'drivers/gpu/drm/i915')
146 files changed, 13885 insertions, 7307 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 1c2857f13ad4..19b5fe5016bf 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -75,6 +75,7 @@ i915-y += i915_cmd_parser.o \ i915_gemfs.o \ i915_query.o \ i915_request.o \ + i915_scheduler.o \ i915_timeline.o \ i915_trace_points.o \ i915_vma.o \ @@ -112,6 +113,8 @@ i915-y += intel_audio.o \ intel_bios.o \ intel_cdclk.o \ intel_color.o \ + intel_combo_phy.o \ + intel_connector.o \ intel_display.o \ intel_dpio_phy.o \ intel_dpll_mgr.o \ @@ -120,9 +123,9 @@ i915-y += intel_audio.o \ intel_frontbuffer.o \ intel_hdcp.o \ intel_hotplug.o \ - intel_modes.o \ intel_overlay.o \ intel_psr.o \ + intel_quirks.o \ intel_sideband.o \ intel_sprite.o i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o @@ -142,6 +145,7 @@ i915-y += dvo_ch7017.o \ intel_dp_link_training.o \ intel_dp_mst.o \ intel_dp.o \ + intel_dsi.o \ intel_dsi_dcs_backlight.o \ intel_dsi_vbt.o \ intel_dvo.o \ @@ -153,14 +157,17 @@ i915-y += dvo_ch7017.o \ intel_sdvo.o \ intel_tv.o \ vlv_dsi.o \ - vlv_dsi_pll.o + vlv_dsi_pll.o \ + intel_vdsc.o # Post-mortem debug and GPU hang state capture i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o i915-$(CONFIG_DRM_I915_SELFTEST) += \ selftests/i915_random.o \ selftests/i915_selftest.o \ - selftests/igt_flush_test.o + selftests/igt_flush_test.o \ + selftests/igt_reset.o \ + selftests/igt_spinner.o # virtual gpu code i915-y += i915_vgpu.o diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index fe754022e356..359d37d5c958 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -61,10 +61,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) } mutex_lock(&dev_priv->drm.struct_mutex); + mmio_hw_access_pre(dev_priv); ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node, size, I915_GTT_PAGE_SIZE, I915_COLOR_UNEVICTABLE, start, end, flags); + mmio_hw_access_post(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); if (ret) gvt_err("fail to alloc %s gm space from host\n", diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 481896fb712a..85e6736f0a32 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->bpp = skl_pixel_formats[fmt].bpp; plane->drm_format = skl_pixel_formats[fmt].drm_format; } else { - plane->tiled = !!(val & DISPPLANE_TILED); + plane->tiled = val & DISPPLANE_TILED; fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK); plane->bpp = bdw_pixel_formats[fmt].bpp; plane->drm_format = bdw_pixel_formats[fmt].drm_format; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 2402395a068d..c7103dd2d8d5 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1905,7 +1905,6 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) vgpu_free_mm(mm); return ERR_PTR(-ENOMEM); } - mm->ggtt_mm.last_partial_off = -1UL; return mm; } @@ -1930,7 +1929,6 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) invalidate_ppgtt_mm(mm); } else { vfree(mm->ggtt_mm.virtual_ggtt); - mm->ggtt_mm.last_partial_off = -1UL; } vgpu_free_mm(mm); @@ -2168,6 +2166,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, struct intel_gvt_gtt_entry e, m; dma_addr_t dma_addr; int ret; + struct intel_gvt_partial_pte *partial_pte, *pos, *n; + 
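/*
 * Note on the rewritten hunk below: the old code could track only one
 * pending partial (4-byte) write to an 8-byte GGTT PTE via
 * last_partial_off/last_partial_data and had to bail out on any
 * non-consecutive write sequence. It now keeps every half-written PTE
 * on ggtt_mm.partial_pte_list (struct intel_gvt_partial_pte, see the
 * gtt.h hunk further down) and only updates the shadow PTE once both
 * halves have arrived, so interleaved partial writes to different
 * entries no longer trample each other.
 */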
bool partial_update = false; if (bytes != 4 && bytes != 8) return -EINVAL; @@ -2178,68 +2178,57 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, if (!vgpu_gmadr_is_valid(vgpu, gma)) return 0; - ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); - + e.type = GTT_TYPE_GGTT_PTE; memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, bytes); /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes - * write, we assume the two 4 bytes writes are consecutive. - * Otherwise, we abort and report error + * write, save the first 4 bytes in a list and update virtual + * PTE. Only update shadow PTE when the second 4 bytes comes. */ if (bytes < info->gtt_entry_size) { - if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) { - /* the first partial part*/ - ggtt_mm->ggtt_mm.last_partial_off = off; - ggtt_mm->ggtt_mm.last_partial_data = e.val64; - return 0; - } else if ((g_gtt_index == - (ggtt_mm->ggtt_mm.last_partial_off >> - info->gtt_entry_size_shift)) && - (off != ggtt_mm->ggtt_mm.last_partial_off)) { - /* the second partial part */ - - int last_off = ggtt_mm->ggtt_mm.last_partial_off & - (info->gtt_entry_size - 1); - - memcpy((void *)&e.val64 + last_off, - (void *)&ggtt_mm->ggtt_mm.last_partial_data + - last_off, bytes); - - ggtt_mm->ggtt_mm.last_partial_off = -1UL; - } else { - int last_offset; - - gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n", - ggtt_mm->ggtt_mm.last_partial_off, off, - bytes, info->gtt_entry_size); - - /* set host ggtt entry to scratch page and clear - * virtual ggtt entry as not present for last - * partially write offset - */ - last_offset = ggtt_mm->ggtt_mm.last_partial_off & - (~(info->gtt_entry_size - 1)); - - ggtt_get_host_entry(ggtt_mm, &m, last_offset); - ggtt_invalidate_pte(vgpu, &m); - ops->set_pfn(&m, gvt->gtt.scratch_mfn); - ops->clear_present(&m); - ggtt_set_host_entry(ggtt_mm, &m, last_offset); - ggtt_invalidate(gvt->dev_priv); - - ggtt_get_guest_entry(ggtt_mm, &e, last_offset); - ops->clear_present(&e); - ggtt_set_guest_entry(ggtt_mm, &e, last_offset); - - ggtt_mm->ggtt_mm.last_partial_off = off; - ggtt_mm->ggtt_mm.last_partial_data = e.val64; + bool found = false; + + list_for_each_entry_safe(pos, n, + &ggtt_mm->ggtt_mm.partial_pte_list, list) { + if (g_gtt_index == pos->offset >> + info->gtt_entry_size_shift) { + if (off != pos->offset) { + /* the second partial part*/ + int last_off = pos->offset & + (info->gtt_entry_size - 1); + + memcpy((void *)&e.val64 + last_off, + (void *)&pos->data + last_off, + bytes); + + list_del(&pos->list); + kfree(pos); + found = true; + break; + } + + /* update of the first partial part */ + pos->data = e.val64; + ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); + return 0; + } + } - return 0; + if (!found) { + /* the first partial part */ + partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL); + if (!partial_pte) + return -ENOMEM; + partial_pte->offset = off; + partial_pte->data = e.val64; + list_add_tail(&partial_pte->list, + &ggtt_mm->ggtt_mm.partial_pte_list); + partial_update = true; } } - if (ops->test_present(&e)) { + if (!partial_update && (ops->test_present(&e))) { gfn = ops->get_pfn(&e); m = e; @@ -2263,16 +2252,18 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, } else ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); } else { - ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index); - ggtt_invalidate_pte(vgpu, &m); ops->set_pfn(&m, gvt->gtt.scratch_mfn); 
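/*
 * Entry is not present (or still only half-written): back the shadow
 * PTE with the scratch page and mark it not-present, so stray accesses
 * through this GGTT slot cannot reach real memory.
 */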
ops->clear_present(&m); } out: + ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); + + ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index); + ggtt_invalidate_pte(vgpu, &e); + ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); ggtt_invalidate(gvt->dev_priv); - ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); return 0; } @@ -2430,6 +2421,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) intel_vgpu_reset_ggtt(vgpu, false); + INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list); + return create_scratch_page_tree(vgpu); } @@ -2454,6 +2447,15 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu) static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu) { + struct intel_gvt_partial_pte *pos, *next; + + list_for_each_entry_safe(pos, next, + &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, + list) { + gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n", + pos->offset, pos->data); + kfree(pos); + } intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm); vgpu->gtt.ggtt_mm = NULL; } diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 7a9b36176efb..d8cb04cc946d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -35,7 +35,6 @@ #define _GVT_GTT_H_ #define I915_GTT_PAGE_SHIFT 12 -#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1)) struct intel_vgpu_mm; @@ -133,6 +132,12 @@ enum intel_gvt_mm_type { #define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES +struct intel_gvt_partial_pte { + unsigned long offset; + u64 data; + struct list_head list; +}; + struct intel_vgpu_mm { enum intel_gvt_mm_type type; struct intel_vgpu *vgpu; @@ -157,8 +162,7 @@ struct intel_vgpu_mm { } ppgtt_mm; struct { void *virtual_ggtt; - unsigned long last_partial_off; - u64 last_partial_data; + struct list_head partial_pte_list; } ggtt_mm; }; }; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 90f50f67909a..aa280bb07125 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1609,7 +1609,7 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu, return 0; } -static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu, +static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { vgpu_vreg(vgpu, offset) = 0; @@ -2607,6 +2607,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); + + MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); + MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); return 0; } @@ -3205,9 +3208,6 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); - MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write); - MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write); - MMIO_D(RC6_CTX_BASE, D_BXT); MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT); diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 10e63eea5492..d6e02c15ef97 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -131,7 +131,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ - {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0,
false}, /* 0x20d4 */ + {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ @@ -158,6 +158,8 @@ static void load_render_mocs(struct drm_i915_private *dev_priv) int ring_id, i; for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) { + if (!HAS_ENGINE(dev_priv, ring_id)) + continue; offset.reg = regs[ring_id]; for (i = 0; i < GEN9_MOCS_SIZE; i++) { gen9_render_mocs.control_table[ring_id][i] = diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index ea34003d6dd2..b8fbe3fabea3 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -334,6 +334,28 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) i915_gem_object_put(wa_ctx->indirect_ctx.obj); } +static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, + struct i915_gem_context *ctx) +{ + struct intel_vgpu_mm *mm = workload->shadow_mm; + struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; + int i = 0; + + if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) + return -1; + + if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { + px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0]; + } else { + for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) { + px_dma(ppgtt->pdp.page_directory[i]) = + mm->ppgtt_mm.shadow_pdps[i]; + } + } + + return 0; +} + /** * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and * shadow it as well, include ringbuffer,wa_ctx and ctx. @@ -358,6 +380,12 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) if (workload->req) return 0; + ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); + if (ret < 0) { + gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); + return ret; + } + /* pin shadow context by gvt even the shadow context will be pinned * when i915 alloc request. That is because gvt will update the guest * context from shadow context when workload is completed, and at that diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4f3ac0a12889..38dcee1ca062 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -943,30 +943,30 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) static ssize_t gpu_state_read(struct file *file, char __user *ubuf, size_t count, loff_t *pos) { - struct i915_gpu_state *error = file->private_data; - struct drm_i915_error_state_buf str; + struct i915_gpu_state *error; ssize_t ret; - loff_t tmp; + void *buf; + error = file->private_data; if (!error) return 0; - ret = i915_error_state_buf_init(&str, error->i915, count, *pos); - if (ret) - return ret; + /* Bounce buffer required because of kernfs __user API convenience. 
*/ + buf = kmalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; - ret = i915_error_state_to_str(&str, error); - if (ret) + ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count); + if (ret <= 0) goto out; - tmp = 0; - ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes); - if (ret < 0) - goto out; + if (!copy_to_user(ubuf, buf, ret)) + *pos += ret; + else + ret = -EFAULT; - *pos = str.start + ret; out: - i915_error_state_buf_release(&str); + kfree(buf); return ret; } @@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused) if (!IS_GEN5(dev_priv)) return -ENODEV; + intel_runtime_pm_get(dev_priv); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; @@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused) seq_printf(m, "GFX power: %ld\n", gfx); seq_printf(m, "Total power: %ld\n", chipset + gfx); + intel_runtime_pm_put(dev_priv); + return 0; } @@ -2215,8 +2219,23 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_device *dev = &dev_priv->drm; struct intel_rps *rps = &dev_priv->gt_pm.rps; + u32 act_freq = rps->cur_freq; struct drm_file *file; + if (intel_runtime_pm_get_if_in_use(dev_priv)) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + mutex_lock(&dev_priv->pcu_lock); + act_freq = vlv_punit_read(dev_priv, + PUNIT_REG_GPU_FREQ_STS); + act_freq = (act_freq >> 8) & 0xff; + mutex_unlock(&dev_priv->pcu_lock); + } else { + act_freq = intel_get_cagf(dev_priv, + I915_READ(GEN6_RPSTAT1)); + } + intel_runtime_pm_put(dev_priv); + } + seq_printf(m, "RPS enabled? %d\n", rps->enabled); seq_printf(m, "GPU busy? %s [%d requests]\n", yesno(dev_priv->gt.awake), dev_priv->gt.active_requests); @@ -2224,8 +2243,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) seq_printf(m, "Boosts outstanding? %d\n", atomic_read(&rps->num_waiters)); seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); - seq_printf(m, "Frequency requested %d\n", - intel_gpu_freq(dev_priv, rps->cur_freq)); + seq_printf(m, "Frequency requested %d, actual %d\n", + intel_gpu_freq(dev_priv, rps->cur_freq), + intel_gpu_freq(dev_priv, act_freq)); seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", intel_gpu_freq(dev_priv, rps->min_freq), intel_gpu_freq(dev_priv, rps->min_freq_softlimit), @@ -2900,16 +2920,15 @@ static int i915_dmc_info(struct seq_file *m, void *unused) seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), CSR_VERSION_MINOR(csr->version)); - if (IS_KABYLAKE(dev_priv) || - (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) { - seq_printf(m, "DC3 -> DC5 count: %d\n", - I915_READ(SKL_CSR_DC3_DC5_COUNT)); + if (WARN_ON(INTEL_GEN(dev_priv) > 11)) + goto out; + + seq_printf(m, "DC3 -> DC5 count: %d\n", + I915_READ(IS_BROXTON(dev_priv) ? 
BXT_CSR_DC3_DC5_COUNT : + SKL_CSR_DC3_DC5_COUNT)); + if (!IS_GEN9_LP(dev_priv)) seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(SKL_CSR_DC5_DC6_COUNT)); - } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) { - seq_printf(m, "DC3 -> DC5 count: %d\n", - I915_READ(BXT_CSR_DC3_DC5_COUNT)); - } out: seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); @@ -3049,16 +3068,17 @@ static void intel_connector_info(struct seq_file *m, seq_printf(m, "connector %d: type %s, status: %s\n", connector->base.id, connector->name, drm_get_connector_status_name(connector->status)); - if (connector->status == connector_status_connected) { - seq_printf(m, "\tname: %s\n", connector->display_info.name); - seq_printf(m, "\tphysical dimensions: %dx%dmm\n", - connector->display_info.width_mm, - connector->display_info.height_mm); - seq_printf(m, "\tsubpixel order: %s\n", - drm_get_subpixel_order_name(connector->display_info.subpixel_order)); - seq_printf(m, "\tCEA rev: %d\n", - connector->display_info.cea_rev); - } + + if (connector->status == connector_status_disconnected) + return; + + seq_printf(m, "\tname: %s\n", connector->display_info.name); + seq_printf(m, "\tphysical dimensions: %dx%dmm\n", + connector->display_info.width_mm, + connector->display_info.height_mm); + seq_printf(m, "\tsubpixel order: %s\n", + drm_get_subpixel_order_name(connector->display_info.subpixel_order)); + seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); if (!intel_encoder) return; @@ -3355,13 +3375,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) static int i915_wa_registers(struct seq_file *m, void *unused) { - struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds; - int i; + struct drm_i915_private *i915 = node_to_i915(m->private); + const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list; + struct i915_wa *wa; + unsigned int i; - seq_printf(m, "Workarounds applied: %d\n", wa->count); - for (i = 0; i < wa->count; ++i) + seq_printf(m, "Workarounds applied: %u\n", wal->count); + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n", - wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask); + i915_mmio_reg_offset(wa->reg), wa->val, wa->mask); return 0; } @@ -3421,31 +3443,32 @@ static int i915_ddb_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_device *dev = &dev_priv->drm; - struct skl_ddb_allocation *ddb; struct skl_ddb_entry *entry; - enum pipe pipe; - int plane; + struct intel_crtc *crtc; if (INTEL_GEN(dev_priv) < 9) return -ENODEV; drm_modeset_lock_all(dev); - ddb = &dev_priv->wm.skl_hw.ddb; - seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); - for_each_pipe(dev_priv, pipe) { + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + enum pipe pipe = crtc->pipe; + enum plane_id plane_id; + seq_printf(m, "Pipe %c\n", pipe_name(pipe)); - for_each_universal_plane(dev_priv, pipe, plane) { - entry = &ddb->plane[pipe][plane]; - seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, + for_each_plane_id_on_crtc(crtc, plane_id) { + entry = &crtc_state->wm.skl.plane_ddb_y[plane_id]; + seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1, entry->start, entry->end, skl_ddb_entry_size(entry)); } - entry = &ddb->plane[pipe][PLANE_CURSOR]; + entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, entry->end, 
skl_ddb_entry_size(entry)); } @@ -4172,6 +4195,7 @@ i915_drop_caches_set(void *data, u64 val) DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", val, val & DROP_ALL); + intel_runtime_pm_get(i915); if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915)) i915_gem_set_wedged(i915); @@ -4181,7 +4205,7 @@ i915_drop_caches_set(void *data, u64 val) if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) { ret = mutex_lock_interruptible(&i915->drm.struct_mutex); if (ret) - return ret; + goto out; if (val & DROP_ACTIVE) ret = i915_gem_wait_for_idle(i915, @@ -4189,11 +4213,8 @@ i915_drop_caches_set(void *data, u64 val) I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT); - if (ret == 0 && val & DROP_RESET_SEQNO) { - intel_runtime_pm_get(i915); + if (ret == 0 && val & DROP_RESET_SEQNO) ret = i915_gem_set_global_seqno(&i915->drm, 1); - intel_runtime_pm_put(i915); - } if (val & DROP_RETIRE) i915_retire_requests(i915); @@ -4231,6 +4252,9 @@ i915_drop_caches_set(void *data, u64 val) if (val & DROP_FREED) i915_gem_drain_freed_objects(i915); +out: + intel_runtime_pm_put(i915); + return ret; } @@ -4331,7 +4355,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, for (s = 0; s < info->sseu.max_slices; s++) { /* * FIXME: Valid SS Mask respects the spec and read - * only valid bits for those registers, excluding reserverd + * only valid bits for those registers, excluding reserved * although this seems wrong because it would leave many * subslices without ACK. */ @@ -4571,6 +4595,13 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = m->private; struct i915_hotplug *hotplug = &dev_priv->hotplug; + /* Synchronize with everything first in case there's been an HPD + * storm, but we haven't finished handling it in the kernel yet + */ + synchronize_irq(dev_priv->drm.irq); + flush_work(&dev_priv->hotplug.dig_port_work); + flush_work(&dev_priv->hotplug.hotplug_work); + seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); seq_printf(m, "Detected: %s\n", yesno(delayed_work_pending(&hotplug->reenable_work))); @@ -4641,24 +4672,122 @@ static const struct file_operations i915_hpd_storm_ctl_fops = { .write = i915_hpd_storm_ctl_write }; +static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + + seq_printf(m, "Enabled: %s\n", + yesno(dev_priv->hotplug.hpd_short_storm_enabled)); + + return 0; +} + +static int +i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file) +{ + return single_open(file, i915_hpd_short_storm_ctl_show, + inode->i_private); +} + +static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + struct i915_hotplug *hotplug = &dev_priv->hotplug; + char *newline; + char tmp[16]; + int i; + bool new_state; + + if (len >= sizeof(tmp)) + return -EINVAL; + + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + + tmp[len] = '\0'; + + /* Strip newline, if any */ + newline = strchr(tmp, '\n'); + if (newline) + *newline = '\0'; + + /* Reset to the "default" state for this system */ + if (strcmp(tmp, "reset") == 0) + new_state = !HAS_DP_MST(dev_priv); + else if (kstrtobool(tmp, &new_state) != 0) + return -EINVAL; + + DRM_DEBUG_KMS("%sabling HPD short storm detection\n", + new_state ? 
"En" : "Dis"); + + spin_lock_irq(&dev_priv->irq_lock); + hotplug->hpd_short_storm_enabled = new_state; + /* Reset the HPD storm stats so we don't accidentally trigger a storm */ + for_each_hpd_pin(i) + hotplug->stats[i].count = 0; + spin_unlock_irq(&dev_priv->irq_lock); + + /* Re-enable hpd immediately if we were in an irq storm */ + flush_delayed_work(&dev_priv->hotplug.reenable_work); + + return len; +} + +static const struct file_operations i915_hpd_short_storm_ctl_fops = { + .owner = THIS_MODULE, + .open = i915_hpd_short_storm_ctl_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = i915_hpd_short_storm_ctl_write, +}; + static int i915_drrs_ctl_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; struct drm_device *dev = &dev_priv->drm; - struct intel_crtc *intel_crtc; - struct intel_encoder *encoder; - struct intel_dp *intel_dp; + struct intel_crtc *crtc; if (INTEL_GEN(dev_priv) < 7) return -ENODEV; - drm_modeset_lock_all(dev); - for_each_intel_crtc(dev, intel_crtc) { - if (!intel_crtc->base.state->active || - !intel_crtc->config->has_drrs) - continue; + for_each_intel_crtc(dev, crtc) { + struct drm_connector_list_iter conn_iter; + struct intel_crtc_state *crtc_state; + struct drm_connector *connector; + struct drm_crtc_commit *commit; + int ret; + + ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); + if (ret) + return ret; + + crtc_state = to_intel_crtc_state(crtc->base.state); + + if (!crtc_state->base.active || + !crtc_state->has_drrs) + goto out; - for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) { + commit = crtc_state->base.commit; + if (commit) { + ret = wait_for_completion_interruptible(&commit->hw_done); + if (ret) + goto out; + } + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + struct intel_dp *intel_dp; + + if (!(crtc_state->base.connector_mask & + drm_connector_mask(connector))) + continue; + + encoder = intel_attached_encoder(connector); if (encoder->type != INTEL_OUTPUT_EDP) continue; @@ -4668,13 +4797,18 @@ static int i915_drrs_ctl_set(void *data, u64 val) intel_dp = enc_to_intel_dp(&encoder->base); if (val) intel_edp_drrs_enable(intel_dp, - intel_crtc->config); + crtc_state); else intel_edp_drrs_disable(intel_dp, - intel_crtc->config); + crtc_state); } + drm_connector_list_iter_end(&conn_iter); + +out: + drm_modeset_unlock(&crtc->base.mutex); + if (ret) + return ret; } - drm_modeset_unlock_all(dev); return 0; } @@ -4818,6 +4952,7 @@ static const struct i915_debugfs_files { {"i915_guc_log_level", &i915_guc_log_level_fops}, {"i915_guc_log_relay", &i915_guc_log_relay_fops}, {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, + {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops}, {"i915_ipc_status", &i915_ipc_status_fops}, {"i915_drrs_ctl", &i915_drrs_ctl_fops}, {"i915_edp_psr_debug", &i915_edp_psr_debug_fops} @@ -4899,13 +5034,10 @@ static int i915_dpcd_show(struct seq_file *m, void *data) continue; err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size); - if (err <= 0) { - DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", - size, b->offset, err); - continue; - } - - seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); + if (err < 0) + seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err); + else + seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf); } return 0; @@ -4934,6 +5066,28 @@ static int i915_panel_show(struct seq_file *m, void *data) } 
DEFINE_SHOW_ATTRIBUTE(i915_panel); +static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct intel_connector *intel_connector = to_intel_connector(connector); + + if (connector->status != connector_status_connected) + return -ENODEV; + + /* HDCP is supported by connector */ + if (!intel_connector->hdcp.shim) + return -EINVAL; + + seq_printf(m, "%s:%d HDCP version: ", connector->name, + connector->base.id); + seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ? + "None" : "HDCP1.4"); + seq_puts(m, "\n"); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); + /** * i915_debugfs_connector_add - add i915 specific connector debugfs files * @connector: pointer to a registered drm_connector @@ -4963,5 +5117,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector) connector, &i915_psr_sink_status_fops); } + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) { + debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root, + connector, &i915_hdcp_sink_capability_fops); + } + return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 44e2c0f5ec50..b310a897a4ad 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -53,6 +53,7 @@ #include "i915_vgpu.h" #include "intel_drv.h" #include "intel_uc.h" +#include "intel_workarounds.h" static struct drm_driver driver; @@ -287,7 +288,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv) * Use PCH_NOP (PCH but no South Display) for PCH platforms without * display. */ - if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) { + if (pch && !HAS_DISPLAY(dev_priv)) { DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n"); dev_priv->pch_type = PCH_NOP; dev_priv->pch_id = 0; @@ -345,7 +346,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = HAS_WT(dev_priv); break; case I915_PARAM_HAS_ALIASING_PPGTT: - value = USES_PPGTT(dev_priv); + value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL); break; case I915_PARAM_HAS_SEMAPHORES: value = HAS_LEGACY_SEMAPHORES(dev_priv); @@ -645,6 +646,13 @@ static int i915_load_modeset_init(struct drm_device *dev) if (i915_inject_load_failure()) return -ENODEV; + if (HAS_DISPLAY(dev_priv)) { + ret = drm_vblank_init(&dev_priv->drm, + INTEL_INFO(dev_priv)->num_pipes); + if (ret) + goto out; + } + intel_bios_init(dev_priv); /* If we have > 1 VGA cards, then we need to arbitrate access @@ -687,9 +695,9 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_modeset; - intel_setup_overlay(dev_priv); + intel_overlay_setup(dev_priv); - if (INTEL_INFO(dev_priv)->num_pipes == 0) + if (!HAS_DISPLAY(dev_priv)) return 0; ret = intel_fbdev_init(dev); @@ -699,6 +707,8 @@ static int i915_load_modeset_init(struct drm_device *dev) /* Only enable hotplug handling once the fbdev is fully set up. */ intel_hpd_init(dev_priv); + intel_init_ipc(dev_priv); + return 0; cleanup_gem: @@ -859,6 +869,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) pre |= IS_HSW_EARLY_SDV(dev_priv); pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0); pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST); + pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0); if (pre) { DRM_ERROR("This is a pre-production stepping. 
" @@ -1030,6 +1041,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) err_uncore: intel_uncore_fini(dev_priv); + i915_mmio_cleanup(dev_priv); err_bridge: pci_dev_put(dev_priv->bridge_dev); @@ -1049,17 +1061,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) static void intel_sanitize_options(struct drm_i915_private *dev_priv) { - /* - * i915.enable_ppgtt is read-only, so do an early pass to validate the - * user's requested state against the hardware/driver capabilities. We - * do this now so that we can print out any log messages once rather - * than every time we check intel_enable_ppgtt(). - */ - i915_modparams.enable_ppgtt = - intel_sanitize_enable_ppgtt(dev_priv, - i915_modparams.enable_ppgtt); - DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt); - intel_gvt_sanitize_options(dev_priv); } @@ -1175,8 +1176,6 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv) return -EINVAL; } - dram_info->valid_dimm = true; - /* * If any of the channel is single rank channel, worst case output * will be same as if single rank memory, so consider single rank @@ -1193,8 +1192,7 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv) return -EINVAL; } - if (ch0.is_16gb_dimm || ch1.is_16gb_dimm) - dram_info->is_16gb_dimm = true; + dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm; dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0, val_ch1, @@ -1314,7 +1312,6 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv) return -EINVAL; } - dram_info->valid_dimm = true; dram_info->valid = true; return 0; } @@ -1327,19 +1324,24 @@ intel_get_dram_info(struct drm_i915_private *dev_priv) int ret; dram_info->valid = false; - dram_info->valid_dimm = false; - dram_info->is_16gb_dimm = false; dram_info->rank = I915_DRAM_RANK_INVALID; dram_info->bandwidth_kbps = 0; dram_info->num_channels = 0; + /* + * Assume 16Gb DIMMs are present until proven otherwise. + * This is only used for the level 0 watermark latency + * w/a which does not apply to bxt/glk. + */ + dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv); + if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv)) return; /* Need to calculate bandwidth only for Gen9 */ if (IS_BROXTON(dev_priv)) ret = bxt_get_dram_info(dev_priv); - else if (INTEL_GEN(dev_priv) == 9) + else if (IS_GEN9(dev_priv)) ret = skl_get_dram_info(dev_priv); else ret = skl_dram_get_channels_info(dev_priv); @@ -1374,6 +1376,29 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) intel_device_info_runtime_init(mkwrite_device_info(dev_priv)); + if (HAS_PPGTT(dev_priv)) { + if (intel_vgpu_active(dev_priv) && + !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) { + i915_report_error(dev_priv, + "incompatible vGPU found, support for isolated ppGTT required\n"); + return -ENXIO; + } + } + + if (HAS_EXECLISTS(dev_priv)) { + /* + * Older GVT emulation depends upon intercepting CSB mmio, + * which we no longer use, preferring to use the HWSP cache + * instead. 
+ */ + if (intel_vgpu_active(dev_priv) && + !intel_vgpu_has_hwsp_emulation(dev_priv)) { + i915_report_error(dev_priv, + "old vGPU host found, support for HWSP emulation required\n"); + return -ENXIO; + } + } + intel_sanitize_options(dev_priv); i915_perf_init(dev_priv); @@ -1443,6 +1468,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) intel_uncore_sanitize(dev_priv); + intel_gt_init_workarounds(dev_priv); i915_gem_load_init_fences(dev_priv); /* On the 945G/GM, the chipset reports the MSI capability on the @@ -1542,7 +1568,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) } else DRM_ERROR("Failed to register driver for userspace access!\n"); - if (INTEL_INFO(dev_priv)->num_pipes) { + if (HAS_DISPLAY(dev_priv)) { /* Must be done after probing outputs */ intel_opregion_register(dev_priv); acpi_video_register(); @@ -1566,7 +1592,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) * We need to coordinate the hotplugs with the asynchronous fbdev * configuration, for which we use the fbdev->async_cookie. */ - if (INTEL_INFO(dev_priv)->num_pipes) + if (HAS_DISPLAY(dev_priv)) drm_kms_helper_poll_init(dev); intel_power_domains_enable(dev_priv); @@ -1629,14 +1655,16 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) (struct intel_device_info *)ent->driver_data; struct intel_device_info *device_info; struct drm_i915_private *i915; + int err; i915 = kzalloc(sizeof(*i915), GFP_KERNEL); if (!i915) - return NULL; + return ERR_PTR(-ENOMEM); - if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) { + err = drm_dev_init(&i915->drm, &driver, &pdev->dev); + if (err) { kfree(i915); - return NULL; + return ERR_PTR(err); } i915->drm.pdev = pdev; @@ -1649,8 +1677,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) device_info->device_id = pdev->device; BUILD_BUG_ON(INTEL_MAX_PLATFORMS > - sizeof(device_info->platform_mask) * BITS_PER_BYTE); - BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); + BITS_PER_TYPE(device_info->platform_mask)); + BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask)); return i915; } @@ -1685,8 +1713,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) int ret; dev_priv = i915_driver_create(pdev, ent); - if (!dev_priv) - return -ENOMEM; + if (IS_ERR(dev_priv)) + return PTR_ERR(dev_priv); /* Disable nuclear pageflip by default on pre-ILK */ if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) @@ -1710,26 +1738,12 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret < 0) goto out_cleanup_mmio; - /* - * TODO: move the vblank init and parts of modeset init steps into one - * of the i915_driver_init_/i915_driver_register functions according - * to the role/effect of the given init step. 
- */ - if (INTEL_INFO(dev_priv)->num_pipes) { - ret = drm_vblank_init(&dev_priv->drm, - INTEL_INFO(dev_priv)->num_pipes); - if (ret) - goto out_cleanup_hw; - } - ret = i915_load_modeset_init(&dev_priv->drm); if (ret < 0) goto out_cleanup_hw; i915_driver_register(dev_priv); - intel_init_ipc(dev_priv); - enable_rpm_wakeref_asserts(dev_priv); i915_welcome_messages(dev_priv); @@ -1781,7 +1795,6 @@ void i915_driver_unload(struct drm_device *dev) i915_reset_error_state(dev_priv); i915_gem_fini(dev_priv); - intel_fbc_cleanup_cfb(dev_priv); intel_power_domains_fini_hw(dev_priv); @@ -1919,9 +1932,7 @@ static int i915_drm_suspend(struct drm_device *dev) i915_save_state(dev_priv); opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; - intel_opregion_notify_adapter(dev_priv, opregion_target_state); - - intel_opregion_unregister(dev_priv); + intel_opregion_suspend(dev_priv, opregion_target_state); intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); @@ -1962,7 +1973,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) get_suspend_mode(dev_priv, hibernation)); ret = 0; - if (IS_GEN9_LP(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) bxt_enable_dc9(dev_priv); else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) hsw_enable_pc8(dev_priv); @@ -2040,7 +2051,6 @@ static int i915_drm_resume(struct drm_device *dev) i915_restore_state(dev_priv); intel_pps_unlock_regs_wa(dev_priv); - intel_opregion_setup(dev_priv); intel_init_pch_refclk(dev_priv); @@ -2082,12 +2092,10 @@ static int i915_drm_resume(struct drm_device *dev) * */ intel_hpd_init(dev_priv); - intel_opregion_register(dev_priv); + intel_opregion_resume(dev_priv); intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); - intel_opregion_notify_adapter(dev_priv, PCI_D0); - intel_power_domains_enable(dev_priv); enable_rpm_wakeref_asserts(dev_priv); @@ -2155,7 +2163,7 @@ static int i915_drm_resume_early(struct drm_device *dev) intel_uncore_resume_early(dev_priv); - if (IS_GEN9_LP(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) { gen9_sanitize_dc_state(dev_priv); bxt_disable_dc9(dev_priv); } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { @@ -2922,7 +2930,10 @@ static int intel_runtime_suspend(struct device *kdev) intel_uncore_suspend(dev_priv); ret = 0; - if (IS_GEN9_LP(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { + icl_display_core_uninit(dev_priv); + bxt_enable_dc9(dev_priv); + } else if (IS_GEN9_LP(dev_priv)) { bxt_display_core_uninit(dev_priv); bxt_enable_dc9(dev_priv); } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { @@ -3007,7 +3018,18 @@ static int intel_runtime_resume(struct device *kdev) if (intel_uncore_unclaimed_mmio(dev_priv)) DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); - if (IS_GEN9_LP(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { + bxt_disable_dc9(dev_priv); + icl_display_core_init(dev_priv, true); + if (dev_priv->csr.dmc_payload) { + if (dev_priv->csr.allowed_dc_mask & + DC_STATE_EN_UPTO_DC6) + skl_enable_dc6(dev_priv); + else if (dev_priv->csr.allowed_dc_mask & + DC_STATE_EN_UPTO_DC5) + gen9_enable_dc5(dev_priv); + } + } else if (IS_GEN9_LP(dev_priv)) { bxt_disable_dc9(dev_priv); bxt_display_core_init(dev_priv, true); if (dev_priv->csr.dmc_payload && diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8624b4bdc242..b1c31967194b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -53,7 +53,9 @@ #include <drm/drm_auth.h> #include 
<drm/drm_cache.h> #include <drm/drm_util.h> +#include <drm/drm_dsc.h> +#include "i915_fixed.h" #include "i915_params.h" #include "i915_reg.h" #include "i915_utils.h" @@ -67,6 +69,7 @@ #include "intel_ringbuffer.h" #include "intel_uncore.h" #include "intel_wopcm.h" +#include "intel_workarounds.h" #include "intel_uc.h" #include "i915_gem.h" @@ -87,8 +90,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20180921" -#define DRIVER_TIMESTAMP 1537521997 +#define DRIVER_DATE "20181204" +#define DRIVER_TIMESTAMP 1543944377 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions @@ -127,144 +130,6 @@ bool i915_error_injected(void); __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \ fmt, ##__VA_ARGS__) -typedef struct { - uint32_t val; -} uint_fixed_16_16_t; - -#define FP_16_16_MAX ({ \ - uint_fixed_16_16_t fp; \ - fp.val = UINT_MAX; \ - fp; \ -}) - -static inline bool is_fixed16_zero(uint_fixed_16_16_t val) -{ - if (val.val == 0) - return true; - return false; -} - -static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val) -{ - uint_fixed_16_16_t fp; - - WARN_ON(val > U16_MAX); - - fp.val = val << 16; - return fp; -} - -static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp) -{ - return DIV_ROUND_UP(fp.val, 1 << 16); -} - -static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp) -{ - return fp.val >> 16; -} - -static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1, - uint_fixed_16_16_t min2) -{ - uint_fixed_16_16_t min; - - min.val = min(min1.val, min2.val); - return min; -} - -static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1, - uint_fixed_16_16_t max2) -{ - uint_fixed_16_16_t max; - - max.val = max(max1.val, max2.val); - return max; -} - -static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val) -{ - uint_fixed_16_16_t fp; - WARN_ON(val > U32_MAX); - fp.val = (uint32_t) val; - return fp; -} - -static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val, - uint_fixed_16_16_t d) -{ - return DIV_ROUND_UP(val.val, d.val); -} - -static inline uint32_t mul_round_up_u32_fixed16(uint32_t val, - uint_fixed_16_16_t mul) -{ - uint64_t intermediate_val; - - intermediate_val = (uint64_t) val * mul.val; - intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16); - WARN_ON(intermediate_val > U32_MAX); - return (uint32_t) intermediate_val; -} - -static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val, - uint_fixed_16_16_t mul) -{ - uint64_t intermediate_val; - - intermediate_val = (uint64_t) val.val * mul.val; - intermediate_val = intermediate_val >> 16; - return clamp_u64_to_fixed16(intermediate_val); -} - -static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d) -{ - uint64_t interm_val; - - interm_val = (uint64_t)val << 16; - interm_val = DIV_ROUND_UP_ULL(interm_val, d); - return clamp_u64_to_fixed16(interm_val); -} - -static inline uint32_t div_round_up_u32_fixed16(uint32_t val, - uint_fixed_16_16_t d) -{ - uint64_t interm_val; - - interm_val = (uint64_t)val << 16; - interm_val = DIV_ROUND_UP_ULL(interm_val, d.val); - WARN_ON(interm_val > U32_MAX); - return (uint32_t) interm_val; -} - -static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val, - uint_fixed_16_16_t mul) -{ - uint64_t intermediate_val; - - intermediate_val = (uint64_t) val * mul.val; - return clamp_u64_to_fixed16(intermediate_val); -} - -static inline uint_fixed_16_16_t 
add_fixed16(uint_fixed_16_16_t add1, - uint_fixed_16_16_t add2) -{ - uint64_t interm_sum; - - interm_sum = (uint64_t) add1.val + add2.val; - return clamp_u64_to_fixed16(interm_sum); -} - -static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1, - uint32_t add2) -{ - uint64_t interm_sum; - uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2); - - interm_sum = (uint64_t) add1.val + interm_add2.val; - return clamp_u64_to_fixed16(interm_sum); -} - enum hpd_pin { HPD_NONE = 0, HPD_TV = HPD_NONE, /* TV is known to be unreliable */ @@ -283,7 +148,8 @@ enum hpd_pin { #define for_each_hpd_pin(__pin) \ for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) -#define HPD_STORM_DEFAULT_THRESHOLD 5 +/* Threshold == 5 for long IRQs, 50 for short */ +#define HPD_STORM_DEFAULT_THRESHOLD 50 struct i915_hotplug { struct work_struct hotplug_work; @@ -308,6 +174,8 @@ struct i915_hotplug { bool poll_enabled; unsigned int hpd_storm_threshold; + /* Whether or not to count short HPD IRQs in HPD storms */ + u8 hpd_short_storm_enabled; /* * if we get a HPD irq from DP and a HPD irq from non-DP @@ -465,8 +333,10 @@ struct drm_i915_display_funcs { struct intel_csr { struct work_struct work; const char *fw_path; + uint32_t required_version; + uint32_t max_fw_size; /* bytes */ uint32_t *dmc_payload; - uint32_t dmc_fw_size; + uint32_t dmc_fw_size; /* dwords */ uint32_t version; uint32_t mmio_count; i915_reg_t mmioaddr[8]; @@ -546,6 +416,8 @@ struct intel_fbc { int adjusted_y; int y; + + uint16_t pixel_blend_mode; } plane; struct { @@ -624,17 +496,19 @@ struct i915_psr { bool sink_support; bool prepared, enabled; struct intel_dp *dp; + enum pipe pipe; bool active; struct work_struct work; unsigned busy_frontbuffer_bits; bool sink_psr2_support; bool link_standby; bool colorimetry_support; - bool alpm; bool psr2_enabled; u8 sink_sync_latency; ktime_t last_entry_attempt; ktime_t last_exit; + bool sink_not_reliable; + bool irq_aux_error; }; enum intel_pch { @@ -918,6 +792,11 @@ struct i915_power_well_desc { /* The pw is backing the VGA functionality */ bool has_vga:1; bool has_fuses:1; + /* + * The pw is for an ICL+ TypeC PHY port in + * Thunderbolt mode. + */ + bool is_tc_tbt:1; } hsw; }; const struct i915_power_well_ops *ops; @@ -1042,17 +921,6 @@ struct i915_gem_mm { #define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? 
*/ -#define DP_AUX_A 0x40 -#define DP_AUX_B 0x10 -#define DP_AUX_C 0x20 -#define DP_AUX_D 0x30 -#define DP_AUX_E 0x50 -#define DP_AUX_F 0x60 - -#define DDC_PIN_B 0x05 -#define DDC_PIN_C 0x04 -#define DDC_PIN_D 0x06 - struct ddi_vbt_port_info { int max_tmds_clock; @@ -1099,6 +967,7 @@ struct intel_vbt_data { unsigned int panel_type:4; int lvds_ssc_freq; unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ + enum drm_panel_orientation orientation; enum drrs_support_type drrs_type; @@ -1144,6 +1013,7 @@ struct intel_vbt_data { u8 *data; const u8 *sequence[MIPI_SEQ_MAX]; u8 *deassert_seq; /* Used by fixup_mipi_sequences() */ + enum drm_panel_orientation orientation; } dsi; int crt_ddc_pin; @@ -1228,9 +1098,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, } struct skl_ddb_allocation { - /* packed/y */ - struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; - struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES]; u8 enabled_slices; /* GEN11 has configurable 2 slices */ }; @@ -1240,9 +1107,9 @@ struct skl_ddb_values { }; struct skl_wm_level { - bool plane_en; uint16_t plane_res_b; uint8_t plane_res_l; + bool plane_en; }; /* Stores plane specific WM parameters */ @@ -1323,20 +1190,6 @@ struct i915_frontbuffer_tracking { unsigned flip_bits; }; -struct i915_wa_reg { - u32 addr; - u32 value; - /* bitmask representing WA bits */ - u32 mask; -}; - -#define I915_MAX_WA_REGS 16 - -struct i915_workarounds { - struct i915_wa_reg reg[I915_MAX_WA_REGS]; - u32 count; -}; - struct i915_virtual_gpu { bool active; u32 caps; @@ -1520,30 +1373,12 @@ struct i915_oa_ops { bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr); /** - * @init_oa_buffer: Resets the head and tail pointers of the - * circular buffer for periodic OA reports. - * - * Called when first opening a stream for OA metrics, but also may be - * called in response to an OA buffer overflow or other error - * condition. - * - * Note it may be necessary to clear the full OA buffer here as part of - * maintaining the invariable that new reports must be written to - * zeroed memory for us to be able to reliable detect if an expected - * report has not yet landed in memory. (At least on Haswell the OA - * buffer tail pointer is not synchronized with reports being visible - * to the CPU) - */ - void (*init_oa_buffer)(struct drm_i915_private *dev_priv); - - /** * @enable_metric_set: Selects and applies any MUX configuration to set * up the Boolean and Custom (B/C) counters that are part of the * counter reports being sampled. May apply system constraints such as * disabling EU clock gating as required. 
*/ - int (*enable_metric_set)(struct drm_i915_private *dev_priv, - const struct i915_oa_config *oa_config); + int (*enable_metric_set)(struct i915_perf_stream *stream); /** * @disable_metric_set: Remove system constraints associated with using @@ -1554,12 +1389,12 @@ struct i915_oa_ops { /** * @oa_enable: Enable periodic sampling */ - void (*oa_enable)(struct drm_i915_private *dev_priv); + void (*oa_enable)(struct i915_perf_stream *stream); /** * @oa_disable: Disable periodic sampling */ - void (*oa_disable)(struct drm_i915_private *dev_priv); + void (*oa_disable)(struct i915_perf_stream *stream); /** * @read: Copy data from the circular OA buffer into a given userspace @@ -1804,7 +1639,7 @@ struct drm_i915_private { int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; - struct i915_workarounds workarounds; + struct i915_wa_list gt_wa_list; struct i915_frontbuffer_tracking fb_tracking; @@ -1948,7 +1783,6 @@ struct drm_i915_private { struct dram_info { bool valid; - bool valid_dimm; bool is_16gb_dimm; u8 num_channels; enum dram_rank { @@ -2149,6 +1983,8 @@ struct drm_i915_private { struct delayed_work idle_work; ktime_t last_init_time; + + struct i915_vma *scratch; } gt; /* perform PHY state sanity checks? */ @@ -2323,6 +2159,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg) (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \ (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0) +bool i915_sg_trim(struct sg_table *orig_st); + static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg) { unsigned int page_sizes; @@ -2368,20 +2206,12 @@ intel_info(const struct drm_i915_private *dev_priv) #define REVID_FOREVER 0xff #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) -#define GEN_FOREVER (0) - #define INTEL_GEN_MASK(s, e) ( \ BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \ BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \ - GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \ - (s) != GEN_FOREVER ? (s) - 1 : 0) \ -) + GENMASK((e) - 1, (s) - 1)) -/* - * Returns true if Gen is in inclusive range [Start, End]. - * - * Use GEN_FOREVER for unbound start and or end. 
- */ +/* Returns true if Gen is in inclusive range [Start, End] */ #define IS_GEN(dev_priv, s, e) \ (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e)))) @@ -2462,6 +2292,8 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ INTEL_DEVID(dev_priv) == 0x5915 || \ INTEL_DEVID(dev_priv) == 0x591E) +#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \ + INTEL_DEVID(dev_priv) == 0x87C0) #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ (dev_priv)->info.gt == 2) #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ @@ -2593,17 +2425,22 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) -#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt) -#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2) -#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3) +#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt) +#define HAS_PPGTT(dev_priv) \ + (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE) +#define HAS_FULL_PPGTT(dev_priv) \ + (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL) +#define HAS_FULL_48BIT_PPGTT(dev_priv) \ + (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL) + #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ GEM_BUG_ON((sizes) == 0); \ ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \ }) -#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay) +#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.display.has_overlay) #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ - ((dev_priv)->info.overlay_needs_physical) + ((dev_priv)->info.display.overlay_needs_physical) /* Early gen2 have a totally busted CS tlb and require pinned batches. */ #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv)) @@ -2624,31 +2461,31 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \ !(IS_I915G(dev_priv) || \ IS_I915GM(dev_priv))) -#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv) -#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug) +#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.display.supports_tv) +#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.display.has_hotplug) #define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) -#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc) +#define HAS_FBC(dev_priv) ((dev_priv)->info.display.has_fbc) #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7) #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) -#define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst) +#define HAS_DP_MST(dev_priv) ((dev_priv)->info.display.has_dp_mst) -#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi) +#define HAS_DDI(dev_priv) ((dev_priv)->info.display.has_ddi) #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg) -#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr) +#define HAS_PSR(dev_priv) ((dev_priv)->info.display.has_psr) #define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6) #define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p) #define HAS_RC6pp(dev_priv) (false) /* HW was never validated */ -#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr) +#define HAS_CSR(dev_priv) ((dev_priv)->info.display.has_csr) #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc) -#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc) +#define 
HAS_IPC(dev_priv) ((dev_priv)->info.display.has_ipc) /* * For now, anything with a GuC requires uCode loading, and then supports @@ -2709,7 +2546,7 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP) #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE) -#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display) +#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display) #define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9) @@ -2721,6 +2558,8 @@ intel_info(const struct drm_i915_private *dev_priv) #define GT_FREQUENCY_MULTIPLIER 50 #define GEN9_FREQ_SCALER 3 +#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0) + #include "i915_trace.h" static inline bool intel_vtd_active(void) @@ -2743,9 +2582,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv) return IS_BROXTON(dev_priv) && intel_vtd_active(); } -int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, - int enable_ppgtt); - /* i915_drv.c */ void __printf(3, 4) __i915_printk(struct drm_i915_private *dev_priv, const char *level, @@ -3230,7 +3066,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj, int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int flags, const struct i915_sched_attr *attr); -#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX +#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX) int __must_check i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); @@ -3462,6 +3298,7 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, enum port port); +enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port); /* intel_acpi.c */ #ifdef CONFIG_ACPI @@ -3483,8 +3320,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) extern void intel_modeset_init_hw(struct drm_device *dev); extern int intel_modeset_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); -extern int intel_connector_register(struct drm_connector *); -extern void intel_connector_unregister(struct drm_connector *); extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state); extern void intel_display_resume(struct drm_device *dev); @@ -3497,6 +3332,9 @@ extern void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive); extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); +void intel_dsc_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_dsc_disable(const struct intel_crtc_state *crtc_state); int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); @@ -3584,6 +3422,12 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, void vlv_phy_reset_lanes(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state); +/* intel_combo_phy.c */ +void icl_combo_phys_init(struct drm_i915_private *dev_priv); +void icl_combo_phys_uninit(struct drm_i915_private *dev_priv); +void cnl_combo_phys_init(struct drm_i915_private *dev_priv); +void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv); + int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, @@ -3871,4 +3715,9 @@ 
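/*
 * Worked example (a sketch, not part of the commit) for the 16.16
 * fixed-point helpers that move out of i915_drv.h above into the new
 * i915_fixed.h below. div_fixed16(7, 2) yields 3.5 as 0x00038000;
 * fixed16_to_u32() truncates that to 3, while fixed16_to_u32_round_up()
 * gives 4. A typical watermark-style use is a rounded-up scaling:
 */
static inline u32 example_scale_round_up(u32 val, u32 num, u32 den)
{
	/* num/den as a 16.16 ratio, rounded up */
	uint_fixed_16_16_t ratio = div_fixed16(num, den);

	/* DIV_ROUND_UP((u64)val * ratio.val, 1 << 16), WARNing past U32_MAX */
	return mul_round_up_u32_fixed16(val, ratio);
}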
static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) return I915_HWS_CSB_WRITE_INDEX; } +static inline u32 i915_scratch_offset(const struct drm_i915_private *i915) +{ + return i915_ggtt_offset(i915->gt.scratch); +} + #endif diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h new file mode 100644 index 000000000000..591dd89ba7af --- /dev/null +++ b/drivers/gpu/drm/i915/i915_fixed.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2018 Intel Corporation + */ + +#ifndef _I915_FIXED_H_ +#define _I915_FIXED_H_ + +typedef struct { + u32 val; +} uint_fixed_16_16_t; + +#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX }) + +static inline bool is_fixed16_zero(uint_fixed_16_16_t val) +{ + return val.val == 0; +} + +static inline uint_fixed_16_16_t u32_to_fixed16(u32 val) +{ + uint_fixed_16_16_t fp = { .val = val << 16 }; + + WARN_ON(val > U16_MAX); + + return fp; +} + +static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp) +{ + return DIV_ROUND_UP(fp.val, 1 << 16); +} + +static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp) +{ + return fp.val >> 16; +} + +static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1, + uint_fixed_16_16_t min2) +{ + uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) }; + + return min; +} + +static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1, + uint_fixed_16_16_t max2) +{ + uint_fixed_16_16_t max = { .val = max(max1.val, max2.val) }; + + return max; +} + +static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val) +{ + uint_fixed_16_16_t fp = { .val = (u32)val }; + + WARN_ON(val > U32_MAX); + + return fp; +} + +static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val, + uint_fixed_16_16_t d) +{ + return DIV_ROUND_UP(val.val, d.val); +} + +static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul) +{ + u64 tmp; + + tmp = (u64)val * mul.val; + tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16); + WARN_ON(tmp > U32_MAX); + + return (u32)tmp; +} + +static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val, + uint_fixed_16_16_t mul) +{ + u64 tmp; + + tmp = (u64)val.val * mul.val; + tmp = tmp >> 16; + + return clamp_u64_to_fixed16(tmp); +} + +static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d) +{ + u64 tmp; + + tmp = (u64)val << 16; + tmp = DIV_ROUND_UP_ULL(tmp, d); + + return clamp_u64_to_fixed16(tmp); +} + +static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d) +{ + u64 tmp; + + tmp = (u64)val << 16; + tmp = DIV_ROUND_UP_ULL(tmp, d.val); + WARN_ON(tmp > U32_MAX); + + return (u32)tmp; +} + +static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul) +{ + u64 tmp; + + tmp = (u64)val * mul.val; + + return clamp_u64_to_fixed16(tmp); +} + +static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1, + uint_fixed_16_16_t add2) +{ + u64 tmp; + + tmp = (u64)add1.val + add2.val; + + return clamp_u64_to_fixed16(tmp); +} + +static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1, + u32 add2) +{ + uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2); + u64 tmp; + + tmp = (u64)add1.val + tmp_add2.val; + + return clamp_u64_to_fixed16(tmp); +} + +#endif /* _I915_FIXED_H_ */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0c8aa57ce83b..d36a9755ad91 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1740,6 +1740,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, */ err = 
i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | + I915_WAIT_PRIORITY | (write_domain ? I915_WAIT_ALL : 0), MAX_SCHEDULE_TIMEOUT, to_rps_client(file)); @@ -2381,11 +2382,23 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj) invalidate_mapping_pages(mapping, 0, (loff_t)-1); } +/* + * Move pages to appropriate lru and release the pagevec, decrementing the + * ref count of those pages. + */ +static void check_release_pagevec(struct pagevec *pvec) +{ + check_move_unevictable_pages(pvec); + __pagevec_release(pvec); + cond_resched(); +} + static void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, struct sg_table *pages) { struct sgt_iter sgt_iter; + struct pagevec pvec; struct page *page; __i915_gem_object_release_shmem(obj, pages, true); @@ -2395,6 +2408,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_save_bit_17_swizzle(obj, pages); + mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping); + + pagevec_init(&pvec); for_each_sgt_page(page, sgt_iter, pages) { if (obj->mm.dirty) set_page_dirty(page); @@ -2402,8 +2418,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, if (obj->mm.madv == I915_MADV_WILLNEED) mark_page_accessed(page); - put_page(page); + if (!pagevec_add(&pvec, page)) + check_release_pagevec(&pvec); } + if (pagevec_count(&pvec)) + check_release_pagevec(&pvec); obj->mm.dirty = false; sg_free_table(pages); @@ -2483,7 +2502,7 @@ unlock: mutex_unlock(&obj->mm.lock); } -static bool i915_sg_trim(struct sg_table *orig_st) +bool i915_sg_trim(struct sg_table *orig_st) { struct sg_table new_st; struct scatterlist *sg, *new_sg; @@ -2524,6 +2543,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) unsigned long last_pfn = 0; /* suppress gcc warning */ unsigned int max_segment = i915_sg_segment_size(); unsigned int sg_page_sizes; + struct pagevec pvec; gfp_t noreclaim; int ret; @@ -2559,6 +2579,7 @@ rebuild_st: * Fail silently without starting the shrinker */ mapping = obj->base.filp->f_mapping; + mapping_set_unevictable(mapping); noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); noreclaim |= __GFP_NORETRY | __GFP_NOWARN; @@ -2573,6 +2594,7 @@ rebuild_st: gfp_t gfp = noreclaim; do { + cond_resched(); page = shmem_read_mapping_page_gfp(mapping, i, gfp); if (likely(!IS_ERR(page))) break; @@ -2583,7 +2605,6 @@ rebuild_st: } i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++); - cond_resched(); /* * We've tried hard to allocate the memory by reaping @@ -2673,8 +2694,14 @@ rebuild_st: err_sg: sg_mark_end(sg); err_pages: - for_each_sgt_page(page, sgt_iter, st) - put_page(page); + mapping_clear_unevictable(mapping); + pagevec_init(&pvec); + for_each_sgt_page(page, sgt_iter, st) { + if (!pagevec_add(&pvec, page)) + check_release_pagevec(&pvec); + } + if (pagevec_count(&pvec)) + check_release_pagevec(&pvec); sg_free_table(st); kfree(st); @@ -3282,16 +3309,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) static void nop_submit_request(struct i915_request *request) { - GEM_TRACE("%s fence %llx:%d -> -EIO\n", - request->engine->name, - request->fence.context, request->fence.seqno); - dma_fence_set_error(&request->fence, -EIO); - - i915_request_submit(request); -} - -static void nop_complete_submit_request(struct i915_request *request) -{ unsigned long flags; GEM_TRACE("%s fence %llx:%d -> -EIO\n", @@ -3327,57 +3344,33 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) * rolling the global seqno 
forward (since this would complete requests * for which we haven't set the fence error to EIO yet). */ - for_each_engine(engine, i915, id) { + for_each_engine(engine, i915, id) i915_gem_reset_prepare_engine(engine); - engine->submit_request = nop_submit_request; - engine->schedule = NULL; - } - i915->caps.scheduler = 0; - /* Even if the GPU reset fails, it should still stop the engines */ if (INTEL_GEN(i915) >= 5) intel_gpu_reset(i915, ALL_ENGINES); - /* - * Make sure no one is running the old callback before we proceed with - * cancelling requests and resetting the completion tracking. Otherwise - * we might submit a request to the hardware which never completes. - */ - synchronize_rcu(); - for_each_engine(engine, i915, id) { - /* Mark all executing requests as skipped */ - engine->cancel_requests(engine); - - /* - * Only once we've force-cancelled all in-flight requests can we - * start to complete all requests. - */ - engine->submit_request = nop_complete_submit_request; + engine->submit_request = nop_submit_request; + engine->schedule = NULL; } + i915->caps.scheduler = 0; /* * Make sure no request can slip through without getting completed by * either this call here to intel_engine_init_global_seqno, or the one - * in nop_complete_submit_request. + * in nop_submit_request. */ synchronize_rcu(); - for_each_engine(engine, i915, id) { - unsigned long flags; - - /* - * Mark all pending requests as complete so that any concurrent - * (lockless) lookup doesn't try and wait upon the request as we - * reset it. - */ - spin_lock_irqsave(&engine->timeline.lock, flags); - intel_engine_init_global_seqno(engine, - intel_engine_last_submit(engine)); - spin_unlock_irqrestore(&engine->timeline.lock, flags); + /* Mark all executing requests as skipped */ + for_each_engine(engine, i915, id) + engine->cancel_requests(engine); + for_each_engine(engine, i915, id) { i915_gem_reset_finish_engine(engine); + intel_engine_wakeup(engine); } out: @@ -3530,6 +3523,8 @@ static void __sleep_rcu(struct rcu_head *rcu) struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu); struct drm_i915_private *i915 = s->i915; + destroy_rcu_head(&s->rcu); + if (same_epoch(i915, s->epoch)) { INIT_WORK(&s->work, __sleep_work); queue_work(i915->wq, &s->work); @@ -3646,6 +3641,7 @@ out_rearm: if (same_epoch(dev_priv, epoch)) { struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL); if (s) { + init_rcu_head(&s->rcu); s->i915 = dev_priv; s->epoch = epoch; call_rcu(&s->rcu, __sleep_rcu); @@ -3743,7 +3739,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) start = ktime_get(); ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_PRIORITY | + I915_WAIT_ALL, to_wait_timeout(args->timeout_ns), to_rps_client(file)); @@ -4710,6 +4708,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, INIT_LIST_HEAD(&obj->lut_list); INIT_LIST_HEAD(&obj->batch_pool_link); + init_rcu_head(&obj->rcu); + obj->ops = ops; reservation_object_init(&obj->__builtin_resv); @@ -4977,6 +4977,13 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head) struct drm_i915_private *i915 = to_i915(obj->base.dev); /* + * We reuse obj->rcu for the freed list, so we had better not treat + * it like a rcu_head from this point forwards. And we expect all + * objects to be freed via this path. 
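+	 *
+	 * Roughly, the lifecycle being paired up here is (a sketch only; the
+	 * init_rcu_head() side is the call added to i915_gem_object_init()):
+	 *
+	 *	init_rcu_head(&obj->rcu);	object construction
+	 *	call_rcu(&obj->rcu, ...);	final reference dropped
+	 *	destroy_rcu_head(&obj->rcu);	here, before obj->rcu is reused
+	 *					as a plain freed-list link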
+ */ + destroy_rcu_head(&obj->rcu); + + /* * Since we require blocking on struct_mutex to unbind the freed * object from the GPU before releasing resources back to the * system, we can not do that directly from the RCU callback (which may @@ -5293,19 +5300,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); - if (HAS_PCH_NOP(dev_priv)) { - if (IS_IVYBRIDGE(dev_priv)) { - u32 temp = I915_READ(GEN7_MSG_CTL); - temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); - I915_WRITE(GEN7_MSG_CTL, temp); - } else if (INTEL_GEN(dev_priv) >= 7) { - u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT); - temp &= ~RESET_PCH_HANDSHAKE_ENABLE; - I915_WRITE(HSW_NDE_RSTWRN_OPT, temp); - } - } - - intel_gt_workarounds_apply(dev_priv); + /* Apply the GT workarounds... */ + intel_gt_apply_workarounds(dev_priv); + /* ...and determine whether they are sticking. */ + intel_gt_verify_workarounds(dev_priv, "init"); i915_gem_init_swizzling(dev_priv); @@ -5500,6 +5498,44 @@ err_active: goto out_ctx; } +static int +i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int ret; + + obj = i915_gem_object_create_stolen(i915, size); + if (!obj) + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate scratch page\n"); + return PTR_ERR(obj); + } + + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_unref; + } + + ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (ret) + goto err_unref; + + i915->gt.scratch = vma; + return 0; + +err_unref: + i915_gem_object_put(obj); + return ret; +} + +static void i915_gem_fini_scratch(struct drm_i915_private *i915) +{ + i915_vma_unpin_and_release(&i915->gt.scratch, 0); +} + int i915_gem_init(struct drm_i915_private *dev_priv) { int ret; @@ -5546,12 +5582,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv) goto err_unlock; } - ret = i915_gem_contexts_init(dev_priv); + ret = i915_gem_init_scratch(dev_priv, + IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE); if (ret) { GEM_BUG_ON(ret == -EIO); goto err_ggtt; } + ret = i915_gem_contexts_init(dev_priv); + if (ret) { + GEM_BUG_ON(ret == -EIO); + goto err_scratch; + } + ret = intel_engines_init(dev_priv); if (ret) { GEM_BUG_ON(ret == -EIO); @@ -5624,6 +5667,8 @@ err_pm: err_context: if (ret != -EIO) i915_gem_contexts_fini(dev_priv); +err_scratch: + i915_gem_fini_scratch(dev_priv); err_ggtt: err_unlock: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); @@ -5675,8 +5720,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv) intel_uc_fini(dev_priv); i915_gem_cleanup_engines(dev_priv); i915_gem_contexts_fini(dev_priv); + i915_gem_fini_scratch(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); + intel_wa_list_free(&dev_priv->gt_wa_list); + intel_cleanup_gt_powersave(dev_priv); intel_uc_fini_misc(dev_priv); @@ -5951,7 +5999,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, * the bits. 
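	 *
	 * (BITS_PER_TYPE(t) is defined as sizeof(t) * BITS_PER_BYTE, so the
	 * assertion below is unchanged in effect, just stated more directly.)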
*/ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > - sizeof(atomic_t) * BITS_PER_BYTE); + BITS_PER_TYPE(atomic_t)); if (old) { WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 599c4f6eb1ea..b0e4b976880c 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -47,17 +47,19 @@ struct drm_i915_private; #define GEM_DEBUG_DECL(var) var #define GEM_DEBUG_EXEC(expr) expr #define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr) +#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr) #else #define GEM_SHOW_DEBUG() (0) #define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) -#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0) +#define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); }) #define GEM_DEBUG_DECL(var) #define GEM_DEBUG_EXEC(expr) do { } while (0) #define GEM_DEBUG_BUG_ON(expr) +#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; }) #endif #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index f772593b99ab..371c07087095 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv, kref_init(&ctx->ref); list_add_tail(&ctx->link, &dev_priv->contexts.list); ctx->i915 = dev_priv; - ctx->sched.priority = I915_PRIORITY_NORMAL; + ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { struct intel_context *ce = &ctx->__engine[n]; @@ -414,7 +414,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, if (IS_ERR(ctx)) return ctx; - if (USES_FULL_PPGTT(dev_priv)) { + if (HAS_FULL_PPGTT(dev_priv)) { struct i915_hw_ppgtt *ppgtt; ppgtt = i915_ppgtt_create(dev_priv, file_priv); @@ -457,7 +457,7 @@ i915_gem_context_create_gvt(struct drm_device *dev) if (ret) return ERR_PTR(ret); - ctx = __create_hw_context(to_i915(dev), NULL); + ctx = i915_gem_create_context(to_i915(dev), NULL); if (IS_ERR(ctx)) goto out; @@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) } i915_gem_context_clear_bannable(ctx); - ctx->sched.priority = prio; + ctx->sched.priority = I915_USER_PRIORITY(prio); ctx->ring_size = PAGE_SIZE; GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); @@ -535,16 +535,12 @@ static bool needs_preempt_context(struct drm_i915_private *i915) int i915_gem_contexts_init(struct drm_i915_private *dev_priv) { struct i915_gem_context *ctx; - int ret; /* Reassure ourselves we are only called once */ GEM_BUG_ON(dev_priv->kernel_context); GEM_BUG_ON(dev_priv->preempt_context); - ret = intel_ctx_workarounds_init(dev_priv); - if (ret) - return ret; - + intel_engine_init_ctx_wa(dev_priv->engine[RCS]); init_contexts(dev_priv); /* lowest priority; idle task */ @@ -879,7 +875,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, args->value = i915_gem_context_is_bannable(ctx); break; case I915_CONTEXT_PARAM_PRIORITY: - args->value = ctx->sched.priority; + args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT; break; default: ret = -EINVAL; @@ -948,7 +944,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, !capable(CAP_SYS_NICE)) ret = -EPERM; else - ctx->sched.priority = priority; + ctx->sched.priority = + I915_USER_PRIORITY(priority); } break; diff --git a/drivers/gpu/drm/i915/i915_gem_context.h 
b/drivers/gpu/drm/i915/i915_gem_context.h index 08165f6a0a84..f6d870b1f73e 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -163,6 +163,7 @@ struct i915_gem_context { /** engine: per-engine logical HW state */ struct intel_context { struct i915_gem_context *gem_context; + struct intel_engine_cs *active; struct i915_vma *state; struct intel_ring *ring; u32 *lrc_reg_state; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 09187286d346..786d719e652d 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -460,7 +460,7 @@ eb_validate_vma(struct i915_execbuffer *eb, * any non-page-aligned or non-canonical addresses. */ if (unlikely(entry->flags & EXEC_OBJECT_PINNED && - entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK))) + entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK))) return -EINVAL; /* pad_to_size was once a reserved field, so sanitize it */ @@ -2186,7 +2186,7 @@ signal_fence_array(struct i915_execbuffer *eb, if (!(flags & I915_EXEC_FENCE_SIGNAL)) continue; - drm_syncobj_replace_fence(syncobj, 0, fence); + drm_syncobj_replace_fence(syncobj, fence); } } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 56c7f8637311..add1fe7aeb93 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -133,55 +133,6 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915) i915->ggtt.invalidate(i915); } -int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, - int enable_ppgtt) -{ - bool has_full_ppgtt; - bool has_full_48bit_ppgtt; - - if (!dev_priv->info.has_aliasing_ppgtt) - return 0; - - has_full_ppgtt = dev_priv->info.has_full_ppgtt; - has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt; - - if (intel_vgpu_active(dev_priv)) { - /* GVT-g has no support for 32bit ppgtt */ - has_full_ppgtt = false; - has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv); - } - - /* - * We don't allow disabling PPGTT for gen9+ as it's a requirement for - * execlists, the sole mechanism available to submit work. - */ - if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9) - return 0; - - if (enable_ppgtt == 1) - return 1; - - if (enable_ppgtt == 2 && has_full_ppgtt) - return 2; - - if (enable_ppgtt == 3 && has_full_48bit_ppgtt) - return 3; - - /* Disable ppgtt on SNB if VT-d is on. 
*/ - if (IS_GEN6(dev_priv) && intel_vtd_active()) { - DRM_INFO("Disabling PPGTT because VT-d is on\n"); - return 0; - } - - if (has_full_48bit_ppgtt) - return 3; - - if (has_full_ppgtt) - return 2; - - return 1; -} - static int ppgtt_bind_vma(struct i915_vma *vma, enum i915_cache_level cache_level, u32 unused) @@ -235,9 +186,9 @@ static void clear_pages(struct i915_vma *vma) memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); } -static gen8_pte_t gen8_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) +static u64 gen8_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) { gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; @@ -274,9 +225,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr, #define gen8_pdpe_encode gen8_pde_encode #define gen8_pml4e_encode gen8_pde_encode -static gen6_pte_t snb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 unused) +static u64 snb_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) { gen6_pte_t pte = GEN6_PTE_VALID; pte |= GEN6_PTE_ADDR_ENCODE(addr); @@ -296,9 +247,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr, return pte; } -static gen6_pte_t ivb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 unused) +static u64 ivb_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) { gen6_pte_t pte = GEN6_PTE_VALID; pte |= GEN6_PTE_ADDR_ENCODE(addr); @@ -320,9 +271,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr, return pte; } -static gen6_pte_t byt_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) +static u64 byt_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) { gen6_pte_t pte = GEN6_PTE_VALID; pte |= GEN6_PTE_ADDR_ENCODE(addr); @@ -336,9 +287,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr, return pte; } -static gen6_pte_t hsw_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 unused) +static u64 hsw_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) { gen6_pte_t pte = GEN6_PTE_VALID; pte |= HSW_PTE_ADDR_ENCODE(addr); @@ -349,9 +300,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr, return pte; } -static gen6_pte_t iris_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 unused) +static u64 iris_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) { gen6_pte_t pte = GEN6_PTE_VALID; pte |= HSW_PTE_ADDR_ENCODE(addr); @@ -629,10 +580,9 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) * region, including any PTEs which happen to point to scratch. * * This is only relevant for the 48b PPGTT where we support - * huge-gtt-pages, see also i915_vma_insert(). - * - * TODO: we should really consider write-protecting the scratch-page and - * sharing between ppgtt + * huge-gtt-pages, see also i915_vma_insert(). However, as we share the + * scratch (read-only) between all vm, we create one 64k scratch page + * for all. 
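+	 * The sharing itself is wired up in gen8_init_scratch() below:
+	 * subsequent vm simply clone the kernel context's scratch_page,
+	 * scratch_pte and page-table scaffolding instead of allocating
+	 * their own.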
 */ size = I915_GTT_PAGE_SIZE_4K; if (i915_vm_is_48bit(vm) && @@ -715,14 +665,13 @@ static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt) static void gen8_initialize_pt(struct i915_address_space *vm, struct i915_page_table *pt) { - fill_px(vm, pt, - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0)); + fill_px(vm, pt, vm->scratch_pte); } -static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt, +static void gen6_initialize_pt(struct i915_address_space *vm, struct i915_page_table *pt) { - fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte); + fill32_px(vm, pt, vm->scratch_pte); } static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) @@ -856,15 +805,13 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) /* Removes entries from a single page table, releasing it if it's empty. * Caller can use the return value to update higher-level entries. */ -static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, +static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm, struct i915_page_table *pt, u64 start, u64 length) { unsigned int num_entries = gen8_pte_count(start, length); unsigned int pte = gen8_pte_index(start); unsigned int pte_end = pte + num_entries; - const gen8_pte_t scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); gen8_pte_t *vaddr; GEM_BUG_ON(num_entries > pt->used_ptes); @@ -875,7 +822,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, vaddr = kmap_atomic_px(pt); while (pte < pte_end) - vaddr[pte++] = scratch_pte; + vaddr[pte++] = vm->scratch_pte; kunmap_atomic(vaddr); return false; @@ -1208,7 +1155,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { u16 i; - encode = pte_encode | vma->vm->scratch_page.daddr; + encode = vma->vm->scratch_pte; vaddr = kmap_atomic_px(pd->page_table[idx.pde]); for (i = 1; i < index; i += 16) @@ -1261,10 +1208,35 @@ static int gen8_init_scratch(struct i915_address_space *vm) { int ret; + /* + * If everybody agrees not to write into the scratch page, + * we can reuse it for all vm, keeping contexts and processes separate.
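+	 *
+	 * In outline (a reading aid, not extra code):
+	 *
+	 *	if (vm->has_read_only && the kernel context's ppgtt exists)
+	 *		clone scratch_page/pte/pt/pd/pdp from that vm;
+	 *	else
+	 *		allocate a private scratch and encode a read-only PTE;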
+ */ + if (vm->has_read_only && + vm->i915->kernel_context && + vm->i915->kernel_context->ppgtt) { + struct i915_address_space *clone = + &vm->i915->kernel_context->ppgtt->vm; + + GEM_BUG_ON(!clone->has_read_only); + + vm->scratch_page.order = clone->scratch_page.order; + vm->scratch_pte = clone->scratch_pte; + vm->scratch_pt = clone->scratch_pt; + vm->scratch_pd = clone->scratch_pd; + vm->scratch_pdp = clone->scratch_pdp; + return 0; + } + ret = setup_scratch_page(vm, __GFP_HIGHMEM); if (ret) return ret; + vm->scratch_pte = + gen8_pte_encode(vm->scratch_page.daddr, + I915_CACHE_LLC, + PTE_READ_ONLY); + vm->scratch_pt = alloc_pt(vm); if (IS_ERR(vm->scratch_pt)) { ret = PTR_ERR(vm->scratch_pt); @@ -1336,6 +1308,9 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) static void gen8_free_scratch(struct i915_address_space *vm) { + if (!vm->scratch_page.daddr) + return; + if (use_4lvl(vm)) free_pdp(vm, vm->scratch_pdp); free_pd(vm, vm->scratch_pd); @@ -1573,8 +1548,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt, static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) { struct i915_address_space *vm = &ppgtt->vm; - const gen8_pte_t scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); + const gen8_pte_t scratch_pte = vm->scratch_pte; u64 start = 0, length = ppgtt->vm.total; if (use_4lvl(vm)) { @@ -1647,16 +1621,12 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) ppgtt->vm.i915 = i915; ppgtt->vm.dma = &i915->drm.pdev->dev; - ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ? + ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ? 1ULL << 48 : 1ULL << 32; - /* - * From bdw, there is support for read-only pages in the PPGTT. - * - * XXX GVT is not honouring the lack of RW in the PTE bits. - */ - ppgtt->vm.has_read_only = !intel_vgpu_active(i915); + /* From bdw, there is support for read-only pages in the PPGTT. */ + ppgtt->vm.has_read_only = true; i915_address_space_init(&ppgtt->vm, i915); @@ -1721,7 +1691,7 @@ err_free: static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) { struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); - const gen6_pte_t scratch_pte = ppgtt->scratch_pte; + const gen6_pte_t scratch_pte = base->vm.scratch_pte; struct i915_page_table *pt; u32 pte, pde; @@ -1757,7 +1727,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) if (i == 4) continue; - seq_printf(m, "\t\t(%03d, %04d) %08lx: ", + seq_printf(m, "\t\t(%03d, %04d) %08llx: ", pde, pte, (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE); for (i = 0; i < 4; i++) { @@ -1782,19 +1752,6 @@ static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt, ppgtt->pd_addr + pde); } -static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, dev_priv, id) { - u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ? 
- GEN8_GFX_PPGTT_48B : 0; - I915_WRITE(RING_MODE_GEN7(engine), - _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level)); - } -} - static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; @@ -1834,7 +1791,8 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv) ecochk = I915_READ(GAM_ECOCHK); I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); - I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */ + I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); } /* PPGTT support for Sandybdrige/Gen6 and later */ @@ -1846,7 +1804,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, unsigned int pde = first_entry / GEN6_PTES; unsigned int pte = first_entry % GEN6_PTES; unsigned int num_entries = length / I915_GTT_PAGE_SIZE; - const gen6_pte_t scratch_pte = ppgtt->scratch_pte; + const gen6_pte_t scratch_pte = vm->scratch_pte; while (num_entries) { struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++]; @@ -1937,7 +1895,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, if (IS_ERR(pt)) goto unwind_out; - gen6_initialize_pt(ppgtt, pt); + gen6_initialize_pt(vm, pt); ppgtt->base.pd.page_table[pde] = pt; if (i915_vma_is_bound(ppgtt->vma, @@ -1975,9 +1933,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt) if (ret) return ret; - ppgtt->scratch_pte = - vm->pte_encode(vm->scratch_page.daddr, - I915_CACHE_NONE, PTE_READ_ONLY); + vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr, + I915_CACHE_NONE, + PTE_READ_ONLY); vm->scratch_pt = alloc_pt(vm); if (IS_ERR(vm->scratch_pt)) { @@ -1985,7 +1943,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt) return PTR_ERR(vm->scratch_pt); } - gen6_initialize_pt(ppgtt, vm->scratch_pt); + gen6_initialize_pt(vm, vm->scratch_pt); gen6_for_all_pdes(unused, &ppgtt->base.pd, pde) ppgtt->base.pd.page_table[pde] = vm->scratch_pt; @@ -2237,23 +2195,10 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) { gtt_write_workarounds(dev_priv); - /* In the case of execlists, PPGTT is enabled by the context descriptor - * and the PDPs are contained within the context itself. We don't - * need to do anything here. 
*/ - if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) - return 0; - - if (!USES_PPGTT(dev_priv)) - return 0; - if (IS_GEN6(dev_priv)) gen6_ppgtt_enable(dev_priv); else if (IS_GEN7(dev_priv)) gen7_ppgtt_enable(dev_priv); - else if (INTEL_GEN(dev_priv) >= 8) - gen8_ppgtt_enable(dev_priv); - else - MISSING_CASE(INTEL_GEN(dev_priv)); return 0; } @@ -2543,8 +2488,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); unsigned first_entry = start / I915_GTT_PAGE_SIZE; unsigned num_entries = length / I915_GTT_PAGE_SIZE; - const gen8_pte_t scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); + const gen8_pte_t scratch_pte = vm->scratch_pte; gen8_pte_t __iomem *gtt_base = (gen8_pte_t __iomem *)ggtt->gsm + first_entry; const int max_entries = ggtt_total_entries(ggtt) - first_entry; @@ -2669,8 +2613,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, first_entry, num_entries, max_entries)) num_entries = max_entries; - scratch_pte = vm->pte_encode(vm->scratch_page.daddr, - I915_CACHE_LLC, 0); + scratch_pte = vm->scratch_pte; for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, >t_base[i]); @@ -2952,7 +2895,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) /* And finally clear the reserved guard page */ ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); - if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) { + if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) { ret = i915_gem_init_aliasing_ppgtt(dev_priv); if (ret) goto err; @@ -3076,6 +3019,10 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) return ret; } + ggtt->vm.scratch_pte = + ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr, + I915_CACHE_NONE, 0); + return 0; } @@ -3275,7 +3222,7 @@ static void bdw_setup_private_ppat(struct intel_ppat *ppat) ppat->match = bdw_private_pat_match; ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); - if (!USES_PPGTT(ppat->i915)) { + if (!HAS_PPGTT(ppat->i915)) { /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, * so RTL will always use the value corresponding to * pat_sel = 000". @@ -3402,7 +3349,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.cleanup = gen6_gmch_remove; ggtt->vm.insert_page = gen8_ggtt_insert_page; ggtt->vm.clear_range = nop_clear_range; - if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv)) + if (intel_scanout_needs_vtd_wa(dev_priv)) ggtt->vm.clear_range = gen8_ggtt_clear_range; ggtt->vm.insert_entries = gen8_ggtt_insert_entries; @@ -3413,6 +3360,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; if (ggtt->vm.clear_range != nop_clear_range) ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; + + /* Prevent recursively calling stop_machine() and deadlocks. 
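+		 * (The vtd workaround variants above rewrite the GGTT under
+		 * stop_machine(), and error capture would itself need to map
+		 * its capture slot through that very same path.)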
*/ + dev_info(dev_priv->drm.dev, + "Disabling error capture for VT-d workaround\n"); + i915_disable_error_state(dev_priv, -ENODEV); } ggtt->invalidate = gen6_ggtt_invalidate; @@ -3422,6 +3374,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.vma_ops.set_pages = ggtt_set_pages; ggtt->vm.vma_ops.clear_pages = clear_pages; + ggtt->vm.pte_encode = gen8_pte_encode; + setup_private_pat(dev_priv); return ggtt_probe_common(ggtt, size); @@ -3609,7 +3563,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) /* Only VLV supports read-only GGTT mappings */ ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); - if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv)) + if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv)) ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; mutex_unlock(&dev_priv->drm.struct_mutex); @@ -3711,7 +3665,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) } static struct scatterlist * -rotate_pages(const dma_addr_t *in, unsigned int offset, +rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset, unsigned int width, unsigned int height, unsigned int stride, struct sg_table *st, struct scatterlist *sg) @@ -3720,7 +3674,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset, unsigned int src_idx; for (column = 0; column < width; column++) { - src_idx = stride * (height - 1) + column; + src_idx = stride * (height - 1) + column + offset; for (row = 0; row < height; row++) { st->nents++; /* We don't need the pages, but need to initialize @@ -3728,7 +3682,8 @@ rotate_pages(const dma_addr_t *in, unsigned int offset, * The only thing we need are DMA addresses. */ sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); - sg_dma_address(sg) = in[offset + src_idx]; + sg_dma_address(sg) = + i915_gem_object_get_dma_address(obj, src_idx); sg_dma_len(sg) = I915_GTT_PAGE_SIZE; sg = sg_next(sg); src_idx -= stride; @@ -3742,22 +3697,11 @@ static noinline struct sg_table * intel_rotate_pages(struct intel_rotation_info *rot_info, struct drm_i915_gem_object *obj) { - const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE; unsigned int size = intel_rotation_info_size(rot_info); - struct sgt_iter sgt_iter; - dma_addr_t dma_addr; - unsigned long i; - dma_addr_t *page_addr_list; struct sg_table *st; struct scatterlist *sg; int ret = -ENOMEM; - - /* Allocate a temporary list of source pages for random access. */ - page_addr_list = kvmalloc_array(n_pages, - sizeof(dma_addr_t), - GFP_KERNEL); - if (!page_addr_list) - return ERR_PTR(ret); + int i; /* Allocate target SG list. */ st = kmalloc(sizeof(*st), GFP_KERNEL); @@ -3768,29 +3712,20 @@ intel_rotate_pages(struct intel_rotation_info *rot_info, if (ret) goto err_sg_alloc; - /* Populate source page list from the object. */ - i = 0; - for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages) - page_addr_list[i++] = dma_addr; - - GEM_BUG_ON(i != n_pages); st->nents = 0; sg = st->sgl; for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { - sg = rotate_pages(page_addr_list, rot_info->plane[i].offset, + sg = rotate_pages(obj, rot_info->plane[i].offset, rot_info->plane[i].width, rot_info->plane[i].height, rot_info->plane[i].stride, st, sg); } - kvfree(page_addr_list); - return st; err_sg_alloc: kfree(st); err_st_alloc: - kvfree(page_addr_list); DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! 
(%ux%u tiles, %u pages)\n", obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); @@ -3835,6 +3770,8 @@ intel_partial_pages(const struct i915_ggtt_view *view, count -= len >> PAGE_SHIFT; if (count == 0) { sg_mark_end(sg); + i915_sg_trim(st); /* Drop any unused tail entries. */ + return st; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 7e2af5f4f39b..4874da09a3c4 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -42,13 +42,15 @@ #include "i915_selftest.h" #include "i915_timeline.h" -#define I915_GTT_PAGE_SIZE_4K BIT(12) -#define I915_GTT_PAGE_SIZE_64K BIT(16) -#define I915_GTT_PAGE_SIZE_2M BIT(21) +#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12) +#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16) +#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21) #define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K #define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M +#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE + #define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE #define I915_FENCE_REG_NONE -1 @@ -287,6 +289,7 @@ struct i915_address_space { struct mutex mutex; /* protects vma and our lists */ + u64 scratch_pte; struct i915_page_dma scratch_page; struct i915_page_table *scratch_pt; struct i915_page_directory *scratch_pd; @@ -333,12 +336,11 @@ struct i915_address_space { /* Some systems support read-only mappings for GGTT and/or PPGTT */ bool has_read_only:1; - /* FIXME: Need a more generic return type */ - gen6_pte_t (*pte_encode)(dma_addr_t addr, - enum i915_cache_level level, - u32 flags); /* Create a valid PTE */ - /* flags for pte_encode */ + u64 (*pte_encode)(dma_addr_t addr, + enum i915_cache_level level, + u32 flags); /* Create a valid PTE */ #define PTE_READ_ONLY (1<<0) + int (*allocate_va_range)(struct i915_address_space *vm, u64 start, u64 length); void (*clear_range)(struct i915_address_space *vm, @@ -420,7 +422,6 @@ struct gen6_hw_ppgtt { struct i915_vma *vma; gen6_pte_t __iomem *pd_addr; - gen6_pte_t scratch_pte; unsigned int pin_count; bool scan_for_unused_pt; @@ -659,20 +660,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, u64 start, u64 end, unsigned int flags); /* Flags used by pin/bind&friends. 
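 *
 * Note the switch to BIT_ULL() below: these flags travel in the u64
 * passed to i915_vma_pin() and friends, and 32b BIT() values mixed
 * with 64b masks such as PIN_OFFSET_MASK invite silent truncation.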
*/ -#define PIN_NONBLOCK BIT(0) -#define PIN_MAPPABLE BIT(1) -#define PIN_ZONE_4G BIT(2) -#define PIN_NONFAULT BIT(3) -#define PIN_NOEVICT BIT(4) - -#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */ -#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */ -#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */ -#define PIN_UPDATE BIT(8) - -#define PIN_HIGH BIT(9) -#define PIN_OFFSET_BIAS BIT(10) -#define PIN_OFFSET_FIXED BIT(11) +#define PIN_NONBLOCK BIT_ULL(0) +#define PIN_MAPPABLE BIT_ULL(1) +#define PIN_ZONE_4G BIT_ULL(2) +#define PIN_NONFAULT BIT_ULL(3) +#define PIN_NOEVICT BIT_ULL(4) + +#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */ +#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */ +#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */ +#define PIN_UPDATE BIT_ULL(8) + +#define PIN_HIGH BIT_ULL(9) +#define PIN_OFFSET_BIAS BIT_ULL(10) +#define PIN_OFFSET_FIXED BIT_ULL(11) #define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE) #endif diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 8762d17b6659..07465123c166 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -27,11 +27,14 @@ * */ -#include <generated/utsrelease.h> +#include <linux/ascii85.h> +#include <linux/nmi.h> +#include <linux/scatterlist.h> #include <linux/stop_machine.h> +#include <linux/utsname.h> #include <linux/zlib.h> + #include <drm/drm_print.h> -#include <linux/ascii85.h> #include "i915_gpu_error.h" #include "i915_drv.h" @@ -77,112 +80,110 @@ static const char *purgeable_flag(int purgeable) return purgeable ? " purgeable" : ""; } -static bool __i915_error_ok(struct drm_i915_error_state_buf *e) +static void __sg_set_buf(struct scatterlist *sg, + void *addr, unsigned int len, loff_t it) { - - if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) { - e->err = -ENOSPC; - return false; - } - - if (e->bytes == e->size - 1 || e->err) - return false; - - return true; + sg->page_link = (unsigned long)virt_to_page(addr); + sg->offset = offset_in_page(addr); + sg->length = len; + sg->dma_address = it; } -static bool __i915_error_seek(struct drm_i915_error_state_buf *e, - unsigned len) +static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len) { - if (e->pos + len <= e->start) { - e->pos += len; + if (!len) return false; - } - /* First vsnprintf needs to fit in its entirety for memmove */ - if (len >= e->size) { - e->err = -EIO; - return false; - } + if (e->bytes + len + 1 <= e->size) + return true; - return true; -} + if (e->bytes) { + __sg_set_buf(e->cur++, e->buf, e->bytes, e->iter); + e->iter += e->bytes; + e->buf = NULL; + e->bytes = 0; + } -static void __i915_error_advance(struct drm_i915_error_state_buf *e, - unsigned len) -{ - /* If this is first printf in this window, adjust it so that - * start position matches start of the buffer - */ + if (e->cur == e->end) { + struct scatterlist *sgl; - if (e->pos < e->start) { - const size_t off = e->start - e->pos; + sgl = (typeof(sgl))__get_free_page(GFP_KERNEL); + if (!sgl) { + e->err = -ENOMEM; + return false; + } - /* Should not happen but be paranoid */ - if (off > len || e->bytes) { - e->err = -EIO; - return; + if (e->cur) { + e->cur->offset = 0; + e->cur->length = 0; + e->cur->page_link = + (unsigned long)sgl | SG_CHAIN; + } else { + e->sgl = sgl; } - memmove(e->buf, e->buf + off, len - off); - e->bytes = len - off; - e->pos = e->start; - return; + e->cur = sgl; + e->end = sgl + SG_MAX_SINGLE_ALLOC - 1; } - e->bytes += len; - e->pos += len; + e->size = ALIGN(len + 
1, SZ_64K); + e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + if (!e->buf) { + e->size = PAGE_ALIGN(len + 1); + e->buf = kmalloc(e->size, GFP_KERNEL); + } + if (!e->buf) { + e->err = -ENOMEM; + return false; + } + + return true; } __printf(2, 0) static void i915_error_vprintf(struct drm_i915_error_state_buf *e, - const char *f, va_list args) + const char *fmt, va_list args) { - unsigned len; + va_list ap; + int len; - if (!__i915_error_ok(e)) + if (e->err) return; - /* Seek the first printf which is hits start position */ - if (e->pos < e->start) { - va_list tmp; - - va_copy(tmp, args); - len = vsnprintf(NULL, 0, f, tmp); - va_end(tmp); - - if (!__i915_error_seek(e, len)) - return; + va_copy(ap, args); + len = vsnprintf(NULL, 0, fmt, ap); + va_end(ap); + if (len <= 0) { + e->err = len; + return; } - len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args); - if (len >= e->size - e->bytes) - len = e->size - e->bytes - 1; + if (!__i915_error_grow(e, len)) + return; - __i915_error_advance(e, len); + GEM_BUG_ON(e->bytes >= e->size); + len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args); + if (len < 0) { + e->err = len; + return; + } + e->bytes += len; } -static void i915_error_puts(struct drm_i915_error_state_buf *e, - const char *str) +static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str) { unsigned len; - if (!__i915_error_ok(e)) + if (e->err || !str) return; len = strlen(str); + if (!__i915_error_grow(e, len)) + return; - /* Seek the first printf which is hits start position */ - if (e->pos < e->start) { - if (!__i915_error_seek(e, len)) - return; - } - - if (len >= e->size - e->bytes) - len = e->size - e->bytes - 1; + GEM_BUG_ON(e->bytes + len > e->size); memcpy(e->buf + e->bytes, str, len); - - __i915_error_advance(e, len); + e->bytes += len; } #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) @@ -268,6 +269,8 @@ static int compress_page(struct compress *c, if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK) return -EIO; + + touch_nmi_watchdog(); } while (zstream->avail_in); /* Fallback to uncompressed if we increase size? */ @@ -512,7 +515,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, err_printf(m, " SYNC_2: 0x%08x\n", ee->semaphore_mboxes[2]); } - if (USES_PPGTT(m->i915)) { + if (HAS_PPGTT(m->i915)) { err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode); if (INTEL_GEN(m->i915) >= 8) { @@ -635,22 +638,33 @@ static void err_print_uc(struct drm_i915_error_state_buf *m, print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log); } -int i915_error_state_to_str(struct drm_i915_error_state_buf *m, - const struct i915_gpu_state *error) +static void err_free_sgl(struct scatterlist *sgl) +{ + while (sgl) { + struct scatterlist *sg; + + for (sg = sgl; !sg_is_chain(sg); sg++) { + kfree(sg_virt(sg)); + if (sg_is_last(sg)) + break; + } + + sg = sg_is_last(sg) ? 
NULL : sg_chain_ptr(sg); + free_page((unsigned long)sgl); + sgl = sg; + } +} + +static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, + struct i915_gpu_state *error) { - struct drm_i915_private *dev_priv = m->i915; struct drm_i915_error_object *obj; struct timespec64 ts; int i, j; - if (!error) { - err_printf(m, "No error state collected\n"); - return 0; - } - if (*error->error_msg) err_printf(m, "%s\n", error->error_msg); - err_printf(m, "Kernel: " UTS_RELEASE "\n"); + err_printf(m, "Kernel: %s\n", init_utsname()->release); ts = ktime_to_timespec64(error->time); err_printf(m, "Time: %lld s %ld us\n", (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); @@ -680,12 +694,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "Reset count: %u\n", error->reset_count); err_printf(m, "Suspend count: %u\n", error->suspend_count); err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform)); - err_print_pciid(m, error->i915); + err_print_pciid(m, m->i915); err_printf(m, "IOMMU enabled?: %d\n", error->iommu); - if (HAS_CSR(dev_priv)) { - struct intel_csr *csr = &dev_priv->csr; + if (HAS_CSR(m->i915)) { + struct intel_csr *csr = &m->i915->csr; err_printf(m, "DMC loaded: %s\n", yesno(csr->dmc_payload != NULL)); @@ -705,22 +719,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); err_printf(m, "CCID: 0x%08x\n", error->ccid); - err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings); + err_printf(m, "Missed interrupts: 0x%08lx\n", + m->i915->gpu_error.missed_irq_rings); for (i = 0; i < error->nfence; i++) err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); - if (INTEL_GEN(dev_priv) >= 6) { + if (INTEL_GEN(m->i915) >= 6) { err_printf(m, "ERROR: 0x%08x\n", error->error); - if (INTEL_GEN(dev_priv) >= 8) + if (INTEL_GEN(m->i915) >= 8) err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", error->fault_data1, error->fault_data0); err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); } - if (IS_GEN7(dev_priv)) + if (IS_GEN7(m->i915)) err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); for (i = 0; i < ARRAY_SIZE(error->engine); i++) { @@ -742,7 +757,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, len += scnprintf(buf + len, sizeof(buf), "%s%s", first ? 
"" : ", ", - dev_priv->engine[j]->name); + m->i915->engine[j]->name); first = 0; } scnprintf(buf + len, sizeof(buf), ")"); @@ -760,7 +775,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, obj = ee->batchbuffer; if (obj) { - err_puts(m, dev_priv->engine[i]->name); + err_puts(m, m->i915->engine[i]->name); if (ee->context.pid) err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)", ee->context.comm, @@ -772,16 +787,16 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, " --- gtt_offset = 0x%08x %08x\n", upper_32_bits(obj->gtt_offset), lower_32_bits(obj->gtt_offset)); - print_error_obj(m, dev_priv->engine[i], NULL, obj); + print_error_obj(m, m->i915->engine[i], NULL, obj); } for (j = 0; j < ee->user_bo_count; j++) - print_error_obj(m, dev_priv->engine[i], + print_error_obj(m, m->i915->engine[i], "user", ee->user_bo[j]); if (ee->num_requests) { err_printf(m, "%s --- %d requests\n", - dev_priv->engine[i]->name, + m->i915->engine[i]->name, ee->num_requests); for (j = 0; j < ee->num_requests; j++) error_print_request(m, " ", @@ -791,10 +806,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, if (IS_ERR(ee->waiters)) { err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n", - dev_priv->engine[i]->name); + m->i915->engine[i]->name); } else if (ee->num_waiters) { err_printf(m, "%s --- %d waiters\n", - dev_priv->engine[i]->name, + m->i915->engine[i]->name, ee->num_waiters); for (j = 0; j < ee->num_waiters; j++) { err_printf(m, " seqno 0x%08x for %s [%d]\n", @@ -804,22 +819,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, } } - print_error_obj(m, dev_priv->engine[i], + print_error_obj(m, m->i915->engine[i], "ringbuffer", ee->ringbuffer); - print_error_obj(m, dev_priv->engine[i], + print_error_obj(m, m->i915->engine[i], "HW Status", ee->hws_page); - print_error_obj(m, dev_priv->engine[i], + print_error_obj(m, m->i915->engine[i], "HW context", ee->ctx); - print_error_obj(m, dev_priv->engine[i], + print_error_obj(m, m->i915->engine[i], "WA context", ee->wa_ctx); - print_error_obj(m, dev_priv->engine[i], + print_error_obj(m, m->i915->engine[i], "WA batchbuffer", ee->wa_batchbuffer); - print_error_obj(m, dev_priv->engine[i], + print_error_obj(m, m->i915->engine[i], "NULL context", ee->default_state); } @@ -832,43 +847,107 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_print_capabilities(m, &error->device_info, &error->driver_caps); err_print_params(m, &error->params); err_print_uc(m, &error->uc); +} + +static int err_print_to_sgl(struct i915_gpu_state *error) +{ + struct drm_i915_error_state_buf m; + + if (IS_ERR(error)) + return PTR_ERR(error); + + if (READ_ONCE(error->sgl)) + return 0; + + memset(&m, 0, sizeof(m)); + m.i915 = error->i915; + + __err_print_to_sgl(&m, error); - if (m->bytes == 0 && m->err) - return m->err; + if (m.buf) { + __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter); + m.bytes = 0; + m.buf = NULL; + } + if (m.cur) { + GEM_BUG_ON(m.end < m.cur); + sg_mark_end(m.cur - 1); + } + GEM_BUG_ON(m.sgl && !m.cur); + + if (m.err) { + err_free_sgl(m.sgl); + return m.err; + } + + if (cmpxchg(&error->sgl, NULL, m.sgl)) + err_free_sgl(m.sgl); return 0; } -int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, - struct drm_i915_private *i915, - size_t count, loff_t pos) +ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error, + char *buf, loff_t off, size_t rem) { - memset(ebuf, 0, sizeof(*ebuf)); - ebuf->i915 = i915; + struct scatterlist *sg; + size_t 
count; + loff_t pos; + int err; - /* We need to have enough room to store any i915_error_state printf - * so that we can move it to start position. - */ - ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE; - ebuf->buf = kmalloc(ebuf->size, - GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); + if (!error || !rem) + return 0; - if (ebuf->buf == NULL) { - ebuf->size = PAGE_SIZE; - ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL); - } + err = err_print_to_sgl(error); + if (err) + return err; - if (ebuf->buf == NULL) { - ebuf->size = 128; - ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL); - } + sg = READ_ONCE(error->fit); + if (!sg || off < sg->dma_address) + sg = error->sgl; + if (!sg) + return 0; - if (ebuf->buf == NULL) - return -ENOMEM; + pos = sg->dma_address; + count = 0; + do { + size_t len, start; - ebuf->start = pos; + if (sg_is_chain(sg)) { + sg = sg_chain_ptr(sg); + GEM_BUG_ON(sg_is_chain(sg)); + } - return 0; + len = sg->length; + if (pos + len <= off) { + pos += len; + continue; + } + + start = sg->offset; + if (pos < off) { + GEM_BUG_ON(off - pos > len); + len -= off - pos; + start += off - pos; + pos = off; + } + + len = min(len, rem); + GEM_BUG_ON(!len || len > sg->length); + + memcpy(buf, page_address(sg_page(sg)) + start, len); + + count += len; + pos += len; + + buf += len; + rem -= len; + if (!rem) { + WRITE_ONCE(error->fit, sg); + break; + } + } while (!sg_is_last(sg++)); + + return count; } static void i915_error_object_free(struct drm_i915_error_object *obj) @@ -941,6 +1020,7 @@ void __i915_gpu_state_free(struct kref *error_ref) cleanup_params(error); cleanup_uc_state(error); + err_free_sgl(error->sgl); kfree(error); } @@ -999,7 +1079,6 @@ i915_error_object_create(struct drm_i915_private *i915, } compress_fini(&compress, dst); - ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); return dst; } @@ -1268,7 +1347,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error, ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error, engine); - if (USES_PPGTT(dev_priv)) { + if (HAS_PPGTT(dev_priv)) { int i; ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); @@ -1492,7 +1571,7 @@ static void gem_record_rings(struct i915_gpu_state *error) if (HAS_BROKEN_CS_TLB(i915)) ee->wa_batchbuffer = i915_error_object_create(i915, - engine->scratch); + i915->gt.scratch); request_record_user_bo(request, ee); ee->ctx = @@ -1785,6 +1864,14 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error) return epoch; } +static void capture_finish(struct i915_gpu_state *error) +{ + struct i915_ggtt *ggtt = &error->i915->ggtt; + const u64 slot = ggtt->error_capture.start; + + ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); +} + static int capture(void *data) { struct i915_gpu_state *error = data; @@ -1809,6 +1896,7 @@ static int capture(void *data) error->epoch = capture_find_epoch(error); + capture_finish(error); return 0; } @@ -1859,6 +1947,7 @@ void i915_capture_error_state(struct drm_i915_private *i915, error = i915_capture_gpu_state(i915); if (!error) { DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); + i915_disable_error_state(i915, -ENOMEM); return; } @@ -1914,5 +2003,14 @@ void i915_reset_error_state(struct drm_i915_private *i915) i915->gpu_error.first_error = NULL; spin_unlock_irq(&i915->gpu_error.lock); - i915_gpu_state_put(error); + if (!IS_ERR(error)) + i915_gpu_state_put(error); +} + +void i915_disable_error_state(struct drm_i915_private *i915, int err) +{ + spin_lock_irq(&i915->gpu_error.lock); + if (!i915->gpu_error.first_error) + 
i915->gpu_error.first_error = ERR_PTR(err); + spin_unlock_irq(&i915->gpu_error.lock); } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 8710fb18ed74..ff2652bbb0b0 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -192,6 +192,8 @@ struct i915_gpu_state { } *active_bo[I915_NUM_ENGINES], *pinned_bo; u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count; struct i915_address_space *active_vm[I915_NUM_ENGINES]; + + struct scatterlist *sgl, *fit; }; struct i915_gpu_error { @@ -298,29 +300,20 @@ struct i915_gpu_error { struct drm_i915_error_state_buf { struct drm_i915_private *i915; - unsigned int bytes; - unsigned int size; + struct scatterlist *sgl, *cur, *end; + + char *buf; + size_t bytes; + size_t size; + loff_t iter; + int err; - u8 *buf; - loff_t start; - loff_t pos; }; #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); -int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, - const struct i915_gpu_state *gpu); -int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, - struct drm_i915_private *i915, - size_t count, loff_t pos); - -static inline void -i915_error_state_buf_release(struct drm_i915_error_state_buf *eb) -{ - kfree(eb->buf); -} struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915); void i915_capture_error_state(struct drm_i915_private *dev_priv, @@ -334,6 +327,9 @@ i915_gpu_state_get(struct i915_gpu_state *gpu) return gpu; } +ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error, + char *buf, loff_t offset, size_t count); + void __i915_gpu_state_free(struct kref *kref); static inline void i915_gpu_state_put(struct i915_gpu_state *gpu) { @@ -343,6 +339,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu) struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); void i915_reset_error_state(struct drm_i915_private *i915); +void i915_disable_error_state(struct drm_i915_private *i915, int err); #else @@ -355,13 +352,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv, static inline struct i915_gpu_state * i915_first_error_state(struct drm_i915_private *i915) { - return NULL; + return ERR_PTR(-ENODEV); } static inline void i915_reset_error_state(struct drm_i915_private *i915) { } +static inline void i915_disable_error_state(struct drm_i915_private *i915, + int err) +{ +} + #endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */ #endif /* _I915_GPU_ERROR_H_ */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2e242270e270..d447d7d508f4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2887,21 +2887,39 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) return ret; } +static inline u32 gen8_master_intr_disable(void __iomem * const regs) +{ + raw_reg_write(regs, GEN8_MASTER_IRQ, 0); + + /* + * Now with master disabled, get a sample of level indications + * for this interrupt. Indications will be cleared on related acks. + * New indications can and will light up during processing, + * and will generate new interrupt after enabling master. 
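+	 *
+	 * In the irq handler this brackets the whole ack/process sequence:
+	 *
+	 *	master_ctl = gen8_master_intr_disable(regs);
+	 *	... find, clear and process each source in master_ctl ...
+	 *	gen8_master_intr_enable(regs);
+	 *
+	 * so anything that asserts mid-processing simply re-raises the
+	 * interrupt once the master enable is written back.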
+ */ + return raw_reg_read(regs, GEN8_MASTER_IRQ); +} + +static inline void gen8_master_intr_enable(void __iomem * const regs) +{ + raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); +} + static irqreturn_t gen8_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = to_i915(arg); + void __iomem * const regs = dev_priv->regs; u32 master_ctl; u32 gt_iir[4]; if (!intel_irqs_enabled(dev_priv)) return IRQ_NONE; - master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); - master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; - if (!master_ctl) + master_ctl = gen8_master_intr_disable(regs); + if (!master_ctl) { + gen8_master_intr_enable(regs); return IRQ_NONE; - - I915_WRITE_FW(GEN8_MASTER_IRQ, 0); + } /* Find, clear, then process each source of interrupt */ gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); @@ -2913,7 +2931,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) enable_rpm_wakeref_asserts(dev_priv); } - I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); + gen8_master_intr_enable(regs); gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); @@ -3111,6 +3129,24 @@ gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir) intel_opregion_asle_intr(dev_priv); } +static inline u32 gen11_master_intr_disable(void __iomem * const regs) +{ + raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); + + /* + * Now with master disabled, get a sample of level indications + * for this interrupt. Indications will be cleared on related acks. + * New indications can and will light up during processing, + * and will generate new interrupt after enabling master. + */ + return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); +} + +static inline void gen11_master_intr_enable(void __iomem * const regs) +{ + raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); +} + static irqreturn_t gen11_irq_handler(int irq, void *arg) { struct drm_i915_private * const i915 = to_i915(arg); @@ -3121,13 +3157,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) if (!intel_irqs_enabled(i915)) return IRQ_NONE; - master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); - master_ctl &= ~GEN11_MASTER_IRQ; - if (!master_ctl) + master_ctl = gen11_master_intr_disable(regs); + if (!master_ctl) { + gen11_master_intr_enable(regs); return IRQ_NONE; - - /* Disable interrupts. */ - raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); + } /* Find, clear, then process each source of interrupt. */ gen11_gt_irq_handler(i915, master_ctl); @@ -3147,8 +3181,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); - /* Acknowledge and enable interrupts. 
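
Both the gen8 and gen11 paths now share the same discipline, made explicit by the intr_disable()/intr_enable() helpers: mask the master enable, latch the pending sources, handle them, then unmask. A condensed sketch of the shared shape, using the gen8 names from the hunks above:

static irqreturn_t master_irq_shape(int irq, void *arg)
{
	struct drm_i915_private *i915 = to_i915(arg);
	void __iomem * const regs = i915->regs;
	u32 master_ctl;

	/* 1. Mask the master enable and sample the level indications. */
	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		/* Nothing pending: unmask and disown the interrupt. */
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* 2. Ack and handle each source while the master stays masked. */
	/* ... per-source handling elided ... */

	/* 3. Unmask: anything that lit up meanwhile re-fires the line. */
	gen8_master_intr_enable(regs);

	return IRQ_HANDLED;
}
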
*/ - raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); + gen11_master_intr_enable(regs); gen11_gu_misc_irq_handler(i915, gu_misc_iir); @@ -3598,8 +3631,7 @@ static void gen8_irq_reset(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); int pipe; - I915_WRITE(GEN8_MASTER_IRQ, 0); - POSTING_READ(GEN8_MASTER_IRQ); + gen8_master_intr_disable(dev_priv->regs); gen8_gt_irq_reset(dev_priv); @@ -3641,13 +3673,15 @@ static void gen11_irq_reset(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - I915_WRITE(GEN11_GFX_MSTR_IRQ, 0); - POSTING_READ(GEN11_GFX_MSTR_IRQ); + gen11_master_intr_disable(dev_priv->regs); gen11_gt_irq_reset(dev_priv); I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); + I915_WRITE(EDP_PSR_IMR, 0xffffffff); + I915_WRITE(EDP_PSR_IIR, 0xffffffff); + for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) @@ -4244,8 +4278,7 @@ static int gen8_irq_postinstall(struct drm_device *dev) if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_postinstall(dev); - I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); - POSTING_READ(GEN8_MASTER_IRQ); + gen8_master_intr_enable(dev_priv->regs); return 0; } @@ -4307,8 +4340,7 @@ static int gen11_irq_postinstall(struct drm_device *dev) I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); - I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); - POSTING_READ(GEN11_GFX_MSTR_IRQ); + gen11_master_intr_enable(dev_priv->regs); return 0; } @@ -4834,6 +4866,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev_priv->display_irqs_enabled = false; dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; + /* If we have MST support, we want to avoid doing short HPD IRQ storm + * detection, as short HPD storms will occur as a natural part of + * sideband messaging with MST. + * On older platforms however, IRQ storms can occur with both long and + * short pulses, as seen on some G4x systems. + */ + dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c index 4abd2e8b5083..4acdb94555b7 100644 --- a/drivers/gpu/drm/i915/i915_oa_bdw.c +++ b/drivers/gpu/drm/i915/i915_oa_bdw.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
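
This and the following i915_oa_* hunks are mechanical: each autogenerated OA config source and header trades its full MIT license text for the one-line SPDX identifier, and the same conversion repeats for every file through i915_oa_sklgt4.h below. The resulting header is identical in each:

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 *
 * Autogenerated file by GPU Top : https://github.com/rib/gputop
 * DO NOT EDIT manually!
 */
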
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h index b812d16162ac..0e667f1a8aa1 100644 --- a/drivers/gpu/drm/i915/i915_oa_bdw.h +++ b/drivers/gpu/drm/i915/i915_oa_bdw.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_BDW_H__ diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c index cb6f304ec16a..a44195c39923 100644 --- a/drivers/gpu/drm/i915/i915_oa_bxt.c +++ b/drivers/gpu/drm/i915/i915_oa_bxt.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h index 690b963a2383..679e92cf4f1d 100644 --- a/drivers/gpu/drm/i915/i915_oa_bxt.h +++ b/drivers/gpu/drm/i915/i915_oa_bxt.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_BXT_H__ diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c index 8641ae30e343..7f60d51b8761 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c +++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h index 1f3268ef2ea2..4d6025559bbe 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h +++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_CFLGT2_H__ diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c index 792facdb6702..a92c38e3a0ce 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c +++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h index c13b5aac01b9..0697f4077402 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h +++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_CFLGT3_H__ diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c index 556febb2c3c8..71ec889a0114 100644 --- a/drivers/gpu/drm/i915/i915_oa_chv.c +++ b/drivers/gpu/drm/i915/i915_oa_chv.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h index b9622496979e..0986eae3135f 100644 --- a/drivers/gpu/drm/i915/i915_oa_chv.h +++ b/drivers/gpu/drm/i915/i915_oa_chv.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_CHV_H__ diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c index ba9140c87cc0..5c23d883d6c9 100644 --- a/drivers/gpu/drm/i915/i915_oa_cnl.c +++ b/drivers/gpu/drm/i915/i915_oa_cnl.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h index fb918b131105..e830a406aff2 100644 --- a/drivers/gpu/drm/i915/i915_oa_cnl.h +++ b/drivers/gpu/drm/i915/i915_oa_cnl.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_CNL_H__ diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c index 971db587957c..4bdda66df7d2 100644 --- a/drivers/gpu/drm/i915/i915_oa_glk.c +++ b/drivers/gpu/drm/i915/i915_oa_glk.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h index 63bd113f4bc9..06dedf991edb 100644 --- a/drivers/gpu/drm/i915/i915_oa_glk.h +++ b/drivers/gpu/drm/i915/i915_oa_glk.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_GLK_H__ diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c index 434a9b96d7ab..cc6526fdd2bd 100644 --- a/drivers/gpu/drm/i915/i915_oa_hsw.c +++ b/drivers/gpu/drm/i915/i915_oa_hsw.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h index 74d03439c157..3d0c870cd0bd 100644 --- a/drivers/gpu/drm/i915/i915_oa_hsw.h +++ b/drivers/gpu/drm/i915/i915_oa_hsw.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_HSW_H__ diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/i915_oa_icl.c index a5667926e3de..baa51427a543 100644 --- a/drivers/gpu/drm/i915/i915_oa_icl.c +++ b/drivers/gpu/drm/i915/i915_oa_icl.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h index ae1c24aafe4f..24eaa97d61ba 100644 --- a/drivers/gpu/drm/i915/i915_oa_icl.h +++ b/drivers/gpu/drm/i915/i915_oa_icl.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_ICL_H__ diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c index 2fa98a40bbc8..168e49ab0d4d 100644 --- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c +++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h index 25b803546dc1..a55398a904de 100644 --- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h +++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_KBLGT2_H__ diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c index f3cb6679a1bc..6ffa553c388e 100644 --- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c +++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h index d5b5b5c1923e..3ddd3483b7cc 100644 --- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h +++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_KBLGT3_H__ diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c index bf8b8cd8a50d..7ce6ee851d43 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c +++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h index fe1aa2c03958..be6256037239 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h +++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_SKLGT2_H__ diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c index ae534c7c8135..086ca2631e1c 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c +++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h index 06746b2616c8..650beb068e56 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h +++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_SKLGT3_H__ diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c index 817fba2d82df..b291a6eb8a87 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c +++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.c @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #include <linux/sysfs.h> diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h index 944fd525c8b1..8dcf849d131e 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h +++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.h @@ -1,29 +1,10 @@ /* - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - * - * - * Copyright (c) 2015 Intel Corporation + * SPDX-License-Identifier: MIT * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * Copyright © 2018 Intel Corporation * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! */ #ifndef __I915_OA_SKLGT4_H__ diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 295e981e4a39..2e0356561839 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -82,10 +82,6 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0644, "WARNING: Disabling this can cause system wide hangs. " "(default: true)"); -i915_param_named_unsafe(enable_ppgtt, int, 0400, - "Override PPGTT usage. 
" - "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)"); - i915_param_named_unsafe(enable_psr, int, 0600, "Enable PSR " "(0=disabled, 1=enabled) " @@ -171,8 +167,10 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400, i915_param_named(enable_dpcd_backlight, bool, 0600, "Enable support for DPCD backlight control (default:false)"); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) i915_param_named(enable_gvt, bool, 0400, "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); +#endif static __always_inline void _print_param(struct drm_printer *p, const char *name, @@ -188,7 +186,8 @@ static __always_inline void _print_param(struct drm_printer *p, else if (!__builtin_strcmp(type, "char *")) drm_printf(p, "i915.%s=%s\n", name, *(const char **)x); else - BUILD_BUG(); + WARN_ONCE(1, "no printer defined for param type %s (i915.%s)\n", + type, name); } /** diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 6c4d4a21474b..7e56c516c815 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -41,7 +41,6 @@ struct drm_printer; param(int, vbt_sdvo_panel_type, -1) \ param(int, enable_dc, -1) \ param(int, enable_fbc, -1) \ - param(int, enable_ppgtt, -1) \ param(int, enable_psr, -1) \ param(int, disable_power_well, -1) \ param(int, enable_ips, 1) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index d6f7b9fe1d26..6350db5503cd 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -33,19 +33,30 @@ #define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1) #define GEN_DEFAULT_PIPEOFFSETS \ - .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ - PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ - .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ - TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ - .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = PIPE_C_OFFSET, \ + [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ + [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \ + } #define GEN_CHV_PIPEOFFSETS \ - .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ - CHV_PIPE_C_OFFSET }, \ - .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ - CHV_TRANSCODER_C_OFFSET, }, \ - .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ - CHV_PALETTE_C_OFFSET } + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = CHV_PIPE_C_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \ + } #define CURSOR_OFFSETS \ .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET } @@ -68,8 +79,9 @@ #define GEN2_FEATURES \ GEN(2), \ .num_pipes = 1, \ - .has_overlay = 1, .overlay_needs_physical = 1, \ - .has_gmch_display = 1, \ + .display.has_overlay = 1, \ + .display.overlay_needs_physical = 1, \ + .display.has_gmch_display = 1, \ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ .ring_mask = RENDER_RING, \ @@ -82,7 +94,8 @@ static const struct intel_device_info intel_i830_info = { GEN2_FEATURES, PLATFORM(INTEL_I830), - .is_mobile = 1, .cursor_needs_physical = 1, + .is_mobile = 1, + 
.display.cursor_needs_physical = 1, .num_pipes = 2, /* legal, last one wins */ }; @@ -96,8 +109,8 @@ static const struct intel_device_info intel_i85x_info = { PLATFORM(INTEL_I85X), .is_mobile = 1, .num_pipes = 2, /* legal, last one wins */ - .cursor_needs_physical = 1, - .has_fbc = 1, + .display.cursor_needs_physical = 1, + .display.has_fbc = 1, }; static const struct intel_device_info intel_i865g_info = { @@ -108,7 +121,7 @@ static const struct intel_device_info intel_i865g_info = { #define GEN3_FEATURES \ GEN(3), \ .num_pipes = 2, \ - .has_gmch_display = 1, \ + .display.has_gmch_display = 1, \ .ring_mask = RENDER_RING, \ .has_snoop = true, \ .has_coherent_ggtt = true, \ @@ -120,8 +133,9 @@ static const struct intel_device_info intel_i915g_info = { GEN3_FEATURES, PLATFORM(INTEL_I915G), .has_coherent_ggtt = false, - .cursor_needs_physical = 1, - .has_overlay = 1, .overlay_needs_physical = 1, + .display.cursor_needs_physical = 1, + .display.has_overlay = 1, + .display.overlay_needs_physical = 1, .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -130,10 +144,11 @@ static const struct intel_device_info intel_i915gm_info = { GEN3_FEATURES, PLATFORM(INTEL_I915GM), .is_mobile = 1, - .cursor_needs_physical = 1, - .has_overlay = 1, .overlay_needs_physical = 1, - .supports_tv = 1, - .has_fbc = 1, + .display.cursor_needs_physical = 1, + .display.has_overlay = 1, + .display.overlay_needs_physical = 1, + .display.supports_tv = 1, + .display.has_fbc = 1, .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -141,8 +156,10 @@ static const struct intel_device_info intel_i915gm_info = { static const struct intel_device_info intel_i945g_info = { GEN3_FEATURES, PLATFORM(INTEL_I945G), - .has_hotplug = 1, .cursor_needs_physical = 1, - .has_overlay = 1, .overlay_needs_physical = 1, + .display.has_hotplug = 1, + .display.cursor_needs_physical = 1, + .display.has_overlay = 1, + .display.overlay_needs_physical = 1, .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -151,10 +168,12 @@ static const struct intel_device_info intel_i945gm_info = { GEN3_FEATURES, PLATFORM(INTEL_I945GM), .is_mobile = 1, - .has_hotplug = 1, .cursor_needs_physical = 1, - .has_overlay = 1, .overlay_needs_physical = 1, - .supports_tv = 1, - .has_fbc = 1, + .display.has_hotplug = 1, + .display.cursor_needs_physical = 1, + .display.has_overlay = 1, + .display.overlay_needs_physical = 1, + .display.supports_tv = 1, + .display.has_fbc = 1, .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -162,23 +181,23 @@ static const struct intel_device_info intel_i945gm_info = { static const struct intel_device_info intel_g33_info = { GEN3_FEATURES, PLATFORM(INTEL_G33), - .has_hotplug = 1, - .has_overlay = 1, + .display.has_hotplug = 1, + .display.has_overlay = 1, }; static const struct intel_device_info intel_pineview_info = { GEN3_FEATURES, PLATFORM(INTEL_PINEVIEW), .is_mobile = 1, - .has_hotplug = 1, - .has_overlay = 1, + .display.has_hotplug = 1, + .display.has_overlay = 1, }; #define GEN4_FEATURES \ GEN(4), \ .num_pipes = 2, \ - .has_hotplug = 1, \ - .has_gmch_display = 1, \ + .display.has_hotplug = 1, \ + .display.has_gmch_display = 1, \ .ring_mask = RENDER_RING, \ .has_snoop = true, \ .has_coherent_ggtt = true, \ @@ -189,7 +208,7 @@ static const struct intel_device_info intel_pineview_info = { static const struct intel_device_info intel_i965g_info = { GEN4_FEATURES, PLATFORM(INTEL_I965G), - .has_overlay = 1, + .display.has_overlay = 1, .hws_needs_physical = 1, .has_snoop = false, }; @@ -197,9 +216,10 @@ 
static const struct intel_device_info intel_i965g_info = { static const struct intel_device_info intel_i965gm_info = { GEN4_FEATURES, PLATFORM(INTEL_I965GM), - .is_mobile = 1, .has_fbc = 1, - .has_overlay = 1, - .supports_tv = 1, + .is_mobile = 1, + .display.has_fbc = 1, + .display.has_overlay = 1, + .display.supports_tv = 1, .hws_needs_physical = 1, .has_snoop = false, }; @@ -213,15 +233,16 @@ static const struct intel_device_info intel_g45_info = { static const struct intel_device_info intel_gm45_info = { GEN4_FEATURES, PLATFORM(INTEL_GM45), - .is_mobile = 1, .has_fbc = 1, - .supports_tv = 1, + .is_mobile = 1, + .display.has_fbc = 1, + .display.supports_tv = 1, .ring_mask = RENDER_RING | BSD_RING, }; #define GEN5_FEATURES \ GEN(5), \ .num_pipes = 2, \ - .has_hotplug = 1, \ + .display.has_hotplug = 1, \ .ring_mask = RENDER_RING | BSD_RING, \ .has_snoop = true, \ .has_coherent_ggtt = true, \ @@ -239,20 +260,21 @@ static const struct intel_device_info intel_ironlake_d_info = { static const struct intel_device_info intel_ironlake_m_info = { GEN5_FEATURES, PLATFORM(INTEL_IRONLAKE), - .is_mobile = 1, .has_fbc = 1, + .is_mobile = 1, + .display.has_fbc = 1, }; #define GEN6_FEATURES \ GEN(6), \ .num_pipes = 2, \ - .has_hotplug = 1, \ - .has_fbc = 1, \ + .display.has_hotplug = 1, \ + .display.has_fbc = 1, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ .has_coherent_ggtt = true, \ .has_llc = 1, \ .has_rc6 = 1, \ .has_rc6p = 1, \ - .has_aliasing_ppgtt = 1, \ + .ppgtt = INTEL_PPGTT_ALIASING, \ GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PAGE_SIZES, \ CURSOR_OFFSETS @@ -290,15 +312,14 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { #define GEN7_FEATURES \ GEN(7), \ .num_pipes = 3, \ - .has_hotplug = 1, \ - .has_fbc = 1, \ + .display.has_hotplug = 1, \ + .display.has_fbc = 1, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ .has_coherent_ggtt = true, \ .has_llc = 1, \ .has_rc6 = 1, \ .has_rc6p = 1, \ - .has_aliasing_ppgtt = 1, \ - .has_full_ppgtt = 1, \ + .ppgtt = INTEL_PPGTT_FULL, \ GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PAGE_SIZES, \ IVB_CURSOR_OFFSETS @@ -349,10 +370,9 @@ static const struct intel_device_info intel_valleyview_info = { .num_pipes = 2, .has_runtime_pm = 1, .has_rc6 = 1, - .has_gmch_display = 1, - .has_hotplug = 1, - .has_aliasing_ppgtt = 1, - .has_full_ppgtt = 1, + .display.has_gmch_display = 1, + .display.has_hotplug = 1, + .ppgtt = INTEL_PPGTT_FULL, .has_snoop = true, .has_coherent_ggtt = false, .ring_mask = RENDER_RING | BSD_RING | BLT_RING, @@ -365,10 +385,10 @@ static const struct intel_device_info intel_valleyview_info = { #define G75_FEATURES \ GEN7_FEATURES, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ - .has_ddi = 1, \ + .display.has_ddi = 1, \ .has_fpga_dbg = 1, \ - .has_psr = 1, \ - .has_dp_mst = 1, \ + .display.has_psr = 1, \ + .display.has_dp_mst = 1, \ .has_rc6p = 0 /* RC6p removed-by HSW */, \ .has_runtime_pm = 1 @@ -399,7 +419,7 @@ static const struct intel_device_info intel_haswell_gt3_info = { .page_sizes = I915_GTT_PAGE_SIZE_4K | \ I915_GTT_PAGE_SIZE_2M, \ .has_logical_ring_contexts = 1, \ - .has_full_48bit_ppgtt = 1, \ + .ppgtt = INTEL_PPGTT_FULL_4LVL, \ .has_64bit_reloc = 1, \ .has_reset_engine = 1 @@ -435,16 +455,15 @@ static const struct intel_device_info intel_cherryview_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), .num_pipes = 3, - .has_hotplug = 1, + .display.has_hotplug = 1, .is_lp = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, .has_64bit_reloc = 1, .has_runtime_pm = 1, .has_rc6 = 1, 
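/*
 * A minimal sketch (illustrative only, not part of the patch) of the
 * conversion pattern applied throughout these device-info tables:
 * display-only feature flags move under an embedded "display"
 * sub-struct, and the separate has_*_ppgtt booleans collapse into a
 * single enum-valued .ppgtt field, so an entry that used to read
 *
 *	.has_hotplug = 1, .has_fbc = 1,
 *	.has_aliasing_ppgtt = 1, .has_full_ppgtt = 1,
 *
 * would now read
 *
 *	.display.has_hotplug = 1,
 *	.display.has_fbc = 1,
 *	.ppgtt = INTEL_PPGTT_FULL,
 *
 * keeping GT and display capabilities visually separate.
 */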
.has_logical_ring_contexts = 1, - .has_gmch_display = 1, - .has_aliasing_ppgtt = 1, - .has_full_ppgtt = 1, + .display.has_gmch_display = 1, + .ppgtt = INTEL_PPGTT_FULL, .has_reset_engine = 1, .has_snoop = true, .has_coherent_ggtt = false, @@ -465,13 +484,15 @@ static const struct intel_device_info intel_cherryview_info = { GEN(9), \ GEN9_DEFAULT_PAGE_SIZES, \ .has_logical_ring_preemption = 1, \ - .has_csr = 1, \ + .display.has_csr = 1, \ .has_guc = 1, \ - .has_ipc = 1, \ + .display.has_ipc = 1, \ .ddb_size = 896 #define SKL_PLATFORM \ GEN9_FEATURES, \ + /* Display WA #0477 WaDisableIPC: skl */ \ + .display.has_ipc = 0, \ PLATFORM(INTEL_SKYLAKE) static const struct intel_device_info intel_skylake_gt1_info = { @@ -502,29 +523,27 @@ static const struct intel_device_info intel_skylake_gt4_info = { #define GEN9_LP_FEATURES \ GEN(9), \ .is_lp = 1, \ - .has_hotplug = 1, \ + .display.has_hotplug = 1, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ .num_pipes = 3, \ .has_64bit_reloc = 1, \ - .has_ddi = 1, \ + .display.has_ddi = 1, \ .has_fpga_dbg = 1, \ - .has_fbc = 1, \ - .has_psr = 1, \ + .display.has_fbc = 1, \ + .display.has_psr = 1, \ .has_runtime_pm = 1, \ .has_pooled_eu = 0, \ - .has_csr = 1, \ + .display.has_csr = 1, \ .has_rc6 = 1, \ - .has_dp_mst = 1, \ + .display.has_dp_mst = 1, \ .has_logical_ring_contexts = 1, \ .has_logical_ring_preemption = 1, \ .has_guc = 1, \ - .has_aliasing_ppgtt = 1, \ - .has_full_ppgtt = 1, \ - .has_full_48bit_ppgtt = 1, \ + .ppgtt = INTEL_PPGTT_FULL_4LVL, \ .has_reset_engine = 1, \ .has_snoop = true, \ .has_coherent_ggtt = false, \ - .has_ipc = 1, \ + .display.has_ipc = 1, \ GEN9_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_PIPEOFFSETS, \ IVB_CURSOR_OFFSETS, \ @@ -598,6 +617,22 @@ static const struct intel_device_info intel_cannonlake_info = { #define GEN11_FEATURES \ GEN10_FEATURES, \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = PIPE_C_OFFSET, \ + [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \ + [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ + [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \ + [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ + }, \ GEN(11), \ .ddb_size = 2048, \ .has_logical_ring_elsq = 1 @@ -663,7 +698,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info), INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), - INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info), + INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info), INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info), INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info), INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info), @@ -671,6 +706,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info), INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info), + INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info), INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info), INTEL_CNL_IDS(&intel_cannonlake_info), INTEL_ICL_11_IDS(&intel_icelake_11_info), diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 664b96bb65a3..4529edfdcfc8 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -890,8 +890,8 @@ static int 
gen8_oa_read(struct i915_perf_stream *stream, DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", dev_priv->perf.oa.period_exponent); - dev_priv->perf.oa.ops.oa_disable(dev_priv); - dev_priv->perf.oa.ops.oa_enable(dev_priv); + dev_priv->perf.oa.ops.oa_disable(stream); + dev_priv->perf.oa.ops.oa_enable(stream); /* * Note: .oa_enable() is expected to re-init the oabuffer and @@ -1114,8 +1114,8 @@ static int gen7_oa_read(struct i915_perf_stream *stream, DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", dev_priv->perf.oa.period_exponent); - dev_priv->perf.oa.ops.oa_disable(dev_priv); - dev_priv->perf.oa.ops.oa_enable(dev_priv); + dev_priv->perf.oa.ops.oa_disable(stream); + dev_priv->perf.oa.ops.oa_enable(stream); oastatus1 = I915_READ(GEN7_OASTATUS1); } @@ -1528,8 +1528,6 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv) goto err_unpin; } - dev_priv->perf.oa.ops.init_oa_buffer(dev_priv); - DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n", i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma), dev_priv->perf.oa.oa_buffer.vaddr); @@ -1563,9 +1561,11 @@ static void config_oa_regs(struct drm_i915_private *dev_priv, } } -static int hsw_enable_metric_set(struct drm_i915_private *dev_priv, - const struct i915_oa_config *oa_config) +static int hsw_enable_metric_set(struct i915_perf_stream *stream) { + struct drm_i915_private *dev_priv = stream->dev_priv; + const struct i915_oa_config *oa_config = stream->oa_config; + /* PRM: * * OA unit is using “crclk” for its functionality. When trunk @@ -1767,9 +1767,10 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, return 0; } -static int gen8_enable_metric_set(struct drm_i915_private *dev_priv, - const struct i915_oa_config *oa_config) +static int gen8_enable_metric_set(struct i915_perf_stream *stream) { + struct drm_i915_private *dev_priv = stream->dev_priv; + const struct i915_oa_config *oa_config = stream->oa_config; int ret; /* @@ -1837,10 +1838,10 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv) I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE); } -static void gen7_oa_enable(struct drm_i915_private *dev_priv) +static void gen7_oa_enable(struct i915_perf_stream *stream) { - struct i915_gem_context *ctx = - dev_priv->perf.oa.exclusive_stream->ctx; + struct drm_i915_private *dev_priv = stream->dev_priv; + struct i915_gem_context *ctx = stream->ctx; u32 ctx_id = dev_priv->perf.oa.specific_ctx_id; bool periodic = dev_priv->perf.oa.periodic; u32 period_exponent = dev_priv->perf.oa.period_exponent; @@ -1867,8 +1868,9 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv) GEN7_OACONTROL_ENABLE); } -static void gen8_oa_enable(struct drm_i915_private *dev_priv) +static void gen8_oa_enable(struct i915_perf_stream *stream) { + struct drm_i915_private *dev_priv = stream->dev_priv; u32 report_format = dev_priv->perf.oa.oa_buffer.format; /* @@ -1905,7 +1907,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; - dev_priv->perf.oa.ops.oa_enable(dev_priv); + dev_priv->perf.oa.ops.oa_enable(stream); if (dev_priv->perf.oa.periodic) hrtimer_start(&dev_priv->perf.oa.poll_check_timer, @@ -1913,8 +1915,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream) HRTIMER_MODE_REL_PINNED); } -static void gen7_oa_disable(struct drm_i915_private *dev_priv) +static void gen7_oa_disable(struct i915_perf_stream *stream) { + struct drm_i915_private *dev_priv = stream->dev_priv; + 
I915_WRITE(GEN7_OACONTROL, 0); if (intel_wait_for_register(dev_priv, GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, @@ -1922,8 +1926,10 @@ static void gen7_oa_disable(struct drm_i915_private *dev_priv) DRM_ERROR("wait for OA to be disabled timed out\n"); } -static void gen8_oa_disable(struct drm_i915_private *dev_priv) +static void gen8_oa_disable(struct i915_perf_stream *stream) { + struct drm_i915_private *dev_priv = stream->dev_priv; + I915_WRITE(GEN8_OACONTROL, 0); if (intel_wait_for_register(dev_priv, GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, @@ -1943,7 +1949,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; - dev_priv->perf.oa.ops.oa_disable(dev_priv); + dev_priv->perf.oa.ops.oa_disable(stream); if (dev_priv->perf.oa.periodic) hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer); @@ -1998,7 +2004,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return -EINVAL; } - if (!dev_priv->perf.oa.ops.init_oa_buffer) { + if (!dev_priv->perf.oa.ops.enable_metric_set) { DRM_DEBUG("OA unit not supported\n"); return -ENODEV; } @@ -2092,8 +2098,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, if (ret) goto err_lock; - ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, - stream->oa_config); + ret = dev_priv->perf.oa.ops.enable_metric_set(stream); if (ret) { DRM_DEBUG("Unable to enable metric set\n"); goto err_enable; @@ -3387,7 +3392,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.oa.ops.is_valid_mux_reg = hsw_is_valid_mux_addr; dev_priv->perf.oa.ops.is_valid_flex_reg = NULL; - dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer; dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set; dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set; dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable; @@ -3406,7 +3410,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv) */ dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats; - dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer; dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable; dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable; dev_priv->perf.oa.ops.read = gen8_oa_read; diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 3f502eef2431..6fc4b8eeab42 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -27,8 +27,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv, slice_length = sizeof(sseu->slice_mask); subslice_length = sseu->max_slices * - DIV_ROUND_UP(sseu->max_subslices, - sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE); + DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE); eu_length = sseu->max_slices * sseu->max_subslices * DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7c491ea3d052..0a7d60509ca7 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -157,20 +157,37 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) /* * Named helper wrappers around _PICK_EVEN() and _PICK(). 
*/ -#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b) -#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b)) -#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b) -#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b) -#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b) -#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b)) -#define _PORT(port, a, b) _PICK_EVEN(port, a, b) -#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b)) -#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) -#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) -#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b) -#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) -#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__) -#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) +#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b) +#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b) +#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b) +#define _PORT(port, a, b) _PICK_EVEN(port, a, b) +#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b) + +#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b)) +#define _MMIO_PLANE(plane, a, b) _MMIO(_PLANE(plane, a, b)) +#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b)) +#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b)) +#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) + +#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__) + +#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) +#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) +#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) + +/* + * Device info offset array based helpers for groups of registers with unevenly + * spaced base offsets. + */ +#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \ + dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \ + dev_priv->info.display_mmio_offset) +#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \ + dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \ + dev_priv->info.display_mmio_offset) +#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \ + dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \ + dev_priv->info.display_mmio_offset) #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value)) #define _MASKED_FIELD(mask, value) ({ \ @@ -1631,35 +1648,6 @@ enum i915_power_well_id { #define PHY_RESERVED (1 << 7) #define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC) -#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014) -#define CL_POWER_DOWN_ENABLE (1 << 4) -#define SUS_CLOCK_CONFIG (3 << 0) - -#define _ICL_PORT_CL_DW5_A 0x162014 -#define _ICL_PORT_CL_DW5_B 0x6C014 -#define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \ - _ICL_PORT_CL_DW5_B) - -#define _CNL_PORT_CL_DW10_A 0x162028 -#define _ICL_PORT_CL_DW10_B 0x6c028 -#define ICL_PORT_CL_DW10(port) _MMIO_PORT(port, \ - _CNL_PORT_CL_DW10_A, \ - _ICL_PORT_CL_DW10_B) -#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) -#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 -#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) -#define PWR_UP_ALL_LANES (0x0 << 4) -#define PWR_DOWN_LN_3_2_1 (0xe << 4) -#define PWR_DOWN_LN_3_2 (0xc << 4) -#define PWR_DOWN_LN_3 (0x8 << 4) -#define PWR_DOWN_LN_2_1_0 (0x7 << 4) -#define PWR_DOWN_LN_1_0 (0x3 << 4) -#define PWR_DOWN_LN_1 (0x2 << 4) -#define PWR_DOWN_LN_3_1 (0xa << 4) -#define PWR_DOWN_LN_3_1_0 (0xb << 4) -#define PWR_DOWN_LN_MASK (0xf << 4) -#define PWR_DOWN_LN_SHIFT 4 - #define _PORT_CL1CM_DW9_A 0x162024 #define _PORT_CL1CM_DW9_BC 0x6C024 #define IREF0RC_OFFSET_SHIFT 8 @@ -1672,13 +1660,6 @@ enum 
i915_power_well_id { #define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT) #define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC) -#define _ICL_PORT_CL_DW12_A 0x162030 -#define _ICL_PORT_CL_DW12_B 0x6C030 -#define ICL_LANE_ENABLE_AUX (1 << 0) -#define ICL_PORT_CL_DW12(port) _MMIO_PORT((port), \ - _ICL_PORT_CL_DW12_A, \ - _ICL_PORT_CL_DW12_B) - #define _PORT_CL1CM_DW28_A 0x162070 #define _PORT_CL1CM_DW28_BC 0x6C070 #define OCL1_POWER_DOWN_EN (1 << 23) @@ -1691,6 +1672,74 @@ enum i915_power_well_id { #define OCL2_LDOFUSE_PWR_DIS (1 << 6) #define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC) +/* + * CNL/ICL Port/COMBO-PHY Registers + */ +#define _ICL_COMBOPHY_A 0x162000 +#define _ICL_COMBOPHY_B 0x6C000 +#define _ICL_COMBOPHY(port) _PICK(port, _ICL_COMBOPHY_A, \ + _ICL_COMBOPHY_B) + +/* CNL/ICL Port CL_DW registers */ +#define _ICL_PORT_CL_DW(dw, port) (_ICL_COMBOPHY(port) + \ + 4 * (dw)) + +#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014) +#define ICL_PORT_CL_DW5(port) _MMIO(_ICL_PORT_CL_DW(5, port)) +#define CL_POWER_DOWN_ENABLE (1 << 4) +#define SUS_CLOCK_CONFIG (3 << 0) + +#define ICL_PORT_CL_DW10(port) _MMIO(_ICL_PORT_CL_DW(10, port)) +#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) +#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 +#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) +#define PWR_UP_ALL_LANES (0x0 << 4) +#define PWR_DOWN_LN_3_2_1 (0xe << 4) +#define PWR_DOWN_LN_3_2 (0xc << 4) +#define PWR_DOWN_LN_3 (0x8 << 4) +#define PWR_DOWN_LN_2_1_0 (0x7 << 4) +#define PWR_DOWN_LN_1_0 (0x3 << 4) +#define PWR_DOWN_LN_1 (0x2 << 4) +#define PWR_DOWN_LN_3_1 (0xa << 4) +#define PWR_DOWN_LN_3_1_0 (0xb << 4) +#define PWR_DOWN_LN_MASK (0xf << 4) +#define PWR_DOWN_LN_SHIFT 4 + +#define ICL_PORT_CL_DW12(port) _MMIO(_ICL_PORT_CL_DW(12, port)) +#define ICL_LANE_ENABLE_AUX (1 << 0) + +/* CNL/ICL Port COMP_DW registers */ +#define _ICL_PORT_COMP 0x100 +#define _ICL_PORT_COMP_DW(dw, port) (_ICL_COMBOPHY(port) + \ + _ICL_PORT_COMP + 4 * (dw)) + +#define CNL_PORT_COMP_DW0 _MMIO(0x162100) +#define ICL_PORT_COMP_DW0(port) _MMIO(_ICL_PORT_COMP_DW(0, port)) +#define COMP_INIT (1 << 31) + +#define CNL_PORT_COMP_DW1 _MMIO(0x162104) +#define ICL_PORT_COMP_DW1(port) _MMIO(_ICL_PORT_COMP_DW(1, port)) + +#define CNL_PORT_COMP_DW3 _MMIO(0x16210c) +#define ICL_PORT_COMP_DW3(port) _MMIO(_ICL_PORT_COMP_DW(3, port)) +#define PROCESS_INFO_DOT_0 (0 << 26) +#define PROCESS_INFO_DOT_1 (1 << 26) +#define PROCESS_INFO_DOT_4 (2 << 26) +#define PROCESS_INFO_MASK (7 << 26) +#define PROCESS_INFO_SHIFT 26 +#define VOLTAGE_INFO_0_85V (0 << 24) +#define VOLTAGE_INFO_0_95V (1 << 24) +#define VOLTAGE_INFO_1_05V (2 << 24) +#define VOLTAGE_INFO_MASK (3 << 24) +#define VOLTAGE_INFO_SHIFT 24 + +#define CNL_PORT_COMP_DW9 _MMIO(0x162124) +#define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port)) + +#define CNL_PORT_COMP_DW10 _MMIO(0x162128) +#define ICL_PORT_COMP_DW10(port) _MMIO(_ICL_PORT_COMP_DW(10, port)) + +/* CNL/ICL Port PCS registers */ #define _CNL_PORT_PCS_DW1_GRP_AE 0x162304 #define _CNL_PORT_PCS_DW1_GRP_B 0x162384 #define _CNL_PORT_PCS_DW1_GRP_C 0x162B04 @@ -1708,7 +1757,6 @@ enum i915_power_well_id { _CNL_PORT_PCS_DW1_GRP_D, \ _CNL_PORT_PCS_DW1_GRP_AE, \ _CNL_PORT_PCS_DW1_GRP_F)) - #define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \ _CNL_PORT_PCS_DW1_LN0_AE, \ _CNL_PORT_PCS_DW1_LN0_B, \ @@ -1717,24 +1765,21 @@ enum i915_power_well_id { _CNL_PORT_PCS_DW1_LN0_AE, \ _CNL_PORT_PCS_DW1_LN0_F)) -#define _ICL_PORT_PCS_DW1_GRP_A 0x162604 -#define _ICL_PORT_PCS_DW1_GRP_B 0x6C604 -#define _ICL_PORT_PCS_DW1_LN0_A 
0x162804 -#define _ICL_PORT_PCS_DW1_LN0_B 0x6C804 -#define _ICL_PORT_PCS_DW1_AUX_A 0x162304 -#define _ICL_PORT_PCS_DW1_AUX_B 0x6c304 -#define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\ - _ICL_PORT_PCS_DW1_GRP_A, \ - _ICL_PORT_PCS_DW1_GRP_B) -#define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \ - _ICL_PORT_PCS_DW1_LN0_A, \ - _ICL_PORT_PCS_DW1_LN0_B) -#define ICL_PORT_PCS_DW1_AUX(port) _MMIO_PORT(port, \ - _ICL_PORT_PCS_DW1_AUX_A, \ - _ICL_PORT_PCS_DW1_AUX_B) +#define _ICL_PORT_PCS_AUX 0x300 +#define _ICL_PORT_PCS_GRP 0x600 +#define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100) +#define _ICL_PORT_PCS_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \ + _ICL_PORT_PCS_AUX + 4 * (dw)) +#define _ICL_PORT_PCS_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \ + _ICL_PORT_PCS_GRP + 4 * (dw)) +#define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \ + _ICL_PORT_PCS_LN(ln) + 4 * (dw)) +#define ICL_PORT_PCS_DW1_AUX(port) _MMIO(_ICL_PORT_PCS_DW_AUX(1, port)) +#define ICL_PORT_PCS_DW1_GRP(port) _MMIO(_ICL_PORT_PCS_DW_GRP(1, port)) +#define ICL_PORT_PCS_DW1_LN0(port) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port)) #define COMMON_KEEPER_EN (1 << 26) -/* CNL Port TX registers */ +/* CNL/ICL Port TX registers */ #define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340 #define _CNL_PORT_TX_B_GRP_OFFSET 0x1623C0 #define _CNL_PORT_TX_C_GRP_OFFSET 0x162B40 @@ -1762,23 +1807,22 @@ enum i915_power_well_id { _CNL_PORT_TX_F_LN0_OFFSET) + \ 4 * (dw)) -#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 2)) -#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 2)) -#define _ICL_PORT_TX_DW2_GRP_A 0x162688 -#define _ICL_PORT_TX_DW2_GRP_B 0x6C688 -#define _ICL_PORT_TX_DW2_LN0_A 0x162888 -#define _ICL_PORT_TX_DW2_LN0_B 0x6C888 -#define _ICL_PORT_TX_DW2_AUX_A 0x162388 -#define _ICL_PORT_TX_DW2_AUX_B 0x6c388 -#define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \ - _ICL_PORT_TX_DW2_GRP_A, \ - _ICL_PORT_TX_DW2_GRP_B) -#define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \ - _ICL_PORT_TX_DW2_LN0_A, \ - _ICL_PORT_TX_DW2_LN0_B) -#define ICL_PORT_TX_DW2_AUX(port) _MMIO_PORT(port, \ - _ICL_PORT_TX_DW2_AUX_A, \ - _ICL_PORT_TX_DW2_AUX_B) +#define _ICL_PORT_TX_AUX 0x380 +#define _ICL_PORT_TX_GRP 0x680 +#define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100) + +#define _ICL_PORT_TX_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \ + _ICL_PORT_TX_AUX + 4 * (dw)) +#define _ICL_PORT_TX_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \ + _ICL_PORT_TX_GRP + 4 * (dw)) +#define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \ + _ICL_PORT_TX_LN(ln) + 4 * (dw)) + +#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(2, port)) +#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(2, port)) +#define ICL_PORT_TX_DW2_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(2, port)) +#define ICL_PORT_TX_DW2_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(2, port)) +#define ICL_PORT_TX_DW2_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, port)) #define SWING_SEL_UPPER(x) (((x) >> 3) << 15) #define SWING_SEL_UPPER_MASK (1 << 15) #define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) @@ -1795,24 +1839,10 @@ enum i915_power_well_id { #define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ _CNL_PORT_TX_DW4_LN0_AE))) -#define _ICL_PORT_TX_DW4_GRP_A 0x162690 -#define _ICL_PORT_TX_DW4_GRP_B 0x6C690 -#define _ICL_PORT_TX_DW4_LN0_A 0x162890 -#define _ICL_PORT_TX_DW4_LN1_A 0x162990 -#define _ICL_PORT_TX_DW4_LN0_B 0x6C890 -#define _ICL_PORT_TX_DW4_AUX_A 0x162390 -#define _ICL_PORT_TX_DW4_AUX_B 0x6c390 -#define ICL_PORT_TX_DW4_GRP(port) 
_MMIO_PORT(port, \ - _ICL_PORT_TX_DW4_GRP_A, \ - _ICL_PORT_TX_DW4_GRP_B) -#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_PORT(port, \ - _ICL_PORT_TX_DW4_LN0_A, \ - _ICL_PORT_TX_DW4_LN0_B) + \ - ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \ - _ICL_PORT_TX_DW4_LN0_A))) -#define ICL_PORT_TX_DW4_AUX(port) _MMIO_PORT(port, \ - _ICL_PORT_TX_DW4_AUX_A, \ - _ICL_PORT_TX_DW4_AUX_B) +#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) +#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port)) +#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port)) +#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port)) #define LOADGEN_SELECT (1 << 31) #define POST_CURSOR_1(x) ((x) << 12) #define POST_CURSOR_1_MASK (0x3F << 12) @@ -1821,23 +1851,11 @@ enum i915_power_well_id { #define CURSOR_COEFF(x) ((x) << 0) #define CURSOR_COEFF_MASK (0x3F << 0) -#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 5)) -#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 5)) -#define _ICL_PORT_TX_DW5_GRP_A 0x162694 -#define _ICL_PORT_TX_DW5_GRP_B 0x6C694 -#define _ICL_PORT_TX_DW5_LN0_A 0x162894 -#define _ICL_PORT_TX_DW5_LN0_B 0x6C894 -#define _ICL_PORT_TX_DW5_AUX_A 0x162394 -#define _ICL_PORT_TX_DW5_AUX_B 0x6c394 -#define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \ - _ICL_PORT_TX_DW5_GRP_A, \ - _ICL_PORT_TX_DW5_GRP_B) -#define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \ - _ICL_PORT_TX_DW5_LN0_A, \ - _ICL_PORT_TX_DW5_LN0_B) -#define ICL_PORT_TX_DW5_AUX(port) _MMIO_PORT(port, \ - _ICL_PORT_TX_DW5_AUX_A, \ - _ICL_PORT_TX_DW5_AUX_B) +#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(5, port)) +#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(5, port)) +#define ICL_PORT_TX_DW5_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(5, port)) +#define ICL_PORT_TX_DW5_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(5, port)) +#define ICL_PORT_TX_DW5_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, port)) #define TX_TRAINING_EN (1 << 31) #define TAP2_DISABLE (1 << 30) #define TAP3_DISABLE (1 << 29) @@ -2054,49 +2072,16 @@ enum i915_power_well_id { #define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC) #define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28) -#define CNL_PORT_COMP_DW0 _MMIO(0x162100) -#define COMP_INIT (1 << 31) -#define CNL_PORT_COMP_DW1 _MMIO(0x162104) -#define CNL_PORT_COMP_DW3 _MMIO(0x16210c) -#define PROCESS_INFO_DOT_0 (0 << 26) -#define PROCESS_INFO_DOT_1 (1 << 26) -#define PROCESS_INFO_DOT_4 (2 << 26) -#define PROCESS_INFO_MASK (7 << 26) -#define PROCESS_INFO_SHIFT 26 -#define VOLTAGE_INFO_0_85V (0 << 24) -#define VOLTAGE_INFO_0_95V (1 << 24) -#define VOLTAGE_INFO_1_05V (2 << 24) -#define VOLTAGE_INFO_MASK (3 << 24) -#define VOLTAGE_INFO_SHIFT 24 -#define CNL_PORT_COMP_DW9 _MMIO(0x162124) -#define CNL_PORT_COMP_DW10 _MMIO(0x162128) - -#define _ICL_PORT_COMP_DW0_A 0x162100 -#define _ICL_PORT_COMP_DW0_B 0x6C100 -#define ICL_PORT_COMP_DW0(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW0_A, \ - _ICL_PORT_COMP_DW0_B) -#define _ICL_PORT_COMP_DW1_A 0x162104 -#define _ICL_PORT_COMP_DW1_B 0x6C104 -#define ICL_PORT_COMP_DW1(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW1_A, \ - _ICL_PORT_COMP_DW1_B) -#define _ICL_PORT_COMP_DW3_A 0x16210C -#define _ICL_PORT_COMP_DW3_B 0x6C10C -#define ICL_PORT_COMP_DW3(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW3_A, \ - _ICL_PORT_COMP_DW3_B) -#define _ICL_PORT_COMP_DW9_A 0x162124 -#define _ICL_PORT_COMP_DW9_B 0x6C124 -#define ICL_PORT_COMP_DW9(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW9_A, \ - _ICL_PORT_COMP_DW9_B) -#define 
_ICL_PORT_COMP_DW10_A 0x162128 -#define _ICL_PORT_COMP_DW10_B 0x6C128 -#define ICL_PORT_COMP_DW10(port) _MMIO_PORT(port, \ - _ICL_PORT_COMP_DW10_A, \ - _ICL_PORT_COMP_DW10_B) +#define FIA1_BASE 0x163000 /* ICL PHY DFLEX registers */ -#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0) -#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n))) -#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n))) +#define PORT_TX_DFLEXDPMLE1 _MMIO(FIA1_BASE + 0x008C0) +#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port))) +#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port))) /* BXT PHY Ref registers */ #define _PORT_REF_DW3_A 0x16218C @@ -2413,6 +2398,7 @@ enum i915_power_well_id { #define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080) #define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF +#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7) #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) #define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31) @@ -2573,6 +2559,7 @@ enum i915_power_well_id { /* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */ #define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4) #define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2) +#define GEN11_ENABLE_32_PLANE_MODE (1 << 7) /* WaClearTdlStateAckDirtyBits */ #define GEN8_STATE_ACK _MMIO(0x20F0) @@ -3475,11 +3462,13 @@ enum i915_power_well_id { /* * Palette regs */ -#define PALETTE_A_OFFSET 0xa000 -#define PALETTE_B_OFFSET 0xa800 -#define CHV_PALETTE_C_OFFSET 0xc000 -#define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] + \ - dev_priv->info.display_mmio_offset + (i) * 4) +#define _PALETTE_A 0xa000 +#define _PALETTE_B 0xa800 +#define _CHV_PALETTE_C 0xc000 +#define PALETTE(pipe, i) _MMIO(dev_priv->info.display_mmio_offset + \ + _PICK((pipe), _PALETTE_A, \ + _PALETTE_B, _CHV_PALETTE_C) + \ + (i) * 4) /* MCH MMIO space */ @@ -4061,15 +4050,27 @@ enum { #define _VSYNCSHIFT_B 0x61028 #define _PIPE_MULT_B 0x6102c +/* DSI 0 timing regs */ +#define _HTOTAL_DSI0 0x6b000 +#define _HSYNC_DSI0 0x6b008 +#define _VTOTAL_DSI0 0x6b00c +#define _VSYNC_DSI0 0x6b014 +#define _VSYNCSHIFT_DSI0 0x6b028 + +/* DSI 1 timing regs */ +#define _HTOTAL_DSI1 0x6b800 +#define _HSYNC_DSI1 0x6b808 +#define _VTOTAL_DSI1 0x6b80c +#define _VSYNC_DSI1 0x6b814 +#define _VSYNCSHIFT_DSI1 0x6b828 + #define TRANSCODER_A_OFFSET 0x60000 #define TRANSCODER_B_OFFSET 0x61000 #define TRANSCODER_C_OFFSET 0x62000 #define CHV_TRANSCODER_C_OFFSET 0x63000 #define TRANSCODER_EDP_OFFSET 0x6f000 - -#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \ - dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \ - dev_priv->info.display_mmio_offset) +#define TRANSCODER_DSI0_OFFSET 0x6b000 +#define TRANSCODER_DSI1_OFFSET 0x6b800 #define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A) #define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A) @@ -4149,9 +4150,13 @@ enum { /* Bspec claims those aren't shifted but stay at 0x64800 */ #define EDP_PSR_IMR _MMIO(0x64834) #define EDP_PSR_IIR _MMIO(0x64838) -#define EDP_PSR_ERROR(trans) (1 << (((trans) * 8 + 10) & 31)) -#define EDP_PSR_POST_EXIT(trans) (1 << (((trans) * 8 + 9) & 31)) -#define EDP_PSR_PRE_ENTRY(trans) (1 << (((trans) * 8 + 8) & 31)) +#define EDP_PSR_ERROR(shift) (1 << ((shift) + 2)) +#define EDP_PSR_POST_EXIT(shift) (1 << ((shift) + 1)) +#define EDP_PSR_PRE_ENTRY(shift) (1 << 
(shift)) +#define EDP_PSR_TRANSCODER_C_SHIFT 24 +#define EDP_PSR_TRANSCODER_B_SHIFT 16 +#define EDP_PSR_TRANSCODER_A_SHIFT 8 +#define EDP_PSR_TRANSCODER_EDP_SHIFT 0 #define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10) #define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26) @@ -4195,7 +4200,7 @@ enum { #define EDP_PSR_DEBUG_MASK_LPSP (1 << 27) #define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26) #define EDP_PSR_DEBUG_MASK_HPD (1 << 25) -#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) +#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */ #define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */ #define EDP_PSR2_CTL _MMIO(0x6f900) @@ -4232,7 +4237,7 @@ enum { #define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9) #define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8) #define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6) -#define PSR_EVENT_REGISTER_UPDATE (1 << 5) +#define PSR_EVENT_REGISTER_UPDATE (1 << 5) /* Reserved in ICL+ */ #define PSR_EVENT_HDCP_ENABLE (1 << 4) #define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3) #define PSR_EVENT_VBI_ENABLE (1 << 2) @@ -4565,6 +4570,7 @@ enum { * of the infoframe structure specified by CEA-861. */ #define VIDEO_DIP_DATA_SIZE 32 #define VIDEO_DIP_VSC_DATA_SIZE 36 +#define VIDEO_DIP_PPS_DATA_SIZE 132 #define VIDEO_DIP_CTL _MMIO(0x61170) /* Pre HSW: */ #define VIDEO_DIP_ENABLE (1 << 31) @@ -4584,6 +4590,15 @@ enum { #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) #define VIDEO_DIP_FREQ_MASK (3 << 16) /* HSW and later: */ +#define DRM_DIP_ENABLE (1 << 28) +#define PSR_VSC_BIT_7_SET (1 << 27) +#define VSC_SELECT_MASK (0x3 << 25) +#define VSC_SELECT_SHIFT 25 +#define VSC_DIP_HW_HEA_DATA (0 << 25) +#define VSC_DIP_HW_HEA_SW_DATA (1 << 25) +#define VSC_DIP_HW_DATA_SW_HEA (2 << 25) +#define VSC_DIP_SW_HEA_DATA (3 << 25) +#define VDIP_ENABLE_PPS (1 << 24) #define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20) #define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16) #define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12) @@ -4591,16 +4606,6 @@ enum { #define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4) #define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) -#define DRM_DIP_ENABLE (1 << 28) -#define PSR_VSC_BIT_7_SET (1 << 27) -#define VSC_SELECT_MASK (0x3 << 26) -#define VSC_SELECT_SHIFT 26 -#define VSC_DIP_HW_HEA_DATA (0 << 26) -#define VSC_DIP_HW_HEA_SW_DATA (1 << 26) -#define VSC_DIP_HW_DATA_SW_HEA (2 << 26) -#define VSC_DIP_SW_HEA_DATA (3 << 26) -#define VDIP_ENABLE_PPS (1 << 24) - /* Panel power sequencing */ #define PPS_BASE 0x61200 #define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE) @@ -4613,6 +4618,17 @@ enum { #define _PP_STATUS 0x61200 #define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS) #define PP_ON (1 << 31) + +#define _PP_CONTROL_1 0xc7204 +#define _PP_CONTROL_2 0xc7304 +#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? 
_PP_CONTROL_1 : \ + _PP_CONTROL_2) +#define POWER_CYCLE_DELAY_MASK (0x1f << 4) +#define POWER_CYCLE_DELAY_SHIFT 4 +#define VDD_OVERRIDE_FORCE (1 << 3) +#define BACKLIGHT_ENABLE (1 << 2) +#define PWR_DOWN_ON_RESET (1 << 1) +#define PWR_STATE_TARGET (1 << 0) /* * Indicates that all dependencies of the panel are on: * @@ -5636,9 +5652,9 @@ enum { */ #define PIPE_EDP_OFFSET 0x7f000 -#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \ - dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \ - dev_priv->info.display_mmio_offset) +/* ICL DSI 0 and 1 */ +#define PIPE_DSI0_OFFSET 0x7b000 +#define PIPE_DSI1_OFFSET 0x7b800 #define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF) #define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL) @@ -6087,10 +6103,6 @@ enum { #define _CURBBASE_IVB 0x71084 #define _CURBPOS_IVB 0x71088 -#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \ - dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \ - dev_priv->info.display_mmio_offset) - #define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR) #define CURBASE(pipe) _CURSOR2(pipe, _CURABASE) #define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS) @@ -6224,6 +6236,10 @@ enum { #define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4) #define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC) +/* ICL DSI 0 and 1 */ +#define _PIPEDSI0CONF 0x7b008 +#define _PIPEDSI1CONF 0x7b808 + /* Sprite A control */ #define _DVSACNTR 0x72180 #define DVS_ENABLE (1 << 31) @@ -6511,6 +6527,7 @@ enum { #define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21) #define PLANE_CTL_ORDER_BGRX (0 << 20) #define PLANE_CTL_ORDER_RGBX (1 << 20) +#define PLANE_CTL_YUV420_Y_PLANE (1 << 19) #define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) #define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16) #define PLANE_CTL_YUV422_YUYV (0 << 16) @@ -6554,17 +6571,33 @@ enum { #define _PLANE_KEYVAL_2_A 0x70294 #define _PLANE_KEYMSK_1_A 0x70198 #define _PLANE_KEYMSK_2_A 0x70298 +#define PLANE_KEYMSK_ALPHA_ENABLE (1 << 31) #define _PLANE_KEYMAX_1_A 0x701a0 #define _PLANE_KEYMAX_2_A 0x702a0 +#define PLANE_KEYMAX_ALPHA(a) ((a) << 24) #define _PLANE_AUX_DIST_1_A 0x701c0 #define _PLANE_AUX_DIST_2_A 0x702c0 #define _PLANE_AUX_OFFSET_1_A 0x701c4 #define _PLANE_AUX_OFFSET_2_A 0x702c4 +#define _PLANE_CUS_CTL_1_A 0x701c8 +#define _PLANE_CUS_CTL_2_A 0x702c8 +#define PLANE_CUS_ENABLE (1 << 31) +#define PLANE_CUS_PLANE_6 (0 << 30) +#define PLANE_CUS_PLANE_7 (1 << 30) +#define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19) +#define PLANE_CUS_HPHASE_0 (0 << 16) +#define PLANE_CUS_HPHASE_0_25 (1 << 16) +#define PLANE_CUS_HPHASE_0_5 (2 << 16) +#define PLANE_CUS_VPHASE_SIGN_NEGATIVE (1 << 15) +#define PLANE_CUS_VPHASE_0 (0 << 12) +#define PLANE_CUS_VPHASE_0_25 (1 << 12) +#define PLANE_CUS_VPHASE_0_5 (2 << 12) #define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */ #define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */ #define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */ #define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */ #define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28) +#define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */ #define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */ #define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17) #define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17) @@ -6581,6 +6614,55 @@ enum { #define _PLANE_NV12_BUF_CFG_1_A 0x70278 #define _PLANE_NV12_BUF_CFG_2_A 0x70378 +/* Input CSC Register Definitions */ +#define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0 +#define _PLANE_INPUT_CSC_RY_GY_2_A 0x702E0 + +#define _PLANE_INPUT_CSC_RY_GY_1_B 0x711E0 +#define 
_PLANE_INPUT_CSC_RY_GY_2_B 0x712E0 + +#define _PLANE_INPUT_CSC_RY_GY_1(pipe) \ + _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_1_A, \ + _PLANE_INPUT_CSC_RY_GY_1_B) +#define _PLANE_INPUT_CSC_RY_GY_2(pipe) \ + _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \ + _PLANE_INPUT_CSC_RY_GY_2_B) + +#define PLANE_INPUT_CSC_COEFF(pipe, plane, index) \ + _MMIO_PLANE(plane, _PLANE_INPUT_CSC_RY_GY_1(pipe) + (index) * 4, \ + _PLANE_INPUT_CSC_RY_GY_2(pipe) + (index) * 4) + +#define _PLANE_INPUT_CSC_PREOFF_HI_1_A 0x701F8 +#define _PLANE_INPUT_CSC_PREOFF_HI_2_A 0x702F8 + +#define _PLANE_INPUT_CSC_PREOFF_HI_1_B 0x711F8 +#define _PLANE_INPUT_CSC_PREOFF_HI_2_B 0x712F8 + +#define _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) \ + _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_1_A, \ + _PLANE_INPUT_CSC_PREOFF_HI_1_B) +#define _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) \ + _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_2_A, \ + _PLANE_INPUT_CSC_PREOFF_HI_2_B) +#define PLANE_INPUT_CSC_PREOFF(pipe, plane, index) \ + _MMIO_PLANE(plane, _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) + (index) * 4, \ + _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) + (index) * 4) + +#define _PLANE_INPUT_CSC_POSTOFF_HI_1_A 0x70204 +#define _PLANE_INPUT_CSC_POSTOFF_HI_2_A 0x70304 + +#define _PLANE_INPUT_CSC_POSTOFF_HI_1_B 0x71204 +#define _PLANE_INPUT_CSC_POSTOFF_HI_2_B 0x71304 + +#define _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) \ + _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_1_A, \ + _PLANE_INPUT_CSC_POSTOFF_HI_1_B) +#define _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) \ + _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_2_A, \ + _PLANE_INPUT_CSC_POSTOFF_HI_2_B) +#define PLANE_INPUT_CSC_POSTOFF(pipe, plane, index) \ + _MMIO_PLANE(plane, _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) + (index) * 4, \ + _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) + (index) * 4) #define _PLANE_CTL_1_B 0x71180 #define _PLANE_CTL_2_B 0x71280 @@ -6697,6 +6779,15 @@ enum { #define PLANE_AUX_OFFSET(pipe, plane) \ _MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe)) +#define _PLANE_CUS_CTL_1_B 0x711c8 +#define _PLANE_CUS_CTL_2_B 0x712c8 +#define _PLANE_CUS_CTL_1(pipe) \ + _PIPE(pipe, _PLANE_CUS_CTL_1_A, _PLANE_CUS_CTL_1_B) +#define _PLANE_CUS_CTL_2(pipe) \ + _PIPE(pipe, _PLANE_CUS_CTL_2_A, _PLANE_CUS_CTL_2_B) +#define PLANE_CUS_CTL(pipe, plane) \ + _MMIO_PLANE(plane, _PLANE_CUS_CTL_1(pipe), _PLANE_CUS_CTL_2(pipe)) + #define _PLANE_COLOR_CTL_1_B 0x711CC #define _PLANE_COLOR_CTL_2_B 0x712CC #define _PLANE_COLOR_CTL_3_B 0x713CC @@ -6850,11 +6941,12 @@ enum { #define _PS_2B_CTRL 0x68A80 #define _PS_1C_CTRL 0x69180 #define PS_SCALER_EN (1 << 31) -#define PS_SCALER_MODE_MASK (3 << 28) -#define PS_SCALER_MODE_DYN (0 << 28) -#define PS_SCALER_MODE_HQ (1 << 28) +#define SKL_PS_SCALER_MODE_MASK (3 << 28) +#define SKL_PS_SCALER_MODE_DYN (0 << 28) +#define SKL_PS_SCALER_MODE_HQ (1 << 28) #define SKL_PS_SCALER_MODE_NV12 (2 << 28) #define PS_SCALER_MODE_PLANAR (1 << 29) +#define PS_SCALER_MODE_NORMAL (0 << 29) #define PS_PLANE_SEL_MASK (7 << 25) #define PS_PLANE_SEL(plane) (((plane) + 1) << 25) #define PS_FILTER_MASK (3 << 23) @@ -6871,6 +6963,8 @@ enum { #define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5) #define PS_VADAPT_MODE_MOD_ADAPT (1 << 5) #define PS_VADAPT_MODE_MOST_ADAPT (3 << 5) +#define PS_PLANE_Y_SEL_MASK (7 << 5) +#define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5) #define _PS_PWR_GATE_1A 0x68160 #define _PS_PWR_GATE_2A 0x68260 @@ -7317,9 +7411,10 @@ enum { #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) -#define CHICKEN_TRANS_A 0x420c0 -#define CHICKEN_TRANS_B 0x420c4 -#define 
CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B) +#define CHICKEN_TRANS_A _MMIO(0x420c0) +#define CHICKEN_TRANS_B _MMIO(0x420c4) +#define CHICKEN_TRANS_C _MMIO(0x420c8) +#define CHICKEN_TRANS_EDP _MMIO(0x420cc) #define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */ #define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19) #define DDI_TRAINING_OVERRIDE_VALUE (1 << 18) @@ -7409,6 +7504,10 @@ enum { #define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c) #define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11) +#define GEN7_SARCHKMD _MMIO(0xB000) +#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31) +#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30) + #define GEN7_L3SQCREG1 _MMIO(0xB010) #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 @@ -7663,6 +7762,7 @@ enum { #define ICP_DDIB_HPD_LONG_DETECT (2 << 4) #define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4) #define ICP_DDIA_HPD_ENABLE (1 << 3) +#define ICP_DDIA_HPD_OP_DRIVE_1 (1 << 2) #define ICP_DDIA_HPD_STATUS_MASK (3 << 0) #define ICP_DDIA_HPD_NO_DETECT (0 << 0) #define ICP_DDIA_HPD_SHORT_DETECT (1 << 0) @@ -7824,8 +7924,7 @@ enum { #define CNP_RAWCLK_DIV_MASK (0x3ff << 16) #define CNP_RAWCLK_DIV(div) ((div) << 16) #define CNP_RAWCLK_FRAC_MASK (0xf << 26) -#define CNP_RAWCLK_FRAC(frac) ((frac) << 26) -#define ICP_RAWCLK_DEN(den) ((den) << 26) +#define CNP_RAWCLK_DEN(den) ((den) << 26) #define ICP_RAWCLK_NUM(num) ((num) << 11) #define PCH_DPLL_TMR_CFG _MMIO(0xc6208) @@ -8625,8 +8724,7 @@ enum { #define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9) #define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7) -#define GAMW_ECO_DEV_RW_IA_REG _MMIO(0x4080) -#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7) +#define GEN10_SAMPLER_MODE _MMIO(0xE18C) /* IVYBRIDGE DPF */ #define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */ @@ -8927,6 +9025,15 @@ enum skl_power_gate { #define CNL_AUX_ANAOVRD1_ENABLE (1 << 16) #define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23) +#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) +#define _ICL_AUX_ANAOVRD1_A 0x162398 +#define _ICL_AUX_ANAOVRD1_B 0x6C398 +#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \ + _ICL_AUX_ANAOVRD1_A, \ + _ICL_AUX_ANAOVRD1_B)) +#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7) +#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0) + /* HDCP Key Registers */ #define HDCP_KEY_CONF _MMIO(0x66c00) #define HDCP_AKSV_SEND_TRIGGER BIT(31) @@ -9009,11 +9116,45 @@ enum skl_power_gate { #define HDCP_STATUS_CIPHER BIT(16) #define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff) +/* HDCP2.2 Registers */ +#define _PORTA_HDCP2_BASE 0x66800 +#define _PORTB_HDCP2_BASE 0x66500 +#define _PORTC_HDCP2_BASE 0x66600 +#define _PORTD_HDCP2_BASE 0x66700 +#define _PORTE_HDCP2_BASE 0x66A00 +#define _PORTF_HDCP2_BASE 0x66900 +#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \ + _PORTA_HDCP2_BASE, \ + _PORTB_HDCP2_BASE, \ + _PORTC_HDCP2_BASE, \ + _PORTD_HDCP2_BASE, \ + _PORTE_HDCP2_BASE, \ + _PORTF_HDCP2_BASE) + (x)) + +#define HDCP2_AUTH_DDI(port) _PORT_HDCP2_BASE(port, 0x98) +#define AUTH_LINK_AUTHENTICATED BIT(31) +#define AUTH_LINK_TYPE BIT(30) +#define AUTH_FORCE_CLR_INPUTCTR BIT(19) +#define AUTH_CLR_KEYS BIT(18) + +#define HDCP2_CTL_DDI(port) _PORT_HDCP2_BASE(port, 0xB0) +#define CTL_LINK_ENCRYPTION_REQ BIT(31) + +#define HDCP2_STATUS_DDI(port) _PORT_HDCP2_BASE(port, 0xB4) +#define STREAM_ENCRYPTION_STATUS_A BIT(31) +#define STREAM_ENCRYPTION_STATUS_B BIT(30) +#define STREAM_ENCRYPTION_STATUS_C BIT(29) +#define LINK_TYPE_STATUS BIT(22) +#define LINK_AUTH_STATUS 
BIT(21) +#define LINK_ENCRYPTION_STATUS BIT(20) + /* Per-pipe DDI Function Control */ #define _TRANS_DDI_FUNC_CTL_A 0x60400 #define _TRANS_DDI_FUNC_CTL_B 0x61400 #define _TRANS_DDI_FUNC_CTL_C 0x62400 #define _TRANS_DDI_FUNC_CTL_EDP 0x6F400 +#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400 +#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00 #define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A) #define TRANS_DDI_FUNC_ENABLE (1 << 31) @@ -9051,11 +9192,25 @@ enum skl_power_gate { | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \ | TRANS_DDI_HDMI_SCRAMBLING) +#define _TRANS_DDI_FUNC_CTL2_A 0x60404 +#define _TRANS_DDI_FUNC_CTL2_B 0x61404 +#define _TRANS_DDI_FUNC_CTL2_C 0x62404 +#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404 +#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404 +#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04 +#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \ + _TRANS_DDI_FUNC_CTL2_A) +#define PORT_SYNC_MODE_ENABLE (1 << 4) +#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0) +#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0) +#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0 + /* DisplayPort Transport Control */ #define _DP_TP_CTL_A 0x64040 #define _DP_TP_CTL_B 0x64140 #define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B) #define DP_TP_CTL_ENABLE (1 << 31) +#define DP_TP_CTL_FEC_ENABLE (1 << 30) #define DP_TP_CTL_MODE_SST (0 << 27) #define DP_TP_CTL_MODE_MST (1 << 27) #define DP_TP_CTL_FORCE_ACT (1 << 25) @@ -9074,6 +9229,7 @@ enum skl_power_gate { #define _DP_TP_STATUS_A 0x64044 #define _DP_TP_STATUS_B 0x64144 #define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B) +#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28) #define DP_TP_STATUS_IDLE_DONE (1 << 25) #define DP_TP_STATUS_ACT_SENT (1 << 24) #define DP_TP_STATUS_MODE_STATUS_MST (1 << 23) @@ -9222,6 +9378,8 @@ enum skl_power_gate { #define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC) #define TRANS_MSA_SYNC_CLK (1 << 0) +#define TRANS_MSA_SAMPLING_444 (2 << 1) +#define TRANS_MSA_CLRSP_YCBCR (2 << 3) #define TRANS_MSA_6_BPC (0 << 5) #define TRANS_MSA_8_BPC (1 << 5) #define TRANS_MSA_10_BPC (2 << 5) @@ -9789,6 +9947,10 @@ enum skl_power_gate { #define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? 
a : c) /* ports A and C only */ #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) +/* Gen11 DSI */ +#define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \ + dsi0, dsi1) + #define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004) #define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF #define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008) @@ -9952,6 +10114,39 @@ enum skl_power_gate { _ICL_DSI_IO_MODECTL_1) #define COMBO_PHY_MODE_DSI (1 << 0) +/* Display Stream Splitter Control */ +#define DSS_CTL1 _MMIO(0x67400) +#define SPLITTER_ENABLE (1 << 31) +#define JOINER_ENABLE (1 << 30) +#define DUAL_LINK_MODE_INTERLEAVE (1 << 24) +#define DUAL_LINK_MODE_FRONTBACK (0 << 24) +#define OVERLAP_PIXELS_MASK (0xf << 16) +#define OVERLAP_PIXELS(pixels) ((pixels) << 16) +#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) +#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) +#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0 + +#define DSS_CTL2 _MMIO(0x67404) +#define LEFT_BRANCH_VDSC_ENABLE (1 << 31) +#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15) +#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) +#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) + +#define _ICL_PIPE_DSS_CTL1_PB 0x78200 +#define _ICL_PIPE_DSS_CTL1_PC 0x78400 +#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_PIPE_DSS_CTL1_PB, \ + _ICL_PIPE_DSS_CTL1_PC) +#define BIG_JOINER_ENABLE (1 << 29) +#define MASTER_BIG_JOINER_ENABLE (1 << 28) +#define VGA_CENTERING_ENABLE (1 << 27) + +#define _ICL_PIPE_DSS_CTL2_PB 0x78204 +#define _ICL_PIPE_DSS_CTL2_PC 0x78404 +#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_PIPE_DSS_CTL2_PB, \ + _ICL_PIPE_DSS_CTL2_PC) + #define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020) #define STAP_SELECT (1 << 0) @@ -10288,6 +10483,235 @@ enum skl_power_gate { _ICL_DSI_T_INIT_MASTER_0,\ _ICL_DSI_T_INIT_MASTER_1) +#define _DPHY_CLK_TIMING_PARAM_0 0x162180 +#define _DPHY_CLK_TIMING_PARAM_1 0x6c180 +#define DPHY_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \ + _DPHY_CLK_TIMING_PARAM_0,\ + _DPHY_CLK_TIMING_PARAM_1) +#define _DSI_CLK_TIMING_PARAM_0 0x6b080 +#define _DSI_CLK_TIMING_PARAM_1 0x6b880 +#define DSI_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \ + _DSI_CLK_TIMING_PARAM_0,\ + _DSI_CLK_TIMING_PARAM_1) +#define CLK_PREPARE_OVERRIDE (1 << 31) +#define CLK_PREPARE(x) ((x) << 28) +#define CLK_PREPARE_MASK (0x7 << 28) +#define CLK_PREPARE_SHIFT 28 +#define CLK_ZERO_OVERRIDE (1 << 27) +#define CLK_ZERO(x) ((x) << 20) +#define CLK_ZERO_MASK (0xf << 20) +#define CLK_ZERO_SHIFT 20 +#define CLK_PRE_OVERRIDE (1 << 19) +#define CLK_PRE(x) ((x) << 16) +#define CLK_PRE_MASK (0x3 << 16) +#define CLK_PRE_SHIFT 16 +#define CLK_POST_OVERRIDE (1 << 15) +#define CLK_POST(x) ((x) << 8) +#define CLK_POST_MASK (0x7 << 8) +#define CLK_POST_SHIFT 8 +#define CLK_TRAIL_OVERRIDE (1 << 7) +#define CLK_TRAIL(x) ((x) << 0) +#define CLK_TRAIL_MASK (0xf << 0) +#define CLK_TRAIL_SHIFT 0 + +#define _DPHY_DATA_TIMING_PARAM_0 0x162184 +#define _DPHY_DATA_TIMING_PARAM_1 0x6c184 +#define DPHY_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \ + _DPHY_DATA_TIMING_PARAM_0,\ + _DPHY_DATA_TIMING_PARAM_1) +#define _DSI_DATA_TIMING_PARAM_0 0x6B084 +#define _DSI_DATA_TIMING_PARAM_1 0x6B884 +#define DSI_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \ + _DSI_DATA_TIMING_PARAM_0,\ + _DSI_DATA_TIMING_PARAM_1) +#define HS_PREPARE_OVERRIDE (1 << 31) +#define HS_PREPARE(x) ((x) << 24) +#define HS_PREPARE_MASK (0x7 << 24) +#define HS_PREPARE_SHIFT 24 +#define HS_ZERO_OVERRIDE (1 << 23) +#define HS_ZERO(x) ((x) << 16) +#define HS_ZERO_MASK (0xf << 16) +#define HS_ZERO_SHIFT 16 
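/*
 * A hypothetical usage sketch (assumed, not taken from the patch):
 * each timing field here pairs a value macro with an *_OVERRIDE
 * enable bit, so hand-programming the data-lane HS timings would
 * presumably be a read-modify-write along the lines of
 *
 *	u32 val = I915_READ(DPHY_DATA_TIMING_PARAM(port));
 *
 *	val &= ~(HS_PREPARE_MASK | HS_ZERO_MASK);
 *	val |= HS_PREPARE_OVERRIDE | HS_PREPARE(prepare) |
 *	       HS_ZERO_OVERRIDE | HS_ZERO(zero);
 *	I915_WRITE(DPHY_DATA_TIMING_PARAM(port), val);
 *
 * with "prepare" and "zero" already clamped to their field widths.
 */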
+#define HS_TRAIL_OVERRIDE (1 << 15) +#define HS_TRAIL(x) ((x) << 8) +#define HS_TRAIL_MASK (0x7 << 8) +#define HS_TRAIL_SHIFT 8 +#define HS_EXIT_OVERRIDE (1 << 7) +#define HS_EXIT(x) ((x) << 0) +#define HS_EXIT_MASK (0x7 << 0) +#define HS_EXIT_SHIFT 0 + +#define _DPHY_TA_TIMING_PARAM_0 0x162188 +#define _DPHY_TA_TIMING_PARAM_1 0x6c188 +#define DPHY_TA_TIMING_PARAM(port) _MMIO_PORT(port, \ + _DPHY_TA_TIMING_PARAM_0,\ + _DPHY_TA_TIMING_PARAM_1) +#define _DSI_TA_TIMING_PARAM_0 0x6b098 +#define _DSI_TA_TIMING_PARAM_1 0x6b898 +#define DSI_TA_TIMING_PARAM(port) _MMIO_PORT(port, \ + _DSI_TA_TIMING_PARAM_0,\ + _DSI_TA_TIMING_PARAM_1) +#define TA_SURE_OVERRIDE (1 << 31) +#define TA_SURE(x) ((x) << 16) +#define TA_SURE_MASK (0x1f << 16) +#define TA_SURE_SHIFT 16 +#define TA_GO_OVERRIDE (1 << 15) +#define TA_GO(x) ((x) << 8) +#define TA_GO_MASK (0xf << 8) +#define TA_GO_SHIFT 8 +#define TA_GET_OVERRIDE (1 << 7) +#define TA_GET(x) ((x) << 0) +#define TA_GET_MASK (0xf << 0) +#define TA_GET_SHIFT 0 + +/* DSI transcoder configuration */ +#define _DSI_TRANS_FUNC_CONF_0 0x6b030 +#define _DSI_TRANS_FUNC_CONF_1 0x6b830 +#define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \ + _DSI_TRANS_FUNC_CONF_0,\ + _DSI_TRANS_FUNC_CONF_1) +#define OP_MODE_MASK (0x3 << 28) +#define OP_MODE_SHIFT 28 +#define CMD_MODE_NO_GATE (0x0 << 28) +#define CMD_MODE_TE_GATE (0x1 << 28) +#define VIDEO_MODE_SYNC_EVENT (0x2 << 28) +#define VIDEO_MODE_SYNC_PULSE (0x3 << 28) +#define LINK_READY (1 << 20) +#define PIX_FMT_MASK (0x3 << 16) +#define PIX_FMT_SHIFT 16 +#define PIX_FMT_RGB565 (0x0 << 16) +#define PIX_FMT_RGB666_PACKED (0x1 << 16) +#define PIX_FMT_RGB666_LOOSE (0x2 << 16) +#define PIX_FMT_RGB888 (0x3 << 16) +#define PIX_FMT_RGB101010 (0x4 << 16) +#define PIX_FMT_RGB121212 (0x5 << 16) +#define PIX_FMT_COMPRESSED (0x6 << 16) +#define BGR_TRANSMISSION (1 << 15) +#define PIX_VIRT_CHAN(x) ((x) << 12) +#define PIX_VIRT_CHAN_MASK (0x3 << 12) +#define PIX_VIRT_CHAN_SHIFT 12 +#define PIX_BUF_THRESHOLD_MASK (0x3 << 10) +#define PIX_BUF_THRESHOLD_SHIFT 10 +#define PIX_BUF_THRESHOLD_1_4 (0x0 << 10) +#define PIX_BUF_THRESHOLD_1_2 (0x1 << 10) +#define PIX_BUF_THRESHOLD_3_4 (0x2 << 10) +#define PIX_BUF_THRESHOLD_FULL (0x3 << 10) +#define CONTINUOUS_CLK_MASK (0x3 << 8) +#define CONTINUOUS_CLK_SHIFT 8 +#define CLK_ENTER_LP_AFTER_DATA (0x0 << 8) +#define CLK_HS_OR_LP (0x2 << 8) +#define CLK_HS_CONTINUOUS (0x3 << 8) +#define LINK_CALIBRATION_MASK (0x3 << 4) +#define LINK_CALIBRATION_SHIFT 4 +#define CALIBRATION_DISABLED (0x0 << 4) +#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4) +#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4) +#define S3D_ORIENTATION_LANDSCAPE (1 << 1) +#define EOTP_DISABLED (1 << 0) + +#define _DSI_CMD_RXCTL_0 0x6b0d4 +#define _DSI_CMD_RXCTL_1 0x6b8d4 +#define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \ + _DSI_CMD_RXCTL_0,\ + _DSI_CMD_RXCTL_1) +#define READ_UNLOADS_DW (1 << 16) +#define RECEIVED_UNASSIGNED_TRIGGER (1 << 15) +#define RECEIVED_ACKNOWLEDGE_TRIGGER (1 << 14) +#define RECEIVED_TEAR_EFFECT_TRIGGER (1 << 13) +#define RECEIVED_RESET_TRIGGER (1 << 12) +#define RECEIVED_PAYLOAD_WAS_LOST (1 << 11) +#define RECEIVED_CRC_WAS_LOST (1 << 10) +#define NUMBER_RX_PLOAD_DW_MASK (0xff << 0) +#define NUMBER_RX_PLOAD_DW_SHIFT 0 + +#define _DSI_CMD_TXCTL_0 0x6b0d0 +#define _DSI_CMD_TXCTL_1 0x6b8d0 +#define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \ + _DSI_CMD_TXCTL_0,\ + _DSI_CMD_TXCTL_1) +#define KEEP_LINK_IN_HS (1 << 24) +#define FREE_HEADER_CREDIT_MASK (0x1f << 8) +#define FREE_HEADER_CREDIT_SHIFT 0x8 +#define FREE_PLOAD_CREDIT_MASK (0xff << 0) 
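/*
 * A hypothetical helper (not in the patch) showing how the TX credit
 * fields above are presumably consumed: before queueing another
 * command header, the driver would poll the free-credit count, e.g.
 *
 *	static int dsi_free_header_credits(struct drm_i915_private *dev_priv,
 *					   enum transcoder tc)
 *	{
 *		u32 val = I915_READ(DSI_CMD_TXCTL(tc));
 *
 *		return (val & FREE_HEADER_CREDIT_MASK) >>
 *			FREE_HEADER_CREDIT_SHIFT;
 *	}
 *
 * comparing the result against MAX_HEADER_CREDIT (and likewise the
 * payload fields against MAX_PLOAD_CREDIT) before transmitting.
 */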
+#define FREE_PLOAD_CREDIT_SHIFT 0 +#define MAX_HEADER_CREDIT 0x10 +#define MAX_PLOAD_CREDIT 0x40 + +#define _DSI_CMD_TXHDR_0 0x6b100 +#define _DSI_CMD_TXHDR_1 0x6b900 +#define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \ + _DSI_CMD_TXHDR_0,\ + _DSI_CMD_TXHDR_1) +#define PAYLOAD_PRESENT (1 << 31) +#define LP_DATA_TRANSFER (1 << 30) +#define VBLANK_FENCE (1 << 29) +#define PARAM_WC_MASK (0xffff << 8) +#define PARAM_WC_LOWER_SHIFT 8 +#define PARAM_WC_UPPER_SHIFT 16 +#define VC_MASK (0x3 << 6) +#define VC_SHIFT 6 +#define DT_MASK (0x3f << 0) +#define DT_SHIFT 0 + +#define _DSI_CMD_TXPYLD_0 0x6b104 +#define _DSI_CMD_TXPYLD_1 0x6b904 +#define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \ + _DSI_CMD_TXPYLD_0,\ + _DSI_CMD_TXPYLD_1) + +#define _DSI_LP_MSG_0 0x6b0d8 +#define _DSI_LP_MSG_1 0x6b8d8 +#define DSI_LP_MSG(tc) _MMIO_DSI(tc, \ + _DSI_LP_MSG_0,\ + _DSI_LP_MSG_1) +#define LPTX_IN_PROGRESS (1 << 17) +#define LINK_IN_ULPS (1 << 16) +#define LINK_ULPS_TYPE_LP11 (1 << 8) +#define LINK_ENTER_ULPS (1 << 0) + +/* DSI timeout registers */ +#define _DSI_HSTX_TO_0 0x6b044 +#define _DSI_HSTX_TO_1 0x6b844 +#define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \ + _DSI_HSTX_TO_0,\ + _DSI_HSTX_TO_1) +#define HSTX_TIMEOUT_VALUE_MASK (0xffff << 16) +#define HSTX_TIMEOUT_VALUE_SHIFT 16 +#define HSTX_TIMEOUT_VALUE(x) ((x) << 16) +#define HSTX_TIMED_OUT (1 << 0) + +#define _DSI_LPRX_HOST_TO_0 0x6b048 +#define _DSI_LPRX_HOST_TO_1 0x6b848 +#define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \ + _DSI_LPRX_HOST_TO_0,\ + _DSI_LPRX_HOST_TO_1) +#define LPRX_TIMED_OUT (1 << 16) +#define LPRX_TIMEOUT_VALUE_MASK (0xffff << 0) +#define LPRX_TIMEOUT_VALUE_SHIFT 0 +#define LPRX_TIMEOUT_VALUE(x) ((x) << 0) + +#define _DSI_PWAIT_TO_0 0x6b040 +#define _DSI_PWAIT_TO_1 0x6b840 +#define DSI_PWAIT_TO(tc) _MMIO_DSI(tc, \ + _DSI_PWAIT_TO_0,\ + _DSI_PWAIT_TO_1) +#define PRESET_TIMEOUT_VALUE_MASK (0xffff << 16) +#define PRESET_TIMEOUT_VALUE_SHIFT 16 +#define PRESET_TIMEOUT_VALUE(x) ((x) << 16) +#define PRESPONSE_TIMEOUT_VALUE_MASK (0xffff << 0) +#define PRESPONSE_TIMEOUT_VALUE_SHIFT 0 +#define PRESPONSE_TIMEOUT_VALUE(x) ((x) << 0) + +#define _DSI_TA_TO_0 0x6b04c +#define _DSI_TA_TO_1 0x6b84c +#define DSI_TA_TO(tc) _MMIO_DSI(tc, \ + _DSI_TA_TO_0,\ + _DSI_TA_TO_1) +#define TA_TIMED_OUT (1 << 16) +#define TA_TIMEOUT_VALUE_MASK (0xffff << 0) +#define TA_TIMEOUT_VALUE_SHIFT 0 +#define TA_TIMEOUT_VALUE(x) ((x) << 0) + /* bits 31:0 */ #define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) #define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) @@ -10400,10 +10824,6 @@ enum skl_power_gate { #define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) #define READ_DATA_VALID(n) (1 << (n)) -/* For UMS only (deprecated): */ -#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000) -#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) - /* MOCS (Memory Object Control State) registers */ #define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */ @@ -10689,6 +11109,7 @@ enum skl_power_gate { #define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) +#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20) #define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) #define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) @@ -10743,17 +11164,17 @@ enum skl_power_gate { _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) -#define 
PORT_TX_DFLEXDPSP _MMIO(0x1638A0) +#define PORT_TX_DFLEXDPSP _MMIO(FIA1_BASE + 0x008A0) #define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6)) #define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5)) #define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8) #define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8)) #define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8)) -#define PORT_TX_DFLEXDPPMS _MMIO(0x163890) +#define PORT_TX_DFLEXDPPMS _MMIO(FIA1_BASE + 0x00890) #define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port)) -#define PORT_TX_DFLEXDPCSSS _MMIO(0x163894) +#define PORT_TX_DFLEXDPCSSS _MMIO(FIA1_BASE + 0x00894) #define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port)) #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index a492385b2089..ca95ab2f4cfa 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -111,91 +111,6 @@ i915_request_remove_from_client(struct i915_request *request) spin_unlock(&file_priv->mm.lock); } -static struct i915_dependency * -i915_dependency_alloc(struct drm_i915_private *i915) -{ - return kmem_cache_alloc(i915->dependencies, GFP_KERNEL); -} - -static void -i915_dependency_free(struct drm_i915_private *i915, - struct i915_dependency *dep) -{ - kmem_cache_free(i915->dependencies, dep); -} - -static void -__i915_sched_node_add_dependency(struct i915_sched_node *node, - struct i915_sched_node *signal, - struct i915_dependency *dep, - unsigned long flags) -{ - INIT_LIST_HEAD(&dep->dfs_link); - list_add(&dep->wait_link, &signal->waiters_list); - list_add(&dep->signal_link, &node->signalers_list); - dep->signaler = signal; - dep->flags = flags; -} - -static int -i915_sched_node_add_dependency(struct drm_i915_private *i915, - struct i915_sched_node *node, - struct i915_sched_node *signal) -{ - struct i915_dependency *dep; - - dep = i915_dependency_alloc(i915); - if (!dep) - return -ENOMEM; - - __i915_sched_node_add_dependency(node, signal, dep, - I915_DEPENDENCY_ALLOC); - return 0; -} - -static void -i915_sched_node_fini(struct drm_i915_private *i915, - struct i915_sched_node *node) -{ - struct i915_dependency *dep, *tmp; - - GEM_BUG_ON(!list_empty(&node->link)); - - /* - * Everyone we depended upon (the fences we wait to be signaled) - * should retire before us and remove themselves from our list. - * However, retirement is run independently on each timeline and - * so we may be called out-of-order. 
- */ - list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) { - GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler)); - GEM_BUG_ON(!list_empty(&dep->dfs_link)); - - list_del(&dep->wait_link); - if (dep->flags & I915_DEPENDENCY_ALLOC) - i915_dependency_free(i915, dep); - } - - /* Remove ourselves from everyone who depends upon us */ - list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) { - GEM_BUG_ON(dep->signaler != node); - GEM_BUG_ON(!list_empty(&dep->dfs_link)); - - list_del(&dep->signal_link); - if (dep->flags & I915_DEPENDENCY_ALLOC) - i915_dependency_free(i915, dep); - } -} - -static void -i915_sched_node_init(struct i915_sched_node *node) -{ - INIT_LIST_HEAD(&node->signalers_list); - INIT_LIST_HEAD(&node->waiters_list); - INIT_LIST_HEAD(&node->link); - node->attr.priority = I915_PRIORITY_INVALID; -} - static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) { struct intel_engine_cs *engine; @@ -221,6 +136,11 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) intel_engine_get_seqno(engine), seqno); + if (seqno == engine->timeline.seqno) + continue; + + kthread_park(engine->breadcrumbs.signaler); + if (!i915_seqno_passed(seqno, engine->timeline.seqno)) { /* Flush any waiters before we reuse the seqno */ intel_engine_disarm_breadcrumbs(engine); @@ -235,6 +155,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) /* Finally reset hw state */ intel_engine_init_global_seqno(engine, seqno); engine->timeline.seqno = seqno; + + kthread_unpark(engine->breadcrumbs.signaler); } list_for_each_entry(timeline, &i915->gt.timelines, link) @@ -740,17 +662,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) if (rq) cond_synchronize_rcu(rq->rcustate); - /* - * We've forced the client to stall and catch up with whatever - * backlog there might have been. As we are assuming that we - * caused the mempressure, now is an opportune time to - * recover as much memory from the request pool as is possible. - * Having already penalized the client to stall, we spend - * a little extra time to re-optimise page allocation. - */ - kmem_cache_shrink(i915->requests); - rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */ - rq = kmem_cache_alloc(i915->requests, GFP_KERNEL); if (!rq) { ret = -ENOMEM; @@ -1127,8 +1038,20 @@ void i915_request_add(struct i915_request *request) */ local_bh_disable(); rcu_read_lock(); /* RCU serialisation for set-wedged protection */ - if (engine->schedule) - engine->schedule(request, &request->gem_context->sched); + if (engine->schedule) { + struct i915_sched_attr attr = request->gem_context->sched; + + /* + * Boost priorities to new clients (new request flows). + * + * Allow interactive/synchronous clients to jump ahead of + * the bulk clients. 
(FQ_CODEL) + */ + if (!prev || i915_request_completed(prev)) + attr.priority |= I915_PRIORITY_NEWCLIENT; + + engine->schedule(request, &attr); + } rcu_read_unlock(); i915_sw_fence_commit(&request->submit); local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ @@ -1310,6 +1233,8 @@ long i915_request_wait(struct i915_request *rq, add_wait_queue(errq, &reset); intel_wait_init(&wait); + if (flags & I915_WAIT_PRIORITY) + i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT); restart: do { diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 7fa94b024968..90e9d170a0cd 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -277,8 +277,9 @@ long i915_request_wait(struct i915_request *rq, __attribute__((nonnull(1))); #define I915_WAIT_INTERRUPTIBLE BIT(0) #define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */ -#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */ -#define I915_WAIT_FOR_IDLE_BOOST BIT(3) +#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */ +#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */ +#define I915_WAIT_FOR_IDLE_BOOST BIT(4) static inline bool intel_engine_has_started(struct intel_engine_cs *engine, u32 seqno); @@ -332,14 +333,6 @@ static inline bool i915_request_completed(const struct i915_request *rq) return __i915_request_completed(rq, seqno); } -static inline bool i915_sched_node_signaled(const struct i915_sched_node *node) -{ - const struct i915_request *rq = - container_of(node, const struct i915_request, sched); - - return i915_request_completed(rq); -} - void i915_retire_requests(struct drm_i915_private *i915); /* diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c new file mode 100644 index 000000000000..340faea6c08a --- /dev/null +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -0,0 +1,399 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include <linux/mutex.h> + +#include "i915_drv.h" +#include "i915_request.h" +#include "i915_scheduler.h" + +static DEFINE_SPINLOCK(schedule_lock); + +static const struct i915_request * +node_to_request(const struct i915_sched_node *node) +{ + return container_of(node, const struct i915_request, sched); +} + +static inline bool node_signaled(const struct i915_sched_node *node) +{ + return i915_request_completed(node_to_request(node)); +} + +void i915_sched_node_init(struct i915_sched_node *node) +{ + INIT_LIST_HEAD(&node->signalers_list); + INIT_LIST_HEAD(&node->waiters_list); + INIT_LIST_HEAD(&node->link); + node->attr.priority = I915_PRIORITY_INVALID; +} + +static struct i915_dependency * +i915_dependency_alloc(struct drm_i915_private *i915) +{ + return kmem_cache_alloc(i915->dependencies, GFP_KERNEL); +} + +static void +i915_dependency_free(struct drm_i915_private *i915, + struct i915_dependency *dep) +{ + kmem_cache_free(i915->dependencies, dep); +} + +bool __i915_sched_node_add_dependency(struct i915_sched_node *node, + struct i915_sched_node *signal, + struct i915_dependency *dep, + unsigned long flags) +{ + bool ret = false; + + spin_lock(&schedule_lock); + + if (!node_signaled(signal)) { + INIT_LIST_HEAD(&dep->dfs_link); + list_add(&dep->wait_link, &signal->waiters_list); + list_add(&dep->signal_link, &node->signalers_list); + dep->signaler = signal; + dep->flags = flags; + + ret = true; + } + + spin_unlock(&schedule_lock); + + return ret; +} + +int i915_sched_node_add_dependency(struct 
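
The NEWCLIENT boost above can only break ties within one user priority level, because the user level sits above the internal bump bits in the packed priority. A minimal standalone sketch of that arithmetic (constants simplified from i915_scheduler.h; values are illustrative, not driver code):

#include <stdio.h>

#define I915_USER_PRIORITY_SHIFT 2
#define I915_USER_PRIORITY(x)    ((x) << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_WAIT       (1 << 0)
#define I915_PRIORITY_NEWCLIENT  (1 << 1)

int main(void)
{
	int normal   = I915_USER_PRIORITY(0);            /* default context */
	int boosted  = normal | I915_PRIORITY_NEWCLIENT; /* fresh flow, FQ_CODEL-style */
	int elevated = I915_USER_PRIORITY(1);            /* user raised priority */

	printf("boosted beats normal: %d\n", boosted > normal);     /* 1 */
	printf("elevated beats boosted: %d\n", elevated > boosted); /* 1 */
	return 0;
}

So a new client jumps ahead of equal-priority bulk work, but never ahead of anything the user explicitly raised.
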
drm_i915_private *i915, + struct i915_sched_node *node, + struct i915_sched_node *signal) +{ + struct i915_dependency *dep; + + dep = i915_dependency_alloc(i915); + if (!dep) + return -ENOMEM; + + if (!__i915_sched_node_add_dependency(node, signal, dep, + I915_DEPENDENCY_ALLOC)) + i915_dependency_free(i915, dep); + + return 0; +} + +void i915_sched_node_fini(struct drm_i915_private *i915, + struct i915_sched_node *node) +{ + struct i915_dependency *dep, *tmp; + + GEM_BUG_ON(!list_empty(&node->link)); + + spin_lock(&schedule_lock); + + /* + * Everyone we depended upon (the fences we wait to be signaled) + * should retire before us and remove themselves from our list. + * However, retirement is run independently on each timeline and + * so we may be called out-of-order. + */ + list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) { + GEM_BUG_ON(!node_signaled(dep->signaler)); + GEM_BUG_ON(!list_empty(&dep->dfs_link)); + + list_del(&dep->wait_link); + if (dep->flags & I915_DEPENDENCY_ALLOC) + i915_dependency_free(i915, dep); + } + + /* Remove ourselves from everyone who depends upon us */ + list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) { + GEM_BUG_ON(dep->signaler != node); + GEM_BUG_ON(!list_empty(&dep->dfs_link)); + + list_del(&dep->signal_link); + if (dep->flags & I915_DEPENDENCY_ALLOC) + i915_dependency_free(i915, dep); + } + + spin_unlock(&schedule_lock); +} + +static inline struct i915_priolist *to_priolist(struct rb_node *rb) +{ + return rb_entry(rb, struct i915_priolist, node); +} + +static void assert_priolists(struct intel_engine_execlists * const execlists, + long queue_priority) +{ + struct rb_node *rb; + long last_prio, i; + + if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + return; + + GEM_BUG_ON(rb_first_cached(&execlists->queue) != + rb_first(&execlists->queue.rb_root)); + + last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1; + for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { + const struct i915_priolist *p = to_priolist(rb); + + GEM_BUG_ON(p->priority >= last_prio); + last_prio = p->priority; + + GEM_BUG_ON(!p->used); + for (i = 0; i < ARRAY_SIZE(p->requests); i++) { + if (list_empty(&p->requests[i])) + continue; + + GEM_BUG_ON(!(p->used & BIT(i))); + } + } +} + +struct list_head * +i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_priolist *p; + struct rb_node **parent, *rb; + bool first = true; + int idx, i; + + lockdep_assert_held(&engine->timeline.lock); + assert_priolists(execlists, INT_MAX); + + /* buckets sorted from highest [in slot 0] to lowest priority */ + idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1; + prio >>= I915_USER_PRIORITY_SHIFT; + if (unlikely(execlists->no_priolist)) + prio = I915_PRIORITY_NORMAL; + +find_priolist: + /* most positive priority is scheduled first, equal priorities fifo */ + rb = NULL; + parent = &execlists->queue.rb_root.rb_node; + while (*parent) { + rb = *parent; + p = to_priolist(rb); + if (prio > p->priority) { + parent = &rb->rb_left; + } else if (prio < p->priority) { + parent = &rb->rb_right; + first = false; + } else { + goto out; + } + } + + if (prio == I915_PRIORITY_NORMAL) { + p = &execlists->default_priolist; + } else { + p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); + /* Convert an allocation failure to a priority bump */ + if (unlikely(!p)) { + prio = I915_PRIORITY_NORMAL; /* recurses just once */ + + /* To maintain ordering 
with all rendering, after an
+			 * allocation failure we have to disable all scheduling.
+			 * Requests will then be executed in fifo, and schedule
+			 * will ensure that dependencies are emitted in fifo.
+			 * There will still be some reordering with existing
+			 * requests, so if userspace lied about their
+			 * dependencies that reordering may be visible.
+			 */
+			execlists->no_priolist = true;
+			goto find_priolist;
+		}
+	}
+
+	p->priority = prio;
+	for (i = 0; i < ARRAY_SIZE(p->requests); i++)
+		INIT_LIST_HEAD(&p->requests[i]);
+	rb_link_node(&p->node, rb, parent);
+	rb_insert_color_cached(&p->node, &execlists->queue, first);
+	p->used = 0;
+
+out:
+	p->used |= BIT(idx);
+	return &p->requests[idx];
+}
+
+static struct intel_engine_cs *
+sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
+{
+	struct intel_engine_cs *engine = node_to_request(node)->engine;
+
+	GEM_BUG_ON(!locked);
+
+	if (engine != locked) {
+		spin_unlock(&locked->timeline.lock);
+		spin_lock(&engine->timeline.lock);
+	}
+
+	return engine;
+}
+
+static void __i915_schedule(struct i915_request *rq,
+			    const struct i915_sched_attr *attr)
+{
+	struct list_head *uninitialized_var(pl);
+	struct intel_engine_cs *engine, *last;
+	struct i915_dependency *dep, *p;
+	struct i915_dependency stack;
+	const int prio = attr->priority;
+	LIST_HEAD(dfs);
+
+	/* Needed in order to use the temporary link inside i915_dependency */
+	lockdep_assert_held(&schedule_lock);
+	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
+
+	if (i915_request_completed(rq))
+		return;
+
+	if (prio <= READ_ONCE(rq->sched.attr.priority))
+		return;
+
+	stack.signaler = &rq->sched;
+	list_add(&stack.dfs_link, &dfs);
+
+	/*
+	 * Recursively bump all dependent priorities to match the new request.
+	 *
+	 * A naive approach would be to use recursion:
+	 * static void update_priorities(struct i915_sched_node *node, prio) {
+	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
+	 *		update_priorities(dep->signal, prio)
+	 *	queue_request(node);
+	 * }
+	 * but that may have unlimited recursion depth and so runs a very
+	 * real risk of overrunning the kernel stack. Instead, we build
+	 * a flat list of all dependencies starting with the current request.
+	 * As we walk the list of dependencies, we add all of each node's
+	 * dependencies to the end of the list (this may include an already
+	 * visited request) and continue to walk onwards onto the new
+	 * dependencies. The end result is a topological list of requests in
+	 * reverse order, the last element in the list is the request we
+	 * must execute first.
+	 */
+	list_for_each_entry(dep, &dfs, dfs_link) {
+		struct i915_sched_node *node = dep->signaler;
+
+		/*
+		 * Within an engine, there can be no cycle, but we may
+		 * refer to the same dependency chain multiple times
+		 * (redundant dependencies are not eliminated) and across
+		 * engines.
+		 */
+		list_for_each_entry(p, &node->signalers_list, signal_link) {
+			GEM_BUG_ON(p == dep); /* no cycles! */
+
+			if (node_signaled(p->signaler))
+				continue;
+
+			GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
+			if (prio > READ_ONCE(p->signaler->attr.priority))
+				list_move_tail(&p->dfs_link, &dfs);
+		}
+	}
+
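
The comment above replaces unbounded recursion with a worklist that is appended to while being walked; consuming the result in reverse then visits dependencies before dependents. A minimal standalone sketch of that pattern on a toy graph (plain C with fixed-size arrays instead of the driver's lists; all names hypothetical, and the driver additionally tolerates duplicate visits):

#include <stdio.h>

#define MAX_NODES 8

struct node {
	int prio;
	int nr_signalers;
	struct node *signalers[MAX_NODES];
};

/* Bump @root and everything it depends on to at least @prio, no recursion. */
static void bump(struct node *root, int prio)
{
	struct node *list[MAX_NODES];
	int head = 0, tail = 0, i;

	list[tail++] = root;
	while (head < tail) {		/* walk while appending */
		struct node *n = list[head++];

		for (i = 0; i < n->nr_signalers; i++)
			if (n->signalers[i]->prio < prio)
				list[tail++] = n->signalers[i];
	}
	while (tail--)			/* reverse order: deps first */
		if (list[tail]->prio < prio)
			list[tail]->prio = prio;
}

int main(void)
{
	struct node a = { .prio = 0 };	/* executes first */
	struct node b = { .prio = 0, .nr_signalers = 1, .signalers = { &a } };
	struct node c = { .prio = 0, .nr_signalers = 1, .signalers = { &b } };

	bump(&c, 5);
	printf("%d %d %d\n", a.prio, b.prio, c.prio);	/* 5 5 5 */
	return 0;
}

+	/*
+	 * If we didn't need to bump any existing priorities, and we haven't
+	 * yet submitted this request (i.e. there is no potential race with
+	 * execlists_submit_request()), we can set our own priority and skip
+	 * acquiring the engine locks.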
+ */ + if (rq->sched.attr.priority == I915_PRIORITY_INVALID) { + GEM_BUG_ON(!list_empty(&rq->sched.link)); + rq->sched.attr = *attr; + + if (stack.dfs_link.next == stack.dfs_link.prev) + return; + + __list_del_entry(&stack.dfs_link); + } + + last = NULL; + engine = rq->engine; + spin_lock_irq(&engine->timeline.lock); + + /* Fifo and depth-first replacement ensure our deps execute before us */ + list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { + struct i915_sched_node *node = dep->signaler; + + INIT_LIST_HEAD(&dep->dfs_link); + + engine = sched_lock_engine(node, engine); + + /* Recheck after acquiring the engine->timeline.lock */ + if (prio <= node->attr.priority || node_signaled(node)) + continue; + + node->attr.priority = prio; + if (!list_empty(&node->link)) { + if (last != engine) { + pl = i915_sched_lookup_priolist(engine, prio); + last = engine; + } + list_move_tail(&node->link, pl); + } else { + /* + * If the request is not in the priolist queue because + * it is not yet runnable, then it doesn't contribute + * to our preemption decisions. On the other hand, + * if the request is on the HW, it too is not in the + * queue; but in that case we may still need to reorder + * the inflight requests. + */ + if (!i915_sw_fence_done(&node_to_request(node)->submit)) + continue; + } + + if (prio <= engine->execlists.queue_priority) + continue; + + /* + * If we are already the currently executing context, don't + * bother evaluating if we should preempt ourselves. + */ + if (node_to_request(node)->global_seqno && + i915_seqno_passed(port_request(engine->execlists.port)->global_seqno, + node_to_request(node)->global_seqno)) + continue; + + /* Defer (tasklet) submission until after all of our updates. */ + engine->execlists.queue_priority = prio; + tasklet_hi_schedule(&engine->execlists.tasklet); + } + + spin_unlock_irq(&engine->timeline.lock); +} + +void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr) +{ + spin_lock(&schedule_lock); + __i915_schedule(rq, attr); + spin_unlock(&schedule_lock); +} + +void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump) +{ + struct i915_sched_attr attr; + + GEM_BUG_ON(bump & ~I915_PRIORITY_MASK); + + if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID) + return; + + spin_lock_bh(&schedule_lock); + + attr = rq->sched.attr; + attr.priority |= bump; + __i915_schedule(rq, &attr); + + spin_unlock_bh(&schedule_lock); +} diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index 70a42220358d..dbe9cb7ecd82 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h @@ -8,9 +8,14 @@ #define _I915_SCHEDULER_H_ #include <linux/bitops.h> +#include <linux/kernel.h> #include <uapi/drm/i915_drm.h> +struct drm_i915_private; +struct i915_request; +struct intel_engine_cs; + enum { I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1, I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY, @@ -19,6 +24,15 @@ enum { I915_PRIORITY_INVALID = INT_MIN }; +#define I915_USER_PRIORITY_SHIFT 2 +#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT) + +#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT) +#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1) + +#define I915_PRIORITY_WAIT ((u8)BIT(0)) +#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1)) + struct i915_sched_attr { /** * @priority: execution and service priority @@ -69,4 +83,26 @@ struct i915_dependency { #define I915_DEPENDENCY_ALLOC BIT(0) }; +void i915_sched_node_init(struct 
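
i915_sched_lookup_priolist() above splits the packed priority in two: the user level (prio >> I915_USER_PRIORITY_SHIFT) keys the rbtree node, while the low bump bits select a FIFO bucket within it, slot 0 holding the highest priority. A small standalone check of the bucket arithmetic (constants copied from the header; not driver code):

#include <assert.h>

#define I915_USER_PRIORITY_SHIFT 2
#define I915_PRIORITY_COUNT (1 << I915_USER_PRIORITY_SHIFT)	/* 4 buckets */
#define I915_PRIORITY_MASK  (I915_PRIORITY_COUNT - 1)

static int bucket(int prio)
{
	/* buckets sorted from highest [in slot 0] to lowest priority */
	return I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
}

int main(void)
{
	assert(bucket(0x3) == 0);	/* both bumps set: front bucket */
	assert(bucket(0x0) == 3);	/* no bumps: back bucket */
	assert(bucket((7 << I915_USER_PRIORITY_SHIFT) | 1) == 2); /* user level ignored */
	return 0;
}
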
i915_sched_node *node); + +bool __i915_sched_node_add_dependency(struct i915_sched_node *node, + struct i915_sched_node *signal, + struct i915_dependency *dep, + unsigned long flags); + +int i915_sched_node_add_dependency(struct drm_i915_private *i915, + struct i915_sched_node *node, + struct i915_sched_node *signal); + +void i915_sched_node_fini(struct drm_i915_private *i915, + struct i915_sched_node *node); + +void i915_schedule(struct i915_request *request, + const struct i915_sched_attr *attr); + +void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump); + +struct list_head * +i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio); + #endif /* _I915_SCHEDULER_H_ */ diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 6dbeed079ae5..fc2eeab823b7 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -1,10 +1,7 @@ /* - * (C) Copyright 2016 Intel Corporation + * SPDX-License-Identifier: MIT * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; version 2 - * of the License. + * (C) Copyright 2016 Intel Corporation */ #include <linux/slab.h> diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index fe2ef4dadfc6..0e055ea0179f 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -1,10 +1,9 @@ /* + * SPDX-License-Identifier: MIT + * * i915_sw_fence.h - library routines for N:M synchronisation points * * Copyright (C) 2016 Intel Corporation - * - * This file is released under the GPLv2. - * */ #ifndef _I915_SW_FENCE_H_ diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c index 58f8d0cc125c..60404dbb2e9f 100644 --- a/drivers/gpu/drm/i915/i915_syncmap.c +++ b/drivers/gpu/drm/i915/i915_syncmap.c @@ -92,7 +92,7 @@ void i915_syncmap_init(struct i915_syncmap **root) { BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP); BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT); - BUILD_BUG_ON(KSYNCMAP > BITS_PER_BYTE * sizeof((*root)->bitmap)); + BUILD_BUG_ON(KSYNCMAP > BITS_PER_TYPE((*root)->bitmap)); *root = NULL; } diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index e5e6f6bb2b05..535caebd9813 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -483,7 +483,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr return snprintf(buf, PAGE_SIZE, "%d\n", val); } -static const struct attribute *gen6_attrs[] = { +static const struct attribute * const gen6_attrs[] = { &dev_attr_gt_act_freq_mhz.attr, &dev_attr_gt_cur_freq_mhz.attr, &dev_attr_gt_boost_freq_mhz.attr, @@ -495,7 +495,7 @@ static const struct attribute *gen6_attrs[] = { NULL, }; -static const struct attribute *vlv_attrs[] = { +static const struct attribute * const vlv_attrs[] = { &dev_attr_gt_act_freq_mhz.attr, &dev_attr_gt_cur_freq_mhz.attr, &dev_attr_gt_boost_freq_mhz.attr, @@ -516,26 +516,21 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, { struct device *kdev = kobj_to_dev(kobj); - struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct drm_i915_error_state_buf error_str; + struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); struct i915_gpu_state *gpu; ssize_t ret; - ret = i915_error_state_buf_init(&error_str, dev_priv, count, off); - if (ret) - return ret; - - gpu = 
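
The sysfs change above adds a second const so the attribute tables themselves, not just the attributes they point to, are read-only and can be placed in .rodata. The difference in miniature (standalone sketch, hypothetical names):

#include <stddef.h>

struct attribute { const char *name; };

static struct attribute a = { .name = "gt_act_freq_mhz" };

static const struct attribute *writable_table[] = { &a, NULL };
static const struct attribute * const ro_table[] = { &a, NULL };

void demo(void)
{
	writable_table[0] = NULL;	/* compiles: only the attributes are const */
	/* ro_table[0] = NULL; */	/* would not compile: the table is const too */
}
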
i915_first_error_state(dev_priv); - ret = i915_error_state_to_str(&error_str, gpu); - if (ret) - goto out; - - ret = count < error_str.bytes ? count : error_str.bytes; - memcpy(buf, error_str.buf, ret); + gpu = i915_first_error_state(i915); + if (gpu) { + ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count); + i915_gpu_state_put(gpu); + } else { + const char *str = "No error state collected\n"; + size_t len = strlen(str); -out: - i915_gpu_state_put(gpu); - i915_error_state_buf_release(&error_str); + ret = min_t(size_t, count, len - off); + memcpy(buf, str + off, ret); + } return ret; } diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h index a2c2c3ab5fb0..ebd71b487220 100644 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ b/drivers/gpu/drm/i915/i915_timeline.h @@ -83,6 +83,25 @@ void i915_timeline_init(struct drm_i915_private *i915, const char *name); void i915_timeline_fini(struct i915_timeline *tl); +static inline void +i915_timeline_set_subclass(struct i915_timeline *timeline, + unsigned int subclass) +{ + lockdep_set_subclass(&timeline->lock, subclass); + + /* + * Due to an interesting quirk in lockdep's internal debug tracking, + * after setting a subclass we must ensure the lock is used. Otherwise, + * nr_unused_locks is incremented once too often. + */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + local_irq_disable(); + lock_map_acquire(&timeline->lock.dep_map); + lock_map_release(&timeline->lock.dep_map); + local_irq_enable(); +#endif +} + struct i915_timeline * i915_timeline_create(struct drm_i915_private *i915, const char *name); diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 395dd2511568..9726df37c4c4 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -44,16 +44,19 @@ __stringify(x), (long)(x)) #if defined(GCC_VERSION) && GCC_VERSION >= 70000 -#define add_overflows(A, B) \ - __builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0) +#define add_overflows_t(T, A, B) \ + __builtin_add_overflow_p((A), (B), (T)0) #else -#define add_overflows(A, B) ({ \ +#define add_overflows_t(T, A, B) ({ \ typeof(A) a = (A); \ typeof(B) b = (B); \ - a + b < a; \ + (T)(a + b) < a; \ }) #endif +#define add_overflows(A, B) \ + add_overflows_t(typeof((A) + (B)), (A), (B)) + #define range_overflows(start, size, max) ({ \ typeof(start) start__ = (start); \ typeof(size) size__ = (size); \ @@ -68,7 +71,7 @@ /* Note we don't consider signbits :| */ #define overflows_type(x, T) \ - (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE)) + (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T)) #define ptr_mask_bits(ptr, n) ({ \ unsigned long __v = (unsigned long)(ptr); \ diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 31efc971a3a8..5b4d78cdb4ca 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -305,12 +305,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(vma->size > vma->node.size); - if (GEM_WARN_ON(range_overflows(vma->node.start, - vma->node.size, - vma->vm->total))) + if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start, + vma->node.size, + vma->vm->total))) return -ENODEV; - if (GEM_WARN_ON(!flags)) + if (GEM_DEBUG_WARN_ON(!flags)) return -EINVAL; bind_flags = 0; @@ -892,7 +892,7 @@ static void export_fence(struct i915_vma *vma, reservation_object_lock(resv, NULL); if (flags & EXEC_OBJECT_WRITE) 
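
The reworked add_overflows() above evaluates the check in the promoted type of A + B, while add_overflows_t() pins the result type explicitly; the non-builtin fallback relies on a wrapped unsigned sum comparing below its operand. A standalone sketch of the fallback path (GCC-style typeof and statement expressions, as in i915_utils.h; values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define add_overflows_t(T, A, B) ({ \
	typeof(A) a = (A); \
	typeof(B) b = (B); \
	(T)(a + b) < a; \
})

int main(void)
{
	uint32_t near_max = UINT32_MAX - 1;

	printf("%d\n", add_overflows_t(uint32_t, near_max, 1u)); /* 0: fits */
	printf("%d\n", add_overflows_t(uint32_t, near_max, 2u)); /* 1: wraps */
	printf("%d\n", add_overflows_t(uint8_t, 200u, 100u));    /* 1: wraps in u8 */
	return 0;
}
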
reservation_object_add_excl_fence(resv, &rq->fence); - else if (reservation_object_reserve_shared(resv) == 0) + else if (reservation_object_reserve_shared(resv, 1) == 0) reservation_object_add_shared_fence(resv, &rq->fence); reservation_object_unlock(resv); } diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c index 13830e43a4d1..4dd793b78996 100644 --- a/drivers/gpu/drm/i915/icl_dsi.c +++ b/drivers/gpu/drm/i915/icl_dsi.c @@ -25,8 +25,277 @@ * Jani Nikula <jani.nikula@intel.com> */ +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_atomic_helper.h> #include "intel_dsi.h" +static inline int header_credits_available(struct drm_i915_private *dev_priv, + enum transcoder dsi_trans) +{ + return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) + >> FREE_HEADER_CREDIT_SHIFT; +} + +static inline int payload_credits_available(struct drm_i915_private *dev_priv, + enum transcoder dsi_trans) +{ + return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) + >> FREE_PLOAD_CREDIT_SHIFT; +} + +static void wait_for_header_credits(struct drm_i915_private *dev_priv, + enum transcoder dsi_trans) +{ + if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >= + MAX_HEADER_CREDIT, 100)) + DRM_ERROR("DSI header credits not released\n"); +} + +static void wait_for_payload_credits(struct drm_i915_private *dev_priv, + enum transcoder dsi_trans) +{ + if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >= + MAX_PLOAD_CREDIT, 100)) + DRM_ERROR("DSI payload credits not released\n"); +} + +static enum transcoder dsi_port_to_transcoder(enum port port) +{ + if (port == PORT_A) + return TRANSCODER_DSI_0; + else + return TRANSCODER_DSI_1; +} + +static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct mipi_dsi_device *dsi; + enum port port; + enum transcoder dsi_trans; + int ret; + + /* wait for header/payload credits to be released */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + wait_for_header_credits(dev_priv, dsi_trans); + wait_for_payload_credits(dev_priv, dsi_trans); + } + + /* send nop DCS command */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi = intel_dsi->dsi_hosts[port]->device; + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + dsi->channel = 0; + ret = mipi_dsi_dcs_nop(dsi); + if (ret < 0) + DRM_ERROR("error sending DCS NOP command\n"); + } + + /* wait for header credits to be released */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + wait_for_header_credits(dev_priv, dsi_trans); + } + + /* wait for LP TX in progress bit to be cleared */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) & + LPTX_IN_PROGRESS), 20)) + DRM_ERROR("LPTX bit not cleared\n"); + } +} + +static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data, + u32 len) +{ + struct intel_dsi *intel_dsi = host->intel_dsi; + struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); + int free_credits; + int i, j; + + for (i = 0; i < len; i += 4) { + u32 tmp = 0; + + free_credits = payload_credits_available(dev_priv, dsi_trans); + if (free_credits < 1) { + DRM_ERROR("Payload credit not available\n"); + return false; + } + + for (j = 0; j < min_t(u32, len - 
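/*
 * packs up to four payload bytes per 32-bit DW, little-endian; e.g.
 * the bytes { 0x11, 0x22, 0x33 } become 0x00332211 (illustrative values)
 */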
i, 4); j++) + tmp |= *data++ << 8 * j; + + I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp); + } + + return true; +} + +static int dsi_send_pkt_hdr(struct intel_dsi_host *host, + struct mipi_dsi_packet pkt, bool enable_lpdt) +{ + struct intel_dsi *intel_dsi = host->intel_dsi; + struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); + u32 tmp; + int free_credits; + + /* check if header credit available */ + free_credits = header_credits_available(dev_priv, dsi_trans); + if (free_credits < 1) { + DRM_ERROR("send pkt header failed, not enough hdr credits\n"); + return -1; + } + + tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans)); + + if (pkt.payload) + tmp |= PAYLOAD_PRESENT; + else + tmp &= ~PAYLOAD_PRESENT; + + tmp &= ~VBLANK_FENCE; + + if (enable_lpdt) + tmp |= LP_DATA_TRANSFER; + + tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK); + tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT); + tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT); + tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT); + tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT); + I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp); + + return 0; +} + +static int dsi_send_pkt_payld(struct intel_dsi_host *host, + struct mipi_dsi_packet pkt) +{ + /* payload queue can accept *256 bytes*, check limit */ + if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) { + DRM_ERROR("payload size exceeds max queue limit\n"); + return -1; + } + + /* load data into command payload queue */ + if (!add_payld_to_queue(host, pkt.payload, + pkt.payload_length)) { + DRM_ERROR("adding payload to queue failed\n"); + return -1; + } + + return 0; +} + +static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 tmp; + int lane; + + for_each_dsi_port(port, intel_dsi->ports) { + + /* + * Program voltage swing and pre-emphasis level values as per + * table in BSPEC under DDI buffer programing + */ + tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); + tmp |= SCALING_MODE_SEL(0x2); + tmp |= TAP2_DISABLE | TAP3_DISABLE; + tmp |= RTERM_SELECT(0x6); + I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp); + + tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port)); + tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); + tmp |= SCALING_MODE_SEL(0x2); + tmp |= TAP2_DISABLE | TAP3_DISABLE; + tmp |= RTERM_SELECT(0x6); + I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp); + + tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port)); + tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | + RCOMP_SCALAR_MASK); + tmp |= SWING_SEL_UPPER(0x2); + tmp |= SWING_SEL_LOWER(0x2); + tmp |= RCOMP_SCALAR(0x98); + I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp); + + tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port)); + tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | + RCOMP_SCALAR_MASK); + tmp |= SWING_SEL_UPPER(0x2); + tmp |= SWING_SEL_LOWER(0x2); + tmp |= RCOMP_SCALAR(0x98); + I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp); + + tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port)); + tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | + CURSOR_COEFF_MASK); + tmp |= POST_CURSOR_1(0x0); + tmp |= POST_CURSOR_2(0x0); + tmp |= CURSOR_COEFF(0x3f); + I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp); + + for (lane = 0; lane <= 3; lane++) { + /* Bspec: must not use GRP register for write */ + tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane)); + tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | + 
CURSOR_COEFF_MASK); + tmp |= POST_CURSOR_1(0x0); + tmp |= POST_CURSOR_2(0x0); + tmp |= CURSOR_COEFF(0x3f); + I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp); + } + } +} + +static void configure_dual_link_mode(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 dss_ctl1; + + dss_ctl1 = I915_READ(DSS_CTL1); + dss_ctl1 |= SPLITTER_ENABLE; + dss_ctl1 &= ~OVERLAP_PIXELS_MASK; + dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap); + + if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { + const struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + u32 dss_ctl2; + u16 hactive = adjusted_mode->crtc_hdisplay; + u16 dl_buffer_depth; + + dss_ctl1 &= ~DUAL_LINK_MODE_INTERLEAVE; + dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap; + + if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH) + DRM_ERROR("DL buffer depth exceed max value\n"); + + dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK; + dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); + dss_ctl2 = I915_READ(DSS_CTL2); + dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK; + dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); + I915_WRITE(DSS_CTL2, dss_ctl2); + } else { + /* Interleave */ + dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE; + } + + I915_WRITE(DSS_CTL1, dss_ctl1); +} + static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -105,23 +374,1079 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) } } -static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder) +static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 tmp; + int lane; + + /* Step 4b(i) set loadgen select for transmit and aux lanes */ + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port)); + tmp &= ~LOADGEN_SELECT; + I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp); + for (lane = 0; lane <= 3; lane++) { + tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane)); + tmp &= ~LOADGEN_SELECT; + if (lane != 2) + tmp |= LOADGEN_SELECT; + I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp); + } + } + + /* Step 4b(ii) set latency optimization for transmit and aux lanes */ + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port)); + tmp &= ~FRC_LATENCY_OPTIM_MASK; + tmp |= FRC_LATENCY_OPTIM_VAL(0x5); + I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp); + tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port)); + tmp &= ~FRC_LATENCY_OPTIM_MASK; + tmp |= FRC_LATENCY_OPTIM_VAL(0x5); + I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp); + } + +} + +static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 tmp; + enum port port; + + /* clear common keeper enable bit */ + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port)); + tmp &= ~COMMON_KEEPER_EN; + I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp); + tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port)); + tmp &= ~COMMON_KEEPER_EN; + I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp); + } + + /* + * Set SUS Clock Config bitfield to 11b + * Note: loadgen select 
programming is done
+	 * as part of lane phy sequence configuration
+	 */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_PORT_CL_DW5(port));
+		tmp |= SUS_CLOCK_CONFIG;
+		I915_WRITE(ICL_PORT_CL_DW5(port), tmp);
+	}
+
+	/* Clear training enable to change swing values */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+		tmp &= ~TX_TRAINING_EN;
+		I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+		tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+		tmp &= ~TX_TRAINING_EN;
+		I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+	}
+
+	/* Program swing and de-emphasis */
+	dsi_program_swing_and_deemphasis(encoder);
+
+	/* Set training enable to trigger update */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+		tmp |= TX_TRAINING_EN;
+		I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+		tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+		tmp |= TX_TRAINING_EN;
+		I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+	}
+}
+
+static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(DDI_BUF_CTL(port));
+		tmp |= DDI_BUF_CTL_ENABLE;
+		I915_WRITE(DDI_BUF_CTL(port), tmp);
+
+		if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
+				  DDI_BUF_IS_IDLE),
+				  500))
+			DRM_ERROR("DDI port:%c buffer idle\n", port_name(port));
+	}
+}
+
+static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+
+	/* Program T-INIT master registers */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
+		tmp &= ~MASTER_INIT_TIMER_MASK;
+		tmp |= intel_dsi->init_count;
+		I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
+	}
+
+	/* Program DPHY clock lane timings */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+
+		/* shadow register inside display core */
+		I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+	}
+
+	/* Program DPHY data lane timings */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
+			   intel_dsi->dphy_data_lane_reg);
+
+		/* shadow register inside display core */
+		I915_WRITE(DSI_DATA_TIMING_PARAM(port),
+			   intel_dsi->dphy_data_lane_reg);
+	}
+
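
Every timing override below follows the same read-modify-write shape: clear the field, then set its OVERRIDE bit together with the new value. In isolation, using the TA_SURE constants from the register block earlier in this patch (the readback value is hypothetical, not from Bspec):

#include <stdint.h>
#include <assert.h>

#define TA_SURE_OVERRIDE (1u << 31)
#define TA_SURE(x)       ((uint32_t)(x) << 16)
#define TA_SURE_MASK     (0x1fu << 16)

static uint32_t override_ta_sure(uint32_t reg, uint32_t val)
{
	reg &= ~TA_SURE_MASK;			/* drop the HW default */
	reg |= TA_SURE_OVERRIDE | TA_SURE(val);	/* force our value */
	return reg;
}

int main(void)
{
	/* hypothetical readback with TA_SURE = 0x15 */
	uint32_t reg = override_ta_sure(TA_SURE(0x15), 0);

	assert((reg & TA_SURE_MASK) == 0);
	assert(reg & TA_SURE_OVERRIDE);
	return 0;
}

+	/*
+	 * If the DSI link is operating at or below 800 MHz,
+	 * TA_SURE should be overridden and programmed to '0'
+	 * inside TA_PARAM_REGISTERS; otherwise leave all fields
+	 * at HW default values.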
+ */ + if (intel_dsi_bitrate(intel_dsi) <= 800000) { + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(DPHY_TA_TIMING_PARAM(port)); + tmp &= ~TA_SURE_MASK; + tmp |= TA_SURE_OVERRIDE | TA_SURE(0); + I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp); + + /* shadow register inside display core */ + tmp = I915_READ(DSI_TA_TIMING_PARAM(port)); + tmp &= ~TA_SURE_MASK; + tmp |= TA_SURE_OVERRIDE | TA_SURE(0); + I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp); + } + } +} + +static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 tmp; + enum port port; + + mutex_lock(&dev_priv->dpll_lock); + tmp = I915_READ(DPCLKA_CFGCR0_ICL); + for_each_dsi_port(port, intel_dsi->ports) { + tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port); + } + + I915_WRITE(DPCLKA_CFGCR0_ICL, tmp); + mutex_unlock(&dev_priv->dpll_lock); +} + +static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 tmp; + enum port port; + + mutex_lock(&dev_priv->dpll_lock); + tmp = I915_READ(DPCLKA_CFGCR0_ICL); + for_each_dsi_port(port, intel_dsi->ports) { + tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); + } + + I915_WRITE(DPCLKA_CFGCR0_ICL, tmp); + mutex_unlock(&dev_priv->dpll_lock); +} + +static void gen11_dsi_map_pll(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_shared_dpll *pll = crtc_state->shared_dpll; + enum port port; + u32 val; + + mutex_lock(&dev_priv->dpll_lock); + + val = I915_READ(DPCLKA_CFGCR0_ICL); + for_each_dsi_port(port, intel_dsi->ports) { + val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); + val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); + } + I915_WRITE(DPCLKA_CFGCR0_ICL, val); + POSTING_READ(DPCLKA_CFGCR0_ICL); + + mutex_unlock(&dev_priv->dpll_lock); +} + +static void +gen11_dsi_configure_transcoder(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); + enum pipe pipe = intel_crtc->pipe; + u32 tmp; + enum port port; + enum transcoder dsi_trans; + + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)); + + if (intel_dsi->eotp_pkt) + tmp &= ~EOTP_DISABLED; + else + tmp |= EOTP_DISABLED; + + /* enable link calibration if freq > 1.5Gbps */ + if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) { + tmp &= ~LINK_CALIBRATION_MASK; + tmp |= CALIBRATION_ENABLED_INITIAL_ONLY; + } + + /* configure continuous clock */ + tmp &= ~CONTINUOUS_CLK_MASK; + if (intel_dsi->clock_stop) + tmp |= CLK_ENTER_LP_AFTER_DATA; + else + tmp |= CLK_HS_CONTINUOUS; + + /* configure buffer threshold limit to minimum */ + tmp &= ~PIX_BUF_THRESHOLD_MASK; + tmp |= PIX_BUF_THRESHOLD_1_4; + + /* set virtual channel to '0' */ + tmp &= ~PIX_VIRT_CHAN_MASK; + tmp |= PIX_VIRT_CHAN(0); + + /* program BGR transmission */ + if (intel_dsi->bgr_enabled) + tmp |= BGR_TRANSMISSION; + + /* select pixel format */ + tmp &= ~PIX_FMT_MASK; + switch (intel_dsi->pixel_format) { + default: + 
MISSING_CASE(intel_dsi->pixel_format); + /* fallthrough */ + case MIPI_DSI_FMT_RGB565: + tmp |= PIX_FMT_RGB565; + break; + case MIPI_DSI_FMT_RGB666_PACKED: + tmp |= PIX_FMT_RGB666_PACKED; + break; + case MIPI_DSI_FMT_RGB666: + tmp |= PIX_FMT_RGB666_LOOSE; + break; + case MIPI_DSI_FMT_RGB888: + tmp |= PIX_FMT_RGB888; + break; + } + + /* program DSI operation mode */ + if (is_vid_mode(intel_dsi)) { + tmp &= ~OP_MODE_MASK; + switch (intel_dsi->video_mode_format) { + default: + MISSING_CASE(intel_dsi->video_mode_format); + /* fallthrough */ + case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS: + tmp |= VIDEO_MODE_SYNC_EVENT; + break; + case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE: + tmp |= VIDEO_MODE_SYNC_PULSE; + break; + } + } + + I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp); + } + + /* enable port sync mode if dual link */ + if (intel_dsi->dual_link) { + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans)); + tmp |= PORT_SYNC_MODE_ENABLE; + I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); + } + + /* configure stream splitting */ + configure_dual_link_mode(encoder, pipe_config); + } + + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + + /* select data lane width */ + tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); + tmp &= ~DDI_PORT_WIDTH_MASK; + tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count); + + /* select input pipe */ + tmp &= ~TRANS_DDI_EDP_INPUT_MASK; + switch (pipe) { + default: + MISSING_CASE(pipe); + /* fallthrough */ + case PIPE_A: + tmp |= TRANS_DDI_EDP_INPUT_A_ON; + break; + case PIPE_B: + tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF; + break; + case PIPE_C: + tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF; + break; + } + + /* enable DDI buffer */ + tmp |= TRANS_DDI_FUNC_ENABLE; + I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp); + } + + /* wait for link ready */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) & + LINK_READY), 2500)) + DRM_ERROR("DSI link not ready\n"); + } +} + +static void +gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + const struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + enum port port; + enum transcoder dsi_trans; + /* horizontal timings */ + u16 htotal, hactive, hsync_start, hsync_end, hsync_size; + u16 hfront_porch, hback_porch; + /* vertical timings */ + u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift; + + hactive = adjusted_mode->crtc_hdisplay; + htotal = adjusted_mode->crtc_htotal; + hsync_start = adjusted_mode->crtc_hsync_start; + hsync_end = adjusted_mode->crtc_hsync_end; + hsync_size = hsync_end - hsync_start; + hfront_porch = (adjusted_mode->crtc_hsync_start - + adjusted_mode->crtc_hdisplay); + hback_porch = (adjusted_mode->crtc_htotal - + adjusted_mode->crtc_hsync_end); + vactive = adjusted_mode->crtc_vdisplay; + vtotal = adjusted_mode->crtc_vtotal; + vsync_start = adjusted_mode->crtc_vsync_start; + vsync_end = adjusted_mode->crtc_vsync_end; + vsync_shift = hsync_start - htotal / 2; + + if (intel_dsi->dual_link) { + hactive /= 2; + if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) + hactive += intel_dsi->pixel_overlap; + htotal /= 2; + } + + /* minimum hactive as per bspec: 256 pixels */ + if 
(adjusted_mode->crtc_hdisplay < 256)
+		DRM_ERROR("hactive is less than 256 pixels\n");
+
+	/* if RGB666 format, then hactive must be multiple of 4 pixels */
+	if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
+		DRM_ERROR("hactive pixels are not multiple of 4\n");
+
+	/* program TRANS_HTOTAL register */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		I915_WRITE(HTOTAL(dsi_trans),
+			   (hactive - 1) | ((htotal - 1) << 16));
+	}
+
+	/* TRANS_HSYNC register to be programmed only for video mode */
+	if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+		if (intel_dsi->video_mode_format ==
+		    VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
+			/* BSPEC: hsync size should be at least 16 pixels */
+			if (hsync_size < 16)
+				DRM_ERROR("hsync size < 16 pixels\n");
+		}
+
+		if (hback_porch < 16)
+			DRM_ERROR("hback porch < 16 pixels\n");
+
+		if (intel_dsi->dual_link) {
+			hsync_start /= 2;
+			hsync_end /= 2;
+		}
+
+		for_each_dsi_port(port, intel_dsi->ports) {
+			dsi_trans = dsi_port_to_transcoder(port);
+			I915_WRITE(HSYNC(dsi_trans),
+				   (hsync_start - 1) | ((hsync_end - 1) << 16));
+		}
+	}
+
+	/* program TRANS_VTOTAL register */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		/*
+		 * FIXME: Programming this by assuming progressive mode, since
+		 * the interlace information from the VBT is not saved inside
+		 * struct drm_display_mode.
+		 * For interlaced modes, program the required pixel count minus 2
+		 */
+		I915_WRITE(VTOTAL(dsi_trans),
+			   (vactive - 1) | ((vtotal - 1) << 16));
+	}
+
+	if (vsync_end < vsync_start || vsync_end > vtotal)
+		DRM_ERROR("Invalid vsync_end value\n");
+
+	if (vsync_start < vactive)
+		DRM_ERROR("vsync_start less than vactive\n");
+
+	/* program TRANS_VSYNC register */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		I915_WRITE(VSYNC(dsi_trans),
+			   (vsync_start - 1) | ((vsync_end - 1) << 16));
+	}
+
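
Dual link halves hactive and htotal per transcoder (front-back mode adds the pixel overlap back onto hactive), and the transcoder timing registers then take size-minus-one fields packed low/high. A standalone check of that arithmetic (the panel numbers are hypothetical, not from a real VBT):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical dual-link front-back panel */
	uint16_t hactive = 1920, htotal = 2000, pixel_overlap = 4;

	hactive = hactive / 2 + pixel_overlap;	/* each pipe drives half + overlap */
	htotal /= 2;

	/* transcoder registers pack "size minus one" fields */
	uint32_t reg = (uint32_t)(hactive - 1) | ((uint32_t)(htotal - 1) << 16);

	printf("HTOTAL = 0x%08x\n", reg);	/* 0x03e703c3 */
	return 0;
}

+	/*
+	 * FIXME: It has to be programmed only for interlaced
+	 * modes. Put the check condition here once interlaced
+	 * info is available, as described above.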
+ * program TRANS_VSYNCSHIFT register + */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift); + } +} + +static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + enum transcoder dsi_trans; + u32 tmp; + + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(PIPECONF(dsi_trans)); + tmp |= PIPECONF_ENABLE; + I915_WRITE(PIPECONF(dsi_trans), tmp); + + /* wait for transcoder to be enabled */ + if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans), + I965_PIPECONF_ACTIVE, + I965_PIPECONF_ACTIVE, 10)) + DRM_ERROR("DSI transcoder not enabled\n"); + } +} + +static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + enum transcoder dsi_trans; + u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul; + + /* + * escape clock count calculation: + * BYTE_CLK_COUNT = TIME_NS/(8 * UI) + * UI (nsec) = (10^6)/Bitrate + * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate + * ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS + */ + divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000; + mul = 8 * 1000000; + hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul, + divisor); + lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor); + ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor); + + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + + /* program hst_tx_timeout */ + tmp = I915_READ(DSI_HSTX_TO(dsi_trans)); + tmp &= ~HSTX_TIMEOUT_VALUE_MASK; + tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout); + I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp); + + /* FIXME: DSI_CALIB_TO */ + + /* program lp_rx_host timeout */ + tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans)); + tmp &= ~LPRX_TIMEOUT_VALUE_MASK; + tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout); + I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp); + + /* FIXME: DSI_PWAIT_TO */ + + /* program turn around timeout */ + tmp = I915_READ(DSI_TA_TO(dsi_trans)); + tmp &= ~TA_TIMEOUT_VALUE_MASK; + tmp |= TA_TIMEOUT_VALUE(ta_timeout); + I915_WRITE(DSI_TA_TO(dsi_trans), tmp); + } +} + +static void +gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config) { /* step 4a: power up all lanes of the DDI used by DSI */ gen11_dsi_power_up_lanes(encoder); + + /* step 4b: configure lane sequencing of the Combo-PHY transmitters */ + gen11_dsi_config_phy_lanes_sequence(encoder); + + /* step 4c: configure voltage swing and skew */ + gen11_dsi_voltage_swing_program_seq(encoder); + + /* enable DDI buffer */ + gen11_dsi_enable_ddi_buffer(encoder); + + /* setup D-PHY timings */ + gen11_dsi_setup_dphy_timings(encoder); + + /* step 4h: setup DSI protocol timeouts */ + gen11_dsi_setup_timeouts(encoder); + + /* Step (4h, 4i, 4j, 4k): Configure transcoder */ + gen11_dsi_configure_transcoder(encoder, pipe_config); + + /* Step 4l: Gate DDI clocks */ + gen11_dsi_gate_clocks(encoder); } -static void __attribute__((unused)) -gen11_dsi_pre_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config, - const struct drm_connector_state *conn_state) +static void gen11_dsi_powerup_panel(struct 
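
The conversion in gen11_dsi_setup_timeouts() above can be restated in plain C. The figures below are hypothetical (a 1 Gbps link, 77 ns tLPX, a maximal 16-bit count) and only mirror the driver's DIV_ROUND_UP arithmetic, without asserting anything about Bspec units:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t bitrate_khz = 1000000, tlpx_ns = 77, count = 65535;

	uint64_t divisor = tlpx_ns * bitrate_khz * 1000;
	uint64_t mul = 8 * 1000000ull;

	/* same arithmetic as the timeout programming above */
	printf("escape-clock count = %llu\n",
	       (unsigned long long)DIV_ROUND_UP(count * mul, divisor)); /* 7 */
	return 0;
}
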
intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct mipi_dsi_device *dsi; + enum port port; + enum transcoder dsi_trans; + u32 tmp; + int ret; + + /* set maximum return packet size */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + + /* + * FIXME: This uses the number of DW's currently in the payload + * receive queue. This is probably not what we want here. + */ + tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans)); + tmp &= NUMBER_RX_PLOAD_DW_MASK; + /* multiply "Number Rx Payload DW" by 4 to get max value */ + tmp = tmp * 4; + dsi = intel_dsi->dsi_hosts[port]->device; + ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp); + if (ret < 0) + DRM_ERROR("error setting max return pkt size%d\n", tmp); + } + + /* panel power on related mipi dsi vbt sequences */ + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON); + intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON); + + /* ensure all panel commands dispatched before enabling transcoder */ + wait_for_cmds_dispatched_to_panel(encoder); +} + +static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { /* step2: enable IO power */ gen11_dsi_enable_io_power(encoder); /* step3: enable DSI PLL */ gen11_dsi_program_esc_clk_div(encoder); +} + +static void gen11_dsi_pre_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + + /* step3b */ + gen11_dsi_map_pll(encoder, pipe_config); /* step4: enable DSI port and DPHY */ - gen11_dsi_enable_port_and_phy(encoder); + gen11_dsi_enable_port_and_phy(encoder, pipe_config); + + /* step5: program and powerup panel */ + gen11_dsi_powerup_panel(encoder); + + /* step6c: configure transcoder timings */ + gen11_dsi_set_transcoder_timings(encoder, pipe_config); + + /* step6d: enable dsi transcoder */ + gen11_dsi_enable_transcoder(encoder); + + /* step7: enable backlight */ + intel_panel_enable_backlight(pipe_config, conn_state); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON); +} + +static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + enum transcoder dsi_trans; + u32 tmp; + + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + + /* disable transcoder */ + tmp = I915_READ(PIPECONF(dsi_trans)); + tmp &= ~PIPECONF_ENABLE; + I915_WRITE(PIPECONF(dsi_trans), tmp); + + /* wait for transcoder to be disabled */ + if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans), + I965_PIPECONF_ACTIVE, 0, 50)) + DRM_ERROR("DSI trancoder not disabled\n"); + } +} + +static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF); + + /* ensure cmds dispatched 
to panel */ + wait_for_cmds_dispatched_to_panel(encoder); +} + +static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + enum transcoder dsi_trans; + u32 tmp; + + /* put dsi link in ULPS */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(DSI_LP_MSG(dsi_trans)); + tmp |= LINK_ENTER_ULPS; + tmp &= ~LINK_ULPS_TYPE_LP11; + I915_WRITE(DSI_LP_MSG(dsi_trans), tmp); + + if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) & + LINK_IN_ULPS), + 10)) + DRM_ERROR("DSI link not in ULPS\n"); + } + + /* disable ddi function */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); + tmp &= ~TRANS_DDI_FUNC_ENABLE; + I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp); + } + + /* disable port sync mode if dual link */ + if (intel_dsi->dual_link) { + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans)); + tmp &= ~PORT_SYNC_MODE_ENABLE; + I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); + } + } +} + +static void gen11_dsi_disable_port(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 tmp; + enum port port; + + gen11_dsi_ungate_clocks(encoder); + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(DDI_BUF_CTL(port)); + tmp &= ~DDI_BUF_CTL_ENABLE; + I915_WRITE(DDI_BUF_CTL(port), tmp); + + if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) & + DDI_BUF_IS_IDLE), + 8)) + DRM_ERROR("DDI port:%c buffer not idle\n", + port_name(port)); + } + gen11_dsi_ungate_clocks(encoder); +} + +static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 tmp; + + intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO); + + if (intel_dsi->dual_link) + intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO); + + /* set mode to DDI */ + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_DSI_IO_MODECTL(port)); + tmp &= ~COMBO_PHY_MODE_DSI; + I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp); + } +} + +static void gen11_dsi_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + + /* step1: turn off backlight */ + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); + intel_panel_disable_backlight(old_conn_state); + + /* step2d,e: disable transcoder and wait */ + gen11_dsi_disable_transcoder(encoder); + + /* step2f,g: powerdown panel */ + gen11_dsi_powerdown_panel(encoder); + + /* step2h,i,j: deconfig trancoder */ + gen11_dsi_deconfigure_trancoder(encoder); + + /* step3: disable port */ + gen11_dsi_disable_port(encoder); + + /* step4: disable IO power */ + gen11_dsi_disable_io_power(encoder); +} + +static void gen11_dsi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 pll_id; + + /* FIXME: adapt 
+ +static void gen11_dsi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 pll_id; + + /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */ + pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); + pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id); + pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk; + pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); +} + +static bool gen11_dsi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) +{ + struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, + base); + struct intel_connector *intel_connector = intel_dsi->attached_connector; + struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + const struct drm_display_mode *fixed_mode = + intel_connector->panel.fixed_mode; + struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + + intel_fixed_panel_mode(fixed_mode, adjusted_mode); + intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode); + + adjusted_mode->flags = 0; + + /* Dual link goes to transcoder DSI '0' */ + if (intel_dsi->ports == BIT(PORT_B)) + pipe_config->cpu_transcoder = TRANSCODER_DSI_1; + else + pipe_config->cpu_transcoder = TRANSCODER_DSI_0; + + pipe_config->clock_set = true; + pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5; + + return true; +} + +static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u64 domains = 0; + enum port port; + + for_each_dsi_port(port, intel_dsi->ports) + if (port == PORT_A) + domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO); + else + domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO); + + return domains; +} + +static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 tmp; + enum port port; + enum transcoder dsi_trans; + bool ret = false; + + if (!intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain)) + return false; + + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); + switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { + case TRANS_DDI_EDP_INPUT_A_ON: + *pipe = PIPE_A; + break; + case TRANS_DDI_EDP_INPUT_B_ONOFF: + *pipe = PIPE_B; + break; + case TRANS_DDI_EDP_INPUT_C_ONOFF: + *pipe = PIPE_C; + break; + default: + DRM_ERROR("Invalid PIPE input\n"); + goto out; + } + + tmp = I915_READ(PIPECONF(dsi_trans)); + ret = tmp & PIPECONF_ENABLE; + } +out: + intel_display_power_put(dev_priv, encoder->power_domain); + return ret; +} + +static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder) +{ + intel_encoder_destroy(encoder); +} + +static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = { + .destroy = gen11_dsi_encoder_destroy, +}; + +static const struct drm_connector_funcs gen11_dsi_connector_funcs = { + .late_register = intel_connector_register, + .early_unregister = intel_connector_unregister, + .destroy = intel_connector_destroy, + .fill_modes = drm_helper_probe_single_connector_modes, + .atomic_get_property = intel_digital_connector_atomic_get_property, + .atomic_set_property = intel_digital_connector_atomic_set_property, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = intel_digital_connector_duplicate_state, +};
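gen11_dsi_compute_config() above derives port_clock from the per-lane DSI bit rate divided by 5. Assuming intel_dsi_bitrate() has the usual form used elsewhere in the driver (pixel clock times bits per pixel over lane count, with pclk in kHz and the bit rate in kbps), a quick worked example:

#include <assert.h>

/* hypothetical panel: 148.5 MHz pixel clock, RGB888 over 4 lanes */
static int dsi_port_clock(int pclk_khz, int bpp, int lanes)
{
        int bitrate = pclk_khz * bpp / lanes; /* per-lane kbps */

        return bitrate / 5; /* matches pipe_config->port_clock above */
}

int main(void)
{
        /* 148500 kHz * 24 bpp / 4 lanes = 891000 kbps -> 178200 */
        assert(dsi_port_clock(148500, 24, 4) == 178200);
        return 0;
}

+ +static const struct drm_connector_helper_funcs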
gen11_dsi_connector_helper_funcs = { + .get_modes = intel_dsi_get_modes, + .mode_valid = intel_dsi_mode_valid, + .atomic_check = intel_digital_connector_atomic_check, +}; + +static int gen11_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi) +{ + return 0; +} + +static int gen11_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi) +{ + return 0; +} + +static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host); + struct mipi_dsi_packet dsi_pkt; + ssize_t ret; + bool enable_lpdt = false; + + ret = mipi_dsi_create_packet(&dsi_pkt, msg); + if (ret < 0) + return ret; + + if (msg->flags & MIPI_DSI_MSG_USE_LPM) + enable_lpdt = true; + + /* send packet header */ + ret = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt); + if (ret < 0) + return ret; + + /* only long packet contains payload */ + if (mipi_dsi_packet_format_is_long(msg->type)) { + ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt); + if (ret < 0) + return ret; + } + + //TODO: add payload receive code if needed + + ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length; + + return ret; +} + +static const struct mipi_dsi_host_ops gen11_dsi_host_ops = { + .attach = gen11_dsi_host_attach, + .detach = gen11_dsi_host_detach, + .transfer = gen11_dsi_host_transfer, +}; + +void icl_dsi_init(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + struct intel_dsi *intel_dsi; + struct intel_encoder *encoder; + struct intel_connector *intel_connector; + struct drm_connector *connector; + struct drm_display_mode *scan, *fixed_mode = NULL; + enum port port; + + if (!intel_bios_is_dsi_present(dev_priv, &port)) + return; + + intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); + if (!intel_dsi) + return; + + intel_connector = intel_connector_alloc(); + if (!intel_connector) { + kfree(intel_dsi); + return; + } + + encoder = &intel_dsi->base; + intel_dsi->attached_connector = intel_connector; + connector = &intel_connector->base; + + /* register DSI encoder with DRM subsystem */ + drm_encoder_init(dev, &encoder->base, &gen11_dsi_encoder_funcs, + DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port)); + + encoder->pre_pll_enable = gen11_dsi_pre_pll_enable; + encoder->pre_enable = gen11_dsi_pre_enable; + encoder->disable = gen11_dsi_disable; + encoder->port = port; + encoder->get_config = gen11_dsi_get_config; + encoder->compute_config = gen11_dsi_compute_config; + encoder->get_hw_state = gen11_dsi_get_hw_state; + encoder->type = INTEL_OUTPUT_DSI; + encoder->cloneable = 0; + encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); + encoder->power_domain = POWER_DOMAIN_PORT_DSI; + encoder->get_power_domains = gen11_dsi_get_power_domains; + + /* register DSI connector with DRM subsystem */ + drm_connector_init(dev, connector, &gen11_dsi_connector_funcs, + DRM_MODE_CONNECTOR_DSI); + drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs); + connector->display_info.subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + intel_connector->get_hw_state = intel_connector_get_hw_state; + + /* attach connector to encoder */ + intel_connector_attach_encoder(intel_connector, encoder); + + /* fill mode info from VBT */ + mutex_lock(&dev->mode_config.mutex); + intel_dsi_vbt_get_modes(intel_dsi); + list_for_each_entry(scan, &connector->probed_modes, head) { + if (scan->type & DRM_MODE_TYPE_PREFERRED) { + 
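gen11_dsi_host_transfer() above always emits the 4-byte packet header and pushes a payload only for long packets, returning header plus payload length on success. A sketch of that split, with mipi_dsi_packet modeled by plain types (the real struct is declared in include/drm/drm_mipi_dsi.h):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct pkt {
        uint8_t header[4];     /* data ID, two WC/parameter bytes, ECC */
        size_t payload_length; /* 0 for short packets */
        const uint8_t *payload;
};

static long transfer(const struct pkt *p, bool is_long)
{
        /* send the packet header ... */
        if (is_long) {
                /* ... and the payload, long packets only */
        }
        return sizeof(p->header) + p->payload_length;
}

int main(void)
{
        static const uint8_t data[2] = { 0x51, 0xff }; /* e.g. a DCS write */
        struct pkt shortp = { .payload_length = 0 };
        struct pkt longp = { .payload_length = sizeof(data), .payload = data };

        assert(transfer(&shortp, false) == 4);
        assert(transfer(&longp, true) == 6);
        return 0;
}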
fixed_mode = drm_mode_duplicate(dev, scan); + break; + } + } + mutex_unlock(&dev->mode_config.mutex); + + if (!fixed_mode) { + DRM_ERROR("DSI fixed mode info missing\n"); + goto err; + } + + connector->display_info.width_mm = fixed_mode->width_mm; + connector->display_info.height_mm = fixed_mode->height_mm; + intel_panel_init(&intel_connector->panel, fixed_mode, NULL); + intel_panel_setup_backlight(connector, INVALID_PIPE); + + + if (dev_priv->vbt.dsi.config->dual_link) + intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B); + else + intel_dsi->ports = BIT(port); + + intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports; + intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports; + + for_each_dsi_port(port, intel_dsi->ports) { + struct intel_dsi_host *host; + + host = intel_dsi_host_init(intel_dsi, &gen11_dsi_host_ops, port); + if (!host) + goto err; + + intel_dsi->dsi_hosts[port] = host; + } + + if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) { + DRM_DEBUG_KMS("no device found\n"); + goto err; + } + + return; + +err: + drm_encoder_cleanup(&encoder->base); + kfree(intel_dsi); + kfree(intel_connector); } diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index b04952bacf77..8cb02f28d30c 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -184,6 +184,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->fifo_changed = false; crtc_state->wm.need_postvbl_update = false; crtc_state->fb_bits = 0; + crtc_state->update_planes = 0; return &crtc_state->base; } @@ -203,6 +204,72 @@ intel_crtc_destroy_state(struct drm_crtc *crtc, drm_atomic_helper_crtc_destroy_state(crtc, state); } +static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state, + int num_scalers_need, struct intel_crtc *intel_crtc, + const char *name, int idx, + struct intel_plane_state *plane_state, + int *scaler_id) +{ + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + int j; + u32 mode; + + if (*scaler_id < 0) { + /* find a free scaler */ + for (j = 0; j < intel_crtc->num_scalers; j++) { + if (scaler_state->scalers[j].in_use) + continue; + + *scaler_id = j; + scaler_state->scalers[*scaler_id].in_use = 1; + break; + } + } + + if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx)) + return; + + /* set scaler mode */ + if (plane_state && plane_state->base.fb && + plane_state->base.fb->format->is_yuv && + plane_state->base.fb->format->num_planes > 1) { + if (IS_GEN9(dev_priv) && + !IS_GEMINILAKE(dev_priv)) { + mode = SKL_PS_SCALER_MODE_NV12; + } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) { + /* + * On gen11+'s HDR planes we only use the scaler for + * scaling. They have a dedicated chroma upsampler, so + * we don't need the scaler to upsample the UV plane. + */ + mode = PS_SCALER_MODE_NORMAL; + } else { + mode = PS_SCALER_MODE_PLANAR; + + if (plane_state->linked_plane) + mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id); + } + } else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) { + mode = PS_SCALER_MODE_NORMAL; + } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) { + /* + * when only 1 scaler is in use on a pipe with 2 scalers + * scaler 0 operates in high quality (HQ) mode. 
+ * In this case use scaler 0 to take advantage of HQ mode + */ + scaler_state->scalers[*scaler_id].in_use = 0; + *scaler_id = 0; + scaler_state->scalers[0].in_use = 1; + mode = SKL_PS_SCALER_MODE_HQ; + } else { + mode = SKL_PS_SCALER_MODE_DYN; + } + + DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n", + intel_crtc->pipe, *scaler_id, name, idx); + scaler_state->scalers[*scaler_id].mode = mode; +} + /** * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests * @dev_priv: i915 device @@ -232,7 +299,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, struct drm_atomic_state *drm_state = crtc_state->base.state; struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state); int num_scalers_need; - int i, j; + int i; num_scalers_need = hweight32(scaler_state->scaler_users); @@ -304,59 +371,17 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, idx = plane->base.id; /* plane on different crtc cannot be a scaler user of this crtc */ - if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) { + if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) continue; - } plane_state = intel_atomic_get_new_plane_state(intel_state, intel_plane); scaler_id = &plane_state->scaler_id; } - if (*scaler_id < 0) { - /* find a free scaler */ - for (j = 0; j < intel_crtc->num_scalers; j++) { - if (!scaler_state->scalers[j].in_use) { - scaler_state->scalers[j].in_use = 1; - *scaler_id = j; - DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n", - intel_crtc->pipe, *scaler_id, name, idx); - break; - } - } - } - - if (WARN_ON(*scaler_id < 0)) { - DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx); - continue; - } - - /* set scaler mode */ - if ((INTEL_GEN(dev_priv) >= 9) && - plane_state && plane_state->base.fb && - plane_state->base.fb->format->format == - DRM_FORMAT_NV12) { - if (INTEL_GEN(dev_priv) == 9 && - !IS_GEMINILAKE(dev_priv) && - !IS_SKYLAKE(dev_priv)) - scaler_state->scalers[*scaler_id].mode = - SKL_PS_SCALER_MODE_NV12; - else - scaler_state->scalers[*scaler_id].mode = - PS_SCALER_MODE_PLANAR; - } else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) { - /* - * when only 1 scaler is in use on either pipe A or B, - * scaler 0 operates in high quality (HQ) mode. - * In this case use scaler 0 to take advantage of HQ mode - */ - *scaler_id = 0; - scaler_state->scalers[0].in_use = 1; - scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ; - scaler_state->scalers[1].in_use = 0; - } else { - scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN; - } + intel_atomic_setup_scaler(scaler_state, num_scalers_need, + intel_crtc, name, idx, + plane_state, scaler_id); } return 0; diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index aabebe0d2e9b..0a73e6e65c20 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -36,28 +36,31 @@ #include <drm/drm_plane_helper.h> #include "intel_drv.h" -/** - * intel_create_plane_state - create plane state object - * @plane: drm plane - * - * Allocates a fresh plane state for the given plane and sets some of - * the state values to sensible initial values. 
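The allocation policy that intel_atomic_setup_scaler() above keeps from the old loop: take the first free scaler, but when a two-scaler pipe has exactly one user, trade whatever was found for scaler 0 so the high quality mode can be used. A compact model of that policy:

#include <assert.h>
#include <stdbool.h>

#define NUM_SCALERS 2 /* stands in for intel_crtc->num_scalers */

static bool in_use[NUM_SCALERS];

static int setup_scaler(int num_scalers_need)
{
        int id = -1, j;

        /* find a free scaler */
        for (j = 0; j < NUM_SCALERS; j++) {
                if (in_use[j])
                        continue;
                id = j;
                in_use[j] = true;
                break;
        }
        if (id < 0)
                return -1;

        /* single user on a multi-scaler pipe: steer it onto HQ scaler 0 */
        if (num_scalers_need == 1 && NUM_SCALERS > 1) {
                in_use[id] = false;
                id = 0;
                in_use[0] = true;
        }
        return id;
}

int main(void)
{
        in_use[0] = true;             /* pretend scaler 0 was found busy */
        assert(setup_scaler(1) == 0); /* lone user still lands on HQ scaler 0 */

        in_use[0] = false; in_use[1] = false;
        assert(setup_scaler(2) == 0); /* two users: first-free order, dyn mode */
        assert(setup_scaler(2) == 1);
        return 0;
}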
- * - * Returns: A newly allocated plane state, or NULL on failure - */ -struct intel_plane_state * -intel_create_plane_state(struct drm_plane *plane) +struct intel_plane *intel_plane_alloc(void) { - struct intel_plane_state *state; + struct intel_plane_state *plane_state; + struct intel_plane *plane; - state = kzalloc(sizeof(*state), GFP_KERNEL); - if (!state) - return NULL; + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) + return ERR_PTR(-ENOMEM); - state->base.plane = plane; - state->base.rotation = DRM_MODE_ROTATE_0; + plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL); + if (!plane_state) { + kfree(plane); + return ERR_PTR(-ENOMEM); + } - return state; + __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base); + plane_state->scaler_id = -1; + + return plane; +} + +void intel_plane_free(struct intel_plane *plane) +{ + intel_plane_destroy_state(&plane->base, plane->base.state); + kfree(plane); } /** @@ -117,10 +120,14 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ struct intel_plane *intel_plane = to_intel_plane(plane); int ret; + crtc_state->active_planes &= ~BIT(intel_plane->id); + crtc_state->nv12_planes &= ~BIT(intel_plane->id); + intel_state->base.visible = false; + + /* If this is a cursor plane, no further checks are needed. */ if (!intel_state->base.crtc && !old_plane_state->base.crtc) return 0; - intel_state->base.visible = false; ret = intel_plane->check_plane(crtc_state, intel_state); if (ret) return ret; @@ -128,13 +135,12 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ /* FIXME pre-g4x don't work like this */ if (state->visible) crtc_state->active_planes |= BIT(intel_plane->id); - else - crtc_state->active_planes &= ~BIT(intel_plane->id); if (state->visible && state->fb->format->format == DRM_FORMAT_NV12) crtc_state->nv12_planes |= BIT(intel_plane->id); - else - crtc_state->nv12_planes &= ~BIT(intel_plane->id); + + if (state->visible || old_plane_state->base.visible) + crtc_state->update_planes |= BIT(intel_plane->id); return intel_plane_atomic_calc_changes(old_crtc_state, &crtc_state->base, @@ -152,6 +158,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane, const struct drm_crtc_state *old_crtc_state; struct drm_crtc_state *new_crtc_state; + new_plane_state->visible = false; if (!crtc) return 0; @@ -164,29 +171,123 @@ static int intel_plane_atomic_check(struct drm_plane *plane, to_intel_plane_state(new_plane_state)); } -static void intel_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) +static struct intel_plane * +skl_next_plane_to_commit(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct skl_ddb_entry entries_y[I915_MAX_PLANES], + struct skl_ddb_entry entries_uv[I915_MAX_PLANES], + unsigned int *update_mask) { - struct intel_atomic_state *state = to_intel_atomic_state(old_state->state); - struct intel_plane *intel_plane = to_intel_plane(plane); - const struct intel_plane_state *new_plane_state = - intel_atomic_get_new_plane_state(state, intel_plane); - struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc; + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_plane_state *plane_state; + struct intel_plane *plane; + int i; + + if (*update_mask == 0) + return NULL; - if (new_plane_state->base.visible) { - const struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc)); + 
for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + enum plane_id plane_id = plane->id; - trace_intel_update_plane(plane, - to_intel_crtc(crtc)); + if (crtc->pipe != plane->pipe || + !(*update_mask & BIT(plane_id))) + continue; + + if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id], + entries_y, + I915_MAX_PLANES, plane_id) || + skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id], + entries_uv, + I915_MAX_PLANES, plane_id)) + continue; + + *update_mask &= ~BIT(plane_id); + entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id]; + entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id]; + + return plane; + } - intel_plane->update_plane(intel_plane, - new_crtc_state, new_plane_state); - } else { - trace_intel_disable_plane(plane, - to_intel_crtc(crtc)); + /* should never happen */ + WARN_ON(1); - intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc)); + return NULL; +} + +void skl_update_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct skl_ddb_entry entries_y[I915_MAX_PLANES]; + struct skl_ddb_entry entries_uv[I915_MAX_PLANES]; + u32 update_mask = new_crtc_state->update_planes; + struct intel_plane *plane; + + memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y, + sizeof(old_crtc_state->wm.skl.plane_ddb_y)); + memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv, + sizeof(old_crtc_state->wm.skl.plane_ddb_uv)); + + while ((plane = skl_next_plane_to_commit(state, crtc, + entries_y, entries_uv, + &update_mask))) { + struct intel_plane_state *new_plane_state = + intel_atomic_get_new_plane_state(state, plane); + + if (new_plane_state->base.visible) { + trace_intel_update_plane(&plane->base, crtc); + plane->update_plane(plane, new_crtc_state, new_plane_state); + } else if (new_plane_state->slave) { + struct intel_plane *master = + new_plane_state->linked_plane; + + /* + * We update the slave plane from this function because + * programming it from the master plane's update_plane + * callback runs into issues when the Y plane is + * reassigned, disabled or used by a different plane. + * + * The slave plane is updated with the master plane's + * plane_state. 
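skl_next_plane_to_commit() above only hands back a plane once its new DDB allocation (both the Y and UV entries) stops overlapping every other plane's still-live allocation, so the per-plane register writes can be flushed one at a time without two planes transiently claiming the same DDB blocks. The overlap test is a plain half-open interval intersection, sketched here with skl_ddb_entry reduced to start/end block indices:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct ddb_entry { uint16_t start, end; }; /* [start, end) in DDB blocks */

/* true if @e intersects any entry other than its own slot @idx */
static bool overlaps(const struct ddb_entry *e,
                     const struct ddb_entry *entries, int n, int idx)
{
        for (int i = 0; i < n; i++) {
                if (i == idx)
                        continue;
                if (e->start < entries[i].end && entries[i].start < e->end)
                        return true;
        }
        return false;
}

int main(void)
{
        struct ddb_entry old[2] = { { 0, 100 }, { 100, 200 } };
        struct ddb_entry new0 = { 50, 150 };  /* grows into plane 1's old space */
        struct ddb_entry new1 = { 150, 200 }; /* shrinks away first */

        assert(overlaps(&new0, old, 2, 0));   /* plane 0 must wait */
        assert(!overlaps(&new1, old, 2, 1));  /* plane 1 can commit now */

        old[1] = new1;                        /* after plane 1 commits... */
        assert(!overlaps(&new0, old, 2, 0));  /* ...plane 0 is safe too */
        return 0;
}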
+ */ + new_plane_state = + intel_atomic_get_new_plane_state(state, master); + + trace_intel_update_plane(&plane->base, crtc); + plane->update_slave(plane, new_crtc_state, new_plane_state); + } else { + trace_intel_disable_plane(&plane->base, crtc); + plane->disable_plane(plane, new_crtc_state); + } + } +} + +void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + u32 update_mask = new_crtc_state->update_planes; + struct intel_plane_state *new_plane_state; + struct intel_plane *plane; + int i; + + for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) { + if (crtc->pipe != plane->pipe || + !(update_mask & BIT(plane->id))) + continue; + + if (new_plane_state->base.visible) { + trace_intel_update_plane(&plane->base, crtc); + plane->update_plane(plane, new_crtc_state, new_plane_state); + } else { + trace_intel_disable_plane(&plane->base, crtc); + plane->disable_plane(plane, new_crtc_state); + } } } @@ -194,7 +295,6 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = { .prepare_fb = intel_prepare_plane_fb, .cleanup_fb = intel_cleanup_plane_fb, .atomic_check = intel_plane_atomic_check, - .atomic_update = intel_plane_atomic_update, }; /** diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 769f3f586661..ae55a6865d5c 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -144,26 +144,43 @@ static const struct { /* HDMI N/CTS table */ #define TMDS_297M 297000 #define TMDS_296M 296703 +#define TMDS_594M 594000 +#define TMDS_593M 593407 + static const struct { int sample_rate; int clock; int n; int cts; } hdmi_aud_ncts[] = { - { 44100, TMDS_296M, 4459, 234375 }, - { 44100, TMDS_297M, 4704, 247500 }, - { 48000, TMDS_296M, 5824, 281250 }, - { 48000, TMDS_297M, 5120, 247500 }, { 32000, TMDS_296M, 5824, 421875 }, { 32000, TMDS_297M, 3072, 222750 }, + { 32000, TMDS_593M, 5824, 843750 }, + { 32000, TMDS_594M, 3072, 445500 }, + { 44100, TMDS_296M, 4459, 234375 }, + { 44100, TMDS_297M, 4704, 247500 }, + { 44100, TMDS_593M, 8918, 937500 }, + { 44100, TMDS_594M, 9408, 990000 }, { 88200, TMDS_296M, 8918, 234375 }, { 88200, TMDS_297M, 9408, 247500 }, - { 96000, TMDS_296M, 11648, 281250 }, - { 96000, TMDS_297M, 10240, 247500 }, + { 88200, TMDS_593M, 17836, 937500 }, + { 88200, TMDS_594M, 18816, 990000 }, { 176400, TMDS_296M, 17836, 234375 }, { 176400, TMDS_297M, 18816, 247500 }, + { 176400, TMDS_593M, 35672, 937500 }, + { 176400, TMDS_594M, 37632, 990000 }, + { 48000, TMDS_296M, 5824, 281250 }, + { 48000, TMDS_297M, 5120, 247500 }, + { 48000, TMDS_593M, 5824, 562500 }, + { 48000, TMDS_594M, 6144, 594000 }, + { 96000, TMDS_296M, 11648, 281250 }, + { 96000, TMDS_297M, 10240, 247500 }, + { 96000, TMDS_593M, 11648, 562500 }, + { 96000, TMDS_594M, 12288, 594000 }, { 192000, TMDS_296M, 23296, 281250 }, { 192000, TMDS_297M, 20480, 247500 }, + { 192000, TMDS_593M, 23296, 562500 }, + { 192000, TMDS_594M, 24576, 594000 }, }; /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ @@ -912,6 +929,9 @@ static int i915_audio_component_bind(struct device *i915_kdev, if (WARN_ON(acomp->base.ops || acomp->base.dev)) return -EEXIST; + if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS))) + return -ENOMEM; + drm_modeset_lock_all(&dev_priv->drm); acomp->base.ops = &i915_audio_component_ops; acomp->base.dev = i915_kdev; @@ -935,6 +955,8 @@ static void i915_audio_component_unbind(struct 
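The new TMDS_593M/TMDS_594M rows above extend the table to HDMI 2.0 clocks and follow the audio clock regeneration relation fs = f_TMDS * N / (128 * CTS), so each added entry can be sanity-checked arithmetically:

#include <assert.h>
#include <stdint.h>

/* fs = f_TMDS * N / (128 * CTS); TMDS clock in kHz to match the table */
static uint64_t recovered_fs(uint64_t tmds_khz, uint64_t n, uint64_t cts)
{
        return tmds_khz * 1000 * n / (128 * cts);
}

int main(void)
{
        assert(recovered_fs(594000, 6144, 594000) == 48000);
        assert(recovered_fs(594000, 3072, 445500) == 32000);
        assert(recovered_fs(593407, 5824, 562500) == 48000);
        return 0;
}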
device *i915_kdev, acomp->base.dev = NULL; dev_priv->audio_component = NULL; drm_modeset_unlock_all(&dev_priv->drm); + + device_link_remove(hda_kdev, i915_kdev); } static const struct component_ops i915_audio_component_bind_ops = { diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 1faa494e2bc9..6d3e0260d49c 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -420,6 +420,13 @@ parse_general_features(struct drm_i915_private *dev_priv, intel_bios_ssc_frequency(dev_priv, general->ssc_freq); dev_priv->vbt.display_clock_mode = general->display_clock_mode; dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; + if (bdb->version >= 181) { + dev_priv->vbt.orientation = general->rotate_180 ? + DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP : + DRM_MODE_PANEL_ORIENTATION_NORMAL; + } else { + dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; + } DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", dev_priv->vbt.int_tv_support, dev_priv->vbt.int_crt_support, @@ -852,6 +859,30 @@ parse_mipi_config(struct drm_i915_private *dev_priv, parse_dsi_backlight_ports(dev_priv, bdb->version, port); + /* FIXME is the 90 vs. 270 correct? */ + switch (config->rotation) { + case ENABLE_ROTATION_0: + /* + * Most (all?) VBTs claim 0 degrees despite having + * an upside down panel, thus we do not trust this. + */ + dev_priv->vbt.dsi.orientation = + DRM_MODE_PANEL_ORIENTATION_UNKNOWN; + break; + case ENABLE_ROTATION_90: + dev_priv->vbt.dsi.orientation = + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP; + break; + case ENABLE_ROTATION_180: + dev_priv->vbt.dsi.orientation = + DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP; + break; + case ENABLE_ROTATION_270: + dev_priv->vbt.dsi.orientation = + DRM_MODE_PANEL_ORIENTATION_LEFT_UP; + break; + } + /* We have mandatory mipi config blocks. 
Initialize as generic panel */ dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; } @@ -1721,7 +1752,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv) const struct bdb_header *bdb; u8 __iomem *bios = NULL; - if (INTEL_INFO(dev_priv)->num_pipes == 0) { + if (!HAS_DISPLAY(dev_priv)) { DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n"); return; } @@ -2039,17 +2070,17 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, dvo_port = child->dvo_port; - switch (dvo_port) { - case DVO_PORT_MIPIA: - case DVO_PORT_MIPIC: + if (dvo_port == DVO_PORT_MIPIA || + (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) || + (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) { if (port) *port = dvo_port - DVO_PORT_MIPIA; return true; - case DVO_PORT_MIPIB: - case DVO_PORT_MIPID: + } else if (dvo_port == DVO_PORT_MIPIB || + dvo_port == DVO_PORT_MIPIC || + dvo_port == DVO_PORT_MIPID) { DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n", port_name(dvo_port - DVO_PORT_MIPIA)); - break; } } @@ -2159,3 +2190,49 @@ intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, return false; } + +enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, + enum port port) +{ + const struct ddi_vbt_port_info *info = + &dev_priv->vbt.ddi_port_info[port]; + enum aux_ch aux_ch; + + if (!info->alternate_aux_channel) { + aux_ch = (enum aux_ch)port; + + DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n", + aux_ch_name(aux_ch), port_name(port)); + return aux_ch; + } + + switch (info->alternate_aux_channel) { + case DP_AUX_A: + aux_ch = AUX_CH_A; + break; + case DP_AUX_B: + aux_ch = AUX_CH_B; + break; + case DP_AUX_C: + aux_ch = AUX_CH_C; + break; + case DP_AUX_D: + aux_ch = AUX_CH_D; + break; + case DP_AUX_E: + aux_ch = AUX_CH_E; + break; + case DP_AUX_F: + aux_ch = AUX_CH_F; + break; + default: + MISSING_CASE(info->alternate_aux_channel); + aux_ch = AUX_CH_A; + break; + } + + DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n", + aux_ch_name(aux_ch), port_name(port)); + + return aux_ch; +} diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 84bf8d827136..447c5256f63a 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -27,11 +27,7 @@ #include "i915_drv.h" -#ifdef CONFIG_SMP -#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu) -#else -#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL) -#endif +#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq) static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b) { diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 29075c763428..25e3aba9cded 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -2138,16 +2138,8 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv, static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv, int pixel_rate) { - if (INTEL_GEN(dev_priv) >= 10) + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) return DIV_ROUND_UP(pixel_rate, 2); - else if (IS_GEMINILAKE(dev_priv)) - /* - * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk - * as a temporary workaround. Use a higher cdclk instead. (Note that - * intel_compute_max_dotclk() limits the max pixel clock to 99% of max - * cdclk.) 
- */ - return DIV_ROUND_UP(pixel_rate * 100, 2 * 99); else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return pixel_rate; @@ -2543,14 +2535,8 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) { int max_cdclk_freq = dev_priv->max_cdclk_freq; - if (INTEL_GEN(dev_priv) >= 10) + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) return 2 * max_cdclk_freq; - else if (IS_GEMINILAKE(dev_priv)) - /* - * FIXME: Limiting to 99% as a temporary workaround. See - * intel_min_cdclk() for details. - */ - return 2 * max_cdclk_freq * 99 / 100; else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return max_cdclk_freq; @@ -2674,37 +2660,18 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv) fraction = 200; } - rawclk = CNP_RAWCLK_DIV((divider / 1000) - 1); - if (fraction) - rawclk |= CNP_RAWCLK_FRAC(DIV_ROUND_CLOSEST(1000, - fraction) - 1); - - I915_WRITE(PCH_RAWCLK_FREQ, rawclk); - return divider + fraction; -} + rawclk = CNP_RAWCLK_DIV(divider / 1000); + if (fraction) { + int numerator = 1; -static int icp_rawclk(struct drm_i915_private *dev_priv) -{ - u32 rawclk; - int divider, numerator, denominator, frequency; - - if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) { - frequency = 24000; - divider = 23; - numerator = 0; - denominator = 0; - } else { - frequency = 19200; - divider = 18; - numerator = 1; - denominator = 4; + rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000, + fraction) - 1); + if (HAS_PCH_ICP(dev_priv)) + rawclk |= ICP_RAWCLK_NUM(numerator); } - rawclk = CNP_RAWCLK_DIV(divider) | ICP_RAWCLK_NUM(numerator) | - ICP_RAWCLK_DEN(denominator); - I915_WRITE(PCH_RAWCLK_FREQ, rawclk); - return frequency; + return divider + fraction; } static int pch_rawclk(struct drm_i915_private *dev_priv) @@ -2754,9 +2721,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv) */ void intel_update_rawclk(struct drm_i915_private *dev_priv) { - if (HAS_PCH_ICP(dev_priv)) - dev_priv->rawclk_freq = icp_rawclk(dev_priv); - else if (HAS_PCH_CNP(dev_priv)) + if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv)) dev_priv->rawclk_freq = cnp_rawclk(dev_priv); else if (HAS_PCH_SPLIT(dev_priv)) dev_priv->rawclk_freq = pch_rawclk(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index c6a7beabd58d..5127da286a2b 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -149,7 +149,8 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state) if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) limited_color_range = intel_crtc_state->limited_color_range; - if (intel_crtc_state->ycbcr420) { + if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || + intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) { ilk_load_ycbcr_conversion_matrix(intel_crtc); return; } else if (crtc_state->ctm) { diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c new file mode 100644 index 000000000000..3d0271cebf99 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_combo_phy.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2018 Intel Corporation + */ + +#include "intel_drv.h" + +#define for_each_combo_port(__dev_priv, __port) \ + for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ + for_each_if(intel_port_is_combophy(__dev_priv, __port)) + +#define for_each_combo_port_reverse(__dev_priv, __port) \ + for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) 
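With icp_rawclk() folded into cnp_rawclk() above, the 19.2 MHz strap works out to divider = 19000 and fraction = 200 kHz, i.e. NUM = 1 and DEN = round(1000/200) - 1 = 4 (the 0.2 MHz remainder expressed as 1/5 MHz), and 19200 is returned as the rawclk frequency in kHz. The arithmetic, checked standalone (DIV_ROUND_CLOSEST simplified here to non-negative operands):

#include <assert.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        int divider = 19000, fraction = 200, numerator = 1;

        assert(divider / 1000 == 19);         /* integer MHz part -> DIV field */
        assert(DIV_ROUND_CLOSEST(numerator * 1000, fraction) - 1 == 4);
        assert(divider + fraction == 19200);  /* reported rawclk, kHz */

        /* the fuse-strapped 24 MHz case has no fractional part */
        divider = 24000; fraction = 0;
        assert(divider + fraction == 24000);
        return 0;
}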
\ + for_each_if(intel_port_is_combophy(__dev_priv, __port)) +enum { + PROCMON_0_85V_DOT_0, + PROCMON_0_95V_DOT_0, + PROCMON_0_95V_DOT_1, + PROCMON_1_05V_DOT_0, + PROCMON_1_05V_DOT_1, +}; + +static const struct cnl_procmon { + u32 dw1, dw9, dw10; +} cnl_procmon_values[] = { + [PROCMON_0_85V_DOT_0] = + { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, }, + [PROCMON_0_95V_DOT_0] = + { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, }, + [PROCMON_0_95V_DOT_1] = + { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, }, + [PROCMON_1_05V_DOT_0] = + { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, }, + [PROCMON_1_05V_DOT_1] = + { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, }, +}; + +/* + * CNL has just one set of registers, while ICL has two sets: one for port A and + * the other for port B. The CNL registers are equivalent to the ICL port A + * registers, which is why we call the ICL macros even though the function has CNL + * in its name. + */ +static const struct cnl_procmon * +cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port) +{ + const struct cnl_procmon *procmon; + u32 val; + + val = I915_READ(ICL_PORT_COMP_DW3(port)); + switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { + default: + MISSING_CASE(val); + /* fall through */ + case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0: + procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0]; + break; + case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0: + procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0]; + break; + case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1: + procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1]; + break; + case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0: + procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0]; + break; + case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1: + procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1]; + break; + } + + return procmon; +} + +static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv, + enum port port) +{ + const struct cnl_procmon *procmon; + u32 val; + + procmon = cnl_get_procmon_ref_values(dev_priv, port); + + val = I915_READ(ICL_PORT_COMP_DW1(port)); + val &= ~((0xff << 16) | 0xff); + val |= procmon->dw1; + I915_WRITE(ICL_PORT_COMP_DW1(port), val); + + I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9); + I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10); +} + +static bool check_phy_reg(struct drm_i915_private *dev_priv, + enum port port, i915_reg_t reg, u32 mask, + u32 expected_val) +{ + u32 val = I915_READ(reg); + + if ((val & mask) != expected_val) { + DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: " + "current %08x mask %08x expected %08x\n", + port_name(port), + reg.reg, val, mask, expected_val); + return false; + } + + return true; +} + +static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv, + enum port port) +{ + const struct cnl_procmon *procmon; + bool ret; + + procmon = cnl_get_procmon_ref_values(dev_priv, port); + + ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port), + (0xff << 16) | 0xff, procmon->dw1); + ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port), + -1U, procmon->dw9); + ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port), + -1U, procmon->dw10); + + return ret; +} + +static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv) +{ + return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) && + (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT); +}
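cnl_get_procmon_ref_values() above keys the table on the voltage/process fuse fields read from PORT_COMP_DW3, with unknown combinations falling back to the 0.85V/dot-0 entry. A sketch of that selection using illustrative field encodings (the real VOLTAGE_INFO_*/PROCESS_INFO_* values live in i915_reg.h):

#include <assert.h>
#include <stdint.h>

/* illustrative encodings of the DW3 fuse fields */
#define VOLTAGE_MASK  0x3
#define VOLTAGE_0_85V 0x0
#define VOLTAGE_0_95V 0x1
#define VOLTAGE_1_05V 0x2
#define PROCESS_MASK  (0x3 << 2)
#define PROCESS_DOT_0 (0x0 << 2)
#define PROCESS_DOT_1 (0x1 << 2)

enum procmon {
        PROCMON_0_85V_DOT_0,
        PROCMON_0_95V_DOT_0,
        PROCMON_0_95V_DOT_1,
        PROCMON_1_05V_DOT_0,
        PROCMON_1_05V_DOT_1,
};

static enum procmon pick(uint32_t dw3)
{
        switch (dw3 & (VOLTAGE_MASK | PROCESS_MASK)) {
        default: /* unknown fuse combination: fall back, like MISSING_CASE */
        case VOLTAGE_0_85V | PROCESS_DOT_0:
                return PROCMON_0_85V_DOT_0;
        case VOLTAGE_0_95V | PROCESS_DOT_0:
                return PROCMON_0_95V_DOT_0;
        case VOLTAGE_0_95V | PROCESS_DOT_1:
                return PROCMON_0_95V_DOT_1;
        case VOLTAGE_1_05V | PROCESS_DOT_0:
                return PROCMON_1_05V_DOT_0;
        case VOLTAGE_1_05V | PROCESS_DOT_1:
                return PROCMON_1_05V_DOT_1;
        }
}

int main(void)
{
        assert(pick(VOLTAGE_0_95V | PROCESS_DOT_1) == PROCMON_0_95V_DOT_1);
        assert(pick(0xf) == PROCMON_0_85V_DOT_0); /* unrecognized -> fallback */
        return 0;
}

+ +static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv) +{ + enum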
port port = PORT_A; + bool ret; + + if (!cnl_combo_phy_enabled(dev_priv)) + return false; + + ret = cnl_verify_procmon_ref_values(dev_priv, port); + + ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5, + CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE); + + return ret; +} + +void cnl_combo_phys_init(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = I915_READ(CHICKEN_MISC_2); + val &= ~CNL_COMP_PWR_DOWN; + I915_WRITE(CHICKEN_MISC_2, val); + + /* Dummy PORT_A to get the correct CNL register from the ICL macro */ + cnl_set_procmon_ref_values(dev_priv, PORT_A); + + val = I915_READ(CNL_PORT_COMP_DW0); + val |= COMP_INIT; + I915_WRITE(CNL_PORT_COMP_DW0, val); + + val = I915_READ(CNL_PORT_CL1CM_DW5); + val |= CL_POWER_DOWN_ENABLE; + I915_WRITE(CNL_PORT_CL1CM_DW5, val); +} + +void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv) +{ + u32 val; + + if (!cnl_combo_phy_verify_state(dev_priv)) + DRM_WARN("Combo PHY HW state changed unexpectedly.\n"); + + val = I915_READ(CHICKEN_MISC_2); + val |= CNL_COMP_PWR_DOWN; + I915_WRITE(CHICKEN_MISC_2, val); +} + +static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv, + enum port port) +{ + return !(I915_READ(ICL_PHY_MISC(port)) & + ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) && + (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT); +} + +static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, + enum port port) +{ + bool ret; + + if (!icl_combo_phy_enabled(dev_priv, port)) + return false; + + ret = cnl_verify_procmon_ref_values(dev_priv, port); + + ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port), + CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE); + + return ret; +} + +void icl_combo_phys_init(struct drm_i915_private *dev_priv) +{ + enum port port; + + for_each_combo_port(dev_priv, port) { + u32 val; + + if (icl_combo_phy_verify_state(dev_priv, port)) { + DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n", + port_name(port)); + continue; + } + + val = I915_READ(ICL_PHY_MISC(port)); + val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; + I915_WRITE(ICL_PHY_MISC(port), val); + + cnl_set_procmon_ref_values(dev_priv, port); + + val = I915_READ(ICL_PORT_COMP_DW0(port)); + val |= COMP_INIT; + I915_WRITE(ICL_PORT_COMP_DW0(port), val); + + val = I915_READ(ICL_PORT_CL_DW5(port)); + val |= CL_POWER_DOWN_ENABLE; + I915_WRITE(ICL_PORT_CL_DW5(port), val); + } +} + +void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) +{ + enum port port; + + for_each_combo_port_reverse(dev_priv, port) { + u32 val; + + if (!icl_combo_phy_verify_state(dev_priv, port)) + DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n", + port_name(port)); + + val = I915_READ(ICL_PHY_MISC(port)); + val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; + I915_WRITE(ICL_PHY_MISC(port), val); + + val = I915_READ(ICL_PORT_COMP_DW0(port)); + val &= ~COMP_INIT; + I915_WRITE(ICL_PORT_COMP_DW0(port), val); + } +} diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_connector.c index ca44bf368e24..18e370f607bc 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_connector.c @@ -25,11 +25,140 @@ #include <linux/slab.h> #include <linux/i2c.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drmP.h> #include "intel_drv.h" #include "i915_drv.h" +int intel_connector_init(struct intel_connector *connector) +{ + struct intel_digital_connector_state *conn_state; + + /* + * Allocate enough memory to hold intel_digital_connector_state, + * This might be a few bytes too many, but for 
connectors that don't + * need it we'll free the state and allocate a smaller one on the first + * successful commit anyway. + */ + conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL); + if (!conn_state) + return -ENOMEM; + + __drm_atomic_helper_connector_reset(&connector->base, + &conn_state->base); + + return 0; +} + +struct intel_connector *intel_connector_alloc(void) +{ + struct intel_connector *connector; + + connector = kzalloc(sizeof(*connector), GFP_KERNEL); + if (!connector) + return NULL; + + if (intel_connector_init(connector) < 0) { + kfree(connector); + return NULL; + } + + return connector; +} + +/* + * Free the bits allocated by intel_connector_alloc. + * This should only be used after intel_connector_alloc has returned + * successfully, and before drm_connector_init returns successfully. + * Otherwise the destroy callbacks for the connector and the state should + * take care of proper cleanup/free (see intel_connector_destroy). + */ +void intel_connector_free(struct intel_connector *connector) +{ + kfree(to_intel_digital_connector_state(connector->base.state)); + kfree(connector); +} + +/* + * Connector type independent destroy hook for drm_connector_funcs. + */ +void intel_connector_destroy(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + + kfree(intel_connector->detect_edid); + + if (!IS_ERR_OR_NULL(intel_connector->edid)) + kfree(intel_connector->edid); + + intel_panel_fini(&intel_connector->panel); + + drm_connector_cleanup(connector); + kfree(connector); +} + +int intel_connector_register(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + int ret; + + ret = intel_backlight_device_register(intel_connector); + if (ret) + goto err; + + if (i915_inject_load_failure()) { + ret = -EFAULT; + goto err_backlight; + } + + return 0; + +err_backlight: + intel_backlight_device_unregister(intel_connector); +err: + return ret; +} + +void intel_connector_unregister(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + + intel_backlight_device_unregister(intel_connector); +} + +void intel_connector_attach_encoder(struct intel_connector *connector, + struct intel_encoder *encoder) +{ + connector->encoder = encoder; + drm_connector_attach_encoder(&connector->base, &encoder->base); +} + +/* + * Simple connector->get_hw_state implementation for encoders that support only + * one connector and no cloning and hence the encoder state determines the state + * of the connector. 
+ */ +bool intel_connector_get_hw_state(struct intel_connector *connector) +{ + enum pipe pipe = 0; + struct intel_encoder *encoder = connector->encoder; + + return encoder->get_hw_state(encoder, &pipe); +} + +enum pipe intel_connector_get_pipe(struct intel_connector *connector) +{ + struct drm_device *dev = connector->base.dev; + + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + + if (!connector->base.state->crtc) + return INVALID_PIPE; + + return to_intel_crtc(connector->base.state->crtc)->pipe; +} + /** * intel_connector_update_modes - update connector from edid * @connector: DRM connector device to use diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 0c6bf82bb059..68f2fb89ece3 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -354,6 +354,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return false; + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; return true; } @@ -368,6 +369,7 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder, return false; pipe_config->has_pch_encoder = true; + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; return true; } @@ -389,6 +391,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder, return false; pipe_config->has_pch_encoder = true; + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; /* LPT FDI RX only supports 8bpc. */ if (HAS_PCH_LPT(dev_priv)) { @@ -849,12 +852,6 @@ out: return status; } -static void intel_crt_destroy(struct drm_connector *connector) -{ - drm_connector_cleanup(connector); - kfree(connector); -} - static int intel_crt_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; @@ -909,7 +906,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, - .destroy = intel_crt_destroy, + .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, }; diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index d48186e9ddad..a516697bf57d 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -34,34 +34,38 @@ * low-power state and comes back to normal. 
*/ -#define I915_CSR_ICL "i915/icl_dmc_ver1_07.bin" -MODULE_FIRMWARE(I915_CSR_ICL); -#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) +#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE -#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin" -MODULE_FIRMWARE(I915_CSR_GLK); -#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) +#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin" +#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) +#define ICL_CSR_MAX_FW_SIZE 0x6000 +MODULE_FIRMWARE(ICL_CSR_PATH); -#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin" -MODULE_FIRMWARE(I915_CSR_CNL); +#define CNL_CSR_PATH "i915/cnl_dmc_ver1_07.bin" #define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) +#define CNL_CSR_MAX_FW_SIZE GLK_CSR_MAX_FW_SIZE +MODULE_FIRMWARE(CNL_CSR_PATH); + +#define GLK_CSR_PATH "i915/glk_dmc_ver1_04.bin" +#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) +#define GLK_CSR_MAX_FW_SIZE 0x4000 +MODULE_FIRMWARE(GLK_CSR_PATH); -#define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin" -MODULE_FIRMWARE(I915_CSR_KBL); +#define KBL_CSR_PATH "i915/kbl_dmc_ver1_04.bin" #define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) +#define KBL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE +MODULE_FIRMWARE(KBL_CSR_PATH); -#define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin" -MODULE_FIRMWARE(I915_CSR_SKL); +#define SKL_CSR_PATH "i915/skl_dmc_ver1_27.bin" #define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27) +#define SKL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE +MODULE_FIRMWARE(SKL_CSR_PATH); -#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin" -MODULE_FIRMWARE(I915_CSR_BXT); +#define BXT_CSR_PATH "i915/bxt_dmc_ver1_07.bin" #define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) - - #define BXT_CSR_MAX_FW_SIZE 0x3000 -#define GLK_CSR_MAX_FW_SIZE 0x4000 -#define ICL_CSR_MAX_FW_SIZE 0x6000 +MODULE_FIRMWARE(BXT_CSR_PATH); + #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF struct intel_css_header { @@ -190,6 +194,12 @@ static const struct stepping_info bxt_stepping_info[] = { {'B', '0'}, {'B', '1'}, {'B', '2'} }; +static const struct stepping_info icl_stepping_info[] = { + {'A', '0'}, {'A', '1'}, {'A', '2'}, + {'B', '0'}, {'B', '2'}, + {'C', '0'} +}; + static const struct stepping_info no_stepping_info = { '*', '*' }; static const struct stepping_info * @@ -198,7 +208,10 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv) const struct stepping_info *si; unsigned int size; - if (IS_SKYLAKE(dev_priv)) { + if (IS_ICELAKE(dev_priv)) { + size = ARRAY_SIZE(icl_stepping_info); + si = icl_stepping_info; + } else if (IS_SKYLAKE(dev_priv)) { size = ARRAY_SIZE(skl_stepping_info); si = skl_stepping_info; } else if (IS_BROXTON(dev_priv)) { @@ -285,10 +298,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, struct intel_csr *csr = &dev_priv->csr; const struct stepping_info *si = intel_get_stepping_info(dev_priv); uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; - uint32_t max_fw_size = 0; uint32_t i; uint32_t *dmc_payload; - uint32_t required_version; if (!fw) return NULL; @@ -303,38 +314,19 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, return NULL; } - csr->version = css_header->version; - - if (csr->fw_path == i915_modparams.dmc_firmware_path) { - /* Bypass version check for firmware override. 
*/ - required_version = csr->version; - } else if (IS_ICELAKE(dev_priv)) { - required_version = ICL_CSR_VERSION_REQUIRED; - } else if (IS_CANNONLAKE(dev_priv)) { - required_version = CNL_CSR_VERSION_REQUIRED; - } else if (IS_GEMINILAKE(dev_priv)) { - required_version = GLK_CSR_VERSION_REQUIRED; - } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { - required_version = KBL_CSR_VERSION_REQUIRED; - } else if (IS_SKYLAKE(dev_priv)) { - required_version = SKL_CSR_VERSION_REQUIRED; - } else if (IS_BROXTON(dev_priv)) { - required_version = BXT_CSR_VERSION_REQUIRED; - } else { - MISSING_CASE(INTEL_REVID(dev_priv)); - required_version = 0; - } - - if (csr->version != required_version) { + if (csr->required_version && + css_header->version != csr->required_version) { DRM_INFO("Refusing to load DMC firmware v%u.%u," " please use v%u.%u\n", - CSR_VERSION_MAJOR(csr->version), - CSR_VERSION_MINOR(csr->version), - CSR_VERSION_MAJOR(required_version), - CSR_VERSION_MINOR(required_version)); + CSR_VERSION_MAJOR(css_header->version), + CSR_VERSION_MINOR(css_header->version), + CSR_VERSION_MAJOR(csr->required_version), + CSR_VERSION_MINOR(csr->required_version)); return NULL; } + csr->version = css_header->version; + readcount += sizeof(struct intel_css_header); /* Extract Package Header information*/ @@ -402,15 +394,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */ nbytes = dmc_header->fw_size * 4; - if (INTEL_GEN(dev_priv) >= 11) - max_fw_size = ICL_CSR_MAX_FW_SIZE; - else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) - max_fw_size = GLK_CSR_MAX_FW_SIZE; - else if (IS_GEN9(dev_priv)) - max_fw_size = BXT_CSR_MAX_FW_SIZE; - else - MISSING_CASE(INTEL_REVID(dev_priv)); - if (nbytes > max_fw_size) { + if (nbytes > csr->max_fw_size) { DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes); return NULL; } @@ -475,27 +459,57 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) if (!HAS_CSR(dev_priv)) return; - if (i915_modparams.dmc_firmware_path) - csr->fw_path = i915_modparams.dmc_firmware_path; - else if (IS_ICELAKE(dev_priv)) - csr->fw_path = I915_CSR_ICL; - else if (IS_CANNONLAKE(dev_priv)) - csr->fw_path = I915_CSR_CNL; - else if (IS_GEMINILAKE(dev_priv)) - csr->fw_path = I915_CSR_GLK; - else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) - csr->fw_path = I915_CSR_KBL; - else if (IS_SKYLAKE(dev_priv)) - csr->fw_path = I915_CSR_SKL; - else if (IS_BROXTON(dev_priv)) - csr->fw_path = I915_CSR_BXT; - /* - * Obtain a runtime pm reference, until CSR is loaded, - * to avoid entering runtime-suspend. + * Obtain a runtime pm reference, until CSR is loaded, to avoid entering + * runtime-suspend. + * + * On error, we return with the rpm wakeref held to prevent runtime + * suspend as runtime suspend *requires* a working CSR for whatever + * reason. 
*/ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + if (INTEL_GEN(dev_priv) >= 12) { + /* Allow to load fw via parameter using the last known size */ + csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE; + } else if (IS_ICELAKE(dev_priv)) { + csr->fw_path = ICL_CSR_PATH; + csr->required_version = ICL_CSR_VERSION_REQUIRED; + csr->max_fw_size = ICL_CSR_MAX_FW_SIZE; + } else if (IS_CANNONLAKE(dev_priv)) { + csr->fw_path = CNL_CSR_PATH; + csr->required_version = CNL_CSR_VERSION_REQUIRED; + csr->max_fw_size = CNL_CSR_MAX_FW_SIZE; + } else if (IS_GEMINILAKE(dev_priv)) { + csr->fw_path = GLK_CSR_PATH; + csr->required_version = GLK_CSR_VERSION_REQUIRED; + csr->max_fw_size = GLK_CSR_MAX_FW_SIZE; + } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { + csr->fw_path = KBL_CSR_PATH; + csr->required_version = KBL_CSR_VERSION_REQUIRED; + csr->max_fw_size = KBL_CSR_MAX_FW_SIZE; + } else if (IS_SKYLAKE(dev_priv)) { + csr->fw_path = SKL_CSR_PATH; + csr->required_version = SKL_CSR_VERSION_REQUIRED; + csr->max_fw_size = SKL_CSR_MAX_FW_SIZE; + } else if (IS_BROXTON(dev_priv)) { + csr->fw_path = BXT_CSR_PATH; + csr->required_version = BXT_CSR_VERSION_REQUIRED; + csr->max_fw_size = BXT_CSR_MAX_FW_SIZE; + } + + if (i915_modparams.dmc_firmware_path) { + if (strlen(i915_modparams.dmc_firmware_path) == 0) { + csr->fw_path = NULL; + DRM_INFO("Disabling CSR firmware and runtime PM\n"); + return; + } + + csr->fw_path = i915_modparams.dmc_firmware_path; + /* Bypass version check for firmware override. */ + csr->required_version = 0; + } + if (csr->fw_path == NULL) { DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n"); WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv))); diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 5186cd7075f9..f3e1d6a0b7dd 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -28,6 +28,7 @@ #include <drm/drm_scdc_helper.h> #include "i915_drv.h" #include "intel_drv.h" +#include "intel_dsi.h" struct ddi_buf_trans { u32 trans1; /* balance leg enable, de-emph level */ @@ -642,7 +643,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) static const struct ddi_buf_trans * kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) { - if (IS_KBL_ULX(dev_priv)) { + if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp); return kbl_y_ddi_translations_dp; } else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) { @@ -658,7 +659,7 @@ static const struct ddi_buf_trans * skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { if (dev_priv->vbt.edp.low_vswing) { - if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { + if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); return skl_y_ddi_translations_edp; } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) || @@ -680,7 +681,7 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) static const struct ddi_buf_trans * skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) { - if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { + if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); return skl_y_ddi_translations_hdmi; } else { @@ -1060,10 +1061,10 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) } static uint32_t icl_pll_to_ddi_pll_sel(struct 
intel_encoder *encoder, - const struct intel_shared_dpll *pll) + const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); - int clock = crtc->config->port_clock; + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + int clock = crtc_state->port_clock; const enum intel_dpll_id id = pll->info->id; switch (id) { @@ -1363,8 +1364,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, return dco_freq / (p0 * p1 * p2 * 5); } -static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, - enum intel_dpll_id pll_id) +int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, + enum intel_dpll_id pll_id) { uint32_t cfgcr0, cfgcr1; uint32_t p0, p1, p2, dco_freq, ref_clock; @@ -1517,7 +1518,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) else dotclock = pipe_config->port_clock; - if (pipe_config->ycbcr420) + if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) dotclock *= 2; if (pipe_config->pixel_multiplier) @@ -1737,16 +1738,16 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (INTEL_GEN(dev_priv) <= 8) - hsw_ddi_clock_get(encoder, pipe_config); - else if (IS_GEN9_BC(dev_priv)) - skl_ddi_clock_get(encoder, pipe_config); - else if (IS_GEN9_LP(dev_priv)) - bxt_ddi_clock_get(encoder, pipe_config); + if (IS_ICELAKE(dev_priv)) + icl_ddi_clock_get(encoder, pipe_config); else if (IS_CANNONLAKE(dev_priv)) cnl_ddi_clock_get(encoder, pipe_config); - else if (IS_ICELAKE(dev_priv)) - icl_ddi_clock_get(encoder, pipe_config); + else if (IS_GEN9_LP(dev_priv)) + bxt_ddi_clock_get(encoder, pipe_config); + else if (IS_GEN9_BC(dev_priv)) + skl_ddi_clock_get(encoder, pipe_config); + else if (INTEL_GEN(dev_priv) <= 8) + hsw_ddi_clock_get(encoder, pipe_config); } void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) @@ -1784,6 +1785,13 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) break; } + /* + * As per DP 1.2 spec section 2.3.4.3 while sending + * YCBCR 444 signals we should program MSA MISC1/0 fields with + * colorspace information. The output colorspace encoding is BT601. 
+ */ + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) + temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR; I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp); } @@ -1998,24 +2006,24 @@ out: return ret; } -bool intel_ddi_get_hw_state(struct intel_encoder *encoder, - enum pipe *pipe) +static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, + u8 *pipe_mask, bool *is_dp_mst) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum port port = encoder->port; enum pipe p; u32 tmp; - bool ret; + u8 mst_pipe_mask; + + *pipe_mask = 0; + *is_dp_mst = false; if (!intel_display_power_get_if_enabled(dev_priv, encoder->power_domain)) - return false; - - ret = false; + return; tmp = I915_READ(DDI_BUF_CTL(port)); - if (!(tmp & DDI_BUF_CTL_ENABLE)) goto out; @@ -2023,44 +2031,58 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { + default: + MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK); + /* fallthrough */ case TRANS_DDI_EDP_INPUT_A_ON: case TRANS_DDI_EDP_INPUT_A_ONOFF: - *pipe = PIPE_A; + *pipe_mask = BIT(PIPE_A); break; case TRANS_DDI_EDP_INPUT_B_ONOFF: - *pipe = PIPE_B; + *pipe_mask = BIT(PIPE_B); break; case TRANS_DDI_EDP_INPUT_C_ONOFF: - *pipe = PIPE_C; + *pipe_mask = BIT(PIPE_C); break; } - ret = true; - goto out; } + mst_pipe_mask = 0; for_each_pipe(dev_priv, p) { - enum transcoder cpu_transcoder = (enum transcoder) p; + enum transcoder cpu_transcoder = (enum transcoder)p; tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); - if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) { - if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == - TRANS_DDI_MODE_SELECT_DP_MST) - goto out; + if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port)) + continue; - *pipe = p; - ret = true; + if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == + TRANS_DDI_MODE_SELECT_DP_MST) + mst_pipe_mask |= BIT(p); - goto out; - } + *pipe_mask |= BIT(p); + } + + if (!*pipe_mask) + DRM_DEBUG_KMS("No pipe for ddi port %c found\n", + port_name(port)); + + if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) { + DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n", + port_name(port), *pipe_mask); + *pipe_mask = BIT(ffs(*pipe_mask) - 1); } - DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); + if (mst_pipe_mask && mst_pipe_mask != *pipe_mask) + DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n", + port_name(port), *pipe_mask, mst_pipe_mask); + else + *is_dp_mst = mst_pipe_mask; out: - if (ret && IS_GEN9_LP(dev_priv)) { + if (*pipe_mask && IS_GEN9_LP(dev_priv)) { tmp = I915_READ(BXT_PHY_CTL(port)); if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK | @@ -2070,12 +2092,26 @@ out: } intel_display_power_put(dev_priv, encoder->power_domain); +} - return ret; +bool intel_ddi_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + u8 pipe_mask; + bool is_mst; + + intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst); + + if (is_mst || !pipe_mask) + return false; + + *pipe = ffs(pipe_mask) - 1; + + return true; } static inline enum intel_display_power_domain -intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp) +intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port) { /* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with * DC states enabled at the same time, while for driver initiated AUX @@ -2089,13 +2125,14 @@ 
intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp) * Note that PSR is enabled only on Port A even though this function * returns the correct domain for other ports too. */ - return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A : - intel_dp->aux_power_domain; + return dig_port->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A : + intel_aux_power_domain(dig_port); } static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port; u64 domains; @@ -2110,12 +2147,19 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, dig_port = enc_to_dig_port(&encoder->base); domains = BIT_ULL(dig_port->ddi_io_power_domain); - /* AUX power is only needed for (e)DP mode, not for HDMI. */ - if (intel_crtc_has_dp_encoder(crtc_state)) { - struct intel_dp *intel_dp = &dig_port->dp; + /* + * AUX power is only needed for (e)DP mode, and for HDMI mode on TC + * ports. + */ + if (intel_crtc_has_dp_encoder(crtc_state) || + intel_port_is_tc(dev_priv, encoder->port)) + domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port)); - domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp)); - } + /* + * VDSC power is needed when DSC is enabled + */ + if (crtc_state->dsc_params.compression_enable) + domains |= BIT_ULL(intel_dsc_power_domain(crtc_state)); return domains; } @@ -2748,77 +2792,130 @@ uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, return 0; } -void icl_map_plls_to_ports(struct drm_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct drm_atomic_state *old_state) +static void icl_map_plls_to_ports(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_shared_dpll *pll = crtc_state->shared_dpll; - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct drm_connector_state *conn_state; - struct drm_connector *conn; - int i; + enum port port = encoder->port; + u32 val; - for_each_new_connector_in_state(old_state, conn, conn_state, i) { - struct intel_encoder *encoder = - to_intel_encoder(conn_state->best_encoder); - enum port port; - uint32_t val; + mutex_lock(&dev_priv->dpll_lock); - if (conn_state->crtc != crtc) - continue; + val = I915_READ(DPCLKA_CFGCR0_ICL); + WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0); - port = encoder->port; - mutex_lock(&dev_priv->dpll_lock); + if (intel_port_is_combophy(dev_priv, port)) { + val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); + val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); + I915_WRITE(DPCLKA_CFGCR0_ICL, val); + POSTING_READ(DPCLKA_CFGCR0_ICL); + } - val = I915_READ(DPCLKA_CFGCR0_ICL); - WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0); + val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port); + I915_WRITE(DPCLKA_CFGCR0_ICL, val); - if (intel_port_is_combophy(dev_priv, port)) { - val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); - val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); - I915_WRITE(DPCLKA_CFGCR0_ICL, val); - POSTING_READ(DPCLKA_CFGCR0_ICL); - } + mutex_unlock(&dev_priv->dpll_lock); +} - val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port); - I915_WRITE(DPCLKA_CFGCR0_ICL, val); +static void icl_unmap_plls_to_ports(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + u32 val; - mutex_unlock(&dev_priv->dpll_lock); - } + 
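Power domain collection here is additive: DDI IO is always taken, the AUX domain now also covers HDMI on Type-C ports, and a VDSC domain joins only when compression is on. A condensed sketch of the aggregation (bit positions are stand-ins for the driver's power domain enum):

    #include <stdbool.h>
    #include <stdint.h>

    enum { DOM_DDI_IO, DOM_AUX, DOM_VDSC }; /* illustrative positions */

    static uint64_t ddi_power_domains(bool has_dp_encoder, bool is_tc_port,
                                      bool dsc_enabled)
    {
        uint64_t domains = 1ull << DOM_DDI_IO;   /* always needed */

        /* AUX now also covers HDMI on Type-C ports, not just (e)DP */
        if (has_dp_encoder || is_tc_port)
            domains |= 1ull << DOM_AUX;

        /* the VDSC power well only joins when DSC is enabled */
        if (dsc_enabled)
            domains |= 1ull << DOM_VDSC;

        return domains;
    }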
mutex_lock(&dev_priv->dpll_lock); + + val = I915_READ(DPCLKA_CFGCR0_ICL); + val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port); + I915_WRITE(DPCLKA_CFGCR0_ICL, val); + + mutex_unlock(&dev_priv->dpll_lock); } -void icl_unmap_plls_to_ports(struct drm_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct drm_atomic_state *old_state) +void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct drm_connector_state *old_conn_state; - struct drm_connector *conn; - int i; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 val; + enum port port; + u32 port_mask; + bool ddi_clk_needed; + + /* + * In case of DP MST, we sanitize the primary encoder only, not the + * virtual ones. + */ + if (encoder->type == INTEL_OUTPUT_DP_MST) + return; - for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { - struct intel_encoder *encoder = - to_intel_encoder(old_conn_state->best_encoder); - enum port port; + if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) { + u8 pipe_mask; + bool is_mst; - if (old_conn_state->crtc != crtc) + intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst); + /* + * In the unlikely case that BIOS enables DP in MST mode, just + * warn since our MST HW readout is incomplete. + */ + if (WARN_ON(is_mst)) + return; + } + + port_mask = BIT(encoder->port); + ddi_clk_needed = encoder->base.crtc; + + if (encoder->type == INTEL_OUTPUT_DSI) { + struct intel_encoder *other_encoder; + + port_mask = intel_dsi_encoder_ports(encoder); + /* + * Sanity check that we haven't incorrectly registered another + * encoder using any of the ports of this DSI encoder. + */ + for_each_intel_encoder(&dev_priv->drm, other_encoder) { + if (other_encoder == encoder) + continue; + + if (WARN_ON(port_mask & BIT(other_encoder->port))) + return; + } + /* + * DSI ports should have their DDI clock ungated when disabled + * and gated when enabled. + */ + ddi_clk_needed = !encoder->base.crtc; + } + + val = I915_READ(DPCLKA_CFGCR0_ICL); + for_each_port_masked(port, port_mask) { + bool ddi_clk_ungated = !(val & + icl_dpclka_cfgcr0_clk_off(dev_priv, + port)); + + if (ddi_clk_needed == ddi_clk_ungated) continue; - port = encoder->port; - mutex_lock(&dev_priv->dpll_lock); - I915_WRITE(DPCLKA_CFGCR0_ICL, - I915_READ(DPCLKA_CFGCR0_ICL) | - icl_dpclka_cfgcr0_clk_off(dev_priv, port)); - mutex_unlock(&dev_priv->dpll_lock); + /* + * Punt on the case now where clock is gated, but it would + * be needed by the port. Something else is really broken then. + */ + if (WARN_ON(ddi_clk_needed)) + continue; + + DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n", + port_name(port)); + val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port); + I915_WRITE(DPCLKA_CFGCR0_ICL, val); } } static void intel_ddi_clk_select(struct intel_encoder *encoder, - const struct intel_shared_dpll *pll) + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; uint32_t val; + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; if (WARN_ON(!pll)) return; @@ -2828,7 +2925,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, if (IS_ICELAKE(dev_priv)) { if (!intel_port_is_combophy(dev_priv, port)) I915_WRITE(DDI_CLK_SEL(port), - icl_pll_to_ddi_pll_sel(encoder, pll)); + icl_pll_to_ddi_pll_sel(encoder, crtc_state)); } else if (IS_CANNONLAKE(dev_priv)) { /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. 
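The sanitize pass boils down to a two-state comparison per port: should the DDI clock be running, and is it currently ungated. Only the "ungated but unneeded" case is repaired; the reverse is merely warned about. A stand-alone model of one port's decision (hypothetical names, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    static void sanitize_port_clock(char port, bool clk_needed, bool clk_ungated)
    {
        if (clk_needed == clk_ungated)
            return;                     /* hardware already consistent */

        if (clk_needed) {
            /* gated but needed: the WARN_ON() case, left alone */
            fprintf(stderr, "port %c: DDI clock gated but needed\n", port);
            return;
        }

        /* ungated but unneeded: the only case the driver repairs */
        printf("port %c: gating leftover DDI clock\n", port);
    }

For instance, sanitize_port_clock('A', false, true) models a disabled encoder whose clock the BIOS left ungated.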
*/ val = I915_READ(DPCLKA_CFGCR0); @@ -2881,6 +2978,184 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder) } } +static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; + u32 val; + int i; + + if (tc_port == PORT_TC_NONE) + return; + + for (i = 0; i < ARRAY_SIZE(mg_regs); i++) { + val = I915_READ(mg_regs[i]); + val |= MG_DP_MODE_CFG_TR2PWR_GATING | + MG_DP_MODE_CFG_TRPWR_GATING | + MG_DP_MODE_CFG_CLNPWR_GATING | + MG_DP_MODE_CFG_DIGPWR_GATING | + MG_DP_MODE_CFG_GAONPWR_GATING; + I915_WRITE(mg_regs[i], val); + } + + val = I915_READ(MG_MISC_SUS0(tc_port)); + val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) | + MG_MISC_SUS0_CFG_TR2PWR_GATING | + MG_MISC_SUS0_CFG_CL2PWR_GATING | + MG_MISC_SUS0_CFG_GAONPWR_GATING | + MG_MISC_SUS0_CFG_TRPWR_GATING | + MG_MISC_SUS0_CFG_CL1PWR_GATING | + MG_MISC_SUS0_CFG_DGPWR_GATING; + I915_WRITE(MG_MISC_SUS0(tc_port), val); +} + +static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; + u32 val; + int i; + + if (tc_port == PORT_TC_NONE) + return; + + for (i = 0; i < ARRAY_SIZE(mg_regs); i++) { + val = I915_READ(mg_regs[i]); + val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING | + MG_DP_MODE_CFG_TRPWR_GATING | + MG_DP_MODE_CFG_CLNPWR_GATING | + MG_DP_MODE_CFG_DIGPWR_GATING | + MG_DP_MODE_CFG_GAONPWR_GATING); + I915_WRITE(mg_regs[i], val); + } + + val = I915_READ(MG_MISC_SUS0(tc_port)); + val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK | + MG_MISC_SUS0_CFG_TR2PWR_GATING | + MG_MISC_SUS0_CFG_CL2PWR_GATING | + MG_MISC_SUS0_CFG_GAONPWR_GATING | + MG_MISC_SUS0_CFG_TRPWR_GATING | + MG_MISC_SUS0_CFG_CL1PWR_GATING | + MG_MISC_SUS0_CFG_DGPWR_GATING); + I915_WRITE(MG_MISC_SUS0(tc_port), val); +} + +static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); + enum port port = intel_dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + u32 ln0, ln1, lane_info; + + if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT) + return; + + ln0 = I915_READ(MG_DP_MODE(port, 0)); + ln1 = I915_READ(MG_DP_MODE(port, 1)); + + switch (intel_dig_port->tc_type) { + case TC_PORT_TYPEC: + ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); + ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); + + lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & + DP_LANE_ASSIGNMENT_MASK(tc_port)) >> + DP_LANE_ASSIGNMENT_SHIFT(tc_port); + + switch (lane_info) { + case 0x1: + case 0x4: + break; + case 0x2: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; + break; + case 0x3: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + break; + case 0x8: + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; + break; + case 0xC: + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + break; + case 0xF: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + break; + default: + MISSING_CASE(lane_info); + } + break; + + case TC_PORT_LEGACY: + ln0 |= 
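The lane_info nibble read back from PORT_TX_DFLEXDPSP encodes which of the four Type-C lanes the FIA assigned; the low two bits belong to MG PHY instance 0 and the high two to instance 1. A stand-alone decode of the same switch (plain C, the struct is illustrative):

    struct mg_lane_modes {
        unsigned int ln0_x1 : 1, ln0_x2 : 1;
        unsigned int ln1_x1 : 1, ln1_x2 : 1;
    };

    static struct mg_lane_modes decode_lane_info(unsigned int lane_info)
    {
        struct mg_lane_modes m = {0};

        switch (lane_info & 0xf) {
        case 0x1: case 0x4:
            break;                          /* single lane, no override set */
        case 0x2:
            m.ln0_x1 = 1;
            break;
        case 0x3:
            m.ln0_x1 = m.ln0_x2 = 1;
            break;
        case 0x8:
            m.ln1_x1 = 1;
            break;
        case 0xC:
            m.ln1_x1 = m.ln1_x2 = 1;
            break;
        case 0xF:
            m.ln0_x1 = m.ln0_x2 = 1;
            m.ln1_x1 = m.ln1_x2 = 1;
            break;
        default:
            break;                          /* MISSING_CASE() in the driver */
        }
        return m;
    }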
MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; + break; + + default: + MISSING_CASE(intel_dig_port->tc_type); + return; + } + + I915_WRITE(MG_DP_MODE(port, 0), ln0); + I915_WRITE(MG_DP_MODE(port, 1), ln1); +} + +static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->fec_enable) + return; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0) + DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n"); +} + +static void intel_ddi_enable_fec(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + u32 val; + + if (!crtc_state->fec_enable) + return; + + val = I915_READ(DP_TP_CTL(port)); + val |= DP_TP_CTL_FEC_ENABLE; + I915_WRITE(DP_TP_CTL(port), val); + + if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port), + DP_TP_STATUS_FEC_ENABLE_LIVE, + DP_TP_STATUS_FEC_ENABLE_LIVE, + 1)) + DRM_ERROR("Timed out waiting for FEC Enable Status\n"); +} + +static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + u32 val; + + if (!crtc_state->fec_enable) + return; + + val = I915_READ(DP_TP_CTL(port)); + val &= ~DP_TP_CTL_FEC_ENABLE; + I915_WRITE(DP_TP_CTL(port), val); + POSTING_READ(DP_TP_CTL(port)); +} + static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) @@ -2894,19 +3169,16 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); - intel_display_power_get(dev_priv, - intel_ddi_main_link_aux_domain(intel_dp)); - intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count, is_mst); intel_edp_panel_on(intel_dp); - intel_ddi_clk_select(encoder, crtc_state->shared_dpll); + intel_ddi_clk_select(encoder, crtc_state); intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); - icl_program_mg_dp_mode(intel_dp); + icl_program_mg_dp_mode(dig_port); icl_disable_phy_clock_gating(dig_port); if (IS_ICELAKE(dev_priv)) @@ -2922,14 +3194,21 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_ddi_init_dp_buf_reg(encoder); if (!is_mst) intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_sink_set_decompression_state(intel_dp, crtc_state, + true); + intel_dp_sink_set_fec_ready(intel_dp, crtc_state); intel_dp_start_link_train(intel_dp); if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) intel_dp_stop_link_train(intel_dp); + intel_ddi_enable_fec(encoder, crtc_state); + icl_enable_phy_clock_gating(dig_port); if (!is_mst) intel_ddi_enable_pipe_clock(crtc_state); + + intel_dsc_enable(encoder, crtc_state); } static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, @@ -2944,10 +3223,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); - intel_ddi_clk_select(encoder, crtc_state->shared_dpll); + intel_ddi_clk_select(encoder, crtc_state); intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); + icl_program_mg_dp_mode(dig_port); + icl_disable_phy_clock_gating(dig_port); + if 
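FEC bring-up is split across the link: the sink is armed over DPCD (DP_FEC_CONFIGURATION set to DP_FEC_READY) before link training, while the source side (DP_TP_CTL) is enabled and polled for FEC_ENABLE_LIVE only afterwards. A condensed ordering model, where all helpers are illustrative stubs rather than driver functions:

    #include <stdbool.h>
    #include <stdio.h>

    static void sink_set_fec_ready(void)
    {
        puts("DPCD: DP_FEC_CONFIGURATION <- DP_FEC_READY");
    }

    static void link_train(void)
    {
        puts("link training");
    }

    static void source_enable_fec(void)
    {
        puts("DP_TP_CTL |= FEC_ENABLE, poll DP_TP_STATUS for ENABLE_LIVE");
    }

    static void bring_up_link(bool fec_enable)
    {
        if (fec_enable)
            sink_set_fec_ready();   /* must precede training */
        link_train();
        if (fec_enable)
            source_enable_fec();    /* only after training completes */
    }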
(IS_ICELAKE(dev_priv)) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, INTEL_OUTPUT_HDMI); @@ -2958,12 +3240,14 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, else intel_prepare_hdmi_ddi_buffers(encoder, level); + icl_enable_phy_clock_gating(dig_port); + if (IS_GEN9_BC(dev_priv)) skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI); intel_ddi_enable_pipe_clock(crtc_state); - intel_dig_port->set_infoframes(&encoder->base, + intel_dig_port->set_infoframes(encoder, crtc_state->has_infoframe, crtc_state, conn_state); } @@ -2991,15 +3275,31 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder, WARN_ON(crtc_state->has_pch_encoder); + if (INTEL_GEN(dev_priv) >= 11) + icl_map_plls_to_ports(encoder, crtc_state); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state); - else + } else { + struct intel_lspcon *lspcon = + enc_to_intel_lspcon(&encoder->base); + intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state); + if (lspcon->active) { + struct intel_digital_port *dig_port = + enc_to_dig_port(&encoder->base); + + dig_port->set_infoframes(encoder, + crtc_state->has_infoframe, + crtc_state, conn_state); + } + } } -static void intel_disable_ddi_buf(struct intel_encoder *encoder) +static void intel_disable_ddi_buf(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; @@ -3018,6 +3318,9 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder) val |= DP_TP_CTL_LINK_TRAIN_PAT1; I915_WRITE(DP_TP_CTL(port), val); + /* Disable FEC in DP Sink */ + intel_ddi_disable_fec_state(encoder, crtc_state); + if (wait) intel_wait_ddi_buf_idle(dev_priv, port); } @@ -3041,7 +3344,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); } - intel_disable_ddi_buf(encoder); + intel_disable_ddi_buf(encoder, old_crtc_state); intel_edp_panel_vdd_on(intel_dp); intel_edp_panel_off(intel_dp); @@ -3049,9 +3352,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); intel_ddi_clk_disable(encoder); - - intel_display_power_put(dev_priv, - intel_ddi_main_link_aux_domain(intel_dp)); } static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, @@ -3062,12 +3362,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; - dig_port->set_infoframes(&encoder->base, false, + dig_port->set_infoframes(encoder, false, old_crtc_state, old_conn_state); intel_ddi_disable_pipe_clock(old_crtc_state); - intel_disable_ddi_buf(encoder); + intel_disable_ddi_buf(encoder, old_crtc_state); intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); @@ -3080,6 +3380,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + /* * When called from DP MST code: * - old_conn_state will be NULL @@ -3099,6 +3401,9 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder, else intel_ddi_post_disable_dp(encoder, 
old_crtc_state, old_conn_state); + + if (INTEL_GEN(dev_priv) >= 11) + icl_unmap_plls_to_ports(encoder); } void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, @@ -3118,7 +3423,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, val &= ~FDI_RX_ENABLE; I915_WRITE(FDI_RX_CTL(PIPE_A), val); - intel_disable_ddi_buf(encoder); + intel_disable_ddi_buf(encoder, old_crtc_state); intel_ddi_clk_disable(encoder); val = I915_READ(FDI_RX_MISC(PIPE_A)); @@ -3154,6 +3459,26 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder, intel_audio_codec_enable(encoder, crtc_state, conn_state); } +static i915_reg_t +gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv, + enum port port) +{ + static const i915_reg_t regs[] = { + [PORT_A] = CHICKEN_TRANS_EDP, + [PORT_B] = CHICKEN_TRANS_A, + [PORT_C] = CHICKEN_TRANS_B, + [PORT_D] = CHICKEN_TRANS_C, + [PORT_E] = CHICKEN_TRANS_A, + }; + + WARN_ON(INTEL_GEN(dev_priv) < 9); + + if (WARN_ON(port < PORT_A || port > PORT_E)) + port = PORT_A; + + return regs[port]; +} + static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) @@ -3177,17 +3502,10 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, * the bits affect a specific DDI port rather than * a specific transcoder. */ - static const enum transcoder port_to_transcoder[] = { - [PORT_A] = TRANSCODER_EDP, - [PORT_B] = TRANSCODER_A, - [PORT_C] = TRANSCODER_B, - [PORT_D] = TRANSCODER_C, - [PORT_E] = TRANSCODER_A, - }; - enum transcoder transcoder = port_to_transcoder[port]; + i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port); u32 val; - val = I915_READ(CHICKEN_TRANS(transcoder)); + val = I915_READ(reg); if (port == PORT_E) val |= DDIE_TRAINING_OVERRIDE_ENABLE | @@ -3196,8 +3514,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, val |= DDI_TRAINING_OVERRIDE_ENABLE | DDI_TRAINING_OVERRIDE_VALUE; - I915_WRITE(CHICKEN_TRANS(transcoder), val); - POSTING_READ(CHICKEN_TRANS(transcoder)); + I915_WRITE(reg, val); + POSTING_READ(reg); udelay(1); @@ -3208,7 +3526,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, val &= ~(DDI_TRAINING_OVERRIDE_ENABLE | DDI_TRAINING_OVERRIDE_VALUE); - I915_WRITE(CHICKEN_TRANS(transcoder), val); + I915_WRITE(reg, val); } /* In HDMI/DVI mode, the port width, and swing/emphasis values @@ -3252,6 +3570,9 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder, intel_edp_drrs_disable(intel_dp, old_crtc_state); intel_psr_disable(intel_dp, old_crtc_state); intel_edp_backlight_off(old_conn_state); + /* Disable the decompression in DP Sink */ + intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state, + false); } static void intel_disable_ddi_hdmi(struct intel_encoder *encoder, @@ -3282,13 +3603,76 @@ static void intel_disable_ddi(struct intel_encoder *encoder, intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state); } -static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config, - const struct drm_connector_state *conn_state) +static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + enum port port) { - uint8_t mask = pipe_config->lane_lat_optim_mask; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + u32 val = 
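The chicken-bit lookup above replaces the old transcoder-indexed access because the override bits follow the DDI port rather than whichever transcoder the port happens to drive; ports B and E land on the same register. A stand-alone model of the table and its clamping (the register tokens are stand-ins for the i915_reg_t values):

    #include <stdio.h>

    enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_COUNT };
    enum chicken_reg { CHICKEN_TRANS_EDP_R, CHICKEN_TRANS_A_R,
                       CHICKEN_TRANS_B_R, CHICKEN_TRANS_C_R };

    static enum chicken_reg chicken_reg_by_port(enum port port)
    {
        static const enum chicken_reg regs[PORT_COUNT] = {
            [PORT_A] = CHICKEN_TRANS_EDP_R,
            [PORT_B] = CHICKEN_TRANS_A_R,
            [PORT_C] = CHICKEN_TRANS_B_R,
            [PORT_D] = CHICKEN_TRANS_C_R,
            [PORT_E] = CHICKEN_TRANS_A_R,  /* B and E share a register */
        };

        if (port < PORT_A || port > PORT_E) {
            fprintf(stderr, "bad port %d\n", (int)port); /* WARN_ON() case */
            port = PORT_A;         /* clamp instead of indexing past the table */
        }
        return regs[port];
    }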
I915_READ(PORT_TX_DFLEXDPMLE1); + bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + + val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port); + switch (pipe_config->lane_count) { + case 1: + val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) : + DFLEXDPMLE1_DPMLETC_ML0(tc_port); + break; + case 2: + val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) : + DFLEXDPMLE1_DPMLETC_ML1_0(tc_port); + break; + case 4: + val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port); + break; + default: + MISSING_CASE(pipe_config->lane_count); + } + I915_WRITE(PORT_TX_DFLEXDPMLE1, val); +} - bxt_ddi_phy_set_lane_optim_mask(encoder, mask); +static void +intel_ddi_pre_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + enum port port = encoder->port; + + if (intel_crtc_has_dp_encoder(crtc_state) || + intel_port_is_tc(dev_priv, encoder->port)) + intel_display_power_get(dev_priv, + intel_ddi_main_link_aux_domain(dig_port)); + + if (IS_GEN9_LP(dev_priv)) + bxt_ddi_phy_set_lane_optim_mask(encoder, + crtc_state->lane_lat_optim_mask); + + /* + * Program the lane count for static/dynamic connections on Type-C ports. + * Skip this step for TBT. + */ + if (dig_port->tc_type == TC_PORT_UNKNOWN || + dig_port->tc_type == TC_PORT_TBT) + return; + + intel_ddi_set_fia_lane_count(encoder, crtc_state, port); +} + +static void +intel_ddi_post_pll_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + + if (intel_crtc_has_dp_encoder(crtc_state) || + intel_port_is_tc(dev_priv, encoder->port)) + intel_display_power_put(dev_priv, + intel_ddi_main_link_aux_domain(dig_port)); } void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) @@ -3353,10 +3737,10 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state) { - if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000) - crtc_state->min_voltage_level = 2; - else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000) + if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 1; + else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000) + crtc_state->min_voltage_level = 2; } void intel_ddi_get_config(struct intel_encoder *encoder, @@ -3406,7 +3790,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->has_hdmi_sink = true; intel_dig_port = enc_to_dig_port(&encoder->base); - if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config)) + if (intel_dig_port->infoframe_enabled(encoder, pipe_config)) pipe_config->has_infoframe = true; if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) == @@ -3767,6 +4151,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) struct intel_encoder *intel_encoder; struct drm_encoder *encoder; bool init_hdmi, init_dp, init_lspcon = false; + enum pipe pipe; init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || @@ -3805,8 +4190,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_encoder->compute_output_type = 
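The FIA main-link lane selection depends on both the configured lane count and lane reversal: x1 and x2 configurations take the high end of the link when reversed and the low end otherwise, while x4 always takes everything. A compact model, with lanes ML0-ML3 represented as bits 0-3 purely for illustration:

    #include <stdbool.h>

    static unsigned int fia_main_link_lanes(int lane_count, bool lane_reversal)
    {
        switch (lane_count) {
        case 1:
            return lane_reversal ? 0x8 : 0x1;   /* ML3 vs ML0 */
        case 2:
            return lane_reversal ? 0xC : 0x3;   /* ML3_2 vs ML1_0 */
        case 4:
            return 0xF;                         /* ML3_0, reversal moot */
        default:
            return 0;                           /* MISSING_CASE() */
        }
    }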
intel_ddi_compute_output_type; intel_encoder->compute_config = intel_ddi_compute_config; intel_encoder->enable = intel_enable_ddi; - if (IS_GEN9_LP(dev_priv)) - intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable; + intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable; + intel_encoder->post_pll_disable = intel_ddi_post_pll_disable; intel_encoder->pre_enable = intel_ddi_pre_enable; intel_encoder->disable = intel_disable_ddi; intel_encoder->post_disable = intel_ddi_post_disable; @@ -3817,8 +4202,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_encoder->type = INTEL_OUTPUT_DDI; intel_encoder->power_domain = intel_port_to_power_domain(port); intel_encoder->port = port; - intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->cloneable = 0; + for_each_pipe(dev_priv, pipe) + intel_encoder->crtc_mask |= BIT(pipe); if (INTEL_GEN(dev_priv) >= 11) intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & @@ -3828,6 +4214,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); intel_dig_port->dp.output_reg = INVALID_MMIO_REG; intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port); + intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); switch (port) { case PORT_A: @@ -3858,8 +4245,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) MISSING_CASE(port); } - intel_infoframe_init(intel_dig_port); - if (init_dp) { if (!intel_ddi_init_dp_connector(intel_dig_port)) goto err; @@ -3888,6 +4273,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) port_name(port)); } + intel_infoframe_init(intel_dig_port); return; err: diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 0ef0c6448d53..1e56319334f3 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -77,6 +77,10 @@ void intel_device_info_dump_flags(const struct intel_device_info *info, #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name)); DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); #undef PRINT_FLAG + +#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name)); + DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG); +#undef PRINT_FLAG } static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) @@ -474,7 +478,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) u8 eu_disabled_mask; u32 n_disabled; - if (!(sseu->subslice_mask[ss] & BIT(ss))) + if (!(sseu->subslice_mask[s] & BIT(ss))) /* skip disabled subslice */ continue; @@ -744,27 +748,30 @@ void intel_device_info_runtime_init(struct intel_device_info *info) if (INTEL_GEN(dev_priv) >= 10) { for_each_pipe(dev_priv, pipe) info->num_scalers[pipe] = 2; - } else if (INTEL_GEN(dev_priv) == 9) { + } else if (IS_GEN9(dev_priv)) { info->num_scalers[PIPE_A] = 2; info->num_scalers[PIPE_B] = 2; info->num_scalers[PIPE_C] = 1; } - BUILD_BUG_ON(I915_NUM_ENGINES > - sizeof(intel_ring_mask_t) * BITS_PER_BYTE); + BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t)); - /* - * Skylake and Broxton currently don't expose the topmost plane as its - * use is exclusive with the legacy cursor and we only want to expose - * one of those, not both. Until we can safely expose the topmost plane - * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported, - * we don't expose the topmost plane at all to prevent ABI breakage - * down the line. 
- */ - if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv)) + if (IS_GEN11(dev_priv)) + for_each_pipe(dev_priv, pipe) + info->num_sprites[pipe] = 6; + else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv)) for_each_pipe(dev_priv, pipe) info->num_sprites[pipe] = 3; else if (IS_BROXTON(dev_priv)) { + /* + * Skylake and Broxton currently don't expose the topmost plane as its + * use is exclusive with the legacy cursor and we only want to expose + * one of those, not both. Until we can safely expose the topmost plane + * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported, + * we don't expose the topmost plane at all to prevent ABI breakage + * down the line. + */ + info->num_sprites[PIPE_A] = 2; info->num_sprites[PIPE_B] = 2; info->num_sprites[PIPE_C] = 1; @@ -779,7 +786,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info) if (i915_modparams.disable_display) { DRM_INFO("Display disabled (module parameter)\n"); info->num_pipes = 0; - } else if (info->num_pipes > 0 && + } else if (HAS_DISPLAY(dev_priv) && (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) && HAS_PCH_SPLIT(dev_priv)) { u32 fuse_strap = I915_READ(FUSE_STRAP); @@ -804,7 +811,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info) DRM_INFO("PipeC fused off\n"); info->num_pipes -= 1; } - } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) { + } else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) { u32 dfsm = I915_READ(SKL_DFSM); u8 disabled_mask = 0; bool invalid; @@ -844,13 +851,18 @@ void intel_device_info_runtime_init(struct intel_device_info *info) cherryview_sseu_info_init(dev_priv); else if (IS_BROADWELL(dev_priv)) broadwell_sseu_info_init(dev_priv); - else if (INTEL_GEN(dev_priv) == 9) + else if (IS_GEN9(dev_priv)) gen9_sseu_info_init(dev_priv); - else if (INTEL_GEN(dev_priv) == 10) + else if (IS_GEN10(dev_priv)) gen10_sseu_info_init(dev_priv); else if (INTEL_GEN(dev_priv) >= 11) gen11_sseu_info_init(dev_priv); + if (IS_GEN6(dev_priv) && intel_vtd_active()) { + DRM_INFO("Disabling ppGTT for VT-d support\n"); + info->ppgtt = INTEL_PPGTT_NONE; + } + /* Initialize command stream timestamp frequency */ info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv); } @@ -872,40 +884,37 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps, void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) { struct intel_device_info *info = mkwrite_device_info(dev_priv); - u8 vdbox_disable, vebox_disable; u32 media_fuse; - int i; + unsigned int i; if (INTEL_GEN(dev_priv) < 11) return; - media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE); + media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE); - vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; - vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> - GEN11_GT_VEBOX_DISABLE_SHIFT; + info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; + info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> + GEN11_GT_VEBOX_DISABLE_SHIFT; - DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable); + DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable); for (i = 0; i < I915_MAX_VCS; i++) { if (!HAS_ENGINE(dev_priv, _VCS(i))) continue; - if (!(BIT(i) & vdbox_disable)) - continue; - - info->ring_mask &= ~ENGINE_MASK(_VCS(i)); - DRM_DEBUG_DRIVER("vcs%u fused off\n", i); + if (!(BIT(i) & info->vdbox_enable)) { + info->ring_mask &= ~ENGINE_MASK(_VCS(i)); + DRM_DEBUG_DRIVER("vcs%u fused off\n", i); + } } - DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable); + DRM_DEBUG_DRIVER("vebox 
enable: %04x\n", info->vebox_enable); for (i = 0; i < I915_MAX_VECS; i++) { if (!HAS_ENGINE(dev_priv, _VECS(i))) continue; - if (!(BIT(i) & vebox_disable)) - continue; - - info->ring_mask &= ~ENGINE_MASK(_VECS(i)); - DRM_DEBUG_DRIVER("vecs%u fused off\n", i); + if (!(BIT(i) & info->vebox_enable)) { + info->ring_mask &= ~ENGINE_MASK(_VECS(i)); + DRM_DEBUG_DRIVER("vecs%u fused off\n", i); + } } } diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 6eecd64734d5..1caf24e2cf0b 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -25,6 +25,8 @@ #ifndef _INTEL_DEVICE_INFO_H_ #define _INTEL_DEVICE_INFO_H_ +#include <uapi/drm/i915_drm.h> + #include "intel_display.h" struct drm_printer; @@ -74,51 +76,58 @@ enum intel_platform { INTEL_MAX_PLATFORMS }; +enum intel_ppgtt { + INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE, + INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING, + INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL, + INTEL_PPGTT_FULL_4LVL, +}; + #define DEV_INFO_FOR_EACH_FLAG(func) \ func(is_mobile); \ func(is_lp); \ func(is_alpha_support); \ /* Keep has_* in alphabetical order */ \ func(has_64bit_reloc); \ - func(has_aliasing_ppgtt); \ - func(has_csr); \ - func(has_ddi); \ - func(has_dp_mst); \ func(has_reset_engine); \ - func(has_fbc); \ func(has_fpga_dbg); \ - func(has_full_ppgtt); \ - func(has_full_48bit_ppgtt); \ - func(has_gmch_display); \ func(has_guc); \ func(has_guc_ct); \ - func(has_hotplug); \ func(has_l3_dpf); \ func(has_llc); \ func(has_logical_ring_contexts); \ func(has_logical_ring_elsq); \ func(has_logical_ring_preemption); \ - func(has_overlay); \ func(has_pooled_eu); \ - func(has_psr); \ func(has_rc6); \ func(has_rc6p); \ func(has_runtime_pm); \ func(has_snoop); \ func(has_coherent_ggtt); \ func(unfenced_needs_alignment); \ + func(hws_needs_physical); + +#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \ + /* Keep in alphabetical order */ \ func(cursor_needs_physical); \ - func(hws_needs_physical); \ + func(has_csr); \ + func(has_ddi); \ + func(has_dp_mst); \ + func(has_fbc); \ + func(has_gmch_display); \ + func(has_hotplug); \ + func(has_ipc); \ + func(has_overlay); \ + func(has_psr); \ func(overlay_needs_physical); \ - func(supports_tv); \ - func(has_ipc); + func(supports_tv); #define GEN_MAX_SLICES (6) /* CNL upper bound */ #define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ struct sseu_dev_info { u8 slice_mask; - u8 subslice_mask[GEN_MAX_SUBSLICES]; + u8 subslice_mask[GEN_MAX_SLICES]; u16 eu_total; u8 eu_per_subslice; u8 min_eu_in_pool; @@ -154,6 +163,7 @@ struct intel_device_info { enum intel_platform platform; u32 platform_mask; + enum intel_ppgtt ppgtt; unsigned int page_sizes; /* page sizes supported by the HW */ u32 display_mmio_offset; @@ -165,12 +175,18 @@ struct intel_device_info { #define DEFINE_FLAG(name) u8 name:1 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); #undef DEFINE_FLAG + + struct { +#define DEFINE_FLAG(name) u8 name:1 + DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG); +#undef DEFINE_FLAG + } display; + u16 ddb_size; /* in blocks */ /* Register offsets for the various display pipes and transcoders */ int pipe_offsets[I915_MAX_TRANSCODERS]; int trans_offsets[I915_MAX_TRANSCODERS]; - int palette_offsets[I915_MAX_PIPES]; int cursor_offsets[I915_MAX_PIPES]; /* Slice/subslice/EU info */ @@ -178,6 +194,10 @@ struct intel_device_info { u32 cs_timestamp_frequency_khz; + /* Enabled (not fused off) media engine bitmasks. 
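GEN11_GT_VEBOX_VDBOX_DISABLE reports fused-off engines, so complementing the register once up front lets everything downstream deal in enable masks only. A worked stand-alone decode (the field layout is illustrative, not the real register definition):

    #include <stdint.h>
    #include <stdio.h>

    #define VDBOX_DISABLE_MASK   0x000000ffu
    #define VEBOX_DISABLE_SHIFT  16
    #define VEBOX_DISABLE_MASK   (0x0000000fu << VEBOX_DISABLE_SHIFT)

    static void decode_media_fuses(uint32_t fuse_reg)
    {
        uint32_t media_fuse = ~fuse_reg;    /* disable mask -> enable mask */
        uint8_t vdbox_enable = media_fuse & VDBOX_DISABLE_MASK;
        uint8_t vebox_enable = (media_fuse & VEBOX_DISABLE_MASK) >>
                               VEBOX_DISABLE_SHIFT;

        /* e.g. fuse_reg 0x000d0002 -> vdbox 0xfd, vebox 0x02 */
        printf("vdbox enable: %02x, vebox enable: %02x\n",
               vdbox_enable, vebox_enable);
    }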
*/ + u8 vdbox_enable; + u8 vebox_enable; + struct color_luts { u16 degamma_lut_size; u16 gamma_lut_size; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9741cc419e1b..07c861884c70 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -24,7 +24,6 @@ * Eric Anholt <eric@anholt.net> */ -#include <linux/dmi.h> #include <linux/module.h> #include <linux/input.h> #include <linux/i2c.h> @@ -74,55 +73,6 @@ static const uint64_t i9xx_format_modifiers[] = { DRM_FORMAT_MOD_INVALID }; -static const uint32_t skl_primary_formats[] = { - DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_YUYV, - DRM_FORMAT_YVYU, - DRM_FORMAT_UYVY, - DRM_FORMAT_VYUY, -}; - -static const uint32_t skl_pri_planar_formats[] = { - DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_YUYV, - DRM_FORMAT_YVYU, - DRM_FORMAT_UYVY, - DRM_FORMAT_VYUY, - DRM_FORMAT_NV12, -}; - -static const uint64_t skl_format_modifiers_noccs[] = { - I915_FORMAT_MOD_Yf_TILED, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const uint64_t skl_format_modifiers_ccs[] = { - I915_FORMAT_MOD_Yf_TILED_CCS, - I915_FORMAT_MOD_Y_TILED_CCS, - I915_FORMAT_MOD_Yf_TILED, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - /* Cursor formats */ static const uint32_t intel_cursor_formats[] = { DRM_FORMAT_ARGB8888, @@ -141,15 +91,15 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc, static int intel_framebuffer_init(struct intel_framebuffer *ifb, struct drm_i915_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd); -static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); -static void intel_set_pipe_timings(struct intel_crtc *intel_crtc); -static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc); -static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, - struct intel_link_m_n *m_n, - struct intel_link_m_n *m2_n2); -static void ironlake_set_pipeconf(struct drm_crtc *crtc); -static void haswell_set_pipeconf(struct drm_crtc *crtc); -static void haswell_set_pipemisc(struct drm_crtc *crtc); +static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state); +static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); +static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, + const struct intel_link_m_n *m_n, + const struct intel_link_m_n *m2_n2); +static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); +static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state); +static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state); +static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state); static void vlv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config); static void chv_prepare_pll(struct intel_crtc *crtc, @@ -158,9 +108,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); static void intel_crtc_init_scalers(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); -static void 
skylake_pfit_enable(struct intel_crtc *crtc); -static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); -static void ironlake_pfit_enable(struct intel_crtc *crtc); +static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state); +static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state); +static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state); static void intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); @@ -506,23 +456,8 @@ static const struct intel_limit intel_limits_bxt = { }; static void -skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable) -{ - if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) - return; - - if (enable) - I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS); - else - I915_WRITE(CHICKEN_PIPESL_1(pipe), 0); -} - -static void skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable) { - if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) - return; - if (enable) I915_WRITE(CLKGATE_DIS_PSL(pipe), DUPS1_GATING_DIS | DUPS2_GATING_DIS); @@ -1381,6 +1316,7 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, "PCH LVDS enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); + /* PCH SDVOB multiplex with HDMIB */ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); @@ -1565,14 +1501,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc, } } -static void i9xx_disable_pll(struct intel_crtc *crtc) +static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* Disable DVO 2x clock on both PLLs if necessary */ if (IS_I830(dev_priv) && - intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) && !intel_num_dvo_pipes(dev_priv)) { I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); @@ -1666,16 +1603,16 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, I915_READ(dpll_reg) & port_mask, expected_mask); } -static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe) +static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, - pipe); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; i915_reg_t reg; uint32_t val, pipeconf_val; /* Make sure PCH DPLL is enabled */ - assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll); + assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); /* FDI must be feeding us bits for PCH ports */ assert_fdi_tx_enabled(dev_priv, pipe); @@ -1701,7 +1638,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, * here for both 8bpc and 12bpc. 
*/ val &= ~PIPECONF_BPC_MASK; - if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI)) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) val |= PIPECONF_8BPC; else val |= pipeconf_val & PIPECONF_BPC_MASK; @@ -1710,7 +1647,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, val &= ~TRANS_INTERLACE_MASK; if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) if (HAS_PCH_IBX(dev_priv) && - intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) val |= TRANS_LEGACY_INTERLACED_ILK; else val |= TRANS_INTERLACED; @@ -2254,6 +2191,11 @@ static u32 intel_adjust_tile_offset(int *x, int *y, return new_offset; } +static bool is_surface_linear(u64 modifier, int color_plane) +{ + return modifier == DRM_FORMAT_MOD_LINEAR; +} + static u32 intel_adjust_aligned_offset(int *x, int *y, const struct drm_framebuffer *fb, int color_plane, @@ -2266,7 +2208,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y, WARN_ON(new_offset > old_offset); - if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { + if (!is_surface_linear(fb->modifier, color_plane)) { unsigned int tile_size, tile_width, tile_height; unsigned int pitch_tiles; @@ -2330,14 +2272,13 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, unsigned int rotation, u32 alignment) { - uint64_t fb_modifier = fb->modifier; unsigned int cpp = fb->format->cpp[color_plane]; u32 offset, offset_aligned; if (alignment) alignment--; - if (fb_modifier != DRM_FORMAT_MOD_LINEAR) { + if (!is_surface_linear(fb->modifier, color_plane)) { unsigned int tile_size, tile_width, tile_height; unsigned int tile_rows, tiles, pitch_tiles; @@ -2400,10 +2341,26 @@ static int intel_fb_offset_to_xy(int *x, int *y, int color_plane) { struct drm_i915_private *dev_priv = to_i915(fb->dev); + unsigned int height; if (fb->modifier != DRM_FORMAT_MOD_LINEAR && - fb->offsets[color_plane] % intel_tile_size(dev_priv)) + fb->offsets[color_plane] % intel_tile_size(dev_priv)) { + DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n", + fb->offsets[color_plane], color_plane); return -EINVAL; + } + + height = drm_framebuffer_plane_height(fb->height, fb, color_plane); + height = ALIGN(height, intel_tile_height(fb, color_plane)); + + /* Catch potential overflows early */ + if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]), + fb->offsets[color_plane])) { + DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n", + fb->offsets[color_plane], fb->pitches[color_plane], + color_plane); + return -ERANGE; + } *x = 0; *y = 0; @@ -2574,7 +2531,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, tile_size); offset /= tile_size; - if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { + if (!is_surface_linear(fb->modifier, i)) { unsigned int tile_width, tile_height; unsigned int pitch_tiles; struct drm_rect r; @@ -2788,10 +2745,6 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state, crtc_state->base.plane_mask |= drm_plane_mask(&plane->base); else crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base); - - DRM_DEBUG_KMS("%s active planes 0x%x\n", - crtc_state->base.crtc->name, - crtc_state->active_planes); } static void fixup_active_planes(struct intel_crtc_state *crtc_state) @@ -2819,6 +2772,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); + DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n", + 
plane->base.base.id, plane->base.name, + crtc->base.base.id, crtc->base.name); + intel_set_plane_visible(crtc_state, plane_state, false); fixup_active_planes(crtc_state); @@ -2826,7 +2783,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, intel_pre_disable_primary_noatomic(&crtc->base); trace_intel_disable_plane(&plane->base, crtc); - plane->disable_plane(plane, crtc); + plane->disable_plane(plane, crtc_state); } static void @@ -2890,6 +2847,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, return; valid_fb: + intel_state->base.rotation = plane_config->rotation; intel_fill_fb_ggtt_view(&intel_state->view, fb, intel_state->base.rotation); intel_state->color_plane[0].stride = @@ -3098,28 +3056,6 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) return 0; } -static int -skl_check_nv12_surface(struct intel_plane_state *plane_state) -{ - /* Display WA #1106 */ - if (plane_state->base.rotation != - (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) && - plane_state->base.rotation != DRM_MODE_ROTATE_270) - return 0; - - /* - * src coordinates are rotated here. - * We check height but report it as width - */ - if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) { - DRM_DEBUG_KMS("src width must be multiple " - "of 4 for rotated NV12\n"); - return -EINVAL; - } - - return 0; -} - static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->base.fb; @@ -3198,9 +3134,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) * the main surface setup depends on it. */ if (fb->format->format == DRM_FORMAT_NV12) { - ret = skl_check_nv12_surface(plane_state); - if (ret) - return ret; ret = skl_check_nv12_aux_surface(plane_state); if (ret) return ret; @@ -3398,7 +3331,6 @@ static void i9xx_update_plane(struct intel_plane *plane, enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; u32 linear_offset; u32 dspcntr = plane_state->ctl; - i915_reg_t reg = DSPCNTR(i9xx_plane); int x = plane_state->color_plane[0].x; int y = plane_state->color_plane[0].y; unsigned long irqflags; @@ -3413,48 +3345,51 @@ static void i9xx_update_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride); + if (INTEL_GEN(dev_priv) < 4) { /* pipesrc and dspsize control the size that is scaled from, * which should always be the user's requested size. */ + I915_WRITE_FW(DSPPOS(i9xx_plane), 0); I915_WRITE_FW(DSPSIZE(i9xx_plane), ((crtc_state->pipe_src_h - 1) << 16) | (crtc_state->pipe_src_w - 1)); - I915_WRITE_FW(DSPPOS(i9xx_plane), 0); } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { + I915_WRITE_FW(PRIMPOS(i9xx_plane), 0); I915_WRITE_FW(PRIMSIZE(i9xx_plane), ((crtc_state->pipe_src_h - 1) << 16) | (crtc_state->pipe_src_w - 1)); - I915_WRITE_FW(PRIMPOS(i9xx_plane), 0); I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0); } - I915_WRITE_FW(reg, dspcntr); - - I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride); if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - I915_WRITE_FW(DSPSURF(i9xx_plane), - intel_plane_ggtt_offset(plane_state) + - dspaddr_offset); I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x); } else if (INTEL_GEN(dev_priv) >= 4) { + I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset); + I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x); + } + + /* + * The control register self-arms if the plane was previously + * disabled. 
Try to make the plane enable atomic by writing + * the control register just before the surface register. + */ + I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); + if (INTEL_GEN(dev_priv) >= 4) I915_WRITE_FW(DSPSURF(i9xx_plane), intel_plane_ggtt_offset(plane_state) + dspaddr_offset); - I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x); - I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset); - } else { + else I915_WRITE_FW(DSPADDR(i9xx_plane), intel_plane_ggtt_offset(plane_state) + dspaddr_offset); - } - POSTING_READ_FW(reg); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void i9xx_disable_plane(struct intel_plane *plane, - struct intel_crtc *crtc) + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; @@ -3467,7 +3402,6 @@ static void i9xx_disable_plane(struct intel_plane *plane, I915_WRITE_FW(DSPSURF(i9xx_plane), 0); else I915_WRITE_FW(DSPADDR(i9xx_plane), 0); - POSTING_READ_FW(DSPCNTR(i9xx_plane)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -3527,13 +3461,13 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) /* * This function detaches (aka. unbinds) unused scalers in hardware */ -static void skl_detach_scalers(struct intel_crtc *intel_crtc) +static void skl_detach_scalers(const struct intel_crtc_state *crtc_state) { - struct intel_crtc_scaler_state *scaler_state; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + const struct intel_crtc_scaler_state *scaler_state = + &crtc_state->scaler_state; int i; - scaler_state = &intel_crtc->config->scaler_state; - /* loop through and disable scalers that aren't in use */ for (i = 0; i < intel_crtc->num_scalers; i++) { if (!scaler_state->scalers[i].in_use) @@ -3541,6 +3475,21 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc) } } +static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb, + int color_plane, unsigned int rotation) +{ + /* + * The stride is either expressed as a multiple of 64 bytes chunks for + * linear buffers or in number of tiles for tiled buffers. + */ + if (fb->modifier == DRM_FORMAT_MOD_LINEAR) + return 64; + else if (drm_rotation_90_or_270(rotation)) + return intel_tile_height(fb, color_plane); + else + return intel_tile_width_bytes(fb, color_plane); +} + u32 skl_plane_stride(const struct intel_plane_state *plane_state, int color_plane) { @@ -3551,16 +3500,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state, if (color_plane >= fb->format->num_planes) return 0; - /* - * The stride is either expressed as a multiple of 64 bytes chunks for - * linear buffers or in number of tiles for tiled buffers. - */ - if (drm_rotation_90_or_270(rotation)) - stride /= intel_tile_height(fb, color_plane); - else - stride /= intel_fb_stride_alignment(fb, color_plane); - - return stride; + return stride / skl_plane_stride_mult(fb, color_plane, rotation); } static u32 skl_plane_ctl_format(uint32_t pixel_format) @@ -3597,29 +3537,38 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format) return 0; } -/* - * XXX: For ARBG/ABGR formats we default to expecting scanout buffers - * to be already pre-multiplied. We need to add a knob (or a different - * DRM_FORMAT) for user-space to configure that. 
- */ -static u32 skl_plane_ctl_alpha(uint32_t pixel_format) +static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) { - switch (pixel_format) { - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_ARGB8888: + if (!plane_state->base.fb->format->has_alpha) + return PLANE_CTL_ALPHA_DISABLE; + + switch (plane_state->base.pixel_blend_mode) { + case DRM_MODE_BLEND_PIXEL_NONE: + return PLANE_CTL_ALPHA_DISABLE; + case DRM_MODE_BLEND_PREMULTI: return PLANE_CTL_ALPHA_SW_PREMULTIPLY; + case DRM_MODE_BLEND_COVERAGE: + return PLANE_CTL_ALPHA_HW_PREMULTIPLY; default: + MISSING_CASE(plane_state->base.pixel_blend_mode); return PLANE_CTL_ALPHA_DISABLE; } } -static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format) +static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state) { - switch (pixel_format) { - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_ARGB8888: + if (!plane_state->base.fb->format->has_alpha) + return PLANE_COLOR_ALPHA_DISABLE; + + switch (plane_state->base.pixel_blend_mode) { + case DRM_MODE_BLEND_PIXEL_NONE: + return PLANE_COLOR_ALPHA_DISABLE; + case DRM_MODE_BLEND_PREMULTI: return PLANE_COLOR_ALPHA_SW_PREMULTIPLY; + case DRM_MODE_BLEND_COVERAGE: + return PLANE_COLOR_ALPHA_HW_PREMULTIPLY; default: + MISSING_CASE(plane_state->base.pixel_blend_mode); return PLANE_COLOR_ALPHA_DISABLE; } } @@ -3696,7 +3645,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, plane_ctl = PLANE_CTL_ENABLE; if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { - plane_ctl |= skl_plane_ctl_alpha(fb->format->format); + plane_ctl |= skl_plane_ctl_alpha(plane_state); plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE | PLANE_CTL_PIPE_CSC_ENABLE | @@ -3731,6 +3680,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); const struct drm_framebuffer *fb = plane_state->base.fb; + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); u32 plane_color_ctl = 0; if (INTEL_GEN(dev_priv) < 11) { @@ -3738,9 +3688,9 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; } plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; - plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format); + plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); - if (fb->format->is_yuv) { + if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) { if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; else @@ -3748,6 +3698,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; + } else if (fb->format->is_yuv) { + plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; } return plane_color_ctl; @@ -3932,15 +3884,15 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta /* on skylake this is done by detaching scalers */ if (INTEL_GEN(dev_priv) >= 9) { - skl_detach_scalers(crtc); + skl_detach_scalers(new_crtc_state); if (new_crtc_state->pch_pfit.enabled) - skylake_pfit_enable(crtc); + skylake_pfit_enable(new_crtc_state); } else if (HAS_PCH_SPLIT(dev_priv)) { if (new_crtc_state->pch_pfit.enabled) - ironlake_pfit_enable(crtc); + ironlake_pfit_enable(new_crtc_state); else if (old_crtc_state->pch_pfit.enabled) - ironlake_pfit_disable(crtc, true); + ironlake_pfit_disable(old_crtc_state); } } @@ -4339,10 +4291,10 @@ 
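Alpha handling above now keys off the KMS pixel_blend_mode property plus the format's has_alpha flag instead of hard-coding two ARGB formats: opaque formats and the "None" mode disable per-pixel alpha, "Pre-multiplied" keeps the software pre-multiplied path, and "Coverage" asks the hardware to pre-multiply. A compact model of the mapping (the enum values stand in for the PLANE_CTL/PLANE_COLOR_CTL bits):

    #include <stdbool.h>

    enum blend_mode { BLEND_PIXEL_NONE, BLEND_PREMULTI, BLEND_COVERAGE };
    enum plane_alpha { ALPHA_DISABLE, ALPHA_SW_PREMULTIPLY,
                       ALPHA_HW_PREMULTIPLY };

    static enum plane_alpha plane_alpha_mode(bool fb_has_alpha,
                                             enum blend_mode mode)
    {
        if (!fb_has_alpha)
            return ALPHA_DISABLE;   /* e.g. XRGB8888: X bits ignored */

        switch (mode) {
        case BLEND_PIXEL_NONE:
            return ALPHA_DISABLE;
        case BLEND_PREMULTI:        /* the KMS default */
            return ALPHA_SW_PREMULTIPLY;
        case BLEND_COVERAGE:
            return ALPHA_HW_PREMULTIPLY;
        default:
            return ALPHA_DISABLE;   /* MISSING_CASE() in the driver */
        }
    }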
train_done: DRM_DEBUG_KMS("FDI train done.\n"); } -static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) +static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); int pipe = intel_crtc->pipe; i915_reg_t reg; u32 temp; @@ -4351,7 +4303,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); - temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); + temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); @@ -4500,10 +4452,11 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv) } /* Program iCLKIP clock to the desired frequency */ -static void lpt_program_iclkip(struct intel_crtc *crtc) +static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int clock = crtc->config->base.adjusted_mode.crtc_clock; + int clock = crtc_state->base.adjusted_mode.crtc_clock; u32 divsel, phaseinc, auxdiv, phasedir = 0; u32 temp; @@ -4614,12 +4567,12 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv) desired_divisor << auxdiv); } -static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, +static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, enum pipe pch_transcoder) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), I915_READ(HTOTAL(cpu_transcoder))); @@ -4638,9 +4591,8 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, I915_READ(VSYNCSHIFT(cpu_transcoder))); } -static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) +static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) { - struct drm_i915_private *dev_priv = to_i915(dev); uint32_t temp; temp = I915_READ(SOUTH_CHICKEN1); @@ -4659,22 +4611,23 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) POSTING_READ(SOUTH_CHICKEN1); } -static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) +static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = intel_crtc->base.dev; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - switch (intel_crtc->pipe) { + switch (crtc->pipe) { case PIPE_A: break; case PIPE_B: - if (intel_crtc->config->fdi_lanes > 2) - cpt_set_fdi_bc_bifurcation(dev, false); + if (crtc_state->fdi_lanes > 2) + cpt_set_fdi_bc_bifurcation(dev_priv, false); else - cpt_set_fdi_bc_bifurcation(dev, true); + cpt_set_fdi_bc_bifurcation(dev_priv, true); break; case PIPE_C: - cpt_set_fdi_bc_bifurcation(dev, true); + cpt_set_fdi_bc_bifurcation(dev_priv, true); break; 
default: @@ -4731,7 +4684,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, assert_pch_transcoder_disabled(dev_priv, pipe); if (IS_IVYBRIDGE(dev_priv)) - ivybridge_update_fdi_bc_bifurcation(crtc); + ivybridge_update_fdi_bc_bifurcation(crtc_state); /* Write the TU size bits before fdi link training, so that error * detection works. */ @@ -4764,11 +4717,11 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, * Note that enable_shared_dpll tries to do the right thing, but * get_shared_dpll unconditionally resets the pll - we need that to have * the right LVDS enable sequence. */ - intel_enable_shared_dpll(crtc); + intel_enable_shared_dpll(crtc_state); /* set transcoder timing, panel must allow it */ assert_panel_unlocked(dev_priv, pipe); - ironlake_pch_transcoder_set_timings(crtc, pipe); + ironlake_pch_transcoder_set_timings(crtc_state, pipe); intel_fdi_normal_train(crtc); @@ -4800,7 +4753,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, I915_WRITE(reg, temp); } - ironlake_enable_pch_transcoder(dev_priv, pipe); + ironlake_enable_pch_transcoder(crtc_state); } static void lpt_pch_enable(const struct intel_atomic_state *state, @@ -4812,10 +4765,10 @@ static void lpt_pch_enable(const struct intel_atomic_state *state, assert_pch_transcoder_disabled(dev_priv, PIPE_A); - lpt_program_iclkip(crtc); + lpt_program_iclkip(crtc_state); /* Set transcoder timing. */ - ironlake_pch_transcoder_set_timings(crtc, PIPE_A); + ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A); lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); } @@ -4850,8 +4803,31 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe) * chroma samples for both of the luma samples, and thus we don't * actually get the expected MPEG2 chroma siting convention :( * The same behaviour is observed on pre-SKL platforms as well. + * + * Theory behind the formula (note that we ignore sub-pixel + * source coordinates): + * s = source sample position + * d = destination sample position + * + * Downscaling 4:1: + * -0.5 + * | 0.0 + * | | 1.5 (initial phase) + * | | | + * v v v + * | s | s | s | s | + * | d | + * + * Upscaling 1:4: + * -0.5 + * | -0.375 (initial phase) + * | | 0.0 + * | | | + * v v v + * | s | + * | d | d | d | d | */ -u16 skl_scaler_calc_phase(int sub, bool chroma_cosited) +u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited) { int phase = -0x8000; u16 trip = 0; @@ -4859,6 +4835,15 @@ u16 skl_scaler_calc_phase(int sub, bool chroma_cosited) if (chroma_cosited) phase += (sub - 1) * 0x8000 / sub; + phase += scale / (2 * sub); + + /* + * Hardware initial phase limited to [-0.5:1.5]. + * Since the max hardware scale factor is 3.0, we + * should never actually exceed 1.0 here.
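+ * + * (Editor's note, a worked check of the formula rather than part of + * the original patch: with sub = 1 and no chroma cositing, the 4:1 + * downscale above has scale = 4.0 = 0x40000 in .16 fixed point, so + * phase = -0x8000 + 0x40000 / 2 = 0x18000 = +1.5; the 1:4 upscale + * has scale = 0x4000, so phase = -0x8000 + 0x2000 = -0x6000 = -0.375. + * Both match the diagrams above and the [-0.5:1.5] bound checked + * just below.)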
+ */ + WARN_ON(phase < -0x8000 || phase > 0x18000); + if (phase < 0) phase = 0x10000 + phase; else @@ -4871,8 +4856,7 @@ static int skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, unsigned int scaler_user, int *scaler_id, int src_w, int src_h, int dst_w, int dst_h, - bool plane_scaler_check, - uint32_t pixel_format) + const struct drm_format_info *format, bool need_scaler) { struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; @@ -4881,21 +4865,14 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; - int need_scaling; /* * Src coordinates are already rotated by 270 degrees for * the 90/270 degree plane rotation cases (to match the * GTT mapping), hence no need to account for rotation here. */ - need_scaling = src_w != dst_w || src_h != dst_h; - - if (plane_scaler_check) - if (pixel_format == DRM_FORMAT_NV12) - need_scaling = true; - - if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX) - need_scaling = true; + if (src_w != dst_w || src_h != dst_h) + need_scaler = true; /* * Scaling/fitting not supported in IF-ID mode in GEN9+ @@ -4904,7 +4881,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, * for NV12. */ if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && - need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { + need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n"); return -EINVAL; } @@ -4919,7 +4896,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, * update to free the scaler is done in plane/panel-fit programming. * For this purpose crtc/plane_state->scaler_id isn't reset here. */ - if (force_detach || !need_scaling) { + if (force_detach || !need_scaler) { if (*scaler_id >= 0) { scaler_state->scaler_users &= ~(1 << scaler_user); scaler_state->scalers[*scaler_id].in_use = 0; @@ -4933,7 +4910,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, return 0; } - if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 && + if (format && format->format == DRM_FORMAT_NV12 && (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { DRM_DEBUG_KMS("NV12: src dimensions not met\n"); return -EINVAL; } @@ -4976,12 +4953,16 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, int skl_update_scaler_crtc(struct intel_crtc_state *state) { const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; + bool need_scaler = false; + + if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) + need_scaler = true; return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, &state->scaler_state.scaler_id, state->pipe_src_w, state->pipe_src_h, adjusted_mode->crtc_hdisplay, - adjusted_mode->crtc_vdisplay, false, 0); + adjusted_mode->crtc_vdisplay, NULL, need_scaler); } /** @@ -4996,13 +4977,17 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state) static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane); struct drm_framebuffer *fb = plane_state->base.fb; int ret; bool force_detach = !fb || !plane_state->base.visible; + bool need_scaler = false; + + /* Pre-gen11 and SDR planes always need a scaler for planar formats.
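+ * (Editor's note, not part of the original patch: the likely rationale + * is that NV12 chroma is subsampled 2x2, and a plane without its own + * chroma upsampler must claim a pipe scaler to upsample it even when + * source and destination sizes match; ICL HDR planes carry a dedicated + * chroma upsampler fed by a linked Y plane, hence the + * icl_is_hdr_plane() exemption below.)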
*/ + if (!icl_is_hdr_plane(intel_plane) && + fb && fb->format->format == DRM_FORMAT_NV12) + need_scaler = true; ret = skl_update_scaler(crtc_state, force_detach, drm_plane_index(&intel_plane->base), @@ -5011,7 +4996,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, drm_rect_height(&plane_state->base.src) >> 16, drm_rect_width(&plane_state->base.dst), drm_rect_height(&plane_state->base.dst), - fb ? true : false, fb ? fb->format->format : 0); + fb ? fb->format : NULL, need_scaler); if (ret || plane_state->scaler_id < 0) return ret; @@ -5057,23 +5042,30 @@ static void skylake_scaler_disable(struct intel_crtc *crtc) skl_detach_scaler(crtc, i); } -static void skylake_pfit_enable(struct intel_crtc *crtc) +static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = crtc->pipe; - struct intel_crtc_scaler_state *scaler_state = - &crtc->config->scaler_state; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + const struct intel_crtc_scaler_state *scaler_state = + &crtc_state->scaler_state; - if (crtc->config->pch_pfit.enabled) { + if (crtc_state->pch_pfit.enabled) { u16 uv_rgb_hphase, uv_rgb_vphase; + int pfit_w, pfit_h, hscale, vscale; int id; - if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) + if (WARN_ON(crtc_state->scaler_state.scaler_id < 0)) return; - uv_rgb_hphase = skl_scaler_calc_phase(1, false); - uv_rgb_vphase = skl_scaler_calc_phase(1, false); + pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF; + pfit_h = crtc_state->pch_pfit.size & 0xFFFF; + + hscale = (crtc_state->pipe_src_w << 16) / pfit_w; + vscale = (crtc_state->pipe_src_h << 16) / pfit_h; + + uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); + uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); id = scaler_state->scaler_id; I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | @@ -5082,18 +5074,18 @@ static void skylake_pfit_enable(struct intel_crtc *crtc) PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); I915_WRITE_FW(SKL_PS_HPHASE(pipe, id), PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); - I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); - I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); + I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos); + I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size); } } -static void ironlake_pfit_enable(struct intel_crtc *crtc) +static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int pipe = crtc->pipe; - if (crtc->config->pch_pfit.enabled) { + if (crtc_state->pch_pfit.enabled) { /* Force use of hard-coded filter coefficients * as some pre-programmed values are broken, * e.g. x201. 
@@ -5103,8 +5095,8 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc) PF_PIPE_SEL_IVB(pipe)); else I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); - I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos); - I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size); + I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos); + I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size); } } @@ -5299,11 +5291,8 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv, if (!crtc_state->nv12_planes) return false; - if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) - return false; - - if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) || - IS_CANNONLAKE(dev_priv)) + /* WA Display #0827: Gen9:all */ + if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) return true; return false; @@ -5346,7 +5335,6 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) if (needs_nv12_wa(dev_priv, old_crtc_state) && !needs_nv12_wa(dev_priv, pipe_config)) { skl_wa_clkgate(dev_priv, crtc->pipe, false); - skl_wa_528(dev_priv, crtc->pipe, false); } } @@ -5386,7 +5374,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, if (!needs_nv12_wa(dev_priv, old_crtc_state) && needs_nv12_wa(dev_priv, pipe_config)) { skl_wa_clkgate(dev_priv, crtc->pipe, true); - skl_wa_528(dev_priv, crtc->pipe, true); } /* @@ -5409,7 +5396,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, * * WaCxSRDisabledForSpriteScaling:ivb */ - if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev)) + if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) && + old_crtc_state->base.active) intel_wait_for_vblank(dev_priv, crtc->pipe); /* @@ -5440,24 +5428,32 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, intel_update_watermarks(crtc); } -static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) +static void intel_crtc_disable_planes(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_device *dev = crtc->dev; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_plane *p; - int pipe = intel_crtc->pipe; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + unsigned int update_mask = new_crtc_state->update_planes; + const struct intel_plane_state *old_plane_state; + struct intel_plane *plane; + unsigned fb_bits = 0; + int i; - intel_crtc_dpms_overlay_disable(intel_crtc); + intel_crtc_dpms_overlay_disable(crtc); - drm_for_each_plane_mask(p, dev, plane_mask) - to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc); + for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { + if (crtc->pipe != plane->pipe || + !(update_mask & BIT(plane->id))) + continue; - /* - * FIXME: Once we grow proper nuclear flip support out of this we need - * to compute the mask of flip planes precisely. For the time being - * consider this a flip to a NULL plane. 
- */ - intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe)); + plane->disable_plane(plane, new_crtc_state); + + if (old_plane_state->base.visible) + fb_bits |= plane->frontbuffer_bit; + } + + intel_frontbuffer_flip(dev_priv, fb_bits); } static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, @@ -5515,7 +5511,8 @@ static void intel_encoders_enable(struct drm_crtc *crtc, if (conn_state->crtc != crtc) continue; - encoder->enable(encoder, crtc_state, conn_state); + if (encoder->enable) + encoder->enable(encoder, crtc_state, conn_state); intel_opregion_notify_encoder(encoder, true); } } @@ -5536,7 +5533,8 @@ static void intel_encoders_disable(struct drm_crtc *crtc, continue; intel_opregion_notify_encoder(encoder, false); - encoder->disable(encoder, old_crtc_state, old_conn_state); + if (encoder->disable) + encoder->disable(encoder, old_crtc_state, old_conn_state); } } @@ -5607,37 +5605,37 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); - if (intel_crtc->config->has_pch_encoder) - intel_prepare_shared_dpll(intel_crtc); + if (pipe_config->has_pch_encoder) + intel_prepare_shared_dpll(pipe_config); - if (intel_crtc_has_dp_encoder(intel_crtc->config)) - intel_dp_set_m_n(intel_crtc, M1_N1); + if (intel_crtc_has_dp_encoder(pipe_config)) + intel_dp_set_m_n(pipe_config, M1_N1); - intel_set_pipe_timings(intel_crtc); - intel_set_pipe_src_size(intel_crtc); + intel_set_pipe_timings(pipe_config); + intel_set_pipe_src_size(pipe_config); - if (intel_crtc->config->has_pch_encoder) { - intel_cpu_transcoder_set_m_n(intel_crtc, - &intel_crtc->config->fdi_m_n, NULL); + if (pipe_config->has_pch_encoder) { + intel_cpu_transcoder_set_m_n(pipe_config, + &pipe_config->fdi_m_n, NULL); } - ironlake_set_pipeconf(crtc); + ironlake_set_pipeconf(pipe_config); intel_crtc->active = true; intel_encoders_pre_enable(crtc, pipe_config, old_state); - if (intel_crtc->config->has_pch_encoder) { + if (pipe_config->has_pch_encoder) { /* Note: FDI PLL enabling _must_ be done before we enable the * cpu pipes, hence this is separate from all the other fdi/pch * enabling. */ - ironlake_fdi_pll_enable(intel_crtc); + ironlake_fdi_pll_enable(pipe_config); } else { assert_fdi_tx_disabled(dev_priv, pipe); assert_fdi_rx_disabled(dev_priv, pipe); } - ironlake_pfit_enable(intel_crtc); + ironlake_pfit_enable(pipe_config); /* * On ILK+ LUT must be loaded before the pipe is running but with @@ -5646,10 +5644,10 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, intel_color_load_luts(&pipe_config->base); if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config); + dev_priv->display.initial_watermarks(old_intel_state, pipe_config); intel_enable_pipe(pipe_config); - if (intel_crtc->config->has_pch_encoder) + if (pipe_config->has_pch_encoder) ironlake_pch_enable(old_intel_state, pipe_config); assert_vblank_disabled(crtc); @@ -5666,7 +5664,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, * some interlaced HDMI modes. Let's do the double wait always * in case there are more corner cases we don't know about. 
*/ - if (intel_crtc->config->has_pch_encoder) { + if (pipe_config->has_pch_encoder) { intel_wait_for_vblank(dev_priv, pipe); intel_wait_for_vblank(dev_priv, pipe); } @@ -5700,10 +5698,9 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc) enum pipe pipe = crtc->pipe; uint32_t val; - val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2); - - /* Program B credit equally to all pipes */ - val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes); + val = MBUS_DBOX_A_CREDIT(2); + val |= MBUS_DBOX_BW_CREDIT(1); + val |= MBUS_DBOX_B_CREDIT(8); I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); } @@ -5715,7 +5712,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe, hsw_workaround_pipe; - enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; + enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; struct intel_atomic_state *old_intel_state = to_intel_atomic_state(old_state); bool psl_clkgate_wa; @@ -5726,37 +5723,34 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); - if (intel_crtc->config->shared_dpll) - intel_enable_shared_dpll(intel_crtc); - - if (INTEL_GEN(dev_priv) >= 11) - icl_map_plls_to_ports(crtc, pipe_config, old_state); + if (pipe_config->shared_dpll) + intel_enable_shared_dpll(pipe_config); intel_encoders_pre_enable(crtc, pipe_config, old_state); - if (intel_crtc_has_dp_encoder(intel_crtc->config)) - intel_dp_set_m_n(intel_crtc, M1_N1); + if (intel_crtc_has_dp_encoder(pipe_config)) + intel_dp_set_m_n(pipe_config, M1_N1); if (!transcoder_is_dsi(cpu_transcoder)) - intel_set_pipe_timings(intel_crtc); + intel_set_pipe_timings(pipe_config); - intel_set_pipe_src_size(intel_crtc); + intel_set_pipe_src_size(pipe_config); if (cpu_transcoder != TRANSCODER_EDP && !transcoder_is_dsi(cpu_transcoder)) { I915_WRITE(PIPE_MULT(cpu_transcoder), - intel_crtc->config->pixel_multiplier - 1); + pipe_config->pixel_multiplier - 1); } - if (intel_crtc->config->has_pch_encoder) { - intel_cpu_transcoder_set_m_n(intel_crtc, - &intel_crtc->config->fdi_m_n, NULL); + if (pipe_config->has_pch_encoder) { + intel_cpu_transcoder_set_m_n(pipe_config, + &pipe_config->fdi_m_n, NULL); } if (!transcoder_is_dsi(cpu_transcoder)) - haswell_set_pipeconf(crtc); + haswell_set_pipeconf(pipe_config); - haswell_set_pipemisc(crtc); + haswell_set_pipemisc(pipe_config); intel_color_set_csc(&pipe_config->base); @@ -5764,14 +5758,14 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && - intel_crtc->config->pch_pfit.enabled; + pipe_config->pch_pfit.enabled; if (psl_clkgate_wa) glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); if (INTEL_GEN(dev_priv) >= 9) - skylake_pfit_enable(intel_crtc); + skylake_pfit_enable(pipe_config); else - ironlake_pfit_enable(intel_crtc); + ironlake_pfit_enable(pipe_config); /* * On ILK+ LUT must be loaded before the pipe is running but with @@ -5804,10 +5798,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, if (!transcoder_is_dsi(cpu_transcoder)) intel_enable_pipe(pipe_config); - if (intel_crtc->config->has_pch_encoder) + if (pipe_config->has_pch_encoder) lpt_pch_enable(old_intel_state, pipe_config); - if (intel_crtc_has_type(intel_crtc->config, 
INTEL_OUTPUT_DP_MST)) + if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) intel_ddi_set_vc_payload_alloc(pipe_config, true); assert_vblank_disabled(crtc); @@ -5829,15 +5823,15 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, } } -static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) +static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = crtc->pipe; + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; /* To avoid upsetting the power well on haswell only disable the pfit if * it's in use. The hw state code will make sure we get this right. */ - if (force || crtc->config->pch_pfit.enabled) { + if (old_crtc_state->pch_pfit.enabled) { I915_WRITE(PF_CTL(pipe), 0); I915_WRITE(PF_WIN_POS(pipe), 0); I915_WRITE(PF_WIN_SZ(pipe), 0); @@ -5868,14 +5862,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, intel_disable_pipe(old_crtc_state); - ironlake_pfit_disable(intel_crtc, false); + ironlake_pfit_disable(old_crtc_state); - if (intel_crtc->config->has_pch_encoder) + if (old_crtc_state->has_pch_encoder) ironlake_fdi_disable(crtc); intel_encoders_post_disable(crtc, old_crtc_state, old_state); - if (intel_crtc->config->has_pch_encoder) { + if (old_crtc_state->has_pch_encoder) { ironlake_disable_pch_transcoder(dev_priv, pipe); if (HAS_PCH_CPT(dev_priv)) { @@ -5926,24 +5920,24 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_disable_transcoder_func(old_crtc_state); + intel_dsc_disable(old_crtc_state); + if (INTEL_GEN(dev_priv) >= 9) skylake_scaler_disable(intel_crtc); else - ironlake_pfit_disable(intel_crtc, false); + ironlake_pfit_disable(old_crtc_state); intel_encoders_post_disable(crtc, old_crtc_state, old_state); - if (INTEL_GEN(dev_priv) >= 11) - icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state); + intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); } -static void i9xx_pfit_enable(struct intel_crtc *crtc) +static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc_state *pipe_config = crtc->config; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - if (!pipe_config->gmch_pfit.control) + if (!crtc_state->gmch_pfit.control) return; /* @@ -5953,8 +5947,8 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc) WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); assert_pipe_disabled(dev_priv, crtc->pipe); - I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios); - I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control); + I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios); + I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control); /* Border color in case we don't scale up to the full screen. Black by * default, change to something else for debugging. 
*/ @@ -6009,6 +6003,28 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port) } } +enum intel_display_power_domain +intel_aux_power_domain(struct intel_digital_port *dig_port) +{ + switch (dig_port->aux_ch) { + case AUX_CH_A: + return POWER_DOMAIN_AUX_A; + case AUX_CH_B: + return POWER_DOMAIN_AUX_B; + case AUX_CH_C: + return POWER_DOMAIN_AUX_C; + case AUX_CH_D: + return POWER_DOMAIN_AUX_D; + case AUX_CH_E: + return POWER_DOMAIN_AUX_E; + case AUX_CH_F: + return POWER_DOMAIN_AUX_F; + default: + MISSING_CASE(dig_port->aux_ch); + return POWER_DOMAIN_AUX_A; + } +} + static u64 get_crtc_power_domains(struct drm_crtc *crtc, struct intel_crtc_state *crtc_state) { @@ -6088,20 +6104,18 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, if (WARN_ON(intel_crtc->active)) return; - if (intel_crtc_has_dp_encoder(intel_crtc->config)) - intel_dp_set_m_n(intel_crtc, M1_N1); + if (intel_crtc_has_dp_encoder(pipe_config)) + intel_dp_set_m_n(pipe_config, M1_N1); - intel_set_pipe_timings(intel_crtc); - intel_set_pipe_src_size(intel_crtc); + intel_set_pipe_timings(pipe_config); + intel_set_pipe_src_size(pipe_config); if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { - struct drm_i915_private *dev_priv = to_i915(dev); - I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); I915_WRITE(CHV_CANVAS(pipe), 0); } - i9xx_set_pipeconf(intel_crtc); + i9xx_set_pipeconf(pipe_config); intel_color_set_csc(&pipe_config->base); @@ -6112,16 +6126,16 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); if (IS_CHERRYVIEW(dev_priv)) { - chv_prepare_pll(intel_crtc, intel_crtc->config); - chv_enable_pll(intel_crtc, intel_crtc->config); + chv_prepare_pll(intel_crtc, pipe_config); + chv_enable_pll(intel_crtc, pipe_config); } else { - vlv_prepare_pll(intel_crtc, intel_crtc->config); - vlv_enable_pll(intel_crtc, intel_crtc->config); + vlv_prepare_pll(intel_crtc, pipe_config); + vlv_enable_pll(intel_crtc, pipe_config); } intel_encoders_pre_enable(crtc, pipe_config, old_state); - i9xx_pfit_enable(intel_crtc); + i9xx_pfit_enable(pipe_config); intel_color_load_luts(&pipe_config->base); @@ -6135,13 +6149,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, intel_encoders_enable(crtc, pipe_config, old_state); } -static void i9xx_set_pll_dividers(struct intel_crtc *crtc) +static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0); - I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1); + I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0); + I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1); } static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, @@ -6158,15 +6172,15 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, if (WARN_ON(intel_crtc->active)) return; - i9xx_set_pll_dividers(intel_crtc); + i9xx_set_pll_dividers(pipe_config); - if (intel_crtc_has_dp_encoder(intel_crtc->config)) - intel_dp_set_m_n(intel_crtc, M1_N1); + if (intel_crtc_has_dp_encoder(pipe_config)) + intel_dp_set_m_n(pipe_config, M1_N1); - intel_set_pipe_timings(intel_crtc); - intel_set_pipe_src_size(intel_crtc); + intel_set_pipe_timings(pipe_config); + 
intel_set_pipe_src_size(pipe_config); - i9xx_set_pipeconf(intel_crtc); + i9xx_set_pipeconf(pipe_config); intel_crtc->active = true; @@ -6177,13 +6191,13 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, i9xx_enable_pll(intel_crtc, pipe_config); - i9xx_pfit_enable(intel_crtc); + i9xx_pfit_enable(pipe_config); intel_color_load_luts(&pipe_config->base); if (dev_priv->display.initial_watermarks != NULL) dev_priv->display.initial_watermarks(old_intel_state, - intel_crtc->config); + pipe_config); else intel_update_watermarks(intel_crtc); intel_enable_pipe(pipe_config); @@ -6194,12 +6208,12 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, intel_encoders_enable(crtc, pipe_config, old_state); } -static void i9xx_pfit_disable(struct intel_crtc *crtc) +static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - if (!crtc->config->gmch_pfit.control) + if (!old_crtc_state->gmch_pfit.control) return; assert_pipe_disabled(dev_priv, crtc->pipe); @@ -6232,17 +6246,17 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, intel_disable_pipe(old_crtc_state); - i9xx_pfit_disable(intel_crtc); + i9xx_pfit_disable(old_crtc_state); intel_encoders_post_disable(crtc, old_crtc_state, old_state); - if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) { + if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { if (IS_CHERRYVIEW(dev_priv)) chv_disable_pll(dev_priv, pipe); else if (IS_VALLEYVIEW(dev_priv)) vlv_disable_pll(dev_priv, pipe); else - i9xx_disable_pll(intel_crtc); + i9xx_disable_pll(old_crtc_state); } intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); @@ -6316,7 +6330,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, intel_fbc_disable(intel_crtc); intel_update_watermarks(intel_crtc); - intel_disable_shared_dpll(intel_crtc); + intel_disable_shared_dpll(to_intel_crtc_state(crtc->state)); domains = intel_crtc->enabled_power_domains; for_each_power_domain(domain, domains) @@ -6394,66 +6408,6 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state, } } -int intel_connector_init(struct intel_connector *connector) -{ - struct intel_digital_connector_state *conn_state; - - /* - * Allocate enough memory to hold intel_digital_connector_state, - * This might be a few bytes too many, but for connectors that don't - * need it we'll free the state and allocate a smaller one on the first - * successful commit anyway. - */ - conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL); - if (!conn_state) - return -ENOMEM; - - __drm_atomic_helper_connector_reset(&connector->base, - &conn_state->base); - - return 0; -} - -struct intel_connector *intel_connector_alloc(void) -{ - struct intel_connector *connector; - - connector = kzalloc(sizeof *connector, GFP_KERNEL); - if (!connector) - return NULL; - - if (intel_connector_init(connector) < 0) { - kfree(connector); - return NULL; - } - - return connector; -} - -/* - * Free the bits allocated by intel_connector_alloc. - * This should only be used after intel_connector_alloc has returned - * successfully, and before drm_connector_init returns successfully.
- * Otherwise the destroy callbacks for the connector and the state should - * take care of proper cleanup/free - */ -void intel_connector_free(struct intel_connector *connector) -{ - kfree(to_intel_digital_connector_state(connector->base.state)); - kfree(connector); -} - -/* Simple connector->get_hw_state implementation for encoders that support only - * one connector and no cloning and hence the encoder state determines the state - * of the connector. */ -bool intel_connector_get_hw_state(struct intel_connector *connector) -{ - enum pipe pipe = 0; - struct intel_encoder *encoder = connector->encoder; - - return encoder->get_hw_state(encoder, &pipe); -} - static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) { if (crtc_state->base.enable && crtc_state->has_pch_encoder) @@ -6564,6 +6518,9 @@ retry: link_bw, &pipe_config->fdi_m_n, false); ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); + if (ret == -EDEADLK) + return ret; + if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { pipe_config->pipe_bpp -= 2*3; DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", @@ -6720,7 +6677,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, return -EINVAL; } - if (pipe_config->ycbcr420 && pipe_config->base.ctm) { + if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || + pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) && + pipe_config->base.ctm) { /* * There is only one pipe CSC unit per pipe, and we need that * for output conversion from RGB->YCBCR. So if CTM is already @@ -6795,7 +6754,7 @@ static void compute_m_n(unsigned int m, unsigned int n, } void -intel_link_compute_m_n(int bits_per_pixel, int nlanes, +intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, bool constant_n) @@ -6886,12 +6845,12 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); } -static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, - struct intel_link_m_n *m_n) +static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, + const struct intel_link_m_n *m_n) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = crtc->pipe; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); @@ -6899,25 +6858,39 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); } -static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, - struct intel_link_m_n *m_n, - struct intel_link_m_n *m2_n2) +static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, + enum transcoder transcoder) { + if (IS_HASWELL(dev_priv)) + return transcoder == TRANSCODER_EDP; + + /* + * Strictly speaking some registers are available before + * gen7, but we only support DRRS on gen7+ + */ + return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv); +} + +static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, + const struct intel_link_m_n *m_n, + const struct intel_link_m_n *m2_n2) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int pipe = crtc->pipe; - 
enum transcoder transcoder = crtc->config->cpu_transcoder; + enum pipe pipe = crtc->pipe; + enum transcoder transcoder = crtc_state->cpu_transcoder; if (INTEL_GEN(dev_priv) >= 5) { I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); - /* M2_N2 registers to be set only for gen < 8 (M2_N2 available - * for gen < 8) and if DRRS is supported (to make sure the - * registers are not unnecessarily accessed). + /* + * M2_N2 registers are set only if DRRS is supported + * (to make sure the registers are not unnecessarily accessed). */ - if (m2_n2 && (IS_CHERRYVIEW(dev_priv) || - INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) { + if (m2_n2 && crtc_state->has_drrs && + transcoder_has_m2_n2(dev_priv, transcoder)) { I915_WRITE(PIPE_DATA_M2(transcoder), TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); @@ -6932,29 +6905,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, } } -void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n) +void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n) { - struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; + const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; if (m_n == M1_N1) { - dp_m_n = &crtc->config->dp_m_n; - dp_m2_n2 = &crtc->config->dp_m2_n2; + dp_m_n = &crtc_state->dp_m_n; + dp_m2_n2 = &crtc_state->dp_m2_n2; } else if (m_n == M2_N2) { /* * M2_N2 registers are not supported. Hence m2_n2 divider value * needs to be programmed into M1_N1. */ - dp_m_n = &crtc->config->dp_m2_n2; + dp_m_n = &crtc_state->dp_m2_n2; } else { DRM_ERROR("Unsupported divider value\n"); return; } - if (crtc->config->has_pch_encoder) - intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n); + if (crtc_state->has_pch_encoder) + intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n); else - intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); + intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2); } static void vlv_compute_dpll(struct intel_crtc *crtc, @@ -7053,8 +7026,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, /* Set HBR and RBR LPF coefficients */ if (pipe_config->port_clock == 162000 || - intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) || - intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) + intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) || + intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 0x009f0003); else @@ -7081,7 +7054,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); coreclk = (coreclk & 0x0000ff00) | 0x01c00000; - if (intel_crtc_has_dp_encoder(crtc->config)) + if (intel_crtc_has_dp_encoder(pipe_config)) coreclk |= 0x01000000; vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); @@ -7360,12 +7333,13 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, crtc_state->dpll_hw_state.dpll = dpll; } -static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) +static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - enum pipe pipe = intel_crtc->pipe; - enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; - const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; + struct intel_crtc *crtc 
= to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; uint32_t crtc_vtotal, crtc_vblank_end; int vsyncshift = 0; @@ -7379,7 +7353,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) crtc_vtotal -= 1; crtc_vblank_end -= 1; - if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; else vsyncshift = adjusted_mode->crtc_hsync_start - @@ -7421,18 +7395,18 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) } -static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc) +static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe = intel_crtc->pipe; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; /* pipesrc controls the size that is scaled from, which should * always be the user's requested size. */ I915_WRITE(PIPESRC(pipe), - ((intel_crtc->config->pipe_src_w - 1) << 16) | - (intel_crtc->config->pipe_src_h - 1)); + ((crtc_state->pipe_src_w - 1) << 16) | + (crtc_state->pipe_src_h - 1)); } static void intel_get_pipe_timings(struct intel_crtc *crtc, @@ -7508,29 +7482,30 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, drm_mode_set_name(mode); } -static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) +static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); uint32_t pipeconf; pipeconf = 0; /* we keep both pipes enabled on 830 */ if (IS_I830(dev_priv)) - pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE; + pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE; - if (intel_crtc->config->double_wide) + if (crtc_state->double_wide) pipeconf |= PIPECONF_DOUBLE_WIDE; /* only g4x and later have fancy bpc/dither controls */ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { /* Bspec claims that we can't use dithering for 30bpp pipes. 
*/ - if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30) + if (crtc_state->dither && crtc_state->pipe_bpp != 30) pipeconf |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; - switch (intel_crtc->config->pipe_bpp) { + switch (crtc_state->pipe_bpp) { case 18: pipeconf |= PIPECONF_6BPC; break; @@ -7546,9 +7521,9 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) } } - if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { + if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { if (INTEL_GEN(dev_priv) < 4 || - intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; else pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; @@ -7556,11 +7531,11 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) pipeconf |= PIPECONF_PROGRESSIVE; if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - intel_crtc->config->limited_color_range) + crtc_state->limited_color_range) pipeconf |= PIPECONF_COLOR_RANGE_SELECT; - I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); - POSTING_READ(PIPECONF(intel_crtc->pipe)); + I915_WRITE(PIPECONF(crtc->pipe), pipeconf); + POSTING_READ(PIPECONF(crtc->pipe)); } static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, @@ -7843,8 +7818,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, plane_config->tiling = I915_TILING_X; fb->modifier = I915_FORMAT_MOD_X_TILED; } + + if (val & DISPPLANE_ROTATE_180) + plane_config->rotation = DRM_MODE_ROTATE_180; } + if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && + val & DISPPLANE_MIRROR) + plane_config->rotation |= DRM_MODE_REFLECT_X; + pixel_format = val & DISPPLANE_PIXFORMAT_MASK; fourcc = i9xx_format_to_fourcc(pixel_format); fb->format = drm_format_info(fourcc); @@ -7916,6 +7898,49 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); } +static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB; + + pipe_config->lspcon_downsampling = false; + + if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { + u32 tmp = I915_READ(PIPEMISC(crtc->pipe)); + + if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { + bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE; + bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND; + + if (ycbcr420_enabled) { + /* We support 4:2:0 in full blend mode only */ + if (!blend) + output = INTEL_OUTPUT_FORMAT_INVALID; + else if (!(IS_GEMINILAKE(dev_priv) || + INTEL_GEN(dev_priv) >= 10)) + output = INTEL_OUTPUT_FORMAT_INVALID; + else + output = INTEL_OUTPUT_FORMAT_YCBCR420; + } else { + /* + * Currently there is no interface defined to + * check user preference between RGB/YCBCR444 + * or YCBCR420. So the only possible case for + * YCBCR444 usage is driving YCBCR420 output + * with LSPCON, when pipe is configured for + * YCBCR444 output and LSPCON takes care of + * downsampling it. 
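+ * + * (Editor's summary of this readout, not part of the original patch: + * COLORSPACE_YUV clear -> RGB; YUV + YUV420_ENABLE + FULL_BLEND on + * glk/gen10+ -> YCBCR420; YUV + YUV420_ENABLE otherwise -> INVALID; + * YUV without YUV420_ENABLE -> YCBCR444 with lspcon_downsampling set.)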
+ */ + pipe_config->lspcon_downsampling = true; + output = INTEL_OUTPUT_FORMAT_YCBCR444; + } + } + } + + pipe_config->output_format = output; +} + static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { @@ -7928,6 +7953,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = NULL; @@ -8459,16 +8485,16 @@ void intel_init_pch_refclk(struct drm_i915_private *dev_priv) lpt_init_pch_refclk(dev_priv); } -static void ironlake_set_pipeconf(struct drm_crtc *crtc) +static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; uint32_t val; val = 0; - switch (intel_crtc->config->pipe_bpp) { + switch (crtc_state->pipe_bpp) { case 18: val |= PIPECONF_6BPC; break; @@ -8486,32 +8512,32 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc) BUG(); } - if (intel_crtc->config->dither) + if (crtc_state->dither) val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); - if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) + if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) val |= PIPECONF_INTERLACED_ILK; else val |= PIPECONF_PROGRESSIVE; - if (intel_crtc->config->limited_color_range) + if (crtc_state->limited_color_range) val |= PIPECONF_COLOR_RANGE_SELECT; I915_WRITE(PIPECONF(pipe), val); POSTING_READ(PIPECONF(pipe)); } -static void haswell_set_pipeconf(struct drm_crtc *crtc) +static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 val = 0; - if (IS_HASWELL(dev_priv) && intel_crtc->config->dither) + if (IS_HASWELL(dev_priv) && crtc_state->dither) val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); - if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) + if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) val |= PIPECONF_INTERLACED_ILK; else val |= PIPECONF_PROGRESSIVE; @@ -8520,16 +8546,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc) POSTING_READ(PIPECONF(cpu_transcoder)); } -static void haswell_set_pipemisc(struct drm_crtc *crtc) +static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *config = intel_crtc->config; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { u32 val = 0; - switch (intel_crtc->config->pipe_bpp) { + switch (crtc_state->pipe_bpp) { case 18: val |= PIPEMISC_DITHER_6_BPC; break; @@ -8547,14 +8572,16 @@ static 
void haswell_set_pipemisc(struct drm_crtc *crtc) BUG(); } - if (intel_crtc->config->dither) + if (crtc_state->dither) val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; - if (config->ycbcr420) { - val |= PIPEMISC_OUTPUT_COLORSPACE_YUV | - PIPEMISC_YUV420_ENABLE | + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || + crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) + val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; + + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) + val |= PIPEMISC_YUV420_ENABLE | PIPEMISC_YUV420_MODE_FULL_BLEND; - } I915_WRITE(PIPEMISC(intel_crtc->pipe), val); } @@ -8765,12 +8792,8 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; - /* Read M2_N2 registers only for gen < 8 (M2_N2 available for - * gen < 8) and if DRRS is supported (to make sure the - * registers are not unnecessarily read). - */ - if (m2_n2 && INTEL_GEN(dev_priv) < 8 && - crtc->config->has_drrs) { + + if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) @@ -8913,6 +8936,29 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, goto error; } + /* + * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr + * while i915 HW rotation is clockwise, which is why the values are + * swapped here. + */ + switch (val & PLANE_CTL_ROTATE_MASK) { + case PLANE_CTL_ROTATE_0: + plane_config->rotation = DRM_MODE_ROTATE_0; + break; + case PLANE_CTL_ROTATE_90: + plane_config->rotation = DRM_MODE_ROTATE_270; + break; + case PLANE_CTL_ROTATE_180: + plane_config->rotation = DRM_MODE_ROTATE_180; + break; + case PLANE_CTL_ROTATE_270: + plane_config->rotation = DRM_MODE_ROTATE_90; + break; + } + + if (INTEL_GEN(dev_priv) >= 10 && + val & PLANE_CTL_FLIP_HORIZONTAL) + plane_config->rotation |= DRM_MODE_REFLECT_X; + base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000; plane_config->base = base; @@ -8923,7 +8969,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb->width = ((val >> 0) & 0x1fff) + 1; val = I915_READ(PLANE_STRIDE(pipe, plane_id)); - stride_mult = intel_fb_stride_alignment(fb, 0); + stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); fb->pitches[0] = (val & 0x3ff) * stride_mult; aligned_height = intel_fb_align_height(fb, 0, fb->height); @@ -8979,6 +9025,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = NULL; @@ -9286,10 +9333,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv) static int haswell_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state); - if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) { + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) || + IS_ICELAKE(dev_priv)) { struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); @@ -9327,30 +9376,17 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, u32 temp; /* TODO: TBT pll not implemented.
*/ - switch (port) { - case PORT_A: - case PORT_B: + if (intel_port_is_combophy(dev_priv, port)) { temp = I915_READ(DPCLKA_CFGCR0_ICL) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); - if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1)) + if (WARN_ON(!intel_dpll_is_combophy(id))) return; - break; - case PORT_C: - id = DPLL_ID_ICL_MGPLL1; - break; - case PORT_D: - id = DPLL_ID_ICL_MGPLL2; - break; - case PORT_E: - id = DPLL_ID_ICL_MGPLL3; - break; - case PORT_F: - id = DPLL_ID_ICL_MGPLL4; - break; - default: - MISSING_CASE(port); + } else if (intel_port_is_tc(dev_priv, port)) { + id = icl_port_to_mg_pll_id(port); + } else { + WARN(1, "Invalid port %x\n", port); return; } @@ -9440,11 +9476,18 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; + unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP); + unsigned long enabled_panel_transcoders = 0; + enum transcoder panel_transcoder; u32 tmp; + if (IS_ICELAKE(dev_priv)) + panel_transcoder_mask |= + BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); + /* * The pipe->transcoder mapping is fixed with the exception of the eDP - * transcoder handled below. + * and DSI transcoders handled below. */ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; @@ -9452,29 +9495,49 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, * XXX: Do intel_display_power_get_if_enabled before reading this (for * consistency and less surprising code; it's in always on power). */ - tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); - if (tmp & TRANS_DDI_FUNC_ENABLE) { - enum pipe trans_edp_pipe; + for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) { + enum pipe trans_pipe; + + tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder)); + if (!(tmp & TRANS_DDI_FUNC_ENABLE)) + continue; + + /* + * Log all enabled ones, only use the first one. + * + * FIXME: This won't work for two separate DSI displays. 
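+ * + * (Editor's walk-through, not part of the original patch: with + * dual-link DSI both TRANSCODER_DSI_0 and TRANSCODER_DSI_1 report + * TRANS_DDI_FUNC_ENABLE; only the first enabled transcoder is allowed + * to claim the pipe mapping, while every later hit merely accumulates + * into enabled_panel_transcoders, which the WARN_ON further down + * checks against the valid eDP/DSI combinations.)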
+ */ + enabled_panel_transcoders |= BIT(panel_transcoder); + if (enabled_panel_transcoders != BIT(panel_transcoder)) + continue; + switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { default: - WARN(1, "unknown pipe linked to edp transcoder\n"); + WARN(1, "unknown pipe linked to transcoder %s\n", + transcoder_name(panel_transcoder)); /* fall through */ case TRANS_DDI_EDP_INPUT_A_ONOFF: case TRANS_DDI_EDP_INPUT_A_ON: - trans_edp_pipe = PIPE_A; + trans_pipe = PIPE_A; break; case TRANS_DDI_EDP_INPUT_B_ONOFF: - trans_edp_pipe = PIPE_B; + trans_pipe = PIPE_B; break; case TRANS_DDI_EDP_INPUT_C_ONOFF: - trans_edp_pipe = PIPE_C; + trans_pipe = PIPE_C; break; } - if (trans_edp_pipe == crtc->pipe) - pipe_config->cpu_transcoder = TRANSCODER_EDP; + if (trans_pipe == crtc->pipe) + pipe_config->cpu_transcoder = panel_transcoder; } + /* + * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1 + */ + WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && + enabled_panel_transcoders != BIT(TRANSCODER_EDP)); + power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; @@ -9607,33 +9670,18 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, if (!active) goto out; - if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { + if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || + IS_ICELAKE(dev_priv)) { haswell_get_ddi_port_state(crtc, pipe_config); intel_get_pipe_timings(crtc, pipe_config); } intel_get_pipe_src_size(crtc, pipe_config); + intel_get_crtc_ycbcr_config(crtc, pipe_config); pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; - if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { - u32 tmp = I915_READ(PIPEMISC(crtc->pipe)); - bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV; - - if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) { - bool blend_mode_420 = tmp & - PIPEMISC_YUV420_MODE_FULL_BLEND; - - pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE; - if (pipe_config->ycbcr420 != clrspace_yuv || - pipe_config->ycbcr420 != blend_mode_420) - DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp); - } else if (clrspace_yuv) { - DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n"); - } - } - power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { power_domain_mask |= BIT_ULL(power_domain); @@ -9679,7 +9727,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state) const struct drm_i915_gem_object *obj = intel_fb_obj(fb); u32 base; - if (INTEL_INFO(dev_priv)->cursor_needs_physical) + if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) base = obj->phys_handle->busaddr; else base = intel_plane_ggtt_offset(plane_state); @@ -9902,15 +9950,13 @@ static void i845_update_cursor(struct intel_plane *plane, I915_WRITE_FW(CURPOS(PIPE_A), pos); } - POSTING_READ_FW(CURCNTR(PIPE_A)); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void i845_disable_cursor(struct intel_plane *plane, - struct intel_crtc *crtc) + const struct intel_crtc_state *crtc_state) { - i845_update_cursor(plane, NULL, NULL); + i845_update_cursor(plane, crtc_state, NULL); } static bool i845_cursor_get_hw_state(struct intel_plane *plane, @@ -10101,8 +10147,8 @@ static void i9xx_update_cursor(struct intel_plane *plane, * On some platforms writing CURCNTR first will also * cause CURPOS to be armed by the CURBASE write. * Without the CURCNTR write the CURPOS write would - * arm itself. 
Thus we always start the full update - * with a CURCNTR write. + * arm itself. Thus we always update CURCNTR before + * CURPOS. * * On other platforms CURPOS always requires the * CURBASE write to arm the update. Additionally * some platforms have non-armed registers which * if written while the cursor is enabled will * produce garbage, like old contents of the * cursor that doesn't appear to move, or even change * shape. Thus we always write CURBASE. * - * CURCNTR and CUR_FBC_CTL are always - * armed by the CURBASE write only. + * The other registers are armed by the CURBASE write + * except when the plane is getting enabled at which time + * the CURCNTR write arms the update. */ + + if (INTEL_GEN(dev_priv) >= 9) + skl_write_cursor_wm(plane, crtc_state); + if (plane->cursor.base != base || plane->cursor.size != fbc_ctl || plane->cursor.cntl != cntl) { - I915_WRITE_FW(CURCNTR(pipe), cntl); if (HAS_CUR_FBC(dev_priv)) I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl); + I915_WRITE_FW(CURCNTR(pipe), cntl); I915_WRITE_FW(CURPOS(pipe), pos); I915_WRITE_FW(CURBASE(pipe), base); @@ -10132,15 +10183,13 @@ static void i9xx_update_cursor(struct intel_plane *plane, I915_WRITE_FW(CURBASE(pipe), base); } - POSTING_READ_FW(CURBASE(pipe)); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void i9xx_disable_cursor(struct intel_plane *plane, - struct intel_crtc *crtc) + const struct intel_crtc_state *crtc_state) { - i9xx_update_cursor(plane, NULL, NULL); + i9xx_update_cursor(plane, crtc_state, NULL); } static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, @@ -10738,14 +10787,40 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat pipe_config->fb_bits |= plane->frontbuffer_bit; /* + * ILK/SNB DVSACNTR/Sprite Enable + * IVB SPR_CTL/Sprite Enable + * "When in Self Refresh Big FIFO mode, a write to enable the + * plane will be internally buffered and delayed while Big FIFO + * mode is exiting." + * + * Which means that enabling the sprite can take an extra frame + * when we start in big FIFO mode (LP1+). Thus we need to drop + * down to LP0 and wait for vblank in order to make sure the + * sprite gets enabled on the next vblank after the register write. + * Doing otherwise would risk enabling the sprite one frame after + * we've already signalled flip completion. We can resume LP1+ + * once the sprite has been enabled. + * + * + * WaCxSRDisabledForSpriteScaling:ivb + * IVB SPR_SCALE/Scaling Enable + * "Low Power watermarks must be disabled for at least one + * frame before enabling sprite scaling, and kept disabled + * until sprite scaling is disabled." + * + * ILK/SNB DVSASCALE/Scaling Enable + * "When in Self Refresh Big FIFO mode, scaling enable will be + * masked off while Big FIFO mode is exiting." * - * cstate->update_wm was already set above, so this flag will - * take effect when we commit and program watermarks. + * Despite the w/a only being listed for IVB we assume that + * the ILK/SNB note has similar ramifications, hence we apply + * the w/a on all three platforms.
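+ * + * (Editor's note, not part of the original patch: the two quoted + * notes combine into the condition below -- on ILK/SNB/IVB sprite 0, + * LP1+ watermarks are dropped either when the sprite is being turned + * on, or when scaling is being enabled on an already-visible sprite.)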
*/ - if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) && - needs_scaling(to_intel_plane_state(plane_state)) && - !needs_scaling(old_plane_state)) + if (plane->id == PLANE_SPRITE0 && + (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) || + IS_IVYBRIDGE(dev_priv)) && + (turn_on || (!needs_scaling(old_plane_state) && + needs_scaling(to_intel_plane_state(plane_state))))) pipe_config->disable_lp_wm = true; return 0; @@ -10781,6 +10856,101 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state, return true; } +static int icl_add_linked_planes(struct intel_atomic_state *state) +{ + struct intel_plane *plane, *linked; + struct intel_plane_state *plane_state, *linked_plane_state; + int i; + + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + linked = plane_state->linked_plane; + + if (!linked) + continue; + + linked_plane_state = intel_atomic_get_plane_state(state, linked); + if (IS_ERR(linked_plane_state)) + return PTR_ERR(linked_plane_state); + + WARN_ON(linked_plane_state->linked_plane != plane); + WARN_ON(linked_plane_state->slave == plane_state->slave); + } + + return 0; +} + +static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state); + struct intel_plane *plane, *linked; + struct intel_plane_state *plane_state; + int i; + + if (INTEL_GEN(dev_priv) < 11) + return 0; + + /* + * Destroy all old plane links and make the slave plane invisible + * in the crtc_state->active_planes mask. + */ + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + if (plane->pipe != crtc->pipe || !plane_state->linked_plane) + continue; + + plane_state->linked_plane = NULL; + if (plane_state->slave && !plane_state->base.visible) { + crtc_state->active_planes &= ~BIT(plane->id); + crtc_state->update_planes |= BIT(plane->id); + } + + plane_state->slave = false; + } + + if (!crtc_state->nv12_planes) + return 0; + + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + struct intel_plane_state *linked_state = NULL; + + if (plane->pipe != crtc->pipe || + !(crtc_state->nv12_planes & BIT(plane->id))) + continue; + + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { + if (!icl_is_nv12_y_plane(linked->id)) + continue; + + if (crtc_state->active_planes & BIT(linked->id)) + continue; + + linked_state = intel_atomic_get_plane_state(state, linked); + if (IS_ERR(linked_state)) + return PTR_ERR(linked_state); + + break; + } + + if (!linked_state) { + DRM_DEBUG_KMS("Need %d free Y planes for NV12\n", + hweight8(crtc_state->nv12_planes)); + + return -EINVAL; + } + + plane_state->linked_plane = linked; + + linked_state->slave = true; + linked_state->linked_plane = plane; + crtc_state->active_planes |= BIT(linked->id); + crtc_state->update_planes |= BIT(linked->id); + DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); + } + + return 0; +} + static int intel_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) { @@ -10789,7 +10959,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); - struct drm_atomic_state *state = crtc_state->state; int ret; bool mode_changed = needs_modeset(crtc_state); @@ -10826,8 +10995,7 @@ static int 
intel_crtc_atomic_check(struct drm_crtc *crtc, } } - if (dev_priv->display.compute_intermediate_wm && - !to_intel_atomic_state(state)->skip_intermediate_wm) { + if (dev_priv->display.compute_intermediate_wm) { if (WARN_ON(!dev_priv->display.compute_pipe_wm)) return 0; @@ -10843,9 +11011,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); return ret; } - } else if (dev_priv->display.compute_intermediate_wm) { - if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) - pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal; } if (INTEL_GEN(dev_priv) >= 9) { @@ -10853,6 +11018,8 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, ret = skl_update_scaler_crtc(pipe_config); if (!ret) + ret = icl_check_nv12_planes(pipe_config); + if (!ret) ret = skl_check_pipe_max_pixel_rate(intel_crtc, pipe_config); if (!ret) @@ -10867,8 +11034,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs intel_helper_funcs = { - .atomic_begin = intel_begin_crtc_commit, - .atomic_flush = intel_finish_crtc_commit, .atomic_check = intel_crtc_atomic_check, }; @@ -10897,30 +11062,42 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) drm_connector_list_iter_end(&conn_iter); } -static void -connected_sink_compute_bpp(struct intel_connector *connector, - struct intel_crtc_state *pipe_config) +static int +compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, + struct intel_crtc_state *pipe_config) { - const struct drm_display_info *info = &connector->base.display_info; - int bpp = pipe_config->pipe_bpp; - - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n", - connector->base.base.id, - connector->base.name); + struct drm_connector *connector = conn_state->connector; + const struct drm_display_info *info = &connector->display_info; + int bpp; - /* Don't use an invalid EDID bpc value */ - if (info->bpc != 0 && info->bpc * 3 < bpp) { - DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", - bpp, info->bpc * 3); - pipe_config->pipe_bpp = info->bpc * 3; + switch (conn_state->max_bpc) { + case 6 ... 7: + bpp = 6 * 3; + break; + case 8 ... 9: + bpp = 8 * 3; + break; + case 10 ... 
11: + bpp = 10 * 3; + break; + case 12: + bpp = 12 * 3; + break; + default: + return -EINVAL; } - /* Clamp bpp to 8 on screens without EDID 1.4 */ - if (info->bpc == 0 && bpp > 24) { - DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", - bpp); - pipe_config->pipe_bpp = 24; + if (bpp < pipe_config->pipe_bpp) { + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of " + "EDID bpp %d, requested bpp %d, max platform bpp %d\n", + connector->base.id, connector->name, + bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc, + pipe_config->pipe_bpp); + + pipe_config->pipe_bpp = bpp; } + + return 0; } static int @@ -10928,7 +11105,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct drm_atomic_state *state; + struct drm_atomic_state *state = pipe_config->base.state; struct drm_connector *connector; struct drm_connector_state *connector_state; int bpp, i; @@ -10941,21 +11118,21 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, else bpp = 8*3; - pipe_config->pipe_bpp = bpp; - state = pipe_config->base.state; - - /* Clamp display bpp to EDID value */ + /* Clamp display bpp to connector max bpp */ for_each_new_connector_in_state(state, connector, connector_state, i) { + int ret; + if (connector_state->crtc != &crtc->base) continue; - connected_sink_compute_bpp(to_intel_connector(connector), - pipe_config); + ret = compute_sink_pipe_bpp(connector_state, pipe_config); + if (ret) + return ret; } - return bpp; + return 0; } static void intel_dump_crtc_timings(const struct drm_display_mode *mode) @@ -11025,6 +11202,20 @@ static void snprintf_output_types(char *buf, size_t len, WARN_ON_ONCE(output_types != 0); } +static const char * const output_format_str[] = { + [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid", + [INTEL_OUTPUT_FORMAT_RGB] = "RGB", + [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", + [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", +}; + +static const char *output_formats(enum intel_output_format format) +{ + if (format >= ARRAY_SIZE(output_format_str)) + format = INTEL_OUTPUT_FORMAT_INVALID; + return output_format_str[format]; +} + static void intel_dump_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config, const char *context) @@ -11044,6 +11235,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, DRM_DEBUG_KMS("output_types: %s (0x%x)\n", buf, pipe_config->output_types); + DRM_DEBUG_KMS("output format: %s\n", + output_formats(pipe_config->output_format)); + DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", transcoder_name(pipe_config->cpu_transcoder), pipe_config->pipe_bpp, pipe_config->dither); @@ -11053,9 +11247,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, pipe_config->fdi_lanes, &pipe_config->fdi_m_n); - if (pipe_config->ycbcr420) - DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n"); - if (intel_crtc_has_dp_encoder(pipe_config)) { intel_dump_m_n_config(pipe_config, "dp m_n", pipe_config->lane_count, &pipe_config->dp_m_n); @@ -11244,7 +11435,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, struct intel_encoder *encoder; struct drm_connector *connector; struct drm_connector_state *connector_state; - int base_bpp, ret = -EINVAL; + int base_bpp, ret; int i; bool retry = true; @@ -11266,10 +11457,12 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; - base_bpp = 
compute_baseline_pipe_bpp(to_intel_crtc(crtc), - pipe_config); - if (base_bpp < 0) - goto fail; + ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc), + pipe_config); + if (ret) + return ret; + + base_bpp = pipe_config->pipe_bpp; /* * Determine the real pipe dimensions. Note that stereo modes can @@ -11291,7 +11484,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); - goto fail; + return -EINVAL; } /* @@ -11327,7 +11520,7 @@ encoder_retry: if (!(encoder->compute_config(encoder, pipe_config, connector_state))) { DRM_DEBUG_KMS("Encoder config failure\n"); - goto fail; + return -EINVAL; } } @@ -11338,16 +11531,16 @@ encoder_retry: * pipe_config->pixel_multiplier; ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); + if (ret == -EDEADLK) + return ret; if (ret < 0) { DRM_DEBUG_KMS("CRTC fixup failed\n"); - goto fail; + return ret; } if (ret == RETRY) { - if (WARN(!retry, "loop in pipe configuration computation\n")) { - ret = -EINVAL; - goto fail; - } + if (WARN(!retry, "loop in pipe configuration computation\n")) + return -EINVAL; DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); retry = false; @@ -11363,8 +11556,7 @@ encoder_retry: DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", base_bpp, pipe_config->pipe_bpp, pipe_config->dither); -fail: - return ret; + return 0; } static bool intel_fuzzy_clock_check(int clock1, int clock2) @@ -11633,6 +11825,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); PIPE_CONF_CHECK_I(pixel_multiplier); + PIPE_CONF_CHECK_I(output_format); PIPE_CONF_CHECK_BOOL(has_hdmi_sink); if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) @@ -11641,7 +11834,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, PIPE_CONF_CHECK_BOOL(hdmi_scrambling); PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe); - PIPE_CONF_CHECK_BOOL(ycbcr420); PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); @@ -11763,6 +11955,8 @@ static void verify_wm_state(struct drm_crtc *crtc, struct skl_pipe_wm hw_wm, *sw_wm; struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; + struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES]; + struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); const enum pipe pipe = intel_crtc->pipe; int plane, level, max_level = ilk_wm_max_level(dev_priv); @@ -11773,6 +11967,8 @@ static void verify_wm_state(struct drm_crtc *crtc, skl_pipe_wm_get_hw_state(crtc, &hw_wm); sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal; + skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv); + skl_ddb_get_hw_state(dev_priv, &hw_ddb); sw_ddb = &dev_priv->wm.skl_hw.ddb; @@ -11815,8 +12011,8 @@ static void verify_wm_state(struct drm_crtc *crtc, } /* DDB */ - hw_ddb_entry = &hw_ddb.plane[pipe][plane]; - sw_ddb_entry = &sw_ddb->plane[pipe][plane]; + hw_ddb_entry = &hw_ddb_y[plane]; + sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", @@ -11865,8 +12061,8 @@ static void verify_wm_state(struct drm_crtc *crtc, } /* DDB */ - hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR]; - sw_ddb_entry = 
&sw_ddb->plane[pipe][PLANE_CURSOR]; + hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR]; + sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", @@ -12150,8 +12346,9 @@ intel_modeset_verify_disabled(struct drm_device *dev, verify_disabled_dpll_state(dev); } -static void update_scanline_offset(struct intel_crtc *crtc) +static void update_scanline_offset(const struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); /* @@ -12182,7 +12379,7 @@ static void update_scanline_offset(struct intel_crtc *crtc) * answer that's slightly in the future. */ if (IS_GEN2(dev_priv)) { - const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; int vtotal; vtotal = adjusted_mode->crtc_vtotal; @@ -12191,7 +12388,7 @@ static void update_scanline_offset(struct intel_crtc *crtc) crtc->scanline_offset = vtotal - 1; } else if (HAS_DDI(dev_priv) && - intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) { + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { crtc->scanline_offset = 2; } else crtc->scanline_offset = 1; @@ -12474,6 +12671,8 @@ static int intel_atomic_check(struct drm_device *dev, } ret = intel_modeset_pipe_config(crtc, pipe_config); + if (ret == -EDEADLK) + return ret; if (ret) { intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, "[failed]"); @@ -12505,6 +12704,10 @@ static int intel_atomic_check(struct drm_device *dev, intel_state->cdclk.logical = dev_priv->cdclk.logical; } + ret = icl_add_linked_planes(intel_state); + if (ret) + return ret; + ret = drm_atomic_helper_check_planes(dev, state); if (ret) return ret; @@ -12544,7 +12747,7 @@ static void intel_update_crtc(struct drm_crtc *crtc, to_intel_plane(crtc->primary)); if (modeset) { - update_scanline_offset(intel_crtc); + update_scanline_offset(pipe_config); dev_priv->display.crtc_enable(pipe_config, state); /* vblanks work again, re-enable pipe CRC. */ @@ -12557,7 +12760,14 @@ static void intel_update_crtc(struct drm_crtc *crtc, if (new_plane_state) intel_fbc_enable(intel_crtc, pipe_config, new_plane_state); - drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); + intel_begin_crtc_commit(crtc, old_crtc_state); + + if (INTEL_GEN(dev_priv) >= 9) + skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc); + else + i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc); + + intel_finish_crtc_commit(crtc, old_crtc_state); } static void intel_update_crtcs(struct drm_atomic_state *state) @@ -12589,13 +12799,12 @@ static void skl_update_crtcs(struct drm_atomic_state *state) int i; u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; u8 required_slices = intel_state->wm_results.ddb.enabled_slices; - - const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {}; + struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) /* ignore allocations for crtcs that have been turned off.
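* A made-up example of why the old allocations are collected here:
* if the DDB moves from
*
*   old: pipe A = [0, 512), pipe B = [512, 1024)
*   new: pipe A = [0, 256), pipe B = [256, 1024)
*
* then pipe B must not be flushed until pipe A's shrunk allocation
* has actually taken effect, or both pipes would briefly claim
* [256, 512); the overlap check below enforces that ordering.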
*/ if (new_crtc_state->active) - entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb; + entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb; /* If 2nd DBuf slice required, enable it here */ if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) @@ -12621,14 +12830,13 @@ static void skl_update_crtcs(struct drm_atomic_state *state) if (updated & cmask || !cstate->base.active) continue; - if (skl_ddb_allocation_overlaps(dev_priv, + if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb, entries, - &cstate->wm.skl.ddb, - i)) + INTEL_INFO(dev_priv)->num_pipes, i)) continue; updated |= cmask; - entries[i] = &cstate->wm.skl.ddb; + entries[i] = cstate->wm.skl.ddb; /* * If this is an already active pipe, its DDB changed, @@ -12718,8 +12926,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state; struct drm_crtc *crtc; - struct intel_crtc_state *intel_cstate; + struct intel_crtc *intel_crtc; u64 put_domains[I915_MAX_PIPES] = {}; int i; @@ -12731,24 +12940,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + old_intel_crtc_state = to_intel_crtc_state(old_crtc_state); + new_intel_crtc_state = to_intel_crtc_state(new_crtc_state); + intel_crtc = to_intel_crtc(crtc); if (needs_modeset(new_crtc_state) || to_intel_crtc_state(new_crtc_state)->update_pipe) { - put_domains[to_intel_crtc(crtc)->pipe] = + put_domains[intel_crtc->pipe] = modeset_get_crtc_power_domains(crtc, - to_intel_crtc_state(new_crtc_state)); + new_intel_crtc_state); } if (!needs_modeset(new_crtc_state)) continue; - intel_pre_plane_update(to_intel_crtc_state(old_crtc_state), - to_intel_crtc_state(new_crtc_state)); + intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state); if (old_crtc_state->active) { - intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask); + intel_crtc_disable_planes(intel_state, intel_crtc); /* * We need to disable pipe CRC before disabling the pipe, @@ -12756,10 +12966,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) */ intel_crtc_disable_pipe_crc(intel_crtc); - dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state); + dev_priv->display.crtc_disable(old_intel_crtc_state, state); intel_crtc->active = false; intel_fbc_disable(intel_crtc); - intel_disable_shared_dpll(intel_crtc); + intel_disable_shared_dpll(old_intel_crtc_state); /* * Underruns don't always raise @@ -12768,17 +12978,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_check_cpu_fifo_underruns(dev_priv); intel_check_pch_fifo_underruns(dev_priv); - if (!new_crtc_state->active) { - /* - * Make sure we don't call initial_watermarks - * for ILK-style watermark updates. - * - * No clue what this is supposed to achieve.
- */ - if (INTEL_GEN(dev_priv) >= 9) - dev_priv->display.initial_watermarks(intel_state, - to_intel_crtc_state(new_crtc_state)); - } + /* FIXME unify this for all platforms */ + if (!new_crtc_state->active && + !HAS_GMCH_DISPLAY(dev_priv) && + dev_priv->display.initial_watermarks) + dev_priv->display.initial_watermarks(intel_state, + new_intel_crtc_state); } } @@ -12837,11 +13042,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * TODO: Move this (and other cleanup) to an async worker eventually. */ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { - intel_cstate = to_intel_crtc_state(new_crtc_state); + new_intel_crtc_state = to_intel_crtc_state(new_crtc_state); if (dev_priv->display.optimize_watermarks) dev_priv->display.optimize_watermarks(intel_state, - intel_cstate); + new_intel_crtc_state); } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { @@ -13108,7 +13313,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state) struct i915_vma *vma; if (plane->id == PLANE_CURSOR && - INTEL_INFO(dev_priv)->cursor_needs_physical) { + INTEL_INFO(dev_priv)->display.cursor_needs_physical) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); const int align = intel_cursor_alignment(dev_priv); int err; @@ -13224,13 +13429,12 @@ intel_prepare_plane_fb(struct drm_plane *plane, ret = intel_plane_pin_fb(to_intel_plane_state(new_state)); - fb_obj_bump_render_priority(obj); - mutex_unlock(&dev_priv->drm.struct_mutex); i915_gem_object_unpin_pages(obj); if (ret) return ret; + fb_obj_bump_render_priority(obj); intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); if (!new_state->fence) { /* implicit fencing */ @@ -13361,7 +13565,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, if (intel_cstate->update_pipe) intel_update_pipe_config(old_intel_cstate, intel_cstate); else if (INTEL_GEN(dev_priv) >= 9) - skl_detach_scalers(intel_crtc); + skl_detach_scalers(intel_cstate); out: if (dev_priv->display.atomic_update_watermarks) @@ -13463,56 +13667,6 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane, } } -static bool skl_plane_format_mod_supported(struct drm_plane *_plane, - u32 format, u64 modifier) -{ - struct intel_plane *plane = to_intel_plane(_plane); - - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - break; - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - if (!plane->has_ccs) - return false; - break; - default: - return false; - } - - switch (format) { - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_ABGR8888: - if (is_ccs_modifier(modifier)) - return true; - /* fall through */ - case DRM_FORMAT_RGB565: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - case DRM_FORMAT_NV12: - if (modifier == I915_FORMAT_MOD_Yf_TILED) - return true; - /* fall through */ - case DRM_FORMAT_C8: - if (modifier == DRM_FORMAT_MOD_LINEAR || - modifier == I915_FORMAT_MOD_X_TILED || - modifier == I915_FORMAT_MOD_Y_TILED) - return true; - /* fall through */ - default: - return false; - } -} - static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { @@ -13520,18 +13674,7 @@ static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, format == DRM_FORMAT_ARGB8888; } -static struct drm_plane_funcs 
skl_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = intel_plane_destroy, - .atomic_get_property = intel_plane_atomic_get_property, - .atomic_set_property = intel_plane_atomic_set_property, - .atomic_duplicate_state = intel_plane_duplicate_state, - .atomic_destroy_state = intel_plane_destroy_state, - .format_mod_supported = skl_plane_format_mod_supported, -}; - -static struct drm_plane_funcs i965_plane_funcs = { +static const struct drm_plane_funcs i965_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = intel_plane_destroy, @@ -13542,7 +13685,7 @@ static struct drm_plane_funcs i965_plane_funcs = { .format_mod_supported = i965_plane_format_mod_supported, }; -static struct drm_plane_funcs i8xx_plane_funcs = { +static const struct drm_plane_funcs i8xx_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = intel_plane_destroy, @@ -13568,14 +13711,16 @@ intel_legacy_cursor_update(struct drm_plane *plane, struct drm_plane_state *old_plane_state, *new_plane_state; struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_framebuffer *old_fb; - struct drm_crtc_state *crtc_state = crtc->state; + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->state); + struct intel_crtc_state *new_crtc_state; /* * When crtc is inactive or there is a modeset pending, * wait for it to complete in the slowpath */ - if (!crtc_state->active || needs_modeset(crtc_state) || - to_intel_crtc_state(crtc_state)->update_pipe) + if (!crtc_state->base.active || needs_modeset(&crtc_state->base) || + crtc_state->update_pipe) goto slow; old_plane_state = plane->state; @@ -13605,6 +13750,12 @@ intel_legacy_cursor_update(struct drm_plane *plane, if (!new_plane_state) return -ENOMEM; + new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc)); + if (!new_crtc_state) { + ret = -ENOMEM; + goto out_free; + } + drm_atomic_set_fb_for_plane(new_plane_state, fb); new_plane_state->src_x = src_x; @@ -13616,9 +13767,8 @@ intel_legacy_cursor_update(struct drm_plane *plane, new_plane_state->crtc_w = crtc_w; new_plane_state->crtc_h = crtc_h; - ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state), - to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */ - to_intel_plane_state(plane->state), + ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, + to_intel_plane_state(old_plane_state), to_intel_plane_state(new_plane_state)); if (ret) goto out_free; @@ -13640,14 +13790,25 @@ intel_legacy_cursor_update(struct drm_plane *plane, /* Swap plane state */ plane->state = new_plane_state; + /* + * We cannot swap crtc_state as it may be in use by an atomic commit or + * page flip that's running simultaneously. If we swap crtc_state and + * destroy the old state, we will cause a use-after-free there. + * + * Only update active_planes, which is needed for our internal + * bookkeeping. Either value will do the right thing when updating + * planes atomically. If the cursor was part of the atomic update then + * we would have taken the slowpath. 
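+ *
+ * A sketch of the race being avoided here (hypothetical
+ * interleaving, for illustration only):
+ *
+ *   atomic commit worker        legacy cursor fastpath
+ *   --------------------        ----------------------
+ *   reads crtc->state
+ *                               swaps crtc->state, frees old state
+ *   dereferences freed state    -> use-after-free
+ *
+ * Copying just active_planes keeps the bookkeeping correct without
+ * touching the crtc_state pointer at all.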
+ */ + crtc_state->active_planes = new_crtc_state->active_planes; + if (plane->state->visible) { trace_intel_update_plane(plane, to_intel_crtc(crtc)); - intel_plane->update_plane(intel_plane, - to_intel_crtc_state(crtc->state), + intel_plane->update_plane(intel_plane, crtc_state, to_intel_plane_state(plane->state)); } else { trace_intel_disable_plane(plane, to_intel_crtc(crtc)); - intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc)); + intel_plane->disable_plane(intel_plane, crtc_state); } intel_plane_unpin_fb(to_intel_plane_state(old_plane_state)); @@ -13655,6 +13816,8 @@ intel_legacy_cursor_update(struct drm_plane *plane, out_unlock: mutex_unlock(&dev_priv->drm.struct_mutex); out_free: + if (new_crtc_state) + intel_crtc_destroy_state(crtc, &new_crtc_state->base); if (ret) intel_plane_destroy_state(plane, new_plane_state); else @@ -13695,176 +13858,90 @@ static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, return i9xx_plane == PLANE_A; } -static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id) -{ - if (!HAS_FBC(dev_priv)) - return false; - - return pipe == PIPE_A && plane_id == PLANE_PRIMARY; -} - -bool skl_plane_has_planar(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id) -{ - /* - * FIXME: ICL requires two hardware planes for scanning out NV12 - * framebuffers. Do not advertize support until this is implemented. - */ - if (INTEL_GEN(dev_priv) >= 11) - return false; - - if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) - return false; - - if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C) - return false; - - if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0) - return false; - - return true; -} - static struct intel_plane * intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_plane *primary = NULL; - struct intel_plane_state *state = NULL; + struct intel_plane *plane; const struct drm_plane_funcs *plane_funcs; - const uint32_t *intel_primary_formats; unsigned int supported_rotations; - unsigned int num_formats; - const uint64_t *modifiers; + unsigned int possible_crtcs; + const u64 *modifiers; + const u32 *formats; + int num_formats; int ret; - primary = kzalloc(sizeof(*primary), GFP_KERNEL); - if (!primary) { - ret = -ENOMEM; - goto fail; - } - - state = intel_create_plane_state(&primary->base); - if (!state) { - ret = -ENOMEM; - goto fail; - } + if (INTEL_GEN(dev_priv) >= 9) + return skl_universal_plane_create(dev_priv, pipe, + PLANE_PRIMARY); - primary->base.state = &state->base; + plane = intel_plane_alloc(); + if (IS_ERR(plane)) + return plane; - if (INTEL_GEN(dev_priv) >= 9) - state->scaler_id = -1; - primary->pipe = pipe; + plane->pipe = pipe; /* * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS * port are hooked to pipe B. Hence we want plane A feeding pipe B.
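* In other words, assuming the usual PIPE_A=0/PLANE_A=0 enum values,
* the "(enum i9xx_plane_id) !pipe" assignment below works out to:
*
*   pipe A -> PLANE_B, pipe B -> PLANE_A   (FBC-capable gen2/3)
*   pipe A -> PLANE_A, pipe B -> PLANE_B   (everyone else)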
*/ if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) - primary->i9xx_plane = (enum i9xx_plane_id) !pipe; + plane->i9xx_plane = (enum i9xx_plane_id) !pipe; else - primary->i9xx_plane = (enum i9xx_plane_id) pipe; - primary->id = PLANE_PRIMARY; - primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id); + plane->i9xx_plane = (enum i9xx_plane_id) pipe; + plane->id = PLANE_PRIMARY; + plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); - if (INTEL_GEN(dev_priv) >= 9) - primary->has_fbc = skl_plane_has_fbc(dev_priv, - primary->pipe, - primary->id); - else - primary->has_fbc = i9xx_plane_has_fbc(dev_priv, - primary->i9xx_plane); - - if (primary->has_fbc) { + plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); + if (plane->has_fbc) { struct intel_fbc *fbc = &dev_priv->fbc; - fbc->possible_framebuffer_bits |= primary->frontbuffer_bit; + fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; } - if (INTEL_GEN(dev_priv) >= 9) { - primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe, - PLANE_PRIMARY); - - if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) { - intel_primary_formats = skl_pri_planar_formats; - num_formats = ARRAY_SIZE(skl_pri_planar_formats); - } else { - intel_primary_formats = skl_primary_formats; - num_formats = ARRAY_SIZE(skl_primary_formats); - } - - if (primary->has_ccs) - modifiers = skl_format_modifiers_ccs; - else - modifiers = skl_format_modifiers_noccs; - - primary->max_stride = skl_plane_max_stride; - primary->update_plane = skl_update_plane; - primary->disable_plane = skl_disable_plane; - primary->get_hw_state = skl_plane_get_hw_state; - primary->check_plane = skl_plane_check; - - plane_funcs = &skl_plane_funcs; - } else if (INTEL_GEN(dev_priv) >= 4) { - intel_primary_formats = i965_primary_formats; + if (INTEL_GEN(dev_priv) >= 4) { + formats = i965_primary_formats; num_formats = ARRAY_SIZE(i965_primary_formats); modifiers = i9xx_format_modifiers; - primary->max_stride = i9xx_plane_max_stride; - primary->update_plane = i9xx_update_plane; - primary->disable_plane = i9xx_disable_plane; - primary->get_hw_state = i9xx_plane_get_hw_state; - primary->check_plane = i9xx_plane_check; + plane->max_stride = i9xx_plane_max_stride; + plane->update_plane = i9xx_update_plane; + plane->disable_plane = i9xx_disable_plane; + plane->get_hw_state = i9xx_plane_get_hw_state; + plane->check_plane = i9xx_plane_check; plane_funcs = &i965_plane_funcs; } else { - intel_primary_formats = i8xx_primary_formats; + formats = i8xx_primary_formats; num_formats = ARRAY_SIZE(i8xx_primary_formats); modifiers = i9xx_format_modifiers; - primary->max_stride = i9xx_plane_max_stride; - primary->update_plane = i9xx_update_plane; - primary->disable_plane = i9xx_disable_plane; - primary->get_hw_state = i9xx_plane_get_hw_state; - primary->check_plane = i9xx_plane_check; + plane->max_stride = i9xx_plane_max_stride; + plane->update_plane = i9xx_update_plane; + plane->disable_plane = i9xx_disable_plane; + plane->get_hw_state = i9xx_plane_get_hw_state; + plane->check_plane = i9xx_plane_check; plane_funcs = &i8xx_plane_funcs; } - if (INTEL_GEN(dev_priv) >= 9) - ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, - 0, plane_funcs, - intel_primary_formats, num_formats, - modifiers, - DRM_PLANE_TYPE_PRIMARY, - "plane 1%c", pipe_name(pipe)); - else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) - ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, - 0, plane_funcs, - intel_primary_formats, num_formats, - modifiers, + possible_crtcs = BIT(pipe); + + if 
(INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, + possible_crtcs, plane_funcs, + formats, num_formats, modifiers, DRM_PLANE_TYPE_PRIMARY, "primary %c", pipe_name(pipe)); else - ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, - 0, plane_funcs, - intel_primary_formats, num_formats, - modifiers, + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, + possible_crtcs, plane_funcs, + formats, num_formats, modifiers, DRM_PLANE_TYPE_PRIMARY, "plane %c", - plane_name(primary->i9xx_plane)); + plane_name(plane->i9xx_plane)); if (ret) goto fail; - if (INTEL_GEN(dev_priv) >= 10) { - supported_rotations = - DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | - DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 | - DRM_MODE_REFLECT_X; - } else if (INTEL_GEN(dev_priv) >= 9) { - supported_rotations = - DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | - DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; - } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { + if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | DRM_MODE_REFLECT_X; @@ -13876,26 +13953,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) } if (INTEL_GEN(dev_priv) >= 4) - drm_plane_create_rotation_property(&primary->base, + drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, supported_rotations); - if (INTEL_GEN(dev_priv) >= 9) - drm_plane_create_color_properties(&primary->base, - BIT(DRM_COLOR_YCBCR_BT601) | - BIT(DRM_COLOR_YCBCR_BT709), - BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | - BIT(DRM_COLOR_YCBCR_FULL_RANGE), - DRM_COLOR_YCBCR_BT709, - DRM_COLOR_YCBCR_LIMITED_RANGE); + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); - drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs); - - return primary; + return plane; fail: - kfree(state); - kfree(primary); + intel_plane_free(plane); return ERR_PTR(ret); } @@ -13904,23 +13971,13 @@ static struct intel_plane * intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_plane *cursor = NULL; - struct intel_plane_state *state = NULL; + unsigned int possible_crtcs; + struct intel_plane *cursor; int ret; - cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); - if (!cursor) { - ret = -ENOMEM; - goto fail; - } - - state = intel_create_plane_state(&cursor->base); - if (!state) { - ret = -ENOMEM; - goto fail; - } - - cursor->base.state = &state->base; + cursor = intel_plane_alloc(); + if (IS_ERR(cursor)) + return cursor; cursor->pipe = pipe; cursor->i9xx_plane = (enum i9xx_plane_id) pipe; @@ -13947,8 +14004,10 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) cursor->cursor.size = ~0; + possible_crtcs = BIT(pipe); + ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, - 0, &intel_cursor_plane_funcs, + possible_crtcs, &intel_cursor_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), cursor_format_modifiers, @@ -13963,16 +14022,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180); - if (INTEL_GEN(dev_priv) >= 9) - state->scaler_id = -1; - drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); return cursor; fail: - kfree(state); - kfree(cursor); + intel_plane_free(cursor); return ERR_PTR(ret); } @@ -13993,7 +14048,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc, struct intel_scaler *scaler = 
&scaler_state->scalers[i]; scaler->in_use = 0; - scaler->mode = PS_SCALER_MODE_DYN; + scaler->mode = 0; } scaler_state->scaler_id = -1; @@ -14088,18 +14143,6 @@ fail: return ret; } -enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) -{ - struct drm_device *dev = connector->base.dev; - - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); - - if (!connector->base.state->crtc) - return INVALID_PIPE; - - return to_intel_crtc(connector->base.state->crtc)->pipe; -} - int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { @@ -14216,7 +14259,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_pps_init(dev_priv); - if (INTEL_INFO(dev_priv)->num_pipes == 0) + if (!HAS_DISPLAY(dev_priv)) return; /* @@ -14236,6 +14279,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_ddi_init(dev_priv, PORT_D); intel_ddi_init(dev_priv, PORT_E); intel_ddi_init(dev_priv, PORT_F); + icl_dsi_init(dev_priv); } else if (IS_GEN9_LP(dev_priv)) { /* * FIXME: Broxton doesn't support port detection via the @@ -14458,7 +14502,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { static u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv, - uint64_t fb_modifier, uint32_t pixel_format) + u32 pixel_format, u64 fb_modifier) { struct intel_crtc *crtc; struct intel_plane *plane; @@ -14480,7 +14524,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct drm_framebuffer *fb = &intel_fb->base; - struct drm_format_name_buf format_name; u32 pitch_limit; unsigned int tiling, stride; int ret = -EINVAL; @@ -14511,33 +14554,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, } } - /* Passed in modifier sanity checking. */ - switch (mode_cmd->modifier[0]) { - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - switch (mode_cmd->pixel_format) { - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB8888: - break; - default: - DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n"); - goto err; - } - /* fall through */ - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - if (INTEL_GEN(dev_priv) < 9) { - DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n", - mode_cmd->modifier[0]); - goto err; - } - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: - DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n", + if (!drm_any_plane_has_format(&dev_priv->drm, + mode_cmd->pixel_format, + mode_cmd->modifier[0])) { + struct drm_format_name_buf format_name; + + DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n", + drm_get_format_name(mode_cmd->pixel_format, + &format_name), mode_cmd->modifier[0]); goto err; } @@ -14552,8 +14576,8 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, goto err; } - pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0], - mode_cmd->pixel_format); + pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format, + mode_cmd->modifier[0]); if (mode_cmd->pitches[0] > pitch_limit) { DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? @@ -14572,69 +14596,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, goto err; } - /* Reject formats not supported by any plane early. 
*/ - switch (mode_cmd->pixel_format) { - case DRM_FORMAT_C8: - case DRM_FORMAT_RGB565: - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_ARGB8888: - break; - case DRM_FORMAT_XRGB1555: - if (INTEL_GEN(dev_priv) > 3) { - DRM_DEBUG_KMS("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); - goto err; - } - break; - case DRM_FORMAT_ABGR8888: - if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && - INTEL_GEN(dev_priv) < 9) { - DRM_DEBUG_KMS("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); - goto err; - } - break; - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - if (INTEL_GEN(dev_priv) < 4) { - DRM_DEBUG_KMS("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); - goto err; - } - break; - case DRM_FORMAT_ABGR2101010: - if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { - DRM_DEBUG_KMS("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); - goto err; - } - break; - case DRM_FORMAT_YUYV: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_VYUY: - if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { - DRM_DEBUG_KMS("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); - goto err; - } - break; - case DRM_FORMAT_NV12: - if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) || - IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) { - DRM_DEBUG_KMS("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format, - &format_name)); - goto err; - } - break; - default: - DRM_DEBUG_KMS("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); - goto err; - } - /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ if (mode_cmd->offsets[0] != 0) goto err; @@ -14646,7 +14607,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, fb->height < SKL_MIN_YUV_420_SRC_H || (fb->width % 4) != 0 || (fb->height % 4) != 0)) { DRM_DEBUG_KMS("src dimensions not correct for NV12\n"); - return -EINVAL; + goto err; } for (i = 0; i < fb->format->num_planes; i++) { @@ -14906,174 +14867,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) dev_priv->display.update_crtcs = intel_update_crtcs; } -/* - * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason - */ -static void quirk_ssc_force_disable(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; - DRM_INFO("applying lvds SSC disable quirk\n"); -} - -/* - * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight - * brightness value - */ -static void quirk_invert_brightness(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; - DRM_INFO("applying inverted panel brightness quirk\n"); -} - -/* Some VBT's incorrectly indicate no backlight is present */ -static void quirk_backlight_present(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; - DRM_INFO("applying backlight present quirk\n"); -} - -/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms - * which is 300 ms greater than eDP spec T12 min. 
- */ -static void quirk_increase_t12_delay(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - - dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY; - DRM_INFO("Applying T12 delay quirk\n"); -} - -/* - * GeminiLake NUC HDMI outputs require additional off time - * this allows the onboard retimer to correctly sync to signal - */ -static void quirk_increase_ddi_disabled_time(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - - dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME; - DRM_INFO("Applying Increase DDI Disabled quirk\n"); -} - -struct intel_quirk { - int device; - int subsystem_vendor; - int subsystem_device; - void (*hook)(struct drm_device *dev); -}; - -/* For systems that don't have a meaningful PCI subdevice/subvendor ID */ -struct intel_dmi_quirk { - void (*hook)(struct drm_device *dev); - const struct dmi_system_id (*dmi_id_list)[]; -}; - -static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) -{ - DRM_INFO("Backlight polarity reversed on %s\n", id->ident); - return 1; -} - -static const struct intel_dmi_quirk intel_dmi_quirks[] = { - { - .dmi_id_list = &(const struct dmi_system_id[]) { - { - .callback = intel_dmi_reverse_brightness, - .ident = "NCR Corporation", - .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, ""), - }, - }, - { } /* terminating entry */ - }, - .hook = quirk_invert_brightness, - }, -}; - -static struct intel_quirk intel_quirks[] = { - /* Lenovo U160 cannot use SSC on LVDS */ - { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, - - /* Sony Vaio Y cannot use SSC on LVDS */ - { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, - - /* Acer Aspire 5734Z must invert backlight brightness */ - { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, - - /* Acer/eMachines G725 */ - { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, - - /* Acer/eMachines e725 */ - { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, - - /* Acer/Packard Bell NCL20 */ - { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, - - /* Acer Aspire 4736Z */ - { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, - - /* Acer Aspire 5336 */ - { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, - - /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ - { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, - - /* Acer C720 Chromebook (Core i3 4005U) */ - { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, - - /* Apple Macbook 2,1 (Core 2 T7400) */ - { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, - - /* Apple Macbook 4,1 */ - { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present }, - - /* Toshiba CB35 Chromebook (Celeron 2955U) */ - { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, - - /* HP Chromebook 14 (Celeron 2955U) */ - { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, - - /* Dell Chromebook 11 */ - { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, - - /* Dell Chromebook 11 (2015 version) */ - { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present }, - - /* Toshiba Satellite P50-C-18C */ - { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay }, - - /* GeminiLake NUC */ - { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, - { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, - /* ASRock ITX*/ - { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, - { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, -}; - -static void intel_init_quirks(struct drm_device *dev) -{ - struct pci_dev *d = dev->pdev; - int i; - - for (i = 0; i < 
ARRAY_SIZE(intel_quirks); i++) { - struct intel_quirk *q = &intel_quirks[i]; - - if (d->device == q->device && - (d->subsystem_vendor == q->subsystem_vendor || - q->subsystem_vendor == PCI_ANY_ID) && - (d->subsystem_device == q->subsystem_device || - q->subsystem_device == PCI_ANY_ID)) - q->hook(dev); - } - for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { - if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) - intel_dmi_quirks[i].hook(dev); - } -} - /* Disable the VGA plane that we never use */ static void i915_disable_vga(struct drm_i915_private *dev_priv) { @@ -15233,6 +15026,14 @@ retry: ret = drm_atomic_add_affected_planes(state, crtc); if (ret) goto out; + + /* + * FIXME hack to force a LUT update to avoid the + * plane update forcing the pipe gamma on without + * having a proper LUT loaded. Remove once we + * have readout for pipe gamma enable. + */ + crtc_state->color_mgmt_changed = true; } } @@ -15279,7 +15080,9 @@ int intel_modeset_init(struct drm_device *dev) INIT_WORK(&dev_priv->atomic_helper.free_work, intel_atomic_helper_free_state_worker); - intel_init_quirks(dev); + intel_init_quirks(dev_priv); + + intel_fbc_init(dev_priv); intel_init_pm(dev_priv); @@ -15511,8 +15314,8 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) if (pipe == crtc->pipe) continue; - DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n", - plane->base.name); + DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", + plane->base.base.id, plane->base.name); plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); intel_plane_disable_noatomic(plane_crtc, plane); @@ -15553,7 +15356,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; + struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; /* Clear any frame start delays used for debugging left by the BIOS */ if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) { @@ -15563,7 +15367,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); } - if (crtc->active) { + if (crtc_state->base.active) { struct intel_plane *plane; /* Disable everything but the primary plane */ @@ -15579,10 +15383,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, /* Adjust the state of the output pipe according to whether we * have active connectors/encoders. */ - if (crtc->active && !intel_crtc_has_encoders(crtc)) + if (crtc_state->base.active && !intel_crtc_has_encoders(crtc)) intel_crtc_disable_noatomic(&crtc->base, ctx); - if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { + if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) { /* * We start out with underrun reporting disabled to avoid races. * For correct bookkeeping mark this on active crtcs. 
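The sanitize hunks above and below all key hardware-state decisions off the committed atomic state rather than the legacy crtc->active / crtc->config mirrors. A minimal sketch of the pattern they converge on (the helper name is hypothetical, not part of this patch):

static bool intel_crtc_hw_active(struct intel_crtc *crtc)
{
	/* crtc->base.state is the committed atomic crtc state. */
	const struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);

	return crtc_state->base.active;
}

Keeping readout, sanitization and config dumping on the same state object avoids the cached fields going stale between the two.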
@@ -15613,6 +15417,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, static void intel_sanitize_encoder(struct intel_encoder *encoder) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_connector *connector; /* We need to check both for a crtc link (meaning that the @@ -15636,7 +15441,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", encoder->base.base.id, encoder->base.name); - encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); + if (encoder->disable) + encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); if (encoder->post_disable) encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); } @@ -15653,6 +15459,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) /* notify opregion of the sanitized encoder state */ intel_opregion_notify_encoder(encoder, connector && has_active_crtc); + + if (INTEL_GEN(dev_priv) >= 11) + icl_sanitize_encoder_pll_mapping(encoder); } void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) @@ -15701,6 +15510,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv) crtc_state = to_intel_crtc_state(crtc->base.state); intel_set_plane_visible(crtc_state, plane_state, visible); + + DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n", + plane->base.base.id, plane->base.name, + enableddisabled(visible), pipe_name(pipe)); } for_each_intel_crtc(&dev_priv->drm, crtc) { @@ -15853,7 +15666,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) drm_calc_timestamping_constants(&crtc->base, &crtc_state->base.adjusted_mode); - update_scanline_offset(crtc); + update_scanline_offset(crtc_state); } dev_priv->min_cdclk[crtc->pipe] = min_cdclk; @@ -15908,6 +15721,65 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv) } } +static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv, + enum port port, i915_reg_t hdmi_reg) +{ + u32 val = I915_READ(hdmi_reg); + + if (val & SDVO_ENABLE || + (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A)) + return; + + DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n", + port_name(port)); + + val &= ~SDVO_PIPE_SEL_MASK; + val |= SDVO_PIPE_SEL(PIPE_A); + + I915_WRITE(hdmi_reg, val); +} + +static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv, + enum port port, i915_reg_t dp_reg) +{ + u32 val = I915_READ(dp_reg); + + if (val & DP_PORT_EN || + (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A)) + return; + + DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n", + port_name(port)); + + val &= ~DP_PIPE_SEL_MASK; + val |= DP_PIPE_SEL(PIPE_A); + + I915_WRITE(dp_reg, val); +} + +static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv) +{ + /* + * The BIOS may select transcoder B on some of the PCH + * ports even if it doesn't enable the port. This would trip + * assert_pch_dp_disabled() and assert_pch_hdmi_disabled(). + * Sanitize the transcoder select bits to prevent that. We + * assume that the BIOS never actually enabled the port, + * because if it did we'd actually have to toggle the port + * on and back off to make the transcoder A select stick + * (see intel_dp_link_down(), intel_disable_hdmi(), + * intel_disable_sdvo()).
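+ *
+ * Example (a made-up BIOS leftover): a disabled PCH_DP_B still
+ * carrying DP_PIPE_SEL(PIPE_B) is rewritten by the helper above as
+ *
+ *   val &= ~DP_PIPE_SEL_MASK;
+ *   val |= DP_PIPE_SEL(PIPE_A);
+ *
+ * while an enabled port, or one already selecting pipe A, is left
+ * alone by the early return.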
+ */ + ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B); + ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C); + ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D); + + /* PCH SDVOB multiplex with HDMIB */ + ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB); + ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC); + ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID); +} + /* Scan out the current hw modeset state, * and sanitize it to the current state */ @@ -15917,6 +15789,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; + struct intel_crtc_state *crtc_state; struct intel_encoder *encoder; int i; @@ -15928,6 +15801,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev, /* HW state is read out, now we need to sanitize this mess. */ get_encoder_power_domains(dev_priv); + if (HAS_PCH_IBX(dev_priv)) + ibx_sanitize_pch_ports(dev_priv); + /* * intel_sanitize_plane_mapping() may need to do vblank * waits, so we need vblank interrupts restored beforehand. @@ -15935,7 +15811,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, for_each_intel_crtc(&dev_priv->drm, crtc) { drm_crtc_vblank_reset(&crtc->base); - if (crtc->active) + if (crtc->base.state->active) drm_crtc_vblank_on(&crtc->base); } @@ -15945,8 +15821,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev, intel_sanitize_encoder(encoder); for_each_intel_crtc(&dev_priv->drm, crtc) { + crtc_state = to_intel_crtc_state(crtc->base.state); intel_sanitize_crtc(crtc, ctx); - intel_dump_pipe_config(crtc, crtc->config, + intel_dump_pipe_config(crtc, crtc_state, "[setup_hw_state]"); } @@ -15980,7 +15857,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev, for_each_intel_crtc(dev, crtc) { u64 put_domains; - put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config); + crtc_state = to_intel_crtc_state(crtc->base.state); + put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state); if (WARN_ON(put_domains)) modeset_put_power_domains(dev_priv, put_domains); } @@ -16024,29 +15902,6 @@ void intel_display_resume(struct drm_device *dev) drm_atomic_state_put(state); } -int intel_connector_register(struct drm_connector *connector) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - int ret; - - ret = intel_backlight_device_register(intel_connector); - if (ret) - goto err; - - return 0; - -err: - return ret; -} - -void intel_connector_unregister(struct drm_connector *connector) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - - intel_backlight_device_unregister(intel_connector); - intel_panel_destroy_backlight(connector); -} - static void intel_hpd_poll_fini(struct drm_device *dev) { struct intel_connector *connector; @@ -16057,9 +15912,9 @@ static void intel_hpd_poll_fini(struct drm_device *dev) for_each_intel_connector_iter(connector, &conn_iter) { if (connector->modeset_retry_work.func) cancel_work_sync(&connector->modeset_retry_work); - if (connector->hdcp_shim) { - cancel_delayed_work_sync(&connector->hdcp_check_work); - cancel_work_sync(&connector->hdcp_prop_work); + if (connector->hdcp.shim) { + cancel_delayed_work_sync(&connector->hdcp.check_work); + cancel_work_sync(&connector->hdcp.prop_work); } } drm_connector_list_iter_end(&conn_iter); @@ -16099,18 +15954,13 @@ void intel_modeset_cleanup(struct drm_device *dev) drm_mode_config_cleanup(dev); - intel_cleanup_overlay(dev_priv); + intel_overlay_cleanup(dev_priv); intel_teardown_gmbus(dev_priv);
destroy_workqueue(dev_priv->modeset_wq); -} -void intel_connector_attach_encoder(struct intel_connector *connector, - struct intel_encoder *encoder) -{ - connector->encoder = encoder; - drm_connector_attach_encoder(&connector->base, &encoder->base); + intel_fbc_cleanup_cfb(dev_priv); } /* @@ -16200,7 +16050,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) }; int i; - if (INTEL_INFO(dev_priv)->num_pipes == 0) + if (!HAS_DISPLAY(dev_priv)) return NULL; error = kzalloc(sizeof(*error), GFP_ATOMIC); diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index 9fac67e31205..4262452963b3 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h @@ -43,6 +43,11 @@ enum i915_gpio { GPIOM, }; +/* + * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the + * rest have consecutive values and match the enum values of transcoders + * with a 1:1 transcoder -> pipe mapping. + */ enum pipe { INVALID_PIPE = -1, @@ -57,12 +62,25 @@ enum pipe { #define pipe_name(p) ((p) + 'A') enum transcoder { - TRANSCODER_A = 0, - TRANSCODER_B, - TRANSCODER_C, + /* + * The following transcoders have a 1:1 transcoder -> pipe mapping, + * keep their values fixed: the code assumes that TRANSCODER_A=0, the + * rest have consecutive values and match the enum values of the pipes + * they map to. + */ + TRANSCODER_A = PIPE_A, + TRANSCODER_B = PIPE_B, + TRANSCODER_C = PIPE_C, + + /* + * The following transcoders can map to any pipe, their enum value + * doesn't need to stay fixed. + */ TRANSCODER_EDP, - TRANSCODER_DSI_A, - TRANSCODER_DSI_C, + TRANSCODER_DSI_0, + TRANSCODER_DSI_1, + TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */ + TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */ I915_MAX_TRANSCODERS }; @@ -120,6 +138,9 @@ enum plane_id { PLANE_SPRITE0, PLANE_SPRITE1, PLANE_SPRITE2, + PLANE_SPRITE3, + PLANE_SPRITE4, + PLANE_SPRITE5, PLANE_CURSOR, I915_MAX_PLANES, @@ -221,6 +242,7 @@ enum intel_display_power_domain { POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, POWER_DOMAIN_TRANSCODER_EDP, + POWER_DOMAIN_TRANSCODER_EDP_VDSC, POWER_DOMAIN_TRANSCODER_DSI_A, POWER_DOMAIN_TRANSCODER_DSI_C, POWER_DOMAIN_PORT_DDI_A_LANES, @@ -363,7 +385,7 @@ struct intel_link_m_n { (__dev_priv)->power_domains.power_well_count; \ (__power_well)++) -#define for_each_power_well_rev(__dev_priv, __power_well) \ +#define for_each_power_well_reverse(__dev_priv, __power_well) \ for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ (__dev_priv)->power_domains.power_well_count - 1; \ (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ @@ -373,10 +395,18 @@ struct intel_link_m_n { for_each_power_well(__dev_priv, __power_well) \ for_each_if((__power_well)->desc->domains & (__domain_mask)) -#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \ - for_each_power_well_rev(__dev_priv, __power_well) \ +#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \ + for_each_power_well_reverse(__dev_priv, __power_well) \ for_each_if((__power_well)->desc->domains & (__domain_mask)) +#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->base.dev->mode_config.num_total_plane && \ + ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \ + (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), 1); \ + (__i)++) \ + for_each_if(plane) + #define 
for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->base.dev->mode_config.num_total_plane && \ @@ -402,10 +432,18 @@ struct intel_link_m_n { (__i)++) \ for_each_if(plane) -void intel_link_compute_m_n(int bpp, int nlanes, +#define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->base.dev->mode_config.num_crtc && \ + ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \ + (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \ + (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \ + (__i)++) \ + for_each_if(crtc) + +void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, bool constant_n); - bool is_ccs_modifier(u64 modifier); #endif diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 13f9b56a9ce7..fdd2cbc56fa3 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -45,6 +45,19 @@ #define DP_DPRX_ESI_LEN 14 +/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */ +#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440 +#define DP_DSC_MIN_SUPPORTED_BPC 8 +#define DP_DSC_MAX_SUPPORTED_BPC 10 + +/* DP DSC throughput values used for slice count calculations KPixels/s */ +#define DP_DSC_PEAK_PIXEL_RATE 2720000 +#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 +#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 + +/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */ +#define DP_DSC_FEC_OVERHEAD_FACTOR 976 + /* Compliance test status bits */ #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) @@ -93,6 +106,14 @@ static const struct dp_link_dpll chv_dpll[] = { { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }, }; +/* Constants for DP DSC configurations */ +static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15}; + +/* With Single pipe configuration, HW is capable of supporting maximum + * of 4 slices per line. 
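[Aside, not part of the patch] A standalone worked example of how the DSC constants introduced above combine. The link and mode values are hypothetical, and the explicit division by 1000 is added here to keep the kHz units consistent within this self-contained sketch:

	#include <stdio.h>

	#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
	#define DP_DSC_FEC_OVERHEAD_FACTOR 976	/* 0.976, scaled by 1000 */

	int main(void)
	{
		long long link_clock = 540000;	/* LS clock in kHz (HBR2) */
		long long lane_count = 4;
		long long mode_clock = 533250;	/* pixel clock in kHz */
		long long hdisplay = 3840;

		/* Link-bandwidth-limited bpp, derated 2.4% for FEC. */
		long long bw_bpp = link_clock * lane_count * 8 *
				   DP_DSC_FEC_OVERHEAD_FACTOR /
				   (mode_clock * 1000);

		/* Small-joiner-RAM-limited bpp: RAM bits per output pixel. */
		long long ram_bpp =
			DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / hdisplay;

		/* Prints 31 and 16; the driver takes the smaller bound and
		 * snaps it down to the nearest valid_dsc_bpp[] entry (15). */
		printf("bw %lld bpp, ram %lld bpp\n", bw_bpp, ram_bpp);
		return 0;
	}
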
+ */ +static const u8 valid_dsc_slicecount[] = {1, 2, 4}; + /** * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) * @intel_dp: DP struct @@ -222,138 +243,6 @@ intel_dp_link_required(int pixel_clock, int bpp) return DIV_ROUND_UP(pixel_clock * bpp, 8); } -void icl_program_mg_dp_mode(struct intel_dp *intel_dp) -{ - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - enum port port = intel_dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - u32 ln0, ln1, lane_info; - - if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT) - return; - - ln0 = I915_READ(MG_DP_MODE(port, 0)); - ln1 = I915_READ(MG_DP_MODE(port, 1)); - - switch (intel_dig_port->tc_type) { - case TC_PORT_TYPEC: - ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); - ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); - - lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & - DP_LANE_ASSIGNMENT_MASK(tc_port)) >> - DP_LANE_ASSIGNMENT_SHIFT(tc_port); - - switch (lane_info) { - case 0x1: - case 0x4: - break; - case 0x2: - ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; - break; - case 0x3: - ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | - MG_DP_MODE_CFG_DP_X2_MODE; - break; - case 0x8: - ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; - break; - case 0xC: - ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | - MG_DP_MODE_CFG_DP_X2_MODE; - break; - case 0xF: - ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | - MG_DP_MODE_CFG_DP_X2_MODE; - ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | - MG_DP_MODE_CFG_DP_X2_MODE; - break; - default: - MISSING_CASE(lane_info); - } - break; - - case TC_PORT_LEGACY: - ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; - ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; - break; - - default: - MISSING_CASE(intel_dig_port->tc_type); - return; - } - - I915_WRITE(MG_DP_MODE(port, 0), ln0); - I915_WRITE(MG_DP_MODE(port, 1), ln1); -} - -void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port) -{ - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum port port = dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; - u32 val; - int i; - - if (tc_port == PORT_TC_NONE) - return; - - for (i = 0; i < ARRAY_SIZE(mg_regs); i++) { - val = I915_READ(mg_regs[i]); - val |= MG_DP_MODE_CFG_TR2PWR_GATING | - MG_DP_MODE_CFG_TRPWR_GATING | - MG_DP_MODE_CFG_CLNPWR_GATING | - MG_DP_MODE_CFG_DIGPWR_GATING | - MG_DP_MODE_CFG_GAONPWR_GATING; - I915_WRITE(mg_regs[i], val); - } - - val = I915_READ(MG_MISC_SUS0(tc_port)); - val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) | - MG_MISC_SUS0_CFG_TR2PWR_GATING | - MG_MISC_SUS0_CFG_CL2PWR_GATING | - MG_MISC_SUS0_CFG_GAONPWR_GATING | - MG_MISC_SUS0_CFG_TRPWR_GATING | - MG_MISC_SUS0_CFG_CL1PWR_GATING | - MG_MISC_SUS0_CFG_DGPWR_GATING; - I915_WRITE(MG_MISC_SUS0(tc_port), val); -} - -void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port) -{ - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum port port = dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; - u32 val; - int i; - - if (tc_port == PORT_TC_NONE) - return; - - for (i = 0; i < ARRAY_SIZE(mg_regs); i++) { - val = I915_READ(mg_regs[i]); - val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING | - MG_DP_MODE_CFG_TRPWR_GATING | - MG_DP_MODE_CFG_CLNPWR_GATING | - 
MG_DP_MODE_CFG_DIGPWR_GATING | - MG_DP_MODE_CFG_GAONPWR_GATING); - I915_WRITE(mg_regs[i], val); - } - - val = I915_READ(MG_MISC_SUS0(tc_port)); - val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK | - MG_MISC_SUS0_CFG_TR2PWR_GATING | - MG_MISC_SUS0_CFG_CL2PWR_GATING | - MG_MISC_SUS0_CFG_GAONPWR_GATING | - MG_MISC_SUS0_CFG_TRPWR_GATING | - MG_MISC_SUS0_CFG_CL1PWR_GATING | - MG_MISC_SUS0_CFG_DGPWR_GATING); - I915_WRITE(MG_MISC_SUS0(tc_port), val); -} - int intel_dp_max_data_rate(int max_link_clock, int max_lanes) { @@ -455,7 +344,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) if (INTEL_GEN(dev_priv) >= 10) { source_rates = cnl_rates; size = ARRAY_SIZE(cnl_rates); - if (INTEL_GEN(dev_priv) == 10) + if (IS_GEN10(dev_priv)) max_rate = cnl_max_source_rate(intel_dp); else max_rate = icl_max_source_rate(intel_dp); @@ -616,9 +505,12 @@ intel_dp_mode_valid(struct drm_connector *connector, struct intel_dp *intel_dp = intel_attached_dp(connector); struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; + struct drm_i915_private *dev_priv = to_i915(connector->dev); int target_clock = mode->clock; int max_rate, mode_rate, max_lanes, max_link_clock; int max_dotclk; + u16 dsc_max_output_bpp = 0; + u8 dsc_slice_count = 0; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; @@ -641,7 +533,33 @@ intel_dp_mode_valid(struct drm_connector *connector, max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); mode_rate = intel_dp_link_required(target_clock, 18); - if (mode_rate > max_rate || target_clock > max_dotclk) + /* + * Output bpp is stored in 6.4 format so right shift by 4 to get the + * integer value since we support only integer values of bpp. + */ + if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) && + drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) { + if (intel_dp_is_edp(intel_dp)) { + dsc_max_output_bpp = + drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4; + dsc_slice_count = + drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, + true); + } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) { + dsc_max_output_bpp = + intel_dp_dsc_get_output_bpp(max_link_clock, + max_lanes, + target_clock, + mode->hdisplay) >> 4; + dsc_slice_count = + intel_dp_dsc_get_slice_count(intel_dp, + target_clock, + mode->hdisplay); + } + } + + if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) || + target_clock > max_dotclk) return MODE_CLOCK_HIGH; if (mode->clock < 10000) @@ -690,7 +608,8 @@ static void pps_lock(struct intel_dp *intel_dp) * See intel_power_sequencer_reset() why we need * a power domain reference here. 
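[Aside, not part of the patch] From this hunk onward the cached intel_dp->aux_power_domain field is gone; the power domain is recomputed on demand from the digital port. The replacement pattern, as used in pps_lock()/pps_unlock() below (sketch only):

	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);

	intel_display_power_get(dev_priv, aux_domain);
	/* ... touch PPS/AUX registers ... */
	intel_display_power_put(dev_priv, aux_domain);
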
*/ - intel_display_power_get(dev_priv, intel_dp->aux_power_domain); + intel_display_power_get(dev_priv, + intel_aux_power_domain(dp_to_dig_port(intel_dp))); mutex_lock(&dev_priv->pps_mutex); } @@ -701,7 +620,8 @@ static void pps_unlock(struct intel_dp *intel_dp) mutex_unlock(&dev_priv->pps_mutex); - intel_display_power_put(dev_priv, intel_dp->aux_power_domain); + intel_display_power_put(dev_priv, + intel_aux_power_domain(dp_to_dig_port(intel_dp))); } static void @@ -1156,6 +1076,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); if (index) return 0; @@ -1165,7 +1086,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) * like to run at 2MHz. So, take the cdclk or PCH rawclk value and * divide by 2000 and use that */ - if (intel_dp->aux_ch == AUX_CH_A) + if (dig_port->aux_ch == AUX_CH_A) return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000); else return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); @@ -1174,8 +1095,9 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { + if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { /* Workaround for non-ULT HSW */ switch (index) { case 0: return 63; @@ -1503,80 +1425,12 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) return ret; } -static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp) -{ - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; - const struct ddi_vbt_port_info *info = - &dev_priv->vbt.ddi_port_info[port]; - enum aux_ch aux_ch; - - if (!info->alternate_aux_channel) { - aux_ch = (enum aux_ch) port; - - DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n", - aux_ch_name(aux_ch), port_name(port)); - return aux_ch; - } - - switch (info->alternate_aux_channel) { - case DP_AUX_A: - aux_ch = AUX_CH_A; - break; - case DP_AUX_B: - aux_ch = AUX_CH_B; - break; - case DP_AUX_C: - aux_ch = AUX_CH_C; - break; - case DP_AUX_D: - aux_ch = AUX_CH_D; - break; - case DP_AUX_E: - aux_ch = AUX_CH_E; - break; - case DP_AUX_F: - aux_ch = AUX_CH_F; - break; - default: - MISSING_CASE(info->alternate_aux_channel); - aux_ch = AUX_CH_A; - break; - } - - DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n", - aux_ch_name(aux_ch), port_name(port)); - - return aux_ch; -} - -static enum intel_display_power_domain -intel_aux_power_domain(struct intel_dp *intel_dp) -{ - switch (intel_dp->aux_ch) { - case AUX_CH_A: - return POWER_DOMAIN_AUX_A; - case AUX_CH_B: - return POWER_DOMAIN_AUX_B; - case AUX_CH_C: - return POWER_DOMAIN_AUX_C; - case AUX_CH_D: - return POWER_DOMAIN_AUX_D; - case AUX_CH_E: - return POWER_DOMAIN_AUX_E; - case AUX_CH_F: - return POWER_DOMAIN_AUX_F; - default: - MISSING_CASE(intel_dp->aux_ch); - return POWER_DOMAIN_AUX_A; - } -} static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - enum aux_ch aux_ch = intel_dp->aux_ch; + struct intel_digital_port *dig_port = 
dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_B: @@ -1592,7 +1446,8 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - enum aux_ch aux_ch = intel_dp->aux_ch; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_B: @@ -1608,7 +1463,8 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - enum aux_ch aux_ch = intel_dp->aux_ch; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_A: @@ -1626,7 +1482,8 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - enum aux_ch aux_ch = intel_dp->aux_ch; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_A: @@ -1644,7 +1501,8 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - enum aux_ch aux_ch = intel_dp->aux_ch; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_A: @@ -1663,7 +1521,8 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - enum aux_ch aux_ch = intel_dp->aux_ch; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; switch (aux_ch) { case AUX_CH_A: @@ -1689,10 +1548,8 @@ static void intel_dp_aux_init(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - - intel_dp->aux_ch = intel_aux_ch(intel_dp); - intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &dig_port->base; if (INTEL_GEN(dev_priv) >= 9) { intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg; @@ -1853,6 +1710,41 @@ struct link_config_limits { int min_bpp, max_bpp; }; +static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + return INTEL_GEN(dev_priv) >= 11 && + pipe_config->cpu_transcoder != TRANSCODER_A; +} + +static bool intel_dp_supports_fec(struct intel_dp *intel_dp, + const struct intel_crtc_state *pipe_config) +{ + return intel_dp_source_supports_fec(intel_dp, pipe_config) && + drm_dp_sink_supports_fec(intel_dp->fec_capable); +} + +static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + return INTEL_GEN(dev_priv) >= 10 && + pipe_config->cpu_transcoder != TRANSCODER_A; +} + +static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, + const struct intel_crtc_state *pipe_config) +{ 
+ if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable) + return false; + + return intel_dp_source_supports_dsc(intel_dp, pipe_config) && + drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); +} + static int intel_dp_compute_bpp(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config) { @@ -1951,14 +1843,158 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, return false; } +/* Optimize link config in order: max bpp, min lanes, min clock */ +static bool +intel_dp_compute_link_config_fast(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + const struct link_config_limits *limits) +{ + struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + int bpp, clock, lane_count; + int mode_rate, link_clock, link_avail; + + for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { + mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, + bpp); + + for (lane_count = limits->min_lane_count; + lane_count <= limits->max_lane_count; + lane_count <<= 1) { + for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { + link_clock = intel_dp->common_rates[clock]; + link_avail = intel_dp_max_data_rate(link_clock, + lane_count); + + if (mode_rate <= link_avail) { + pipe_config->lane_count = lane_count; + pipe_config->pipe_bpp = bpp; + pipe_config->port_clock = link_clock; + + return true; + } + } + } + } + + return false; +} + +static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) +{ + int i, num_bpc; + u8 dsc_bpc[3] = {0}; + + num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, + dsc_bpc); + for (i = 0; i < num_bpc; i++) { + if (dsc_max_bpc >= dsc_bpc[i]) + return dsc_bpc[i] * 3; + } + + return 0; +} + +static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state, + struct link_config_limits *limits) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + u8 dsc_max_bpc; + int pipe_bpp; + + if (!intel_dp_supports_dsc(intel_dp, pipe_config)) + return false; + + dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC, + conn_state->max_requested_bpc); + + pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); + if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) { + DRM_DEBUG_KMS("No DSC support for less than 8bpc\n"); + return false; + } + + /* + * For now enable DSC for max bpp, max link rate, max lane count. + * Optimize this later for the minimum possible link rate/lane count + * with DSC enabled for the requested mode. 
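[Aside, not part of the patch] intel_dp_compute_link_config_fast() above searches in the order max bpp, then fewest lanes, then lowest link rate, stepping bpp by 2 * 3 (two bits per colour component). A self-contained sketch of that search order with hypothetical limits; link_required() mirrors intel_dp_link_required() from earlier in this file:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* kBps needed by the mode; with 8b/10b coding, the data rate a
	 * config offers in kBps equals link clock in kHz times lanes. */
	static int link_required(int pixel_clock, int bpp)
	{
		return DIV_ROUND_UP(pixel_clock * bpp, 8);
	}

	int main(void)
	{
		int rates[] = { 162000, 216000, 270000 };  /* hypothetical */
		int pixel_clock = 141500;	/* kHz, hypothetical */

		for (int bpp = 30; bpp >= 18; bpp -= 2 * 3)
			for (int lanes = 1; lanes <= 4; lanes <<= 1)
				for (int i = 0; i < 3; i++)
					if (link_required(pixel_clock, bpp) <=
					    rates[i] * lanes) {
						/* first hit here: bpp 30,
						 * x2 lanes at 270000 kHz */
						printf("bpp %d x%d @%d\n",
						       bpp, lanes, rates[i]);
						return 0;
					}
		return 1;
	}
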
+ */ + pipe_config->pipe_bpp = pipe_bpp; + pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; + pipe_config->lane_count = limits->max_lane_count; + + if (intel_dp_is_edp(intel_dp)) { + pipe_config->dsc_params.compressed_bpp = + min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, + pipe_config->pipe_bpp); + pipe_config->dsc_params.slice_count = + drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, + true); + } else { + u16 dsc_max_output_bpp; + u8 dsc_dp_slice_count; + + dsc_max_output_bpp = + intel_dp_dsc_get_output_bpp(pipe_config->port_clock, + pipe_config->lane_count, + adjusted_mode->crtc_clock, + adjusted_mode->crtc_hdisplay); + dsc_dp_slice_count = + intel_dp_dsc_get_slice_count(intel_dp, + adjusted_mode->crtc_clock, + adjusted_mode->crtc_hdisplay); + if (!dsc_max_output_bpp || !dsc_dp_slice_count) { + DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n"); + return false; + } + pipe_config->dsc_params.compressed_bpp = min_t(u16, + dsc_max_output_bpp >> 4, + pipe_config->pipe_bpp); + pipe_config->dsc_params.slice_count = dsc_dp_slice_count; + } + /* + * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate + * is greater than the maximum Cdclock and if slice count is even + * then we need to use 2 VDSC instances. + */ + if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { + if (pipe_config->dsc_params.slice_count > 1) { + pipe_config->dsc_params.dsc_split = true; + } else { + DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n"); + return false; + } + } + if (intel_dp_compute_dsc_params(intel_dp, pipe_config) < 0) { + DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d " + "Compressed BPP = %d\n", + pipe_config->pipe_bpp, + pipe_config->dsc_params.compressed_bpp); + return false; + } + pipe_config->dsc_params.compression_enable = true; + DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d " + "Compressed Bpp = %d Slice Count = %d\n", + pipe_config->pipe_bpp, + pipe_config->dsc_params.compressed_bpp, + pipe_config->dsc_params.slice_count); + + return true; +} + static bool intel_dp_compute_link_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) { struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct link_config_limits limits; int common_len; + bool ret; common_len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); @@ -1975,13 +2011,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, limits.min_bpp = 6 * 3; limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); - if (intel_dp_is_edp(intel_dp)) { + if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) { /* * Use the maximum clock and number of lanes the eDP panel - * advertizes being capable of. The panels are generally - * designed to support only a single clock and lane - * configuration, and typically these values correspond to the - * native resolution of the panel. + * advertizes being capable of. The eDP 1.3 and earlier panels + * are generally designed to support only a single clock and + * lane configuration, and typically these values correspond to + * the native resolution of the panel. With eDP 1.4 rate select + * and DSC, this is decreasingly the case, and we need to be + * able to select less than maximum link config. 
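[Aside, not part of the patch] The compressed-bpp reads above (drm_edp_dsc_sink_output_bpp() >> 4) are U6.4 fixed point: six integer bits, four fractional. A quick worked example with a hypothetical DPCD value:

	unsigned int raw = 0x140;	/* hypothetical sink value, 320 */
	unsigned int bpp = raw >> 4;	/* 320 / 16 == 20 bpp */
	/* Gen 11 programs integer bpp only, so the low four bits are
	 * dropped here and handed back as zeroes ("multiply by 16"), as
	 * intel_dp_dsc_get_output_bpp() notes further down. */
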
*/ limits.min_lane_count = limits.max_lane_count; limits.min_clock = limits.max_clock; @@ -1995,23 +2033,52 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, intel_dp->common_rates[limits.max_clock], limits.max_bpp, adjusted_mode->crtc_clock); - /* - * Optimize for slow and wide. This is the place to add alternative - * optimization policy. - */ - if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits)) - return false; - - DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n", - pipe_config->lane_count, pipe_config->port_clock, - pipe_config->pipe_bpp); - - DRM_DEBUG_KMS("DP link rate required %i available %i\n", - intel_dp_link_required(adjusted_mode->crtc_clock, - pipe_config->pipe_bpp), - intel_dp_max_data_rate(pipe_config->port_clock, - pipe_config->lane_count)); + if (intel_dp_is_edp(intel_dp)) + /* + * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4 + * section A.1: "It is recommended that the minimum number of + * lanes be used, using the minimum link rate allowed for that + * lane configuration." + * + * Note that we use the max clock and lane count for eDP 1.3 and + * earlier, and fast vs. wide is irrelevant. + */ + ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, + &limits); + else + /* Optimize for slow and wide. */ + ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, + &limits); + + /* enable compression if the mode doesn't fit available BW */ + if (!ret) { + if (!intel_dp_dsc_compute_config(intel_dp, pipe_config, + conn_state, &limits)) + return false; + } + + if (pipe_config->dsc_params.compression_enable) { + DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", + pipe_config->lane_count, pipe_config->port_clock, + pipe_config->pipe_bpp, + pipe_config->dsc_params.compressed_bpp); + + DRM_DEBUG_KMS("DP link rate required %i available %i\n", + intel_dp_link_required(adjusted_mode->crtc_clock, + pipe_config->dsc_params.compressed_bpp), + intel_dp_max_data_rate(pipe_config->port_clock, + pipe_config->lane_count)); + } else { + DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n", + pipe_config->lane_count, pipe_config->port_clock, + pipe_config->pipe_bpp); + DRM_DEBUG_KMS("DP link rate required %i available %i\n", + intel_dp_link_required(adjusted_mode->crtc_clock, + pipe_config->pipe_bpp), + intel_dp_max_data_rate(pipe_config->port_clock, + pipe_config->lane_count)); + } return true; } @@ -2023,6 +2090,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base); enum port port = encoder->port; struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); struct intel_connector *intel_connector = intel_dp->attached_connector; @@ -2034,6 +2102,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) pipe_config->has_pch_encoder = true; + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + if (lspcon->active) + lspcon_ycbcr420_config(&intel_connector->base, pipe_config); + pipe_config->has_drrs = false; if (IS_G4X(dev_priv) || port == PORT_A) pipe_config->has_audio = false; @@ -2072,7 +2144,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) return false; - if 
(!intel_dp_compute_link_config(encoder, pipe_config)) + pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && + intel_dp_supports_fec(intel_dp, pipe_config); + + if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state)) return false; if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { @@ -2090,11 +2165,20 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED; } - intel_link_compute_m_n(pipe_config->pipe_bpp, pipe_config->lane_count, - adjusted_mode->crtc_clock, - pipe_config->port_clock, - &pipe_config->dp_m_n, - constant_n); + if (!pipe_config->dsc_params.compression_enable) + intel_link_compute_m_n(pipe_config->pipe_bpp, + pipe_config->lane_count, + adjusted_mode->crtc_clock, + pipe_config->port_clock, + &pipe_config->dp_m_n, + constant_n); + else + intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp, + pipe_config->lane_count, + adjusted_mode->crtc_clock, + pipe_config->port_clock, + &pipe_config->dp_m_n, + constant_n); if (intel_connector->panel.downclock_mode != NULL && dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { @@ -2338,7 +2422,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) if (edp_have_panel_vdd(intel_dp)) return need_to_disable; - intel_display_power_get(dev_priv, intel_dp->aux_power_domain); + intel_display_power_get(dev_priv, + intel_aux_power_domain(intel_dig_port)); DRM_DEBUG_KMS("Turning eDP port %c VDD on\n", port_name(intel_dig_port->base.port)); @@ -2424,7 +2509,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) if ((pp & PANEL_POWER_ON) == 0) intel_dp->panel_power_off_time = ktime_get_boottime(); - intel_display_power_put(dev_priv, intel_dp->aux_power_domain); + intel_display_power_put(dev_priv, + intel_aux_power_domain(intel_dig_port)); } static void edp_panel_vdd_work(struct work_struct *__work) @@ -2537,6 +2623,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp) static void edp_panel_off(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2546,10 +2633,10 @@ static void edp_panel_off(struct intel_dp *intel_dp) return; DRM_DEBUG_KMS("Turn eDP port %c panel power off\n", - port_name(dp_to_dig_port(intel_dp)->base.port)); + port_name(dig_port->base.port)); WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n", - port_name(dp_to_dig_port(intel_dp)->base.port)); + port_name(dig_port->base.port)); pp = ironlake_get_pp_control(intel_dp); /* We need to switch off panel power _and_ force vdd, for otherwise some @@ -2568,7 +2655,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) intel_dp->panel_power_off_time = ktime_get_boottime(); /* We got a reference when we enabled the VDD. */ - intel_display_power_put(dev_priv, intel_dp->aux_power_domain); + intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port)); } void intel_edp_panel_off(struct intel_dp *intel_dp) @@ -2788,6 +2875,22 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; } +void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + bool enable) +{ + int ret; + + if (!crtc_state->dsc_params.compression_enable) + return; + + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, + enable ? 
DP_DECOMPRESSION_EN : 0); + if (ret < 0) + DRM_DEBUG_KMS("Failed to %s sink decompression state\n", + enable ? "enable" : "disable"); +} + /* If the sink supports it, try to set the power state appropriately */ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) { @@ -3900,6 +4003,40 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp) return intel_dp->dpcd[DP_DPCD_REV] != 0; } +static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) +{ + /* + * Clear the cached register set to avoid using stale values + * for the sinks that do not support DSC. + */ + memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); + + /* Clear fec_capable to avoid using stale values */ + intel_dp->fec_capable = 0; + + /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || + intel_dp->edp_dpcd[0] >= DP_EDP_14) { + if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, + intel_dp->dsc_dpcd, + sizeof(intel_dp->dsc_dpcd)) < 0) + DRM_ERROR("Failed to read DPCD register 0x%x\n", + DP_DSC_SUPPORT); + + DRM_DEBUG_KMS("DSC DPCD: %*ph\n", + (int)sizeof(intel_dp->dsc_dpcd), + intel_dp->dsc_dpcd); + + /* FEC is supported only on DP 1.4 */ + if (!intel_dp_is_edp(intel_dp) && + drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, + &intel_dp->fec_capable) < 0) + DRM_ERROR("Failed to read FEC DPCD register\n"); + + DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable); + } +} + static bool intel_edp_init_dpcd(struct intel_dp *intel_dp) { @@ -3976,6 +4113,10 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) intel_dp_set_common_rates(intel_dp); + /* Read the eDP DSC DPCD registers */ + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + intel_dp_get_dsc_sink_cap(intel_dp); + return true; } @@ -3983,8 +4124,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) static bool intel_dp_get_dpcd(struct intel_dp *intel_dp) { - u8 sink_count; - if (!intel_dp_read_dpcd(intel_dp)) return false; @@ -3994,25 +4133,35 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) intel_dp_set_common_rates(intel_dp); } - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0) - return false; - /* - * Sink count can change between short pulse hpd hence - * a member variable in intel_dp will track any changes - * between short pulse interrupts. + * Some eDP panels do not set a valid value for sink count, that is why + * it don't care about read it here and in intel_edp_init_dpcd(). */ - intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count); + if (!intel_dp_is_edp(intel_dp)) { + u8 count; + ssize_t r; - /* - * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that - * a dongle is present but no display. Unless we require to know - * if a dongle is present or not, we don't need to update - * downstream port information. So, an early return here saves - * time from performing other operations which are not required. - */ - if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count) - return false; + r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count); + if (r < 1) + return false; + + /* + * Sink count can change between short pulse hpd hence + * a member variable in intel_dp will track any changes + * between short pulse interrupts. + */ + intel_dp->sink_count = DP_GET_SINK_COUNT(count); + + /* + * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that + * a dongle is present but no display. Unless we require to know + * if a dongle is present or not, we don't need to update + * downstream port information. 
So, an early return here saves + * time from performing other operations which are not required. + */ + if (!intel_dp->sink_count) + return false; + } if (!drm_dp_is_branch(intel_dp->dpcd)) return true; /* native DP sink */ @@ -4029,16 +4178,10 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) } static bool -intel_dp_can_mst(struct intel_dp *intel_dp) +intel_dp_sink_can_mst(struct intel_dp *intel_dp) { u8 mstm_cap; - if (!i915_modparams.enable_dp_mst) - return false; - - if (!intel_dp->can_mst) - return false; - if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) return false; @@ -4048,34 +4191,36 @@ intel_dp_can_mst(struct intel_dp *intel_dp) return mstm_cap & DP_MST_CAP; } +static bool +intel_dp_can_mst(struct intel_dp *intel_dp) +{ + return i915_modparams.enable_dp_mst && + intel_dp->can_mst && + intel_dp_sink_can_mst(intel_dp); +} + static void intel_dp_configure_mst(struct intel_dp *intel_dp) { - if (!i915_modparams.enable_dp_mst) - return; + struct intel_encoder *encoder = + &dp_to_dig_port(intel_dp)->base; + bool sink_can_mst = intel_dp_sink_can_mst(intel_dp); + + DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n", + port_name(encoder->port), yesno(intel_dp->can_mst), + yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst)); if (!intel_dp->can_mst) return; - intel_dp->is_mst = intel_dp_can_mst(intel_dp); - - if (intel_dp->is_mst) - DRM_DEBUG_KMS("Sink is MST capable\n"); - else - DRM_DEBUG_KMS("Sink is not MST capable\n"); + intel_dp->is_mst = sink_can_mst && + i915_modparams.enable_dp_mst; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); } static bool -intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) -{ - return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, - sink_irq_vector) == 1; -} - -static bool intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) { return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, @@ -4083,6 +4228,91 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) DP_DPRX_ESI_LEN; } +u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count, + int mode_clock, int mode_hdisplay) +{ + u16 bits_per_pixel, max_bpp_small_joiner_ram; + int i; + + /* + * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* + * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP) + * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1, + * for MST -> TimeSlotsPerMTP has to be calculated + */ + bits_per_pixel = (link_clock * lane_count * 8 * + DP_DSC_FEC_OVERHEAD_FACTOR) / + mode_clock; + + /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. 
width */ + max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / + mode_hdisplay; + + /* + * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW + * check, output bpp from small joiner RAM check) + */ + bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram); + + /* Error out if the max bpp is less than smallest allowed valid bpp */ + if (bits_per_pixel < valid_dsc_bpp[0]) { + DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel); + return 0; + } + + /* Find the nearest match in the array of known BPPs from VESA */ + for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { + if (bits_per_pixel < valid_dsc_bpp[i + 1]) + break; + } + bits_per_pixel = valid_dsc_bpp[i]; + + /* + * Compressed BPP in U6.4 format so multiply by 16, for Gen 11, + * fractional part is 0 + */ + return bits_per_pixel << 4; +} + +u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, + int mode_clock, + int mode_hdisplay) +{ + u8 min_slice_count, i; + int max_slice_width; + + if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) + min_slice_count = DIV_ROUND_UP(mode_clock, + DP_DSC_MAX_ENC_THROUGHPUT_0); + else + min_slice_count = DIV_ROUND_UP(mode_clock, + DP_DSC_MAX_ENC_THROUGHPUT_1); + + max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); + if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { + DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n", + max_slice_width); + return 0; + } + /* Also take into account max slice width */ + min_slice_count = min_t(uint8_t, min_slice_count, + DIV_ROUND_UP(mode_hdisplay, + max_slice_width)); + + /* Find the closest match to the valid slice count values */ + for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { + if (valid_dsc_slicecount[i] > + drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, + false)) + break; + if (min_slice_count <= valid_dsc_slicecount[i]) + return valid_dsc_slicecount[i]; + } + + DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count); + return 0; +} + static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp) { int status = 0; @@ -4341,6 +4571,17 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) if (!intel_dp->link_trained) return false; + /* + * While PSR source HW is enabled, it will control main-link sending + * frames, enabling and disabling it so trying to do a retrain will fail + * as the link would or not be on or it could mix training patterns + * and frame data at the same time causing retrain to fail. + * Also when exiting PSR, HW will retrain the link anyways fixing + * any link status error. 
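[Aside, not part of the patch] A standalone walk-through of intel_dp_dsc_get_slice_count() above for a hypothetical 4k mode; the sink limits would really come from the DSC DPCD via the drm_dp_dsc_sink_* helpers:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
	#define DP_DSC_PEAK_PIXEL_RATE	    2720000 /* KPixels/s */
	#define DP_DSC_MAX_ENC_THROUGHPUT_0  340000

	static const unsigned char valid_dsc_slicecount[] = { 1, 2, 4 };

	int main(void)
	{
		int mode_clock = 533250;	/* kHz, hypothetical 4k@60 */
		int sink_max_slices = 4;	/* hypothetical sink limit */
		int min_slices, i;

		/* 533250 <= DP_DSC_PEAK_PIXEL_RATE, so the 340000 KP/s
		 * per-encoder throughput applies: 2 slices minimum. */
		min_slices = DIV_ROUND_UP(mode_clock,
					  DP_DSC_MAX_ENC_THROUGHPUT_0);

		/* The slice-width bound, DIV_ROUND_UP(3840, 2560) for a
		 * hypothetical 2560-pixel max slice width, is also 2 here,
		 * so combining the two bounds leaves 2. */

		/* Round up to a slice count the hardware supports. */
		for (i = 0; i < 3; i++)
			if (valid_dsc_slicecount[i] >= min_slices &&
			    valid_dsc_slicecount[i] <= sink_max_slices) {
				printf("%d slices\n",
				       valid_dsc_slicecount[i]);
				return 0;	/* prints "2 slices" */
			}
		return 1;
	}
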
+ */ + if (intel_psr_enabled(intel_dp)) + return false; + if (!intel_dp_get_link_status(intel_dp, link_status)) return false; @@ -4403,7 +4644,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, /* Suppress underruns caused by re-training */ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); - if (crtc->config->has_pch_encoder) + if (crtc_state->has_pch_encoder) intel_set_pch_fifo_underrun_reporting(dev_priv, intel_crtc_pch_transcoder(crtc), false); @@ -4414,7 +4655,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, intel_wait_for_vblank(dev_priv, crtc->pipe); intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); - if (crtc->config->has_pch_encoder) + if (crtc_state->has_pch_encoder) intel_set_pch_fifo_underrun_reporting(dev_priv, intel_crtc_pch_transcoder(crtc), true); @@ -4462,6 +4703,29 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder, return changed; } +static void intel_dp_check_service_irq(struct intel_dp *intel_dp) +{ + u8 val; + + if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) + return; + + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) + return; + + drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); + + if (val & DP_AUTOMATED_TEST_REQUEST) + intel_dp_handle_test_request(intel_dp); + + if (val & DP_CP_IRQ) + intel_hdcp_check_link(intel_dp->attached_connector); + + if (val & DP_SINK_SPECIFIC_IRQ) + DRM_DEBUG_DRIVER("Sink specific irq unhandled\n"); +} + /* * According to DP spec * 5.1.2: @@ -4479,7 +4743,6 @@ static bool intel_dp_short_pulse(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u8 sink_irq_vector = 0; u8 old_sink_count = intel_dp->sink_count; bool ret; @@ -4502,20 +4765,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) return false; } - /* Try to read the source of the interrupt */ - if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && - intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) && - sink_irq_vector != 0) { - /* Clear interrupt source */ - drm_dp_dpcd_writeb(&intel_dp->aux, - DP_DEVICE_SERVICE_IRQ_VECTOR, - sink_irq_vector); - - if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) - intel_dp_handle_test_request(intel_dp); - if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) - DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); - } + intel_dp_check_service_irq(intel_dp); /* Handle CEC interrupts, if any */ drm_dp_cec_irq(&intel_dp->aux); @@ -4810,6 +5060,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, type_str); } +static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, + struct intel_digital_port *dig_port); + /* * This function implements the first part of the Connect Flow described by our * specification, Gen11 TypeC Programming chapter. 
The rest of the flow (reading @@ -4864,9 +5117,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv, if (dig_port->tc_type == TC_PORT_TYPEC && !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) { DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port); - val = I915_READ(PORT_TX_DFLEXDPCSSS); - val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); - I915_WRITE(PORT_TX_DFLEXDPCSSS, val); + icl_tc_phy_disconnect(dev_priv, dig_port); return false; } @@ -4881,21 +5132,24 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, struct intel_digital_port *dig_port) { enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); - u32 val; - if (dig_port->tc_type != TC_PORT_LEGACY && - dig_port->tc_type != TC_PORT_TYPEC) + if (dig_port->tc_type == TC_PORT_UNKNOWN) return; /* - * This function may be called many times in a row without an HPD event - * in between, so try to avoid the write when we can. + * TBT disconnection flow is read the live status, what was done in + * caller. */ - val = I915_READ(PORT_TX_DFLEXDPCSSS); - if (val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)) { + if (dig_port->tc_type == TC_PORT_TYPEC || + dig_port->tc_type == TC_PORT_LEGACY) { + u32 val; + + val = I915_READ(PORT_TX_DFLEXDPCSSS); val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); I915_WRITE(PORT_TX_DFLEXDPCSSS, val); } + + dig_port->tc_type = TC_PORT_UNKNOWN; } /* @@ -4945,19 +5199,14 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); - switch (encoder->hpd_pin) { - case HPD_PORT_A: - case HPD_PORT_B: + if (intel_port_is_combophy(dev_priv, encoder->port)) return icl_combo_port_connected(dev_priv, dig_port); - case HPD_PORT_C: - case HPD_PORT_D: - case HPD_PORT_E: - case HPD_PORT_F: + else if (intel_port_is_tc(dev_priv, encoder->port)) return icl_tc_port_connected(dev_priv, dig_port); - default: + else MISSING_CASE(encoder->hpd_pin); - return false; - } + + return false; } /* @@ -4982,20 +5231,23 @@ bool intel_digital_port_connected(struct intel_encoder *encoder) return g4x_digital_port_connected(encoder); } - if (IS_GEN5(dev_priv)) - return ilk_digital_port_connected(encoder); - else if (IS_GEN6(dev_priv)) - return snb_digital_port_connected(encoder); - else if (IS_GEN7(dev_priv)) - return ivb_digital_port_connected(encoder); - else if (IS_GEN8(dev_priv)) - return bdw_digital_port_connected(encoder); + if (INTEL_GEN(dev_priv) >= 11) + return icl_digital_port_connected(encoder); + else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) + return spt_digital_port_connected(encoder); else if (IS_GEN9_LP(dev_priv)) return bxt_digital_port_connected(encoder); - else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv)) - return spt_digital_port_connected(encoder); - else - return icl_digital_port_connected(encoder); + else if (IS_GEN8(dev_priv)) + return bdw_digital_port_connected(encoder); + else if (IS_GEN7(dev_priv)) + return ivb_digital_port_connected(encoder); + else if (IS_GEN6(dev_priv)) + return snb_digital_port_connected(encoder); + else if (IS_GEN5(dev_priv)) + return ilk_digital_port_connected(encoder); + + MISSING_CASE(INTEL_GEN(dev_priv)); + return false; } static struct edid * @@ -5042,28 +5294,35 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) } static int -intel_dp_long_pulse(struct intel_connector *connector, - struct drm_modeset_acquire_ctx *ctx) +intel_dp_detect(struct drm_connector *connector, + struct 
drm_modeset_acquire_ctx *ctx, + bool force) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_dp *intel_dp = intel_attached_dp(&connector->base); + struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &dig_port->base; enum drm_connector_status status; - u8 sink_irq_vector = 0; + enum intel_display_power_domain aux_domain = + intel_aux_power_domain(dig_port); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); - intel_display_power_get(dev_priv, intel_dp->aux_power_domain); + intel_display_power_get(dev_priv, aux_domain); /* Can't disconnect eDP */ if (intel_dp_is_edp(intel_dp)) status = edp_detect(intel_dp); - else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) + else if (intel_digital_port_connected(encoder)) status = intel_dp_detect_dpcd(intel_dp); else status = connector_status_disconnected; if (status == connector_status_disconnected) { memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); + memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); if (intel_dp->is_mst) { DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", @@ -5089,6 +5348,10 @@ intel_dp_long_pulse(struct intel_connector *connector, intel_dp_print_rates(intel_dp); + /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ + if (INTEL_GEN(dev_priv) >= 11) + intel_dp_get_dsc_sink_cap(intel_dp); + drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd)); @@ -5109,9 +5372,13 @@ intel_dp_long_pulse(struct intel_connector *connector, * with an IRQ_HPD, so force a link status check. 
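[Aside, not part of the patch] intel_dp_long_pulse() has been folded into intel_dp_detect(), which now holds the AUX power reference for the whole probe; every exit, including the new retrain-error path just below, must drop it. Shape of the flow, with a hypothetical stand-in for the probe step:

	intel_display_power_get(dev_priv, aux_domain);

	ret = probe_or_retrain(encoder, ctx);	/* hypothetical stand-in */
	if (ret) {
		intel_display_power_put(dev_priv, aux_domain);
		return ret;	/* e.g. a contended modeset lock */
	}

	intel_display_power_put(dev_priv, aux_domain);
	return status;
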
*/ if (!intel_dp_is_edp(intel_dp)) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + int ret; - intel_dp_retrain_link(encoder, ctx); + ret = intel_dp_retrain_link(encoder, ctx); + if (ret) { + intel_display_power_put(dev_priv, aux_domain); + return ret; + } } /* @@ -5123,61 +5390,17 @@ intel_dp_long_pulse(struct intel_connector *connector, intel_dp->aux.i2c_defer_count = 0; intel_dp_set_edid(intel_dp); - if (intel_dp_is_edp(intel_dp) || connector->detect_edid) + if (intel_dp_is_edp(intel_dp) || + to_intel_connector(connector)->detect_edid) status = connector_status_connected; - intel_dp->detect_done = true; - /* Try to read the source of the interrupt */ - if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && - intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) && - sink_irq_vector != 0) { - /* Clear interrupt source */ - drm_dp_dpcd_writeb(&intel_dp->aux, - DP_DEVICE_SERVICE_IRQ_VECTOR, - sink_irq_vector); - - if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) - intel_dp_handle_test_request(intel_dp); - if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) - DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); - } + intel_dp_check_service_irq(intel_dp); out: if (status != connector_status_connected && !intel_dp->is_mst) intel_dp_unset_edid(intel_dp); - intel_display_power_put(dev_priv, intel_dp->aux_power_domain); - return status; -} - -static int -intel_dp_detect(struct drm_connector *connector, - struct drm_modeset_acquire_ctx *ctx, - bool force) -{ - struct intel_dp *intel_dp = intel_attached_dp(connector); - int status = connector->status; - - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); - - /* If full detect is not performed yet, do a full detect */ - if (!intel_dp->detect_done) { - struct drm_crtc *crtc; - int ret; - - crtc = connector->state->crtc; - if (crtc) { - ret = drm_modeset_lock(&crtc->mutex, ctx); - if (ret) - return ret; - } - - status = intel_dp_long_pulse(intel_dp->attached_connector, ctx); - } - - intel_dp->detect_done = false; - + intel_display_power_put(dev_priv, aux_domain); return status; } @@ -5185,8 +5408,11 @@ static void intel_dp_force(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); - struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *intel_encoder = &dig_port->base; struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); + enum intel_display_power_domain aux_domain = + intel_aux_power_domain(dig_port); DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); @@ -5195,11 +5421,11 @@ intel_dp_force(struct drm_connector *connector) if (connector->status != connector_status_connected) return; - intel_display_power_get(dev_priv, intel_dp->aux_power_domain); + intel_display_power_get(dev_priv, aux_domain); intel_dp_set_edid(intel_dp); - intel_display_power_put(dev_priv, intel_dp->aux_power_domain); + intel_display_power_put(dev_priv, aux_domain); } static int intel_dp_get_modes(struct drm_connector *connector) @@ -5264,27 +5490,6 @@ intel_dp_connector_unregister(struct drm_connector *connector) intel_connector_unregister(connector); } -static void -intel_dp_connector_destroy(struct drm_connector *connector) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - - kfree(intel_connector->detect_edid); - - if (!IS_ERR_OR_NULL(intel_connector->edid)) - kfree(intel_connector->edid); - - /* - * Can't call 
intel_dp_is_edp() since the encoder may have been - * destroyed already. - */ - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) - intel_panel_fini(&intel_connector->panel); - - drm_connector_cleanup(connector); - kfree(connector); -} - void intel_dp_encoder_destroy(struct drm_encoder *encoder) { struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); @@ -5348,7 +5553,8 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN, an, DRM_HDCP_AN_LEN); if (dpcd_ret != DRM_HDCP_AN_LEN) { - DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret); + DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n", + dpcd_ret); return dpcd_ret >= 0 ? -EIO : dpcd_ret; } @@ -5364,10 +5570,10 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, rxbuf, sizeof(rxbuf), DP_AUX_CH_CTL_AUX_AKSV_SELECT); if (ret < 0) { - DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret); + DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret); return ret; } else if (ret == 0) { - DRM_ERROR("Aksv write over DP/AUX was empty\n"); + DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n"); return -EIO; } @@ -5382,7 +5588,7 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, DRM_HDCP_KSV_LEN); if (ret != DRM_HDCP_KSV_LEN) { - DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret); + DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } return 0; @@ -5400,7 +5606,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, bstatus, DRM_HDCP_BSTATUS_LEN); if (ret != DRM_HDCP_BSTATUS_LEN) { - DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret); + DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } return 0; @@ -5415,7 +5621,7 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, bcaps, 1); if (ret != 1) { - DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret); + DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } @@ -5445,7 +5651,7 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, ri_prime, DRM_HDCP_RI_LEN); if (ret != DRM_HDCP_RI_LEN) { - DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret); + DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } return 0; @@ -5460,7 +5666,7 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, &bstatus, 1); if (ret != 1) { - DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret); + DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); return ret >= 0 ? -EIO : ret; } *ksv_ready = bstatus & DP_BSTATUS_READY; @@ -5482,8 +5688,8 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, ksv_fifo + i * DRM_HDCP_KSV_LEN, len); if (ret != len) { - DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i, - ret); + DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n", + i, ret); return ret >= 0 ? 
-EIO : ret; } } @@ -5503,7 +5709,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, DP_AUX_HDCP_V_PRIME(i), part, DRM_HDCP_V_PRIME_PART_LEN); if (ret != DRM_HDCP_V_PRIME_PART_LEN) { - DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); + DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); return ret >= 0 ? -EIO : ret; } return 0; @@ -5526,7 +5732,7 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port) ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, &bstatus, 1); if (ret != 1) { - DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret); + DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); return false; } @@ -5565,6 +5771,7 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = { static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); lockdep_assert_held(&dev_priv->pps_mutex); @@ -5578,7 +5785,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) * indefinitely. */ DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); - intel_display_power_get(dev_priv, intel_dp->aux_power_domain); + intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); edp_panel_vdd_schedule_off(intel_dp); } @@ -5631,7 +5838,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { .atomic_set_property = intel_digital_connector_atomic_set_property, .late_register = intel_dp_connector_register, .early_unregister = intel_dp_connector_unregister, - .destroy = intel_dp_connector_destroy, + .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; @@ -5673,11 +5880,11 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) if (long_hpd) { intel_dp->reset_link_params = true; - intel_dp->detect_done = false; return IRQ_NONE; } - intel_display_power_get(dev_priv, intel_dp->aux_power_domain); + intel_display_power_get(dev_priv, + intel_aux_power_domain(intel_dig_port)); if (intel_dp->is_mst) { if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { @@ -5690,7 +5897,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) intel_dp->is_mst = false; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); - intel_dp->detect_done = false; goto put_power; } } @@ -5700,19 +5906,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) handled = intel_dp_short_pulse(intel_dp); - /* Short pulse can signify loss of hdcp authentication */ - intel_hdcp_check_link(intel_dp->attached_connector); - - if (!handled) { - intel_dp->detect_done = false; + if (!handled) goto put_power; - } } ret = IRQ_HANDLED; put_power: - intel_display_power_put(dev_priv, intel_dp->aux_power_domain); + intel_display_power_put(dev_priv, + intel_aux_power_domain(intel_dig_port)); return ret; } @@ -5743,6 +5945,10 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); + if (HAS_GMCH_DISPLAY(dev_priv)) + drm_connector_attach_max_bpc_property(connector, 6, 10); + else if (INTEL_GEN(dev_priv) >= 5) + drm_connector_attach_max_bpc_property(connector, 6, 12); if (intel_dp_is_edp(intel_dp)) { u32 allowed_scalers; @@ -6099,10 +6305,10 @@ static void 
intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { switch (index) { case DRRS_HIGH_RR: - intel_dp_set_m_n(intel_crtc, M1_N1); + intel_dp_set_m_n(crtc_state, M1_N1); break; case DRRS_LOW_RR: - intel_dp_set_m_n(intel_crtc, M2_N2); + intel_dp_set_m_n(crtc_state, M2_N2); break; case DRRS_MAX_RR: default: @@ -6422,6 +6628,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!intel_dp_is_edp(intel_dp)) return true; + INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work); + /* * On IBX/CPT we may get here with LVDS already registered. Since the * driver uses the only internal power sequencer available for both @@ -6514,6 +6722,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, intel_connector->panel.backlight.power = intel_edp_backlight_power; intel_panel_setup_backlight(connector, pipe); + if (fixed_mode) + drm_connector_init_panel_orientation_property( + connector, fixed_mode->hdisplay, fixed_mode->vdisplay); + return true; out_vdd_off: @@ -6624,9 +6836,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp_aux_init(intel_dp); - INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, - edp_panel_vdd_work); - intel_connector_attach_encoder(intel_connector, intel_encoder); if (HAS_DDI(dev_priv)) @@ -6743,6 +6952,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, if (port != PORT_A) intel_infoframe_init(intel_dig_port); + intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); if (!intel_dp_init_connector(intel_dig_port, intel_connector)) goto err_init_connector; diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 1b00f8ea145b..4de247ddf05f 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -51,6 +51,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return false; + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_pch_encoder = false; bpp = 24; if (intel_dp->compliance.test_data.bpc) { @@ -208,12 +209,25 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder, struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; - if (intel_dp->active_mst_links == 0 && - intel_dig_port->base.pre_pll_enable) + if (intel_dp->active_mst_links == 0) intel_dig_port->base.pre_pll_enable(&intel_dig_port->base, pipe_config, NULL); } +static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + + if (intel_dp->active_mst_links == 0) + intel_dig_port->base.post_pll_disable(&intel_dig_port->base, + old_crtc_state, + old_conn_state); +} + static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) @@ -335,24 +349,12 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force) intel_connector->port); } -static void -intel_dp_mst_connector_destroy(struct drm_connector *connector) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - - if (!IS_ERR_OR_NULL(intel_connector->edid)) - 
kfree(intel_connector->edid); - - drm_connector_cleanup(connector); - kfree(connector); -} - static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { .detect = intel_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, - .destroy = intel_dp_mst_connector_destroy, + .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, }; @@ -452,6 +454,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo if (!intel_connector) return NULL; + intel_connector->get_hw_state = intel_dp_mst_get_hw_state; + intel_connector->mst_port = intel_dp; + intel_connector->port = port; + connector = &intel_connector->base; ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); @@ -462,10 +468,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); - intel_connector->get_hw_state = intel_dp_mst_get_hw_state; - intel_connector->mst_port = intel_dp; - intel_connector->port = port; - for_each_pipe(dev_priv, pipe) { struct drm_encoder *enc = &intel_dp->mst_encoders[pipe]->base.base; @@ -560,6 +562,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum intel_encoder->disable = intel_mst_disable_dp; intel_encoder->post_disable = intel_mst_post_disable_dp; intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; + intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp; intel_encoder->pre_enable = intel_mst_pre_enable_dp; intel_encoder->enable = intel_mst_enable_dp; intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state; diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c index 00b3ab656b06..3c7f10d17658 100644 --- a/drivers/gpu/drm/i915/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c @@ -748,7 +748,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); - if (crtc->config->lane_count > 2) { + if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); if (reset) val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); @@ -765,7 +765,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, val |= DPIO_PCS_CLK_SOFT_RESET; vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); - if (crtc->config->lane_count > 2) { + if (crtc_state->lane_count > 2) { val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); val |= CHV_PCS_REQ_SOFTRESET_EN; if (reset) diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index e6cac9225536..d513ca875c67 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -126,16 +126,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, /** * intel_prepare_shared_dpll - call a dpll's prepare hook - * @crtc: CRTC which has a shared dpll + * @crtc_state: CRTC, and its state, which has a shared dpll * * This calls the PLL's prepare hook if it has one and if the PLL is not * already enabled. The prepare hook is platform specific. 
*/ -void intel_prepare_shared_dpll(struct intel_crtc *crtc) +void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_shared_dpll *pll = crtc->config->shared_dpll; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_shared_dpll *pll = crtc_state->shared_dpll; if (WARN_ON(pll == NULL)) return; @@ -154,15 +154,15 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc) /** * intel_enable_shared_dpll - enable a CRTC's shared DPLL - * @crtc: CRTC which has a shared DPLL + * @crtc_state: CRTC, and its state, which has a shared DPLL * * Enable the shared DPLL used by @crtc. */ -void intel_enable_shared_dpll(struct intel_crtc *crtc) +void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_shared_dpll *pll = crtc->config->shared_dpll; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_shared_dpll *pll = crtc_state->shared_dpll; unsigned int crtc_mask = drm_crtc_mask(&crtc->base); unsigned int old_mask; @@ -199,14 +199,15 @@ out: /** * intel_disable_shared_dpll - disable a CRTC's shared DPLL - * @crtc: CRTC which has a shared DPLL + * @crtc_state: CRTC, and its state, which has a shared DPLL * * Disable the shared DPLL used by @crtc. */ -void intel_disable_shared_dpll(struct intel_crtc *crtc) +void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_shared_dpll *pll = crtc->config->shared_dpll; + struct intel_shared_dpll *pll = crtc_state->shared_dpll; unsigned int crtc_mask = drm_crtc_mask(&crtc->base); /* PCH only available on ILK+ */ @@ -409,14 +410,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - struct drm_device *dev = &dev_priv->drm; - struct intel_crtc *crtc; - - /* Make sure no transcoder isn't still depending on us. 
*/ - for_each_intel_crtc(dev, crtc) { - if (crtc->config->shared_dpll == pll) - assert_pch_transcoder_disabled(dev_priv, crtc->pipe); - } I915_WRITE(PCH_DPLL(id), 0); POSTING_READ(PCH_DPLL(id)); @@ -2530,7 +2523,8 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, if (intel_port_is_tc(dev_priv, encoder->port)) ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params); - else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params); else ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params); @@ -2628,11 +2622,16 @@ static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id) return id - DPLL_ID_ICL_MGPLL1 + PORT_C; } -static enum intel_dpll_id icl_port_to_mg_pll_id(enum port port) +enum intel_dpll_id icl_port_to_mg_pll_id(enum port port) { return port - PORT_C + DPLL_ID_ICL_MGPLL1; } +bool intel_dpll_is_combophy(enum intel_dpll_id id) +{ + return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1; +} + static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, uint32_t *target_dco_khz, struct intel_dpll_hw_state *state) @@ -2874,8 +2873,8 @@ static struct intel_shared_dpll * icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - struct intel_digital_port *intel_dig_port = - enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *intel_dig_port; struct intel_shared_dpll *pll; struct intel_dpll_hw_state pll_state = {}; enum port port = encoder->port; @@ -2883,18 +2882,21 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, int clock = crtc_state->port_clock; bool ret; - switch (port) { - case PORT_A: - case PORT_B: + if (intel_port_is_combophy(dev_priv, port)) { min = DPLL_ID_ICL_DPLL0; max = DPLL_ID_ICL_DPLL1; ret = icl_calc_dpll_state(crtc_state, encoder, clock, &pll_state); - break; - case PORT_C: - case PORT_D: - case PORT_E: - case PORT_F: + } else if (intel_port_is_tc(dev_priv, port)) { + if (encoder->type == INTEL_OUTPUT_DP_MST) { + struct intel_dp_mst_encoder *mst_encoder; + + mst_encoder = enc_to_mst(&encoder->base); + intel_dig_port = mst_encoder->primary; + } else { + intel_dig_port = enc_to_dig_port(&encoder->base); + } + if (intel_dig_port->tc_type == TC_PORT_TBT) { min = DPLL_ID_ICL_TBTPLL; max = min; @@ -2906,8 +2908,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, ret = icl_calc_mg_pll_state(crtc_state, encoder, clock, &pll_state); } - break; - default: + } else { MISSING_CASE(port); return NULL; } @@ -2932,21 +2933,16 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id) { - switch (id) { - default: - MISSING_CASE(id); - /* fall through */ - case DPLL_ID_ICL_DPLL0: - case DPLL_ID_ICL_DPLL1: + if (intel_dpll_is_combophy(id)) return CNL_DPLL_ENABLE(id); - case DPLL_ID_ICL_TBTPLL: + else if (id == DPLL_ID_ICL_TBTPLL) return TBT_PLL_ENABLE; - case DPLL_ID_ICL_MGPLL1: - case DPLL_ID_ICL_MGPLL2: - case DPLL_ID_ICL_MGPLL3: - case DPLL_ID_ICL_MGPLL4: + else + /* + * TODO: Make MG_PLL macros use + * tc port id instead of port id + */ return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id)); - } } static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, @@ -2965,17 +2961,11 @@ static bool 
icl_pll_get_hw_state(struct drm_i915_private *dev_priv, if (!(val & PLL_ENABLE)) goto out; - switch (id) { - case DPLL_ID_ICL_DPLL0: - case DPLL_ID_ICL_DPLL1: - case DPLL_ID_ICL_TBTPLL: + if (intel_dpll_is_combophy(id) || + id == DPLL_ID_ICL_TBTPLL) { hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); - break; - case DPLL_ID_ICL_MGPLL1: - case DPLL_ID_ICL_MGPLL2: - case DPLL_ID_ICL_MGPLL3: - case DPLL_ID_ICL_MGPLL4: + } else { port = icl_mg_pll_id_to_port(id); hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port)); hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; @@ -3013,9 +3003,6 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask; hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask; - break; - default: - MISSING_CASE(id); } ret = true; @@ -3104,21 +3091,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv, PLL_POWER_STATE, 1)) DRM_ERROR("PLL %d Power not enabled\n", id); - switch (id) { - case DPLL_ID_ICL_DPLL0: - case DPLL_ID_ICL_DPLL1: - case DPLL_ID_ICL_TBTPLL: + if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL) icl_dpll_write(dev_priv, pll); - break; - case DPLL_ID_ICL_MGPLL1: - case DPLL_ID_ICL_MGPLL2: - case DPLL_ID_ICL_MGPLL3: - case DPLL_ID_ICL_MGPLL4: + else icl_mg_pll_write(dev_priv, pll); - break; - default: - MISSING_CASE(id); - } /* * DVFS pre sequence would be here, but in our driver the cdclk code diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index bf0de8a4dc63..a033d8f06d4a 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h @@ -334,9 +334,9 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, void intel_release_shared_dpll(struct intel_shared_dpll *dpll, struct intel_crtc *crtc, struct drm_atomic_state *state); -void intel_prepare_shared_dpll(struct intel_crtc *crtc); -void intel_enable_shared_dpll(struct intel_crtc *crtc); -void intel_disable_shared_dpll(struct intel_crtc *crtc); +void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state); +void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); +void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_shared_dpll_swap_state(struct drm_atomic_state *state); void intel_shared_dpll_init(struct drm_device *dev); @@ -345,5 +345,7 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, uint32_t pll_id); int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv); +enum intel_dpll_id icl_port_to_mg_pll_id(enum port port); +bool intel_dpll_is_combophy(enum intel_dpll_id id); #endif /* _INTEL_DPLL_MGR_H_ */ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f8dc84b2d2d3..f94a04b4ad87 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -381,6 +381,15 @@ struct intel_hdcp_shim { bool *hdcp_capable); }; +struct intel_hdcp { + const struct intel_hdcp_shim *shim; + /* Mutex for hdcp state of the connector */ + struct mutex mutex; + u64 value; + struct delayed_work check_work; + struct work_struct prop_work; +}; + struct intel_connector { struct drm_connector base; /* @@ -413,11 +422,7 @@ struct intel_connector { /* Work struct to schedule a uevent on link train failure */ struct work_struct modeset_retry_work; - const struct 
intel_hdcp_shim *hdcp_shim; - struct mutex hdcp_mutex; - uint64_t hdcp_value; /* protected by hdcp_mutex */ - struct delayed_work hdcp_check_work; - struct work_struct hdcp_prop_work; + struct intel_hdcp hdcp; }; struct intel_digital_connector_state { @@ -539,6 +544,26 @@ struct intel_plane_state { */ int scaler_id; + /* + * linked_plane: + * + * ICL planar formats require 2 planes that are updated as pairs. + * This member is used to make sure the other plane is also updated + * when required, and for update_slave() to find the correct + * plane_state to pass as argument. + */ + struct intel_plane *linked_plane; + + /* + * slave: + * If set, this plane is not updated directly; the linked plane's + * state is used to update it during atomic commit, via the + * update_slave() callback. + * + * It's also used by the watermark code to ignore wm calculations on + * this plane. They're calculated by the linked plane's wm code. + */ + u32 slave; + struct drm_intel_sprite_colorkey ckey; }; @@ -547,6 +572,7 @@ struct intel_initial_plane_config { unsigned int tiling; int size; u32 base; + u8 rotation; }; #define SKL_MIN_SRC_W 8 @@ -680,6 +706,8 @@ struct intel_crtc_wm_state { /* gen9+ only needs 1-step wm programming */ struct skl_pipe_wm optimal; struct skl_ddb_entry ddb; + struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES]; + struct skl_ddb_entry plane_ddb_uv[I915_MAX_PLANES]; } skl; struct { @@ -712,6 +740,13 @@ struct intel_crtc_wm_state { bool need_postvbl_update; }; +enum intel_output_format { + INTEL_OUTPUT_FORMAT_INVALID, + INTEL_OUTPUT_FORMAT_RGB, + INTEL_OUTPUT_FORMAT_YCBCR420, + INTEL_OUTPUT_FORMAT_YCBCR444, +}; + struct intel_crtc_state { struct drm_crtc_state base; @@ -893,14 +928,32 @@ struct intel_crtc_state { u8 active_planes; u8 nv12_planes; + /* bitmask of planes that will be updated during the commit */ + u8 update_planes; + /* HDMI scrambling status */ bool hdmi_scrambling; /* HDMI High TMDS char rate ratio */ bool hdmi_high_tmds_clock_ratio; - /* output format is YCBCR 4:2:0 */ - bool ycbcr420; + /* Output format RGB/YCBCR etc */ + enum intel_output_format output_format; + + /* Output down scaling is done in LSPCON device */ + bool lspcon_downsampling; + + /* Display Stream compression state */ + struct { + bool compression_enable; + bool dsc_split; + u16 compressed_bpp; + u8 slice_count; + } dsc_params; + struct drm_dsc_config dp_dsc_cfg; + + /* Forward Error correction State */ + bool fec_enable; }; struct intel_crtc { @@ -973,8 +1026,11 @@ struct intel_plane { void (*update_plane)(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); + void (*update_slave)(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); void (*disable_plane)(struct intel_plane *plane, - struct intel_crtc *crtc); + const struct intel_crtc_state *crtc_state); bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); int (*check_plane)(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); @@ -1070,13 +1126,13 @@ struct intel_dp { bool link_mst; bool link_trained; bool has_audio; - bool detect_done; bool reset_link_params; - enum aux_ch aux_ch; uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; + u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]; + u8 fec_capable; /* source rates */ int num_source_rates; const int *source_rates; @@ -1094,7 +1150,6 @@ struct
intel_dp { /* sink or branch descriptor */ struct drm_dp_desc desc; struct drm_dp_aux aux; - enum intel_display_power_domain aux_power_domain; uint8_t train_set[4]; int panel_power_up_delay; int panel_power_down_delay; @@ -1156,9 +1211,15 @@ struct intel_dp { struct intel_dp_compliance compliance; }; +enum lspcon_vendor { + LSPCON_VENDOR_MCA, + LSPCON_VENDOR_PARADE +}; + struct intel_lspcon { bool active; enum drm_lspcon_mode mode; + enum lspcon_vendor vendor; }; struct intel_digital_port { @@ -1170,18 +1231,20 @@ struct intel_digital_port { enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); bool release_cl2_override; uint8_t max_lanes; + /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */ + enum aux_ch aux_ch; enum intel_display_power_domain ddi_io_power_domain; enum tc_port_type tc_type; - void (*write_infoframe)(struct drm_encoder *encoder, + void (*write_infoframe)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len); - void (*set_infoframes)(struct drm_encoder *encoder, + void (*set_infoframes)(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); - bool (*infoframe_enabled)(struct drm_encoder *encoder, + bool (*infoframe_enabled)(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config); }; @@ -1281,6 +1344,12 @@ enc_to_dig_port(struct drm_encoder *encoder) return NULL; } +static inline struct intel_digital_port * +conn_to_dig_port(struct intel_connector *connector) +{ + return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base); +} + static inline struct intel_dp_mst_encoder * enc_to_mst(struct drm_encoder *encoder) { @@ -1306,6 +1375,12 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder) } } +static inline struct intel_lspcon * +enc_to_intel_lspcon(struct drm_encoder *encoder) +{ + return &enc_to_dig_port(encoder)->lspcon; +} + static inline struct intel_digital_port * dp_to_dig_port(struct intel_dp *intel_dp) { @@ -1331,6 +1406,27 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) } static inline struct intel_plane_state * +intel_atomic_get_plane_state(struct intel_atomic_state *state, + struct intel_plane *plane) +{ + struct drm_plane_state *ret = + drm_atomic_get_plane_state(&state->base, &plane->base); + + if (IS_ERR(ret)) + return ERR_CAST(ret); + + return to_intel_plane_state(ret); +} + +static inline struct intel_plane_state * +intel_atomic_get_old_plane_state(struct intel_atomic_state *state, + struct intel_plane *plane) +{ + return to_intel_plane_state(drm_atomic_get_old_plane_state(&state->base, + &plane->base)); +} + +static inline struct intel_plane_state * intel_atomic_get_new_plane_state(struct intel_atomic_state *state, struct intel_plane *plane) { @@ -1438,12 +1534,9 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing); int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, bool enable); -void icl_map_plls_to_ports(struct drm_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct drm_atomic_state *old_state); -void icl_unmap_plls_to_ports(struct drm_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct drm_atomic_state *old_state); +void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); +int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, + enum intel_dpll_id pll_id); unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, int color_plane, unsigned int 
height); @@ -1488,7 +1581,6 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state, void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc); -void intel_update_rawclk(struct drm_i915_private *dev_priv); int vlv_get_hpll_vco(struct drm_i915_private *dev_priv); int vlv_get_cck_clock(struct drm_i915_private *dev_priv, const char *name, u32 reg, int ref_freq); @@ -1509,20 +1601,12 @@ void intel_mark_idle(struct drm_i915_private *dev_priv); int intel_display_suspend(struct drm_device *dev); void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv); void intel_encoder_destroy(struct drm_encoder *encoder); -int intel_connector_init(struct intel_connector *); -struct intel_connector *intel_connector_alloc(void); -void intel_connector_free(struct intel_connector *connector); -bool intel_connector_get_hw_state(struct intel_connector *connector); -void intel_connector_attach_encoder(struct intel_connector *connector, - struct intel_encoder *encoder); struct drm_display_mode * intel_encoder_current_mode(struct intel_encoder *encoder); bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port); bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port); enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port); - -enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, @@ -1628,9 +1712,11 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv); void bxt_disable_dc9(struct drm_i915_private *dev_priv); void gen9_enable_dc5(struct drm_i915_private *dev_priv); unsigned int skl_cdclk_get_vco(unsigned int freq); +void skl_enable_dc6(struct drm_i915_private *dev_priv); void intel_dp_get_m_n(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); -void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n); +void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, + enum link_m_n_set m_n); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, struct dpll *best_clock); @@ -1641,12 +1727,14 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state); void hsw_enable_ips(const struct intel_crtc_state *crtc_state); void hsw_disable_ips(const struct intel_crtc_state *crtc_state); enum intel_display_power_domain intel_port_to_power_domain(enum port port); +enum intel_display_power_domain +intel_aux_power_domain(struct intel_digital_port *dig_port); void intel_mode_from_pipe_config(struct drm_display_mode *mode, struct intel_crtc_state *pipe_config); void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); -u16 skl_scaler_calc_phase(int sub, bool chroma_center); +u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center); int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); int skl_max_scale(const struct intel_crtc_state *crtc_state, u32 pixel_format); @@ -1670,6 +1758,24 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation); +/* intel_connector.c */ +int intel_connector_init(struct intel_connector *connector); 
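(Aside: the intel_connector_destroy() that several .destroy hooks in this series now point at lives in the new intel_connector.c, whose body is not among the hunks shown. A minimal sketch of what the shared destructor plausibly does, reconstructed from the per-connector callbacks it replaces in this patch (the removed intel_dp_mst_connector_destroy() EDID kfree and the removed intel_dvo_destroy() panel teardown); treat it as an illustration, not the committed code:)

void intel_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* From the removed MST callback: drop a cached EDID, if any. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* From the removed DVO callback: tear down fixed-panel state. */
	intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}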
+struct intel_connector *intel_connector_alloc(void); +void intel_connector_free(struct intel_connector *connector); +void intel_connector_destroy(struct drm_connector *connector); +int intel_connector_register(struct drm_connector *connector); +void intel_connector_unregister(struct drm_connector *connector); +void intel_connector_attach_encoder(struct intel_connector *connector, + struct intel_encoder *encoder); +bool intel_connector_get_hw_state(struct intel_connector *connector); +enum pipe intel_connector_get_pipe(struct intel_connector *connector); +int intel_connector_update_modes(struct drm_connector *connector, + struct edid *edid); +int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); +void intel_attach_force_audio_property(struct drm_connector *connector); +void intel_attach_broadcast_rgb_property(struct drm_connector *connector); +void intel_attach_aspect_ratio_property(struct drm_connector *connector); + /* intel_csr.c */ void intel_csr_ucode_init(struct drm_i915_private *); void intel_csr_load_program(struct drm_i915_private *); @@ -1695,6 +1801,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp); int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx); void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); +void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + bool enable); void intel_dp_encoder_reset(struct drm_encoder *encoder); void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); void intel_dp_encoder_destroy(struct drm_encoder *encoder); @@ -1728,9 +1837,6 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits); void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits); -void icl_program_mg_dp_mode(struct intel_dp *intel_dp); -void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port); -void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port); void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, @@ -1748,6 +1854,16 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); bool intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); +uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count, + int mode_clock, int mode_hdisplay); +uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock, + int mode_hdisplay); + +/* intel_vdsc.c */ +int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config); +enum intel_display_power_domain +intel_dsc_power_domain(const struct intel_crtc_state *crtc_state); static inline unsigned int intel_dp_unused_lane_mask(int lane_count) { @@ -1768,6 +1884,9 @@ void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); /* vlv_dsi.c */ void vlv_dsi_init(struct drm_i915_private *dev_priv); +/* icl_dsi.c */ +void icl_dsi_init(struct drm_i915_private *dev_priv); + /* intel_dsi_dcs_backlight.c */ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); @@ -1858,7 +1977,6 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); void intel_infoframe_init(struct intel_digital_port *intel_dig_port); - /* intel_lvds.c */ bool intel_lvds_port_enabled(struct 
drm_i915_private *dev_priv, i915_reg_t lvds_reg, enum pipe *pipe); @@ -1866,19 +1984,9 @@ void intel_lvds_init(struct drm_i915_private *dev_priv); struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev); bool intel_is_dual_link_lvds(struct drm_device *dev); - -/* intel_modes.c */ -int intel_connector_update_modes(struct drm_connector *connector, - struct edid *edid); -int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); -void intel_attach_force_audio_property(struct drm_connector *connector); -void intel_attach_broadcast_rgb_property(struct drm_connector *connector); -void intel_attach_aspect_ratio_property(struct drm_connector *connector); - - /* intel_overlay.c */ -void intel_setup_overlay(struct drm_i915_private *dev_priv); -void intel_cleanup_overlay(struct drm_i915_private *dev_priv); +void intel_overlay_setup(struct drm_i915_private *dev_priv); +void intel_overlay_cleanup(struct drm_i915_private *dev_priv); int intel_overlay_switch_off(struct intel_overlay *overlay); int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -1907,7 +2015,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector, void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state); -void intel_panel_destroy_backlight(struct drm_connector *connector); extern struct drm_display_mode *intel_find_panel_downclock( struct drm_i915_private *dev_priv, struct drm_display_mode *fixed_mode, @@ -1936,6 +2043,7 @@ int intel_hdcp_enable(struct intel_connector *connector); int intel_hdcp_disable(struct intel_connector *connector); int intel_hdcp_check_link(struct intel_connector *connector); bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); +bool intel_hdcp_capable(struct intel_connector *connector); /* intel_psr.c */ #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support) @@ -1961,12 +2069,18 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir); void intel_psr_short_pulse(struct intel_dp *intel_dp); int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, u32 *out_value); +bool intel_psr_enabled(struct intel_dp *intel_dp); + +/* intel_quirks.c */ +void intel_init_quirks(struct drm_i915_private *dev_priv); /* intel_runtime_pm.c */ int intel_power_domains_init(struct drm_i915_private *); void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv); +void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume); +void icl_display_core_uninit(struct drm_i915_private *dev_priv); void intel_power_domains_enable(struct drm_i915_private *dev_priv); void intel_power_domains_disable(struct drm_i915_private *dev_priv); @@ -2090,6 +2204,9 @@ void g4x_wm_get_hw_state(struct drm_device *dev); void vlv_wm_get_hw_state(struct drm_device *dev); void ilk_wm_get_hw_state(struct drm_device *dev); void skl_wm_get_hw_state(struct drm_device *dev); +void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, + struct skl_ddb_entry *ddb_y, + struct skl_ddb_entry *ddb_uv); void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb /* out */); void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, @@ -2101,10 +2218,13 @@ int 
intel_enable_sagv(struct drm_i915_private *dev_priv); int intel_disable_sagv(struct drm_i915_private *dev_priv); bool skl_wm_level_equals(const struct skl_wm_level *l1, const struct skl_wm_level *l2); -bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv, - const struct skl_ddb_entry **entries, - const struct skl_ddb_entry *ddb, - int ignore); +bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, + const struct skl_ddb_entry entries[], + int num_entries, int ignore_idx); +void skl_write_plane_wm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); +void skl_write_cursor_wm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); bool ilk_disable_lp_wm(struct drm_device *dev); int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, struct intel_crtc_state *cstate); @@ -2127,23 +2247,29 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); -void skl_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); -void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc); -bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe); -bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id); -bool skl_plane_has_planar(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id); -unsigned int skl_plane_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation); -int skl_plane_check(struct intel_crtc_state *crtc_state, - struct intel_plane_state *plane_state); int intel_plane_check_stride(const struct intel_plane_state *plane_state); int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); int chv_plane_check_rotation(const struct intel_plane_state *plane_state); +struct intel_plane * +skl_universal_plane_create(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id); + +static inline bool icl_is_nv12_y_plane(enum plane_id id) +{ + /* Don't need to do a gen check, these planes are only available on gen11 */ + if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5) + return true; + + return false; +} + +static inline bool icl_is_hdr_plane(struct intel_plane *plane) +{ + if (INTEL_GEN(to_i915(plane->base.dev)) < 11) + return false; + + return plane->id < PLANE_SPRITE2; +} /* intel_tv.c */ void intel_tv_init(struct drm_i915_private *dev_priv); @@ -2185,11 +2311,16 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state); /* intel_atomic_plane.c */ -struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane); +struct intel_plane *intel_plane_alloc(void); +void intel_plane_free(struct intel_plane *plane); struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); extern const struct drm_plane_helper_funcs intel_plane_helper_funcs; +void skl_update_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *crtc_state, const 
struct intel_plane_state *old_plane_state, @@ -2205,6 +2336,18 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state); bool lspcon_init(struct intel_digital_port *intel_dig_port); void lspcon_resume(struct intel_lspcon *lspcon); void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); +void lspcon_write_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + const void *buf, ssize_t len); +void lspcon_set_infoframes(struct intel_encoder *encoder, + bool enable, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); +bool lspcon_infoframe_enabled(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config); +void lspcon_ycbcr420_config(struct drm_connector *connector, + struct intel_crtc_state *crtc_state); /* intel_pipe_crc.c */ #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c new file mode 100644 index 000000000000..5fec02aceaed --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2018 Intel Corporation + */ + +#include <drm/drm_mipi_dsi.h> +#include "intel_dsi.h" + +int intel_dsi_bitrate(const struct intel_dsi *intel_dsi) +{ + int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + + if (WARN_ON(bpp < 0)) + bpp = 16; + + return intel_dsi->pclk * bpp / intel_dsi->lane_count; +} + +int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi) +{ + switch (intel_dsi->escape_clk_div) { + default: + case 0: + return 50; + case 1: + return 100; + case 2: + return 200; + } +} + +int intel_dsi_get_modes(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_display_mode *mode; + + DRM_DEBUG_KMS("\n"); + + if (!intel_connector->panel.fixed_mode) { + DRM_DEBUG_KMS("no fixed mode\n"); + return 0; + } + + mode = drm_mode_duplicate(connector->dev, + intel_connector->panel.fixed_mode); + if (!mode) { + DRM_DEBUG_KMS("drm_mode_duplicate failed\n"); + return 0; + } + + drm_mode_probed_add(connector, mode); + return 1; +} + +enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; + int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + + DRM_DEBUG_KMS("\n"); + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + if (fixed_mode) { + if (mode->hdisplay > fixed_mode->hdisplay) + return MODE_PANEL; + if (mode->vdisplay > fixed_mode->vdisplay) + return MODE_PANEL; + if (fixed_mode->clock > max_dotclk) + return MODE_CLOCK_HIGH; + } + + return MODE_OK; +} + +struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, + const struct mipi_dsi_host_ops *funcs, + enum port port) +{ + struct intel_dsi_host *host; + struct mipi_dsi_device *device; + + host = kzalloc(sizeof(*host), GFP_KERNEL); + if (!host) + return NULL; + + host->base.ops = funcs; + host->intel_dsi = intel_dsi; + host->port = port; + + /* + * We should call mipi_dsi_host_register(&host->base) here, but we don't + * have a host->dev, and we don't have OF stuff either. So just use the + * dsi framework as a library and hope for the best. Create the dsi + * devices by ourselves here too. Need to be careful though, because we + * don't initialize any of the driver model devices here. 
+ */ + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) { + kfree(host); + return NULL; + } + + device->host = &host->base; + host->device = device; + + return host; +} + +enum drm_panel_orientation +intel_dsi_get_panel_orientation(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + enum drm_panel_orientation orientation; + + orientation = dev_priv->vbt.dsi.orientation; + if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) + return orientation; + + orientation = dev_priv->vbt.orientation; + if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) + return orientation; + + return DRM_MODE_PANEL_ORIENTATION_NORMAL; +} diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index ad7c1cb32983..d968f1f13e09 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -81,14 +81,21 @@ struct intel_dsi { u16 dcs_backlight_ports; u16 dcs_cabc_ports; + /* RGB or BGR */ + bool bgr_enabled; + u8 pixel_overlap; u32 port_bits; u32 bw_timer; u32 dphy_reg; + + /* data lanes dphy timing */ + u32 dphy_data_lane_reg; u32 video_frmt_cfg_bits; u16 lp_byte_clk; /* timeouts in byte clocks */ + u16 hs_tx_timeout; u16 lp_rx_timeout; u16 turn_arnd_val; u16 rst_timer_val; @@ -129,9 +136,36 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) return container_of(encoder, struct intel_dsi, base.base); } +static inline bool is_vid_mode(struct intel_dsi *intel_dsi) +{ + return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE; +} + +static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) +{ + return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE; +} + +static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder) +{ + return enc_to_intel_dsi(&encoder->base)->ports; +} + +/* intel_dsi.c */ +int intel_dsi_bitrate(const struct intel_dsi *intel_dsi); +int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi); +enum drm_panel_orientation +intel_dsi_get_panel_orientation(struct intel_connector *connector); + /* vlv_dsi.c */ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); +int intel_dsi_get_modes(struct drm_connector *connector); +enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode); +struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, + const struct mipi_dsi_host_ops *funcs, + enum port port); /* vlv_dsi_pll.c */ int vlv_dsi_pll_compute(struct intel_encoder *encoder, @@ -158,5 +192,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi); void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, enum mipi_seq seq_id); +void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); #endif /* _INTEL_DSI_H */ diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index ac83d6b89ae0..a1a8b3790e61 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c @@ -103,6 +103,18 @@ static struct gpio_map vlv_gpio_table[] = { #define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4) #define CHV_GPIO_CFGLOCK (1 << 31) +/* ICL DSI Display GPIO Pins */ +#define ICL_GPIO_DDSP_HPD_A 0 +#define ICL_GPIO_L_VDDEN_1 1 +#define ICL_GPIO_L_BKLTEN_1 2 +#define ICL_GPIO_DDPA_CTRLCLK_1 3 +#define ICL_GPIO_DDPA_CTRLDATA_1 4 +#define ICL_GPIO_DDSP_HPD_B 5 
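(Aside on the new intel_dsi_bitrate() helper added above: it centralizes the per-lane link-rate math that intel_dsi_vbt_init() previously open-coded. A worked example with hypothetical panel numbers, not values from this patch:)

#include <assert.h>

/* Mirrors intel_dsi_bitrate(): pclk (kHz) * bpp / lane_count, in kbps.
 * The inputs below model a 1080p RGB888 panel on 4 data lanes. */
static int dsi_bitrate_kbps(int pclk_khz, int bpp, int lane_count)
{
	return pclk_khz * bpp / lane_count;
}

int main(void)
{
	/* 148500 kHz * 24 bpp / 4 lanes = 891000 kbps, ~891 Mbps per lane.
	 * In dual-link mode intel_dsi->pclk has already been halved per
	 * port by intel_dsi_vbt_init(), so the same formula applies to
	 * each link. */
	assert(dsi_bitrate_kbps(148500, 24, 4) == 891000);
	return 0;
}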
+#define ICL_GPIO_L_VDDEN_2 6 +#define ICL_GPIO_L_BKLTEN_2 7 +#define ICL_GPIO_DDPA_CTRLCLK_2 8 +#define ICL_GPIO_DDPA_CTRLDATA_2 9 + static inline enum port intel_dsi_seq_port_to_port(u8 port) { return port ? PORT_C : PORT_A; @@ -111,6 +123,7 @@ static inline enum port intel_dsi_seq_port_to_port(u8 port) static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, const u8 *data) { + struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); struct mipi_dsi_device *dsi_device; u8 type, flags, seq_port; u16 len; @@ -181,7 +194,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, break; } - vlv_dsi_wait_for_fifo_empty(intel_dsi, port); + if (!IS_ICELAKE(dev_priv)) + vlv_dsi_wait_for_fifo_empty(intel_dsi, port); out: data += len; @@ -322,6 +336,12 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv, gpiod_set_value(gpio_desc, value); } +static void icl_exec_gpio(struct drm_i915_private *dev_priv, + u8 gpio_source, u8 gpio_index, bool value) +{ + DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n"); +} + static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) { struct drm_device *dev = intel_dsi->base.base.dev; @@ -345,7 +365,9 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) /* pull up/down */ value = *data++ & 1; - if (IS_VALLEYVIEW(dev_priv)) + if (IS_ICELAKE(dev_priv)) + icl_exec_gpio(dev_priv, gpio_source, gpio_index, value); + else if (IS_VALLEYVIEW(dev_priv)) vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value); else if (IS_CHERRYVIEW(dev_priv)) chv_exec_gpio(dev_priv, gpio_source, gpio_number, value); @@ -481,6 +503,17 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, } } +void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) +{ + struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + + /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */ + if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3) + return; + + msleep(msec); +} + int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi) { struct intel_connector *connector = intel_dsi->attached_connector; @@ -499,110 +532,125 @@ int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi) return 1; } -bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) +#define ICL_PREPARE_CNT_MAX 0x7 +#define ICL_CLK_ZERO_CNT_MAX 0xf +#define ICL_TRAIL_CNT_MAX 0x7 +#define ICL_TCLK_PRE_CNT_MAX 0x3 +#define ICL_TCLK_POST_CNT_MAX 0x7 +#define ICL_HS_ZERO_CNT_MAX 0xf +#define ICL_EXIT_ZERO_CNT_MAX 0x7 + +static void icl_dphy_param_init(struct intel_dsi *intel_dsi) { struct drm_device *dev = intel_dsi->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; - struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps; - struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode; - u32 bpp; - u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui; - u32 ui_num, ui_den; + u32 tlpx_ns; u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; u32 ths_prepare_ns, tclk_trail_ns; - u32 tclk_prepare_clkzero, ths_prepare_hszero; - u32 lp_to_hs_switch, hs_to_lp_switch; - u32 pclk, computed_ddr; - u32 mul; - u16 burst_mode_ratio; - enum port port; - - DRM_DEBUG_KMS("\n"); - - intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1; - intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 
1 : 0; - intel_dsi->lane_count = mipi_config->lane_cnt + 1; - intel_dsi->pixel_format = - pixel_format_from_register_bits( - mipi_config->videomode_color_format << 7); - bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + u32 hs_zero_cnt; + u32 tclk_pre_cnt, tclk_post_cnt; - intel_dsi->dual_link = mipi_config->dual_link; - intel_dsi->pixel_overlap = mipi_config->pixel_overlap; - intel_dsi->operation_mode = mipi_config->is_cmd_mode; - intel_dsi->video_mode_format = mipi_config->video_transfer_mode; - intel_dsi->escape_clk_div = mipi_config->byte_clk_sel; - intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout; - intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout; - intel_dsi->rst_timer_val = mipi_config->device_reset_timer; - intel_dsi->init_count = mipi_config->master_init_timer; - intel_dsi->bw_timer = mipi_config->dbi_bw_timer; - intel_dsi->video_frmt_cfg_bits = - mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; + tlpx_ns = intel_dsi_tlpx_ns(intel_dsi); - pclk = mode->clock; + tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail); + ths_prepare_ns = max(mipi_config->ths_prepare, + mipi_config->tclk_prepare); - /* In dual link mode each port needs half of pixel clock */ - if (intel_dsi->dual_link) { - pclk = pclk / 2; + /* + * prepare cnt in escape clocks + * this field is a fixed-point value in 1.2 format, i.e. the most + * significant bit is the integer part and the least significant + * 2 bits are fraction bits, so the field can represent a range + * of 0.25 to 1.75 + */ + prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns); + if (prepare_cnt > ICL_PREPARE_CNT_MAX) { + DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt); + prepare_cnt = ICL_PREPARE_CNT_MAX; + } - /* we can enable pixel_overlap if needed by panel.
In this - * case we need to increase the pixelclock for extra pixels - */ - if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { - pclk += DIV_ROUND_UP(mode->vtotal * - intel_dsi->pixel_overlap * - 60, 1000); - } + /* clk zero count in escape clocks */ + clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero - + ths_prepare_ns, tlpx_ns); + if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) { + DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt); + clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX; } - /* Burst Mode Ratio - * Target ddr frequency from VBT / non burst ddr freq - * multiply by 100 to preserve remainder - */ - if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) { - if (mipi_config->target_burst_mode_freq) { - computed_ddr = (pclk * bpp) / intel_dsi->lane_count; + /* trail cnt in escape clocks*/ + trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns); + if (trail_cnt > ICL_TRAIL_CNT_MAX) { + DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt); + trail_cnt = ICL_TRAIL_CNT_MAX; + } - if (mipi_config->target_burst_mode_freq < - computed_ddr) { - DRM_ERROR("Burst mode freq is less than computed\n"); - return false; - } + /* tclk pre count in escape clocks */ + tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns); + if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) { + DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt); + tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX; + } - burst_mode_ratio = DIV_ROUND_UP( - mipi_config->target_burst_mode_freq * 100, - computed_ddr); + /* tclk post count in escape clocks */ + tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns); + if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) { + DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt); + tclk_post_cnt = ICL_TCLK_POST_CNT_MAX; + } - pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100); - } else { - DRM_ERROR("Burst mode target is not set\n"); - return false; - } - } else - burst_mode_ratio = 100; + /* hs zero cnt in escape clocks */ + hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero - + ths_prepare_ns, tlpx_ns); + if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) { + DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt); + hs_zero_cnt = ICL_HS_ZERO_CNT_MAX; + } - intel_dsi->burst_mode_ratio = burst_mode_ratio; - intel_dsi->pclk = pclk; + /* hs exit zero cnt in escape clocks */ + exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns); + if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) { + DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt); + exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX; + } - bitrate = (pclk * bpp) / intel_dsi->lane_count; + /* clock lane dphy timings */ + intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE | + CLK_PREPARE(prepare_cnt) | + CLK_ZERO_OVERRIDE | + CLK_ZERO(clk_zero_cnt) | + CLK_PRE_OVERRIDE | + CLK_PRE(tclk_pre_cnt) | + CLK_POST_OVERRIDE | + CLK_POST(tclk_post_cnt) | + CLK_TRAIL_OVERRIDE | + CLK_TRAIL(trail_cnt)); + + /* data lanes dphy timings */ + intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE | + HS_PREPARE(prepare_cnt) | + HS_ZERO_OVERRIDE | + HS_ZERO(hs_zero_cnt) | + HS_TRAIL_OVERRIDE | + HS_TRAIL(trail_cnt) | + HS_EXIT_OVERRIDE | + HS_EXIT(exit_zero_cnt)); +} - switch (intel_dsi->escape_clk_div) { - case 0: - tlpx_ns = 50; - break; - case 1: - tlpx_ns = 100; - break; +static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) +{ + struct drm_device *dev = intel_dsi->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; + u32 tlpx_ns, extra_byte_count, tlpx_ui; + 
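(Aside on the icl_dphy_param_init() counts above: each one converts a VBT timing in nanoseconds into escape-clock (TLPX) units, and prepare_cnt additionally uses the 1.2 fixed-point encoding described in its comment. A worked example with hypothetical VBT numbers, not values from this patch:)

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) /* same rounding as the kernel macro */

int main(void)
{
	/* Hypothetical timings: tlpx = 50 ns (escape_clk_div == 0),
	 * ths_prepare = 60 ns. prepare_cnt is kept in quarter-TLPX units,
	 * which is exactly the 1.2 fixed-point format. */
	unsigned int tlpx_ns = 50, ths_prepare_ns = 60;
	unsigned int prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);

	assert(prepare_cnt == 5); /* 0b101 in 1.2 format = 1 + 1/4 = 1.25 TLPX */
	/* ICL_PREPARE_CNT_MAX (0x7 = 0b111) caps the field at 1.75 TLPX. */
	return 0;
}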
u32 ui_num, ui_den; + u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; + u32 ths_prepare_ns, tclk_trail_ns; + u32 tclk_prepare_clkzero, ths_prepare_hszero; + u32 lp_to_hs_switch, hs_to_lp_switch; + u32 mul; - case 2: - tlpx_ns = 200; - break; - default: - tlpx_ns = 50; - break; - } + tlpx_ns = intel_dsi_tlpx_ns(intel_dsi); switch (intel_dsi->lane_count) { case 1: @@ -620,7 +668,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) /* in Kbps */ ui_num = NS_KHZ_RATIO; - ui_den = bitrate; + ui_den = intel_dsi_bitrate(intel_dsi); tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero; ths_prepare_hszero = mipi_config->ths_prepare_hszero; @@ -746,6 +794,88 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8, 8); intel_dsi->clk_hs_to_lp_count += extra_byte_count; +} + +bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) +{ + struct drm_device *dev = intel_dsi->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; + struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps; + struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode; + u16 burst_mode_ratio; + enum port port; + + DRM_DEBUG_KMS("\n"); + + intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1; + intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0; + intel_dsi->lane_count = mipi_config->lane_cnt + 1; + intel_dsi->pixel_format = + pixel_format_from_register_bits( + mipi_config->videomode_color_format << 7); + + intel_dsi->dual_link = mipi_config->dual_link; + intel_dsi->pixel_overlap = mipi_config->pixel_overlap; + intel_dsi->operation_mode = mipi_config->is_cmd_mode; + intel_dsi->video_mode_format = mipi_config->video_transfer_mode; + intel_dsi->escape_clk_div = mipi_config->byte_clk_sel; + intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout; + intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout; + intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout; + intel_dsi->rst_timer_val = mipi_config->device_reset_timer; + intel_dsi->init_count = mipi_config->master_init_timer; + intel_dsi->bw_timer = mipi_config->dbi_bw_timer; + intel_dsi->video_frmt_cfg_bits = + mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; + intel_dsi->bgr_enabled = mipi_config->rgb_flip; + + /* Starting point, adjusted depending on dual link and burst mode */ + intel_dsi->pclk = mode->clock; + + /* In dual link mode each port needs half of pixel clock */ + if (intel_dsi->dual_link) { + intel_dsi->pclk /= 2; + + /* we can enable pixel_overlap if needed by panel. 
In this + * case we need to increase the pixelclock for extra pixels + */ + if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { + intel_dsi->pclk += DIV_ROUND_UP(mode->vtotal * intel_dsi->pixel_overlap * 60, 1000); + } + } + + /* Burst Mode Ratio + * Target ddr frequency from VBT / non burst ddr freq + * multiply by 100 to preserve remainder + */ + if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) { + if (mipi_config->target_burst_mode_freq) { + u32 bitrate = intel_dsi_bitrate(intel_dsi); + + if (mipi_config->target_burst_mode_freq < bitrate) { + DRM_ERROR("Burst mode freq is less than computed\n"); + return false; + } + + burst_mode_ratio = DIV_ROUND_UP( + mipi_config->target_burst_mode_freq * 100, + bitrate); + + intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100); + } else { + DRM_ERROR("Burst mode target is not set\n"); + return false; + } + } else + burst_mode_ratio = 100; + + intel_dsi->burst_mode_ratio = burst_mode_ratio; + + if (IS_ICELAKE(dev_priv)) + icl_dphy_param_init(intel_dsi); + else + vlv_dphy_param_init(intel_dsi); DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk); DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 4e142ff49708..0042a7f69387 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -256,6 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return false; + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; return true; } @@ -333,18 +334,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector) return 0; } -static void intel_dvo_destroy(struct drm_connector *connector) -{ - drm_connector_cleanup(connector); - intel_panel_fini(&to_intel_connector(connector)->panel); - kfree(connector); -} - static const struct drm_connector_funcs intel_dvo_connector_funcs = { .detect = intel_dvo_detect, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, - .destroy = intel_dvo_destroy, + .destroy = intel_connector_destroy, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 217ed3ee1cab..af2873403009 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -273,13 +273,13 @@ intel_engine_setup(struct drm_i915_private *dev_priv, BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH)); BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH)); - if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS)) + if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS)) return -EINVAL; - if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE)) + if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE)) return -EINVAL; - if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance])) + if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance])) return -EINVAL; GEM_BUG_ON(dev_priv->engine[id]); @@ -335,7 +335,10 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv) WARN_ON(ring_mask == 0); WARN_ON(ring_mask & - GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES)); + GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES)); + + if (i915_inject_load_failure()) + return -ENODEV; for (i = 0; i 
< ARRAY_SIZE(intel_engines); i++) { if (!HAS_ENGINE(dev_priv, i)) @@ -399,7 +402,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv) err = -EINVAL; err_id = id; - if (GEM_WARN_ON(!init)) + if (GEM_DEBUG_WARN_ON(!init)) goto cleanup; err = init(engine); @@ -463,7 +466,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine) struct intel_engine_execlists * const execlists = &engine->execlists; execlists->port_mask = 1; - BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists)); + GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists))); GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); execlists->queue_priority = INT_MIN; @@ -482,7 +485,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine) void intel_engine_setup_common(struct intel_engine_cs *engine) { i915_timeline_init(engine->i915, &engine->timeline, engine->name); - lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE); + i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE); intel_engine_init_execlist(engine); intel_engine_init_hangcheck(engine); @@ -490,46 +493,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine) intel_engine_init_cmd_parser(engine); } -int intel_engine_create_scratch(struct intel_engine_cs *engine, - unsigned int size) -{ - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - int ret; - - WARN_ON(engine->scratch); - - obj = i915_gem_object_create_stolen(engine->i915, size); - if (!obj) - obj = i915_gem_object_create_internal(engine->i915, size); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate scratch page\n"); - return PTR_ERR(obj); - } - - vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err_unref; - } - - ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); - if (ret) - goto err_unref; - - engine->scratch = vma; - return 0; - -err_unref: - i915_gem_object_put(obj); - return ret; -} - -void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) -{ - i915_vma_unpin_and_release(&engine->scratch, 0); -} - static void cleanup_status_page(struct intel_engine_cs *engine) { if (HWS_NEEDS_PHYSICAL(engine->i915)) { @@ -704,8 +667,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; - intel_engine_cleanup_scratch(engine); - cleanup_status_page(engine); intel_engine_fini_breadcrumbs(engine); @@ -720,6 +681,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) __intel_context_unpin(i915->kernel_context, engine); i915_timeline_fini(&engine->timeline); + + intel_wa_list_free(&engine->ctx_wa_list); + intel_wa_list_free(&engine->wa_list); + intel_wa_list_free(&engine->whitelist); } u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) @@ -809,7 +774,7 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) u32 slice = fls(sseu->slice_mask); u32 subslice = fls(sseu->subslice_mask[slice]); - if (INTEL_GEN(dev_priv) == 10) + if (IS_GEN10(dev_priv)) mcr_s_ss_select = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice); else if (INTEL_GEN(dev_priv) >= 11) @@ -1534,10 +1499,10 @@ void intel_engine_dump(struct intel_engine_cs *engine, count = 0; drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority); for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { - struct i915_priolist *p = - rb_entry(rb, typeof(*p), node); + struct i915_priolist *p = rb_entry(rb, typeof(*p), node); + int i; - list_for_each_entry(rq, 
&p->requests, sched.link) { + priolist_for_each_request(rq, p, i) { if (count++ < MAX_REQUESTS_TO_SHOW - 1) print_request(m, rq, "\t\tQ "); else @@ -1559,8 +1524,10 @@ void intel_engine_dump(struct intel_engine_cs *engine, for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { struct intel_wait *w = rb_entry(rb, typeof(*w), node); - drm_printf(m, "\t%s [%d] waiting for %x\n", - w->tsk->comm, w->tsk->pid, w->seqno); + drm_printf(m, "\t%s [%d:%c] waiting for %x\n", + w->tsk->comm, w->tsk->pid, + task_state_to_char(w->tsk), + w->seqno); } spin_unlock(&b->rb_lock); local_irq_restore(flags); diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 74d425c700ef..f23570c44323 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -84,7 +84,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, int lines; intel_fbc_get_plane_source_size(cache, NULL, &lines); - if (INTEL_GEN(dev_priv) == 7) + if (IS_GEN7(dev_priv)) lines = min(lines, 2048); else if (INTEL_GEN(dev_priv) >= 8) lines = min(lines, 2560); @@ -674,6 +674,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, cache->plane.adjusted_y = plane_state->color_plane[0].y; cache->plane.y = plane_state->base.src.y1 >> 16; + cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode; + if (!cache->plane.visible) return; @@ -748,6 +750,12 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return false; } + if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && + cache->fb.format->has_alpha) { + fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC"; + return false; + } + /* WaFbcExceedCdClockThreshold:hsw,bdw */ if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) { @@ -1301,7 +1309,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) fbc->active = false; if (need_fbc_vtd_wa(dev_priv)) - mkwrite_device_info(dev_priv)->has_fbc = false; + mkwrite_device_info(dev_priv)->display.has_fbc = false; i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv); DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index f99332972b7a..fb5bb5b32a60 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -593,7 +593,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, * pipe. Note we need to use the selected fb's pitch and bpp * rather than the current pipe's, since they differ. 
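 *
 * A worked example of the checks below (hypothetical numbers, not taken
 * from this patch): for a 1920x1080 mode scanning out of an XRGB8888 fb
 * (4 bytes per pixel), the width test needs
 * fb->base.pitches[0] >= 1920 * 4 = 7680 bytes, and the size test then
 * needs the fb to span about 1080 aligned rows of that pitch,
 * i.e. roughly 8 MiB.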
*/ - cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay; + cur_size = crtc->state->adjusted_mode.crtc_hdisplay; cur_size = cur_size * fb->base.format->cpp[0]; if (fb->base.pitches[0] < cur_size) { DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n", @@ -603,13 +603,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, break; } - cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay; + cur_size = crtc->state->adjusted_mode.crtc_vdisplay; cur_size = intel_fb_align_height(&fb->base, 0, cur_size); cur_size *= fb->base.pitches[0]; DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n", pipe_name(intel_crtc->pipe), - intel_crtc->config->base.adjusted_mode.crtc_hdisplay, - intel_crtc->config->base.adjusted_mode.crtc_vdisplay, + crtc->state->adjusted_mode.crtc_hdisplay, + crtc->state->adjusted_mode.crtc_vdisplay, fb->base.format->cpp[0] * 8, cur_size); @@ -672,7 +672,7 @@ int intel_fbdev_init(struct drm_device *dev) struct intel_fbdev *ifbdev; int ret; - if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0)) + if (WARN_ON(!HAS_DISPLAY(dev_priv))) return -ENODEV; ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 230aea69385d..8660af3fd755 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -50,7 +50,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc) unsigned int i; guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); - guc->send_regs.count = SOFT_SCRATCH_COUNT - 1; + guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN; + BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT); for (i = 0; i < guc->send_regs.count; i++) { fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, @@ -521,6 +522,44 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset) return intel_guc_send(guc, action, ARRAY_SIZE(action)); } +/* + * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and + * then return, so waiting on the H2G is not enough to guarantee GuC is done. + * When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to + * scratch register 14, so we can poll on that. Note that GuC does not ensure + * that the value in the register is different from + * INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to + * take care of that ourselves as well. + */ +static int guc_sleep_state_action(struct intel_guc *guc, + const u32 *action, u32 len) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + int ret; + u32 status; + + I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK); + + ret = intel_guc_send(guc, action, len); + if (ret) + return ret; + + ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14), + INTEL_GUC_SLEEP_STATE_INVALID_MASK, + 0, 0, 10, &status); + if (ret) + return ret; + + if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) { + DRM_ERROR("GuC failed to change sleep state. 
" + "action=0x%x, err=%u\n", + action[0], status); + return -EIO; + } + + return 0; +} + /** * intel_guc_suspend() - notify GuC entering suspend state * @guc: the guc @@ -533,7 +572,7 @@ int intel_guc_suspend(struct intel_guc *guc) intel_guc_ggtt_offset(guc, guc->shared_data) }; - return intel_guc_send(guc, data, ARRAY_SIZE(data)); + return guc_sleep_state_action(guc, data, ARRAY_SIZE(data)); } /** @@ -571,7 +610,7 @@ int intel_guc_resume(struct intel_guc *guc) intel_guc_ggtt_offset(guc, guc->shared_data) }; - return intel_guc_send(guc, data, ARRAY_SIZE(data)); + return guc_sleep_state_action(guc, data, ARRAY_SIZE(data)); } /** diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index ad42faf48c46..0f1c4f9ebfd8 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -95,6 +95,11 @@ struct intel_guc { void (*notify)(struct intel_guc *guc); }; +static inline bool intel_guc_is_alive(struct intel_guc *guc) +{ + return intel_uc_fw_is_loaded(&guc->fw); +} + static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) { diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c index a9e6fcce467c..a67144ee5ceb 100644 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/intel_guc_fw.c @@ -78,7 +78,8 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw) guc_fw->major_ver_wanted = KBL_FW_MAJOR; guc_fw->minor_ver_wanted = KBL_FW_MINOR; } else { - DRM_WARN("%s: No firmware known for this platform!\n", + dev_info(dev_priv->drm.dev, + "%s: No firmware known for this platform!\n", intel_uc_fw_type_repr(guc_fw->type)); } } @@ -125,66 +126,26 @@ static void guc_prepare_xfer(struct intel_guc *guc) } /* Copy RSA signature from the fw image to HW for verification */ -static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma) +static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_uc_fw *guc_fw = &guc->fw; - struct sg_table *sg = vma->pages; u32 rsa[UOS_RSA_SCRATCH_COUNT]; int i; - if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), - guc_fw->rsa_offset) != sizeof(rsa)) - return -EINVAL; + sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents, + rsa, sizeof(rsa), guc->fw.rsa_offset); for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); - - return 0; } -/* - * Transfer the firmware image to RAM for execution by the microcontroller. - * - * Architecturally, the DMA engine is bidirectional, and can potentially even - * transfer between GTT locations. This functionality is left out of the API - * for now as there is no need for it. - */ -static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma) +static bool guc_xfer_completed(struct intel_guc *guc, u32 *status) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_uc_fw *guc_fw = &guc->fw; - unsigned long offset; - u32 status; - int ret; - - /* - * The header plus uCode will be copied to WOPCM via DMA, excluding any - * other components - */ - I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); - - /* Set the source address for the new blob */ - offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset; - I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); - I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); - /* - * Set the DMA destination. Current uCode expects the code to be - * loaded at 8k; locations below this are used for the stack. 
- */ - I915_WRITE(DMA_ADDR_1_LOW, 0x2000); - I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); - - /* Finally start the DMA */ - I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); - - /* Wait for DMA to finish */ - ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, - 2, 100, &status); - DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status); - - return ret; + /* Did we complete the xfer? */ + *status = I915_READ(DMA_CTRL); + return !(*status & START_DMA); } /* @@ -217,8 +178,8 @@ static int guc_wait_ucode(struct intel_guc *guc) * NB: Docs recommend not using the interrupt for completion. * Measurements indicate this should take no more than 20ms, so a * timeout here indicates that the GuC has failed and is unusable. - * (Higher levels of the driver will attempt to fall back to - * execlist mode if this happens.) + * (Higher levels of the driver may decide to reset the GuC and + * attempt the ucode load again if this happens.) */ ret = wait_for(guc_ready(guc, &status), 100); DRM_DEBUG_DRIVER("GuC status %#x\n", status); @@ -228,10 +189,52 @@ static int guc_wait_ucode(struct intel_guc *guc) ret = -ENOEXEC; } + if (ret == 0 && !guc_xfer_completed(guc, &status)) { + DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n", + status); + ret = -ENXIO; + } + return ret; } /* + * Transfer the firmware image to RAM for execution by the microcontroller. + * + * Architecturally, the DMA engine is bidirectional, and can potentially even + * transfer between GTT locations. This functionality is left out of the API + * for now as there is no need for it. + */ +static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uc_fw *guc_fw = &guc->fw; + unsigned long offset; + + /* + * The header plus uCode will be copied to WOPCM via DMA, excluding any + * other components + */ + I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); + + /* Set the source address for the new blob */ + offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset; + I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); + I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); + + /* + * Set the DMA destination. Current uCode expects the code to be + * loaded at 8k; locations below this are used for the stack. + */ + I915_WRITE(DMA_ADDR_1_LOW, 0x2000); + I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); + + /* Finally start the DMA */ + I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); + + return guc_wait_ucode(guc); +} +/* * Load the GuC firmware blob into the MinuteIA. */ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma) @@ -251,17 +254,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma) * by the DMA engine in one operation, whereas the RSA signature is * loaded via MMIO. 
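 *
 * A minimal sketch of the resulting load sequence (illustrative only,
 * matching the calls directly below):
 *
 *	guc_xfer_rsa(guc, vma);           <- RSA key to UOS_RSA_SCRATCH() MMIO
 *	ret = guc_xfer_ucode(guc, vma);   <- header + ucode to WOPCM via DMA,
 *	                                     then guc_wait_ucode() polls status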
*/ - ret = guc_xfer_rsa(guc, vma); - if (ret) - DRM_WARN("GuC firmware signature xfer error %d\n", ret); + guc_xfer_rsa(guc, vma); ret = guc_xfer_ucode(guc, vma); - if (ret) - DRM_WARN("GuC firmware code xfer error %d\n", ret); - - ret = guc_wait_ucode(guc); - if (ret) - DRM_ERROR("GuC firmware xfer error %d\n", ret); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 8382d591c784..b2f5148f4f17 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -39,6 +39,11 @@ #define GUC_VIDEO_ENGINE2 4 #define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) +#define GUC_DOORBELL_INVALID 256 + +#define GUC_DB_SIZE (PAGE_SIZE) +#define GUC_WQ_SIZE (PAGE_SIZE * 2) + /* Work queue item header definitions */ #define WQ_STATUS_ACTIVE 1 #define WQ_STATUS_SUSPENDED 2 @@ -59,9 +64,6 @@ #define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ #define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT) -#define GUC_DOORBELL_ENABLED 1 -#define GUC_DOORBELL_DISABLED 0 - #define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0) #define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1) #define GUC_STAGE_DESC_ATTR_KERNEL BIT(2) @@ -219,26 +221,6 @@ struct uc_css_header { u32 header_info; } __packed; -struct guc_doorbell_info { - u32 db_status; - u32 cookie; - u32 reserved[14]; -} __packed; - -union guc_doorbell_qw { - struct { - u32 db_status; - u32 cookie; - }; - u64 value_qw; -} __packed; - -#define GUC_NUM_DOORBELLS 256 -#define GUC_DOORBELL_INVALID (GUC_NUM_DOORBELLS) - -#define GUC_DB_SIZE (PAGE_SIZE) -#define GUC_WQ_SIZE (PAGE_SIZE * 2) - /* Work item for submitting workloads into work queue of GuC. */ struct guc_wq_item { u32 header; @@ -601,7 +583,9 @@ struct guc_shared_ctx_data { * registers, where first register holds data treated as message header, * and other registers are used to hold message payload. * - * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8 + * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8, + * but no H2G command takes more than 8 parameters and the GuC FW + * itself uses an 8-element array to store the H2G message. * * +-----------+---------+---------+---------+ * | MMIO[0] | MMIO[1] | ... | MMIO[n] | @@ -633,6 +617,8 @@ struct guc_shared_ctx_data { * field. 
*/ +#define GUC_MAX_MMIO_MSG_LEN 8 + #define INTEL_GUC_MSG_TYPE_SHIFT 28 #define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT) #define INTEL_GUC_MSG_DATA_SHIFT 16 @@ -687,6 +673,13 @@ enum intel_guc_report_status { INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4, }; +enum intel_guc_sleep_state_status { + INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0, + INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1, + INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2 +#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000 +}; + #define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0) #define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4 #define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h index d86084742a4a..57e7ad522c2f 100644 --- a/drivers/gpu/drm/i915/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/intel_guc_reg.h @@ -104,6 +104,18 @@ #define GUC_SEND_INTERRUPT _MMIO(0xc4c8) #define GUC_SEND_TRIGGER (1<<0) +#define GUC_NUM_DOORBELLS 256 + +/* format of the HW-monitored doorbell cacheline */ +struct guc_doorbell_info { + u32 db_status; +#define GUC_DOORBELL_DISABLED 0 +#define GUC_DOORBELL_ENABLED 1 + + u32 cookie; + u32 reserved[14]; +} __packed; + #define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8) #define GEN8_DRB_VALID (1<<0) #define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4) diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index a81f04d46e87..1570dcbe249c 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -192,7 +192,15 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client) return client->vaddr + client->doorbell_offset; } -static void __create_doorbell(struct intel_guc_client *client) +static bool __doorbell_valid(struct intel_guc *guc, u16 db_id) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + + GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); + return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID; +} + +static void __init_doorbell(struct intel_guc_client *client) { struct guc_doorbell_info *doorbell; @@ -201,21 +209,19 @@ static void __create_doorbell(struct intel_guc_client *client) doorbell->cookie = 0; } -static void __destroy_doorbell(struct intel_guc_client *client) +static void __fini_doorbell(struct intel_guc_client *client) { - struct drm_i915_private *dev_priv = guc_to_i915(client->guc); struct guc_doorbell_info *doorbell; u16 db_id = client->doorbell_id; doorbell = __get_doorbell(client); doorbell->db_status = GUC_DOORBELL_DISABLED; - doorbell->cookie = 0; /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit * to go to zero after updating db_status before we call the GuC to * release the doorbell */ - if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10)) + if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10)) WARN_ONCE(true, "Doorbell never became invalid after disable\n"); } @@ -227,11 +233,11 @@ static int create_doorbell(struct intel_guc_client *client) return -ENODEV; /* internal setup error, should never happen */ __update_doorbell_desc(client, client->doorbell_id); - __create_doorbell(client); + __init_doorbell(client); ret = __guc_allocate_doorbell(client->guc, client->stage_id); if (ret) { - __destroy_doorbell(client); + __fini_doorbell(client); __update_doorbell_desc(client, GUC_DOORBELL_INVALID); DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n", client->stage_id, ret); @@ -247,7 +253,7 @@ static int 
destroy_doorbell(struct intel_guc_client *client) GEM_BUG_ON(!has_doorbell(client)); - __destroy_doorbell(client); + __fini_doorbell(client); ret = __guc_deallocate_doorbell(client->guc, client->stage_id); if (ret) DRM_ERROR("Couldn't destroy client %u doorbell: %d\n", @@ -282,8 +288,7 @@ __get_process_desc(struct intel_guc_client *client) /* * Initialise the process descriptor shared with the GuC firmware. */ -static void guc_proc_desc_init(struct intel_guc *guc, - struct intel_guc_client *client) +static void guc_proc_desc_init(struct intel_guc_client *client) { struct guc_process_desc *desc; @@ -304,6 +309,14 @@ static void guc_proc_desc_init(struct intel_guc *guc, desc->priority = client->priority; } +static void guc_proc_desc_fini(struct intel_guc_client *client) +{ + struct guc_process_desc *desc; + + desc = __get_process_desc(client); + memset(desc, 0, sizeof(*desc)); +} + static int guc_stage_desc_pool_create(struct intel_guc *guc) { struct i915_vma *vma; @@ -341,9 +354,9 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc) * data structures relating to this client (doorbell, process descriptor, * write queue, etc). */ -static void guc_stage_desc_init(struct intel_guc *guc, - struct intel_guc_client *client) +static void guc_stage_desc_init(struct intel_guc_client *client) { + struct intel_guc *guc = client->guc; struct drm_i915_private *dev_priv = guc_to_i915(guc); struct intel_engine_cs *engine; struct i915_gem_context *ctx = client->owner; @@ -424,8 +437,7 @@ static void guc_stage_desc_init(struct intel_guc *guc, desc->desc_private = ptr_to_u64(client); } -static void guc_stage_desc_fini(struct intel_guc *guc, - struct intel_guc_client *client) +static void guc_stage_desc_fini(struct intel_guc_client *client) { struct guc_stage_desc *desc; @@ -486,14 +498,6 @@ static void guc_wq_item_append(struct intel_guc_client *client, WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); } -static void guc_reset_wq(struct intel_guc_client *client) -{ - struct guc_process_desc *desc = __get_process_desc(client); - - desc->head = 0; - desc->tail = 0; -} - static void guc_ring_doorbell(struct intel_guc_client *client) { struct guc_doorbell_info *db; @@ -746,30 +750,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) while ((rb = rb_first_cached(&execlists->queue))) { struct i915_priolist *p = to_priolist(rb); struct i915_request *rq, *rn; + int i; - list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { + priolist_for_each_request_consume(rq, rn, p, i) { if (last && rq->hw_context != last->hw_context) { - if (port == last_port) { - __list_del_many(&p->requests, - &rq->sched.link); + if (port == last_port) goto done; - } if (submit) port_assign(port, last); port++; } - INIT_LIST_HEAD(&rq->sched.link); + list_del_init(&rq->sched.link); __i915_request_submit(rq); trace_i915_request_in(rq, port_index(port, execlists)); + last = rq; submit = true; } rb_erase_cached(&p->node, &execlists->queue); - INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); } @@ -791,19 +793,8 @@ done: static void guc_dequeue(struct intel_engine_cs *engine) { - unsigned long flags; - bool submit; - - local_irq_save(flags); - - spin_lock(&engine->timeline.lock); - submit = __guc_dequeue(engine); - spin_unlock(&engine->timeline.lock); - - if (submit) + if (__guc_dequeue(engine)) guc_submit(engine); - - local_irq_restore(flags); } static void guc_submission_tasklet(unsigned long data) @@ -812,6 +803,9 @@ static void 
guc_submission_tasklet(unsigned long data) struct intel_engine_execlists * const execlists = &engine->execlists; struct execlist_port *port = execlists->port; struct i915_request *rq; + unsigned long flags; + + spin_lock_irqsave(&engine->timeline.lock, flags); rq = port_request(port); while (rq && i915_request_completed(rq)) { @@ -835,6 +829,8 @@ static void guc_submission_tasklet(unsigned long data) if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)) guc_dequeue(engine); + + spin_unlock_irqrestore(&engine->timeline.lock, flags); } static struct i915_request * @@ -877,72 +873,31 @@ guc_reset_prepare(struct intel_engine_cs *engine) /* Check that a doorbell register is in the expected state */ static bool doorbell_ok(struct intel_guc *guc, u16 db_id) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - u32 drbregl; bool valid; - GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID); + GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); - drbregl = I915_READ(GEN8_DRBREGL(db_id)); - valid = drbregl & GEN8_DRB_VALID; + valid = __doorbell_valid(guc, db_id); if (test_bit(db_id, guc->doorbell_bitmap) == valid) return true; - DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n", - db_id, drbregl, yesno(valid)); + DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n", + db_id, yesno(valid)); return false; } static bool guc_verify_doorbells(struct intel_guc *guc) { + bool doorbells_ok = true; u16 db_id; for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) if (!doorbell_ok(guc, db_id)) - return false; - - return true; -} - -static int guc_clients_doorbell_init(struct intel_guc *guc) -{ - int ret; - - ret = create_doorbell(guc->execbuf_client); - if (ret) - return ret; - - if (guc->preempt_client) { - ret = create_doorbell(guc->preempt_client); - if (ret) { - destroy_doorbell(guc->execbuf_client); - return ret; - } - } - - return 0; -} - -static void guc_clients_doorbell_fini(struct intel_guc *guc) -{ - /* - * By the time we're here, GuC has already been reset. - * Instead of trying (in vain) to communicate with it, let's just - * cleanup the doorbell HW and our internal state. 
- */ - if (guc->preempt_client) { - __destroy_doorbell(guc->preempt_client); - __update_doorbell_desc(guc->preempt_client, - GUC_DOORBELL_INVALID); - } + doorbells_ok = false; - if (guc->execbuf_client) { - __destroy_doorbell(guc->execbuf_client); - __update_doorbell_desc(guc->execbuf_client, - GUC_DOORBELL_INVALID); - } + return doorbells_ok; } /** @@ -1005,6 +960,10 @@ guc_client_alloc(struct drm_i915_private *dev_priv, } client->vaddr = vaddr; + ret = reserve_doorbell(client); + if (ret) + goto err_vaddr; + client->doorbell_offset = __select_cacheline(guc); /* @@ -1017,13 +976,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv, else client->proc_desc_offset = (GUC_DB_SIZE / 2); - guc_proc_desc_init(guc, client); - guc_stage_desc_init(guc, client); - - ret = reserve_doorbell(client); - if (ret) - goto err_vaddr; - DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n", priority, client, client->engines, client->stage_id); DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n", @@ -1045,7 +997,6 @@ err_client: static void guc_client_free(struct intel_guc_client *client) { unreserve_doorbell(client); - guc_stage_desc_fini(client->guc, client); i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP); ida_simple_remove(&client->guc->stage_ids, client->stage_id); kfree(client); @@ -1112,6 +1063,69 @@ static void guc_clients_destroy(struct intel_guc *guc) guc_client_free(client); } +static int __guc_client_enable(struct intel_guc_client *client) +{ + int ret; + + guc_proc_desc_init(client); + guc_stage_desc_init(client); + + ret = create_doorbell(client); + if (ret) + goto fail; + + return 0; + +fail: + guc_stage_desc_fini(client); + guc_proc_desc_fini(client); + return ret; +} + +static void __guc_client_disable(struct intel_guc_client *client) +{ + /* + * By the time we're here, GuC may have already been reset. If that is + * the case, instead of trying (in vain) to communicate with it, let's + * just clean up the doorbell HW and our internal state. + */ + if (intel_guc_is_alive(client->guc)) + destroy_doorbell(client); + else + __fini_doorbell(client); + + guc_stage_desc_fini(client); + guc_proc_desc_fini(client); +} + +static int guc_clients_enable(struct intel_guc *guc) +{ + int ret; + + ret = __guc_client_enable(guc->execbuf_client); + if (ret) + return ret; + + if (guc->preempt_client) { + ret = __guc_client_enable(guc->preempt_client); + if (ret) { + __guc_client_disable(guc->execbuf_client); + return ret; + } + } + + return 0; +} + +static void guc_clients_disable(struct intel_guc *guc) +{ + if (guc->preempt_client) + __guc_client_disable(guc->preempt_client); + + if (guc->execbuf_client) + __guc_client_disable(guc->execbuf_client); +} + /* * Set up the memory resources to be shared with the GuC (via the GGTT) * at firmware loading time.
@@ -1295,15 +1309,11 @@ int intel_guc_submission_enable(struct intel_guc *guc) GEM_BUG_ON(!guc->execbuf_client); - guc_reset_wq(guc->execbuf_client); - if (guc->preempt_client) - guc_reset_wq(guc->preempt_client); - err = intel_guc_sample_forcewake(guc); if (err) return err; - err = guc_clients_doorbell_init(guc); + err = guc_clients_enable(guc); if (err) return err; @@ -1325,7 +1335,7 @@ void intel_guc_submission_disable(struct intel_guc *guc) GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */ guc_interrupts_release(dev_priv); - guc_clients_doorbell_fini(guc); + guc_clients_disable(guc); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c index 26e48fc95543..1bf487f94254 100644 --- a/drivers/gpu/drm/i915/intel_hdcp.c +++ b/drivers/gpu/drm/i915/intel_hdcp.c @@ -16,6 +16,62 @@ #define KEY_LOAD_TRIES 5 +static +bool intel_hdcp_is_ksv_valid(u8 *ksv) +{ + int i, ones = 0; + /* KSV has 20 1's and 20 0's */ + for (i = 0; i < DRM_HDCP_KSV_LEN; i++) + ones += hweight8(ksv[i]); + if (ones != 20) + return false; + + return true; +} + +static +int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port, + const struct intel_hdcp_shim *shim, u8 *bksv) +{ + int ret, i, tries = 2; + + /* HDCP spec states that we must retry the bksv if it is invalid */ + for (i = 0; i < tries; i++) { + ret = shim->read_bksv(intel_dig_port, bksv); + if (ret) + return ret; + if (intel_hdcp_is_ksv_valid(bksv)) + break; + } + if (i == tries) { + DRM_DEBUG_KMS("Bksv is invalid\n"); + return -ENODEV; + } + + return 0; +} + +/* Is HDCP1.4 capable on Platform and Sink */ +bool intel_hdcp_capable(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + const struct intel_hdcp_shim *shim = connector->hdcp.shim; + bool capable = false; + u8 bksv[5]; + + if (!shim) + return capable; + + if (shim->hdcp_capable) { + shim->hdcp_capable(intel_dig_port, &capable); + } else { + if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv)) + capable = true; + } + + return capable; +} + static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port, const struct intel_hdcp_shim *shim) { @@ -168,18 +224,6 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port) } static -bool intel_hdcp_is_ksv_valid(u8 *ksv) -{ - int i, ones = 0; - /* KSV has 20 1's and 20 0's */ - for (i = 0; i < DRM_HDCP_KSV_LEN; i++) - ones += hweight8(ksv[i]); - if (ones != 20) - return false; - return true; -} - -static int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, const struct intel_hdcp_shim *shim, u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) @@ -383,7 +427,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, if (intel_wait_for_register(dev_priv, HDCP_REP_CTL, HDCP_SHA1_COMPLETE, HDCP_SHA1_COMPLETE, 1)) { - DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n"); + DRM_ERROR("Timed out waiting for SHA1 complete\n"); return -ETIMEDOUT; } if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { @@ -404,7 +448,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port, ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); if (ret) { - DRM_ERROR("KSV list failed to become ready (%d)\n", ret); + DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret); return ret; } @@ -414,7 +458,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port, if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || 
DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { - DRM_ERROR("Max Topology Limit Exceeded\n"); + DRM_DEBUG_KMS("Max Topology Limit Exceeded\n"); return -EPERM; } @@ -450,7 +494,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port, } if (i == tries) { - DRM_ERROR("V Prime validation failed.(%d)\n", ret); + DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret); goto err; } @@ -499,7 +543,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, if (ret) return ret; if (!hdcp_capable) { - DRM_ERROR("Panel is not HDCP capable\n"); + DRM_DEBUG_KMS("Panel is not HDCP capable\n"); return -EINVAL; } } @@ -527,18 +571,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, memset(&bksv, 0, sizeof(bksv)); - /* HDCP spec states that we must retry the bksv if it is invalid */ - for (i = 0; i < tries; i++) { - ret = shim->read_bksv(intel_dig_port, bksv.shim); - if (ret) - return ret; - if (intel_hdcp_is_ksv_valid(bksv.shim)) - break; - } - if (i == tries) { - DRM_ERROR("HDCP failed, Bksv is invalid\n"); - return -ENODEV; - } + ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim); + if (ret < 0) + return ret; I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]); I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]); @@ -594,8 +629,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, } if (i == tries) { - DRM_ERROR("Timed out waiting for Ri prime match (%x)\n", - I915_READ(PORT_HDCP_STATUS(port))); + DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n", + I915_READ(PORT_HDCP_STATUS(port))); return -ETIMEDOUT; } @@ -618,14 +653,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, return 0; } -static -struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector) -{ - return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base); -} - static int _intel_hdcp_disable(struct intel_connector *connector) { + struct intel_hdcp *hdcp = &connector->hdcp; struct drm_i915_private *dev_priv = connector->base.dev->dev_private; struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); enum port port = intel_dig_port->base.port; @@ -641,7 +671,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector) return -ETIMEDOUT; } - ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false); + ret = hdcp->shim->toggle_signalling(intel_dig_port, false); if (ret) { DRM_ERROR("Failed to disable HDCP signalling\n"); return ret; @@ -653,6 +683,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector) static int _intel_hdcp_enable(struct intel_connector *connector) { + struct intel_hdcp *hdcp = &connector->hdcp; struct drm_i915_private *dev_priv = connector->base.dev->dev_private; int i, ret, tries = 3; @@ -677,8 +708,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector) /* Incase of authentication failures, HDCP spec expects reauth. 
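 *
 * (A worked example of the Bksv validity rule enforced through
 * intel_hdcp_read_valid_bksv() above, with made-up bytes: a KSV is
 * DRM_HDCP_KSV_LEN = 5 bytes, i.e. 40 bits, of which exactly twenty
 * must be set. For {0xff, 0xff, 0x0f, 0x00, 0x00} the hweight8() sum is
 * 8 + 8 + 4 + 0 + 0 = 20, so the counting test would pass; real KSVs
 * are vendor-assigned values with the same property.)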
*/ for (i = 0; i < tries; i++) { - ret = intel_hdcp_auth(conn_to_dig_port(connector), - connector->hdcp_shim); + ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim); if (!ret) return 0; @@ -688,42 +718,50 @@ static int _intel_hdcp_enable(struct intel_connector *connector) _intel_hdcp_disable(connector); } - DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret); + DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret); return ret; } +static inline +struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) +{ + return container_of(hdcp, struct intel_connector, hdcp); +} + static void intel_hdcp_check_work(struct work_struct *work) { - struct intel_connector *connector = container_of(to_delayed_work(work), - struct intel_connector, - hdcp_check_work); + struct intel_hdcp *hdcp = container_of(to_delayed_work(work), + struct intel_hdcp, + check_work); + struct intel_connector *connector = intel_hdcp_to_connector(hdcp); + if (!intel_hdcp_check_link(connector)) - schedule_delayed_work(&connector->hdcp_check_work, + schedule_delayed_work(&hdcp->check_work, DRM_HDCP_CHECK_PERIOD_MS); } static void intel_hdcp_prop_work(struct work_struct *work) { - struct intel_connector *connector = container_of(work, - struct intel_connector, - hdcp_prop_work); + struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp, + prop_work); + struct intel_connector *connector = intel_hdcp_to_connector(hdcp); struct drm_device *dev = connector->base.dev; struct drm_connector_state *state; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - mutex_lock(&connector->hdcp_mutex); + mutex_lock(&hdcp->mutex); /* * This worker is only used to flip between ENABLED/DESIRED. Either of - * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED, + * those to UNDESIRED is handled by core. 
If value == UNDESIRED, * we're running just after hdcp has been disabled, so just exit */ - if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { state = connector->base.state; - state->content_protection = connector->hdcp_value; + state->content_protection = hdcp->value; } - mutex_unlock(&connector->hdcp_mutex); + mutex_unlock(&hdcp->mutex); drm_modeset_unlock(&dev->mode_config.connection_mutex); } @@ -735,8 +773,9 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) } int intel_hdcp_init(struct intel_connector *connector, - const struct intel_hdcp_shim *hdcp_shim) + const struct intel_hdcp_shim *shim) { + struct intel_hdcp *hdcp = &connector->hdcp; int ret; ret = drm_connector_attach_content_protection_property( @@ -744,51 +783,53 @@ int intel_hdcp_init(struct intel_connector *connector, if (ret) return ret; - connector->hdcp_shim = hdcp_shim; - mutex_init(&connector->hdcp_mutex); - INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work); - INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work); + hdcp->shim = shim; + mutex_init(&hdcp->mutex); + INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work); + INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work); return 0; } int intel_hdcp_enable(struct intel_connector *connector) { + struct intel_hdcp *hdcp = &connector->hdcp; int ret; - if (!connector->hdcp_shim) + if (!hdcp->shim) return -ENOENT; - mutex_lock(&connector->hdcp_mutex); + mutex_lock(&hdcp->mutex); ret = _intel_hdcp_enable(connector); if (ret) goto out; - connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&connector->hdcp_prop_work); - schedule_delayed_work(&connector->hdcp_check_work, + hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&hdcp->prop_work); + schedule_delayed_work(&hdcp->check_work, DRM_HDCP_CHECK_PERIOD_MS); out: - mutex_unlock(&connector->hdcp_mutex); + mutex_unlock(&hdcp->mutex); return ret; } int intel_hdcp_disable(struct intel_connector *connector) { + struct intel_hdcp *hdcp = &connector->hdcp; int ret = 0; - if (!connector->hdcp_shim) + if (!hdcp->shim) return -ENOENT; - mutex_lock(&connector->hdcp_mutex); + mutex_lock(&hdcp->mutex); - if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED; + if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED; ret = _intel_hdcp_disable(connector); } - mutex_unlock(&connector->hdcp_mutex); - cancel_delayed_work_sync(&connector->hdcp_check_work); + mutex_unlock(&hdcp->mutex); + cancel_delayed_work_sync(&hdcp->check_work); return ret; } @@ -828,17 +869,18 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, /* Implements Part 3 of the HDCP authorization procedure */ int intel_hdcp_check_link(struct intel_connector *connector) { + struct intel_hdcp *hdcp = &connector->hdcp; struct drm_i915_private *dev_priv = connector->base.dev->dev_private; struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); enum port port = intel_dig_port->base.port; int ret = 0; - if (!connector->hdcp_shim) + if (!hdcp->shim) return -ENOENT; - mutex_lock(&connector->hdcp_mutex); + mutex_lock(&hdcp->mutex); - if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) goto out; if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) { @@ -846,17 +888,15 
@@ int intel_hdcp_check_link(struct intel_connector *connector) connector->base.name, connector->base.base.id, I915_READ(PORT_HDCP_STATUS(port))); ret = -ENXIO; - connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&connector->hdcp_prop_work); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); goto out; } - if (connector->hdcp_shim->check_link(intel_dig_port)) { - if (connector->hdcp_value != - DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - connector->hdcp_value = - DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&connector->hdcp_prop_work); + if (hdcp->shim->check_link(intel_dig_port)) { + if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&hdcp->prop_work); } goto out; } @@ -867,20 +907,20 @@ int intel_hdcp_check_link(struct intel_connector *connector) ret = _intel_hdcp_disable(connector); if (ret) { DRM_ERROR("Failed to disable hdcp (%d)\n", ret); - connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&connector->hdcp_prop_work); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); goto out; } ret = _intel_hdcp_enable(connector); if (ret) { - DRM_ERROR("Failed to enable hdcp (%d)\n", ret); - connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&connector->hdcp_prop_work); + DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); goto out; } out: - mutex_unlock(&connector->hdcp_mutex); + mutex_unlock(&hdcp->mutex); return ret; } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a2dab0b6bde6..07e803a604bd 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -115,6 +115,8 @@ static u32 hsw_infoframe_enable(unsigned int type) switch (type) { case DP_SDP_VSC: return VIDEO_DIP_ENABLE_VSC_HSW; + case DP_SDP_PPS: + return VDIP_ENABLE_PPS; case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_ENABLE_AVI_HSW; case HDMI_INFOFRAME_TYPE_SPD: @@ -136,6 +138,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, switch (type) { case DP_SDP_VSC: return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i); + case DP_SDP_PPS: + return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i); case HDMI_INFOFRAME_TYPE_AVI: return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i); case HDMI_INFOFRAME_TYPE_SPD: @@ -148,14 +152,25 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, } } -static void g4x_write_infoframe(struct drm_encoder *encoder, +static int hsw_dip_data_size(unsigned int type) +{ + switch (type) { + case DP_SDP_VSC: + return VIDEO_DIP_VSC_DATA_SIZE; + case DP_SDP_PPS: + return VIDEO_DIP_PPS_DATA_SIZE; + default: + return VIDEO_DIP_DATA_SIZE; + } +} + +static void g4x_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len) { const u32 *data = frame; - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 val = I915_READ(VIDEO_DIP_CTL); int i; @@ -186,31 +201,29 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, POSTING_READ(VIDEO_DIP_CTL); } -static bool g4x_infoframe_enabled(struct drm_encoder *encoder, +static bool g4x_infoframe_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private 
*dev_priv = to_i915(encoder->dev); - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 val = I915_READ(VIDEO_DIP_CTL); if ((val & VIDEO_DIP_ENABLE) == 0) return false; - if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port)) + if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) return false; return val & (VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); } -static void ibx_write_infoframe(struct drm_encoder *encoder, +static void ibx_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len) { const u32 *data = frame; - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -243,11 +256,10 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, POSTING_READ(reg); } -static bool ibx_infoframe_enabled(struct drm_encoder *encoder, +static bool ibx_infoframe_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->dev); - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; i915_reg_t reg = TVIDEO_DIP_CTL(pipe); u32 val = I915_READ(reg); @@ -255,7 +267,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder, if ((val & VIDEO_DIP_ENABLE) == 0) return false; - if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port)) + if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) return false; return val & (VIDEO_DIP_ENABLE_AVI | @@ -263,14 +275,13 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder, VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); } -static void cpt_write_infoframe(struct drm_encoder *encoder, +static void cpt_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len) { const u32 *data = frame; - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -306,10 +317,10 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, POSTING_READ(reg); } -static bool cpt_infoframe_enabled(struct drm_encoder *encoder, +static bool cpt_infoframe_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; u32 val = I915_READ(TVIDEO_DIP_CTL(pipe)); @@ -321,14 +332,13 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder, VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); } -static void vlv_write_infoframe(struct drm_encoder *encoder, +static void vlv_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len) { 
const u32 *data = frame; - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -361,18 +371,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, POSTING_READ(reg); } -static bool vlv_infoframe_enabled(struct drm_encoder *encoder, +static bool vlv_infoframe_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->dev); - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) return false; - if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port)) + if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) return false; return val & (VIDEO_DIP_ENABLE_AVI | @@ -380,21 +389,21 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder, VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); } -static void hsw_write_infoframe(struct drm_encoder *encoder, +static void hsw_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len) { const u32 *data = frame; - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); - int data_size = type == DP_SDP_VSC ? - VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE; + int data_size; int i; u32 val = I915_READ(ctl_reg); + data_size = hsw_dip_data_size(type); + val &= ~hsw_infoframe_enable(type); I915_WRITE(ctl_reg, val); @@ -415,10 +424,10 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, POSTING_READ(ctl_reg); } -static bool hsw_infoframe_enabled(struct drm_encoder *encoder, +static bool hsw_infoframe_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder)); return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | @@ -443,11 +452,11 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder, * trick them by giving an offset into the buffer and moving back the header * bytes by one. 
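 *
 * A sketch of the resulting byte shuffle (illustrative layout, assuming
 * the frame was packed at buffer + 1 as described above; HBx = header
 * bytes, PBx = payload bytes):
 *
 *	packed:	 [ --, HB0, HB1, HB2, csum, PB1, ... ]
 *	shifted: [ HB0, HB1, HB2, 0, csum, PB1, ... ]	<- 'hole' at index 3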
*/ -static void intel_write_infoframe(struct drm_encoder *encoder, +static void intel_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, union hdmi_infoframe *frame) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); u8 buffer[VIDEO_DIP_DATA_SIZE]; ssize_t len; @@ -457,24 +466,25 @@ static void intel_write_infoframe(struct drm_encoder *encoder, return; /* Insert the 'hole' (see big comment above) at position 3 */ - buffer[0] = buffer[1]; - buffer[1] = buffer[2]; - buffer[2] = buffer[3]; + memmove(&buffer[0], &buffer[1], 3); buffer[3] = 0; len++; - intel_dig_port->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len); + intel_dig_port->write_infoframe(encoder, + crtc_state, + frame->any.type, buffer, len); } -static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, +static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; struct drm_connector *connector = &intel_hdmi->attached_connector->base; - bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported; + bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported || + connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420; union hdmi_infoframe frame; int ret; @@ -486,8 +496,10 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, return; } - if (crtc_state->ycbcr420) + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) frame.avi.colorspace = HDMI_COLORSPACE_YUV420; + else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) + frame.avi.colorspace = HDMI_COLORSPACE_YUV444; else frame.avi.colorspace = HDMI_COLORSPACE_RGB; @@ -502,10 +514,11 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, conn_state); /* TODO: handle pixel repetition for YCBCR420 outputs */ - intel_write_infoframe(encoder, crtc_state, &frame); + intel_write_infoframe(encoder, crtc_state, + &frame); } -static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder, +static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { union hdmi_infoframe frame; @@ -519,11 +532,12 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder, frame.spd.sdi = HDMI_SPD_SDI_PC; - intel_write_infoframe(encoder, crtc_state, &frame); + intel_write_infoframe(encoder, crtc_state, + &frame); } static void -intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, +intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { @@ -536,20 +550,21 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, if (ret < 0) return; - intel_write_infoframe(encoder, crtc_state, &frame); + intel_write_infoframe(encoder, crtc_state, + &frame); } -static void g4x_set_infoframes(struct drm_encoder *encoder, +static void g4x_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->dev); - struct intel_digital_port *intel_dig_port = 
enc_to_dig_port(encoder); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; i915_reg_t reg = VIDEO_DIP_CTL; u32 val = I915_READ(reg); - u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port); + u32 port = VIDEO_DIP_PORT(encoder->port); assert_hdmi_port_disabled(intel_hdmi); @@ -657,11 +672,11 @@ static bool gcp_default_phase_possible(int pipe_bpp, mode->crtc_htotal/2 % pixels_per_group == 0); } -static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder, +static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); i915_reg_t reg; u32 val = 0; @@ -689,18 +704,18 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder, return val != 0; } -static void ibx_set_infoframes(struct drm_encoder *encoder, +static void ibx_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); - u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port); + u32 port = VIDEO_DIP_PORT(encoder->port); assert_hdmi_port_disabled(intel_hdmi); @@ -742,14 +757,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); } -static void cpt_set_infoframes(struct drm_encoder *encoder, +static void cpt_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); @@ -785,18 +800,17 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); } -static void vlv_set_infoframes(struct drm_encoder *encoder, +static void vlv_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->dev); - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); i915_reg_t reg = 
VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); - u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port); + u32 port = VIDEO_DIP_PORT(encoder->port); assert_hdmi_port_disabled(intel_hdmi); @@ -838,12 +852,12 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); } -static void hsw_set_infoframes(struct drm_encoder *encoder, +static void hsw_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); u32 val = I915_READ(reg); @@ -965,13 +979,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an, DRM_HDCP_AN_LEN); if (ret) { - DRM_ERROR("Write An over DDC failed (%d)\n", ret); + DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret); return ret; } ret = intel_gmbus_output_aksv(adapter); if (ret < 0) { - DRM_ERROR("Failed to output aksv (%d)\n", ret); + DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret); return ret; } return 0; @@ -984,7 +998,7 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv, DRM_HDCP_KSV_LEN); if (ret) - DRM_ERROR("Read Bksv over DDC failed (%d)\n", ret); + DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret); return ret; } @@ -996,7 +1010,7 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS, bstatus, DRM_HDCP_BSTATUS_LEN); if (ret) - DRM_ERROR("Read bstatus over DDC failed (%d)\n", ret); + DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret); return ret; } @@ -1009,7 +1023,7 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); if (ret) { - DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret); + DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret); return ret; } *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT; @@ -1024,7 +1038,7 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME, ri_prime, DRM_HDCP_RI_LEN); if (ret) - DRM_ERROR("Read Ri' over DDC failed (%d)\n", ret); + DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret); return ret; } @@ -1037,7 +1051,7 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); if (ret) { - DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret); + DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret); return ret; } *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY; @@ -1052,7 +1066,7 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO, ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN); if (ret) { - DRM_ERROR("Read ksv fifo over DDC failed (%d)\n", ret); + DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret); return ret; } return 0; @@ -1070,7 +1084,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, ret = intel_hdmi_hdcp_read(intel_dig_port, 
DRM_HDCP_DDC_V_PRIME(i), part, DRM_HDCP_V_PRIME_PART_LEN); if (ret) - DRM_ERROR("Read V'[%d] over DDC failed (%d)\n", i, ret); + DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret); return ret; } @@ -1217,7 +1231,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, if (tmp & HDMI_MODE_SELECT_HDMI) pipe_config->has_hdmi_sink = true; - if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config)) + if (intel_dig_port->infoframe_enabled(encoder, pipe_config)) pipe_config->has_infoframe = true; if (tmp & SDVO_AUDIO_ENABLE) @@ -1438,7 +1452,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } - intel_dig_port->set_infoframes(&encoder->base, false, + intel_dig_port->set_infoframes(encoder, + false, old_crtc_state, old_conn_state); intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); @@ -1597,6 +1612,8 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, struct drm_atomic_state *state = crtc_state->base.state; struct drm_connector_state *connector_state; struct drm_connector *connector; + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; int i; if (HAS_GMCH_DISPLAY(dev_priv)) @@ -1624,7 +1641,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, if (connector_state->crtc != crtc_state->base.crtc) continue; - if (crtc_state->ycbcr420) { + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { const struct drm_hdmi_info *hdmi = &info->hdmi; if (bpc == 12 && !(hdmi->y420_dc_modes & @@ -1645,7 +1662,14 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, /* Display WA #1139: glk */ if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) && - crtc_state->base.adjusted_mode.htotal > 5460) + adjusted_mode->htotal > 5460) + return false; + + /* Display Wa_1405510057:icl */ + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && + bpc == 10 && IS_ICELAKE(dev_priv) && + (adjusted_mode->crtc_hblank_end - + adjusted_mode->crtc_hblank_start) % 8 == 2) return false; return true; @@ -1669,7 +1693,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, *clock_12bpc /= 2; *clock_10bpc /= 2; *clock_8bpc /= 2; - config->ycbcr420 = true; + config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; /* YCBCR 420 output conversion needs a scaler */ if (skl_update_scaler_crtc(config)) { @@ -1703,6 +1727,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return false; + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink; if (pipe_config->has_hdmi_sink) @@ -1973,7 +1998,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder, intel_hdmi_prepare(encoder, pipe_config); - intel_dig_port->set_infoframes(&encoder->base, + intel_dig_port->set_infoframes(encoder, pipe_config->has_infoframe, pipe_config, conn_state); } @@ -1991,7 +2016,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a, 0x2b247878); - dport->set_infoframes(&encoder->base, + dport->set_infoframes(encoder, pipe_config->has_infoframe, pipe_config, conn_state); @@ -2062,7 +2087,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder, /* Use 800mV-0dB */ chv_set_phy_signal_level(encoder, 128, 102, false); - dport->set_infoframes(&encoder->base, + 
dport->set_infoframes(encoder, pipe_config->has_infoframe, pipe_config, conn_state); @@ -2074,13 +2099,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder, chv_phy_release_cl2_override(encoder); } +static int +intel_hdmi_connector_register(struct drm_connector *connector) +{ + int ret; + + ret = intel_connector_register(connector); + if (ret) + return ret; + + i915_debugfs_connector_add(connector); + + return ret; +} + static void intel_hdmi_destroy(struct drm_connector *connector) { if (intel_attached_hdmi(connector)->cec_notifier) cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier); - kfree(to_intel_connector(connector)->detect_edid); - drm_connector_cleanup(connector); - kfree(connector); + + intel_connector_destroy(connector); } static const struct drm_connector_funcs intel_hdmi_connector_funcs = { @@ -2089,7 +2127,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, - .late_register = intel_connector_register, + .late_register = intel_hdmi_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_hdmi_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, @@ -2109,11 +2147,16 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { static void intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) { + struct drm_i915_private *dev_priv = to_i915(connector->dev); + intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); intel_attach_aspect_ratio_property(connector); drm_connector_attach_content_type_property(connector); connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; + + if (!HAS_GMCH_DISPLAY(dev_priv)) + drm_connector_attach_max_bpc_property(connector, 8, 12); } /* @@ -2324,9 +2367,18 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port) intel_dig_port->set_infoframes = g4x_set_infoframes; intel_dig_port->infoframe_enabled = g4x_infoframe_enabled; } else if (HAS_DDI(dev_priv)) { - intel_dig_port->write_infoframe = hsw_write_infoframe; - intel_dig_port->set_infoframes = hsw_set_infoframes; - intel_dig_port->infoframe_enabled = hsw_infoframe_enabled; + if (intel_dig_port->lspcon.active) { + intel_dig_port->write_infoframe = + lspcon_write_infoframe; + intel_dig_port->set_infoframes = lspcon_set_infoframes; + intel_dig_port->infoframe_enabled = + lspcon_infoframe_enabled; + } else { + intel_dig_port->set_infoframes = hsw_set_infoframes; + intel_dig_port->infoframe_enabled = + hsw_infoframe_enabled; + intel_dig_port->write_infoframe = hsw_write_infoframe; + } } else if (HAS_PCH_IBX(dev_priv)) { intel_dig_port->write_infoframe = ibx_write_infoframe; intel_dig_port->set_infoframes = ibx_set_infoframes; @@ -2485,5 +2537,6 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, intel_infoframe_init(intel_dig_port); + intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); intel_hdmi_init_connector(intel_dig_port, intel_connector); } diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 648a13c6043c..e24174d08fed 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -114,51 +114,68 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, #define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) 
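The hunk above routes every infoframe operation through three function pointers on intel_digital_port, picked once at init time, and on DDI platforms the LSPCON hooks take precedence over the default hsw_* ones whenever an LSPCON bridge is active. A minimal standalone model of that dispatch pattern, with illustrative names and printf stubs in place of the real hooks:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's per-platform hooks. */
struct dig_port_model {
        bool lspcon_active;
        void (*set_infoframes)(struct dig_port_model *port, bool enable);
};

static void hsw_set(struct dig_port_model *port, bool enable)
{
        printf("hsw_set_infoframes(enable=%d)\n", enable);
}

static void lspcon_set(struct dig_port_model *port, bool enable)
{
        printf("lspcon_set_infoframes(enable=%d)\n", enable);
}

/* Mirrors the selection order above: on DDI, LSPCON overrides the default. */
static void infoframe_init(struct dig_port_model *port)
{
        port->set_infoframes = port->lspcon_active ? lspcon_set : hsw_set;
}

int main(void)
{
        struct dig_port_model port = { .lspcon_active = true };

        infoframe_init(&port);
        port.set_infoframes(&port, true);  /* dispatches to the LSPCON hook */
        return 0;
}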
/** - * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin + * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin * @dev_priv: private driver data pointer * @pin: the pin to gather stats on + * @long_hpd: whether the HPD IRQ was long or short * - * Gather stats about HPD irqs from the specified @pin, and detect irq + * Gather stats about HPD IRQs from the specified @pin, and detect IRQ * storms. Only the pin specific stats and state are changed, the caller is * responsible for further action. * - * The number of irqs that are allowed within @HPD_STORM_DETECT_PERIOD is + * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to - * @HPD_STORM_DEFAULT_THRESHOLD. If this threshold is exceeded, it's - * considered an irq storm and the irq state is set to @HPD_MARK_DISABLED. + * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and + * short IRQs count as +1. If this threshold is exceeded, it's considered an + * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED. + * + * By default, most systems will only count long IRQs towards + * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also + * suffer from short IRQ storms and must also track these. Because short IRQ + * storms are naturally caused by sideband interactions with DP MST devices, + * short IRQ detection is only enabled for systems without DP MST support. + * Systems which are new enough to support DP MST are far less likely to + * suffer from IRQ storms at all, so this is fine. * * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs, * and should only be adjusted for automated hotplug testing. * - * Return true if an irq storm was detected on @pin. + * Return true if an IRQ storm was detected on @pin. */ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, - enum hpd_pin pin) + enum hpd_pin pin, bool long_hpd) { - unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies; + struct i915_hotplug *hpd = &dev_priv->hotplug; + unsigned long start = hpd->stats[pin].last_jiffies; unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); - const int threshold = dev_priv->hotplug.hpd_storm_threshold; + const int increment = long_hpd ? 
10 : 1; + const int threshold = hpd->hpd_storm_threshold; bool storm = false; + if (!threshold || + (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled)) + return false; + if (!time_in_range(jiffies, start, end)) { - dev_priv->hotplug.stats[pin].last_jiffies = jiffies; - dev_priv->hotplug.stats[pin].count = 0; - DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin); - } else if (dev_priv->hotplug.stats[pin].count > threshold && - threshold) { - dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED; + hpd->stats[pin].last_jiffies = jiffies; + hpd->stats[pin].count = 0; + } + + hpd->stats[pin].count += increment; + if (hpd->stats[pin].count > threshold) { + hpd->stats[pin].state = HPD_MARK_DISABLED; DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin); storm = true; } else { - dev_priv->hotplug.stats[pin].count++; DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin, - dev_priv->hotplug.stats[pin].count); + hpd->stats[pin].count); } return storm; } -static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) +static void +intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; struct intel_connector *intel_connector; @@ -228,7 +245,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) drm_for_each_connector_iter(connector, &conn_iter) { struct intel_connector *intel_connector = to_intel_connector(connector); - if (intel_connector->encoder->hpd_pin == pin) { + /* Don't check MST ports, they don't have pins */ + if (!intel_connector->mst_port && + intel_connector->encoder->hpd_pin == pin) { if (connector->polled != intel_connector->polled) DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", connector->name); @@ -346,8 +365,8 @@ static void i915_hotplug_work_func(struct work_struct *work) hpd_event_bits = dev_priv->hotplug.event_bits; dev_priv->hotplug.event_bits = 0; - /* Disable hotplug on connectors that hit an irq storm. */ - intel_hpd_irq_storm_disable(dev_priv); + /* Enable polling for connectors which had HPD IRQ storms */ + intel_hpd_irq_storm_switch_to_polling(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); @@ -395,37 +414,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, struct intel_encoder *encoder; bool storm_detected = false; bool queue_dig = false, queue_hp = false; + u32 long_hpd_pulse_mask = 0; + u32 short_hpd_pulse_mask = 0; + enum hpd_pin pin; if (!pin_mask) return; spin_lock(&dev_priv->irq_lock); + + /* + * Determine whether ->hpd_pulse() exists for each pin, and + * whether we have a short or a long pulse. This is needed + * as each pin may have up to two encoders (HDMI and DP) and + * only one of them (DP) will have ->hpd_pulse(). + */ for_each_intel_encoder(&dev_priv->drm, encoder) { - enum hpd_pin pin = encoder->hpd_pin; bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder); + enum port port = encoder->port; + bool long_hpd; + pin = encoder->hpd_pin; if (!(BIT(pin) & pin_mask)) continue; - if (has_hpd_pulse) { - bool long_hpd = long_mask & BIT(pin); - enum port port = encoder->port; + if (!has_hpd_pulse) + continue; - DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), - long_hpd ? "long" : "short"); - /* - * For long HPD pulses we want to have the digital queue happen, - * but we still want HPD storm detection to function.
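To make the weighted storm accounting in intel_hpd_irq_storm_detect() concrete: long pulses add 10 to the per-pin count, short pulses add 1, and the count resets whenever the detection window expires. A self-contained sketch of the same logic, with a plain millisecond clock standing in for jiffies and illustrative constants in place of HPD_STORM_DETECT_PERIOD and the threshold:

#include <stdbool.h>
#include <stdio.h>

#define STORM_PERIOD_MS  1000   /* stand-in for HPD_STORM_DETECT_PERIOD */
#define STORM_THRESHOLD  50     /* stand-in for hpd_storm_threshold */

struct pin_stats {
        unsigned long window_start_ms;
        int count;
};

/* Returns true once the accumulated weight exceeds the threshold. */
static bool storm_detect(struct pin_stats *s, unsigned long now_ms,
                         bool long_hpd)
{
        /* Long pulses weigh 10x as much as short (sideband) pulses. */
        const int increment = long_hpd ? 10 : 1;

        if (now_ms - s->window_start_ms >= STORM_PERIOD_MS) {
                /* Window expired: restart the accounting. */
                s->window_start_ms = now_ms;
                s->count = 0;
        }

        s->count += increment;
        return s->count > STORM_THRESHOLD;
}

int main(void)
{
        struct pin_stats s = { 0, 0 };
        unsigned long t;

        /* Six long pulses in one window: 60 > 50, so a storm fires. */
        for (t = 0; t < 6; t++)
                if (storm_detect(&s, t, true))
                        printf("storm at pulse %lu\n", t);
        return 0;
}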
- */ - queue_dig = true; - if (long_hpd) { - dev_priv->hotplug.long_port_mask |= (1 << port); - } else { - /* for short HPD just trigger the digital queue */ - dev_priv->hotplug.short_port_mask |= (1 << port); - continue; - } + long_hpd = long_mask & BIT(pin); + + DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), + long_hpd ? "long" : "short"); + queue_dig = true; + + if (long_hpd) { + long_hpd_pulse_mask |= BIT(pin); + dev_priv->hotplug.long_port_mask |= BIT(port); + } else { + short_hpd_pulse_mask |= BIT(pin); + dev_priv->hotplug.short_port_mask |= BIT(port); } + } + + /* Now process each pin just once */ + for_each_hpd_pin(pin) { + bool long_hpd; + + if (!(BIT(pin) & pin_mask)) + continue; if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { /* @@ -442,17 +478,30 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) continue; - if (!has_hpd_pulse) { + /* + * Delegate to ->hpd_pulse() if one of the encoders for this + * pin has it, otherwise let the hotplug_work deal with this + * pin directly. + */ + if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) { + long_hpd = long_hpd_pulse_mask & BIT(pin); + } else { dev_priv->hotplug.event_bits |= BIT(pin); + long_hpd = true; queue_hp = true; } - if (intel_hpd_irq_storm_detect(dev_priv, pin)) { + if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) { dev_priv->hotplug.event_bits &= ~BIT(pin); storm_detected = true; + queue_hp = true; } } + /* + * Disable any IRQs that storms were detected on. Polling enablement + * happens later in our hotplug work. + */ if (storm_detected && dev_priv->display_irqs_enabled) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock(&dev_priv->irq_lock); diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 37ef540dd280..bc27b691d824 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -108,13 +108,14 @@ fail: * This function reads status register to verify if HuC * firmware was successfully loaded. * - * Returns positive value if HuC firmware is loaded and verified - * and -ENODEV if HuC is not present. + * Returns: 1 if HuC firmware is loaded and verified, + * 0 if HuC firmware is not loaded and -ENODEV if HuC + * is not present on this platform. 
*/ int intel_huc_check_status(struct intel_huc *huc) { struct drm_i915_private *dev_priv = huc_to_i915(huc); - u32 status; + bool status; if (!HAS_HUC(dev_priv)) return -ENODEV; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 33d87ab93fdd..802d0394ccc4 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -817,7 +817,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv) unsigned int pin; int ret; - if (INTEL_INFO(dev_priv)->num_pipes == 0) + if (!HAS_DISPLAY(dev_priv)) return 0; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index cdf19553ffac..5d5336fbe7b0 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c @@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) lpe_audio_platdev_destroy(dev_priv); irq_free_desc(dev_priv->lpe_audio.irq); -} + dev_priv->lpe_audio.irq = -1; + dev_priv->lpe_audio.platdev = NULL; +} /** * intel_lpe_audio_notify() - notify lpe audio event diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 43957bb37a42..4be167dcd209 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -259,63 +259,6 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, ce->lrc_desc = desc; } -static struct i915_priolist * -lookup_priolist(struct intel_engine_cs *engine, int prio) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - struct i915_priolist *p; - struct rb_node **parent, *rb; - bool first = true; - - if (unlikely(execlists->no_priolist)) - prio = I915_PRIORITY_NORMAL; - -find_priolist: - /* most positive priority is scheduled first, equal priorities fifo */ - rb = NULL; - parent = &execlists->queue.rb_root.rb_node; - while (*parent) { - rb = *parent; - p = to_priolist(rb); - if (prio > p->priority) { - parent = &rb->rb_left; - } else if (prio < p->priority) { - parent = &rb->rb_right; - first = false; - } else { - return p; - } - } - - if (prio == I915_PRIORITY_NORMAL) { - p = &execlists->default_priolist; - } else { - p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); - /* Convert an allocation failure to a priority bump */ - if (unlikely(!p)) { - prio = I915_PRIORITY_NORMAL; /* recurses just once */ - - /* To maintain ordering with all rendering, after an - * allocation failure we have to disable all scheduling. - * Requests will then be executed in fifo, and schedule - * will ensure that dependencies are emitted in fifo. - * There will be still some reordering with existing - * requests, so if userspace lied about their - * dependencies that reordering may be visible. 
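The lookup_priolist() removed above (replaced by i915_sched_lookup_priolist(), which the rewritten callers below use) keeps one FIFO of requests per priority level, ordered most-positive first, and falls back to a single default bucket when allocation fails. A simplified standalone model of that lookup, using a sorted singly-linked list where the driver uses a cached rbtree:

#include <stdio.h>
#include <stdlib.h>

/* One FIFO bucket per priority level, kept sorted most-positive first. */
struct bucket {
        int prio;
        struct bucket *next;
        /* per-bucket request list elided; each bucket drains FIFO */
};

static struct bucket *lookup_bucket(struct bucket **head, int prio)
{
        struct bucket **p = head, *b;

        while (*p && (*p)->prio > prio)
                p = &(*p)->next;
        if (*p && (*p)->prio == prio)
                return *p;              /* equal priorities share a FIFO */

        b = malloc(sizeof(*b));
        if (!b)
                return NULL;            /* driver collapses to one bucket */
        b->prio = prio;
        b->next = *p;
        *p = b;
        return b;
}

int main(void)
{
        struct bucket *head = NULL, *b;

        lookup_bucket(&head, 0);
        lookup_bucket(&head, 10);
        lookup_bucket(&head, -5);
        for (b = head; b; b = b->next)
                printf("prio %d\n", b->prio);   /* 10, 0, -5 */
        return 0;
}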
- */ - execlists->no_priolist = true; - goto find_priolist; - } - } - - p->priority = prio; - INIT_LIST_HEAD(&p->requests); - rb_link_node(&p->node, rb, parent); - rb_insert_color_cached(&p->node, &execlists->queue, first); - - return p; -} - static void unwind_wa_tail(struct i915_request *rq) { rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); @@ -324,9 +267,9 @@ static void unwind_wa_tail(struct i915_request *rq) static void __unwind_incomplete_requests(struct intel_engine_cs *engine) { - struct i915_request *rq, *rn; - struct i915_priolist *uninitialized_var(p); - int last_prio = I915_PRIORITY_INVALID; + struct i915_request *rq, *rn, *active = NULL; + struct list_head *uninitialized_var(pl); + int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT; lockdep_assert_held(&engine->timeline.lock); @@ -334,19 +277,34 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine) &engine->timeline.requests, link) { if (i915_request_completed(rq)) - return; + break; __i915_request_unsubmit(rq); unwind_wa_tail(rq); + GEM_BUG_ON(rq->hw_context->active); + GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); - if (rq_prio(rq) != last_prio) { - last_prio = rq_prio(rq); - p = lookup_priolist(engine, last_prio); + if (rq_prio(rq) != prio) { + prio = rq_prio(rq); + pl = i915_sched_lookup_priolist(engine, prio); } + GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); + + list_add(&rq->sched.link, pl); - GEM_BUG_ON(p->priority != rq_prio(rq)); - list_add(&rq->sched.link, &p->requests); + active = rq; + } + + /* + * The active request is now effectively the start of a new client + * stream, so give it the equivalent small priority bump to prevent + * it being gazumped a second time by another peer. + */ + if (!(prio & I915_PRIORITY_NEWCLIENT)) { + prio |= I915_PRIORITY_NEWCLIENT; + list_move_tail(&active->sched.link, + i915_sched_lookup_priolist(engine, prio)); } } @@ -355,13 +313,8 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists) { struct intel_engine_cs *engine = container_of(execlists, typeof(*engine), execlists); - unsigned long flags; - - spin_lock_irqsave(&engine->timeline.lock, flags); __unwind_incomplete_requests(engine); - - spin_unlock_irqrestore(&engine->timeline.lock, flags); } static inline void @@ -394,13 +347,17 @@ execlists_user_end(struct intel_engine_execlists *execlists) static inline void execlists_context_schedule_in(struct i915_request *rq) { + GEM_BUG_ON(rq->hw_context->active); + execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); intel_engine_context_in(rq->engine); + rq->hw_context->active = rq->engine; } static inline void execlists_context_schedule_out(struct i915_request *rq, unsigned long status) { + rq->hw_context->active = NULL; intel_engine_context_out(rq->engine); execlists_context_status_change(rq, status); trace_i915_request_out(rq); @@ -417,21 +374,37 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) static u64 execlists_update_context(struct i915_request *rq) { + struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt; struct intel_context *ce = rq->hw_context; - struct i915_hw_ppgtt *ppgtt = - rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt; u32 *reg_state = ce->lrc_reg_state; reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); - /* True 32b PPGTT with dynamic page allocation: update PDP + /* + * True 32b PPGTT with dynamic page allocation: update PDP * registers and point the unallocated PDPs to scratch page. 
* PML4 is allocated during ppgtt init, so this is not needed * in 48-bit mode. */ - if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm)) + if (!i915_vm_is_48bit(&ppgtt->vm)) execlists_update_context_pdps(ppgtt, reg_state); + /* + * Make sure the context image is complete before we submit it to HW. + * + * Ostensibly, writes (including the WCB) should be flushed prior to + * an uncached write such as our mmio register access, the empirical + * evidence (esp. on Braswell) suggests that the WC write into memory + * may not be visible to the HW prior to the completion of the UC + * register write and that we may begin execution from the context + * before its image is complete leading to invalid PD chasing. + * + * Furthermore, Braswell, at least, wants a full mb to be sure that + * the writes are coherent in memory (visible to the GPU) prior to + * execution, and not just visible to other CPUs (as is the result of + * wmb). + */ + mb(); return ce->lrc_desc; } @@ -669,8 +642,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine) while ((rb = rb_first_cached(&execlists->queue))) { struct i915_priolist *p = to_priolist(rb); struct i915_request *rq, *rn; + int i; - list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { + priolist_for_each_request_consume(rq, rn, p, i) { /* * Can we combine this request with the current port? * It has to be the same context/ringbuffer and not @@ -689,11 +663,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * combine this request with the last, then we * are done. */ - if (port == last_port) { - __list_del_many(&p->requests, - &rq->sched.link); + if (port == last_port) goto done; - } /* * If GVT overrides us we only ever submit @@ -703,11 +674,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * request) to the second port. */ if (ctx_single_port_submission(last->hw_context) || - ctx_single_port_submission(rq->hw_context)) { - __list_del_many(&p->requests, - &rq->sched.link); + ctx_single_port_submission(rq->hw_context)) goto done; - } GEM_BUG_ON(last->hw_context == rq->hw_context); @@ -718,15 +686,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine) GEM_BUG_ON(port_isset(port)); } - INIT_LIST_HEAD(&rq->sched.link); + list_del_init(&rq->sched.link); + __i915_request_submit(rq); trace_i915_request_in(rq, port_index(port, execlists)); + last = rq; submit = true; } rb_erase_cached(&p->node, &execlists->queue); - INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); } @@ -803,6 +772,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) static void reset_csb_pointers(struct intel_engine_execlists *execlists) { + const unsigned int reset_value = GEN8_CSB_ENTRIES - 1; + /* * After a reset, the HW starts writing into CSB entry [0]. We * therefore have to set our HEAD pointer back one entry so that @@ -812,8 +783,8 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists) * inline comparison of our cached head position against the last HW * write works even before the first interrupt. */ - execlists->csb_head = execlists->csb_write_reset; - WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset); + execlists->csb_head = reset_value; + WRITE_ONCE(*execlists->csb_write, reset_value); } static void nop_submission_tasklet(unsigned long data) @@ -854,27 +825,34 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) /* Mark all executing requests as skipped. 
*/ list_for_each_entry(rq, &engine->timeline.requests, link) { GEM_BUG_ON(!rq->global_seqno); - if (!i915_request_completed(rq)) - dma_fence_set_error(&rq->fence, -EIO); + + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) + continue; + + dma_fence_set_error(&rq->fence, -EIO); } /* Flush the queued requests to the timeline list (for retiring). */ while ((rb = rb_first_cached(&execlists->queue))) { struct i915_priolist *p = to_priolist(rb); + int i; - list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { - INIT_LIST_HEAD(&rq->sched.link); + priolist_for_each_request_consume(rq, rn, p, i) { + list_del_init(&rq->sched.link); dma_fence_set_error(&rq->fence, -EIO); __i915_request_submit(rq); } rb_erase_cached(&p->node, &execlists->queue); - INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); } + intel_write_status_page(engine, + I915_GEM_HWS_INDEX, + intel_engine_last_submit(engine)); + /* Remaining _unready_ requests will be nop'ed when submitted */ execlists->queue_priority = INT_MIN; @@ -1076,13 +1054,7 @@ static void queue_request(struct intel_engine_cs *engine, struct i915_sched_node *node, int prio) { - list_add_tail(&node->link, - &lookup_priolist(engine, prio)->requests); -} - -static void __update_queue(struct intel_engine_cs *engine, int prio) -{ - engine->execlists.queue_priority = prio; + list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio)); } static void __submit_queue_imm(struct intel_engine_cs *engine) @@ -1101,7 +1073,7 @@ static void __submit_queue_imm(struct intel_engine_cs *engine) static void submit_queue(struct intel_engine_cs *engine, int prio) { if (prio > engine->execlists.queue_priority) { - __update_queue(engine, prio); + engine->execlists.queue_priority = prio; __submit_queue_imm(engine); } } @@ -1124,139 +1096,6 @@ static void execlists_submit_request(struct i915_request *request) spin_unlock_irqrestore(&engine->timeline.lock, flags); } -static struct i915_request *sched_to_request(struct i915_sched_node *node) -{ - return container_of(node, struct i915_request, sched); -} - -static struct intel_engine_cs * -sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked) -{ - struct intel_engine_cs *engine = sched_to_request(node)->engine; - - GEM_BUG_ON(!locked); - - if (engine != locked) { - spin_unlock(&locked->timeline.lock); - spin_lock(&engine->timeline.lock); - } - - return engine; -} - -static void execlists_schedule(struct i915_request *request, - const struct i915_sched_attr *attr) -{ - struct i915_priolist *uninitialized_var(pl); - struct intel_engine_cs *engine, *last; - struct i915_dependency *dep, *p; - struct i915_dependency stack; - const int prio = attr->priority; - LIST_HEAD(dfs); - - GEM_BUG_ON(prio == I915_PRIORITY_INVALID); - - if (i915_request_completed(request)) - return; - - if (prio <= READ_ONCE(request->sched.attr.priority)) - return; - - /* Need BKL in order to use the temporary link inside i915_dependency */ - lockdep_assert_held(&request->i915->drm.struct_mutex); - - stack.signaler = &request->sched; - list_add(&stack.dfs_link, &dfs); - - /* - * Recursively bump all dependent priorities to match the new request. 
- * - * A naive approach would be to use recursion: - * static void update_priorities(struct i915_sched_node *node, prio) { - * list_for_each_entry(dep, &node->signalers_list, signal_link) - * update_priorities(dep->signal, prio) - * queue_request(node); - * } - * but that may have unlimited recursion depth and so runs a very - * real risk of overrunning the kernel stack. Instead, we build - * a flat list of all dependencies starting with the current request. - * As we walk the list of dependencies, we add all of its dependencies - * to the end of the list (this may include an already visited - * request) and continue to walk onwards onto the new dependencies. The - * end result is a topological list of requests in reverse order, the - * last element in the list is the request we must execute first. - */ - list_for_each_entry(dep, &dfs, dfs_link) { - struct i915_sched_node *node = dep->signaler; - - /* - * Within an engine, there can be no cycle, but we may - * refer to the same dependency chain multiple times - * (redundant dependencies are not eliminated) and across - * engines. - */ - list_for_each_entry(p, &node->signalers_list, signal_link) { - GEM_BUG_ON(p == dep); /* no cycles! */ - - if (i915_sched_node_signaled(p->signaler)) - continue; - - GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority); - if (prio > READ_ONCE(p->signaler->attr.priority)) - list_move_tail(&p->dfs_link, &dfs); - } - } - - /* - * If we didn't need to bump any existing priorities, and we haven't - * yet submitted this request (i.e. there is no potential race with - * execlists_submit_request()), we can set our own priority and skip - * acquiring the engine locks. - */ - if (request->sched.attr.priority == I915_PRIORITY_INVALID) { - GEM_BUG_ON(!list_empty(&request->sched.link)); - request->sched.attr = *attr; - if (stack.dfs_link.next == stack.dfs_link.prev) - return; - __list_del_entry(&stack.dfs_link); - } - - last = NULL; - engine = request->engine; - spin_lock_irq(&engine->timeline.lock); - - /* Fifo and depth-first replacement ensure our deps execute before us */ - list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { - struct i915_sched_node *node = dep->signaler; - - INIT_LIST_HEAD(&dep->dfs_link); - - engine = sched_lock_engine(node, engine); - - if (prio <= node->attr.priority) - continue; - - node->attr.priority = prio; - if (!list_empty(&node->link)) { - if (last != engine) { - pl = lookup_priolist(engine, prio); - last = engine; - } - GEM_BUG_ON(pl->priority != prio); - list_move_tail(&node->link, &pl->requests); - } - - if (prio > engine->execlists.queue_priority && - i915_sw_fence_done(&sched_to_request(node)->submit)) { - /* defer submission until after all of our updates */ - __update_queue(engine, prio); - tasklet_hi_schedule(&engine->execlists.tasklet); - } - } - - spin_unlock_irq(&engine->timeline.lock); -} - static void execlists_context_destroy(struct intel_context *ce) { GEM_BUG_ON(ce->pin_count); @@ -1272,6 +1111,28 @@ static void execlists_context_destroy(struct intel_context *ce) static void execlists_context_unpin(struct intel_context *ce) { + struct intel_engine_cs *engine; + + /* + * The tasklet may still be using a pointer to our state, via an + * old request. However, since we know we only unpin the context + * on retirement of the following request, we know that the last + * request referencing us will have had a completion CS interrupt.
+ * If we see that it is still active, it means that the tasklet hasn't + * had the chance to run yet; let it run before we teardown the + * reference it may use. + */ + engine = READ_ONCE(ce->active); + if (unlikely(engine)) { + unsigned long flags; + + spin_lock_irqsave(&engine->timeline.lock, flags); + process_csb(engine); + spin_unlock_irqrestore(&engine->timeline.lock, flags); + + GEM_BUG_ON(READ_ONCE(ce->active)); + } + i915_gem_context_unpin_hw_id(ce->gem_context); intel_ring_unpin(ce->ring); @@ -1375,6 +1236,7 @@ execlists_context_pin(struct intel_engine_cs *engine, struct intel_context *ce = to_intel_context(ctx, engine); lockdep_assert_held(&ctx->i915->drm.struct_mutex); + GEM_BUG_ON(!ctx->ppgtt); if (likely(ce->pin_count++)) return ce; @@ -1431,9 +1293,10 @@ static int execlists_request_alloc(struct i915_request *request) static u32 * gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) { + /* NB no one else is allowed to scribble over scratch + 256! */ *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); - *batch++ = i915_ggtt_offset(engine->scratch) + 256; + *batch++ = i915_scratch_offset(engine->i915) + 256; *batch++ = 0; *batch++ = MI_LOAD_REGISTER_IMM(1); @@ -1447,7 +1310,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); - *batch++ = i915_ggtt_offset(engine->scratch) + 256; + *batch++ = i915_scratch_offset(engine->i915) + 256; *batch++ = 0; return batch; @@ -1484,7 +1347,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE, - i915_ggtt_offset(engine->scratch) + + i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES); *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; @@ -1553,18 +1416,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); - /* WaClearSlmSpaceAtContextSwitch:kbl */ - /* Actual scratch location is at 128 bytes offset */ - if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) { - batch = gen8_emit_pipe_control(batch, - PIPE_CONTROL_FLUSH_L3 | - PIPE_CONTROL_GLOBAL_GTT_IVB | - PIPE_CONTROL_CS_STALL | - PIPE_CONTROL_QW_WRITE, - i915_ggtt_offset(engine->scratch) - + 2 * CACHELINE_BYTES); - } - /* WaMediaPoolStateCmdInWABB:bxt,glk */ if (HAS_POOLED_EU(engine->i915)) { /* @@ -1679,7 +1530,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) unsigned int i; int ret; - if (GEM_WARN_ON(engine->id != RCS)) + if (GEM_DEBUG_WARN_ON(engine->id != RCS)) return -EINVAL; switch (INTEL_GEN(engine->i915)) { @@ -1718,8 +1569,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) */ for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) { wa_bb[i]->offset = batch_ptr - batch; - if (GEM_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, - CACHELINE_BYTES))) { + if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, + CACHELINE_BYTES))) { ret = -EINVAL; break; } @@ -1781,6 +1632,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine) static int gen8_init_common_ring(struct intel_engine_cs *engine) { + intel_engine_apply_workarounds(engine); + intel_mocs_init_engine(engine); intel_engine_reset_breadcrumbs(engine); @@ -1805,7 +1658,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine) if (ret) return ret; - intel_whitelist_workarounds_apply(engine); 
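The comment deleted from execlists_schedule() a little above (the function itself lives on as i915_schedule(), assigned to engine->schedule further below) explains why priority bumping builds an explicit flat dependency list instead of recursing. A compact standalone model of that worklist idea, with a fixed-size array standing in for the driver's dfs_link list and illustrative node bookkeeping:

#include <stdio.h>

#define MAX_NODES 8

/* Each node depends on (signals after) the nodes in deps[]. */
struct node {
        int prio;
        int ndeps;
        struct node *deps[MAX_NODES];
        int queued;     /* already on the flat list? */
};

/* Bump @root and everything it depends on to at least @prio,
 * using an explicit worklist instead of recursion. */
static void bump_priority(struct node *root, int prio)
{
        struct node *list[MAX_NODES];
        int head = 0, tail = 0, i;

        list[tail++] = root;
        root->queued = 1;

        /* Build the flat dependency list (reverse topological order). */
        while (head < tail) {
                struct node *n = list[head++];

                for (i = 0; i < n->ndeps; i++) {
                        struct node *d = n->deps[i];

                        if (!d->queued && d->prio < prio) {
                                d->queued = 1;
                                list[tail++] = d;
                        }
                }
        }

        /* Last element must execute first, so raise priorities in reverse. */
        while (tail--)
                if (list[tail]->prio < prio)
                        list[tail]->prio = prio;
}

int main(void)
{
        struct node a = { .prio = 0 }, b = { .prio = 0 };
        struct node c = { .prio = 0, .ndeps = 2, .deps = { &a, &b } };

        bump_priority(&c, 5);
        printf("%d %d %d\n", a.prio, b.prio, c.prio);   /* 5 5 5 */
        return 0;
}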
+ intel_engine_apply_whitelist(engine); /* We need to disable the AsyncFlip performance optimisations in order * to use MI_WAIT_FOR_EVENT within the CS. It should already be @@ -1828,7 +1681,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine) if (ret) return ret; - intel_whitelist_workarounds_apply(engine); + intel_engine_apply_whitelist(engine); return 0; } @@ -1902,7 +1755,7 @@ static void execlists_reset(struct intel_engine_cs *engine, unsigned long flags; u32 *regs; - GEM_TRACE("%s request global=%x, current=%d\n", + GEM_TRACE("%s request global=%d, current=%d\n", engine->name, request ? request->global_seqno : 0, intel_engine_get_seqno(engine)); @@ -2029,8 +1882,7 @@ static int gen8_emit_bb_start(struct i915_request *rq, * it is unsafe in case of lite-restore (because the ctx is * not idle). PML4 is allocated during ppgtt init so this is * not needed in 48-bit.*/ - if (rq->gem_context->ppgtt && - (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) && + if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) && !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) && !intel_vgpu_active(rq->i915)) { ret = intel_logical_ring_emit_pdps(rq); @@ -2109,7 +1961,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode) if (mode & EMIT_INVALIDATE) { cmd |= MI_INVALIDATE_TLB; - if (request->engine->id == VCS) + if (request->engine->class == VIDEO_DECODE_CLASS) cmd |= MI_INVALIDATE_BSD; } @@ -2127,7 +1979,7 @@ static int gen8_emit_flush_render(struct i915_request *request, { struct intel_engine_cs *engine = request->engine; u32 scratch_addr = - i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; + i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES; bool vf_flush_wa = false, dc_flush_wa = false; u32 *cs, flags = 0; int len; @@ -2241,7 +2093,7 @@ static int gen8_init_rcs_context(struct i915_request *rq) { int ret; - ret = intel_ctx_workarounds_emit(rq); + ret = intel_engine_emit_ctx_wa(rq); if (ret) return ret; @@ -2294,7 +2146,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = execlists_submit_request; engine->cancel_requests = execlists_cancel_requests; - engine->schedule = execlists_schedule; + engine->schedule = i915_schedule; engine->execlists.tasklet.func = execlists_submission_tasklet; engine->reset.prepare = execlists_reset_prepare; @@ -2382,12 +2234,6 @@ logical_ring_setup(struct intel_engine_cs *engine) logical_ring_default_irqs(engine); } -static bool csb_force_mmio(struct drm_i915_private *i915) -{ - /* Older GVT emulation depends upon intercepting CSB mmio */ - return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915); -} - static int logical_ring_init(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; @@ -2417,24 +2263,12 @@ static int logical_ring_init(struct intel_engine_cs *engine) upper_32_bits(ce->lrc_desc); } - execlists->csb_read = - i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); - if (csb_force_mmio(i915)) { - execlists->csb_status = (u32 __force *) - (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0))); + execlists->csb_status = + &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; - execlists->csb_write = (u32 __force *)execlists->csb_read; - execlists->csb_write_reset = - _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK, - GEN8_CSB_ENTRIES - 1); - } else { - execlists->csb_status = - &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; + execlists->csb_write = 
+ &engine->status_page.page_addr[intel_hws_csb_write_index(i915)]; - execlists->csb_write = - &engine->status_page.page_addr[intel_hws_csb_write_index(i915)]; - execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1; - } reset_csb_pointers(execlists); return 0; @@ -2464,10 +2298,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine) if (ret) return ret; - ret = intel_engine_create_scratch(engine, PAGE_SIZE); - if (ret) - goto err_cleanup_common; - ret = intel_init_workaround_bb(engine); if (ret) { /* @@ -2479,11 +2309,10 @@ int logical_render_ring_init(struct intel_engine_cs *engine) ret); } - return 0; + intel_engine_init_whitelist(engine); + intel_engine_init_workarounds(engine); -err_cleanup_common: - intel_engine_cleanup_common(engine); - return ret; + return 0; } int logical_xcs_ring_init(struct intel_engine_cs *engine) @@ -2632,7 +2461,6 @@ static void execlists_init_reg_state(u32 *regs, struct intel_ring *ring) { struct drm_i915_private *dev_priv = engine->i915; - struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt; u32 base = engine->mmio_base; bool rcs = engine->class == RENDER_CLASS; @@ -2704,12 +2532,12 @@ static void execlists_init_reg_state(u32 *regs, CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0); CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0); - if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) { + if (i915_vm_is_48bit(&ctx->ppgtt->vm)) { /* 64b PPGTT (48bit canonical) * PDP0_DESCRIPTOR contains the base address to PML4 and * other PDP Descriptors are ignored. */ - ASSIGN_CTX_PML4(ppgtt, regs); + ASSIGN_CTX_PML4(ctx->ppgtt, regs); } if (rcs) { diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 3e085c5f2b81..96a8d9524b0c 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -27,6 +27,22 @@ #include <drm/drm_dp_dual_mode_helper.h> #include "intel_drv.h" +/* LSPCON OUI Vendor ID(signatures) */ +#define LSPCON_VENDOR_PARADE_OUI 0x001CF8 +#define LSPCON_VENDOR_MCA_OUI 0x0060AD + +/* AUX addresses to write MCA AVI IF */ +#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0 +#define LSPCON_MCA_AVI_IF_CTRL 0x5DF +#define LSPCON_MCA_AVI_IF_KICKOFF (1 << 0) +#define LSPCON_MCA_AVI_IF_HANDLED (1 << 1) + +/* AUX addresses to write Parade AVI IF */ +#define LSPCON_PARADE_AVI_IF_WRITE_OFFSET 0x516 +#define LSPCON_PARADE_AVI_IF_CTRL 0x51E +#define LSPCON_PARADE_AVI_IF_KICKOFF (1 << 7) +#define LSPCON_PARADE_AVI_IF_DATA_SIZE 32 + static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon) { struct intel_digital_port *dig_port = @@ -50,6 +66,40 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode) } } +static bool lspcon_detect_vendor(struct intel_lspcon *lspcon) +{ + struct intel_dp *dp = lspcon_to_intel_dp(lspcon); + struct drm_dp_dpcd_ident *ident; + u32 vendor_oui; + + if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) { + DRM_ERROR("Can't read description\n"); + return false; + } + + ident = &dp->desc.ident; + vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) | + ident->oui[2]; + + switch (vendor_oui) { + case LSPCON_VENDOR_MCA_OUI: + lspcon->vendor = LSPCON_VENDOR_MCA; + DRM_DEBUG_KMS("Vendor: Mega Chips\n"); + break; + + case LSPCON_VENDOR_PARADE_OUI: + lspcon->vendor = LSPCON_VENDOR_PARADE; + DRM_DEBUG_KMS("Vendor: Parade Tech\n"); + break; + + default: + DRM_ERROR("Invalid/Unknown vendor OUI\n"); + return false; + } + + return true; +} + static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon 
*lspcon) { enum drm_lspcon_mode current_mode; @@ -130,6 +180,21 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon) return true; } +void lspcon_ycbcr420_config(struct drm_connector *connector, + struct intel_crtc_state *crtc_state) +{ + const struct drm_display_info *info = &connector->display_info; + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + + if (drm_mode_is_420_only(info, adjusted_mode) && + connector->ycbcr_420_allowed) { + crtc_state->port_clock /= 2; + crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; + crtc_state->lspcon_downsampling = true; + } +} + static bool lspcon_probe(struct intel_lspcon *lspcon) { int retry; @@ -159,7 +224,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) /* Yay ... got a LSPCON device */ DRM_DEBUG_KMS("LSPCON detected\n"); lspcon->mode = lspcon_wait_mode(lspcon, expected_mode); - lspcon->active = true; + + /* + * In the SW state machine, let's put LSPCON in PCON mode only. + * In this way, it will work with both HDMI 1.4 sinks as well as HDMI + * 2.0 sinks. + */ + if (lspcon->mode != DRM_LSPCON_MODE_PCON) { + if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) { + DRM_ERROR("LSPCON mode change to PCON failed\n"); + return false; + } + } return true; } @@ -185,6 +261,255 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n"); } +static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux) +{ + u8 avi_if_ctrl; + u8 retry; + ssize_t ret; + + /* Check if LSPCON FW is ready for data */ + for (retry = 0; retry < 5; retry++) { + if (retry) + usleep_range(200, 300); + + ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL, + &avi_if_ctrl, 1); + if (ret < 0) { + DRM_ERROR("Failed to read AVI IF control\n"); + return false; + } + + if ((avi_if_ctrl & LSPCON_PARADE_AVI_IF_KICKOFF) == 0) + return true; + } + + DRM_ERROR("Parade FW not ready to accept AVI IF\n"); + return false; +} + +static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux, + uint8_t *avi_buf) +{ + u8 avi_if_ctrl; + u8 block_count = 0; + u8 *data; + uint16_t reg; + ssize_t ret; + + while (block_count < 4) { + if (!lspcon_parade_fw_ready(aux)) { + DRM_DEBUG_KMS("LSPCON FW not ready, block %d\n", + block_count); + return false; + } + + reg = LSPCON_PARADE_AVI_IF_WRITE_OFFSET; + data = avi_buf + block_count * 8; + ret = drm_dp_dpcd_write(aux, reg, data, 8); + if (ret < 0) { + DRM_ERROR("Failed to write AVI IF block %d\n", + block_count); + return false; + } + + /* + * Once a block of data is written, we have to inform the FW + * about this by writing into avi infoframe control register: + * - set the kickoff bit[7] to 1 + * - write the block no.
to bits[1:0] + */ + reg = LSPCON_PARADE_AVI_IF_CTRL; + avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count; + ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1); + if (ret < 0) { + DRM_ERROR("Failed to update (0x%x), block %d\n", + reg, block_count); + return false; + } + + block_count++; + } + + DRM_DEBUG_KMS("Wrote AVI IF blocks successfully\n"); + return true; +} + +static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux, + const uint8_t *frame, + ssize_t len) +{ + uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, }; + + /* + * Parade's frame contains 32 bytes of data, divided + * into 4 blocks: + * Token byte (first byte of first block, must be non-zero) + * HB0 to HB2 from AVI IF (3 bytes header) + * PB0 to PB27 from AVI IF (28 bytes data) + * So it should look like this + * first block: | <token> <HB0-HB2> <DB0-DB3> | + * next 3 blocks: |<DB4-DB11>|<DB12-DB19>|<DB20-DB28>| + */ + + if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) { + DRM_ERROR("Invalid length of infoframes\n"); + return false; + } + + memcpy(&avi_if[1], frame, len); + + if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) { + DRM_DEBUG_KMS("Failed to write infoframe blocks\n"); + return false; + } + + return true; +} + +static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux, + const uint8_t *buffer, ssize_t len) +{ + int ret; + uint32_t val = 0; + uint32_t retry; + uint16_t reg; + const uint8_t *data = buffer; + + reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET; + while (val < len) { + /* DPCD write for AVI IF can fail on a slow FW day, so retry */ + for (retry = 0; retry < 5; retry++) { + ret = drm_dp_dpcd_write(aux, reg, (void *)data, 1); + if (ret == 1) { + break; + } else if (retry < 4) { + mdelay(50); + continue; + } else { + DRM_ERROR("DPCD write failed at:0x%x\n", reg); + return false; + } + } + val++; reg++; data++; + } + + val = 0; + reg = LSPCON_MCA_AVI_IF_CTRL; + ret = drm_dp_dpcd_read(aux, reg, &val, 1); + if (ret < 0) { + DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + return false; + } + + /* Tell the LSPCON chip about the infoframe: clear bit 1 and set bit 0 */ + val &= ~LSPCON_MCA_AVI_IF_HANDLED; + val |= LSPCON_MCA_AVI_IF_KICKOFF; + + ret = drm_dp_dpcd_write(aux, reg, &val, 1); + if (ret < 0) { + DRM_ERROR("DPCD write failed, address 0x%x\n", reg); + return false; + } + + val = 0; + ret = drm_dp_dpcd_read(aux, reg, &val, 1); + if (ret < 0) { + DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + return false; + } + + if (val == LSPCON_MCA_AVI_IF_HANDLED) + DRM_DEBUG_KMS("AVI IF handled by FW\n"); + + return true; +} + +void lspcon_write_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + const void *frame, ssize_t len) +{ + bool ret; + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base); + + /* LSPCON only needs AVI IF */ + if (type != HDMI_INFOFRAME_TYPE_AVI) + return; + + if (lspcon->vendor == LSPCON_VENDOR_MCA) + ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux, + frame, len); + else + ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux, + frame, len); + + if (!ret) { + DRM_ERROR("Failed to write AVI infoframes\n"); + return; + } + + DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n"); +} + +void lspcon_set_infoframes(struct intel_encoder *encoder, + bool enable, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + ssize_t ret; + union hdmi_infoframe frame; + uint8_t
buf[VIDEO_DIP_DATA_SIZE]; + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_lspcon *lspcon = &dig_port->lspcon; + struct intel_dp *intel_dp = &dig_port->dp; + struct drm_connector *connector = &intel_dp->attached_connector->base; + const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode; + bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported; + + if (!lspcon->active) { + DRM_ERROR("Writing infoframes while LSPCON disabled?\n"); + return; + } + + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, + mode, is_hdmi2_sink); + if (ret < 0) { + DRM_ERROR("couldn't fill AVI infoframe\n"); + return; + } + + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) { + if (crtc_state->lspcon_downsampling) + frame.avi.colorspace = HDMI_COLORSPACE_YUV420; + else + frame.avi.colorspace = HDMI_COLORSPACE_YUV444; + } else { + frame.avi.colorspace = HDMI_COLORSPACE_RGB; + } + + drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode, + crtc_state->limited_color_range ? + HDMI_QUANTIZATION_RANGE_LIMITED : + HDMI_QUANTIZATION_RANGE_FULL, + false, is_hdmi2_sink); + + ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf)); + if (ret < 0) { + DRM_ERROR("Failed to pack AVI IF\n"); + return; + } + + dig_port->write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, + buf, ret); +} + +bool lspcon_infoframe_enabled(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config) +{ + return enc_to_intel_lspcon(&encoder->base)->active; +} + void lspcon_resume(struct intel_lspcon *lspcon) { enum drm_lspcon_mode expected_mode; @@ -216,6 +541,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port) struct intel_lspcon *lspcon = &intel_dig_port->lspcon; struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_connector *connector = &dp->attached_connector->base; if (!HAS_LSPCON(dev_priv)) { DRM_ERROR("LSPCON is not supported on this platform\n"); @@ -230,25 +556,18 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port) return false; } - /* - * In the SW state machine, let's put LSPCON in PCON mode only. - * In this way, it will work with both HDMI 1.4 sinks as well as HDMI - * 2.0 sinks.
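To restate the Parade layout described in the comment above: a non-zero token byte plus the packed AVI infoframe (3 header bytes, up to 28 payload bytes) fill a 32-byte buffer, which then goes out as four 8-byte DPCD blocks with a firmware kickoff after each. A standalone sketch of just that packing and blocking arithmetic, with the DPCD write and kickoff handshake stubbed out by a printf:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PARADE_BUF_SIZE 32      /* token + HB0-HB2 + PB0-PB27 */
#define BLOCK_SIZE 8

/* Pack @frame (header + payload, at most 31 bytes) behind a non-zero
 * token byte, then emit it as four 8-byte blocks. */
static int send_parade_avi(const uint8_t *frame, size_t len)
{
        uint8_t buf[PARADE_BUF_SIZE] = { 1, }; /* token must be non-zero */
        int block;

        if (len > PARADE_BUF_SIZE - 1)
                return -1;
        memcpy(&buf[1], frame, len);

        for (block = 0; block < PARADE_BUF_SIZE / BLOCK_SIZE; block++) {
                const uint8_t *data = buf + block * BLOCK_SIZE;

                /* stand-in for the DPCD write + kickoff handshake */
                printf("block %d starts with 0x%02x\n", block, data[0]);
        }
        return 0;
}

int main(void)
{
        uint8_t frame[31] = { 0x82, 0x02, 0x0d }; /* AVI IF header bytes */

        return send_parade_avi(frame, sizeof(frame));
}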
- */ - if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) { - if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) { - DRM_ERROR("LSPCON mode change to PCON failed\n"); - return false; - } - } - if (!intel_dp_read_dpcd(dp)) { DRM_ERROR("LSPCON DPCD read failed\n"); return false; } - drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd)); + if (!lspcon_detect_vendor(lspcon)) { + DRM_ERROR("LSPCON vendor detection failed\n"); + return false; + } + connector->ycbcr_420_allowed = true; + lspcon->active = true; DRM_DEBUG_KMS("Success: LSPCON init\n"); return true; } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index f9f3b0885ba5..e6c5d985ea0a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -42,10 +42,6 @@ #include <linux/acpi.h> /* Private structure for the integrated LVDS support */ -struct intel_lvds_connector { - struct intel_connector base; -}; - struct intel_lvds_pps { /* 100us units */ int t1_t2; @@ -70,7 +66,7 @@ struct intel_lvds_encoder { struct intel_lvds_pps init_pps; u32 init_lvds_val; - struct intel_lvds_connector *attached_connector; + struct intel_connector *attached_connector; }; static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder) @@ -78,11 +74,6 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder) return container_of(encoder, struct intel_lvds_encoder, base.base); } -static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector) -{ - return container_of(connector, struct intel_lvds_connector, base.base); -} - bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t lvds_reg, enum pipe *pipe) { @@ -396,7 +387,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&intel_encoder->base); struct intel_connector *intel_connector = - &lvds_encoder->attached_connector->base; + lvds_encoder->attached_connector; struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); unsigned int lvds_bpp; @@ -418,6 +409,8 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, pipe_config->pipe_bpp = lvds_bpp; } + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + /* * We have timings from the BIOS for the panel, put them in * to the adjusted mode. 
The CRTC will be set up for this mode, @@ -461,15 +454,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force) */ static int intel_lvds_get_modes(struct drm_connector *connector) { - struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector); + struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_device *dev = connector->dev; struct drm_display_mode *mode; /* use cached edid if we have one */ - if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) - return drm_add_edid_modes(connector, lvds_connector->base.edid); + if (!IS_ERR_OR_NULL(intel_connector->edid)) + return drm_add_edid_modes(connector, intel_connector->edid); - mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode); + mode = drm_mode_duplicate(dev, intel_connector->panel.fixed_mode); if (mode == NULL) return 0; @@ -477,27 +470,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector) return 1; } -/** - * intel_lvds_destroy - unregister and free LVDS structures - * @connector: connector to free - * - * Unregister the DDC bus for this connector then free the driver private - * structure. - */ -static void intel_lvds_destroy(struct drm_connector *connector) -{ - struct intel_lvds_connector *lvds_connector = - to_lvds_connector(connector); - - if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) - kfree(lvds_connector->base.edid); - - intel_panel_fini(&lvds_connector->base.panel); - - drm_connector_cleanup(connector); - kfree(connector); -} - static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { .get_modes = intel_lvds_get_modes, .mode_valid = intel_lvds_mode_valid, @@ -511,7 +483,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { .atomic_set_property = intel_digital_connector_atomic_set_property, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, - .destroy = intel_lvds_destroy, + .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; @@ -802,8 +774,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) return i915_modparams.lvds_channel_mode == 2; /* single channel LVDS is limited to 112 MHz */ - if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock - > 112999) + if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999) return true; if (dmi_check_system(intel_dual_link_lvds)) @@ -858,7 +829,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) struct drm_device *dev = &dev_priv->drm; struct intel_lvds_encoder *lvds_encoder; struct intel_encoder *intel_encoder; - struct intel_lvds_connector *lvds_connector; struct intel_connector *intel_connector; struct drm_connector *connector; struct drm_encoder *encoder; @@ -911,23 +881,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) if (!lvds_encoder) return; - lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL); - if (!lvds_connector) { - kfree(lvds_encoder); - return; - } - - if (intel_connector_init(&lvds_connector->base) < 0) { - kfree(lvds_connector); + intel_connector = intel_connector_alloc(); + if (!intel_connector) { kfree(lvds_encoder); return; } - lvds_encoder->attached_connector = lvds_connector; + lvds_encoder->attached_connector = intel_connector; intel_encoder = &lvds_encoder->base; encoder = &intel_encoder->base; - intel_connector = &lvds_connector->base; connector = &intel_connector->base; 
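The compute_is_dual_link_lvds() logic in this hunk boils down to: obey the lvds_channel_mode module parameter when set, otherwise choose dual link whenever the fixed panel mode's pixel clock exceeds the ~112 MHz a single channel can carry, with the DMI quirk table as a final override. A minimal model of that decision, names illustrative:

#include <stdbool.h>
#include <stdio.h>

/* 0 = auto, 1 = force single channel, 2 = force dual channel,
 * mirroring i915.lvds_channel_mode */
static int lvds_channel_mode = 0;

static bool is_dual_link_lvds(int pixel_clock_khz)
{
        if (lvds_channel_mode > 0)
                return lvds_channel_mode == 2;

        /* Single-channel LVDS tops out at ~112 MHz pixel clock. */
        if (pixel_clock_khz > 112999)
                return true;

        /* (the driver additionally consults a DMI quirk table here) */
        return false;
}

int main(void)
{
        printf("138.5 MHz panel: %s link\n",
               is_dual_link_lvds(138500) ? "dual" : "single");
        return 0;
}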
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); @@ -1008,7 +971,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) } else { edid = ERR_PTR(-ENOENT); } - lvds_connector->base.edid = edid; + intel_connector->edid = edid; list_for_each_entry(scan, &connector->probed_modes, head) { if (scan->type & DRM_MODE_TYPE_PREFERRED) { @@ -1072,6 +1035,6 @@ failed: drm_connector_cleanup(connector); drm_encoder_cleanup(encoder); kfree(lvds_encoder); - kfree(lvds_connector); + intel_connector_free(intel_connector); return; } diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index e034b4166d32..b8f106d9ecf8 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -773,70 +773,6 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv) opregion->acpi->cadl[i] = 0; } -void intel_opregion_register(struct drm_i915_private *dev_priv) -{ - struct intel_opregion *opregion = &dev_priv->opregion; - - if (!opregion->header) - return; - - if (opregion->acpi) { - intel_didl_outputs(dev_priv); - intel_setup_cadls(dev_priv); - - /* Notify BIOS we are ready to handle ACPI video ext notifs. - * Right now, all the events are handled by the ACPI video module. - * We don't actually need to do anything with them. */ - opregion->acpi->csts = 0; - opregion->acpi->drdy = 1; - - opregion->acpi_notifier.notifier_call = intel_opregion_video_event; - register_acpi_notifier(&opregion->acpi_notifier); - } - - if (opregion->asle) { - opregion->asle->tche = ASLE_TCHE_BLC_EN; - opregion->asle->ardy = ASLE_ARDY_READY; - } -} - -void intel_opregion_unregister(struct drm_i915_private *dev_priv) -{ - struct intel_opregion *opregion = &dev_priv->opregion; - - if (!opregion->header) - return; - - if (opregion->asle) - opregion->asle->ardy = ASLE_ARDY_NOT_READY; - - cancel_work_sync(&dev_priv->opregion.asle_work); - - if (opregion->acpi) { - opregion->acpi->drdy = 0; - - unregister_acpi_notifier(&opregion->acpi_notifier); - opregion->acpi_notifier.notifier_call = NULL; - } - - /* just clear all opregion memory pointers now */ - memunmap(opregion->header); - if (opregion->rvda) { - memunmap(opregion->rvda); - opregion->rvda = NULL; - } - if (opregion->vbt_firmware) { - kfree(opregion->vbt_firmware); - opregion->vbt_firmware = NULL; - } - opregion->header = NULL; - opregion->acpi = NULL; - opregion->swsci = NULL; - opregion->asle = NULL; - opregion->vbt = NULL; - opregion->lid_state = NULL; -} - static void swsci_setup(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->opregion; @@ -1115,3 +1051,97 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) return ret - 1; } + +void intel_opregion_register(struct drm_i915_private *i915) +{ + struct intel_opregion *opregion = &i915->opregion; + + if (!opregion->header) + return; + + if (opregion->acpi) { + opregion->acpi_notifier.notifier_call = + intel_opregion_video_event; + register_acpi_notifier(&opregion->acpi_notifier); + } + + intel_opregion_resume(i915); +} + +void intel_opregion_resume(struct drm_i915_private *i915) +{ + struct intel_opregion *opregion = &i915->opregion; + + if (!opregion->header) + return; + + if (opregion->acpi) { + intel_didl_outputs(i915); + intel_setup_cadls(i915); + + /* + * Notify BIOS we are ready to handle ACPI video ext notifs. + * Right now, all the events are handled by the ACPI video + * module. We don't actually need to do anything with them. 
+ */ + opregion->acpi->csts = 0; + opregion->acpi->drdy = 1; + } + + if (opregion->asle) { + opregion->asle->tche = ASLE_TCHE_BLC_EN; + opregion->asle->ardy = ASLE_ARDY_READY; + } + + intel_opregion_notify_adapter(i915, PCI_D0); +} + +void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) +{ + struct intel_opregion *opregion = &i915->opregion; + + if (!opregion->header) + return; + + intel_opregion_notify_adapter(i915, state); + + if (opregion->asle) + opregion->asle->ardy = ASLE_ARDY_NOT_READY; + + cancel_work_sync(&i915->opregion.asle_work); + + if (opregion->acpi) + opregion->acpi->drdy = 0; +} + +void intel_opregion_unregister(struct drm_i915_private *i915) +{ + struct intel_opregion *opregion = &i915->opregion; + + intel_opregion_suspend(i915, PCI_D1); + + if (!opregion->header) + return; + + if (opregion->acpi_notifier.notifier_call) { + unregister_acpi_notifier(&opregion->acpi_notifier); + opregion->acpi_notifier.notifier_call = NULL; + } + + /* just clear all opregion memory pointers now */ + memunmap(opregion->header); + if (opregion->rvda) { + memunmap(opregion->rvda); + opregion->rvda = NULL; + } + if (opregion->vbt_firmware) { + kfree(opregion->vbt_firmware); + opregion->vbt_firmware = NULL; + } + opregion->header = NULL; + opregion->acpi = NULL; + opregion->swsci = NULL; + opregion->asle = NULL; + opregion->vbt = NULL; + opregion->lid_state = NULL; +} diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h index e8498a8cda3d..4aa68ffbd30e 100644 --- a/drivers/gpu/drm/i915/intel_opregion.h +++ b/drivers/gpu/drm/i915/intel_opregion.h @@ -57,8 +57,14 @@ struct intel_opregion { #ifdef CONFIG_ACPI int intel_opregion_setup(struct drm_i915_private *dev_priv); + void intel_opregion_register(struct drm_i915_private *dev_priv); void intel_opregion_unregister(struct drm_i915_private *dev_priv); + +void intel_opregion_resume(struct drm_i915_private *dev_priv); +void intel_opregion_suspend(struct drm_i915_private *dev_priv, + pci_power_t state); + void intel_opregion_asle_intr(struct drm_i915_private *dev_priv); int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable); @@ -81,6 +87,15 @@ static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { } +static inline void intel_opregion_resume(struct drm_i915_private *dev_priv) +{ +} + +static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv, + pci_power_t state) +{ +} + static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) { } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 72eb7e48e8bc..20ea7c99d13a 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1338,7 +1338,7 @@ err_put_bo: return err; } -void intel_setup_overlay(struct drm_i915_private *dev_priv) +void intel_overlay_setup(struct drm_i915_private *dev_priv) { struct intel_overlay *overlay; int ret; @@ -1387,7 +1387,7 @@ out_free: kfree(overlay); } -void intel_cleanup_overlay(struct drm_i915_private *dev_priv) +void intel_overlay_cleanup(struct drm_i915_private *dev_priv) { struct intel_overlay *overlay; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 4a9f139e7b73..e6cd7b55c018 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -111,7 +111,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc, /* Native modes don't need fitting */ if 
(adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w && adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h && - !pipe_config->ycbcr420) + pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) goto done; switch (fitting_mode) { @@ -505,7 +505,7 @@ static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe) static u32 vlv_get_backlight(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - enum pipe pipe = intel_get_pipe_from_connector(connector); + enum pipe pipe = intel_connector_get_pipe(connector); return _vlv_get_backlight(dev_priv, pipe); } @@ -763,7 +763,7 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta struct intel_panel *panel = &connector->panel; /* Disable the backlight */ - pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS); + intel_panel_actually_set_backlight(old_conn_state, 0); usleep_range(2000, 3000); pwm_disable(panel->backlight.pwm); } @@ -1814,11 +1814,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) return 0; } -void intel_panel_destroy_backlight(struct drm_connector *connector) +static void intel_panel_destroy_backlight(struct intel_panel *panel) { - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_panel *panel = &intel_connector->panel; - /* dispose of the pwm */ if (panel->backlight.pwm) pwm_put(panel->backlight.pwm); @@ -1923,6 +1920,8 @@ void intel_panel_fini(struct intel_panel *panel) struct intel_connector *intel_connector = container_of(panel, struct intel_connector, panel); + intel_panel_destroy_backlight(panel); + if (panel->fixed_mode) drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 1db9b8328275..a26b4eddda25 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, uint32_t method1, method2; int cpp; + if (mem_value == 0) + return U32_MAX; + if (!intel_wm_plane_visible(cstate, pstate)) return 0; @@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, uint32_t method1, method2; int cpp; + if (mem_value == 0) + return U32_MAX; + if (!intel_wm_plane_visible(cstate, pstate)) return 0; @@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, { int cpp; + if (mem_value == 0) + return U32_MAX; + if (!intel_wm_plane_visible(cstate, pstate)) return 0; @@ -2881,8 +2890,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, * any underrun. If not able to get Dimm info assume 16GB dimm * to avoid any underrun. */ - if (!dev_priv->dram_info.valid_dimm || - dev_priv->dram_info.is_16gb_dimm) + if (dev_priv->dram_info.is_16gb_dimm) wm[0] += 1; } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { @@ -3009,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); } +static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) +{ + /* + * On some SNB machines (Thinkpad X220 Tablet at least) + * LP3 usage can cause vblank interrupts to be lost. + * The DEIIR bit will go high but it looks like the CPU + * never gets interrupted. 
+ * + * It's not clear whether other interrupt sources could + * be affected or if this is somehow limited to vblank + * interrupts only. To play it safe we disable LP3 + * watermarks entirely. + */ + if (dev_priv->wm.pri_latency[3] == 0 && + dev_priv->wm.spr_latency[3] == 0 && + dev_priv->wm.cur_latency[3] == 0) + return; + + dev_priv->wm.pri_latency[3] = 0; + dev_priv->wm.spr_latency[3] = 0; + dev_priv->wm.cur_latency[3] = 0; + + DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n"); + intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); +} + static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) { intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); @@ -3025,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); - if (IS_GEN6(dev_priv)) + if (IS_GEN6(dev_priv)) { snb_wm_latency_quirk(dev_priv); + snb_wm_lp3_irq_quirk(dev_priv); + } } static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) @@ -3160,7 +3198,8 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, * and after the vblank. */ *a = newstate->wm.ilk.optimal; - if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base)) + if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) || + intel_state->skip_intermediate_wm) return 0; a->pipe_enabled |= b->pipe_enabled; @@ -3612,15 +3651,8 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state) static bool intel_has_sagv(struct drm_i915_private *dev_priv) { - if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || - IS_CANNONLAKE(dev_priv)) - return true; - - if (IS_SKYLAKE(dev_priv) && - dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED) - return true; - - return false; + return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) && + dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED; } /* @@ -3784,7 +3816,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, const struct intel_crtc_state *cstate, - const unsigned int total_data_rate, + const u64 total_data_rate, const int num_active, struct skl_ddb_allocation *ddb) { @@ -3798,12 +3830,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, return ddb_size - 4; /* 4 blocks for bypass path allocation */ adjusted_mode = &cstate->base.adjusted_mode; - total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode); + total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode); /* * 12GB/s is maximum BW supported by single DBuf slice.
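To make the slice check that follows concrete: total_data_rate here is in bytes per frame, so multiplying by the refresh rate yields the bandwidth the DBuf must sustain. A standalone sketch of the decision (illustrative userspace C; the decimal definition of GBps() is an assumption, the real macro lives in the driver headers):

#include <stdint.h>
#include <stdio.h>

#define GBps(x) ((uint64_t)(x) * 1000000000ULL) /* assumed decimal GB/s */

static int enabled_dbuf_slices(uint64_t bytes_per_frame, int vrefresh,
			       int num_active)
{
	uint64_t total_data_bw = bytes_per_frame * vrefresh;

	return (num_active > 1 || total_data_bw >= GBps(12)) ? 2 : 1;
}

int main(void)
{
	/* One 3840x2160 XRGB plane at 60 Hz: ~2 GB/s, one slice suffices. */
	printf("slices=%d\n", enabled_dbuf_slices(3840ULL * 2160 * 4, 60, 1));
	return 0;
}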
*/ - if (total_data_bw >= GBps(12) || num_active > 1) { + if (num_active > 1 || total_data_bw >= GBps(12)) { ddb->enabled_slices = 2; } else { ddb->enabled_slices = 1; @@ -3814,16 +3846,15 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, } static void -skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, +skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, const struct intel_crtc_state *cstate, - const unsigned int total_data_rate, + const u64 total_data_rate, struct skl_ddb_allocation *ddb, struct skl_ddb_entry *alloc, /* out */ int *num_active /* out */) { struct drm_atomic_state *state = cstate->base.state; struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *for_crtc = cstate->base.crtc; const struct drm_crtc_state *crtc_state; const struct drm_crtc *crtc; @@ -3920,73 +3951,68 @@ static void skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, const enum pipe pipe, const enum plane_id plane_id, - struct skl_ddb_allocation *ddb /* out */) + struct skl_ddb_entry *ddb_y, + struct skl_ddb_entry *ddb_uv) { - u32 val, val2 = 0; - int fourcc, pixel_format; + u32 val, val2; + u32 fourcc = 0; /* Cursor doesn't support NV12/planar, so no extra calculation needed */ if (plane_id == PLANE_CURSOR) { val = I915_READ(CUR_BUF_CFG(pipe)); - skl_ddb_entry_init_from_hw(dev_priv, - &ddb->plane[pipe][plane_id], val); + skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); return; } val = I915_READ(PLANE_CTL(pipe, plane_id)); /* No DDB allocated for disabled planes */ - if (!(val & PLANE_CTL_ENABLE)) - return; + if (val & PLANE_CTL_ENABLE) + fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK, + val & PLANE_CTL_ORDER_RGBX, + val & PLANE_CTL_ALPHA_MASK); - pixel_format = val & PLANE_CTL_FORMAT_MASK; - fourcc = skl_format_to_fourcc(pixel_format, - val & PLANE_CTL_ORDER_RGBX, - val & PLANE_CTL_ALPHA_MASK); - - val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); - /* - * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed - * registers for now. 
- */ - if (INTEL_GEN(dev_priv) < 11) + if (INTEL_GEN(dev_priv) >= 11) { + val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); + skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); + } else { + val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); - if (fourcc == DRM_FORMAT_NV12) { - skl_ddb_entry_init_from_hw(dev_priv, - &ddb->plane[pipe][plane_id], val2); - skl_ddb_entry_init_from_hw(dev_priv, - &ddb->uv_plane[pipe][plane_id], val); - } else { - skl_ddb_entry_init_from_hw(dev_priv, - &ddb->plane[pipe][plane_id], val); + if (fourcc == DRM_FORMAT_NV12) + swap(val, val2); + + skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); + skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2); } } -void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, - struct skl_ddb_allocation *ddb /* out */) +void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, + struct skl_ddb_entry *ddb_y, + struct skl_ddb_entry *ddb_uv) { - struct intel_crtc *crtc; - - memset(ddb, 0, sizeof(*ddb)); - - ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum intel_display_power_domain power_domain; + enum pipe pipe = crtc->pipe; + enum plane_id plane_id; - for_each_intel_crtc(&dev_priv->drm, crtc) { - enum intel_display_power_domain power_domain; - enum plane_id plane_id; - enum pipe pipe = crtc->pipe; + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return; - power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) - continue; + for_each_plane_id_on_crtc(crtc, plane_id) + skl_ddb_get_hw_plane_state(dev_priv, pipe, + plane_id, + &ddb_y[plane_id], + &ddb_uv[plane_id]); - for_each_plane_id_on_crtc(crtc, plane_id) - skl_ddb_get_hw_plane_state(dev_priv, pipe, - plane_id, ddb); + intel_display_power_put(dev_priv, power_domain); +} - intel_display_power_put(dev_priv, power_domain); - } +void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, + struct skl_ddb_allocation *ddb /* out */) +{ + ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv); } /* @@ -4139,23 +4165,24 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, return 0; } -static unsigned int +static u64 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, - const struct drm_plane_state *pstate, + const struct intel_plane_state *intel_pstate, const int plane) { - struct intel_plane *intel_plane = to_intel_plane(pstate->plane); - struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); + struct intel_plane *intel_plane = + to_intel_plane(intel_pstate->base.plane); uint32_t data_rate; uint32_t width = 0, height = 0; struct drm_framebuffer *fb; u32 format; uint_fixed_16_16_t down_scale_amount; + u64 rate; if (!intel_pstate->base.visible) return 0; - fb = pstate->fb; + fb = intel_pstate->base.fb; format = fb->format->format; if (intel_plane->id == PLANE_CURSOR) @@ -4177,28 +4204,26 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, height /= 2; } - data_rate = width * height * fb->format->cpp[plane]; + data_rate = width * height; down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate); - return mul_round_up_u32_fixed16(data_rate, down_scale_amount); + rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount); + + rate *= fb->format->cpp[plane]; + return rate; } -/* - * We don't overflow 32 bits. 
Worst case is 3 planes enabled, each fetching - * a 8192x4096@32bpp framebuffer: - * 3 * 4096 * 8192 * 4 < 2^32 - */ -static unsigned int +static u64 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, - unsigned int *plane_data_rate, - unsigned int *uv_plane_data_rate) + u64 *plane_data_rate, + u64 *uv_plane_data_rate) { struct drm_crtc_state *cstate = &intel_cstate->base; struct drm_atomic_state *state = cstate->state; struct drm_plane *plane; const struct drm_plane_state *pstate; - unsigned int total_data_rate = 0; + u64 total_data_rate = 0; if (WARN_ON(!state)) return 0; @@ -4206,26 +4231,81 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, /* Calculate and cache data rate for each plane */ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { enum plane_id plane_id = to_intel_plane(plane)->id; - unsigned int rate; + u64 rate; + const struct intel_plane_state *intel_pstate = + to_intel_plane_state(pstate); /* packed/y */ rate = skl_plane_relative_data_rate(intel_cstate, - pstate, 0); + intel_pstate, 0); plane_data_rate[plane_id] = rate; - total_data_rate += rate; /* uv-plane */ rate = skl_plane_relative_data_rate(intel_cstate, - pstate, 1); + intel_pstate, 1); uv_plane_data_rate[plane_id] = rate; - total_data_rate += rate; } return total_data_rate; } +static u64 +icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, + u64 *plane_data_rate) +{ + struct drm_crtc_state *cstate = &intel_cstate->base; + struct drm_atomic_state *state = cstate->state; + struct drm_plane *plane; + const struct drm_plane_state *pstate; + u64 total_data_rate = 0; + + if (WARN_ON(!state)) + return 0; + + /* Calculate and cache data rate for each plane */ + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { + const struct intel_plane_state *intel_pstate = + to_intel_plane_state(pstate); + enum plane_id plane_id = to_intel_plane(plane)->id; + u64 rate; + + if (!intel_pstate->linked_plane) { + rate = skl_plane_relative_data_rate(intel_cstate, + intel_pstate, 0); + plane_data_rate[plane_id] = rate; + total_data_rate += rate; + } else { + enum plane_id y_plane_id; + + /* + * The slave plane might not iterate in + * drm_atomic_crtc_state_for_each_plane_state(), + * and needs the master plane state which may be + * NULL if we try get_new_plane_state(), so we + * always calculate from the master. 
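The master/slave bookkeeping in icl_get_total_relative_data_rate() above is easier to follow with numbers. A standalone sketch (illustrative userspace C; the plane ids and the 1920x1080 NV12 mode are assumed examples) showing the Y rate being credited to the linked plane's slot while the master keeps the UV rate:

#include <stdint.h>
#include <stdio.h>

enum plane_id { PLANE_1, PLANE_2, NUM_PLANES };

int main(void)
{
	uint64_t plane_data_rate[NUM_PLANES] = { 0 };
	uint64_t total_data_rate;

	/* NV12: full-res 8bpp Y plane, quarter-res 16bpp CbCr plane. */
	uint64_t y_rate  = 1920ULL * 1080 * 1;
	uint64_t uv_rate = (1920ULL / 2) * (1080 / 2) * 2;

	plane_data_rate[PLANE_2] = y_rate;  /* linked Y plane's slot */
	plane_data_rate[PLANE_1] = uv_rate; /* master plane's own slot */
	total_data_rate = y_rate + uv_rate;

	printf("Y=%llu UV=%llu total=%llu bytes/frame\n",
	       (unsigned long long)y_rate,
	       (unsigned long long)uv_rate,
	       (unsigned long long)total_data_rate);
	return 0;
}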
+ */ + if (intel_pstate->slave) + continue; + + /* Y plane rate is calculated on the slave */ + rate = skl_plane_relative_data_rate(intel_cstate, + intel_pstate, 0); + y_plane_id = intel_pstate->linked_plane->id; + plane_data_rate[y_plane_id] = rate; + total_data_rate += rate; + + rate = skl_plane_relative_data_rate(intel_cstate, + intel_pstate, 1); + plane_data_rate[plane_id] = rate; + total_data_rate += rate; + } + } + + return total_data_rate; +} + static uint16_t skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane) { @@ -4298,15 +4378,25 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active, drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) { enum plane_id plane_id = to_intel_plane(plane)->id; + struct intel_plane_state *plane_state = to_intel_plane_state(pstate); if (plane_id == PLANE_CURSOR) continue; - if (!pstate->visible) + /* slave plane must be invisible and calculated from master */ + if (!pstate->visible || WARN_ON(plane_state->slave)) continue; - minimum[plane_id] = skl_ddb_min_alloc(pstate, 0); - uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1); + if (!plane_state->linked_plane) { + minimum[plane_id] = skl_ddb_min_alloc(pstate, 0); + uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1); + } else { + enum plane_id y_plane_id = + plane_state->linked_plane->id; + + minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0); + minimum[plane_id] = skl_ddb_min_alloc(pstate, 1); + } } minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active); @@ -4318,23 +4408,22 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, { struct drm_atomic_state *state = cstate->base.state; struct drm_crtc *crtc = cstate->base.crtc; - struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; uint16_t alloc_size, start; uint16_t minimum[I915_MAX_PLANES] = {}; uint16_t uv_minimum[I915_MAX_PLANES] = {}; - unsigned int total_data_rate; + u64 total_data_rate; enum plane_id plane_id; int num_active; - unsigned int plane_data_rate[I915_MAX_PLANES] = {}; - unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {}; + u64 plane_data_rate[I915_MAX_PLANES] = {}; + u64 uv_plane_data_rate[I915_MAX_PLANES] = {}; uint16_t total_min_blocks = 0; /* Clear the partitioning for disabled planes. 
*/ - memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); - memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe])); + memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y)); + memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv)); if (WARN_ON(!state)) return 0; @@ -4344,11 +4433,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, return 0; } - total_data_rate = skl_get_total_relative_data_rate(cstate, - plane_data_rate, - uv_plane_data_rate); - skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb, - alloc, &num_active); + if (INTEL_GEN(dev_priv) < 11) + total_data_rate = + skl_get_total_relative_data_rate(cstate, + plane_data_rate, + uv_plane_data_rate); + else + total_data_rate = + icl_get_total_relative_data_rate(cstate, + plane_data_rate); + + skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate, + ddb, alloc, &num_active); alloc_size = skl_ddb_entry_size(alloc); if (alloc_size == 0) return 0; @@ -4374,8 +4470,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, } alloc_size -= total_min_blocks; - ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR]; - ddb->plane[pipe][PLANE_CURSOR].end = alloc->end; + cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR]; + cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end; /* * 2. Distribute the remaining space in proportion to the amount of @@ -4388,7 +4484,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, start = alloc->start; for_each_plane_id_on_crtc(intel_crtc, plane_id) { - unsigned int data_rate, uv_data_rate; + u64 data_rate, uv_data_rate; uint16_t plane_blocks, uv_plane_blocks; if (plane_id == PLANE_CURSOR) @@ -4402,13 +4498,12 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, * result is < available as data_rate / total_data_rate < 1 */ plane_blocks = minimum[plane_id]; - plane_blocks += div_u64((uint64_t)alloc_size * data_rate, - total_data_rate); + plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate); /* Leave disabled planes at (0,0) */ if (data_rate) { - ddb->plane[pipe][plane_id].start = start; - ddb->plane[pipe][plane_id].end = start + plane_blocks; + cstate->wm.skl.plane_ddb_y[plane_id].start = start; + cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks; } start += plane_blocks; @@ -4417,12 +4512,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, uv_data_rate = uv_plane_data_rate[plane_id]; uv_plane_blocks = uv_minimum[plane_id]; - uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate, - total_data_rate); + uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate); + + /* Gen11+ uses a separate plane for UV watermarks */ + WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks); if (uv_data_rate) { - ddb->uv_plane[pipe][plane_id].start = start; - ddb->uv_plane[pipe][plane_id].end = + cstate->wm.skl.plane_ddb_uv[plane_id].start = start; + cstate->wm.skl.plane_ddb_uv[plane_id].end = start + uv_plane_blocks; } @@ -4476,7 +4573,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate, } static uint_fixed_16_16_t -intel_get_linetime_us(struct intel_crtc_state *cstate) +intel_get_linetime_us(const struct intel_crtc_state *cstate) { uint32_t pixel_rate; uint32_t crtc_htotal; @@ -4519,12 +4616,12 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, } static int -skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, - struct intel_crtc_state *cstate, 
+skl_compute_plane_wm_params(const struct intel_crtc_state *cstate, const struct intel_plane_state *intel_pstate, - struct skl_wm_params *wp, int plane_id) + struct skl_wm_params *wp, int color_plane) { struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_plane_state *pstate = &intel_pstate->base; const struct drm_framebuffer *fb = pstate->fb; uint32_t interm_pbpl; @@ -4532,11 +4629,8 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, to_intel_atomic_state(cstate->base.state); bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); - if (!intel_wm_plane_visible(cstate, intel_pstate)) - return 0; - /* only NV12 format has two planes */ - if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) { + if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) { DRM_DEBUG_KMS("Non NV12 format have single plane\n"); return -EINVAL; } @@ -4561,10 +4655,10 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, wp->width = drm_rect_width(&intel_pstate->base.src) >> 16; } - if (plane_id == 1 && wp->is_planar) + if (color_plane == 1 && wp->is_planar) wp->width /= 2; - wp->cpp = fb->format->cpp[plane_id]; + wp->cpp = fb->format->cpp[color_plane]; wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); @@ -4626,8 +4720,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, return 0; } -static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, - struct intel_crtc_state *cstate, +static int skl_compute_plane_wm(const struct intel_crtc_state *cstate, const struct intel_plane_state *intel_pstate, uint16_t ddb_allocation, int level, @@ -4635,6 +4728,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */) { + struct drm_i915_private *dev_priv = + to_i915(intel_pstate->base.plane->dev); const struct drm_plane_state *pstate = &intel_pstate->base; uint32_t latency = dev_priv->wm.skl_latency[level]; uint_fixed_16_16_t method1, method2; @@ -4645,11 +4740,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); uint32_t min_disp_buf_needed; - if (latency == 0 || - !intel_wm_plane_visible(cstate, intel_pstate)) { - result->plane_en = false; - return 0; - } + if (latency == 0) + return level == 0 ? 
-EINVAL : 0; /* Display WA #1141: kbl,cfl */ if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || @@ -4672,15 +4764,24 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, } else { if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal / wp->dbuf_block_size < 1) && - (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) + (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { selected_result = method2; - else if (ddb_allocation >= - fixed16_to_u32_round_up(wp->plane_blocks_per_line)) - selected_result = min_fixed16(method1, method2); - else if (latency >= wp->linetime_us) - selected_result = min_fixed16(method1, method2); - else + } else if (ddb_allocation >= + fixed16_to_u32_round_up(wp->plane_blocks_per_line)) { + if (IS_GEN9(dev_priv) && + !IS_GEMINILAKE(dev_priv)) + selected_result = min_fixed16(method1, method2); + else + selected_result = method2; + } else if (latency >= wp->linetime_us) { + if (IS_GEN9(dev_priv) && + !IS_GEMINILAKE(dev_priv)) + selected_result = min_fixed16(method1, method2); + else + selected_result = method2; + } else { selected_result = method1; + } } res_blocks = fixed16_to_u32_round_up(selected_result) + 1; @@ -4737,8 +4838,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if ((level > 0 && res_lines > 31) || res_blocks >= ddb_allocation || min_disp_buf_needed >= ddb_allocation) { - result->plane_en = false; - /* * If there are no valid level 0 watermarks, then we can't * support this display configuration. @@ -4756,17 +4855,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, } } - /* - * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A) - * disable wm level 1-7 on NV12 planes - */ - if (wp->is_planar && level >= 1 && - (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || - IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) { - result->plane_en = false; - return 0; - } - /* The number of lines are ignored for the level 0 watermark. */ result->plane_res_b = res_blocks; result->plane_res_l = res_lines; @@ -4776,43 +4864,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, } static int -skl_compute_wm_levels(const struct drm_i915_private *dev_priv, - struct skl_ddb_allocation *ddb, - struct intel_crtc_state *cstate, +skl_compute_wm_levels(const struct intel_crtc_state *cstate, const struct intel_plane_state *intel_pstate, + uint16_t ddb_blocks, const struct skl_wm_params *wm_params, - struct skl_plane_wm *wm, - int plane_id) + struct skl_wm_level *levels) { - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); - struct drm_plane *plane = intel_pstate->base.plane; - struct intel_plane *intel_plane = to_intel_plane(plane); - uint16_t ddb_blocks; - enum pipe pipe = intel_crtc->pipe; + struct drm_i915_private *dev_priv = + to_i915(intel_pstate->base.plane->dev); int level, max_level = ilk_wm_max_level(dev_priv); - enum plane_id intel_plane_id = intel_plane->id; + struct skl_wm_level *result_prev = &levels[0]; int ret; - if (WARN_ON(!intel_pstate->base.fb)) - return -EINVAL; - - ddb_blocks = plane_id ? - skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) : - skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]); - for (level = 0; level <= max_level; level++) { - struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] : - &wm->wm[level]; - struct skl_wm_level *result_prev; + struct skl_wm_level *result = &levels[level]; - if (level) - result_prev = plane_id ? &wm->uv_wm[level - 1] : - &wm->wm[level - 1]; - else - result_prev = plane_id ? 
&wm->uv_wm[0] : &wm->wm[0]; - - ret = skl_compute_plane_wm(dev_priv, - cstate, + ret = skl_compute_plane_wm(cstate, intel_pstate, ddb_blocks, level, @@ -4821,16 +4888,15 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv, result); if (ret) return ret; - } - if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12) - wm->is_planar = true; + result_prev = result; + } return 0; } static uint32_t -skl_compute_linetime_wm(struct intel_crtc_state *cstate) +skl_compute_linetime_wm(const struct intel_crtc_state *cstate) { struct drm_atomic_state *state = cstate->base.state; struct drm_i915_private *dev_priv = to_i915(state->dev); @@ -4852,42 +4918,50 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate) return linetime_wm; } -static void skl_compute_transition_wm(struct intel_crtc_state *cstate, - struct skl_wm_params *wp, - struct skl_wm_level *wm_l0, - uint16_t ddb_allocation, - struct skl_wm_level *trans_wm /* out */) +static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, + const struct skl_wm_params *wp, + struct skl_plane_wm *wm, + uint16_t ddb_allocation) { struct drm_device *dev = cstate->base.crtc->dev; const struct drm_i915_private *dev_priv = to_i915(dev); uint16_t trans_min, trans_y_tile_min; const uint16_t trans_amount = 10; /* This is configurable amount */ - uint16_t trans_offset_b, res_blocks; - - if (!cstate->base.active) - goto exit; + uint16_t wm0_sel_res_b, trans_offset_b, res_blocks; /* Transition WM are not recommended by HW team for GEN9 */ if (INTEL_GEN(dev_priv) <= 9) - goto exit; + return; /* Transition WM don't make any sense if ipc is disabled */ if (!dev_priv->ipc_enabled) - goto exit; + return; - trans_min = 0; - if (INTEL_GEN(dev_priv) >= 10) + trans_min = 14; + if (INTEL_GEN(dev_priv) >= 11) trans_min = 4; trans_offset_b = trans_min + trans_amount; + /* + * The spec asks for Selected Result Blocks for wm0 (the real value), + * not Result Blocks (the integer value). Pay attention to the capital + * letters. The value wm_l0->plane_res_b is actually Result Blocks, but + * since Result Blocks is the ceiling of Selected Result Blocks plus 1, + * and since we later will have to get the ceiling of the sum in the + * transition watermarks calculation, we can just pretend Selected + * Result Blocks is Result Blocks minus 1 and it should work for the + * current platforms. 
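Worked through with example numbers, the non y-tiled transition watermark math above reduces to the following standalone sketch (illustrative userspace C for a pre-gen11 part, so trans_min = 14 per the hunk; the wm0 block count is an assumed input):

#include <stdint.h>
#include <stdio.h>

static uint16_t trans_wm_blocks(uint16_t wm0_res_b)
{
	const uint16_t trans_min = 14;    /* pre-gen11, per the hunk above */
	const uint16_t trans_amount = 10; /* the "configurable amount" */
	uint16_t wm0_sel_res_b = wm0_res_b - 1; /* Result -> Selected Result */

	/* non y-tiled case, without the CNL A0 workaround block */
	uint16_t res_blocks = wm0_sel_res_b + trans_min + trans_amount;

	return res_blocks + 1; /* ceiling of the Selected Result sum */
}

int main(void)
{
	printf("wm0=40 blocks -> trans_wm=%u blocks\n", trans_wm_blocks(40));
	return 0;
}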
+ */ + wm0_sel_res_b = wm->wm[0].plane_res_b - 1; + if (wp->y_tiled) { trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2, wp->y_tile_minimum); - res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) + + res_blocks = max(wm0_sel_res_b, trans_y_tile_min) + trans_offset_b; } else { - res_blocks = wm_l0->plane_res_b + trans_offset_b; + res_blocks = wm0_sel_res_b + trans_offset_b; /* WA BUG:1938466 add one block for non y-tile planes */ if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0)) @@ -4898,25 +4972,132 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate, res_blocks += 1; if (res_blocks < ddb_allocation) { - trans_wm->plane_res_b = res_blocks; - trans_wm->plane_en = true; - return; + wm->trans_wm.plane_res_b = res_blocks; + wm->trans_wm.plane_en = true; + } +} + +static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + enum plane_id plane_id, int color_plane) +{ + struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; + u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]); + struct skl_wm_params wm_params; + int ret; + + ret = skl_compute_plane_wm_params(crtc_state, plane_state, + &wm_params, color_plane); + if (ret) + return ret; + + ret = skl_compute_wm_levels(crtc_state, plane_state, + ddb_blocks, &wm_params, wm->wm); + if (ret) + return ret; + + skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks); + + return 0; +} + +static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + enum plane_id plane_id) +{ + struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; + u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]); + struct skl_wm_params wm_params; + int ret; + + wm->is_planar = true; + + /* uv plane watermarks must also be validated for NV12/Planar */ + ret = skl_compute_plane_wm_params(crtc_state, plane_state, + &wm_params, 1); + if (ret) + return ret; + + ret = skl_compute_wm_levels(crtc_state, plane_state, + ddb_blocks, &wm_params, wm->uv_wm); + if (ret) + return ret; + + return 0; +} + +static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm, + struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + const struct drm_framebuffer *fb = plane_state->base.fb; + enum plane_id plane_id = plane->id; + int ret; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + ret = skl_build_plane_wm_single(crtc_state, plane_state, + plane_id, 0); + if (ret) + return ret; + + if (fb->format->is_yuv && fb->format->num_planes > 1) { + ret = skl_build_plane_wm_uv(crtc_state, plane_state, + plane_id); + if (ret) + return ret; } -exit: - trans_wm->plane_en = false; + return 0; +} + +static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm, + struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id; + int ret; + + /* Watermarks calculated in master */ + if (plane_state->slave) + return 0; + + if (plane_state->linked_plane) { + const struct drm_framebuffer *fb = plane_state->base.fb; + enum plane_id y_plane_id = plane_state->linked_plane->id; + + WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)); + WARN_ON(!fb->format->is_yuv || + fb->format->num_planes == 1); + + ret = skl_build_plane_wm_single(crtc_state, plane_state, + 
y_plane_id, 0); + if (ret) + return ret; + + ret = skl_build_plane_wm_single(crtc_state, plane_state, + plane_id, 1); + if (ret) + return ret; + } else if (intel_wm_plane_visible(crtc_state, plane_state)) { + ret = skl_build_plane_wm_single(crtc_state, plane_state, + plane_id, 0); + if (ret) + return ret; + } + + return 0; } static int skl_build_pipe_wm(struct intel_crtc_state *cstate, - struct skl_ddb_allocation *ddb, struct skl_pipe_wm *pipe_wm) { - struct drm_device *dev = cstate->base.crtc->dev; + struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); struct drm_crtc_state *crtc_state = &cstate->base; - const struct drm_i915_private *dev_priv = to_i915(dev); struct drm_plane *plane; const struct drm_plane_state *pstate; - struct skl_plane_wm *wm; int ret; /* @@ -4928,44 +5109,15 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate, drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { const struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); - enum plane_id plane_id = to_intel_plane(plane)->id; - struct skl_wm_params wm_params; - enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe; - uint16_t ddb_blocks; - - wm = &pipe_wm->planes[plane_id]; - ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); - ret = skl_compute_plane_wm_params(dev_priv, cstate, - intel_pstate, &wm_params, 0); - if (ret) - return ret; - - ret = skl_compute_wm_levels(dev_priv, ddb, cstate, - intel_pstate, &wm_params, wm, 0); + if (INTEL_GEN(dev_priv) >= 11) + ret = icl_build_plane_wm(pipe_wm, + cstate, intel_pstate); + else + ret = skl_build_plane_wm(pipe_wm, + cstate, intel_pstate); if (ret) return ret; - - skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0], - ddb_blocks, &wm->trans_wm); - - /* uv plane watermarks must also be validated for NV12/Planar */ - if (wm_params.is_planar) { - memset(&wm_params, 0, sizeof(struct skl_wm_params)); - wm->is_planar = true; - - ret = skl_compute_plane_wm_params(dev_priv, cstate, - intel_pstate, - &wm_params, 1); - if (ret) - return ret; - - ret = skl_compute_wm_levels(dev_priv, ddb, cstate, - intel_pstate, &wm_params, - wm, 1); - if (ret) - return ret; - } } pipe_wm->linetime = skl_compute_linetime_wm(cstate); @@ -4978,9 +5130,9 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, const struct skl_ddb_entry *entry) { if (entry->end) - I915_WRITE(reg, (entry->end - 1) << 16 | entry->start); + I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start); else - I915_WRITE(reg, 0); + I915_WRITE_FW(reg, 0); } static void skl_write_wm_level(struct drm_i915_private *dev_priv, @@ -4995,19 +5147,22 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv, val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; } - I915_WRITE(reg, val); + I915_WRITE_FW(reg, val); } -static void skl_write_plane_wm(struct intel_crtc *intel_crtc, - const struct skl_plane_wm *wm, - const struct skl_ddb_allocation *ddb, - enum plane_id plane_id) +void skl_write_plane_wm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { - struct drm_crtc *crtc = &intel_crtc->base; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); int level, max_level = ilk_wm_max_level(dev_priv); - enum pipe pipe = intel_crtc->pipe; + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + const struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + const struct skl_ddb_entry *ddb_y = 
+ &crtc_state->wm.skl.plane_ddb_y[plane_id]; + const struct skl_ddb_entry *ddb_uv = + &crtc_state->wm.skl.plane_ddb_uv[plane_id]; for (level = 0; level <= max_level; level++) { skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), @@ -5016,35 +5171,32 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc, skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), &wm->trans_wm); - skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), - &ddb->plane[pipe][plane_id]); - /* FIXME: add proper NV12 support for ICL. */ - if (INTEL_GEN(dev_priv) >= 11) - return skl_ddb_entry_write(dev_priv, - PLANE_BUF_CFG(pipe, plane_id), - &ddb->plane[pipe][plane_id]); - if (wm->is_planar) { - skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), - &ddb->uv_plane[pipe][plane_id]); + if (INTEL_GEN(dev_priv) >= 11) { skl_ddb_entry_write(dev_priv, - PLANE_NV12_BUF_CFG(pipe, plane_id), - &ddb->plane[pipe][plane_id]); - } else { - skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), - &ddb->plane[pipe][plane_id]); - I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0); + PLANE_BUF_CFG(pipe, plane_id), ddb_y); + return; } + + if (wm->is_planar) + swap(ddb_y, ddb_uv); + + skl_ddb_entry_write(dev_priv, + PLANE_BUF_CFG(pipe, plane_id), ddb_y); + skl_ddb_entry_write(dev_priv, + PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv); } -static void skl_write_cursor_wm(struct intel_crtc *intel_crtc, - const struct skl_plane_wm *wm, - const struct skl_ddb_allocation *ddb) +void skl_write_cursor_wm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { - struct drm_crtc *crtc = &intel_crtc->base; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); int level, max_level = ilk_wm_max_level(dev_priv); - enum pipe pipe = intel_crtc->pipe; + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + const struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + const struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb_y[plane_id]; for (level = 0; level <= max_level; level++) { skl_write_wm_level(dev_priv, CUR_WM(pipe, level), @@ -5052,22 +5204,30 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc, } skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm); - skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), - &ddb->plane[pipe][PLANE_CURSOR]); + skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb); } bool skl_wm_level_equals(const struct skl_wm_level *l1, const struct skl_wm_level *l2) { - if (l1->plane_en != l2->plane_en) - return false; + return l1->plane_en == l2->plane_en && + l1->plane_res_l == l2->plane_res_l && + l1->plane_res_b == l2->plane_res_b; +} - /* If both planes aren't enabled, the rest shouldn't matter */ - if (!l1->plane_en) - return true; +static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv, + const struct skl_plane_wm *wm1, + const struct skl_plane_wm *wm2) +{ + int level, max_level = ilk_wm_max_level(dev_priv); + + for (level = 0; level <= max_level; level++) { + if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) || + !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level])) + return false; + } - return (l1->plane_res_l == l2->plane_res_l && - l1->plane_res_b == l2->plane_res_b); + return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm); } static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, @@ -5076,16 +5236,15 @@ static inline bool 
skl_ddb_entries_overlap(const struct skl_ddb_entry *a, return a->start < b->end && b->start < a->end; } -bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv, - const struct skl_ddb_entry **entries, - const struct skl_ddb_entry *ddb, - int ignore) +bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, + const struct skl_ddb_entry entries[], + int num_entries, int ignore_idx) { - enum pipe pipe; + int i; - for_each_pipe(dev_priv, pipe) { - if (pipe != ignore && entries[pipe] && - skl_ddb_entries_overlap(ddb, entries[pipe])) + for (i = 0; i < num_entries; i++) { + if (i != ignore_idx && + skl_ddb_entries_overlap(ddb, &entries[i])) return true; } @@ -5095,13 +5254,12 @@ bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv, static int skl_update_pipe_wm(struct drm_crtc_state *cstate, const struct skl_pipe_wm *old_pipe_wm, struct skl_pipe_wm *pipe_wm, /* out */ - struct skl_ddb_allocation *ddb, /* out */ bool *changed /* out */) { struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate); int ret; - ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); + ret = skl_build_pipe_wm(intel_cstate, pipe_wm); if (ret) return ret; @@ -5127,32 +5285,29 @@ pipes_modified(struct drm_atomic_state *state) } static int -skl_ddb_add_affected_planes(struct intel_crtc_state *cstate) +skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) { - struct drm_atomic_state *state = cstate->base.state; - struct drm_device *dev = state->dev; - struct drm_crtc *crtc = cstate->base.crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; - struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; - struct drm_plane_state *plane_state; - struct drm_plane *plane; - enum pipe pipe = intel_crtc->pipe; + struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane; - drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) { - enum plane_id plane_id = to_intel_plane(plane)->id; + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + struct intel_plane_state *plane_state; + enum plane_id plane_id = plane->id; - if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id], - &new_ddb->plane[pipe][plane_id]) && - skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id], - &new_ddb->uv_plane[pipe][plane_id])) + if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id], + &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) && + skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id], + &new_crtc_state->wm.skl.plane_ddb_uv[plane_id])) continue; - plane_state = drm_atomic_get_plane_state(state, plane); + plane_state = intel_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) return PTR_ERR(plane_state); + + new_crtc_state->update_planes |= BIT(plane_id); } return 0; @@ -5164,18 +5319,21 @@ skl_compute_ddb(struct drm_atomic_state *state) const struct drm_i915_private *dev_priv = to_i915(state->dev); struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb; + struct intel_crtc_state *old_crtc_state; + struct intel_crtc_state 
*new_crtc_state; struct intel_crtc *crtc; - struct intel_crtc_state *cstate; int ret, i; memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); - for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) { - ret = skl_allocate_pipe_ddb(cstate, ddb); + for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state, + new_crtc_state, i) { + ret = skl_allocate_pipe_ddb(new_crtc_state, ddb); if (ret) return ret; - ret = skl_ddb_add_affected_planes(cstate); + ret = skl_ddb_add_affected_planes(old_crtc_state, + new_crtc_state); if (ret) return ret; } @@ -5184,38 +5342,31 @@ skl_compute_ddb(struct drm_atomic_state *state) } static void -skl_print_wm_changes(const struct drm_atomic_state *state) +skl_print_wm_changes(struct intel_atomic_state *state) { - const struct drm_device *dev = state->dev; - const struct drm_i915_private *dev_priv = to_i915(dev); - const struct intel_atomic_state *intel_state = - to_intel_atomic_state(state); - const struct drm_crtc *crtc; - const struct drm_crtc_state *cstate; - const struct intel_plane *intel_plane; - const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb; - const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + const struct intel_crtc_state *old_crtc_state; + const struct intel_crtc_state *new_crtc_state; + struct intel_plane *plane; + struct intel_crtc *crtc; int i; - for_each_new_crtc_in_state(state, crtc, cstate, i) { - const struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; - - for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { - enum plane_id plane_id = intel_plane->id; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + enum plane_id plane_id = plane->id; const struct skl_ddb_entry *old, *new; - old = &old_ddb->plane[pipe][plane_id]; - new = &new_ddb->plane[pipe][plane_id]; + old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id]; + new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id]; if (skl_ddb_entry_equal(old, new)) continue; - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n", - intel_plane->base.base.id, - intel_plane->base.name, - old->start, old->end, - new->start, new->end); + DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n", + plane->base.base.id, plane->base.name, + old->start, old->end, + new->start, new->end); } } } @@ -5310,6 +5461,66 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed) return 0; } +/* + * To make sure the cursor watermark registers are always consistent + * with our computed state the following scenario needs special + * treatment: + * + * 1. enable cursor + * 2. move cursor entirely offscreen + * 3. disable cursor + * + * Step 2. does call .disable_plane() but does not zero the watermarks + * (since we consider an offscreen cursor still active for the purposes + * of watermarks). Step 3. would not normally call .disable_plane() + * because the actual plane visibility isn't changing, and we don't + * deallocate the cursor ddb until the pipe gets disabled. So we must + * force step 3. to call .disable_plane() to update the watermark + * registers properly. + * + * Other planes do not suffer from these issues as their watermarks are + * calculated based on the actual plane visibility.
The only time this + * can trigger for the other planes is during the initial readout as the + * default value of the watermarks registers is not zero. + */ +static int skl_wm_add_affected_planes(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_plane *plane; + + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + struct intel_plane_state *plane_state; + enum plane_id plane_id = plane->id; + + /* + * Force a full wm update for every plane on modeset. + * Required because the reset value of the wm registers + * is non-zero, whereas we want all disabled planes to + * have zero watermarks. So if we turn off the relevant + * power well the hardware state will go out of sync + * with the software state. + */ + if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) && + skl_plane_wm_equals(dev_priv, + &old_crtc_state->wm.skl.optimal.planes[plane_id], + &new_crtc_state->wm.skl.optimal.planes[plane_id])) + continue; + + plane_state = intel_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + + new_crtc_state->update_planes |= BIT(plane_id); + } + + return 0; +} + static int skl_compute_wm(struct drm_atomic_state *state) { @@ -5349,8 +5560,12 @@ skl_compute_wm(struct drm_atomic_state *state) &to_intel_crtc_state(crtc->state)->wm.skl.optimal; pipe_wm = &intel_cstate->wm.skl.optimal; - ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, - &results->ddb, &changed); + ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed); + if (ret) + return ret; + + ret = skl_wm_add_affected_planes(intel_state, + to_intel_crtc(crtc)); if (ret) return ret; @@ -5364,7 +5579,7 @@ skl_compute_wm(struct drm_atomic_state *state) intel_cstate->update_wm_pre = true; } - skl_print_wm_changes(state); + skl_print_wm_changes(intel_state); return 0; } @@ -5375,23 +5590,12 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc); struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; - const struct skl_ddb_allocation *ddb = &state->wm_results.ddb; enum pipe pipe = crtc->pipe; - enum plane_id plane_id; if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) return; I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); - - for_each_plane_id_on_crtc(crtc, plane_id) { - if (plane_id != PLANE_CURSOR) - skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id], - ddb, plane_id); - else - skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id], - ddb); - } } static void skl_initial_wm(struct intel_atomic_state *state, @@ -5401,8 +5605,6 @@ static void skl_initial_wm(struct intel_atomic_state *state, struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct skl_ddb_values *results = &state->wm_results; - struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw; - enum pipe pipe = intel_crtc->pipe; if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0) return; @@ -5412,11 +5614,6 @@ static void skl_initial_wm(struct intel_atomic_state *state, if (cstate->base.active_changed) skl_atomic_update_crtc_wm(state, cstate); - memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe], - 
sizeof(hw_vals->ddb.uv_plane[pipe])); - memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe], - sizeof(hw_vals->ddb.plane[pipe])); - mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -5567,13 +5764,6 @@ void skl_wm_get_hw_state(struct drm_device *dev) if (dev_priv->active_crtcs) { /* Fully recompute DDB on first atomic commit */ dev_priv->wm.distrust_bios_wm = true; - } else { - /* - * Easy/common case; just sanitize DDB now if everything off - * Keep dbuf slice info intact - */ - memset(ddb->plane, 0, sizeof(ddb->plane)); - memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane)); } } @@ -6117,14 +6307,8 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv) { u32 val; - /* Display WA #0477 WaDisableIPC: skl */ - if (IS_SKYLAKE(dev_priv)) - dev_priv->ipc_enabled = false; - - /* Display WA #1141: SKL:all KBL:all CFL */ - if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) && - !dev_priv->dram_info.symmetric_memory) - dev_priv->ipc_enabled = false; + if (!HAS_IPC(dev_priv)) + return; val = I915_READ(DISP_ARB_CTL2); @@ -6138,11 +6322,15 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv) void intel_init_ipc(struct drm_i915_private *dev_priv) { - dev_priv->ipc_enabled = false; if (!HAS_IPC(dev_priv)) return; - dev_priv->ipc_enabled = true; + /* Display WA #1141: SKL:all KBL:all CFL */ + if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) + dev_priv->ipc_enabled = dev_priv->dram_info.symmetric_memory; + else + dev_priv->ipc_enabled = true; + intel_enable_ipc(dev_priv); } @@ -8736,6 +8924,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) /* This is not an Wa. Enable to reduce Sampler power */ I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN, I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE); + + /* WaEnable32PlaneMode:icl */ + I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, + _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE)); } static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) @@ -9313,8 +9505,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) /* Set up chip specific power management-related functions */ void intel_init_pm(struct drm_i915_private *dev_priv) { - intel_fbc_init(dev_priv); - /* For cxsr */ if (IS_PINEVIEW(dev_priv)) i915_pineview_get_mem_freq(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index b6838b525502..419e56342523 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -71,6 +71,14 @@ static bool psr_global_enabled(u32 debug) static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, const struct intel_crtc_state *crtc_state) { + /* Disable PSR2 by default for all platforms */ + if (i915_modparams.enable_psr == -1) + return false; + + /* Cannot enable DSC and PSR2 simultaneously */ + WARN_ON(crtc_state->dsc_params.compression_enable && + crtc_state->has_psr2); + switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_FORCE_PSR1: return false; @@ -79,25 +87,42 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, } } +static int edp_psr_shift(enum transcoder cpu_transcoder) +{ + switch (cpu_transcoder) { + case TRANSCODER_A: + return EDP_PSR_TRANSCODER_A_SHIFT; + case TRANSCODER_B: + return EDP_PSR_TRANSCODER_B_SHIFT; + case TRANSCODER_C: + return EDP_PSR_TRANSCODER_C_SHIFT; + default: + MISSING_CASE(cpu_transcoder); + /* fallthrough */ + case TRANSCODER_EDP: + return EDP_PSR_TRANSCODER_EDP_SHIFT; + } +} + void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug) { u32 debug_mask, 
mask; + enum transcoder cpu_transcoder; + u32 transcoders = BIT(TRANSCODER_EDP); + + if (INTEL_GEN(dev_priv) >= 8) + transcoders |= BIT(TRANSCODER_A) | + BIT(TRANSCODER_B) | + BIT(TRANSCODER_C); + + debug_mask = 0; + mask = 0; + for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { + int shift = edp_psr_shift(cpu_transcoder); - mask = EDP_PSR_ERROR(TRANSCODER_EDP); - debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) | - EDP_PSR_PRE_ENTRY(TRANSCODER_EDP); - - if (INTEL_GEN(dev_priv) >= 8) { - mask |= EDP_PSR_ERROR(TRANSCODER_A) | - EDP_PSR_ERROR(TRANSCODER_B) | - EDP_PSR_ERROR(TRANSCODER_C); - - debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) | - EDP_PSR_PRE_ENTRY(TRANSCODER_A) | - EDP_PSR_POST_EXIT(TRANSCODER_B) | - EDP_PSR_PRE_ENTRY(TRANSCODER_B) | - EDP_PSR_POST_EXIT(TRANSCODER_C) | - EDP_PSR_PRE_ENTRY(TRANSCODER_C); + mask |= EDP_PSR_ERROR(shift); + debug_mask |= EDP_PSR_POST_EXIT(shift) | + EDP_PSR_PRE_ENTRY(shift); } if (debug & I915_PSR_DEBUG_IRQ) @@ -148,6 +173,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) u32 transcoders = BIT(TRANSCODER_EDP); enum transcoder cpu_transcoder; ktime_t time_ns = ktime_get(); + u32 mask = 0; if (INTEL_GEN(dev_priv) >= 8) transcoders |= BIT(TRANSCODER_A) | @@ -155,18 +181,32 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) BIT(TRANSCODER_C); for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { - /* FIXME: Exit PSR and link train manually when this happens. */ - if (psr_iir & EDP_PSR_ERROR(cpu_transcoder)) - DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n", - transcoder_name(cpu_transcoder)); + int shift = edp_psr_shift(cpu_transcoder); + + if (psr_iir & EDP_PSR_ERROR(shift)) { + DRM_WARN("[transcoder %s] PSR aux error\n", + transcoder_name(cpu_transcoder)); + + dev_priv->psr.irq_aux_error = true; - if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) { + /* + * If this interrupt is not masked it will keep + * interrupting so fast that it prevents the scheduled + * work from running. + * Also, after a PSR error we don't want to arm PSR + * again, so we don't care about unmasking the interrupt + * or clearing irq_aux_error. + */ + mask |= EDP_PSR_ERROR(shift); + } + + if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) { dev_priv->psr.last_entry_attempt = time_ns; DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n", transcoder_name(cpu_transcoder)); } - if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) { + if (psr_iir & EDP_PSR_POST_EXIT(shift)) { dev_priv->psr.last_exit = time_ns; DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n", transcoder_name(cpu_transcoder)); @@ -180,6 +220,13 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) } } } + + if (mask) { + mask |= I915_READ(EDP_PSR_IMR); + I915_WRITE(EDP_PSR_IMR, mask); + + schedule_work(&dev_priv->psr.work); + } } static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) @@ -294,7 +341,8 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp, psr_vsc.sdp_header.HB3 = 0x8; } - intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state, + intel_dig_port->write_infoframe(&intel_dig_port->base, + crtc_state, DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc)); } @@ -458,6 +506,16 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, if (!dev_priv->psr.sink_psr2_support) return false; + /* + * DSC and PSR2 cannot be enabled simultaneously. If a requested + * resolution requires DSC to be enabled, priority is given to DSC + * over PSR2.
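The edp_psr_shift() helper introduced above lets both intel_psr_irq_control() and intel_psr_irq_handler() treat the per-transcoder ERROR/POST_EXIT/PRE_ENTRY bits as offsets from one per-transcoder shift. The standalone sketch below models that decode pattern; the shift values and bit positions are illustrative stand-ins, not the real EDP_PSR_IIR layout.

#include <stdint.h>
#include <stdio.h>

enum transcoder { TRANSCODER_EDP, TRANSCODER_A, TRANSCODER_B, TRANSCODER_C };

static int edp_psr_shift(enum transcoder cpu_transcoder)
{
	switch (cpu_transcoder) {
	case TRANSCODER_A: return 8;	/* illustrative values only */
	case TRANSCODER_B: return 12;
	case TRANSCODER_C: return 16;
	default:	   return 0;	/* TRANSCODER_EDP */
	}
}

/* Each transcoder owns a small bit field; the event macros become
 * offsets relative to that transcoder's shift. */
#define EDP_PSR_ERROR(shift)	 (1u << ((shift) + 2))
#define EDP_PSR_POST_EXIT(shift) (1u << ((shift) + 1))
#define EDP_PSR_PRE_ENTRY(shift) (1u << (shift))

int main(void)
{
	uint32_t psr_iir = EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_B));
	int t;

	for (t = TRANSCODER_EDP; t <= TRANSCODER_C; t++) {
		int shift = edp_psr_shift((enum transcoder)t);

		if (psr_iir & EDP_PSR_ERROR(shift))
			printf("transcoder %d: PSR aux error\n", t);
	}
	return 0;
}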
+ */ + if (crtc_state->dsc_params.compression_enable) { + DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n"); + return false; + } + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { psr_max_h = 4096; psr_max_v = 2304; @@ -503,10 +561,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, return; } - if (IS_HASWELL(dev_priv) && - I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) & - S3D_ENABLE) { - DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); + if (dev_priv->psr.sink_not_reliable) { + DRM_DEBUG_KMS("PSR sink implementation is not reliable\n"); return; } @@ -553,11 +609,31 @@ static void intel_psr_activate(struct intel_dp *intel_dp) dev_priv->psr.active = true; } +static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + static const i915_reg_t regs[] = { + [TRANSCODER_A] = CHICKEN_TRANS_A, + [TRANSCODER_B] = CHICKEN_TRANS_B, + [TRANSCODER_C] = CHICKEN_TRANS_C, + [TRANSCODER_EDP] = CHICKEN_TRANS_EDP, + }; + + WARN_ON(INTEL_GEN(dev_priv) < 9); + + if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) || + !regs[cpu_transcoder].reg)) + cpu_transcoder = TRANSCODER_A; + + return regs[cpu_transcoder]; +} + static void intel_psr_enable_source(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 mask; /* Only HSW and BDW have PSR AUX registers that need to be set up. SKL+ * use hardcoded values for PSR AUX transactions @@ -566,37 +642,34 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, hsw_psr_setup_aux(intel_dp); if (dev_priv->psr.psr2_enabled) { - u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder)); + i915_reg_t reg = gen9_chicken_trans_reg(dev_priv, + cpu_transcoder); + u32 chicken = I915_READ(reg); - if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) + if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) chicken |= (PSR2_VSC_ENABLE_PROG_HEADER | PSR2_ADD_VERTICAL_LINE_COUNT); else chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL; - I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken); - - I915_WRITE(EDP_PSR_DEBUG, - EDP_PSR_DEBUG_MASK_MEMUP | - EDP_PSR_DEBUG_MASK_HPD | - EDP_PSR_DEBUG_MASK_LPSP | - EDP_PSR_DEBUG_MASK_MAX_SLEEP | - EDP_PSR_DEBUG_MASK_DISP_REG_WRITE); - } else { - /* - * Per Spec: Avoid continuous PSR exit by masking MEMUP - * and HPD. also mask LPSP to avoid dependency on other - * drivers that might block runtime_pm besides - * preventing other hw tracking issues now we can rely - * on frontbuffer tracking. - */ - I915_WRITE(EDP_PSR_DEBUG, - EDP_PSR_DEBUG_MASK_MEMUP | - EDP_PSR_DEBUG_MASK_HPD | - EDP_PSR_DEBUG_MASK_LPSP | - EDP_PSR_DEBUG_MASK_DISP_REG_WRITE | - EDP_PSR_DEBUG_MASK_MAX_SLEEP); + I915_WRITE(reg, chicken); } + + /* + * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also + * mask LPSP to avoid a dependency on other drivers that might block + * runtime_pm, besides preventing other hw tracking issues, now that we + * can rely on frontbuffer tracking.
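gen9_chicken_trans_reg() above swaps a parametrized register macro for a designated-initializer lookup table with a WARN_ON() fallback to transcoder A. Below is a compilable userspace sketch of that table-plus-fallback pattern; the register offsets are placeholders, not verified MMIO addresses.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP };

typedef struct { unsigned int reg; } reg_t;	/* stand-in for i915_reg_t */

static reg_t chicken_trans_reg(enum transcoder cpu_transcoder)
{
	/* Offsets are placeholders for illustration. */
	static const reg_t regs[] = {
		[TRANSCODER_A]	 = { 0x1000 },
		[TRANSCODER_B]	 = { 0x1004 },
		[TRANSCODER_C]	 = { 0x1008 },
		[TRANSCODER_EDP] = { 0x100c },
	};

	/* Out-of-range or unpopulated entries fall back to transcoder A,
	 * mirroring the WARN_ON() fallback in the patch above. */
	if (cpu_transcoder >= ARRAY_SIZE(regs) || !regs[cpu_transcoder].reg)
		cpu_transcoder = TRANSCODER_A;

	return regs[cpu_transcoder];
}

int main(void)
{
	printf("CHICKEN_TRANS(EDP) -> %#x\n",
	       chicken_trans_reg(TRANSCODER_EDP).reg);
	return 0;
}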
+ */ + mask = EDP_PSR_DEBUG_MASK_MEMUP | + EDP_PSR_DEBUG_MASK_HPD | + EDP_PSR_DEBUG_MASK_LPSP | + EDP_PSR_DEBUG_MASK_MAX_SLEEP; + + if (INTEL_GEN(dev_priv) < 11) + mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; + + I915_WRITE(EDP_PSR_DEBUG, mask); } static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, @@ -646,6 +719,7 @@ void intel_psr_enable(struct intel_dp *intel_dp, dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); dev_priv->psr.busy_frontbuffer_bits = 0; dev_priv->psr.prepared = true; + dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; if (psr_global_enabled(dev_priv->psr.debug)) intel_psr_enable_locked(dev_priv, crtc_state); @@ -656,49 +730,34 @@ unlock: mutex_unlock(&dev_priv->psr.lock); } -static void -intel_psr_disable_source(struct intel_dp *intel_dp) +static void intel_psr_exit(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - if (dev_priv->psr.active) { - i915_reg_t psr_status; - u32 psr_status_mask; - - if (dev_priv->psr.psr2_enabled) { - psr_status = EDP_PSR2_STATUS; - psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; - - I915_WRITE(EDP_PSR2_CTL, - I915_READ(EDP_PSR2_CTL) & - ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE)); - - } else { - psr_status = EDP_PSR_STATUS; - psr_status_mask = EDP_PSR_STATUS_STATE_MASK; - - I915_WRITE(EDP_PSR_CTL, - I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); - } + u32 val; - /* Wait till PSR is idle */ - if (intel_wait_for_register(dev_priv, - psr_status, psr_status_mask, 0, - 2000)) - DRM_ERROR("Timed out waiting for PSR Idle State\n"); + if (!dev_priv->psr.active) { + if (INTEL_GEN(dev_priv) >= 9) + WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); + WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); + return; + } - dev_priv->psr.active = false; + if (dev_priv->psr.psr2_enabled) { + val = I915_READ(EDP_PSR2_CTL); + WARN_ON(!(val & EDP_PSR2_ENABLE)); + I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE); } else { - if (dev_priv->psr.psr2_enabled) - WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); - else - WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); + val = I915_READ(EDP_PSR_CTL); + WARN_ON(!(val & EDP_PSR_ENABLE)); + I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE); } + dev_priv->psr.active = false; } static void intel_psr_disable_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + i915_reg_t psr_status; + u32 psr_status_mask; lockdep_assert_held(&dev_priv->psr.lock); @@ -707,7 +766,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) DRM_DEBUG_KMS("Disabling PSR%s\n", dev_priv->psr.psr2_enabled ? "2" : "1"); - intel_psr_disable_source(intel_dp); + + intel_psr_exit(dev_priv); + + if (dev_priv->psr.psr2_enabled) { + psr_status = EDP_PSR2_STATUS; + psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; + } else { + psr_status = EDP_PSR_STATUS; + psr_status_mask = EDP_PSR_STATUS_STATE_MASK; + } + + /* Wait till PSR is idle */ + if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0, + 2000)) + DRM_ERROR("Timed out waiting for PSR idle state\n"); /* Disable PSR on Sink */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); @@ -893,6 +966,16 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, return ret; } +static void intel_psr_handle_irq(struct drm_i915_private *dev_priv) +{ + struct i915_psr *psr = &dev_priv->psr; + + intel_psr_disable_locked(psr->dp); + psr->sink_not_reliable = true; + /* let's make sure that the sink is awake */ + drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0); +} + static void intel_psr_work(struct work_struct *work) { struct drm_i915_private *dev_priv = @@ -903,6 +986,9 @@ static void intel_psr_work(struct work_struct *work) if (!dev_priv->psr.enabled) goto unlock; + if (READ_ONCE(dev_priv->psr.irq_aux_error)) + intel_psr_handle_irq(dev_priv); + /* * We have to make sure PSR is ready for re-enable * otherwise it keeps disabled until next full enable/disable cycle. @@ -925,25 +1011,6 @@ unlock: mutex_unlock(&dev_priv->psr.lock); } -static void intel_psr_exit(struct drm_i915_private *dev_priv) -{ - u32 val; - - if (!dev_priv->psr.active) - return; - - if (dev_priv->psr.psr2_enabled) { - val = I915_READ(EDP_PSR2_CTL); - WARN_ON(!(val & EDP_PSR2_ENABLE)); - I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE); - } else { - val = I915_READ(EDP_PSR_CTL); - WARN_ON(!(val & EDP_PSR_ENABLE)); - I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE); - } - dev_priv->psr.active = false; -} - /** * intel_psr_invalidate - Invalidate PSR * @dev_priv: i915 device @@ -960,9 +1027,6 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv) void intel_psr_invalidate(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin) { - struct drm_crtc *crtc; - enum pipe pipe; - if (!CAN_PSR(dev_priv)) return; @@ -975,10 +1039,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, return; } - crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc; - pipe = to_intel_crtc(crtc)->pipe; - - frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); + frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe); dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; if (frontbuffer_bits) @@ -1003,9 +1064,6 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, void intel_psr_flush(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin) { - struct drm_crtc *crtc; - enum pipe pipe; - if (!CAN_PSR(dev_priv)) return; @@ -1018,28 +1076,21 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, return; } - crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc; - pipe = to_intel_crtc(crtc)->pipe; - - frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); + frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe); dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; /* By definition flush = invalidate + flush */ if (frontbuffer_bits) { - if (dev_priv->psr.psr2_enabled) { - intel_psr_exit(dev_priv); - } else { - /* - * Display WA #0884: all - * This documented WA for bxt can be safely applied - * broadly so we can force HW tracking to exit PSR - * instead of disabling and
re-enabling. - * Workaround tells us to write 0 to CUR_SURFLIVE_A, - * but it makes more sense write to the current active - * pipe. - */ - I915_WRITE(CURSURFLIVE(pipe), 0); - } + /* + * Display WA #0884: all + * This documented WA for bxt can be safely applied + * broadly so we can force HW tracking to exit PSR + * instead of disabling and re-enabling. + * Workaround tells us to write 0 to CUR_SURFLIVE_A, + * but it makes more sense to write to the current active + * pipe. + */ + I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0); } if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) @@ -1056,6 +1107,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, */ void intel_psr_init(struct drm_i915_private *dev_priv) { + u32 val; + if (!HAS_PSR(dev_priv)) return; @@ -1065,11 +1118,24 @@ void intel_psr_init(struct drm_i915_private *dev_priv) if (!dev_priv->psr.sink_support) return; - if (i915_modparams.enable_psr == -1) { - i915_modparams.enable_psr = dev_priv->vbt.psr.enable; + if (i915_modparams.enable_psr == -1) + if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable) + i915_modparams.enable_psr = 0; - /* Per platform default: all disabled. */ - i915_modparams.enable_psr = 0; + /* + * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR + * will still keep the error set even after the reset done in the + * irq_preinstall and irq_uninstall hooks. + * And enabling PSR in this situation causes the screen to freeze the + * first time that the PSR HW tries to activate, so let's keep PSR + * disabled to avoid any rendering problems. + */ + val = I915_READ(EDP_PSR_IIR); + val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP)); + if (val) { + DRM_DEBUG_KMS("PSR interrupt error set\n"); + dev_priv->psr.sink_not_reliable = true; + return; } /* Set link_standby x link_off defaults */ @@ -1109,6 +1175,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) { DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n"); intel_psr_disable_locked(intel_dp); + psr->sink_not_reliable = true; } if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) { @@ -1126,12 +1193,27 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) if (val & ~errors) DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n", val & ~errors); - if (val & errors) + if (val & errors) { intel_psr_disable_locked(intel_dp); + psr->sink_not_reliable = true; + } /* clear status register */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val); - - /* TODO: handle PSR2 errors */ exit: mutex_unlock(&psr->lock); } + +bool intel_psr_enabled(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + bool ret; + + if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) + return false; + + mutex_lock(&dev_priv->psr.lock); + ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled); + mutex_unlock(&dev_priv->psr.lock); + + return ret; +} diff --git a/drivers/gpu/drm/i915/intel_quirks.c b/drivers/gpu/drm/i915/intel_quirks.c new file mode 100644 index 000000000000..ec2b0fc92b8b --- /dev/null +++ b/drivers/gpu/drm/i915/intel_quirks.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2018 Intel Corporation + */ + +#include <linux/dmi.h> + +#include "intel_drv.h" + +/* + * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason + */ +static void quirk_ssc_force_disable(struct drm_i915_private *i915) +{ + i915->quirks |= QUIRK_LVDS_SSC_DISABLE; + DRM_INFO("applying lvds SSC
disable quirk\n"); +} + +/* + * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight + * brightness value + */ +static void quirk_invert_brightness(struct drm_i915_private *i915) +{ + i915->quirks |= QUIRK_INVERT_BRIGHTNESS; + DRM_INFO("applying inverted panel brightness quirk\n"); +} + +/* Some VBTs incorrectly indicate no backlight is present */ +static void quirk_backlight_present(struct drm_i915_private *i915) +{ + i915->quirks |= QUIRK_BACKLIGHT_PRESENT; + DRM_INFO("applying backlight present quirk\n"); +} + +/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms, + * which is 300 ms greater than eDP spec T12 min. + */ +static void quirk_increase_t12_delay(struct drm_i915_private *i915) +{ + i915->quirks |= QUIRK_INCREASE_T12_DELAY; + DRM_INFO("Applying T12 delay quirk\n"); +} + +/* + * GeminiLake NUC HDMI outputs require additional off time; + * this allows the onboard retimer to correctly sync to the signal + */ +static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915) +{ + i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME; + DRM_INFO("Applying Increase DDI Disabled quirk\n"); +} + +struct intel_quirk { + int device; + int subsystem_vendor; + int subsystem_device; + void (*hook)(struct drm_i915_private *i915); +}; + +/* For systems that don't have a meaningful PCI subdevice/subvendor ID */ +struct intel_dmi_quirk { + void (*hook)(struct drm_i915_private *i915); + const struct dmi_system_id (*dmi_id_list)[]; +}; + +static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) +{ + DRM_INFO("Backlight polarity reversed on %s\n", id->ident); + return 1; +} + +static const struct intel_dmi_quirk intel_dmi_quirks[] = { + { + .dmi_id_list = &(const struct dmi_system_id[]) { + { + .callback = intel_dmi_reverse_brightness, + .ident = "NCR Corporation", + .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, ""), + }, + }, + { } /* terminating entry */ + }, + .hook = quirk_invert_brightness, + }, +}; + +static struct intel_quirk intel_quirks[] = { + /* Lenovo U160 cannot use SSC on LVDS */ + { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, + + /* Sony Vaio Y cannot use SSC on LVDS */ + { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, + + /* Acer Aspire 5734Z must invert backlight brightness */ + { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, + + /* Acer/eMachines G725 */ + { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, + + /* Acer/eMachines e725 */ + { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, + + /* Acer/Packard Bell NCL20 */ + { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, + + /* Acer Aspire 4736Z */ + { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, + + /* Acer Aspire 5336 */ + { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, + + /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ + { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, + + /* Acer C720 Chromebook (Core i3 4005U) */ + { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, + + /* Apple Macbook 2,1 (Core 2 T7400) */ + { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, + + /* Apple Macbook 4,1 */ + { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present }, + + /* Toshiba CB35 Chromebook (Celeron 2955U) */ + { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, + + /* HP Chromebook 14 (Celeron 2955U) */ + { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, + + /* Dell Chromebook 11 */ + { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, + + /* Dell Chromebook 11 (2015 version) */ + {
0x0a16, 0x1028, 0x0a35, quirk_backlight_present }, + + /* Toshiba Satellite P50-C-18C */ + { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay }, + + /* GeminiLake NUC */ + { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, + { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, + /* ASRock ITX */ + { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, + { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, +}; + +void intel_init_quirks(struct drm_i915_private *i915) +{ + struct pci_dev *d = i915->drm.pdev; + int i; + + for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { + struct intel_quirk *q = &intel_quirks[i]; + + if (d->device == q->device && + (d->subsystem_vendor == q->subsystem_vendor || + q->subsystem_vendor == PCI_ANY_ID) && + (d->subsystem_device == q->subsystem_device || + q->subsystem_device == PCI_ANY_ID)) + q->hook(i915); + } + for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { + if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) + intel_dmi_quirks[i].hook(i915); + } +} diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index d0ef50bf930a..fbeaec3994e7 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -69,19 +69,28 @@ unsigned int intel_ring_update_space(struct intel_ring *ring) static int gen2_render_ring_flush(struct i915_request *rq, u32 mode) { + unsigned int num_store_dw; u32 cmd, *cs; cmd = MI_FLUSH; - + num_store_dw = 0; if (mode & EMIT_INVALIDATE) cmd |= MI_READ_FLUSH; + if (mode & EMIT_FLUSH) + num_store_dw = 4; - cs = intel_ring_begin(rq, 2); + cs = intel_ring_begin(rq, 2 + 3 * num_store_dw); if (IS_ERR(cs)) return PTR_ERR(cs); *cs++ = cmd; - *cs++ = MI_NOOP; + while (num_store_dw--) { + *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *cs++ = i915_scratch_offset(rq->i915); + *cs++ = 0; + } + *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH; + intel_ring_advance(rq, cs); return 0; @@ -91,6 +100,7 @@ static int gen4_render_ring_flush(struct i915_request *rq, u32 mode) { u32 cmd, *cs; + int i; /* * read/write caches: @@ -127,12 +137,43 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) cmd |= MI_INVALIDATE_ISP; } - cs = intel_ring_begin(rq, 2); + i = 2; + if (mode & EMIT_INVALIDATE) + i += 20; + + cs = intel_ring_begin(rq, i); if (IS_ERR(cs)) return PTR_ERR(cs); *cs++ = cmd; - *cs++ = MI_NOOP; + + /* + * A random delay to let the CS invalidate take effect? Without this + * delay, the GPU relocation path fails as the CS does not see + * the updated contents. Just as important, if we apply the flushes + * to the EMIT_FLUSH branch (i.e. immediately after the relocation + * write and before the invalidate on the next batch), the relocations + * still fail. This implies that it is a delay following invalidation + * that is required to reset the caches as opposed to a delay to + * ensure the memory is written.
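The hunk continuing below implements that delay by bracketing a dozen MI_FLUSH commands between two PIPE_CONTROL scratch writes. This userspace sketch reproduces the emission arithmetic with made-up opcode encodings, showing why EMIT_INVALIDATE reserves exactly 20 extra dwords (4 + 12 + 4):

#include <stdint.h>
#include <stdio.h>

/* Made-up encodings for illustration only. */
#define MI_FLUSH		 (0x04u << 23)
#define GFX_OP_PIPE_CONTROL(len) ((0x3u << 29) | (0x3u << 27) | (0x2u << 24) | ((len) - 2))
#define PIPE_CONTROL_QW_WRITE	 (1u << 14)
#define PIPE_CONTROL_GLOBAL_GTT	 (1u << 2)

/* Emit the "scratch write, 12 flushes, scratch write" sequence the
 * comment above describes. */
static uint32_t *emit_invalidate_delay(uint32_t *cs, uint32_t scratch)
{
	int i;

	*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;

	for (i = 0; i < 12; i++)
		*cs++ = MI_FLUSH;

	*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;

	return cs;
}

int main(void)
{
	uint32_t ring[32];
	uint32_t *end = emit_invalidate_delay(ring, 0x1000);

	printf("emitted %d dwords\n", (int)(end - ring));	/* prints 20 */
	return 0;
}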
+ */ + if (mode & EMIT_INVALIDATE) { + *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; + *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = 0; + *cs++ = 0; + + for (i = 0; i < 12; i++) + *cs++ = MI_FLUSH; + + *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; + *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = 0; + *cs++ = 0; + } + + *cs++ = cmd; + intel_ring_advance(rq, cs); return 0; @@ -178,8 +219,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) static int intel_emit_post_sync_nonzero_flush(struct i915_request *rq) { - u32 scratch_addr = - i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; + u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; u32 *cs; cs = intel_ring_begin(rq, 6); @@ -212,8 +252,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq) static int gen6_render_ring_flush(struct i915_request *rq, u32 mode) { - u32 scratch_addr = - i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; + u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; u32 *cs, flags = 0; int ret; @@ -282,8 +321,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq) static int gen7_render_ring_flush(struct i915_request *rq, u32 mode) { - u32 scratch_addr = - i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; + u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; u32 *cs, flags = 0; /* @@ -495,6 +533,13 @@ static int init_ring_common(struct intel_engine_cs *engine) intel_engine_reset_breadcrumbs(engine); + if (HAS_LEGACY_SEMAPHORES(engine->i915)) { + I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); + I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); + if (HAS_VEBOX(dev_priv)) + I915_WRITE(RING_SYNC_2(engine->mmio_base), 0); + } + /* Enforce ordering by reading HEAD register back */ I915_READ_HEAD(engine); @@ -512,10 +557,11 @@ static int init_ring_common(struct intel_engine_cs *engine) /* Check that the ring offsets point within the ring! */ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); - intel_ring_update_space(ring); + + /* First wake the ring up to an empty/idle ring */ I915_WRITE_HEAD(engine, ring->head); - I915_WRITE_TAIL(engine, ring->tail); + I915_WRITE_TAIL(engine, ring->head); (void)I915_READ_TAIL(engine); I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID); @@ -540,6 +586,12 @@ static int init_ring_common(struct intel_engine_cs *engine) if (INTEL_GEN(dev_priv) > 2) I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); + /* Now awake, let it get started */ + if (ring->tail != ring->head) { + I915_WRITE_TAIL(engine, ring->tail); + (void)I915_READ_TAIL(engine); + } + /* Papering over lost _interrupts_ immediately following the restart */ intel_engine_wakeup(engine); out: @@ -574,7 +626,9 @@ static void skip_request(struct i915_request *rq) static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq) { - GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0); + GEM_TRACE("%s request global=%d, current=%d\n", + engine->name, rq ? 
rq->global_seqno : 0, + intel_engine_get_seqno(engine)); /* * Try to restore the logical GPU state to match the continuation @@ -606,7 +660,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq) { int ret; - ret = intel_ctx_workarounds_emit(rq); + ret = intel_engine_emit_ctx_wa(rq); if (ret != 0) return ret; @@ -624,8 +678,6 @@ static int init_render_ring(struct intel_engine_cs *engine) if (ret) return ret; - intel_whitelist_workarounds_apply(engine); - /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ if (IS_GEN(dev_priv, 4, 6)) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); @@ -707,9 +759,18 @@ static void cancel_requests(struct intel_engine_cs *engine) /* Mark all submitted requests as skipped. */ list_for_each_entry(request, &engine->timeline.requests, link) { GEM_BUG_ON(!request->global_seqno); - if (!i915_request_completed(request)) - dma_fence_set_error(&request->fence, -EIO); + + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &request->fence.flags)) + continue; + + dma_fence_set_error(&request->fence, -EIO); } + + intel_write_status_page(engine, + I915_GEM_HWS_INDEX, + intel_engine_last_submit(engine)); + /* Remaining _unready_ requests will be nop'ed when submitted */ spin_unlock_irqrestore(&engine->timeline.lock, flags); @@ -937,7 +998,7 @@ i965_emit_bb_start(struct i915_request *rq, } /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ -#define I830_BATCH_LIMIT (256*1024) +#define I830_BATCH_LIMIT SZ_256K #define I830_TLB_ENTRIES (2) #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) static int @@ -945,7 +1006,9 @@ i830_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) { - u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch); + u32 *cs, cs_offset = i915_scratch_offset(rq->i915); + + GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE); cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) @@ -1021,8 +1084,7 @@ i915_emit_bb_start(struct i915_request *rq, int intel_ring_pin(struct intel_ring *ring) { struct i915_vma *vma = ring->vma; - enum i915_map_type map = - HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC; + enum i915_map_type map = i915_coherent_map_type(vma->vm->i915); unsigned int flags; void *addr; int ret; @@ -1403,7 +1465,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) { struct i915_timeline *timeline; struct intel_ring *ring; - unsigned int size; int err; intel_engine_setup_common(engine); @@ -1428,21 +1489,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) GEM_BUG_ON(engine->buffer); engine->buffer = ring; - size = PAGE_SIZE; - if (HAS_BROKEN_CS_TLB(engine->i915)) - size = I830_WA_SIZE; - err = intel_engine_create_scratch(engine, size); - if (err) - goto err_unpin; - err = intel_engine_init_common(engine); if (err) - goto err_scratch; + goto err_unpin; return 0; -err_scratch: - intel_engine_cleanup_scratch(engine); err_unpin: intel_ring_unpin(ring); err_ring: @@ -1516,7 +1568,7 @@ static int flush_pd_dir(struct i915_request *rq) /* Stall until the page table load is complete */ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); - *cs++ = i915_ggtt_offset(engine->scratch); + *cs++ = i915_scratch_offset(rq->i915); *cs++ = MI_NOOP; intel_ring_advance(rq, cs); @@ -1625,7 +1677,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) /* Insert a delay before the next switch!
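A change repeated throughout this file is the retirement of the per-engine scratch page: i915_ggtt_offset(engine->scratch) becomes i915_scratch_offset(rq->i915), one device-global scratch VMA shared by all engines. The toy model below shows the shape of that helper with simplified stand-in types; per the hunks above, the real helper resolves the GGTT offset of the device-level gt.scratch object.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the real i915 types. */
struct i915_vma { uint32_t ggtt_offset; uint32_t size; };
struct drm_i915_private { struct { struct i915_vma *scratch; } gt; };

static inline uint32_t i915_scratch_offset(const struct drm_i915_private *i915)
{
	/* One device-global scratch page instead of one per engine. */
	return i915->gt.scratch->ggtt_offset;
}

int main(void)
{
	struct i915_vma scratch = { .ggtt_offset = 0x2000, .size = 4096 };
	struct drm_i915_private i915 = { .gt = { .scratch = &scratch } };

	printf("scratch at %#x (size %u)\n",
	       i915_scratch_offset(&i915), i915.gt.scratch->size);
	return 0;
}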
*/ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; *cs++ = i915_mmio_reg_offset(last_reg); - *cs++ = i915_ggtt_offset(engine->scratch); + *cs++ = i915_scratch_offset(rq->i915); *cs++ = MI_NOOP; } *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2dfa585712c2..72edaa7ff411 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */ #ifndef _INTEL_RINGBUFFER_H_ #define _INTEL_RINGBUFFER_H_ @@ -15,6 +15,7 @@ #include "i915_selftest.h" #include "i915_timeline.h" #include "intel_gpu_commands.h" +#include "intel_workarounds.h" struct drm_printer; struct i915_sched_attr; @@ -93,11 +94,11 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a) #define I915_MAX_SUBSLICES 8 #define instdone_slice_mask(dev_priv__) \ - (INTEL_GEN(dev_priv__) == 7 ? \ + (IS_GEN7(dev_priv__) ? \ 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask) #define instdone_subslice_mask(dev_priv__) \ - (INTEL_GEN(dev_priv__) == 7 ? \ + (IS_GEN7(dev_priv__) ? \ 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0]) #define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ @@ -190,11 +191,22 @@ enum intel_engine_id { }; struct i915_priolist { + struct list_head requests[I915_PRIORITY_COUNT]; struct rb_node node; - struct list_head requests; + unsigned long used; int priority; }; +#define priolist_for_each_request(it, plist, idx) \ + for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \ + list_for_each_entry(it, &(plist)->requests[idx], sched.link) + +#define priolist_for_each_request_consume(it, n, plist, idx) \ + for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \ + list_for_each_entry_safe(it, n, \ + &(plist)->requests[idx - 1], \ + sched.link) + struct st_preempt_hang { struct completion completion; bool inject_hang; @@ -302,13 +314,6 @@ struct intel_engine_execlists { struct rb_root_cached queue; /** - * @csb_read: control register for Context Switch buffer - * - * Note this register is always in mmio. - */ - u32 __iomem *csb_read; - - /** * @csb_write: control register for Context Switch buffer * * Note this register may be either mmio or HWSP shadow. @@ -328,15 +333,6 @@ struct intel_engine_execlists { u32 preempt_complete_status; /** - * @csb_write_reset: reset value for CSB write pointer - * - * As the CSB write pointer maybe either in HWSP or as a field - * inside an mmio register, we want to reprogram it slightly - * differently to avoid later confusion. - */ - u32 csb_write_reset; - - /** * @csb_head: context status buffer head */ u8 csb_head; @@ -440,7 +436,9 @@ struct intel_engine_cs { struct intel_hw_status_page status_page; struct i915_ctx_workarounds wa_ctx; - struct i915_vma *scratch; + struct i915_wa_list ctx_wa_list; + struct i915_wa_list wa_list; + struct i915_wa_list whitelist; u32 irq_keep_mask; /* always keep these interrupts */ u32 irq_enable_mask; /* bitmask to enable ring interrupt */ @@ -487,11 +485,10 @@ struct intel_engine_cs { */ void (*submit_request)(struct i915_request *rq); - /* Call when the priority on a request has changed and it and its + /* + * Call when the priority on a request has changed and it and its * dependencies may need rescheduling. Note the request itself may * not be ready to run! - * - * Called under the struct_mutex. 
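The i915_priolist rework in the header hunk above replaces a single request list with an array of per-priority lists plus a "used" bitmask, and priolist_for_each_request_consume() walks only the occupied buckets via ffs(). A userspace model of that consume loop, with the list plumbing reduced to occupancy counts:

#include <strings.h>	/* ffs() */
#include <stdio.h>

#define PRIORITY_COUNT 4

int main(void)
{
	int requests[PRIORITY_COUNT] = { 2, 0, 1, 0 };	/* bucket occupancy */
	unsigned int used = 0;
	int idx;

	/* Producers set a bit in 'used' whenever they add to a bucket. */
	for (idx = 0; idx < PRIORITY_COUNT; idx++)
		if (requests[idx])
			used |= 1u << idx;

	/* Consumer side, mirroring priolist_for_each_request_consume():
	 * find the lowest set bit with ffs(), drain that bucket, clear
	 * the bit, and repeat until no bucket is occupied. */
	while ((idx = ffs((int)used))) {
		used &= ~(1u << (idx - 1));
		printf("draining %d request(s) at priority index %d\n",
		       requests[idx - 1], idx - 1);
		requests[idx - 1] = 0;
	}
	return 0;
}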
*/ void (*schedule)(struct i915_request *request, const struct i915_sched_attr *attr); @@ -898,10 +895,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine); int intel_engine_init_common(struct intel_engine_cs *engine); void intel_engine_cleanup_common(struct intel_engine_cs *engine); -int intel_engine_create_scratch(struct intel_engine_cs *engine, - unsigned int size); -void intel_engine_cleanup_scratch(struct intel_engine_cs *engine); - int intel_init_render_ring_buffer(struct intel_engine_cs *engine); int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine); int intel_init_blt_ring_buffer(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 0fdabce647ab..4350a5270423 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -76,6 +76,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "TRANSCODER_C"; case POWER_DOMAIN_TRANSCODER_EDP: return "TRANSCODER_EDP"; + case POWER_DOMAIN_TRANSCODER_EDP_VDSC: + return "TRANSCODER_EDP_VDSC"; case POWER_DOMAIN_TRANSCODER_DSI_A: return "TRANSCODER_DSI_A"; case POWER_DOMAIN_TRANSCODER_DSI_C: @@ -208,7 +210,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, is_enabled = true; - for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) { + for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) { if (power_well->desc->always_on) continue; @@ -436,6 +438,15 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); hsw_wait_for_power_well_enable(dev_priv, power_well); + + /* Display WA #1178: icl */ + if (IS_ICELAKE(dev_priv) && + pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && + !intel_bios_is_port_edp(dev_priv, port)) { + val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx)); + val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; + I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val); + } } static void @@ -456,6 +467,25 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, hsw_wait_for_power_well_disable(dev_priv, power_well); } +#define ICL_AUX_PW_TO_CH(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) + +static void +icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx); + u32 val; + + val = I915_READ(DP_AUX_CH_CTL(aux_ch)); + val &= ~DP_AUX_CH_CTL_TBT_IO; + if (power_well->desc->hsw.is_tc_tbt) + val |= DP_AUX_CH_CTL_TBT_IO; + I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); + + hsw_power_well_enable(dev_priv, power_well); +} + /* * We should only use the power well if we explicitly asked the hardware to * enable it, so check if it's enabled and also check if we've requested it to @@ -465,11 +495,25 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + enum i915_power_well_id id = power_well->desc->id; int pw_idx = power_well->desc->hsw.idx; u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | HSW_PWR_WELL_CTL_STATE(pw_idx); + u32 val; - return (I915_READ(regs->driver) & mask) == mask; + val = I915_READ(regs->driver); + + /* + * On GEN9 big core due to a DMC bug the driver's request bits for PW1 + * and the MISC_IO PW will not be restored, so check instead for the + * BIOS's own
request bits, which are forced-on for these power wells + * when exiting DC5/6. + */ + if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) && + (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) + val |= I915_READ(regs->bios); + + return (val & mask) == mask; } static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) @@ -551,7 +595,9 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) u32 mask; mask = DC_STATE_EN_UPTO_DC5; - if (IS_GEN9_LP(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) + mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; + else if (IS_GEN9_LP(dev_priv)) mask |= DC_STATE_EN_DC9; else mask |= DC_STATE_EN_UPTO_DC6; @@ -624,8 +670,13 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv) assert_can_enable_dc9(dev_priv); DRM_DEBUG_KMS("Enabling DC9\n"); - - intel_power_sequencer_reset(dev_priv); + /* + * Power sequencer reset is not needed on + * platforms with South Display Engine on PCH, + * because PPS registers are always on. + */ + if (!HAS_PCH_SPLIT(dev_priv)) + intel_power_sequencer_reset(dev_priv); gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); } @@ -707,7 +758,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) assert_csr_loaded(dev_priv); } -static void skl_enable_dc6(struct drm_i915_private *dev_priv) +void skl_enable_dc6(struct drm_i915_private *dev_priv) { assert_can_enable_dc6(dev_priv); @@ -808,6 +859,14 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, if (IS_GEN9_LP(dev_priv)) bxt_verify_ddi_phy_power_wells(dev_priv); + + if (INTEL_GEN(dev_priv) >= 11) + /* + * DMC retains HW context only for port A, the other combo + * PHY's HW context for port B is lost after DC transitions, + * so we need to restore it manually. + */ + icl_combo_phys_init(dev_priv); } static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, @@ -1608,7 +1667,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, intel_display_power_domain_str(domain)); power_domains->domain_use_count[domain]--; - for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) + for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) intel_power_well_put(dev_priv, power_well); mutex_unlock(&power_domains->lock); @@ -1971,9 +2030,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, */ #define ICL_PW_2_POWER_DOMAINS ( \ ICL_PW_3_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \ BIT_ULL(POWER_DOMAIN_INIT)) /* - * - eDP/DSI VDSC * - KVMR (HW control) */ #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ @@ -2041,7 +2100,7 @@ static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { static const struct i915_power_well_desc i9xx_always_on_power_well[] = { { .name = "always-on", - .always_on = 1, + .always_on = true, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, .id = DISP_PW_ID_NONE, @@ -2058,7 +2117,7 @@ static const struct i915_power_well_ops i830_pipes_power_well_ops = { static const struct i915_power_well_desc i830_power_wells[] = { { .name = "always-on", - .always_on = 1, + .always_on = true, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, .id = DISP_PW_ID_NONE, @@ -2102,7 +2161,7 @@ static const struct i915_power_well_regs hsw_power_well_regs = { static const struct i915_power_well_desc hsw_power_wells[] = { { .name = "always-on", - .always_on = 1, + .always_on = true, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, .id = DISP_PW_ID_NONE, @@ -2123,7 +2182,7 @@ static const struct 
i915_power_well_desc hsw_power_wells[] = { static const struct i915_power_well_desc bdw_power_wells[] = { { .name = "always-on", - .always_on = 1, + .always_on = true, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, .id = DISP_PW_ID_NONE, @@ -2166,7 +2225,7 @@ static const struct i915_power_well_ops vlv_dpio_power_well_ops = { static const struct i915_power_well_desc vlv_power_wells[] = { { .name = "always-on", - .always_on = 1, + .always_on = true, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, .id = DISP_PW_ID_NONE, @@ -2242,7 +2301,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { static const struct i915_power_well_desc chv_power_wells[] = { { .name = "always-on", - .always_on = 1, + .always_on = true, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, .id = DISP_PW_ID_NONE, @@ -2293,7 +2352,7 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, static const struct i915_power_well_desc skl_power_wells[] = { { .name = "always-on", - .always_on = 1, + .always_on = true, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, .id = DISP_PW_ID_NONE, @@ -2301,6 +2360,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { { .name = "power well 1", /* Handled by the DMC firmware */ + .always_on = true, .domains = 0, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_1, @@ -2313,6 +2373,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { { .name = "MISC IO power well", /* Handled by the DMC firmware */ + .always_on = true, .domains = 0, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_MISC_IO, @@ -2385,13 +2446,15 @@ static const struct i915_power_well_desc skl_power_wells[] = { static const struct i915_power_well_desc bxt_power_wells[] = { { .name = "always-on", - .always_on = 1, + .always_on = true, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, .id = DISP_PW_ID_NONE, }, { .name = "power well 1", + /* Handled by the DMC firmware */ + .always_on = true, .domains = 0, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_1, @@ -2443,7 +2506,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = { static const struct i915_power_well_desc glk_power_wells[] = { { .name = "always-on", - .always_on = 1, + .always_on = true, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, .id = DISP_PW_ID_NONE, @@ -2451,6 +2514,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { { .name = "power well 1", /* Handled by the DMC firmware */ + .always_on = true, .domains = 0, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_1, @@ -2571,7 +2635,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { static const struct i915_power_well_desc cnl_power_wells[] = { { .name = "always-on", - .always_on = 1, + .always_on = true, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, .id = DISP_PW_ID_NONE, @@ -2579,6 +2643,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { { .name = "power well 1", /* Handled by the DMC firmware */ + .always_on = true, .domains = 0, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_1, @@ -2716,6 +2781,13 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { .is_enabled = hsw_power_well_enabled, }; +static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = { + .sync_hw = hsw_power_well_sync_hw, + .enable = icl_tc_phy_aux_power_well_enable, + .disable = hsw_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + 
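The icl_tc_phy_aux_power_well_ops table just defined reuses the hsw_* callbacks for everything except .enable, which is the general vtable pattern this file applies to every power-well type. A minimal self-contained model of that dispatch, with names and fields simplified (is_tc_tbt mirrors the desc->hsw.is_tc_tbt flag that selects TBT vs native IO routing in the patch):

#include <stdbool.h>
#include <stdio.h>

struct power_well;

struct power_well_ops {
	void (*enable)(struct power_well *well);
	bool (*is_enabled)(const struct power_well *well);
};

struct power_well {
	const char *name;
	bool state;
	bool is_tc_tbt;		/* mirrors desc->hsw.is_tc_tbt */
	const struct power_well_ops *ops;
};

static void hsw_enable(struct power_well *w) { w->state = true; }
static bool hsw_is_enabled(const struct power_well *w) { return w->state; }

static void tc_aux_enable(struct power_well *w)
{
	/* Program TBT vs native IO routing first, then fall through to the
	 * common enable path, as icl_tc_phy_aux_power_well_enable() does. */
	printf("%s: routing AUX to %s IO\n",
	       w->name, w->is_tc_tbt ? "TBT" : "native");
	hsw_enable(w);
}

static const struct power_well_ops tc_aux_ops = {
	.enable = tc_aux_enable,
	.is_enabled = hsw_is_enabled,
};

int main(void)
{
	struct power_well aux_tbt1 = { "AUX TBT1", false, true, &tc_aux_ops };

	aux_tbt1.ops->enable(&aux_tbt1);
	printf("%s enabled: %d\n",
	       aux_tbt1.name, aux_tbt1.ops->is_enabled(&aux_tbt1));
	return 0;
}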
static const struct i915_power_well_regs icl_aux_power_well_regs = { .bios = ICL_PWR_WELL_CTL_AUX1, .driver = ICL_PWR_WELL_CTL_AUX2, @@ -2731,7 +2803,7 @@ static const struct i915_power_well_regs icl_ddi_power_well_regs = { static const struct i915_power_well_desc icl_power_wells[] = { { .name = "always-on", - .always_on = 1, + .always_on = true, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, .id = DISP_PW_ID_NONE, @@ -2739,6 +2811,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { { .name = "power well 1", /* Handled by the DMC firmware */ + .always_on = true, .domains = 0, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_1, @@ -2749,6 +2822,12 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { + .name = "DC off", + .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { .name = "power well 2", .domains = ICL_PW_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, @@ -2760,12 +2839,6 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "DC off", - .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { .name = "power well 3", .domains = ICL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, @@ -2861,81 +2934,89 @@ static const struct i915_power_well_desc icl_power_wells[] = { { .name = "AUX C", .domains = ICL_AUX_C_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_C, + .hsw.is_tc_tbt = false, }, }, { .name = "AUX D", .domains = ICL_AUX_D_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_D, + .hsw.is_tc_tbt = false, }, }, { .name = "AUX E", .domains = ICL_AUX_E_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_E, + .hsw.is_tc_tbt = false, }, }, { .name = "AUX F", .domains = ICL_AUX_F_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_F, + .hsw.is_tc_tbt = false, }, }, { .name = "AUX TBT1", .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, + .hsw.is_tc_tbt = true, }, }, { .name = "AUX TBT2", .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, + .hsw.is_tc_tbt = true, }, }, { .name = "AUX TBT3", .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, + .hsw.is_tc_tbt = true, }, }, { .name = "AUX TBT4", .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, + .hsw.is_tc_tbt = true, }, }, { @@ -2969,17 +3050,20 @@ static uint32_t get_allowed_dc_mask(const 
struct drm_i915_private *dev_priv, int requested_dc; int max_dc; - if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) { + if (INTEL_GEN(dev_priv) >= 11) { max_dc = 2; - mask = 0; - } else if (IS_GEN9_LP(dev_priv)) { - max_dc = 1; /* * DC9 has a separate HW flow from the rest of the DC states, * not depending on the DMC firmware. It's needed by system * suspend/resume, so allow it unconditionally. */ mask = DC_STATE_EN_DC9; + } else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) { + max_dc = 2; + mask = 0; + } else if (IS_GEN9_LP(dev_priv)) { + max_dc = 1; + mask = DC_STATE_EN_DC9; } else { max_dc = 0; mask = 0; @@ -3075,12 +3159,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) */ if (IS_ICELAKE(dev_priv)) { err = set_power_wells(power_domains, icl_power_wells); - } else if (IS_HASWELL(dev_priv)) { - err = set_power_wells(power_domains, hsw_power_wells); - } else if (IS_BROADWELL(dev_priv)) { - err = set_power_wells(power_domains, bdw_power_wells); - } else if (IS_GEN9_BC(dev_priv)) { - err = set_power_wells(power_domains, skl_power_wells); } else if (IS_CANNONLAKE(dev_priv)) { err = set_power_wells(power_domains, cnl_power_wells); @@ -3092,13 +3170,18 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) */ if (!IS_CNL_WITH_PORT_F(dev_priv)) power_domains->power_well_count -= 2; - - } else if (IS_BROXTON(dev_priv)) { - err = set_power_wells(power_domains, bxt_power_wells); } else if (IS_GEMINILAKE(dev_priv)) { err = set_power_wells(power_domains, glk_power_wells); + } else if (IS_BROXTON(dev_priv)) { + err = set_power_wells(power_domains, bxt_power_wells); + } else if (IS_GEN9_BC(dev_priv)) { + err = set_power_wells(power_domains, skl_power_wells); } else if (IS_CHERRYVIEW(dev_priv)) { err = set_power_wells(power_domains, chv_power_wells); + } else if (IS_BROADWELL(dev_priv)) { + err = set_power_wells(power_domains, bdw_power_wells); + } else if (IS_HASWELL(dev_priv)) { + err = set_power_wells(power_domains, hsw_power_wells); } else if (IS_VALLEYVIEW(dev_priv)) { err = set_power_wells(power_domains, vlv_power_wells); } else if (IS_I830(dev_priv)) { @@ -3176,8 +3259,7 @@ static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv) void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, u8 req_slices) { - u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; - u32 val; + const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; bool ret; if (req_slices > intel_dbuf_max_slices(dev_priv)) { @@ -3188,7 +3270,6 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, if (req_slices == hw_enabled_slices || req_slices == 0) return; - val = I915_READ(DBUF_CTL_S2); if (req_slices > hw_enabled_slices) ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); else @@ -3240,18 +3321,40 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv) I915_WRITE(MBUS_ABOX_CTL, val); } +static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, + bool enable) +{ + i915_reg_t reg; + u32 reset_bits, val; + + if (IS_IVYBRIDGE(dev_priv)) { + reg = GEN7_MSG_CTL; + reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; + } else { + reg = HSW_NDE_RSTWRN_OPT; + reset_bits = RESET_PCH_HANDSHAKE_ENABLE; + } + + val = I915_READ(reg); + + if (enable) + val |= reset_bits; + else + val &= ~reset_bits; + + I915_WRITE(reg, val); +} + static void skl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { struct i915_power_domains *power_domains = &dev_priv->power_domains; struct i915_power_well *well; - 
uint32_t val; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); /* enable PCH reset handshake */ - val = I915_READ(HSW_NDE_RSTWRN_OPT); - I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE); + intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); /* enable PG1 and Misc I/O */ mutex_lock(&power_domains->lock); @@ -3307,7 +3410,6 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, { struct i915_power_domains *power_domains = &dev_priv->power_domains; struct i915_power_well *well; - uint32_t val; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -3317,9 +3419,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, * Move the handshake programming to initialization sequence. * Previously was left up to BIOS. */ - val = I915_READ(HSW_NDE_RSTWRN_OPT); - val &= ~RESET_PCH_HANDSHAKE_ENABLE; - I915_WRITE(HSW_NDE_RSTWRN_OPT, val); + intel_pch_reset_handshake(dev_priv, false); /* Enable PG1 */ mutex_lock(&power_domains->lock); @@ -3365,101 +3465,18 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv) usleep_range(10, 30); /* 10 us delay per Bspec */ } -enum { - PROCMON_0_85V_DOT_0, - PROCMON_0_95V_DOT_0, - PROCMON_0_95V_DOT_1, - PROCMON_1_05V_DOT_0, - PROCMON_1_05V_DOT_1, -}; - -static const struct cnl_procmon { - u32 dw1, dw9, dw10; -} cnl_procmon_values[] = { - [PROCMON_0_85V_DOT_0] = - { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, }, - [PROCMON_0_95V_DOT_0] = - { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, }, - [PROCMON_0_95V_DOT_1] = - { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, }, - [PROCMON_1_05V_DOT_0] = - { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, }, - [PROCMON_1_05V_DOT_1] = - { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, }, -}; - -/* - * CNL has just one set of registers, while ICL has two sets: one for port A and - * the other for port B. The CNL registers are equivalent to the ICL port A - * registers, that's why we call the ICL macros even though the function has CNL - * on its name. - */ -static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv, - enum port port) -{ - const struct cnl_procmon *procmon; - u32 val; - - val = I915_READ(ICL_PORT_COMP_DW3(port)); - switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { - default: - MISSING_CASE(val); - /* fall through */ - case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0: - procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0]; - break; - case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0: - procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0]; - break; - case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1: - procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1]; - break; - case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0: - procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0]; - break; - case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1: - procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1]; - break; - } - - val = I915_READ(ICL_PORT_COMP_DW1(port)); - val &= ~((0xff << 16) | 0xff); - val |= procmon->dw1; - I915_WRITE(ICL_PORT_COMP_DW1(port), val); - - I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9); - I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10); -} - static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { struct i915_power_domains *power_domains = &dev_priv->power_domains; struct i915_power_well *well; - u32 val; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); /* 1. 
Enable PCH Reset Handshake */ - val = I915_READ(HSW_NDE_RSTWRN_OPT); - val |= RESET_PCH_HANDSHAKE_ENABLE; - I915_WRITE(HSW_NDE_RSTWRN_OPT, val); + intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); - /* 2. Enable Comp */ - val = I915_READ(CHICKEN_MISC_2); - val &= ~CNL_COMP_PWR_DOWN; - I915_WRITE(CHICKEN_MISC_2, val); - - /* Dummy PORT_A to get the correct CNL register from the ICL macro */ - cnl_set_procmon_ref_values(dev_priv, PORT_A); - - val = I915_READ(CNL_PORT_COMP_DW0); - val |= COMP_INIT; - I915_WRITE(CNL_PORT_COMP_DW0, val); - - /* 3. */ - val = I915_READ(CNL_PORT_CL1CM_DW5); - val |= CL_POWER_DOWN_ENABLE; - I915_WRITE(CNL_PORT_CL1CM_DW5, val); + /* 2-3. */ + cnl_combo_phys_init(dev_priv); /* * 4. Enable Power Well 1 (PG1). @@ -3484,7 +3501,6 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; struct i915_power_well *well; - u32 val; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -3508,44 +3524,23 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) usleep_range(10, 30); /* 10 us delay per Bspec */ - /* 5. Disable Comp */ - val = I915_READ(CHICKEN_MISC_2); - val |= CNL_COMP_PWR_DOWN; - I915_WRITE(CHICKEN_MISC_2, val); + /* 5. */ + cnl_combo_phys_uninit(dev_priv); } -static void icl_display_core_init(struct drm_i915_private *dev_priv, - bool resume) +void icl_display_core_init(struct drm_i915_private *dev_priv, + bool resume) { struct i915_power_domains *power_domains = &dev_priv->power_domains; struct i915_power_well *well; - enum port port; - u32 val; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); /* 1. Enable PCH reset handshake. */ - val = I915_READ(HSW_NDE_RSTWRN_OPT); - val |= RESET_PCH_HANDSHAKE_ENABLE; - I915_WRITE(HSW_NDE_RSTWRN_OPT, val); - - for (port = PORT_A; port <= PORT_B; port++) { - /* 2. Enable DDI combo PHY comp. */ - val = I915_READ(ICL_PHY_MISC(port)); - val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; - I915_WRITE(ICL_PHY_MISC(port), val); - - cnl_set_procmon_ref_values(dev_priv, port); - - val = I915_READ(ICL_PORT_COMP_DW0(port)); - val |= COMP_INIT; - I915_WRITE(ICL_PORT_COMP_DW0(port), val); - - /* 3. Set power down enable. */ - val = I915_READ(ICL_PORT_CL_DW5(port)); - val |= CL_POWER_DOWN_ENABLE; - I915_WRITE(ICL_PORT_CL_DW5(port), val); - } + intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); + + /* 2-3. */ + icl_combo_phys_init(dev_priv); /* * 4. Enable Power Well 1 (PG1). @@ -3569,12 +3564,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, intel_csr_load_program(dev_priv); } -static void icl_display_core_uninit(struct drm_i915_private *dev_priv) +void icl_display_core_uninit(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; struct i915_power_well *well; - enum port port; - u32 val; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -3596,12 +3589,8 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) intel_power_well_disable(dev_priv, well); mutex_unlock(&power_domains->lock); - /* 5. Disable Comp */ - for (port = PORT_A; port <= PORT_B; port++) { - val = I915_READ(ICL_PHY_MISC(port)); - val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; - I915_WRITE(ICL_PHY_MISC(port), val); - } + /* 5. 
*/ + icl_combo_phys_uninit(dev_priv); } static void chv_phy_control_init(struct drm_i915_private *dev_priv) @@ -3759,7 +3748,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) mutex_lock(&power_domains->lock); vlv_cmnlane_wa(dev_priv); mutex_unlock(&power_domains->lock); - } + } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7) + intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); /* * Keep all power wells enabled for any dependent HW access during @@ -3953,14 +3943,6 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) int domains_count; bool enabled; - /* - * Power wells not belonging to any domain (like the MISC_IO - * and PW1 power wells) are under FW control, so ignore them, - * since their state can change asynchronously. - */ - if (!power_well->desc->domains) - continue; - enabled = power_well->desc->ops->is_enabled(dev_priv, power_well); if ((power_well->count || power_well->desc->always_on) != diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 701372e512a8..5805ec1aba12 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -105,11 +105,6 @@ struct intel_sdvo { bool has_hdmi_audio; bool rgb_quant_range_selectable; - /** - * This is sdvo fixed pannel mode pointer - */ - struct drm_display_mode *sdvo_lvds_fixed_mode; - /* DDC bus used by this SDVO encoder */ uint8_t ddc_bus; @@ -765,10 +760,14 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, args.height = height; args.interlace = 0; - if (IS_LVDS(intel_sdvo_connector) && - (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width || - intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height)) - args.scaled = 1; + if (IS_LVDS(intel_sdvo_connector)) { + const struct drm_display_mode *fixed_mode = + intel_sdvo_connector->base.panel.fixed_mode; + + if (fixed_mode->hdisplay != width || + fixed_mode->vdisplay != height) + args.scaled = 1; + } return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, @@ -1123,6 +1122,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n"); pipe_config->pipe_bpp = 8*3; + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; if (HAS_PCH_SPLIT(to_i915(encoder->base.dev))) pipe_config->has_pch_encoder = true; @@ -1144,7 +1144,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, pipe_config->sdvo_tv_clock = true; } else if (IS_LVDS(intel_sdvo_connector)) { if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, - intel_sdvo->sdvo_lvds_fixed_mode)) + intel_sdvo_connector->base.panel.fixed_mode)) return false; (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, @@ -1301,7 +1301,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, /* lvds has a special fixed output timing. 
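	 * The timings now come from the connector's generic panel state
	 * rather than a driver-private copy. A minimal sketch of the LVDS
	 * branch below, assuming a hypothetical 1024x768 panel:
	 *
	 *   const struct drm_display_mode *fixed =
	 *           intel_sdvo_connector->base.panel.fixed_mode;
	 *
	 *   intel_sdvo_get_dtd_from_mode(&output_dtd, fixed);
	 *   // output_dtd then carries the 1024x768 panel timings no matter
	 *   // which mode userspace asked for; the SDVO encoder scales to fit.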
*/ if (IS_LVDS(intel_sdvo_connector)) intel_sdvo_get_dtd_from_mode(&output_dtd, - intel_sdvo->sdvo_lvds_fixed_mode); + intel_sdvo_connector->base.panel.fixed_mode); else intel_sdvo_get_dtd_from_mode(&output_dtd, mode); if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) @@ -1642,10 +1642,13 @@ intel_sdvo_mode_valid(struct drm_connector *connector, return MODE_CLOCK_HIGH; if (IS_LVDS(intel_sdvo_connector)) { - if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay) + const struct drm_display_mode *fixed_mode = + intel_sdvo_connector->base.panel.fixed_mode; + + if (mode->hdisplay > fixed_mode->hdisplay) return MODE_PANEL; - if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay) + if (mode->vdisplay > fixed_mode->vdisplay) return MODE_PANEL; } @@ -2058,14 +2061,6 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) return !list_empty(&connector->probed_modes); } -static void intel_sdvo_destroy(struct drm_connector *connector) -{ - struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); - - drm_connector_cleanup(connector); - kfree(intel_sdvo_connector); -} - static int intel_sdvo_connector_atomic_get_property(struct drm_connector *connector, const struct drm_connector_state *state, @@ -2228,7 +2223,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { .atomic_set_property = intel_sdvo_connector_atomic_set_property, .late_register = intel_sdvo_connector_register, .early_unregister = intel_sdvo_connector_unregister, - .destroy = intel_sdvo_destroy, + .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_sdvo_connector_duplicate_state, }; @@ -2267,10 +2262,6 @@ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) { struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder)); - if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) - drm_mode_destroy(encoder->dev, - intel_sdvo->sdvo_lvds_fixed_mode); - i2c_del_adapter(&intel_sdvo->ddc); intel_encoder_destroy(encoder); } @@ -2583,7 +2574,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) return true; err: - intel_sdvo_destroy(connector); + intel_connector_destroy(connector); return false; } @@ -2663,19 +2654,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) list_for_each_entry(mode, &connector->probed_modes, head) { if (mode->type & DRM_MODE_TYPE_PREFERRED) { - intel_sdvo->sdvo_lvds_fixed_mode = + struct drm_display_mode *fixed_mode = drm_mode_duplicate(connector->dev, mode); + + intel_panel_init(&intel_connector->panel, + fixed_mode, NULL); break; } } - if (!intel_sdvo->sdvo_lvds_fixed_mode) + if (!intel_connector->panel.fixed_mode) goto err; return true; err: - intel_sdvo_destroy(connector); + intel_connector_destroy(connector); return false; } @@ -2745,7 +2739,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo) &dev->mode_config.connector_list, head) { if (intel_attached_encoder(connector) == &intel_sdvo->base) { drm_connector_unregister(connector); - intel_sdvo_destroy(connector); + intel_connector_destroy(connector); } } } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 5fd2f7bf3927..d2e003d8f3db 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -40,6 +40,7 @@ #include "intel_frontbuffer.h" #include <drm/i915_drm.h> #include "i915_drv.h" +#include <drm/drm_color_mgmt.h> int intel_usecs_to_scanlines(const struct 
drm_display_mode *adjusted_mode, int usecs) @@ -275,17 +276,24 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) src->y2 = (src_y + src_h) << 16; if (fb->format->is_yuv && - fb->format->format != DRM_FORMAT_NV12 && (src_x & 1 || src_w & 1)) { DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n", src_x, src_w); return -EINVAL; } + if (fb->format->is_yuv && + fb->format->num_planes > 1 && + (src_y & 1 || src_h & 1)) { + DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n", + src_y, src_h); + return -EINVAL; + } + return 0; } -unsigned int +static unsigned int skl_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation) @@ -302,108 +310,292 @@ skl_plane_max_stride(struct intel_plane *plane, return min(8192 * cpp, 32768); } -void -skl_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static void +skl_program_scaler(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + int scaler_id = plane_state->scaler_id; + const struct intel_scaler *scaler = + &crtc_state->scaler_state.scalers[scaler_id]; + int crtc_x = plane_state->base.dst.x1; + int crtc_y = plane_state->base.dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); + uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); + u16 y_hphase, uv_rgb_hphase; + u16 y_vphase, uv_rgb_vphase; + int hscale, vscale; + + hscale = drm_rect_calc_hscale(&plane_state->base.src, + &plane_state->base.dst, + 0, INT_MAX); + vscale = drm_rect_calc_vscale(&plane_state->base.src, + &plane_state->base.dst, + 0, INT_MAX); + + /* TODO: handle sub-pixel coordinates */ + if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 && + !icl_is_hdr_plane(plane)) { + y_hphase = skl_scaler_calc_phase(1, hscale, false); + y_vphase = skl_scaler_calc_phase(1, vscale, false); + + /* MPEG2 chroma siting convention */ + uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true); + uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false); + } else { + /* not used */ + y_hphase = 0; + y_vphase = 0; + + uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); + uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); + } + + I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), + PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode); + I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id), + PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase)); + I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id), + PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase)); + I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); + I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h); +} + +/* Preoffset values for YUV to RGB Conversion */ +#define PREOFF_YUV_TO_RGB_HI 0x1800 +#define PREOFF_YUV_TO_RGB_ME 0x1F00 +#define PREOFF_YUV_TO_RGB_LO 0x1800 + +#define ROFF(x) (((x) & 0xffff) << 16) +#define GOFF(x) (((x) & 0xffff) << 0) +#define BOFF(x) (((x) & 0xffff) << 16) + +static void +icl_program_input_csc(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + enum plane_id plane_id = plane->id; + + static const u16 input_csc_matrix[][9] = { + /* + * 
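Each u16 below is a CSC coefficient in the hardware's floating-point
+	 * register format (0x7800 encodes 1.0, matching the 1.000 entries in
+	 * the float matrices quoted with each table).
+	 *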
BT.601 full range YCbCr -> full range RGB + * The matrix required is : + * [1.000, 0.000, 1.371, + * 1.000, -0.336, -0.698, + * 1.000, 1.732, 0.0000] + */ + [DRM_COLOR_YCBCR_BT601] = { + 0x7AF8, 0x7800, 0x0, + 0x8B28, 0x7800, 0x9AC0, + 0x0, 0x7800, 0x7DD8, + }, + /* + * BT.709 full range YCbCr -> full range RGB + * The matrix required is : + * [1.000, 0.000, 1.574, + * 1.000, -0.187, -0.468, + * 1.000, 1.855, 0.0000] + */ + [DRM_COLOR_YCBCR_BT709] = { + 0x7C98, 0x7800, 0x0, + 0x9EF8, 0x7800, 0xABF8, + 0x0, 0x7800, 0x7ED8, + }, + }; + + /* Matrix for Limited Range to Full Range Conversion */ + static const u16 input_csc_matrix_lr[][9] = { + /* + * BT.601 Limted range YCbCr -> full range RGB + * The matrix required is : + * [1.164384, 0.000, 1.596370, + * 1.138393, -0.382500, -0.794598, + * 1.138393, 1.971696, 0.0000] + */ + [DRM_COLOR_YCBCR_BT601] = { + 0x7CC8, 0x7950, 0x0, + 0x8CB8, 0x7918, 0x9C40, + 0x0, 0x7918, 0x7FC8, + }, + /* + * BT.709 Limited range YCbCr -> full range RGB + * The matrix required is : + * [1.164, 0.000, 1.833671, + * 1.138393, -0.213249, -0.532909, + * 1.138393, 2.112402, 0.0000] + */ + [DRM_COLOR_YCBCR_BT709] = { + 0x7EA8, 0x7950, 0x0, + 0x8888, 0x7918, 0xADA8, + 0x0, 0x7918, 0x6870, + }, + }; + const u16 *csc; + + if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + csc = input_csc_matrix[plane_state->base.color_encoding]; + else + csc = input_csc_matrix_lr[plane_state->base.color_encoding]; + + I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) | + GOFF(csc[1])); + I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2])); + I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) | + GOFF(csc[4])); + I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5])); + I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) | + GOFF(csc[7])); + I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8])); + + I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), + PREOFF_YUV_TO_RGB_HI); + I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), + PREOFF_YUV_TO_RGB_ME); + I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), + PREOFF_YUV_TO_RGB_LO); + I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); + I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0); + I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0); +} + +static void +skl_program_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane, bool slave, u32 plane_ctl) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - u32 plane_ctl = plane_state->ctl; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - u32 surf_addr = plane_state->color_plane[0].offset; - u32 stride = skl_plane_stride(plane_state, 0); + u32 surf_addr = plane_state->color_plane[color_plane].offset; + u32 stride = skl_plane_stride(plane_state, color_plane); u32 aux_stride = skl_plane_stride(plane_state, 1); int crtc_x = plane_state->base.dst.x1; int crtc_y = plane_state->base.dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); - uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); - uint32_t x = plane_state->color_plane[0].x; - uint32_t y = plane_state->color_plane[0].y; + uint32_t x = plane_state->color_plane[color_plane].x; + uint32_t y = 
plane_state->color_plane[color_plane].y; uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; + struct intel_plane *linked = plane_state->linked_plane; + const struct drm_framebuffer *fb = plane_state->base.fb; + u8 alpha = plane_state->base.alpha >> 8; unsigned long irqflags; + u32 keymsk, keymax; /* Sizes are 0 based */ src_w--; src_h--; - crtc_w--; - crtc_h--; - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) - I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), - plane_state->color_ctl); + keymsk = key->channel_mask & 0x3ffffff; + if (alpha < 0xff) + keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; - if (key->flags) { - I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value); - I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), key->max_value); - I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), key->channel_mask); + /* The scaler will handle the output position */ + if (plane_state->scaler_id >= 0) { + crtc_x = 0; + crtc_y = 0; } - I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x); + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); + I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), (plane_state->color_plane[1].offset - surf_addr) | aux_stride); - I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id), - (plane_state->color_plane[1].y << 16) | - plane_state->color_plane[1].x); - - /* program plane scaler */ - if (plane_state->scaler_id >= 0) { - int scaler_id = plane_state->scaler_id; - const struct intel_scaler *scaler = - &crtc_state->scaler_state.scalers[scaler_id]; - u16 y_hphase, uv_rgb_hphase; - u16 y_vphase, uv_rgb_vphase; - - /* TODO: handle sub-pixel coordinates */ - if (fb->format->format == DRM_FORMAT_NV12) { - y_hphase = skl_scaler_calc_phase(1, false); - y_vphase = skl_scaler_calc_phase(1, false); - - /* MPEG2 chroma siting convention */ - uv_rgb_hphase = skl_scaler_calc_phase(2, true); - uv_rgb_vphase = skl_scaler_calc_phase(2, false); - } else { - /* not used */ - y_hphase = 0; - y_vphase = 0; - uv_rgb_hphase = skl_scaler_calc_phase(1, false); - uv_rgb_vphase = skl_scaler_calc_phase(1, false); + if (icl_is_hdr_plane(plane)) { + u32 cus_ctl = 0; + + if (linked) { + /* Enable and use MPEG-2 chroma siting */ + cus_ctl = PLANE_CUS_ENABLE | + PLANE_CUS_HPHASE_0 | + PLANE_CUS_VPHASE_SIGN_NEGATIVE | + PLANE_CUS_VPHASE_0_25; + + if (linked->id == PLANE_SPRITE5) + cus_ctl |= PLANE_CUS_PLANE_7; + else if (linked->id == PLANE_SPRITE4) + cus_ctl |= PLANE_CUS_PLANE_6; + else + MISSING_CASE(linked->id); } - I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), - PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode); - I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0); - I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id), - PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase)); - I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id), - PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase)); - I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); - I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), - ((crtc_w + 1) << 16)|(crtc_h + 1)); - - I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0); - } else { - I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); + I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl); } + if 
(INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), + plane_state->color_ctl); + + if (fb->format->is_yuv && icl_is_hdr_plane(plane)) + icl_program_input_csc(plane, crtc_state, plane_state); + + skl_write_plane_wm(plane, crtc_state); + + I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value); + I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk); + I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax); + + I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x); + + if (INTEL_GEN(dev_priv) < 11) + I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id), + (plane_state->color_plane[1].y << 16) | + plane_state->color_plane[1].x); + + /* + * The control register self-arms if the plane was previously + * disabled. Try to make the plane enable atomic by writing + * the control register just before the surface register. + */ I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl); I915_WRITE_FW(PLANE_SURF(pipe, plane_id), intel_plane_ggtt_offset(plane_state) + surf_addr); - POSTING_READ_FW(PLANE_SURF(pipe, plane_id)); + + if (!slave && plane_state->scaler_id >= 0) + skl_program_scaler(plane, crtc_state, plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -void -skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) +static void +skl_update_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + int color_plane = 0; + + if (plane_state->linked_plane) { + /* Program the UV plane */ + color_plane = 1; + } + + skl_program_plane(plane, crtc_state, plane_state, + color_plane, false, plane_state->ctl); +} + +static void +icl_update_slave(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + skl_program_plane(plane, crtc_state, plane_state, 0, true, + plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE); +} + +static void +skl_disable_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; @@ -412,15 +604,15 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); + skl_write_plane_wm(plane, crtc_state); + I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0); - POSTING_READ_FW(PLANE_SURF(pipe, plane_id)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -bool +static bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe) { @@ -613,7 +805,6 @@ vlv_update_plane(struct intel_plane *plane, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; u32 sprctl = plane_state->ctl; @@ -636,38 +827,41 @@ vlv_update_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - vlv_update_clrc(plane_state); + I915_WRITE_FW(SPSTRIDE(pipe, plane_id), + plane_state->color_plane[0].stride); + I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x); + I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w); + I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0); if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) chv_update_csc(plane_state); if (key->flags) { I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), 
key->min_value); - I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value); I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask); + I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value); } - I915_WRITE_FW(SPSTRIDE(pipe, plane_id), - plane_state->color_plane[0].stride); - I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x); - if (fb->modifier == I915_FORMAT_MOD_X_TILED) - I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x); - else - I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset); + I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset); + I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x); - I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0); - - I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w); + /* + * The control register self-arms if the plane was previously + * disabled. Try to make the plane enable atomic by writing + * the control register just before the surface register. + */ I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl); I915_WRITE_FW(SPSURF(pipe, plane_id), intel_plane_ggtt_offset(plane_state) + sprsurf_offset); - POSTING_READ_FW(SPSURF(pipe, plane_id)); + + vlv_update_clrc(plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) +vlv_disable_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -677,9 +871,7 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); I915_WRITE_FW(SPCNTR(pipe, plane_id), 0); - I915_WRITE_FW(SPSURF(pipe, plane_id), 0); - POSTING_READ_FW(SPSURF(pipe, plane_id)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -774,7 +966,6 @@ ivb_update_plane(struct intel_plane *plane, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; enum pipe pipe = plane->pipe; u32 sprctl = plane_state->ctl, sprscale = 0; u32 sprsurf_offset = plane_state->color_plane[0].offset; @@ -803,37 +994,42 @@ ivb_update_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride); + I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x); + I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); + if (IS_IVYBRIDGE(dev_priv)) + I915_WRITE_FW(SPRSCALE(pipe), sprscale); + if (key->flags) { I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value); - I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value); I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask); + I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value); } - I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride); - I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x); - /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET * register */ - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x); - else if (fb->modifier == I915_FORMAT_MOD_X_TILED) - I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x); - else + } else { I915_WRITE_FW(SPRLINOFF(pipe), linear_offset); + I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x); + } - I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); - if (IS_IVYBRIDGE(dev_priv)) - I915_WRITE_FW(SPRSCALE(pipe), sprscale); + /* + * The control register self-arms if the 
plane was previously + * disabled. Try to make the plane enable atomic by writing + * the control register just before the surface register. + */ I915_WRITE_FW(SPRCTL(pipe), sprctl); I915_WRITE_FW(SPRSURF(pipe), intel_plane_ggtt_offset(plane_state) + sprsurf_offset); - POSTING_READ_FW(SPRSURF(pipe)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) +ivb_disable_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -842,12 +1038,10 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); I915_WRITE_FW(SPRCTL(pipe), 0); - /* Can't leave the scaler enabled... */ + /* Disable the scaler */ if (IS_IVYBRIDGE(dev_priv)) I915_WRITE_FW(SPRSCALE(pipe), 0); - I915_WRITE_FW(SPRSURF(pipe), 0); - POSTING_READ_FW(SPRSURF(pipe)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -946,7 +1140,6 @@ g4x_update_plane(struct intel_plane *plane, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->base.fb; enum pipe pipe = plane->pipe; u32 dvscntr = plane_state->ctl, dvsscale = 0; u32 dvssurf_offset = plane_state->color_plane[0].offset; @@ -975,32 +1168,35 @@ g4x_update_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride); + I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x); + I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); + I915_WRITE_FW(DVSSCALE(pipe), dvsscale); + if (key->flags) { I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value); - I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value); I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask); + I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value); } - I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride); - I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x); + I915_WRITE_FW(DVSLINOFF(pipe), linear_offset); + I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x); - if (fb->modifier == I915_FORMAT_MOD_X_TILED) - I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x); - else - I915_WRITE_FW(DVSLINOFF(pipe), linear_offset); - - I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); - I915_WRITE_FW(DVSSCALE(pipe), dvsscale); + /* + * The control register self-arms if the plane was previously + * disabled. Try to make the plane enable atomic by writing + * the control register just before the surface register. 
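+	 * All of the DVS* registers written above are double buffered;
+	 * nothing should take effect until the DVSSURF write arms them.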
+ */ I915_WRITE_FW(DVSCNTR(pipe), dvscntr); I915_WRITE_FW(DVSSURF(pipe), intel_plane_ggtt_offset(plane_state) + dvssurf_offset); - POSTING_READ_FW(DVSSURF(pipe)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) +g4x_disable_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -1011,9 +1207,7 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) I915_WRITE_FW(DVSCNTR(pipe), 0); /* Disable the scaler */ I915_WRITE_FW(DVSSCALE(pipe), 0); - I915_WRITE_FW(DVSSURF(pipe), 0); - POSTING_READ_FW(DVSSURF(pipe)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -1039,6 +1233,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane, return ret; } +static bool intel_fb_scalable(const struct drm_framebuffer *fb) +{ + if (!fb) + return false; + + switch (fb->format->format) { + case DRM_FORMAT_C8: + return false; + default: + return true; + } +} + static int g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) @@ -1106,18 +1313,18 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state, { struct intel_plane *plane = to_intel_plane(plane_state->base.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - int max_scale, min_scale; + int min_scale = DRM_PLANE_HELPER_NO_SCALING; + int max_scale = DRM_PLANE_HELPER_NO_SCALING; int ret; - if (INTEL_GEN(dev_priv) < 7) { - min_scale = 1; - max_scale = 16 << 16; - } else if (IS_IVYBRIDGE(dev_priv)) { - min_scale = 1; - max_scale = 2 << 16; - } else { - min_scale = DRM_PLANE_HELPER_NO_SCALING; - max_scale = DRM_PLANE_HELPER_NO_SCALING; + if (intel_fb_scalable(plane_state->base.fb)) { + if (INTEL_GEN(dev_priv) < 7) { + min_scale = 1; + max_scale = 16 << 16; + } else if (IS_IVYBRIDGE(dev_priv)) { + min_scale = 1; + max_scale = 2 << 16; + } } ret = drm_atomic_helper_check_plane_state(&plane_state->base, @@ -1204,6 +1411,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state, static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->base.fb; unsigned int rotation = plane_state->base.rotation; struct drm_format_name_buf format_name; @@ -1232,13 +1441,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, } /* - * 90/270 is not allowed with RGB64 16:16:16:16, - * RGB 16-bit 5:6:5, and Indexed 8-bit. - * TBD: Add RGB64 case once its added in supported format list. + * 90/270 is not allowed with RGB64 16:16:16:16 and + * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards. + * TBD: Add RGB64 case once its added in supported format + * list. 
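+	 * E.g. with a 90/270 rotation, XRGB8888 passes on every gen handled
+	 * here, RGB565 passes only on gen11+, and C8 is always rejected by
+	 * the switch below.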
*/ switch (fb->format->format) { - case DRM_FORMAT_C8: case DRM_FORMAT_RGB565: + if (INTEL_GEN(dev_priv) >= 11) + break; + /* fall through */ + case DRM_FORMAT_C8: DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", drm_get_format_name(fb->format->format, &format_name)); @@ -1292,12 +1505,31 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s return 0; } -int skl_plane_check(struct intel_crtc_state *crtc_state, - struct intel_plane_state *plane_state) +static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int rotation = plane_state->base.rotation; + int src_w = drm_rect_width(&plane_state->base.src) >> 16; + + /* Display WA #1106 */ + if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 && + (rotation == DRM_MODE_ROTATE_270 || + rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) { + DRM_DEBUG_KMS("src width must be multiple of 4 for rotated NV12\n"); + return -EINVAL; + } + + return 0; +} + +static int skl_plane_check(struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->base.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - int max_scale, min_scale; + const struct drm_framebuffer *fb = plane_state->base.fb; + int min_scale = DRM_PLANE_HELPER_NO_SCALING; + int max_scale = DRM_PLANE_HELPER_NO_SCALING; int ret; ret = skl_plane_check_fb(crtc_state, plane_state); @@ -1305,15 +1537,9 @@ int skl_plane_check(struct intel_crtc_state *crtc_state, return ret; /* use scaler when colorkey is not required */ - if (!plane_state->ckey.flags) { - const struct drm_framebuffer *fb = plane_state->base.fb; - + if (!plane_state->ckey.flags && intel_fb_scalable(fb)) { min_scale = 1; - max_scale = skl_max_scale(crtc_state, - fb ? 
fb->format->format : 0); - } else { - min_scale = DRM_PLANE_HELPER_NO_SCALING; - max_scale = DRM_PLANE_HELPER_NO_SCALING; + max_scale = skl_max_scale(crtc_state, fb->format->format); } ret = drm_atomic_helper_check_plane_state(&plane_state->base, @@ -1334,10 +1560,18 @@ int skl_plane_check(struct intel_crtc_state *crtc_state, if (ret) return ret; + ret = skl_plane_check_nv12_rotation(plane_state); + if (ret) + return ret; + ret = skl_check_plane_surface(plane_state); if (ret) return ret; + /* HW only has 8 bits pixel precision, disable plane if invisible */ + if (!(plane_state->base.alpha >> 8)) + plane_state->base.visible = false; + plane_state->ctl = skl_plane_ctl(crtc_state, plane_state); if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) @@ -1502,24 +1736,30 @@ static const uint32_t vlv_plane_formats[] = { DRM_FORMAT_VYUY, }; -static uint32_t skl_plane_formats[] = { +static const uint32_t skl_plane_formats[] = { + DRM_FORMAT_C8, DRM_FORMAT_RGB565, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, }; -static uint32_t skl_planar_formats[] = { +static const uint32_t skl_planar_formats[] = { + DRM_FORMAT_C8, DRM_FORMAT_RGB565, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, @@ -1724,8 +1964,36 @@ static const struct drm_plane_funcs skl_plane_funcs = { .format_mod_supported = skl_plane_format_mod_supported, }; -bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id) +static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id) +{ + if (!HAS_FBC(dev_priv)) + return false; + + return pipe == PIPE_A && plane_id == PLANE_PRIMARY; +} + +static bool skl_plane_has_planar(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id) +{ + if (INTEL_GEN(dev_priv) >= 11) + return plane_id <= PLANE_SPRITE3; + + /* Display WA #0870: skl, bxt */ + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) + return false; + + if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C) + return false; + + if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0) + return false; + + return true; +} + +static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id) { if (plane_id == PLANE_CURSOR) return false; @@ -1742,109 +2010,173 @@ bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, } struct intel_plane * -intel_sprite_plane_create(struct drm_i915_private *dev_priv, - enum pipe pipe, int plane) +skl_universal_plane_create(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id) { - struct intel_plane *intel_plane = NULL; - struct intel_plane_state *state = NULL; - const struct drm_plane_funcs *plane_funcs; - unsigned long possible_crtcs; - const uint32_t *plane_formats; - const uint64_t *modifiers; + struct intel_plane *plane; + enum drm_plane_type plane_type; unsigned int supported_rotations; - int num_plane_formats; + unsigned int possible_crtcs; + const u64 *modifiers; + const u32 *formats; + int num_formats; int ret; - intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL); - if 
(!intel_plane) { - ret = -ENOMEM; - goto fail; + plane = intel_plane_alloc(); + if (IS_ERR(plane)) + return plane; + + plane->pipe = pipe; + plane->id = plane_id; + plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id); + + plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id); + if (plane->has_fbc) { + struct intel_fbc *fbc = &dev_priv->fbc; + + fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; } - state = intel_create_plane_state(&intel_plane->base); - if (!state) { - ret = -ENOMEM; - goto fail; + plane->max_stride = skl_plane_max_stride; + plane->update_plane = skl_update_plane; + plane->disable_plane = skl_disable_plane; + plane->get_hw_state = skl_plane_get_hw_state; + plane->check_plane = skl_plane_check; + if (icl_is_nv12_y_plane(plane_id)) + plane->update_slave = icl_update_slave; + + if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { + formats = skl_planar_formats; + num_formats = ARRAY_SIZE(skl_planar_formats); + } else { + formats = skl_plane_formats; + num_formats = ARRAY_SIZE(skl_plane_formats); } - intel_plane->base.state = &state->base; - if (INTEL_GEN(dev_priv) >= 9) { - state->scaler_id = -1; + plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); + if (plane->has_ccs) + modifiers = skl_plane_format_modifiers_ccs; + else + modifiers = skl_plane_format_modifiers_noccs; - intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, - PLANE_SPRITE0 + plane); + if (plane_id == PLANE_PRIMARY) + plane_type = DRM_PLANE_TYPE_PRIMARY; + else + plane_type = DRM_PLANE_TYPE_OVERLAY; - intel_plane->max_stride = skl_plane_max_stride; - intel_plane->update_plane = skl_update_plane; - intel_plane->disable_plane = skl_disable_plane; - intel_plane->get_hw_state = skl_plane_get_hw_state; - intel_plane->check_plane = skl_plane_check; + possible_crtcs = BIT(pipe); - if (skl_plane_has_planar(dev_priv, pipe, - PLANE_SPRITE0 + plane)) { - plane_formats = skl_planar_formats; - num_plane_formats = ARRAY_SIZE(skl_planar_formats); - } else { - plane_formats = skl_plane_formats; - num_plane_formats = ARRAY_SIZE(skl_plane_formats); - } + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, + possible_crtcs, &skl_plane_funcs, + formats, num_formats, modifiers, + plane_type, + "plane %d%c", plane_id + 1, + pipe_name(pipe)); + if (ret) + goto fail; + + supported_rotations = + DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | + DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; + + if (INTEL_GEN(dev_priv) >= 10) + supported_rotations |= DRM_MODE_REFLECT_X; + + drm_plane_create_rotation_property(&plane->base, + DRM_MODE_ROTATE_0, + supported_rotations); + + drm_plane_create_color_properties(&plane->base, + BIT(DRM_COLOR_YCBCR_BT601) | + BIT(DRM_COLOR_YCBCR_BT709), + BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | + BIT(DRM_COLOR_YCBCR_FULL_RANGE), + DRM_COLOR_YCBCR_BT709, + DRM_COLOR_YCBCR_LIMITED_RANGE); + + drm_plane_create_alpha_property(&plane->base); + drm_plane_create_blend_mode_property(&plane->base, + BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE)); + + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); - if (intel_plane->has_ccs) - modifiers = skl_plane_format_modifiers_ccs; - else - modifiers = skl_plane_format_modifiers_noccs; - - plane_funcs = &skl_plane_funcs; - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - intel_plane->max_stride = i9xx_plane_max_stride; - intel_plane->update_plane = vlv_update_plane; - intel_plane->disable_plane = vlv_disable_plane; - intel_plane->get_hw_state = vlv_plane_get_hw_state; - 
intel_plane->check_plane = vlv_sprite_check; - - plane_formats = vlv_plane_formats; - num_plane_formats = ARRAY_SIZE(vlv_plane_formats); + return plane; + +fail: + intel_plane_free(plane); + + return ERR_PTR(ret); +} + +struct intel_plane * +intel_sprite_plane_create(struct drm_i915_private *dev_priv, + enum pipe pipe, int sprite) +{ + struct intel_plane *plane; + const struct drm_plane_funcs *plane_funcs; + unsigned long possible_crtcs; + unsigned int supported_rotations; + const u64 *modifiers; + const u32 *formats; + int num_formats; + int ret; + + if (INTEL_GEN(dev_priv) >= 9) + return skl_universal_plane_create(dev_priv, pipe, + PLANE_SPRITE0 + sprite); + + plane = intel_plane_alloc(); + if (IS_ERR(plane)) + return plane; + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + plane->max_stride = i9xx_plane_max_stride; + plane->update_plane = vlv_update_plane; + plane->disable_plane = vlv_disable_plane; + plane->get_hw_state = vlv_plane_get_hw_state; + plane->check_plane = vlv_sprite_check; + + formats = vlv_plane_formats; + num_formats = ARRAY_SIZE(vlv_plane_formats); modifiers = i9xx_plane_format_modifiers; plane_funcs = &vlv_sprite_funcs; } else if (INTEL_GEN(dev_priv) >= 7) { - intel_plane->max_stride = g4x_sprite_max_stride; - intel_plane->update_plane = ivb_update_plane; - intel_plane->disable_plane = ivb_disable_plane; - intel_plane->get_hw_state = ivb_plane_get_hw_state; - intel_plane->check_plane = g4x_sprite_check; - - plane_formats = snb_plane_formats; - num_plane_formats = ARRAY_SIZE(snb_plane_formats); + plane->max_stride = g4x_sprite_max_stride; + plane->update_plane = ivb_update_plane; + plane->disable_plane = ivb_disable_plane; + plane->get_hw_state = ivb_plane_get_hw_state; + plane->check_plane = g4x_sprite_check; + + formats = snb_plane_formats; + num_formats = ARRAY_SIZE(snb_plane_formats); modifiers = i9xx_plane_format_modifiers; plane_funcs = &snb_sprite_funcs; } else { - intel_plane->max_stride = g4x_sprite_max_stride; - intel_plane->update_plane = g4x_update_plane; - intel_plane->disable_plane = g4x_disable_plane; - intel_plane->get_hw_state = g4x_plane_get_hw_state; - intel_plane->check_plane = g4x_sprite_check; + plane->max_stride = g4x_sprite_max_stride; + plane->update_plane = g4x_update_plane; + plane->disable_plane = g4x_disable_plane; + plane->get_hw_state = g4x_plane_get_hw_state; + plane->check_plane = g4x_sprite_check; modifiers = i9xx_plane_format_modifiers; if (IS_GEN6(dev_priv)) { - plane_formats = snb_plane_formats; - num_plane_formats = ARRAY_SIZE(snb_plane_formats); + formats = snb_plane_formats; + num_formats = ARRAY_SIZE(snb_plane_formats); plane_funcs = &snb_sprite_funcs; } else { - plane_formats = g4x_plane_formats; - num_plane_formats = ARRAY_SIZE(g4x_plane_formats); + formats = g4x_plane_formats; + num_formats = ARRAY_SIZE(g4x_plane_formats); plane_funcs = &g4x_sprite_funcs; } } - if (INTEL_GEN(dev_priv) >= 9) { - supported_rotations = - DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | - DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; - } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { + if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | DRM_MODE_REFLECT_X; @@ -1853,35 +2185,25 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; } - intel_plane->pipe = pipe; - intel_plane->i9xx_plane = plane; - intel_plane->id = PLANE_SPRITE0 + plane; - intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id); + plane->pipe = 
pipe; + plane->id = PLANE_SPRITE0 + sprite; + plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); - possible_crtcs = (1 << pipe); + possible_crtcs = BIT(pipe); - if (INTEL_GEN(dev_priv) >= 9) - ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base, - possible_crtcs, plane_funcs, - plane_formats, num_plane_formats, - modifiers, - DRM_PLANE_TYPE_OVERLAY, - "plane %d%c", plane + 2, pipe_name(pipe)); - else - ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base, - possible_crtcs, plane_funcs, - plane_formats, num_plane_formats, - modifiers, - DRM_PLANE_TYPE_OVERLAY, - "sprite %c", sprite_name(pipe, plane)); + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, + possible_crtcs, plane_funcs, + formats, num_formats, modifiers, + DRM_PLANE_TYPE_OVERLAY, + "sprite %c", sprite_name(pipe, sprite)); if (ret) goto fail; - drm_plane_create_rotation_property(&intel_plane->base, + drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, supported_rotations); - drm_plane_create_color_properties(&intel_plane->base, + drm_plane_create_color_properties(&plane->base, BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709), BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | @@ -1889,13 +2211,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); - drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs); + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); - return intel_plane; + return plane; fail: - kfree(state); - kfree(intel_plane); + intel_plane_free(plane); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index b5b04cb892e9..860f306a23ba 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -885,6 +885,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return false; + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; adjusted_mode->crtc_clock = tv_mode->clock; DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); pipe_config->pipe_bpp = 8*3; @@ -1377,17 +1378,10 @@ intel_tv_get_modes(struct drm_connector *connector) return count; } -static void -intel_tv_destroy(struct drm_connector *connector) -{ - drm_connector_cleanup(connector); - kfree(connector); -} - static const struct drm_connector_funcs intel_tv_connector_funcs = { .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, - .destroy = intel_tv_destroy, + .destroy = intel_connector_destroy, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index b1b3e81b6e24..b34c318b238d 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -376,7 +376,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915) intel_guc_init_params(guc); ret = intel_guc_fw_upload(guc); - if (ret == 0 || ret != -EAGAIN) + if (ret == 0 || ret != -ETIMEDOUT) break; DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and " diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h index 87910aa83267..0e3bd580e267 100644 --- a/drivers/gpu/drm/i915/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/intel_uc_fw.h @@ -115,9 +115,14 @@ static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw) return uc_fw->path != 
NULL; } +static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw) +{ + return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS; +} + static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) { - if (uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS) + if (intel_uc_fw_is_loaded(uc_fw)) uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING; } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 3ad302c66254..9289515108c3 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) FORCEWAKE_MEDIA_VEBOX_GEN11(i), FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); } - } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) { + } else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get_with_fallback; dev_priv->uncore.funcs.force_wake_put = fw_domains_put; diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index bba98cf83cbd..bf3662ad5fed 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -326,6 +326,13 @@ enum vbt_gmbus_ddi { ICL_DDC_BUS_PORT_4, }; +#define DP_AUX_A 0x40 +#define DP_AUX_B 0x10 +#define DP_AUX_C 0x20 +#define DP_AUX_D 0x30 +#define DP_AUX_E 0x50 +#define DP_AUX_F 0x60 + #define VBT_DP_MAX_LINK_RATE_HBR3 0 #define VBT_DP_MAX_LINK_RATE_HBR2 1 #define VBT_DP_MAX_LINK_RATE_HBR 2 diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c new file mode 100644 index 000000000000..c56ba0e04044 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_vdsc.c @@ -0,0 +1,1088 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2018 Intel Corporation + * + * Author: Gaurav K Singh <gaurav.k.singh@intel.com> + * Manasi Navare <manasi.d.navare@intel.com> + */ + +#include <drm/drmP.h> +#include <drm/i915_drm.h> +#include "i915_drv.h" +#include "intel_drv.h" + +enum ROW_INDEX_BPP { + ROW_INDEX_6BPP = 0, + ROW_INDEX_8BPP, + ROW_INDEX_10BPP, + ROW_INDEX_12BPP, + ROW_INDEX_15BPP, + MAX_ROW_INDEX +}; + +enum COLUMN_INDEX_BPC { + COLUMN_INDEX_8BPC = 0, + COLUMN_INDEX_10BPC, + COLUMN_INDEX_12BPC, + COLUMN_INDEX_14BPC, + COLUMN_INDEX_16BPC, + MAX_COLUMN_INDEX +}; + +#define DSC_SUPPORTED_VERSION_MIN 1 + +/* From DSC_v1.11 spec, rc_parameter_Set syntax element typically constant */ +static u16 rc_buf_thresh[] = { + 896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616, + 7744, 7872, 8000, 8064 +}; + +struct rc_parameters { + u16 initial_xmit_delay; + u8 first_line_bpg_offset; + u16 initial_offset; + u8 flatness_min_qp; + u8 flatness_max_qp; + u8 rc_quant_incr_limit0; + u8 rc_quant_incr_limit1; + struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES]; +}; + +/* + * Selected Rate Control Related Parameter Recommended Values + * from DSC_v1.11 spec & C Model release: DSC_model_20161212 + */ +static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = { +{ + /* 6BPP/8BPC */ + { 768, 15, 6144, 3, 13, 11, 11, { + { 0, 4, 0 }, { 1, 6, -2 }, { 3, 8, -2 }, { 4, 8, -4 }, + { 5, 9, -6 }, { 5, 9, -6 }, { 6, 9, -6 }, { 6, 10, -8 }, + { 7, 11, -8 }, { 8, 12, -10 }, { 9, 12, -10 }, { 10, 12, -12 }, + { 10, 12, -12 }, { 11, 12, -12 }, { 13, 14, -12 } + } + }, + /* 6BPP/10BPC */ + { 768, 15, 6144, 7, 17, 15, 15, { + { 0, 8, 0 }, { 3, 10, -2 }, { 7, 12, -2 }, { 8, 12, -4 }, + { 9, 13, -6 }, { 9, 13, -6 }, { 10, 13, -6 }, { 10, 14, -8 }, + { 11, 15, -8 }, { 12, 16, -10 }, { 13, 16, -10 }, + { 
14, 16, -12 }, { 14, 16, -12 }, { 15, 16, -12 }, + { 17, 18, -12 } + } + }, + /* 6BPP/12BPC */ + { 768, 15, 6144, 11, 21, 19, 19, { + { 0, 12, 0 }, { 5, 14, -2 }, { 11, 16, -2 }, { 12, 16, -4 }, + { 13, 17, -6 }, { 13, 17, -6 }, { 14, 17, -6 }, { 14, 18, -8 }, + { 15, 19, -8 }, { 16, 20, -10 }, { 17, 20, -10 }, + { 18, 20, -12 }, { 18, 20, -12 }, { 19, 20, -12 }, + { 21, 22, -12 } + } + }, + /* 6BPP/14BPC */ + { 768, 15, 6144, 15, 25, 23, 27, { + { 0, 16, 0 }, { 7, 18, -2 }, { 15, 20, -2 }, { 16, 20, -4 }, + { 17, 21, -6 }, { 17, 21, -6 }, { 18, 21, -6 }, { 18, 22, -8 }, + { 19, 23, -8 }, { 20, 24, -10 }, { 21, 24, -10 }, + { 22, 24, -12 }, { 22, 24, -12 }, { 23, 24, -12 }, + { 25, 26, -12 } + } + }, + /* 6BPP/16BPC */ + { 768, 15, 6144, 19, 29, 27, 27, { + { 0, 20, 0 }, { 9, 22, -2 }, { 19, 24, -2 }, { 20, 24, -4 }, + { 21, 25, -6 }, { 21, 25, -6 }, { 22, 25, -6 }, { 22, 26, -8 }, + { 23, 27, -8 }, { 24, 28, -10 }, { 25, 28, -10 }, + { 26, 28, -12 }, { 26, 28, -12 }, { 27, 28, -12 }, + { 29, 30, -12 } + } + }, +}, +{ + /* 8BPP/8BPC */ + { 512, 12, 6144, 3, 12, 11, 11, { + { 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, { 5, 12, -12 }, + { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 } + } + }, + /* 8BPP/10BPC */ + { 512, 12, 6144, 7, 16, 15, 15, { + { 0, 4, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 }, + { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 }, + { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 } + } + }, + /* 8BPP/12BPC */ + { 512, 12, 6144, 11, 20, 19, 19, { + { 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 }, + { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 }, + { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 }, + { 21, 23, -12 } + } + }, + /* 8BPP/14BPC */ + { 512, 12, 6144, 15, 24, 23, 23, { + { 0, 12, 0 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 }, + { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, + { 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 }, + { 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 }, + { 24, 25, -12 } + } + }, + /* 8BPP/16BPC */ + { 512, 12, 6144, 19, 28, 27, 27, { + { 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 }, + { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, + { 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 }, + { 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 }, + { 28, 29, -12 } + } + }, +}, +{ + /* 10BPP/8BPC */ + { 410, 15, 5632, 3, 12, 11, 11, { + { 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 }, + { 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 } + } + }, + /* 10BPP/10BPC */ + { 410, 15, 5632, 7, 16, 15, 15, { + { 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 }, + { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 }, + { 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 } + } + }, + /* 10BPP/12BPC */ + { 410, 15, 5632, 11, 20, 19, 19, { + { 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 }, + { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 }, + { 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 }, + { 19, 20, -12 } + } + }, + /* 10BPP/14BPC */ + { 410, 15, 5632, 15, 24, 23, 23, { + { 0, 11, 2 }, { 5, 
13, 0 }, { 11, 15, 0 }, { 13, 18, -2 }, + { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, + { 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 }, + { 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 }, + { 23, 24, -12 } + } + }, + /* 10BPP/16BPC */ + { 410, 15, 5632, 19, 28, 27, 27, { + { 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 }, + { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, + { 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 }, + { 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 }, + { 27, 28, -12 } + } + }, +}, +{ + /* 12BPP/8BPC */ + { 341, 15, 2048, 3, 12, 11, 11, { + { 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 }, + { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 }, + { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, + { 5, 12, -12 }, { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 } + } + }, + /* 12BPP/10BPC */ + { 341, 15, 2048, 7, 16, 15, 15, { + { 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 }, + { 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 }, + { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 }, + { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 } + } + }, + /* 12BPP/12BPC */ + { 341, 15, 2048, 11, 20, 19, 19, { + { 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 }, + { 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 }, + { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 }, + { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 }, + { 21, 23, -12 } + } + }, + /* 12BPP/14BPC */ + { 341, 15, 2048, 15, 24, 23, 23, { + { 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 }, + { 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 }, + { 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 }, + { 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 }, + { 22, 23, -12 } + } + }, + /* 12BPP/16BPC */ + { 341, 15, 2048, 19, 28, 27, 27, { + { 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 }, + { 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 }, + { 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 }, + { 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 }, + { 26, 27, -12 } + } + }, +}, +{ + /* 15BPP/8BPC */ + { 273, 15, 2048, 3, 12, 11, 11, { + { 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 }, + { 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 }, + { 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 }, + { 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 } + } + }, + /* 15BPP/10BPC */ + { 273, 15, 2048, 7, 16, 15, 15, { + { 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 }, + { 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 }, + { 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 }, + { 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 } + } + }, + /* 15BPP/12BPC */ + { 273, 15, 2048, 11, 20, 19, 19, { + { 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 }, + { 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 }, + { 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 }, + { 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 }, + { 16, 17, -12 } + } + }, + /* 15BPP/14BPC */ + { 273, 15, 2048, 15, 24, 23, 23, { + { 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 }, + { 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 }, + { 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 }, + { 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 }, + { 20, 21, -12 } + } + }, + /* 15BPP/16BPC */ + { 273, 15, 2048, 19, 28, 27, 27, { + { 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 }, + { 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 }, + { 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 }, + { 21, 23, -10 }, { 21, 23, -12 }, { 23, 
24, -12 }, + { 24, 25, -12 } + } + } +} + +}; + +static int get_row_index_for_rc_params(u16 compressed_bpp) +{ + switch (compressed_bpp) { + case 6: + return ROW_INDEX_6BPP; + case 8: + return ROW_INDEX_8BPP; + case 10: + return ROW_INDEX_10BPP; + case 12: + return ROW_INDEX_12BPP; + case 15: + return ROW_INDEX_15BPP; + default: + return -EINVAL; + } +} + +static int get_column_index_for_rc_params(u8 bits_per_component) +{ + switch (bits_per_component) { + case 8: + return COLUMN_INDEX_8BPC; + case 10: + return COLUMN_INDEX_10BPC; + case 12: + return COLUMN_INDEX_12BPC; + case 14: + return COLUMN_INDEX_14BPC; + case 16: + return COLUMN_INDEX_16BPC; + default: + return -EINVAL; + } +} + +static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) +{ + unsigned long groups_per_line = 0; + unsigned long groups_total = 0; + unsigned long num_extra_mux_bits = 0; + unsigned long slice_bits = 0; + unsigned long hrd_delay = 0; + unsigned long final_scale = 0; + unsigned long rbs_min = 0; + + /* Number of groups used to code each line of a slice */ + groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width, + DSC_RC_PIXELS_PER_GROUP); + + /* chunksize in Bytes */ + vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width * + vdsc_cfg->bits_per_pixel, + (8 * 16)); + + if (vdsc_cfg->convert_rgb) + num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size + + (4 * vdsc_cfg->bits_per_component + 4) + - 2); + else + num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size + + (4 * vdsc_cfg->bits_per_component + 4) + + 2 * (4 * vdsc_cfg->bits_per_component) - 2; + /* Number of bits in one Slice */ + slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height; + + while ((num_extra_mux_bits > 0) && + ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size)) + num_extra_mux_bits--; + + if (groups_per_line < vdsc_cfg->initial_scale_value - 8) + vdsc_cfg->initial_scale_value = groups_per_line + 8; + + /* scale_decrement_interval calculation according to DSC spec 1.11 */ + if (vdsc_cfg->initial_scale_value > 8) + vdsc_cfg->scale_decrement_interval = groups_per_line / + (vdsc_cfg->initial_scale_value - 8); + else + vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX; + + vdsc_cfg->final_offset = vdsc_cfg->rc_model_size - + (vdsc_cfg->initial_xmit_delay * + vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits; + + if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) { + DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n"); + return -ERANGE; + } + + final_scale = (vdsc_cfg->rc_model_size * 8) / + (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset); + if (vdsc_cfg->slice_height > 1) + /* + * NflBpgOffset is 16 bit value with 11 fractional bits + * hence we multiply by 2^11 for preserving the + * fractional part + */ + vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11), + (vdsc_cfg->slice_height - 1)); + else + vdsc_cfg->nfl_bpg_offset = 0; + + /* 2^16 - 1 */ + if (vdsc_cfg->nfl_bpg_offset > 65535) { + DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n"); + return -ERANGE; + } + + /* Number of groups used to code the entire slice */ + groups_total = groups_per_line * vdsc_cfg->slice_height; + + /* slice_bpg_offset is 16 bit value with 11 fractional bits */ + vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size - + vdsc_cfg->initial_offset + + num_extra_mux_bits) << 11), + groups_total); + + if (final_scale > 9) { + /* + * ScaleIncrementInterval = + * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 
+ + if (final_scale > 9) { + /* + * ScaleIncrementInterval = + * finaloffset / ((NflBpgOffset + SliceBpgOffset) * 8 * (finalscale - 1.125)) + * as (NflBpgOffset + SliceBpgOffset) is stored with an 11 bit + * fractional part, we scale the numerator by 2^11 to compensate + */ + vdsc_cfg->scale_increment_interval = + (vdsc_cfg->final_offset * (1 << 11)) / + ((vdsc_cfg->nfl_bpg_offset + + vdsc_cfg->slice_bpg_offset) * + (final_scale - 9)); + } else { + /* + * If finalScaleValue is less than or equal to 9, a value of 0 should + * be used to disable the scale increment at the end of the slice + */ + vdsc_cfg->scale_increment_interval = 0; + } + + if (vdsc_cfg->scale_increment_interval > 65535) { + DRM_DEBUG_KMS("ScaleIncrementInterval is too large for this slice height\n"); + return -ERANGE; + } + + /* + * DSC spec mentions that bits_per_pixel specifies the target + * bits/pixel (bpp) rate that is used by the encoder, + * in steps of 1/16 of a bit per pixel + */ + rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset + + DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay * + vdsc_cfg->bits_per_pixel, 16) + + groups_per_line * vdsc_cfg->first_line_bpg_offset; + + hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel); + vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; + vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; + + return 0; +}
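
The HRD bookkeeping at the end of the function round-trips rbs_min through bits_per_pixel. A quick standalone check of the hrd_delay/rc_bits/initial_dec_delay chain, with illustrative numbers (8.0 bpp stored as 128 sixteenths, a 512 pixel initial transmit delay, an rbs_min of 6000 bits):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long rbs_min = 6000;	/* bits, illustrative */
	unsigned long bpp_x16 = 128;	/* 8.0 bpp in 1/16 bpp units */
	unsigned long xmit = 512;	/* initial_xmit_delay in pixels */
	unsigned long hrd_delay = DIV_ROUND_UP(rbs_min * 16, bpp_x16);

	/* prints 750, 6000 and 238 */
	printf("hrd_delay=%lu rc_bits=%lu initial_dec_delay=%lu\n",
	       hrd_delay, hrd_delay * bpp_x16 / 16, hrd_delay - xmit);
	return 0;
}
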
+ +int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config) +{ + struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg; + u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp; + u8 i = 0; + int row_index = 0; + int column_index = 0; + u8 line_buf_depth = 0; + + vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay; + vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay; + vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width, + pipe_config->dsc_params.slice_count); + /* + * Slice Height of 8 works for all currently available panels. So start + * with that if pic_height is an integral multiple of 8. + * Eventually add logic to try multiple slice heights. + */ + if (vdsc_cfg->pic_height % 8 == 0) + vdsc_cfg->slice_height = 8; + else if (vdsc_cfg->pic_height % 4 == 0) + vdsc_cfg->slice_height = 4; + else + vdsc_cfg->slice_height = 2; + + /* Values filled from DSC Sink DPCD */ + vdsc_cfg->dsc_version_major = + (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & + DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; + vdsc_cfg->dsc_version_minor = + min(DSC_SUPPORTED_VERSION_MIN, + (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & + DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); + + vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & + DP_DSC_RGB; + + line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); + if (!line_buf_depth) { + DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n"); + return -EINVAL; + } + if (vdsc_cfg->dsc_version_minor == 2) + vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? + DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; + else + vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? + DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; + + /* Gen 11 does not support YCbCr */ + vdsc_cfg->enable422 = false; + /* Gen 11 does not support VBR */ + vdsc_cfg->vbr_enable = false; + vdsc_cfg->block_pred_enable = + intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & + DP_DSC_BLK_PREDICTION_IS_SUPPORTED; + + /* Gen 11 only supports integral values of bpp */ + vdsc_cfg->bits_per_pixel = compressed_bpp << 4; + vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3; + + for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) { + /* + * Six 0s are appended to the LSB of each threshold value + * internally in h/w. + * Only 8 bits are allowed for programming RcBufThreshold + */ + vdsc_cfg->rc_buf_thresh[i] = rc_buf_thresh[i] >> 6; + } + + /* + * For 6bpp, RC buffer thresholds 12 and 13 need different values, + * as per the C model + */ + if (compressed_bpp == 6) { + vdsc_cfg->rc_buf_thresh[12] = 0x7C; + vdsc_cfg->rc_buf_thresh[13] = 0x7D; + } + + row_index = get_row_index_for_rc_params(compressed_bpp); + column_index = + get_column_index_for_rc_params(vdsc_cfg->bits_per_component); + + if (row_index < 0 || column_index < 0) + return -EINVAL; + + vdsc_cfg->first_line_bpg_offset = + rc_params[row_index][column_index].first_line_bpg_offset; + vdsc_cfg->initial_xmit_delay = + rc_params[row_index][column_index].initial_xmit_delay; + vdsc_cfg->initial_offset = + rc_params[row_index][column_index].initial_offset; + vdsc_cfg->flatness_min_qp = + rc_params[row_index][column_index].flatness_min_qp; + vdsc_cfg->flatness_max_qp = + rc_params[row_index][column_index].flatness_max_qp; + vdsc_cfg->rc_quant_incr_limit0 = + rc_params[row_index][column_index].rc_quant_incr_limit0; + vdsc_cfg->rc_quant_incr_limit1 = + rc_params[row_index][column_index].rc_quant_incr_limit1; + + for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { + vdsc_cfg->rc_range_params[i].range_min_qp = + rc_params[row_index][column_index].rc_range_params[i].range_min_qp; + vdsc_cfg->rc_range_params[i].range_max_qp = + rc_params[row_index][column_index].rc_range_params[i].range_max_qp; + /* + * Range BPG Offset uses 2's complement and is only 6 bits wide, + * so mask it to keep only 6 bits. + */ + vdsc_cfg->rc_range_params[i].range_bpg_offset = + rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset & + DSC_RANGE_BPG_OFFSET_MASK; + } + + /* + * BitsPerComponent value determines mux_word_size: + * When BitsPerComponent is 12bpc, muxWordSize will be equal to 64 bits; + * when BitsPerComponent is 8 or 10bpc, muxWordSize will be equal to + * 48 bits + */ + if (vdsc_cfg->bits_per_component == 8 || + vdsc_cfg->bits_per_component == 10) + vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC; + else if (vdsc_cfg->bits_per_component == 12) + vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC; + + /* RC_MODEL_SIZE is a constant across all configurations */ + vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; + /* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */ + vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) / + (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset); + + return intel_compute_rc_parameters(vdsc_cfg); +} + +enum intel_display_power_domain +intel_dsc_power_domain(const struct intel_crtc_state *crtc_state) +{ + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + /* + * On ICL, VDSC/joining for the eDP transcoder uses a separate power + * well, PW2. This requires the POWER_DOMAIN_TRANSCODER_EDP_VDSC + * power domain.
+ * For any other transcoder, VDSC/joining uses the power well associated + * with the pipe/transcoder in use. Hence another reference on the + * transcoder power domain will suffice. + */ + if (cpu_transcoder == TRANSCODER_EDP) + return POWER_DOMAIN_TRANSCODER_EDP_VDSC; + else + return POWER_DOMAIN_TRANSCODER(cpu_transcoder); +} + +static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg; + enum pipe pipe = crtc->pipe; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 pps_val = 0; + u32 rc_buf_thresh_dword[4]; + u32 rc_range_params_dword[8]; + u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1; + int i = 0; + + /* Populate PICTURE_PARAMETER_SET_0 registers */ + pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor << + DSC_VER_MIN_SHIFT | + vdsc_cfg->bits_per_component << DSC_BPC_SHIFT | + vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT; + if (vdsc_cfg->block_pred_enable) + pps_val |= DSC_BLOCK_PREDICTION; + if (vdsc_cfg->convert_rgb) + pps_val |= DSC_COLOR_SPACE_CONVERSION; + if (vdsc_cfg->enable422) + pps_val |= DSC_422_ENABLE; + if (vdsc_cfg->vbr_enable) + pps_val |= DSC_VBR_ENABLE; + DRM_INFO("PPS0 = 0x%08x\n", pps_val); + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val); + /* + * If 2 VDSC instances are needed, configure PPS for second + * VDSC + */ + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val); + } else { + I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val); + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe), + pps_val); + } + + /* Populate PICTURE_PARAMETER_SET_1 registers */ + pps_val = 0; + pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel); + DRM_INFO("PPS1 = 0x%08x\n", pps_val); + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val); + /* + * If 2 VDSC instances are needed, configure PPS for second + * VDSC + */ + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val); + } else { + I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val); + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe), + pps_val); + } + + /* Populate PICTURE_PARAMETER_SET_2 registers */ + pps_val = 0; + pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | + DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); + DRM_INFO("PPS2 = 0x%08x\n", pps_val); + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val); + /* + * If 2 VDSC instances are needed, configure PPS for second + * VDSC + */ + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val); + } else { + I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val); + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe), + pps_val); + } + + /* Populate PICTURE_PARAMETER_SET_3 registers */ + pps_val = 0; + pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) | + DSC_SLICE_WIDTH(vdsc_cfg->slice_width); + DRM_INFO("PPS3 = 0x%08x\n", pps_val); + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val); + /* + * If 2 VDSC instances are needed, configure PPS for second + * VDSC
+ */ + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val); + } else { + I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val); + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe), + pps_val); + } + + /* Populate PICTURE_PARAMETER_SET_4 registers */ + pps_val = 0; + pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) | + DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay); + DRM_INFO("PPS4 = 0x%08x\n", pps_val); + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val); + /* + * If 2 VDSC instances are needed, configure PPS for second + * VDSC + */ + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val); + } else { + I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val); + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe), + pps_val); + } + + /* Populate PICTURE_PARAMETER_SET_5 registers */ + pps_val = 0; + pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) | + DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval); + DRM_INFO("PPS5 = 0x%08x\n", pps_val); + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val); + /* + * If 2 VDSC instances are needed, configure PPS for second + * VDSC + */ + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val); + } else { + I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val); + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe), + pps_val); + } + + /* Populate PICTURE_PARAMETER_SET_6 registers */ + pps_val = 0; + pps_val |= DSC_INITIAL_SCALE_VALUE(vdsc_cfg->initial_scale_value) | + DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) | + DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) | + DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp); + DRM_INFO("PPS6 = 0x%08x\n", pps_val); + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val); + /* + * If 2 VDSC instances are needed, configure PPS for second + * VDSC + */ + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val); + } else { + I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val); + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe), + pps_val); + } + + /* Populate PICTURE_PARAMETER_SET_7 registers */ + pps_val = 0; + pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) | + DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset); + DRM_INFO("PPS7 = 0x%08x\n", pps_val); + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val); + /* + * If 2 VDSC instances are needed, configure PPS for second + * VDSC + */ + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val); + } else { + I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val); + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe), + pps_val); + } + + /* Populate PICTURE_PARAMETER_SET_8 registers */ + pps_val = 0; + pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) | + DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset); + DRM_INFO("PPS8 = 0x%08x\n", pps_val); + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val); + /* + * If 2 VDSC instances are needed, configure PPS for second + * VDSC + */ + if (crtc_state->dsc_params.dsc_split) + 
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val); + } else { + I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val); + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe), + pps_val); + } + + /* Populate PICTURE_PARAMETER_SET_9 registers */ + pps_val = 0; + pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) | + DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST); + DRM_INFO("PPS9 = 0x%08x\n", pps_val); + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val); + /* + * If 2 VDSC instances are needed, configure PPS for second + * VDSC + */ + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val); + } else { + I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val); + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe), + pps_val); + } + + /* Populate PICTURE_PARAMETER_SET_10 registers */ + pps_val = 0; + pps_val |= DSC_RC_QUANT_INC_LIMIT0(vdsc_cfg->rc_quant_incr_limit0) | + DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) | + DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) | + DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST); + DRM_INFO("PPS10 = 0x%08x\n", pps_val); + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val); + /* + * If 2 VDSC instances are needed, configure PPS for second + * VDSC + */ + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val); + } else { + I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val); + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe), + pps_val); + } + + /* Populate Picture parameter set 16 */ + pps_val = 0; + pps_val |= DSC_SLICE_CHUNK_SIZE(vdsc_cfg->slice_chunk_size) | + DSC_SLICE_PER_LINE((vdsc_cfg->pic_width / num_vdsc_instances) / + vdsc_cfg->slice_width) | + DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height / + vdsc_cfg->slice_height); + DRM_INFO("PPS16 = 0x%08x\n", pps_val); + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val); + /* + * If 2 VDSC instances are needed, configure PPS for second + * VDSC + */ + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val); + } else { + I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val); + if (crtc_state->dsc_params.dsc_split) + I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe), + pps_val); + } + + /* Populate the RC_BUF_THRESH registers */ + memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword)); + for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) { + rc_buf_thresh_dword[i / 4] |= + (u32)(vdsc_cfg->rc_buf_thresh[i] << + BITS_PER_BYTE * (i % 4)); + DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i, + rc_buf_thresh_dword[i / 4]); + } + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]); + I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]); + I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]); + I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]); + if (crtc_state->dsc_params.dsc_split) { + I915_WRITE(DSCC_RC_BUF_THRESH_0, + rc_buf_thresh_dword[0]); + I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW, + rc_buf_thresh_dword[1]); + I915_WRITE(DSCC_RC_BUF_THRESH_1, + rc_buf_thresh_dword[2]); + I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW, + rc_buf_thresh_dword[3]); + } + } else { + I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe), + rc_buf_thresh_dword[0]); + I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe), + 
rc_buf_thresh_dword[1]); + I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe), + rc_buf_thresh_dword[2]); + I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe), + rc_buf_thresh_dword[3]); + if (crtc_state->dsc_params.dsc_split) { + I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe), + rc_buf_thresh_dword[0]); + I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe), + rc_buf_thresh_dword[1]); + I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe), + rc_buf_thresh_dword[2]); + I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe), + rc_buf_thresh_dword[3]); + } + } + + /* Populate the RC_RANGE_PARAMETERS registers */ + memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword)); + for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { + rc_range_params_dword[i / 2] |= + (u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset << + RC_BPG_OFFSET_SHIFT) | + (vdsc_cfg->rc_range_params[i].range_max_qp << + RC_MAX_QP_SHIFT) | + (vdsc_cfg->rc_range_params[i].range_min_qp << + RC_MIN_QP_SHIFT)) << 16 * (i % 2)); + DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i, + rc_range_params_dword[i / 2]); + } + if (cpu_transcoder == TRANSCODER_EDP) { + I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0, + rc_range_params_dword[0]); + I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW, + rc_range_params_dword[1]); + I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1, + rc_range_params_dword[2]); + I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW, + rc_range_params_dword[3]); + I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2, + rc_range_params_dword[4]); + I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW, + rc_range_params_dword[5]); + I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3, + rc_range_params_dword[6]); + I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW, + rc_range_params_dword[7]); + if (crtc_state->dsc_params.dsc_split) { + I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0, + rc_range_params_dword[0]); + I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW, + rc_range_params_dword[1]); + I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1, + rc_range_params_dword[2]); + I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW, + rc_range_params_dword[3]); + I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2, + rc_range_params_dword[4]); + I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW, + rc_range_params_dword[5]); + I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3, + rc_range_params_dword[6]); + I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW, + rc_range_params_dword[7]); + } + } else { + I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe), + rc_range_params_dword[0]); + I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe), + rc_range_params_dword[1]); + I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe), + rc_range_params_dword[2]); + I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe), + rc_range_params_dword[3]); + I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe), + rc_range_params_dword[4]); + I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe), + rc_range_params_dword[5]); + I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe), + rc_range_params_dword[6]); + I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe), + rc_range_params_dword[7]); + if (crtc_state->dsc_params.dsc_split) { + I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe), + rc_range_params_dword[0]); + I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe), + rc_range_params_dword[1]); + I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe), + rc_range_params_dword[2]); + I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe), + rc_range_params_dword[3]); + I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe), + rc_range_params_dword[4]); + I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe), + rc_range_params_dword[5]); + I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe), + 
rc_range_params_dword[6]); + I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe), + rc_range_params_dword[7]); + } + } +} + +static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg; + struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; + + /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */ + drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp); + + /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */ + drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg); + + intel_dig_port->write_infoframe(encoder, crtc_state, + DP_SDP_PPS, &dp_dsc_pps_sdp, + sizeof(dp_dsc_pps_sdp)); +} + +void intel_dsc_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum pipe pipe = crtc->pipe; + i915_reg_t dss_ctl1_reg, dss_ctl2_reg; + u32 dss_ctl1_val = 0; + u32 dss_ctl2_val = 0; + + if (!crtc_state->dsc_params.compression_enable) + return; + + /* Enable Power wells for VDSC/joining */ + intel_display_power_get(dev_priv, + intel_dsc_power_domain(crtc_state)); + + intel_configure_pps_for_dsc_encoder(encoder, crtc_state); + + intel_dp_write_dsc_pps_sdp(encoder, crtc_state); + + if (crtc_state->cpu_transcoder == TRANSCODER_EDP) { + dss_ctl1_reg = DSS_CTL1; + dss_ctl2_reg = DSS_CTL2; + } else { + dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe); + dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe); + } + dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE; + if (crtc_state->dsc_params.dsc_split) { + dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE; + dss_ctl1_val |= JOINER_ENABLE; + } + I915_WRITE(dss_ctl1_reg, dss_ctl1_val); + I915_WRITE(dss_ctl2_reg, dss_ctl2_val); +} + +void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + i915_reg_t dss_ctl1_reg, dss_ctl2_reg; + u32 dss_ctl1_val = 0, dss_ctl2_val = 0; + + if (!old_crtc_state->dsc_params.compression_enable) + return; + + if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) { + dss_ctl1_reg = DSS_CTL1; + dss_ctl2_reg = DSS_CTL2; + } else { + dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe); + dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe); + } + dss_ctl1_val = I915_READ(dss_ctl1_reg); + if (dss_ctl1_val & JOINER_ENABLE) + dss_ctl1_val &= ~JOINER_ENABLE; + I915_WRITE(dss_ctl1_reg, dss_ctl1_val); + + dss_ctl2_val = I915_READ(dss_ctl2_reg); + if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE || + dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE) + dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE | + RIGHT_BRANCH_VDSC_ENABLE); + I915_WRITE(dss_ctl2_reg, dss_ctl2_val); + + /* Disable Power wells for VDSC/joining */ + intel_display_power_put(dev_priv, + intel_dsc_power_domain(old_crtc_state)); +} diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c index 4bcdeaf8d98f..4f41e326f3f3 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.c +++ b/drivers/gpu/drm/i915/intel_workarounds.c @@ -48,58 +48,112 @@ * - Public functions to init or apply the given workaround type. 
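
The rework below replaces the driver's fixed workaround array with growable, named lists. As a mental model (types stripped to the essentials; names illustrative, not the driver's real declarations), every entry records both the bits it owns and their value, so a single read-modify-write can replay it at any time:

struct wa {
	unsigned int reg;	/* mmio offset */
	unsigned int mask;	/* bits this workaround owns */
	unsigned int val;	/* value for those bits */
};

/* the core of wa_list_apply(), minus locking and forcewake */
static unsigned int wa_apply_one(unsigned int cur, const struct wa *wa)
{
	return (cur & ~wa->mask) | wa->val;
}

Recording mask and val separately is also what makes verification possible after a GPU reset: an entry has been lost exactly when (cur ^ val) & mask is non-zero, which is the check the wa_verify() helper below performs.
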
*/ -static void wa_add(struct drm_i915_private *i915, - i915_reg_t reg, const u32 mask, const u32 val) +static void wa_init_start(struct i915_wa_list *wal, const char *name) { - struct i915_workarounds *wa = &i915->workarounds; - unsigned int start = 0, end = wa->count; - unsigned int addr = i915_mmio_reg_offset(reg); - struct i915_wa_reg *r; + wal->name = name; +} + +#define WA_LIST_CHUNK (1 << 4) + +static void wa_init_finish(struct i915_wa_list *wal) +{ + /* Trim unused entries. */ + if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) { + struct i915_wa *list = kmemdup(wal->list, + wal->count * sizeof(*list), + GFP_KERNEL); + + if (list) { + kfree(wal->list); + wal->list = list; + } + } + + if (!wal->count) + return; + + DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n", + wal->wa_count, wal->name); +} + +static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) +{ + unsigned int addr = i915_mmio_reg_offset(wa->reg); + unsigned int start = 0, end = wal->count; + const unsigned int grow = WA_LIST_CHUNK; + struct i915_wa *wa_; + + GEM_BUG_ON(!is_power_of_2(grow)); + + if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */ + struct i915_wa *list; + + list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa), + GFP_KERNEL); + if (!list) { + DRM_ERROR("No space for workaround init!\n"); + return; + } + + if (wal->list) + memcpy(list, wal->list, sizeof(*wa) * wal->count); + + wal->list = list; + } while (start < end) { unsigned int mid = start + (end - start) / 2; - if (wa->reg[mid].addr < addr) { + if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) { start = mid + 1; - } else if (wa->reg[mid].addr > addr) { + } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) { end = mid; } else { - r = &wa->reg[mid]; + wa_ = &wal->list[mid]; - if ((mask & ~r->mask) == 0) { + if ((wa->mask & ~wa_->mask) == 0) { DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n", - addr, r->mask, r->value); + i915_mmio_reg_offset(wa_->reg), + wa_->mask, wa_->val); - r->value &= ~mask; + wa_->val &= ~wa->mask; } - r->value |= val; - r->mask |= mask; + wal->wa_count++; + wa_->val |= wa->val; + wa_->mask |= wa->mask; return; } } - if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) { - DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n", - addr, mask, val); - return; - } - - r = &wa->reg[wa->count++]; - r->addr = addr; - r->value = val; - r->mask = mask; + wal->wa_count++; + wa_ = &wal->list[wal->count++]; + *wa_ = *wa; - while (r-- > wa->reg) { - GEM_BUG_ON(r[0].addr == r[1].addr); - if (r[1].addr > r[0].addr) + while (wa_-- > wal->list) { + GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) == + i915_mmio_reg_offset(wa_[1].reg)); + if (i915_mmio_reg_offset(wa_[1].reg) > + i915_mmio_reg_offset(wa_[0].reg)) break; - swap(r[1], r[0]); + swap(wa_[1], wa_[0]); } } -#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val)) +static void +__wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val) +{ + struct i915_wa wa = { + .reg = reg, + .mask = mask, + .val = val + }; + + _wa_add(wal, &wa); +} + +#define WA_REG(addr, mask, val) __wa_add(wal, (addr), (mask), (val)) #define WA_SET_BIT_MASKED(addr, mask) \ WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask)) @@ -110,8 +164,10 @@ static void wa_add(struct drm_i915_private *i915, #define WA_SET_FIELD_MASKED(addr, mask, value) \ WA_REG(addr, (mask), _MASKED_FIELD(mask, value)) -static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv) +static void gen8_ctx_workarounds_init(struct 
intel_engine_cs *engine) { + struct i915_wa_list *wal = &engine->ctx_wa_list; + WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); /* WaDisableAsyncFlipPerfMode:bdw,chv */ @@ -155,17 +211,14 @@ static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv) WA_SET_FIELD_MASKED(GEN7_GT_MODE, GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4); - - return 0; } -static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv) +static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine) { - int ret; + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->ctx_wa_list; - ret = gen8_ctx_workarounds_init(dev_priv); - if (ret) - return ret; + gen8_ctx_workarounds_init(engine); /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); @@ -185,31 +238,28 @@ static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv) /* WaForceContextSaveRestoreNonCoherent:bdw */ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ - (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); - - return 0; + (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); } -static int chv_ctx_workarounds_init(struct drm_i915_private *dev_priv) +static void chv_ctx_workarounds_init(struct intel_engine_cs *engine) { - int ret; + struct i915_wa_list *wal = &engine->ctx_wa_list; - ret = gen8_ctx_workarounds_init(dev_priv); - if (ret) - return ret; + gen8_ctx_workarounds_init(engine); /* WaDisableThreadStallDopClockGating:chv */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); /* Improve HiZ throughput on CHV. */ WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X); - - return 0; } -static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv) +static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine) { - if (HAS_LLC(dev_priv)) { + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->ctx_wa_list; + + if (HAS_LLC(i915)) { /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl * * Must match Display Engine. 
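
The WA_SET_BIT_MASKED() calls above rely on the hardware's masked-register format, where the top 16 bits of the written value select which of the bottom 16 bits actually change. A sketch of that convention (the macros mirror i915's usual _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE; treat the exact definitions as illustrative):

#include <stdint.h>

#define MASKED_BIT_ENABLE(a)	((((uint32_t)(a)) << 16) | (uint32_t)(a))
#define MASKED_BIT_DISABLE(a)	(((uint32_t)(a)) << 16)

/* what the hardware does with a write to a masked register */
static uint32_t masked_reg_write(uint32_t old, uint32_t wr)
{
	uint32_t mask = wr >> 16;

	return (old & ~mask) | (wr & mask);
}

This is why these context workarounds need no read before writing: bits outside the mask keep their previous value automatically.
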
See @@ -228,7 +278,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv) PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); /* Syncing dependencies between camera and graphics:skl,bxt,kbl */ - if (!IS_COFFEELAKE(dev_priv)) + if (!IS_COFFEELAKE(i915)) WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); @@ -271,9 +321,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv) HDC_FORCE_NON_COHERENT); /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */ - if (IS_SKYLAKE(dev_priv) || - IS_KABYLAKE(dev_priv) || - IS_COFFEELAKE(dev_priv)) + if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, GEN8_SAMPLER_POWER_BYPASS_DIS); @@ -300,14 +348,14 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv) GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); /* WaClearHIZ_WM_CHICKEN3:bxt,glk */ - if (IS_GEN9_LP(dev_priv)) + if (IS_GEN9_LP(i915)) WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ); - - return 0; } -static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv) +static void skl_tune_iz_hashing(struct intel_engine_cs *engine) { + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->ctx_wa_list; u8 vals[3] = { 0, 0, 0 }; unsigned int i; @@ -318,7 +366,7 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv) * Only consider slices where one, and only one, subslice has 7 * EUs */ - if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i])) + if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i])) continue; /* @@ -327,12 +375,12 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv) * * -> 0 <= ss <= 3; */ - ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1; + ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1; vals[i] = 3 - ss; } if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0) - return 0; + return; /* Tune IZ hashing. 
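
The subslice_7eu bookkeeping above compresses into a small pure function; a standalone restatement of the loop body (the mask value in the example is illustrative):

#include <strings.h>	/* ffs() */

/* subslice_7eu is a bitmask of the subslices with 7 EUs in one slice */
static int iz_hashing_val(unsigned int subslice_7eu)
{
	/* only slices where exactly one subslice has 7 EUs get tuned */
	if (!subslice_7eu || (subslice_7eu & (subslice_7eu - 1)))
		return 0;

	/* subslice index 0..3 maps to hashing value 3..0 */
	return 3 - (ffs(subslice_7eu) - 1);
}

/* e.g. subslice_7eu == 0x4: subslice 2 has the 7 EUs, value is 1 */
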
See intel_device_info_runtime_init() */ WA_SET_FIELD_MASKED(GEN7_GT_MODE, @@ -342,28 +390,19 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv) GEN9_IZ_HASHING(2, vals[2]) | GEN9_IZ_HASHING(1, vals[1]) | GEN9_IZ_HASHING(0, vals[0])); - - return 0; } -static int skl_ctx_workarounds_init(struct drm_i915_private *dev_priv) +static void skl_ctx_workarounds_init(struct intel_engine_cs *engine) { - int ret; - - ret = gen9_ctx_workarounds_init(dev_priv); - if (ret) - return ret; - - return skl_tune_iz_hashing(dev_priv); + gen9_ctx_workarounds_init(engine); + skl_tune_iz_hashing(engine); } -static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv) +static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine) { - int ret; + struct i915_wa_list *wal = &engine->ctx_wa_list; - ret = gen9_ctx_workarounds_init(dev_priv); - if (ret) - return ret; + gen9_ctx_workarounds_init(engine); /* WaDisableThreadStallDopClockGating:bxt */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, @@ -372,57 +411,41 @@ static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv) /* WaToEnableHwFixForPushConstHWBug:bxt */ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); - - return 0; } -static int kbl_ctx_workarounds_init(struct drm_i915_private *dev_priv) +static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine) { - int ret; - - ret = gen9_ctx_workarounds_init(dev_priv); - if (ret) - return ret; + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->ctx_wa_list; - /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */ - if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0)) - WA_SET_BIT_MASKED(HDC_CHICKEN0, - HDC_FENCE_DEST_SLM_DISABLE); + gen9_ctx_workarounds_init(engine); /* WaToEnableHwFixForPushConstHWBug:kbl */ - if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) + if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER)) WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); /* WaDisableSbeCacheDispatchPortSharing:kbl */ WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); - - return 0; } -static int glk_ctx_workarounds_init(struct drm_i915_private *dev_priv) +static void glk_ctx_workarounds_init(struct intel_engine_cs *engine) { - int ret; + struct i915_wa_list *wal = &engine->ctx_wa_list; - ret = gen9_ctx_workarounds_init(dev_priv); - if (ret) - return ret; + gen9_ctx_workarounds_init(engine); /* WaToEnableHwFixForPushConstHWBug:glk */ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); - - return 0; } -static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv) +static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine) { - int ret; + struct i915_wa_list *wal = &engine->ctx_wa_list; - ret = gen9_ctx_workarounds_init(dev_priv); - if (ret) - return ret; + gen9_ctx_workarounds_init(engine); /* WaToEnableHwFixForPushConstHWBug:cfl */ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, @@ -431,18 +454,19 @@ static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv) /* WaDisableSbeCacheDispatchPortSharing:cfl */ WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); - - return 0; } -static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv) +static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine) { + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->ctx_wa_list; + /* 
WaForceContextSaveRestoreNonCoherent:cnl */ WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0, HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT); /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */ - if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0)) + if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0)) WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5); /* WaDisableReplayBufferBankArbitrationOptimization:cnl */ @@ -450,7 +474,7 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv) GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */ - if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0)) + if (IS_CNL_REVID(i915, 0, CNL_REVID_B0)) WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE); @@ -470,16 +494,17 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv) /* WaDisableEarlyEOT:cnl */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT); - - return 0; } -static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv) +static void icl_ctx_workarounds_init(struct intel_engine_cs *engine) { + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->ctx_wa_list; + /* Wa_1604370585:icl (pre-prod) * Formerly known as WaPushConstantDereferenceHoldDisable */ - if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0)) + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE); @@ -495,7 +520,7 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv) /* Wa_2006611047:icl (pre-prod) * Formerly known as WaDisableImprovedTdlClkGating */ - if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, GEN11_TDL_CLOCK_GATING_FIX_DISABLE); @@ -504,70 +529,67 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv) GEN11_STATE_CACHE_REDIRECT_TO_CS); /* Wa_2006665173:icl (pre-prod) */ - if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC); - - return 0; } -int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv) +void intel_engine_init_ctx_wa(struct intel_engine_cs *engine) { - int err = 0; - - dev_priv->workarounds.count = 0; - - if (INTEL_GEN(dev_priv) < 8) - err = 0; - else if (IS_BROADWELL(dev_priv)) - err = bdw_ctx_workarounds_init(dev_priv); - else if (IS_CHERRYVIEW(dev_priv)) - err = chv_ctx_workarounds_init(dev_priv); - else if (IS_SKYLAKE(dev_priv)) - err = skl_ctx_workarounds_init(dev_priv); - else if (IS_BROXTON(dev_priv)) - err = bxt_ctx_workarounds_init(dev_priv); - else if (IS_KABYLAKE(dev_priv)) - err = kbl_ctx_workarounds_init(dev_priv); - else if (IS_GEMINILAKE(dev_priv)) - err = glk_ctx_workarounds_init(dev_priv); - else if (IS_COFFEELAKE(dev_priv)) - err = cfl_ctx_workarounds_init(dev_priv); - else if (IS_CANNONLAKE(dev_priv)) - err = cnl_ctx_workarounds_init(dev_priv); - else if (IS_ICELAKE(dev_priv)) - err = icl_ctx_workarounds_init(dev_priv); + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->ctx_wa_list; + + wa_init_start(wal, "context"); + + if (INTEL_GEN(i915) < 8) + return; + else if (IS_BROADWELL(i915)) + bdw_ctx_workarounds_init(engine); + else if (IS_CHERRYVIEW(i915)) + chv_ctx_workarounds_init(engine); + else if (IS_SKYLAKE(i915)) + skl_ctx_workarounds_init(engine); + else if (IS_BROXTON(i915)) + 
bxt_ctx_workarounds_init(engine); + else if (IS_KABYLAKE(i915)) + kbl_ctx_workarounds_init(engine); + else if (IS_GEMINILAKE(i915)) + glk_ctx_workarounds_init(engine); + else if (IS_COFFEELAKE(i915)) + cfl_ctx_workarounds_init(engine); + else if (IS_CANNONLAKE(i915)) + cnl_ctx_workarounds_init(engine); + else if (IS_ICELAKE(i915)) + icl_ctx_workarounds_init(engine); else - MISSING_CASE(INTEL_GEN(dev_priv)); - if (err) - return err; + MISSING_CASE(INTEL_GEN(i915)); - DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n", - dev_priv->workarounds.count); - return 0; + wa_init_finish(wal); } -int intel_ctx_workarounds_emit(struct i915_request *rq) +int intel_engine_emit_ctx_wa(struct i915_request *rq) { - struct i915_workarounds *w = &rq->i915->workarounds; + struct i915_wa_list *wal = &rq->engine->ctx_wa_list; + struct i915_wa *wa; + unsigned int i; u32 *cs; - int ret, i; + int ret; - if (w->count == 0) + if (wal->count == 0) return 0; ret = rq->engine->emit_flush(rq, EMIT_BARRIER); if (ret) return ret; - cs = intel_ring_begin(rq, (w->count * 2 + 2)); + cs = intel_ring_begin(rq, (wal->count * 2 + 2)); if (IS_ERR(cs)) return PTR_ERR(cs); - *cs++ = MI_LOAD_REGISTER_IMM(w->count); - for (i = 0; i < w->count; i++) { - *cs++ = w->reg[i].addr; - *cs++ = w->reg[i].value; + *cs++ = MI_LOAD_REGISTER_IMM(wal->count); + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + *cs++ = i915_mmio_reg_offset(wa->reg); + *cs++ = wa->val; } *cs++ = MI_NOOP; @@ -580,160 +602,149 @@ int intel_ctx_workarounds_emit(struct i915_request *rq) return 0; } -static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv) +static void +wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val) { + struct i915_wa wa = { + .reg = reg, + .mask = val, + .val = _MASKED_BIT_ENABLE(val) + }; + + _wa_add(wal, &wa); +} + +static void +wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, + u32 val) +{ + struct i915_wa wa = { + .reg = reg, + .mask = mask, + .val = val + }; + + _wa_add(wal, &wa); } -static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv) +static void +wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val) { + wa_write_masked_or(wal, reg, ~0, val); } -static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv) +static void +wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val) { - /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ - I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, - _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE)); + wa_write_masked_or(wal, reg, val, val); +} - /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */ - I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | - GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); +static void gen9_gt_workarounds_init(struct drm_i915_private *i915) +{ + struct i915_wa_list *wal = &i915->gt_wa_list; /* WaDisableKillLogic:bxt,skl,kbl */ - if (!IS_COFFEELAKE(dev_priv)) - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | - ECOCHK_DIS_TLB); + if (!IS_COFFEELAKE(i915)) + wa_write_or(wal, + GAM_ECOCHK, + ECOCHK_DIS_TLB); - if (HAS_LLC(dev_priv)) { + if (HAS_LLC(i915)) { /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl * * Must match Display Engine. See * WaCompressedResourceDisplayNewHashMode. 
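
The write helpers above differ only in the mask they record, and apply-time always computes (cur & ~mask) | val. Spelling out the equivalences (plain C, no mmio involved):

/* the per-entry math that wa_list_apply() performs later */
static unsigned int wa_rmw(unsigned int cur, unsigned int mask,
			   unsigned int val)
{
	return (cur & ~mask) | val;
}

/*
 * wa_write_or(reg, bits)         records (bits, bits)  -> cur | bits
 * wa_write(reg, value)           records (~0, value)   -> value
 * wa_write_masked_or(reg, m, v)  records (m, v)        -> (cur & ~m) | v
 */
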
*/ - I915_WRITE(MMCD_MISC_CTRL, - I915_READ(MMCD_MISC_CTRL) | - MMCD_PCLA | - MMCD_HOTSPOT_EN); + wa_write_or(wal, + MMCD_MISC_CTRL, + MMCD_PCLA | MMCD_HOTSPOT_EN); } /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */ - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | - BDW_DISABLE_HDC_INVALIDATION); - - /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */ - if (IS_GEN9_LP(dev_priv)) { - u32 val = I915_READ(GEN8_L3SQCREG1); - - val &= ~L3_PRIO_CREDITS_MASK; - val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2); - I915_WRITE(GEN8_L3SQCREG1, val); - } - - /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */ - I915_WRITE(GEN8_L3SQCREG4, - I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES); - - /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */ - I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, - _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); + wa_write_or(wal, + GAM_ECOCHK, + BDW_DISABLE_HDC_INVALIDATION); } -static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv) +static void skl_gt_workarounds_init(struct drm_i915_private *i915) { - gen9_gt_workarounds_apply(dev_priv); + struct i915_wa_list *wal = &i915->gt_wa_list; - /* WaEnableGapsTsvCreditFix:skl */ - I915_WRITE(GEN8_GARBCNTL, - I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); + gen9_gt_workarounds_init(i915); /* WaDisableGafsUnitClkGating:skl */ - I915_WRITE(GEN7_UCGCTL4, - I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + wa_write_or(wal, + GEN7_UCGCTL4, + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); /* WaInPlaceDecompressionHang:skl */ - if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) - I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, - I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER)) + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); } -static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv) +static void bxt_gt_workarounds_init(struct drm_i915_private *i915) { - gen9_gt_workarounds_apply(dev_priv); + struct i915_wa_list *wal = &i915->gt_wa_list; - /* WaDisablePooledEuLoadBalancingFix:bxt */ - I915_WRITE(FF_SLICE_CS_CHICKEN2, - _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE)); + gen9_gt_workarounds_init(i915); /* WaInPlaceDecompressionHang:bxt */ - I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, - I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); } -static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv) +static void kbl_gt_workarounds_init(struct drm_i915_private *i915) { - gen9_gt_workarounds_apply(dev_priv); + struct i915_wa_list *wal = &i915->gt_wa_list; - /* WaEnableGapsTsvCreditFix:kbl */ - I915_WRITE(GEN8_GARBCNTL, - I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); + gen9_gt_workarounds_init(i915); /* WaDisableDynamicCreditSharing:kbl */ - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) - I915_WRITE(GAMT_CHKN_BIT_REG, - I915_READ(GAMT_CHKN_BIT_REG) | - GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); + if (IS_KBL_REVID(i915, 0, KBL_REVID_B0)) + wa_write_or(wal, + GAMT_CHKN_BIT_REG, + GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); /* WaDisableGafsUnitClkGating:kbl */ - I915_WRITE(GEN7_UCGCTL4, - I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + wa_write_or(wal, + GEN7_UCGCTL4, + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); /* WaInPlaceDecompressionHang:kbl */ - I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, - 
I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); - - /* WaKBLVECSSemaphoreWaitPoll:kbl */ - if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) { - struct intel_engine_cs *engine; - unsigned int tmp; - - for_each_engine(engine, dev_priv, tmp) { - if (engine->id == RCS) - continue; - - I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1); - } - } + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); } -static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv) +static void glk_gt_workarounds_init(struct drm_i915_private *i915) { - gen9_gt_workarounds_apply(dev_priv); + gen9_gt_workarounds_init(i915); } -static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv) +static void cfl_gt_workarounds_init(struct drm_i915_private *i915) { - gen9_gt_workarounds_apply(dev_priv); + struct i915_wa_list *wal = &i915->gt_wa_list; - /* WaEnableGapsTsvCreditFix:cfl */ - I915_WRITE(GEN8_GARBCNTL, - I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); + gen9_gt_workarounds_init(i915); /* WaDisableGafsUnitClkGating:cfl */ - I915_WRITE(GEN7_UCGCTL4, - I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + wa_write_or(wal, + GEN7_UCGCTL4, + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); /* WaInPlaceDecompressionHang:cfl */ - I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, - I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); } static void wa_init_mcr(struct drm_i915_private *dev_priv) { const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); - u32 mcr; + struct i915_wa_list *wal = &dev_priv->gt_wa_list; u32 mcr_slice_subslice_mask; /* @@ -770,8 +781,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv) WARN_ON((enabled_mask & disabled_mask) != enabled_mask); } - mcr = I915_READ(GEN8_MCR_SELECTOR); - if (INTEL_GEN(dev_priv) >= 11) mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK; @@ -789,173 +798,220 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv) * occasions, such as INSTDONE, where this value is dependent * on s/ss combo, the read should be done with read_subslice_reg. 
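
Expressing the MCR steering as a list entry preserves the read-modify-write that the deleted open-coded version performed, while letting it be replayed after a reset. What the GEN8_MCR_SELECTOR entry amounts to at apply time (a sketch; the real mask and steering value come from the code above):

static unsigned int steer_mcr(unsigned int mcr,
			      unsigned int slice_subslice_mask,
			      unsigned int s_ss_select)
{
	mcr &= ~slice_subslice_mask;	/* drop any previous steering */
	mcr |= s_ss_select;		/* point at a valid slice/subslice */
	return mcr;
}
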
*/ - mcr &= ~mcr_slice_subslice_mask; - mcr |= intel_calculate_mcr_s_ss_select(dev_priv); - I915_WRITE(GEN8_MCR_SELECTOR, mcr); + wa_write_masked_or(wal, + GEN8_MCR_SELECTOR, + mcr_slice_subslice_mask, + intel_calculate_mcr_s_ss_select(dev_priv)); } -static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv) +static void cnl_gt_workarounds_init(struct drm_i915_private *i915) { - wa_init_mcr(dev_priv); + struct i915_wa_list *wal = &i915->gt_wa_list; + + wa_init_mcr(i915); /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */ - if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0)) - I915_WRITE(GAMT_CHKN_BIT_REG, - I915_READ(GAMT_CHKN_BIT_REG) | - GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT); + if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0)) + wa_write_or(wal, + GAMT_CHKN_BIT_REG, + GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT); /* WaInPlaceDecompressionHang:cnl */ - I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, - I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); - - /* WaEnablePreemptionGranularityControlByUMD:cnl */ - I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, - _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); } -static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv) +static void icl_gt_workarounds_init(struct drm_i915_private *i915) { - wa_init_mcr(dev_priv); + struct i915_wa_list *wal = &i915->gt_wa_list; - /* This is not an Wa. Enable for better image quality */ - I915_WRITE(_3D_CHICKEN3, - _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); + wa_init_mcr(i915); /* WaInPlaceDecompressionHang:icl */ - I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); - - /* WaPipelineFlushCoherentLines:icl */ - I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | - GEN8_LQSC_FLUSH_COHERENT_LINES); - - /* Wa_1405543622:icl - * Formerly known as WaGAPZPriorityScheme - */ - I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) | - GEN11_ARBITRATION_PRIO_ORDER_MASK); - - /* Wa_1604223664:icl - * Formerly known as WaL3BankAddressHashing - */ - I915_WRITE(GEN8_GARBCNTL, - (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) | - GEN11_HASH_CTRL_EXCL_BIT0); - I915_WRITE(GEN11_GLBLINVL, - (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) | - GEN11_BANK_HASH_ADDR_EXCL_BIT0); + wa_write_or(wal, + GEN9_GAMT_ECO_REG_RW_IA, + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); /* WaModifyGamTlbPartitioning:icl */ - I915_WRITE(GEN11_GACB_PERF_CTRL, - (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) | - GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); - - /* Wa_1405733216:icl - * Formerly known as WaDisableCleanEvicts - */ - I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | - GEN11_LQSC_CLEAN_EVICT_DISABLE); + wa_write_masked_or(wal, + GEN11_GACB_PERF_CTRL, + GEN11_HASH_CTRL_MASK, + GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); /* Wa_1405766107:icl * Formerly known as WaCL2SFHalfMaxAlloc */ - I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) | - GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | - GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); + wa_write_or(wal, + GEN11_LSN_UNSLCVC, + GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | + GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); /* Wa_220166154:icl * Formerly known as WaDisCtxReload */ - I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) | - GAMW_ECO_DEV_CTX_RELOAD_DISABLE); + wa_write_or(wal, + GEN8_GAMW_ECO_DEV_RW_IA, + GAMW_ECO_DEV_CTX_RELOAD_DISABLE); /* 
Wa_1405779004:icl (pre-prod) */ - if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) - I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, - I915_READ(SLICE_UNIT_LEVEL_CLKGATE) | - MSCUNIT_CLKGATE_DIS); + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) + wa_write_or(wal, + SLICE_UNIT_LEVEL_CLKGATE, + MSCUNIT_CLKGATE_DIS); /* Wa_1406680159:icl */ - I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, - I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) | - GWUNIT_CLKGATE_DIS); - - /* Wa_1604302699:icl */ - I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER, - I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) | - GEN11_I2M_WRITE_DISABLE); + wa_write_or(wal, + SUBSLICE_UNIT_LEVEL_CLKGATE, + GWUNIT_CLKGATE_DIS); /* Wa_1406838659:icl (pre-prod) */ - if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0)) - I915_WRITE(INF_UNIT_LEVEL_CLKGATE, - I915_READ(INF_UNIT_LEVEL_CLKGATE) | - CGPSF_CLKGATE_DIS); - - /* WaForwardProgressSoftReset:icl */ - I915_WRITE(GEN10_SCRATCH_LNCF2, - I915_READ(GEN10_SCRATCH_LNCF2) | - PMFLUSHDONE_LNICRSDROP | - PMFLUSH_GAPL3UNBLOCK | - PMFLUSHDONE_LNEBLK); + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) + wa_write_or(wal, + INF_UNIT_LEVEL_CLKGATE, + CGPSF_CLKGATE_DIS); /* Wa_1406463099:icl * Formerly known as WaGamTlbPendError */ - I915_WRITE(GAMT_CHKN_BIT_REG, - I915_READ(GAMT_CHKN_BIT_REG) | - GAMT_CHKN_DISABLE_L3_COH_PIPE); + wa_write_or(wal, + GAMT_CHKN_BIT_REG, + GAMT_CHKN_DISABLE_L3_COH_PIPE); } -void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv) +void intel_gt_init_workarounds(struct drm_i915_private *i915) { - if (INTEL_GEN(dev_priv) < 8) + struct i915_wa_list *wal = &i915->gt_wa_list; + + wa_init_start(wal, "GT"); + + if (INTEL_GEN(i915) < 8) return; - else if (IS_BROADWELL(dev_priv)) - bdw_gt_workarounds_apply(dev_priv); - else if (IS_CHERRYVIEW(dev_priv)) - chv_gt_workarounds_apply(dev_priv); - else if (IS_SKYLAKE(dev_priv)) - skl_gt_workarounds_apply(dev_priv); - else if (IS_BROXTON(dev_priv)) - bxt_gt_workarounds_apply(dev_priv); - else if (IS_KABYLAKE(dev_priv)) - kbl_gt_workarounds_apply(dev_priv); - else if (IS_GEMINILAKE(dev_priv)) - glk_gt_workarounds_apply(dev_priv); - else if (IS_COFFEELAKE(dev_priv)) - cfl_gt_workarounds_apply(dev_priv); - else if (IS_CANNONLAKE(dev_priv)) - cnl_gt_workarounds_apply(dev_priv); - else if (IS_ICELAKE(dev_priv)) - icl_gt_workarounds_apply(dev_priv); + else if (IS_BROADWELL(i915)) + return; + else if (IS_CHERRYVIEW(i915)) + return; + else if (IS_SKYLAKE(i915)) + skl_gt_workarounds_init(i915); + else if (IS_BROXTON(i915)) + bxt_gt_workarounds_init(i915); + else if (IS_KABYLAKE(i915)) + kbl_gt_workarounds_init(i915); + else if (IS_GEMINILAKE(i915)) + glk_gt_workarounds_init(i915); + else if (IS_COFFEELAKE(i915)) + cfl_gt_workarounds_init(i915); + else if (IS_CANNONLAKE(i915)) + cnl_gt_workarounds_init(i915); + else if (IS_ICELAKE(i915)) + icl_gt_workarounds_init(i915); else - MISSING_CASE(INTEL_GEN(dev_priv)); + MISSING_CASE(INTEL_GEN(i915)); + + wa_init_finish(wal); } -struct whitelist { - i915_reg_t reg[RING_MAX_NONPRIV_SLOTS]; - unsigned int count; - u32 nopid; -}; +static enum forcewake_domains +wal_get_fw_for_rmw(struct drm_i915_private *dev_priv, + const struct i915_wa_list *wal) +{ + enum forcewake_domains fw = 0; + struct i915_wa *wa; + unsigned int i; + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) + fw |= intel_uncore_forcewake_for_reg(dev_priv, + wa->reg, + FW_REG_READ | + FW_REG_WRITE); + + return fw; +} -static void whitelist_reg(struct whitelist *w, i915_reg_t reg) +static void +wa_list_apply(struct drm_i915_private 
*dev_priv, const struct i915_wa_list *wal) { - if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS)) + enum forcewake_domains fw; + unsigned long flags; + struct i915_wa *wa; + unsigned int i; + + if (!wal->count) return; - w->reg[w->count++] = reg; + fw = wal_get_fw_for_rmw(dev_priv, wal); + + spin_lock_irqsave(&dev_priv->uncore.lock, flags); + intel_uncore_forcewake_get__locked(dev_priv, fw); + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + u32 val = I915_READ_FW(wa->reg); + + val &= ~wa->mask; + val |= wa->val; + + I915_WRITE_FW(wa->reg, val); + } + + intel_uncore_forcewake_put__locked(dev_priv, fw); + spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); + + DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name); +} + +void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv) +{ + wa_list_apply(dev_priv, &dev_priv->gt_wa_list); +} + +static bool +wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from) +{ + if ((cur ^ wa->val) & wa->mask) { + DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n", + name, from, i915_mmio_reg_offset(wa->reg), cur, + cur & wa->mask, wa->val, wa->mask); + + return false; + } + + return true; } -static void bdw_whitelist_build(struct whitelist *w) +static bool wa_list_verify(struct drm_i915_private *dev_priv, + const struct i915_wa_list *wal, + const char *from) { + struct i915_wa *wa; + unsigned int i; + bool ok = true; + + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) + ok &= wa_verify(wa, I915_READ(wa->reg), wal->name, from); + + return ok; } -static void chv_whitelist_build(struct whitelist *w) +bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv, + const char *from) { + return wa_list_verify(dev_priv, &dev_priv->gt_wa_list, from); } -static void gen9_whitelist_build(struct whitelist *w) +static void +whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg) +{ + struct i915_wa wa = { + .reg = reg + }; + + if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS)) + return; + + _wa_add(wal, &wa); +} + +static void gen9_whitelist_build(struct i915_wa_list *w) { /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ whitelist_reg(w, GEN9_CTX_PREEMPT_REG); @@ -967,7 +1023,7 @@ static void gen9_whitelist_build(struct whitelist *w) whitelist_reg(w, GEN8_HDC_CHICKEN1); } -static void skl_whitelist_build(struct whitelist *w) +static void skl_whitelist_build(struct i915_wa_list *w) { gen9_whitelist_build(w); @@ -975,12 +1031,12 @@ static void skl_whitelist_build(struct whitelist *w) whitelist_reg(w, GEN8_L3SQCREG4); } -static void bxt_whitelist_build(struct whitelist *w) +static void bxt_whitelist_build(struct i915_wa_list *w) { gen9_whitelist_build(w); } -static void kbl_whitelist_build(struct whitelist *w) +static void kbl_whitelist_build(struct i915_wa_list *w) { gen9_whitelist_build(w); @@ -988,7 +1044,7 @@ static void kbl_whitelist_build(struct whitelist *w) whitelist_reg(w, GEN8_L3SQCREG4); } -static void glk_whitelist_build(struct whitelist *w) +static void glk_whitelist_build(struct i915_wa_list *w) { gen9_whitelist_build(w); @@ -996,37 +1052,41 @@ static void glk_whitelist_build(struct whitelist *w) whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); } -static void cfl_whitelist_build(struct whitelist *w) +static void cfl_whitelist_build(struct i915_wa_list *w) { gen9_whitelist_build(w); } -static void cnl_whitelist_build(struct whitelist *w) +static void cnl_whitelist_build(struct i915_wa_list *w) { /* 
WaEnablePreemptionGranularityControlByUMD:cnl */ whitelist_reg(w, GEN8_CS_CHICKEN1); } -static void icl_whitelist_build(struct whitelist *w) +static void icl_whitelist_build(struct i915_wa_list *w) { + /* WaAllowUMDToModifyHalfSliceChicken7:icl */ + whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7); + + /* WaAllowUMDToModifySamplerMode:icl */ + whitelist_reg(w, GEN10_SAMPLER_MODE); } -static struct whitelist *whitelist_build(struct intel_engine_cs *engine, - struct whitelist *w) +void intel_engine_init_whitelist(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *w = &engine->whitelist; GEM_BUG_ON(engine->id != RCS); - w->count = 0; - w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base)); + wa_init_start(w, "whitelist"); if (INTEL_GEN(i915) < 8) - return NULL; + return; else if (IS_BROADWELL(i915)) - bdw_whitelist_build(w); + return; else if (IS_CHERRYVIEW(i915)) - chv_whitelist_build(w); + return; else if (IS_SKYLAKE(i915)) skl_whitelist_build(w); else if (IS_BROXTON(i915)) @@ -1044,39 +1104,180 @@ static struct whitelist *whitelist_build(struct intel_engine_cs *engine, else MISSING_CASE(INTEL_GEN(i915)); - return w; + wa_init_finish(w); } -static void whitelist_apply(struct intel_engine_cs *engine, - const struct whitelist *w) +void intel_engine_apply_whitelist(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; + const struct i915_wa_list *wal = &engine->whitelist; const u32 base = engine->mmio_base; + struct i915_wa *wa; unsigned int i; - if (!w) + if (!wal->count) return; - intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL); - - for (i = 0; i < w->count; i++) - I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), - i915_mmio_reg_offset(w->reg[i])); + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) + I915_WRITE(RING_FORCE_TO_NONPRIV(base, i), + i915_mmio_reg_offset(wa->reg)); /* And clear the rest just in case of garbage */ for (; i < RING_MAX_NONPRIV_SLOTS; i++) - I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), w->nopid); + I915_WRITE(RING_FORCE_TO_NONPRIV(base, i), + i915_mmio_reg_offset(RING_NOPID(base))); - intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL); + DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name); } -void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine) +static void rcs_engine_wa_init(struct intel_engine_cs *engine) { - struct whitelist w; + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->wa_list; + + if (IS_ICELAKE(i915)) { + /* This is not an Wa. 
Enable for better image quality */ + wa_masked_en(wal, + _3D_CHICKEN3, + _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE); + + /* WaPipelineFlushCoherentLines:icl */ + wa_write_or(wal, + GEN8_L3SQCREG4, + GEN8_LQSC_FLUSH_COHERENT_LINES); + + /* + * Wa_1405543622:icl + * Formerly known as WaGAPZPriorityScheme + */ + wa_write_or(wal, + GEN8_GARBCNTL, + GEN11_ARBITRATION_PRIO_ORDER_MASK); + + /* + * Wa_1604223664:icl + * Formerly known as WaL3BankAddressHashing + */ + wa_write_masked_or(wal, + GEN8_GARBCNTL, + GEN11_HASH_CTRL_EXCL_MASK, + GEN11_HASH_CTRL_EXCL_BIT0); + wa_write_masked_or(wal, + GEN11_GLBLINVL, + GEN11_BANK_HASH_ADDR_EXCL_MASK, + GEN11_BANK_HASH_ADDR_EXCL_BIT0); - whitelist_apply(engine, whitelist_build(engine, &w)); + /* + * Wa_1405733216:icl + * Formerly known as WaDisableCleanEvicts + */ + wa_write_or(wal, + GEN8_L3SQCREG4, + GEN11_LQSC_CLEAN_EVICT_DISABLE); + + /* WaForwardProgressSoftReset:icl */ + wa_write_or(wal, + GEN10_SCRATCH_LNCF2, + PMFLUSHDONE_LNICRSDROP | + PMFLUSH_GAPL3UNBLOCK | + PMFLUSHDONE_LNEBLK); + + /* Wa_1406609255:icl (pre-prod) */ + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) + wa_write_or(wal, + GEN7_SARCHKMD, + GEN7_DISABLE_DEMAND_PREFETCH | + GEN7_DISABLE_SAMPLER_PREFETCH); + } + + if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) { + /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */ + wa_masked_en(wal, + GEN7_FF_SLICE_CS_CHICKEN1, + GEN9_FFSC_PERCTX_PREEMPT_CTRL); + } + + if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) { + /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */ + wa_write_or(wal, + GEN8_GARBCNTL, + GEN9_GAPS_TSV_CREDIT_DISABLE); + } + + if (IS_BROXTON(i915)) { + /* WaDisablePooledEuLoadBalancingFix:bxt */ + wa_masked_en(wal, + FF_SLICE_CS_CHICKEN2, + GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); + } + + if (IS_GEN9(i915)) { + /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ + wa_masked_en(wal, + GEN9_CSFE_CHICKEN1_RCS, + GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE); + + /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */ + wa_write_or(wal, + BDW_SCRATCH1, + GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); + + /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */ + if (IS_GEN9_LP(i915)) + wa_write_masked_or(wal, + GEN8_L3SQCREG1, + L3_PRIO_CREDITS_MASK, + L3_GENERAL_PRIO_CREDITS(62) | + L3_HIGH_PRIO_CREDITS(2)); + + /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */ + wa_write_or(wal, + GEN8_L3SQCREG4, + GEN8_LQSC_FLUSH_COHERENT_LINES); + } +} + +static void xcs_engine_wa_init(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + struct i915_wa_list *wal = &engine->wa_list; + + /* WaKBLVECSSemaphoreWaitPoll:kbl */ + if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) { + wa_write(wal, + RING_SEMA_WAIT_POLL(engine->mmio_base), + 1); + } +} + +void intel_engine_init_workarounds(struct intel_engine_cs *engine) +{ + struct i915_wa_list *wal = &engine->wa_list; + + if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8)) + return; + + wa_init_start(wal, engine->name); + + if (engine->id == RCS) + rcs_engine_wa_init(engine); + else + xcs_engine_wa_init(engine); + + wa_init_finish(wal); +} + +void intel_engine_apply_workarounds(struct intel_engine_cs *engine) +{ + wa_list_apply(engine->i915, &engine->wa_list); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine, + const char *from) +{ + return wa_list_verify(engine->i915, &engine->wa_list, from); +} + #include "selftests/intel_workarounds.c" #endif diff --git 
a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h index b11d0623e626..7c734714b05e 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.h +++ b/drivers/gpu/drm/i915/intel_workarounds.h @@ -7,11 +7,39 @@ #ifndef _I915_WORKAROUNDS_H_ #define _I915_WORKAROUNDS_H_ -int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv); -int intel_ctx_workarounds_emit(struct i915_request *rq); +#include <linux/slab.h> -void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv); +struct i915_wa { + i915_reg_t reg; + u32 mask; + u32 val; +}; -void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine); +struct i915_wa_list { + const char *name; + struct i915_wa *list; + unsigned int count; + unsigned int wa_count; +}; + +static inline void intel_wa_list_free(struct i915_wa_list *wal) +{ + kfree(wal->list); + memset(wal, 0, sizeof(*wal)); +} + +void intel_engine_init_ctx_wa(struct intel_engine_cs *engine); +int intel_engine_emit_ctx_wa(struct i915_request *rq); + +void intel_gt_init_workarounds(struct drm_i915_private *dev_priv); +void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv); +bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv, + const char *from); + +void intel_engine_init_whitelist(struct intel_engine_cs *engine); +void intel_engine_apply_whitelist(struct intel_engine_cs *engine); + +void intel_engine_init_workarounds(struct intel_engine_cs *engine); +void intel_engine_apply_workarounds(struct intel_engine_cs *engine); #endif diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 8d03f64eabd7..26c065c8d2c0 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -551,7 +551,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) err = igt_check_page_sizes(vma); if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) { - pr_err("page_sizes.gtt=%u, expected %lu\n", + pr_err("page_sizes.gtt=%u, expected %llu\n", vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K); err = -EINVAL; } @@ -1135,7 +1135,8 @@ static int igt_write_huge(struct i915_gem_context *ctx, n = 0; for_each_engine(engine, i915, id) { if (!intel_engine_can_store_dword(engine)) { - pr_info("store-dword-imm not supported on engine=%u\n", id); + pr_info("store-dword-imm not supported on engine=%u\n", + id); continue; } engines[n++] = engine; @@ -1167,17 +1168,30 @@ static int igt_write_huge(struct i915_gem_context *ctx, engine = engines[order[i] % n]; i = (i + 1) % (n * I915_NUM_ENGINES); - err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1); + /* + * In order to utilize 64K pages we need to both pad the vma + * size and ensure the vma offset is at the start of the pt + * boundary, however to improve coverage we opt for testing both + * aligned and unaligned offsets. 
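+ * (Assumed reading of the code below: rounding offset_low down to a 2M
+ * boundary supplies the aligned, start-of-pt case, while offset_high is
+ * deliberately left as-is to cover the unaligned case.)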
+ */ + if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K) + offset_low = round_down(offset_low, + I915_GTT_PAGE_SIZE_2M); + + err = __igt_write_huge(ctx, engine, obj, size, offset_low, + dword, num + 1); if (err) break; - err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1); + err = __igt_write_huge(ctx, engine, obj, size, offset_high, + dword, num + 1); if (err) break; if (igt_timeout(end_time, "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n", - __func__, engine->id, offset_low, offset_high, max_page_size)) + __func__, engine->id, offset_low, offset_high, + max_page_size)) break; } @@ -1436,7 +1450,7 @@ static int igt_ppgtt_pin_update(void *arg) * huge-gtt-pages. */ - if (!USES_FULL_48BIT_PPGTT(dev_priv)) { + if (!HAS_FULL_48BIT_PPGTT(dev_priv)) { pr_info("48b PPGTT not supported, skipping\n"); return 0; } @@ -1687,10 +1701,9 @@ int i915_gem_huge_page_mock_selftests(void) SUBTEST(igt_mock_ppgtt_huge_fill), SUBTEST(igt_mock_ppgtt_64K), }; - int saved_ppgtt = i915_modparams.enable_ppgtt; struct drm_i915_private *dev_priv; - struct pci_dev *pdev; struct i915_hw_ppgtt *ppgtt; + struct pci_dev *pdev; int err; dev_priv = mock_gem_device(); @@ -1698,7 +1711,7 @@ int i915_gem_huge_page_mock_selftests(void) return -ENOMEM; /* Pretend to be a device which supports the 48b PPGTT */ - i915_modparams.enable_ppgtt = 3; + mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL; pdev = dev_priv->drm.pdev; dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39)); @@ -1731,9 +1744,6 @@ out_close: out_unlock: mutex_unlock(&dev_priv->drm.struct_mutex); - - i915_modparams.enable_ppgtt = saved_ppgtt; - drm_dev_put(&dev_priv->drm); return err; @@ -1753,7 +1763,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) struct i915_gem_context *ctx; int err; - if (!USES_PPGTT(dev_priv)) { + if (!HAS_PPGTT(dev_priv)) { pr_info("PPGTT not supported, skipping live-selftests\n"); return 0; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 76df25aa90c9..7d82043aff10 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -39,7 +39,8 @@ struct live_test { const char *func; const char *name; - unsigned int reset_count; + unsigned int reset_global; + unsigned int reset_engine[I915_NUM_ENGINES]; }; static int begin_live_test(struct live_test *t, @@ -47,6 +48,8 @@ static int begin_live_test(struct live_test *t, const char *func, const char *name) { + struct intel_engine_cs *engine; + enum intel_engine_id id; int err; t->i915 = i915; @@ -63,7 +66,11 @@ static int begin_live_test(struct live_test *t, } i915->gpu_error.missed_irq_rings = 0; - t->reset_count = i915_reset_count(&i915->gpu_error); + t->reset_global = i915_reset_count(&i915->gpu_error); + + for_each_engine(engine, i915, id) + t->reset_engine[id] = + i915_reset_engine_count(&i915->gpu_error, engine); return 0; } @@ -71,14 +78,28 @@ static int begin_live_test(struct live_test *t, static int end_live_test(struct live_test *t) { struct drm_i915_private *i915 = t->i915; + struct intel_engine_cs *engine; + enum intel_engine_id id; if (igt_flush_test(i915, I915_WAIT_LOCKED)) return -EIO; - if (t->reset_count != i915_reset_count(&i915->gpu_error)) { + if (t->reset_global != i915_reset_count(&i915->gpu_error)) { pr_err("%s(%s): GPU was reset %d times!\n", t->func, t->name, - i915_reset_count(&i915->gpu_error) - t->reset_count); + 
i915_reset_count(&i915->gpu_error) - t->reset_global); + return -EIO; + } + + for_each_engine(engine, i915, id) { + if (t->reset_engine[id] == + i915_reset_engine_count(&i915->gpu_error, engine)) + continue; + + pr_err("%s(%s): engine '%s' was reset %d times!\n", + t->func, t->name, engine->name, + i915_reset_engine_count(&i915->gpu_error, engine) - + t->reset_engine[id]); return -EIO; } @@ -531,11 +552,11 @@ static int igt_ctx_exec(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj = NULL; + unsigned long ncontexts, ndwords, dw; struct drm_file *file; IGT_TIMEOUT(end_time); LIST_HEAD(objects); - unsigned long ncontexts, ndwords, dw; - bool first_shared_gtt = true; + struct live_test t; int err = -ENODEV; /* @@ -553,6 +574,10 @@ static int igt_ctx_exec(void *arg) mutex_lock(&i915->drm.struct_mutex); + err = begin_live_test(&t, i915, __func__, ""); + if (err) + goto out_unlock; + ncontexts = 0; ndwords = 0; dw = 0; @@ -561,12 +586,7 @@ static int igt_ctx_exec(void *arg) struct i915_gem_context *ctx; unsigned int id; - if (first_shared_gtt) { - ctx = __create_hw_context(i915, file->driver_priv); - first_shared_gtt = false; - } else { - ctx = i915_gem_create_context(i915, file->driver_priv); - } + ctx = i915_gem_create_context(i915, file->driver_priv); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out_unlock; @@ -622,7 +642,7 @@ static int igt_ctx_exec(void *arg) } out_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (end_live_test(&t)) err = -EIO; mutex_unlock(&i915->drm.struct_mutex); @@ -634,13 +654,14 @@ static int igt_ctx_readonly(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj = NULL; + struct i915_gem_context *ctx; + struct i915_hw_ppgtt *ppgtt; + unsigned long ndwords, dw; struct drm_file *file; I915_RND_STATE(prng); IGT_TIMEOUT(end_time); LIST_HEAD(objects); - struct i915_gem_context *ctx; - struct i915_hw_ppgtt *ppgtt; - unsigned long ndwords, dw; + struct live_test t; int err = -ENODEV; /* @@ -655,6 +676,10 @@ static int igt_ctx_readonly(void *arg) mutex_lock(&i915->drm.struct_mutex); + err = begin_live_test(&t, i915, __func__, ""); + if (err) + goto out_unlock; + ctx = i915_gem_create_context(i915, file->driver_priv); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); @@ -727,7 +752,324 @@ static int igt_ctx_readonly(void *arg) } out_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (end_live_test(&t)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + + mock_file_free(i915, file); + return err; +} + +static int check_scratch(struct i915_gem_context *ctx, u64 offset) +{ + struct drm_mm_node *node = + __drm_mm_interval_first(&ctx->ppgtt->vm.mm, + offset, offset + sizeof(u32) - 1); + if (!node || node->start > offset) + return 0; + + GEM_BUG_ON(offset >= node->start + node->size); + + pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n", + upper_32_bits(offset), lower_32_bits(offset)); + return -EINVAL; +} + +static int write_to_scratch(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + u64 offset, u32 value) +{ + struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_gem_object *obj; + struct i915_request *rq; + struct i915_vma *vma; + u32 *cmd; + int err; + + GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto err; + } + + *cmd++ = MI_STORE_DWORD_IMM_GEN4; + if (INTEL_GEN(i915) 
>= 8) { + *cmd++ = lower_32_bits(offset); + *cmd++ = upper_32_bits(offset); + } else { + *cmd++ = 0; + *cmd++ = offset; + } + *cmd++ = value; + *cmd = MI_BATCH_BUFFER_END; + i915_gem_object_unpin_map(obj); + + err = i915_gem_object_set_to_gtt_domain(obj, false); + if (err) + goto err; + + vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); + if (err) + goto err; + + err = check_scratch(ctx, offset); + if (err) + goto err_unpin; + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_unpin; + } + + err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); + if (err) + goto err_request; + + err = i915_vma_move_to_active(vma, rq, 0); + if (err) + goto skip_request; + + i915_gem_object_set_active_reference(obj); + i915_vma_unpin(vma); + i915_vma_close(vma); + + i915_request_add(rq); + + return 0; + +skip_request: + i915_request_skip(rq, err); +err_request: + i915_request_add(rq); +err_unpin: + i915_vma_unpin(vma); +err: + i915_gem_object_put(obj); + return err; +} + +static int read_from_scratch(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + u64 offset, u32 *value) +{ + struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_gem_object *obj; + const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */ + const u32 result = 0x100; + struct i915_request *rq; + struct i915_vma *vma; + u32 *cmd; + int err; + + GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto err; + } + + memset(cmd, POISON_INUSE, PAGE_SIZE); + if (INTEL_GEN(i915) >= 8) { + *cmd++ = MI_LOAD_REGISTER_MEM_GEN8; + *cmd++ = RCS_GPR0; + *cmd++ = lower_32_bits(offset); + *cmd++ = upper_32_bits(offset); + *cmd++ = MI_STORE_REGISTER_MEM_GEN8; + *cmd++ = RCS_GPR0; + *cmd++ = result; + *cmd++ = 0; + } else { + *cmd++ = MI_LOAD_REGISTER_MEM; + *cmd++ = RCS_GPR0; + *cmd++ = offset; + *cmd++ = MI_STORE_REGISTER_MEM; + *cmd++ = RCS_GPR0; + *cmd++ = result; + } + *cmd = MI_BATCH_BUFFER_END; + i915_gem_object_unpin_map(obj); + + err = i915_gem_object_set_to_gtt_domain(obj, false); + if (err) + goto err; + + vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); + if (err) + goto err; + + err = check_scratch(ctx, offset); + if (err) + goto err_unpin; + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_unpin; + } + + err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); + if (err) + goto err_request; + + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto skip_request; + + i915_vma_unpin(vma); + i915_vma_close(vma); + + i915_request_add(rq); + + err = i915_gem_object_set_to_cpu_domain(obj, false); + if (err) + goto err; + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto err; + } + + *value = cmd[result / sizeof(*cmd)]; + i915_gem_object_unpin_map(obj); + i915_gem_object_put(obj); + + return 0; + +skip_request: + i915_request_skip(rq, err); +err_request: + i915_request_add(rq); +err_unpin: + i915_vma_unpin(vma); +err: + i915_gem_object_put(obj); + return err; +} + +static int igt_vm_isolation(void *arg) 
+{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx_a, *ctx_b; + struct intel_engine_cs *engine; + struct drm_file *file; + I915_RND_STATE(prng); + unsigned long count; + struct live_test t; + unsigned int id; + u64 vm_total; + int err; + + if (INTEL_GEN(i915) < 7) + return 0; + + /* + * The simple goal here is that a write into one context is not + * observed in a second (separate page tables and scratch). + */ + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + + err = begin_live_test(&t, i915, __func__, ""); + if (err) + goto out_unlock; + + ctx_a = i915_gem_create_context(i915, file->driver_priv); + if (IS_ERR(ctx_a)) { + err = PTR_ERR(ctx_a); + goto out_unlock; + } + + ctx_b = i915_gem_create_context(i915, file->driver_priv); + if (IS_ERR(ctx_b)) { + err = PTR_ERR(ctx_b); + goto out_unlock; + } + + /* We can only test vm isolation, if the vm are distinct */ + if (ctx_a->ppgtt == ctx_b->ppgtt) + goto out_unlock; + + vm_total = ctx_a->ppgtt->vm.total; + GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total); + vm_total -= I915_GTT_PAGE_SIZE; + + intel_runtime_pm_get(i915); + + count = 0; + for_each_engine(engine, i915, id) { + IGT_TIMEOUT(end_time); + unsigned long this = 0; + + if (!intel_engine_can_store_dword(engine)) + continue; + + while (!__igt_timeout(end_time, NULL)) { + u32 value = 0xc5c5c5c5; + u64 offset; + + div64_u64_rem(i915_prandom_u64_state(&prng), + vm_total, &offset); + offset &= -sizeof(u32); + offset += I915_GTT_PAGE_SIZE; + + err = write_to_scratch(ctx_a, engine, + offset, 0xdeadbeef); + if (err == 0) + err = read_from_scratch(ctx_b, engine, + offset, &value); + if (err) + goto out_rpm; + + if (value) { + pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n", + engine->name, value, + upper_32_bits(offset), + lower_32_bits(offset), + this); + err = -EINVAL; + goto out_rpm; + } + + this++; + } + count += this; + } + pr_info("Checked %lu scratch offsets across %d engines\n", + count, INTEL_INFO(i915)->num_rings); + +out_rpm: + intel_runtime_pm_put(i915); +out_unlock: + if (end_live_test(&t)) err = -EIO; mutex_unlock(&i915->drm.struct_mutex); @@ -865,33 +1207,6 @@ out_unlock: return err; } -static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915) -{ - struct drm_i915_gem_object *obj; - int err; - - err = i915_gem_init_aliasing_ppgtt(i915); - if (err) - return err; - - list_for_each_entry(obj, &i915->mm.bound_list, mm.link) { - struct i915_vma *vma; - - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); - if (IS_ERR(vma)) - continue; - - vma->flags &= ~I915_VMA_LOCAL_BIND; - } - - return 0; -} - -static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915) -{ - i915_gem_fini_aliasing_ppgtt(i915); -} - int i915_gem_context_mock_selftests(void) { static const struct i915_subtest tests[] = { @@ -917,32 +1232,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) SUBTEST(live_nop_switch), SUBTEST(igt_ctx_exec), SUBTEST(igt_ctx_readonly), + SUBTEST(igt_vm_isolation), }; - bool fake_alias = false; - int err; if (i915_terminally_wedged(&dev_priv->gpu_error)) return 0; - /* Install a fake aliasing gtt for exercise */ - if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) { - mutex_lock(&dev_priv->drm.struct_mutex); - err = fake_aliasing_ppgtt_enable(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); - if (err) - return err; - - GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt); - fake_alias = true; - } - - err = i915_subtests(tests, 
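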
dev_priv); - - if (fake_alias) { - mutex_lock(&dev_priv->drm.struct_mutex); - fake_aliasing_ppgtt_disable(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); - } - - return err; + return i915_subtests(tests, dev_priv); } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 128ad1cf0647..4365979d8222 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -351,7 +351,7 @@ static int igt_evict_contexts(void *arg) * where the GTT space of the request is separate from the GGTT * allocation required to build the request. */ - if (!USES_FULL_PPGTT(i915)) + if (!HAS_FULL_PPGTT(i915)) return 0; mutex_lock(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 8e2e269db97e..69fe86b30fbb 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -153,7 +153,7 @@ static int igt_ppgtt_alloc(void *arg) /* Allocate a ppggt and try to fill the entire range */ - if (!USES_PPGTT(dev_priv)) + if (!HAS_PPGTT(dev_priv)) return 0; ppgtt = __hw_ppgtt_create(dev_priv); @@ -1001,7 +1001,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, IGT_TIMEOUT(end_time); int err; - if (!USES_FULL_PPGTT(dev_priv)) + if (!HAS_FULL_PPGTT(dev_priv)) return 0; file = mock_file(dev_priv); @@ -1337,7 +1337,7 @@ static int igt_gtt_reserve(void *arg) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); if (vma->node.start != total || vma->node.size != 2*I915_GTT_PAGE_SIZE) { - pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", + pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", vma->node.start, vma->node.size, total, 2*I915_GTT_PAGE_SIZE); err = -EINVAL; @@ -1386,7 +1386,7 @@ static int igt_gtt_reserve(void *arg) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); if (vma->node.start != total || vma->node.size != 2*I915_GTT_PAGE_SIZE) { - pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", + pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", vma->node.start, vma->node.size, total, 2*I915_GTT_PAGE_SIZE); err = -EINVAL; @@ -1430,7 +1430,7 @@ static int igt_gtt_reserve(void *arg) GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); if (vma->node.start != offset || vma->node.size != 2*I915_GTT_PAGE_SIZE) { - pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", + pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", vma->node.start, vma->node.size, offset, 2*I915_GTT_PAGE_SIZE); err = -EINVAL; diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c new file mode 100644 index 000000000000..208a966da8ca --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_reset.c @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include "igt_reset.h" + +#include "../i915_drv.h" +#include "../intel_ringbuffer.h" + +void igt_global_reset_lock(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + pr_debug("%s: current gpu_error=%08lx\n", + __func__, i915->gpu_error.flags); + + while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) + 
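/* Someone else holds the reset backoff: sleep until it is released, then retry the claim. */ +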
wait_event(i915->gpu_error.reset_queue, + !test_bit(I915_RESET_BACKOFF, + &i915->gpu_error.flags)); + + for_each_engine(engine, i915, id) { + while (test_and_set_bit(I915_RESET_ENGINE + id, + &i915->gpu_error.flags)) + wait_on_bit(&i915->gpu_error.flags, + I915_RESET_ENGINE + id, + TASK_UNINTERRUPTIBLE); + } +} + +void igt_global_reset_unlock(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, i915, id) + clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + + clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); + wake_up_all(&i915->gpu_error.reset_queue); +} diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h new file mode 100644 index 000000000000..5f0234d045d5 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_reset.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#ifndef __I915_SELFTESTS_IGT_RESET_H__ +#define __I915_SELFTESTS_IGT_RESET_H__ + +#include "../i915_drv.h" + +void igt_global_reset_lock(struct drm_i915_private *i915); +void igt_global_reset_unlock(struct drm_i915_private *i915); + +#endif diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c new file mode 100644 index 000000000000..8cd34f6e6859 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -0,0 +1,199 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include "igt_spinner.h" + +int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915) +{ + unsigned int mode; + void *vaddr; + int err; + + GEM_BUG_ON(INTEL_GEN(i915) < 8); + + memset(spin, 0, sizeof(*spin)); + spin->i915 = i915; + + spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(spin->hws)) { + err = PTR_ERR(spin->hws); + goto err; + } + + spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(spin->obj)) { + err = PTR_ERR(spin->obj); + goto err_hws; + } + + i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC); + vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_obj; + } + spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); + + mode = i915_coherent_map_type(i915); + vaddr = i915_gem_object_pin_map(spin->obj, mode); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_unpin_hws; + } + spin->batch = vaddr; + + return 0; + +err_unpin_hws: + i915_gem_object_unpin_map(spin->hws); +err_obj: + i915_gem_object_put(spin->obj); +err_hws: + i915_gem_object_put(spin->hws); +err: + return err; +} + +static unsigned int seqno_offset(u64 fence) +{ + return offset_in_page(sizeof(u32) * fence); +} + +static u64 hws_address(const struct i915_vma *hws, + const struct i915_request *rq) +{ + return hws->node.start + seqno_offset(rq->fence.context); +} + +static int emit_recurse_batch(struct igt_spinner *spin, + struct i915_request *rq, + u32 arbitration_command) +{ + struct i915_address_space *vm = &rq->gem_context->ppgtt->vm; + struct i915_vma *hws, *vma; + u32 *batch; + int err; + + vma = i915_vma_instance(spin->obj, vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + hws = i915_vma_instance(spin->hws, vm, NULL); + if (IS_ERR(hws)) + return PTR_ERR(hws); + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + return err; + + err = i915_vma_pin(hws, 0, 0, PIN_USER); + if (err) + goto unpin_vma; + + err = i915_vma_move_to_active(vma, rq, 0); + if (err) + 
goto unpin_hws; + + if (!i915_gem_object_has_active_reference(vma->obj)) { + i915_gem_object_get(vma->obj); + i915_gem_object_set_active_reference(vma->obj); + } + + err = i915_vma_move_to_active(hws, rq, 0); + if (err) + goto unpin_hws; + + if (!i915_gem_object_has_active_reference(hws->obj)) { + i915_gem_object_get(hws->obj); + i915_gem_object_set_active_reference(hws->obj); + } + + batch = spin->batch; + + *batch++ = MI_STORE_DWORD_IMM_GEN4; + *batch++ = lower_32_bits(hws_address(hws, rq)); + *batch++ = upper_32_bits(hws_address(hws, rq)); + *batch++ = rq->fence.seqno; + + *batch++ = arbitration_command; + + *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; + *batch++ = lower_32_bits(vma->node.start); + *batch++ = upper_32_bits(vma->node.start); + *batch++ = MI_BATCH_BUFFER_END; /* not reached */ + + i915_gem_chipset_flush(spin->i915); + + err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); + +unpin_hws: + i915_vma_unpin(hws); +unpin_vma: + i915_vma_unpin(vma); + return err; +} + +struct i915_request * +igt_spinner_create_request(struct igt_spinner *spin, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + u32 arbitration_command) +{ + struct i915_request *rq; + int err; + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) + return rq; + + err = emit_recurse_batch(spin, rq, arbitration_command); + if (err) { + i915_request_add(rq); + return ERR_PTR(err); + } + + return rq; +} + +static u32 +hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq) +{ + u32 *seqno = spin->seqno + seqno_offset(rq->fence.context); + + return READ_ONCE(*seqno); +} + +void igt_spinner_end(struct igt_spinner *spin) +{ + *spin->batch = MI_BATCH_BUFFER_END; + i915_gem_chipset_flush(spin->i915); +} + +void igt_spinner_fini(struct igt_spinner *spin) +{ + igt_spinner_end(spin); + + i915_gem_object_unpin_map(spin->obj); + i915_gem_object_put(spin->obj); + + i915_gem_object_unpin_map(spin->hws); + i915_gem_object_put(spin->hws); +} + +bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq) +{ + if (!wait_event_timeout(rq->execute, + READ_ONCE(rq->global_seqno), + msecs_to_jiffies(10))) + return false; + + return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq), + rq->fence.seqno), + 10) && + wait_for(i915_seqno_passed(hws_seqno(spin, rq), + rq->fence.seqno), + 1000)); +} diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h new file mode 100644 index 000000000000..391777c76dc7 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#ifndef __I915_SELFTESTS_IGT_SPINNER_H__ +#define __I915_SELFTESTS_IGT_SPINNER_H__ + +#include "../i915_selftest.h" + +#include "../i915_drv.h" +#include "../i915_request.h" +#include "../intel_ringbuffer.h" +#include "../i915_gem_context.h" + +struct igt_spinner { + struct drm_i915_private *i915; + struct drm_i915_gem_object *hws; + struct drm_i915_gem_object *obj; + u32 *batch; + void *seqno; +}; + +int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915); +void igt_spinner_fini(struct igt_spinner *spin); + +struct i915_request * +igt_spinner_create_request(struct igt_spinner *spin, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + u32 arbitration_command); +void igt_spinner_end(struct igt_spinner *spin); + +bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq); + +#endif diff --git 
a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index 0c0ab82b6228..32cba4cae31a 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -159,6 +159,7 @@ static int igt_guc_clients(void *args) * Get rid of clients created during driver load because the test will * recreate them. */ + guc_clients_disable(guc); guc_clients_destroy(guc); if (guc->execbuf_client || guc->preempt_client) { pr_err("guc_clients_destroy lied!\n"); @@ -197,8 +198,8 @@ static int igt_guc_clients(void *args) goto out; } - /* Now create the doorbells */ - guc_clients_doorbell_init(guc); + /* Now enable the clients */ + guc_clients_enable(guc); /* each client should now have received a doorbell */ if (!client_doorbell_in_sync(guc->execbuf_client) || @@ -212,63 +213,17 @@ static int igt_guc_clients(void *args) * Basic test - an attempt to reallocate a valid doorbell to the * client it is currently assigned should not cause a failure. */ - err = guc_clients_doorbell_init(guc); - if (err) - goto out; - - /* - * Negative test - a client with no doorbell (invalid db id). - * After destroying the doorbell, the db id is changed to - * GUC_DOORBELL_INVALID and the firmware will reject any attempt to - * allocate a doorbell with an invalid id (db has to be reserved before - * allocation). - */ - destroy_doorbell(guc->execbuf_client); - if (client_doorbell_in_sync(guc->execbuf_client)) { - pr_err("destroy db did not work\n"); - err = -EINVAL; - goto out; - } - - unreserve_doorbell(guc->execbuf_client); - - __create_doorbell(guc->execbuf_client); - err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id); - if (err != -EIO) { - pr_err("unexpected (err = %d)", err); - goto out_db; - } - - if (!available_dbs(guc, guc->execbuf_client->priority)) { - pr_err("doorbell not available when it should\n"); - err = -EIO; - goto out_db; - } - -out_db: - /* clean after test */ - __destroy_doorbell(guc->execbuf_client); - err = reserve_doorbell(guc->execbuf_client); - if (err) { - pr_err("failed to reserve back the doorbell back\n"); - } err = create_doorbell(guc->execbuf_client); - if (err) { - pr_err("recreate doorbell failed\n"); - goto out; - } out: /* * Leave clean state for other test, plus the driver always destroy the * clients during unload. 
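* Disabling then destroying the clients, and recreating them enabled, * restores the state the driver expects.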
*/ - destroy_doorbell(guc->execbuf_client); - if (guc->preempt_client) - destroy_doorbell(guc->preempt_client); + guc_clients_disable(guc); guc_clients_destroy(guc); guc_clients_create(guc); - guc_clients_doorbell_init(guc); + guc_clients_enable(guc); unlock: intel_runtime_pm_put(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); @@ -352,7 +307,7 @@ static int igt_guc_doorbells(void *arg) db_id = clients[i]->doorbell_id; - err = create_doorbell(clients[i]); + err = __guc_client_enable(clients[i]); if (err) { pr_err("[%d] Failed to create a doorbell\n", i); goto out; @@ -378,7 +333,7 @@ static int igt_guc_doorbells(void *arg) out: for (i = 0; i < ATTEMPTS; i++) if (!IS_ERR_OR_NULL(clients[i])) { - destroy_doorbell(clients[i]); + __guc_client_disable(clients[i]); guc_client_free(clients[i]); } unlock: diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index db378226ac10..40efbed611de 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -27,6 +27,7 @@ #include "../i915_selftest.h" #include "i915_random.h" #include "igt_flush_test.h" +#include "igt_reset.h" #include "igt_wedge_me.h" #include "mock_context.h" @@ -76,7 +77,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915) h->seqno = memset(vaddr, 0xff, PAGE_SIZE); vaddr = i915_gem_object_pin_map(h->obj, - HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC); + i915_coherent_map_type(i915)); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); goto err_unpin_hws; @@ -234,7 +235,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) return ERR_CAST(obj); vaddr = i915_gem_object_pin_map(obj, - HAS_LLC(h->i915) ? I915_MAP_WB : I915_MAP_WC); + i915_coherent_map_type(h->i915)); if (IS_ERR(vaddr)) { i915_gem_object_put(obj); return ERR_CAST(vaddr); @@ -308,6 +309,7 @@ static int igt_hang_sanitycheck(void *arg) goto unlock; for_each_engine(engine, i915, id) { + struct igt_wedge_me w; long timeout; if (!intel_engine_can_store_dword(engine)) @@ -328,9 +330,14 @@ static int igt_hang_sanitycheck(void *arg) i915_request_add(rq); - timeout = i915_request_wait(rq, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); + timeout = 0; + igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) + timeout = i915_request_wait(rq, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (i915_terminally_wedged(&i915->gpu_error)) + timeout = -EIO; + i915_request_put(rq); if (timeout < 0) { @@ -348,40 +355,6 @@ unlock: return err; } -static void global_reset_lock(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - pr_debug("%s: current gpu_error=%08lx\n", - __func__, i915->gpu_error.flags); - - while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) - wait_event(i915->gpu_error.reset_queue, - !test_bit(I915_RESET_BACKOFF, - &i915->gpu_error.flags)); - - for_each_engine(engine, i915, id) { - while (test_and_set_bit(I915_RESET_ENGINE + id, - &i915->gpu_error.flags)) - wait_on_bit(&i915->gpu_error.flags, - I915_RESET_ENGINE + id, - TASK_UNINTERRUPTIBLE); - } -} - -static void global_reset_unlock(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - - clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); - wake_up_all(&i915->gpu_error.reset_queue); -} - static int igt_global_reset(void *arg) { struct drm_i915_private *i915 = arg; @@ 
-390,7 +363,7 @@ static int igt_global_reset(void *arg) /* Check that we can issue a global GPU reset */ - global_reset_lock(i915); + igt_global_reset_lock(i915); set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags); mutex_lock(&i915->drm.struct_mutex); @@ -405,7 +378,7 @@ static int igt_global_reset(void *arg) mutex_unlock(&i915->drm.struct_mutex); GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); - global_reset_unlock(i915); + igt_global_reset_unlock(i915); if (i915_terminally_wedged(&i915->gpu_error)) err = -EIO; @@ -936,7 +909,7 @@ static int igt_reset_wait(void *arg) /* Check that we detect a stuck waiter and issue a reset */ - global_reset_lock(i915); + igt_global_reset_lock(i915); mutex_lock(&i915->drm.struct_mutex); err = hang_init(&h, i915); @@ -988,7 +961,7 @@ fini: hang_fini(&h); unlock: mutex_unlock(&i915->drm.struct_mutex); - global_reset_unlock(i915); + igt_global_reset_unlock(i915); if (i915_terminally_wedged(&i915->gpu_error)) return -EIO; @@ -1066,7 +1039,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, /* Check that we can recover an unbind stuck on a hanging request */ - global_reset_lock(i915); + igt_global_reset_lock(i915); mutex_lock(&i915->drm.struct_mutex); err = hang_init(&h, i915); @@ -1150,6 +1123,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, tsk = NULL; goto out_reset; } + get_task_struct(tsk); wait_for_completion(&arg.completion); @@ -1172,6 +1146,8 @@ out_reset: /* The reset, even indirectly, should take less than 10ms. */ igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) err = kthread_stop(tsk); + + put_task_struct(tsk); } mutex_lock(&i915->drm.struct_mutex); @@ -1183,7 +1159,7 @@ fini: hang_fini(&h); unlock: mutex_unlock(&i915->drm.struct_mutex); - global_reset_unlock(i915); + igt_global_reset_unlock(i915); if (i915_terminally_wedged(&i915->gpu_error)) return -EIO; @@ -1263,7 +1239,7 @@ static int igt_reset_queue(void *arg) /* Check that we replay pending requests following a hang */ - global_reset_lock(i915); + igt_global_reset_lock(i915); mutex_lock(&i915->drm.struct_mutex); err = hang_init(&h, i915); @@ -1394,7 +1370,7 @@ fini: hang_fini(&h); unlock: mutex_unlock(&i915->drm.struct_mutex); - global_reset_unlock(i915); + igt_global_reset_unlock(i915); if (i915_terminally_wedged(&i915->gpu_error)) return -EIO; diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index 1aea7a8f2224..ca461e3a5f27 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -6,215 +6,18 @@ #include "../i915_selftest.h" #include "igt_flush_test.h" +#include "igt_spinner.h" +#include "i915_random.h" #include "mock_context.h" -struct spinner { - struct drm_i915_private *i915; - struct drm_i915_gem_object *hws; - struct drm_i915_gem_object *obj; - u32 *batch; - void *seqno; -}; - -static int spinner_init(struct spinner *spin, struct drm_i915_private *i915) -{ - unsigned int mode; - void *vaddr; - int err; - - GEM_BUG_ON(INTEL_GEN(i915) < 8); - - memset(spin, 0, sizeof(*spin)); - spin->i915 = i915; - - spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(spin->hws)) { - err = PTR_ERR(spin->hws); - goto err; - } - - spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(spin->obj)) { - err = PTR_ERR(spin->obj); - goto err_hws; - } - - i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC); - vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB); - if (IS_ERR(vaddr)) { - err = 
PTR_ERR(vaddr); - goto err_obj; - } - spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); - - mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC; - vaddr = i915_gem_object_pin_map(spin->obj, mode); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto err_unpin_hws; - } - spin->batch = vaddr; - - return 0; - -err_unpin_hws: - i915_gem_object_unpin_map(spin->hws); -err_obj: - i915_gem_object_put(spin->obj); -err_hws: - i915_gem_object_put(spin->hws); -err: - return err; -} - -static unsigned int seqno_offset(u64 fence) -{ - return offset_in_page(sizeof(u32) * fence); -} - -static u64 hws_address(const struct i915_vma *hws, - const struct i915_request *rq) -{ - return hws->node.start + seqno_offset(rq->fence.context); -} - -static int emit_recurse_batch(struct spinner *spin, - struct i915_request *rq, - u32 arbitration_command) -{ - struct i915_address_space *vm = &rq->gem_context->ppgtt->vm; - struct i915_vma *hws, *vma; - u32 *batch; - int err; - - vma = i915_vma_instance(spin->obj, vm, NULL); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - hws = i915_vma_instance(spin->hws, vm, NULL); - if (IS_ERR(hws)) - return PTR_ERR(hws); - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - return err; - - err = i915_vma_pin(hws, 0, 0, PIN_USER); - if (err) - goto unpin_vma; - - err = i915_vma_move_to_active(vma, rq, 0); - if (err) - goto unpin_hws; - - if (!i915_gem_object_has_active_reference(vma->obj)) { - i915_gem_object_get(vma->obj); - i915_gem_object_set_active_reference(vma->obj); - } - - err = i915_vma_move_to_active(hws, rq, 0); - if (err) - goto unpin_hws; - - if (!i915_gem_object_has_active_reference(hws->obj)) { - i915_gem_object_get(hws->obj); - i915_gem_object_set_active_reference(hws->obj); - } - - batch = spin->batch; - - *batch++ = MI_STORE_DWORD_IMM_GEN4; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = upper_32_bits(hws_address(hws, rq)); - *batch++ = rq->fence.seqno; - - *batch++ = arbitration_command; - - *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; - *batch++ = lower_32_bits(vma->node.start); - *batch++ = upper_32_bits(vma->node.start); - *batch++ = MI_BATCH_BUFFER_END; /* not reached */ - - i915_gem_chipset_flush(spin->i915); - - err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); - -unpin_hws: - i915_vma_unpin(hws); -unpin_vma: - i915_vma_unpin(vma); - return err; -} - -static struct i915_request * -spinner_create_request(struct spinner *spin, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - u32 arbitration_command) -{ - struct i915_request *rq; - int err; - - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) - return rq; - - err = emit_recurse_batch(spin, rq, arbitration_command); - if (err) { - i915_request_add(rq); - return ERR_PTR(err); - } - - return rq; -} - -static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq) -{ - u32 *seqno = spin->seqno + seqno_offset(rq->fence.context); - - return READ_ONCE(*seqno); -} - -static void spinner_end(struct spinner *spin) -{ - *spin->batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(spin->i915); -} - -static void spinner_fini(struct spinner *spin) -{ - spinner_end(spin); - - i915_gem_object_unpin_map(spin->obj); - i915_gem_object_put(spin->obj); - - i915_gem_object_unpin_map(spin->hws); - i915_gem_object_put(spin->hws); -} - -static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq) -{ - if (!wait_event_timeout(rq->execute, - READ_ONCE(rq->global_seqno), - msecs_to_jiffies(10))) - return false; - - return 
!(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq), - rq->fence.seqno), - 10) && - wait_for(i915_seqno_passed(hws_seqno(spin, rq), - rq->fence.seqno), - 1000)); -} - static int live_sanitycheck(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; struct i915_gem_context *ctx; enum intel_engine_id id; - struct spinner spin; + struct igt_spinner spin; int err = -ENOMEM; if (!HAS_LOGICAL_RING_CONTEXTS(i915)) @@ -223,7 +26,7 @@ static int live_sanitycheck(void *arg) mutex_lock(&i915->drm.struct_mutex); intel_runtime_pm_get(i915); - if (spinner_init(&spin, i915)) + if (igt_spinner_init(&spin, i915)) goto err_unlock; ctx = kernel_context(i915); @@ -233,14 +36,14 @@ static int live_sanitycheck(void *arg) for_each_engine(engine, i915, id) { struct i915_request *rq; - rq = spinner_create_request(&spin, ctx, engine, MI_NOOP); + rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_ctx; } i915_request_add(rq); - if (!wait_for_spinner(&spin, rq)) { + if (!igt_wait_for_spinner(&spin, rq)) { GEM_TRACE("spinner failed to start\n"); GEM_TRACE_DUMP(); i915_gem_set_wedged(i915); @@ -248,7 +51,7 @@ static int live_sanitycheck(void *arg) goto err_ctx; } - spinner_end(&spin); + igt_spinner_end(&spin); if (igt_flush_test(i915, I915_WAIT_LOCKED)) { err = -EIO; goto err_ctx; @@ -259,7 +62,7 @@ static int live_sanitycheck(void *arg) err_ctx: kernel_context_close(ctx); err_spin: - spinner_fini(&spin); + igt_spinner_fini(&spin); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); intel_runtime_pm_put(i915); @@ -271,7 +74,7 @@ static int live_preempt(void *arg) { struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx_hi, *ctx_lo; - struct spinner spin_hi, spin_lo; + struct igt_spinner spin_hi, spin_lo; struct intel_engine_cs *engine; enum intel_engine_id id; int err = -ENOMEM; @@ -282,34 +85,36 @@ static int live_preempt(void *arg) mutex_lock(&i915->drm.struct_mutex); intel_runtime_pm_get(i915); - if (spinner_init(&spin_hi, i915)) + if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; - if (spinner_init(&spin_lo, i915)) + if (igt_spinner_init(&spin_lo, i915)) goto err_spin_hi; ctx_hi = kernel_context(i915); if (!ctx_hi) goto err_spin_lo; - ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; + ctx_hi->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); ctx_lo = kernel_context(i915); if (!ctx_lo) goto err_ctx_hi; - ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; + ctx_lo->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); for_each_engine(engine, i915, id) { struct i915_request *rq; - rq = spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); + rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, + MI_ARB_CHECK); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_ctx_lo; } i915_request_add(rq); - if (!wait_for_spinner(&spin_lo, rq)) { + if (!igt_wait_for_spinner(&spin_lo, rq)) { GEM_TRACE("lo spinner failed to start\n"); GEM_TRACE_DUMP(); i915_gem_set_wedged(i915); @@ -317,16 +122,16 @@ static int live_preempt(void *arg) goto err_ctx_lo; } - rq = spinner_create_request(&spin_hi, ctx_hi, engine, - MI_ARB_CHECK); + rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, + MI_ARB_CHECK); if (IS_ERR(rq)) { - spinner_end(&spin_lo); + igt_spinner_end(&spin_lo); err = PTR_ERR(rq); goto err_ctx_lo; } i915_request_add(rq); - if (!wait_for_spinner(&spin_hi, rq)) { + if (!igt_wait_for_spinner(&spin_hi, rq)) { GEM_TRACE("hi spinner failed to start\n"); GEM_TRACE_DUMP(); 
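/* The spinner never ran: wedge the GPU to cancel the stuck requests before reporting failure. */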
i915_gem_set_wedged(i915); @@ -334,8 +139,8 @@ static int live_preempt(void *arg) goto err_ctx_lo; } - spinner_end(&spin_hi); - spinner_end(&spin_lo); + igt_spinner_end(&spin_hi); + igt_spinner_end(&spin_lo); if (igt_flush_test(i915, I915_WAIT_LOCKED)) { err = -EIO; goto err_ctx_lo; @@ -348,9 +153,9 @@ err_ctx_lo: err_ctx_hi: kernel_context_close(ctx_hi); err_spin_lo: - spinner_fini(&spin_lo); + igt_spinner_fini(&spin_lo); err_spin_hi: - spinner_fini(&spin_hi); + igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); intel_runtime_pm_put(i915); @@ -362,7 +167,7 @@ static int live_late_preempt(void *arg) { struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx_hi, *ctx_lo; - struct spinner spin_hi, spin_lo; + struct igt_spinner spin_hi, spin_lo; struct intel_engine_cs *engine; struct i915_sched_attr attr = {}; enum intel_engine_id id; @@ -374,10 +179,10 @@ static int live_late_preempt(void *arg) mutex_lock(&i915->drm.struct_mutex); intel_runtime_pm_get(i915); - if (spinner_init(&spin_hi, i915)) + if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; - if (spinner_init(&spin_lo, i915)) + if (igt_spinner_init(&spin_lo, i915)) goto err_spin_hi; ctx_hi = kernel_context(i915); @@ -391,43 +196,44 @@ static int live_late_preempt(void *arg) for_each_engine(engine, i915, id) { struct i915_request *rq; - rq = spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); + rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, + MI_ARB_CHECK); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_ctx_lo; } i915_request_add(rq); - if (!wait_for_spinner(&spin_lo, rq)) { + if (!igt_wait_for_spinner(&spin_lo, rq)) { pr_err("First context failed to start\n"); goto err_wedged; } - rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP); + rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, + MI_NOOP); if (IS_ERR(rq)) { - spinner_end(&spin_lo); + igt_spinner_end(&spin_lo); err = PTR_ERR(rq); goto err_ctx_lo; } i915_request_add(rq); - if (wait_for_spinner(&spin_hi, rq)) { + if (igt_wait_for_spinner(&spin_hi, rq)) { pr_err("Second context overtook first?\n"); goto err_wedged; } - attr.priority = I915_PRIORITY_MAX; + attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); engine->schedule(rq, &attr); - if (!wait_for_spinner(&spin_hi, rq)) { + if (!igt_wait_for_spinner(&spin_hi, rq)) { pr_err("High priority context failed to preempt the low priority context\n"); GEM_TRACE_DUMP(); goto err_wedged; } - spinner_end(&spin_hi); - spinner_end(&spin_lo); + igt_spinner_end(&spin_hi); + igt_spinner_end(&spin_lo); if (igt_flush_test(i915, I915_WAIT_LOCKED)) { err = -EIO; goto err_ctx_lo; @@ -440,9 +246,9 @@ err_ctx_lo: err_ctx_hi: kernel_context_close(ctx_hi); err_spin_lo: - spinner_fini(&spin_lo); + igt_spinner_fini(&spin_lo); err_spin_hi: - spinner_fini(&spin_hi); + igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); intel_runtime_pm_put(i915); @@ -450,8 +256,8 @@ err_unlock: return err; err_wedged: - spinner_end(&spin_hi); - spinner_end(&spin_lo); + igt_spinner_end(&spin_hi); + igt_spinner_end(&spin_lo); i915_gem_set_wedged(i915); err = -EIO; goto err_ctx_lo; @@ -461,7 +267,7 @@ static int live_preempt_hang(void *arg) { struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx_hi, *ctx_lo; - struct spinner spin_hi, spin_lo; + struct igt_spinner spin_hi, spin_lo; struct intel_engine_cs *engine; enum intel_engine_id id; int err = -ENOMEM; @@ -475,10 +281,10 @@ static int live_preempt_hang(void *arg) mutex_lock(&i915->drm.struct_mutex); 
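/* The test body runs under struct_mutex with a runtime-pm wakeref held. */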
intel_runtime_pm_get(i915); - if (spinner_init(&spin_hi, i915)) + if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; - if (spinner_init(&spin_lo, i915)) + if (igt_spinner_init(&spin_lo, i915)) goto err_spin_hi; ctx_hi = kernel_context(i915); @@ -497,15 +303,15 @@ static int live_preempt_hang(void *arg) if (!intel_engine_has_preemption(engine)) continue; - rq = spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); + rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, + MI_ARB_CHECK); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_ctx_lo; } i915_request_add(rq); - if (!wait_for_spinner(&spin_lo, rq)) { + if (!igt_wait_for_spinner(&spin_lo, rq)) { GEM_TRACE("lo spinner failed to start\n"); GEM_TRACE_DUMP(); i915_gem_set_wedged(i915); @@ -513,10 +319,10 @@ static int live_preempt_hang(void *arg) goto err_ctx_lo; } - rq = spinner_create_request(&spin_hi, ctx_hi, engine, - MI_ARB_CHECK); + rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, + MI_ARB_CHECK); if (IS_ERR(rq)) { - spinner_end(&spin_lo); + igt_spinner_end(&spin_lo); err = PTR_ERR(rq); goto err_ctx_lo; } @@ -541,7 +347,7 @@ static int live_preempt_hang(void *arg) engine->execlists.preempt_hang.inject_hang = false; - if (!wait_for_spinner(&spin_hi, rq)) { + if (!igt_wait_for_spinner(&spin_hi, rq)) { GEM_TRACE("hi spinner failed to start\n"); GEM_TRACE_DUMP(); i915_gem_set_wedged(i915); @@ -549,8 +355,8 @@ static int live_preempt_hang(void *arg) goto err_ctx_lo; } - spinner_end(&spin_hi); - spinner_end(&spin_lo); + igt_spinner_end(&spin_hi); + igt_spinner_end(&spin_lo); if (igt_flush_test(i915, I915_WAIT_LOCKED)) { err = -EIO; goto err_ctx_lo; @@ -563,9 +369,9 @@ err_ctx_lo: err_ctx_hi: kernel_context_close(ctx_hi); err_spin_lo: - spinner_fini(&spin_lo); + igt_spinner_fini(&spin_lo); err_spin_hi: - spinner_fini(&spin_hi); + igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); intel_runtime_pm_put(i915); @@ -573,6 +379,261 @@ err_unlock: return err; } +static int random_range(struct rnd_state *rnd, int min, int max) +{ + return i915_prandom_u32_max_state(max - min, rnd) + min; +} + +static int random_priority(struct rnd_state *rnd) +{ + return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX); +} + +struct preempt_smoke { + struct drm_i915_private *i915; + struct i915_gem_context **contexts; + struct intel_engine_cs *engine; + struct drm_i915_gem_object *batch; + unsigned int ncontext; + struct rnd_state prng; + unsigned long count; +}; + +static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke) +{ + return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext, + &smoke->prng)]; +} + +static int smoke_submit(struct preempt_smoke *smoke, + struct i915_gem_context *ctx, int prio, + struct drm_i915_gem_object *batch) +{ + struct i915_request *rq; + struct i915_vma *vma = NULL; + int err = 0; + + if (batch) { + vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + return err; + } + + ctx->sched.priority = prio; + + rq = i915_request_alloc(smoke->engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto unpin; + } + + if (vma) { + err = rq->engine->emit_bb_start(rq, + vma->node.start, + PAGE_SIZE, 0); + if (!err) + err = i915_vma_move_to_active(vma, rq, 0); + } + + i915_request_add(rq); + +unpin: + if (vma) + i915_vma_unpin(vma); + + return err; +} + +static int smoke_crescendo_thread(void *arg) +{ + struct preempt_smoke *smoke = arg; + 
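/* Each worker cycles request priority (count % I915_PRIORITY_MAX) on its own engine until the selftest timeout expires. */ +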
IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_gem_context *ctx = smoke_context(smoke);
+		int err;
+
+		mutex_lock(&smoke->i915->drm.struct_mutex);
+		err = smoke_submit(smoke,
+				   ctx, count % I915_PRIORITY_MAX,
+				   smoke->batch);
+		mutex_unlock(&smoke->i915->drm.struct_mutex);
+		if (err)
+			return err;
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+
+	smoke->count = count;
+	return 0;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+	struct task_struct *tsk[I915_NUM_ENGINES] = {};
+	struct preempt_smoke arg[I915_NUM_ENGINES];
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	unsigned long count;
+	int err = 0;
+
+	mutex_unlock(&smoke->i915->drm.struct_mutex);
+
+	for_each_engine(engine, smoke->i915, id) {
+		arg[id] = *smoke;
+		arg[id].engine = engine;
+		if (!(flags & BATCH))
+			arg[id].batch = NULL;
+		arg[id].count = 0;
+
+		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
+				      "igt/smoke:%d", id);
+		if (IS_ERR(tsk[id])) {
+			err = PTR_ERR(tsk[id]);
+			break;
+		}
+		get_task_struct(tsk[id]);
+	}
+
+	count = 0;
+	for_each_engine(engine, smoke->i915, id) {
+		int status;
+
+		if (IS_ERR_OR_NULL(tsk[id]))
+			continue;
+
+		status = kthread_stop(tsk[id]);
+		if (status && !err)
+			err = status;
+
+		count += arg[id].count;
+
+		put_task_struct(tsk[id]);
+	}
+
+	mutex_lock(&smoke->i915->drm.struct_mutex);
+
+	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+		count, flags,
+		INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+	return err;
+}
+
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+	enum intel_engine_id id;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		for_each_engine(smoke->engine, smoke->i915, id) {
+			struct i915_gem_context *ctx = smoke_context(smoke);
+			int err;
+
+			err = smoke_submit(smoke,
+					   ctx, random_priority(&smoke->prng),
+					   flags & BATCH ? 
smoke->batch : NULL); + if (err) + return err; + + count++; + } + } while (!__igt_timeout(end_time, NULL)); + + pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", + count, flags, + INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext); + return 0; +} + +static int live_preempt_smoke(void *arg) +{ + struct preempt_smoke smoke = { + .i915 = arg, + .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed), + .ncontext = 1024, + }; + const unsigned int phase[] = { 0, BATCH }; + int err = -ENOMEM; + u32 *cs; + int n; + + if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915)) + return 0; + + smoke.contexts = kmalloc_array(smoke.ncontext, + sizeof(*smoke.contexts), + GFP_KERNEL); + if (!smoke.contexts) + return -ENOMEM; + + mutex_lock(&smoke.i915->drm.struct_mutex); + intel_runtime_pm_get(smoke.i915); + + smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE); + if (IS_ERR(smoke.batch)) { + err = PTR_ERR(smoke.batch); + goto err_unlock; + } + + cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_batch; + } + for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++) + cs[n] = MI_ARB_CHECK; + cs[n] = MI_BATCH_BUFFER_END; + i915_gem_object_unpin_map(smoke.batch); + + err = i915_gem_object_set_to_gtt_domain(smoke.batch, false); + if (err) + goto err_batch; + + for (n = 0; n < smoke.ncontext; n++) { + smoke.contexts[n] = kernel_context(smoke.i915); + if (!smoke.contexts[n]) + goto err_ctx; + } + + for (n = 0; n < ARRAY_SIZE(phase); n++) { + err = smoke_crescendo(&smoke, phase[n]); + if (err) + goto err_ctx; + + err = smoke_random(&smoke, phase[n]); + if (err) + goto err_ctx; + } + +err_ctx: + if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED)) + err = -EIO; + + for (n = 0; n < smoke.ncontext; n++) { + if (!smoke.contexts[n]) + break; + kernel_context_close(smoke.contexts[n]); + } + +err_batch: + i915_gem_object_put(smoke.batch); +err_unlock: + intel_runtime_pm_put(smoke.i915); + mutex_unlock(&smoke.i915->drm.struct_mutex); + kfree(smoke.contexts); + + return err; +} + int intel_execlists_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { @@ -580,6 +641,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_preempt), SUBTEST(live_late_preempt), SUBTEST(live_preempt_hang), + SUBTEST(live_preempt_smoke), }; if (!HAS_EXECLISTS(i915)) diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index d1a0923d2f38..67017d5175b8 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -6,6 +6,9 @@ #include "../i915_selftest.h" +#include "igt_flush_test.h" +#include "igt_reset.h" +#include "igt_spinner.h" #include "igt_wedge_me.h" #include "mock_context.h" @@ -91,17 +94,23 @@ err_obj: return ERR_PTR(err); } -static u32 get_whitelist_reg(const struct whitelist *w, unsigned int i) +static u32 +get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i) { - return i < w->count ? i915_mmio_reg_offset(w->reg[i]) : w->nopid; + i915_reg_t reg = i < engine->whitelist.count ? 
+ engine->whitelist.list[i].reg : + RING_NOPID(engine->mmio_base); + + return i915_mmio_reg_offset(reg); } -static void print_results(const struct whitelist *w, const u32 *results) +static void +print_results(const struct intel_engine_cs *engine, const u32 *results) { unsigned int i; for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { - u32 expected = get_whitelist_reg(w, i); + u32 expected = get_whitelist_reg(engine, i); u32 actual = results[i]; pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n", @@ -109,8 +118,7 @@ static void print_results(const struct whitelist *w, const u32 *results) } } -static int check_whitelist(const struct whitelist *w, - struct i915_gem_context *ctx, +static int check_whitelist(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { struct drm_i915_gem_object *results; @@ -138,11 +146,11 @@ static int check_whitelist(const struct whitelist *w, } for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { - u32 expected = get_whitelist_reg(w, i); + u32 expected = get_whitelist_reg(engine, i); u32 actual = vaddr[i]; if (expected != actual) { - print_results(w, vaddr); + print_results(engine, vaddr); pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n", i, expected, actual); @@ -159,66 +167,107 @@ out_put: static int do_device_reset(struct intel_engine_cs *engine) { - i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL); + set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags); + i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds"); return 0; } static int do_engine_reset(struct intel_engine_cs *engine) { - return i915_reset_engine(engine, NULL); + return i915_reset_engine(engine, "live_workarounds"); } -static int switch_to_scratch_context(struct intel_engine_cs *engine) +static int +switch_to_scratch_context(struct intel_engine_cs *engine, + struct igt_spinner *spin) { struct i915_gem_context *ctx; struct i915_request *rq; + int err = 0; ctx = kernel_context(engine->i915); if (IS_ERR(ctx)) return PTR_ERR(ctx); intel_runtime_pm_get(engine->i915); - rq = i915_request_alloc(engine, ctx); + + if (spin) + rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); + else + rq = i915_request_alloc(engine, ctx); + intel_runtime_pm_put(engine->i915); kernel_context_close(ctx); - if (IS_ERR(rq)) - return PTR_ERR(rq); + + if (IS_ERR(rq)) { + spin = NULL; + err = PTR_ERR(rq); + goto err; + } i915_request_add(rq); - return 0; + if (spin && !igt_wait_for_spinner(spin, rq)) { + pr_err("Spinner failed to start\n"); + err = -ETIMEDOUT; + } + +err: + if (err && spin) + igt_spinner_end(spin); + + return err; } static int check_whitelist_across_reset(struct intel_engine_cs *engine, int (*reset)(struct intel_engine_cs *), - const struct whitelist *w, const char *name) { + struct drm_i915_private *i915 = engine->i915; + bool want_spin = reset == do_engine_reset; struct i915_gem_context *ctx; + struct igt_spinner spin; int err; - ctx = kernel_context(engine->i915); + pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n", + engine->whitelist.count, name); + + if (want_spin) { + err = igt_spinner_init(&spin, i915); + if (err) + return err; + } + + ctx = kernel_context(i915); if (IS_ERR(ctx)) return PTR_ERR(ctx); - err = check_whitelist(w, ctx, engine); + err = check_whitelist(ctx, engine); if (err) { pr_err("Invalid whitelist *before* %s reset!\n", name); goto out; } - err = switch_to_scratch_context(engine); + err = switch_to_scratch_context(engine, want_spin ? 
&spin : NULL); if (err) goto out; + intel_runtime_pm_get(i915); err = reset(engine); + intel_runtime_pm_put(i915); + + if (want_spin) { + igt_spinner_end(&spin); + igt_spinner_fini(&spin); + } + if (err) { pr_err("%s reset failed\n", name); goto out; } - err = check_whitelist(w, ctx, engine); + err = check_whitelist(ctx, engine); if (err) { pr_err("Whitelist not preserved in context across %s reset!\n", name); @@ -227,11 +276,11 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, kernel_context_close(ctx); - ctx = kernel_context(engine->i915); + ctx = kernel_context(i915); if (IS_ERR(ctx)) return PTR_ERR(ctx); - err = check_whitelist(w, ctx, engine); + err = check_whitelist(ctx, engine); if (err) { pr_err("Invalid whitelist *after* %s reset in fresh context!\n", name); @@ -247,26 +296,18 @@ static int live_reset_whitelist(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine = i915->engine[RCS]; - struct i915_gpu_error *error = &i915->gpu_error; - struct whitelist w; int err = 0; /* If we reset the gpu, we should not lose the RING_NONPRIV */ - if (!engine) - return 0; - - if (!whitelist_build(engine, &w)) + if (!engine || engine->whitelist.count == 0) return 0; - pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", w.count); - - set_bit(I915_RESET_BACKOFF, &error->flags); - set_bit(I915_RESET_ENGINE + engine->id, &error->flags); + igt_global_reset_lock(i915); if (intel_has_reset_engine(i915)) { err = check_whitelist_across_reset(engine, - do_engine_reset, &w, + do_engine_reset, "engine"); if (err) goto out; @@ -274,22 +315,156 @@ static int live_reset_whitelist(void *arg) if (intel_has_gpu_reset(i915)) { err = check_whitelist_across_reset(engine, - do_device_reset, &w, + do_device_reset, "device"); if (err) goto out; } out: - clear_bit(I915_RESET_ENGINE + engine->id, &error->flags); - clear_bit(I915_RESET_BACKOFF, &error->flags); + igt_global_reset_unlock(i915); return err; } +static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + bool ok = true; + + ok &= intel_gt_verify_workarounds(i915, str); + + for_each_engine(engine, i915, id) + ok &= intel_engine_verify_workarounds(engine, str); + + return ok; +} + +static int +live_gpu_reset_gt_engine_workarounds(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gpu_error *error = &i915->gpu_error; + bool ok; + + if (!intel_has_gpu_reset(i915)) + return 0; + + pr_info("Verifying after GPU reset...\n"); + + igt_global_reset_lock(i915); + + ok = verify_gt_engine_wa(i915, "before reset"); + if (!ok) + goto out; + + intel_runtime_pm_get(i915); + set_bit(I915_RESET_HANDOFF, &error->flags); + i915_reset(i915, ALL_ENGINES, "live_workarounds"); + intel_runtime_pm_put(i915); + + ok = verify_gt_engine_wa(i915, "after reset"); + +out: + igt_global_reset_unlock(i915); + + return ok ? 
0 : -ESRCH; +} + +static int +live_engine_reset_gt_engine_workarounds(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + struct igt_spinner spin; + enum intel_engine_id id; + struct i915_request *rq; + int ret = 0; + + if (!intel_has_reset_engine(i915)) + return 0; + + ctx = kernel_context(i915); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + igt_global_reset_lock(i915); + + for_each_engine(engine, i915, id) { + bool ok; + + pr_info("Verifying after %s reset...\n", engine->name); + + ok = verify_gt_engine_wa(i915, "before reset"); + if (!ok) { + ret = -ESRCH; + goto err; + } + + intel_runtime_pm_get(i915); + i915_reset_engine(engine, "live_workarounds"); + intel_runtime_pm_put(i915); + + ok = verify_gt_engine_wa(i915, "after idle reset"); + if (!ok) { + ret = -ESRCH; + goto err; + } + + ret = igt_spinner_init(&spin, i915); + if (ret) + goto err; + + intel_runtime_pm_get(i915); + + rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + igt_spinner_fini(&spin); + intel_runtime_pm_put(i915); + goto err; + } + + i915_request_add(rq); + + if (!igt_wait_for_spinner(&spin, rq)) { + pr_err("Spinner failed to start\n"); + igt_spinner_fini(&spin); + intel_runtime_pm_put(i915); + ret = -ETIMEDOUT; + goto err; + } + + i915_reset_engine(engine, "live_workarounds"); + + intel_runtime_pm_put(i915); + + igt_spinner_end(&spin); + igt_spinner_fini(&spin); + + ok = verify_gt_engine_wa(i915, "after busy reset"); + if (!ok) { + ret = -ESRCH; + goto err; + } + } + +err: + igt_global_reset_unlock(i915); + kernel_context_close(ctx); + + igt_flush_test(i915, I915_WAIT_LOCKED); + + return ret; +} + int intel_workarounds_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_reset_whitelist), + SUBTEST(live_gpu_reset_gt_engine_workarounds), + SUBTEST(live_engine_reset_gt_engine_workarounds), }; int err; diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 22a73da45ad5..d0c44c18db42 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -200,7 +200,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, engine->base.submit_request = mock_submit_request; i915_timeline_init(i915, &engine->base.timeline, engine->base.name); - lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE); + i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE); intel_engine_init_breadcrumbs(&engine->base); engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */ diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index 435a2c35ee8c..361e962a7969 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ -206,39 +206,6 @@ static const struct mipi_dsi_host_ops intel_dsi_host_ops = { .transfer = intel_dsi_host_transfer, }; -static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, - enum port port) -{ - struct intel_dsi_host *host; - struct mipi_dsi_device *device; - - host = kzalloc(sizeof(*host), GFP_KERNEL); - if (!host) - return NULL; - - host->base.ops = &intel_dsi_host_ops; - host->intel_dsi = intel_dsi; - host->port = port; - - /* - * We should call mipi_dsi_host_register(&host->base) here, but we don't - * have a host->dev, and we don't have OF stuff either. So just use the - * dsi framework as a library and hope for the best. 
Create the dsi - * devices by ourselves here too. Need to be careful though, because we - * don't initialize any of the driver model devices here. - */ - device = kzalloc(sizeof(*device), GFP_KERNEL); - if (!device) { - kfree(host); - return NULL; - } - - device->host = &host->base; - host->device = device; - - return host; -} - /* * send a video mode command * @@ -290,16 +257,6 @@ static void band_gap_reset(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->sb_lock); } -static inline bool is_vid_mode(struct intel_dsi *intel_dsi) -{ - return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE; -} - -static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) -{ - return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE; -} - static bool intel_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -314,6 +271,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, int ret; DRM_DEBUG_KMS("\n"); + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; if (fixed_mode) { intel_fixed_panel_mode(fixed_mode, adjusted_mode); @@ -745,17 +703,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, const struct intel_crtc_state *pipe_config); static void intel_dsi_unprepare(struct intel_encoder *encoder); -static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) -{ - struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); - - /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */ - if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3) - return; - - msleep(msec); -} - /* * Panel enable/disable sequences from the VBT spec. * @@ -793,6 +740,10 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) * - wait t4 - wait t4 */ +/* + * DSI port enable has to be done before pipe and plane enable, so we do it in + * the pre_enable hook instead of the enable hook. + */ static void intel_dsi_pre_enable(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) @@ -895,17 +846,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, } /* - * DSI port enable has to be done before pipe and plane enable, so we do it in - * the pre_enable hook. - */ -static void intel_dsi_enable_nop(struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config, - const struct drm_connector_state *conn_state) -{ - DRM_DEBUG_KMS("\n"); -} - -/* * DSI port disable has to be done after pipe and plane disable, so we do it in * the post_disable hook. 
*/ @@ -1272,31 +1212,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, } } -static enum drm_mode_status -intel_dsi_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; - int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; - - DRM_DEBUG_KMS("\n"); - - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) - return MODE_NO_DBLESCAN; - - if (fixed_mode) { - if (mode->hdisplay > fixed_mode->hdisplay) - return MODE_PANEL; - if (mode->vdisplay > fixed_mode->vdisplay) - return MODE_PANEL; - if (fixed_mode->clock > max_dotclk) - return MODE_CLOCK_HIGH; - } - - return MODE_OK; -} - /* return txclkesc cycles in terms of divider and duration in us */ static u16 txclkesc(u32 divider, unsigned int us) { @@ -1619,39 +1534,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder) } } -static int intel_dsi_get_modes(struct drm_connector *connector) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - struct drm_display_mode *mode; - - DRM_DEBUG_KMS("\n"); - - if (!intel_connector->panel.fixed_mode) { - DRM_DEBUG_KMS("no fixed mode\n"); - return 0; - } - - mode = drm_mode_duplicate(connector->dev, - intel_connector->panel.fixed_mode); - if (!mode) { - DRM_DEBUG_KMS("drm_mode_duplicate failed\n"); - return 0; - } - - drm_mode_probed_add(connector, mode); - return 1; -} - -static void intel_dsi_connector_destroy(struct drm_connector *connector) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - - DRM_DEBUG_KMS("\n"); - intel_panel_fini(&intel_connector->panel); - drm_connector_cleanup(connector); - kfree(connector); -} - static void intel_dsi_encoder_destroy(struct drm_encoder *encoder) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); @@ -1676,7 +1558,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs static const struct drm_connector_funcs intel_dsi_connector_funcs = { .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, - .destroy = intel_dsi_connector_destroy, + .destroy = intel_connector_destroy, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, @@ -1684,27 +1566,57 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = { .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; -static int intel_dsi_get_panel_orientation(struct intel_connector *connector) +static enum drm_panel_orientation +vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL; - enum i9xx_plane_id i9xx_plane; + struct intel_encoder *encoder = connector->encoder; + enum intel_display_power_domain power_domain; + enum drm_panel_orientation orientation; + struct intel_plane *plane; + struct intel_crtc *crtc; + enum pipe pipe; u32 val; - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - if (connector->encoder->crtc_mask == BIT(PIPE_B)) - i9xx_plane = PLANE_B; - else - i9xx_plane = PLANE_A; + if (!encoder->get_hw_state(encoder, &pipe)) + return DRM_MODE_PANEL_ORIENTATION_UNKNOWN; - val = I915_READ(DSPCNTR(i9xx_plane)); - if (val & DISPPLANE_ROTATE_180) - orientation = 
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
-	}
+	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	plane = to_intel_plane(crtc->base.primary);
+
+	power_domain = POWER_DOMAIN_PIPE(pipe);
+	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+		return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+
+	val = I915_READ(DSPCNTR(plane->i9xx_plane));
+
+	if (!(val & DISPLAY_PLANE_ENABLE))
+		orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+	else if (val & DISPPLANE_ROTATE_180)
+		orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+	else
+		orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+
+	intel_display_power_put(dev_priv, power_domain);
 
 	return orientation;
 }
 
+static enum drm_panel_orientation
+vlv_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	enum drm_panel_orientation orientation;
+
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+		orientation = vlv_dsi_get_hw_panel_orientation(connector);
+		if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+			return orientation;
+	}
+
+	return intel_dsi_get_panel_orientation(connector);
+}
+
 static void intel_dsi_add_properties(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1722,7 +1634,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
 		connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
 
 		connector->base.display_info.panel_orientation =
-			intel_dsi_get_panel_orientation(connector);
+			vlv_dsi_get_panel_orientation(connector);
 		drm_connector_init_panel_orientation_property(
 				&connector->base,
 				connector->panel.fixed_mode->hdisplay,
@@ -1773,7 +1685,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 
 	intel_encoder->compute_config = intel_dsi_compute_config;
 	intel_encoder->pre_enable = intel_dsi_pre_enable;
-	intel_encoder->enable = intel_dsi_enable_nop;
 	intel_encoder->disable = intel_dsi_disable;
 	intel_encoder->post_disable = intel_dsi_post_disable;
 	intel_encoder->get_hw_state = intel_dsi_get_hw_state;
@@ -1806,7 +1717,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 	for_each_dsi_port(port, intel_dsi->ports) {
 		struct intel_dsi_host *host;
 
-		host = intel_dsi_host_init(intel_dsi, port);
+		host = intel_dsi_host_init(intel_dsi, &intel_dsi_host_ops,
+					   port);
 		if (!host)
 			goto err;