Diffstat (limited to 'drivers/gpu')
60 files changed, 7048 insertions, 7429 deletions
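The bulk of the display changes below convert the driver's power-domain bookkeeping from a u64 bitmask to a bitmap, so the number of domains is no longer capped at 64. A minimal sketch of the pattern using the generic kernel bitmap API (the EXAMPLE_* names and example_diff_masks() are illustrative, not taken from the patch):

#include <linux/bitmap.h>

#define EXAMPLE_DOMAIN_NUM 96	/* more than fits in a u64 */

struct example_domain_mask {
	DECLARE_BITMAP(bits, EXAMPLE_DOMAIN_NUM);
};

static void example_diff_masks(const struct example_domain_mask *want,
			       const struct example_domain_mask *have,
			       struct example_domain_mask *to_get,
			       struct example_domain_mask *to_put)
{
	/* domains we want but do not hold yet must be acquired... */
	bitmap_andnot(to_get->bits, want->bits, have->bits, EXAMPLE_DOMAIN_NUM);
	/* ...and domains we hold but no longer want must be released */
	bitmap_andnot(to_put->bits, have->bits, want->bits, EXAMPLE_DOMAIN_NUM);
}

This mirrors how modeset_get_crtc_power_domains() computes new_domains and old_domains further down.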
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 0f2837f07741..7ae3b7d67fcf 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -106,40 +106,30 @@ config DRM_I915_USERPTR
 	  If in doubt, say "Y".
 
 config DRM_I915_GVT
-	bool "Enable Intel GVT-g graphics virtualization host support"
+	bool
+
+config DRM_I915_GVT_KVMGT
+	tristate "Enable KVM host support for Intel GVT-g graphics virtualization"
 	depends on DRM_I915
 	depends on X86
 	depends on 64BIT
-	default n
+	depends on KVM
+	depends on VFIO_MDEV
+	select DRM_I915_GVT
+	select KVM_EXTERNAL_WRITE_TRACKING
+
 	help
 	  Choose this option if you want to enable Intel GVT-g graphics
 	  virtualization technology host support with integrated graphics.
 	  With GVT-g, it's possible to have one integrated graphics
-	  device shared by multiple VMs under different hypervisors.
-
-	  Note that at least one hypervisor like Xen or KVM is required for
-	  this driver to work, and it only supports newer device from
-	  Broadwell+. For further information and setup guide, you can
-	  visit: http://01.org/igvt-g.
+	  device shared by multiple VMs under KVM.
 
-	  Now it's just a stub to support the modifications of i915 for
-	  GVT device model. It requires at least one MPT modules for Xen/KVM
-	  and other components of GVT device model to work. Use it under
-	  you own risk.
+	  Note that this driver only supports newer devices from Broadwell on.
+	  For further information and setup guide, you can visit:
+	  http://01.org/igvt-g.
 
 	  If in doubt, say "N".
 
-config DRM_I915_GVT_KVMGT
-	tristate "Enable KVM/VFIO support for Intel GVT-g"
-	depends on DRM_I915_GVT
-	depends on KVM
-	depends on VFIO_MDEV
-	select KVM_EXTERNAL_WRITE_TRACKING
-	default n
-	help
-	  Choose this option if you want to enable KVMGT support for
-	  Intel GVT-g.
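With the Kconfig arrangement above, DRM_I915_GVT becomes an invisible helper bool that the user-visible tristate DRM_I915_GVT_KVMGT selects. C code can then distinguish built-in, modular, and disabled configurations with the standard kconfig macros; a small sketch (gvt_kvmgt_configured() is a made-up name, not part of the patch):

#include <linux/kconfig.h>
#include <linux/types.h>

static inline bool gvt_kvmgt_configured(void)
{
	/*
	 * For a tristate symbol, =y defines CONFIG_DRM_I915_GVT_KVMGT and
	 * =m defines CONFIG_DRM_I915_GVT_KVMGT_MODULE; IS_ENABLED() is
	 * true in either case.
	 */
	return IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT);
}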
-
 config DRM_I915_PXP
 	bool "Enable Intel PXP support"
 	depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index cd0bf6806228..d2b18f03a33c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -223,6 +223,7 @@ i915-y += \
 	display/intel_cursor.o \
 	display/intel_display.o \
 	display/intel_display_power.o \
+	display/intel_display_power_map.o \
 	display/intel_display_power_well.o \
 	display/intel_dmc.o \
 	display/intel_dpio_phy.o \
@@ -331,13 +332,13 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
 # virtual gpu code
 i915-y += i915_vgpu.o
 
-ifeq ($(CONFIG_DRM_I915_GVT),y)
-i915-y += intel_gvt.o
+i915-$(CONFIG_DRM_I915_GVT) += \
+	intel_gvt.o \
+	intel_gvt_mmio_table.o
 include $(src)/gvt/Makefile
-endif
 
 obj-$(CONFIG_DRM_I915) += i915.o
-obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
+obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
 
 # header test
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 16bb21ad898b..5a957acebfd6 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -13,6 +13,7 @@
 #include "intel_connector.h"
 #include "intel_crtc.h"
 #include "intel_de.h"
+#include "intel_display_power.h"
 #include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dp_link_training.h"
@@ -1375,7 +1376,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
 	dig_port->max_lanes = 4;
 
 	intel_encoder->type = INTEL_OUTPUT_DP;
-	intel_encoder->power_domain = intel_port_to_power_domain(port);
+	intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
 	if (IS_CHERRYVIEW(dev_priv)) {
 		if (port == PORT_D)
 			intel_encoder->pipe_mask = BIT(PIPE_C);
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 8bfef08b7c43..5fbd2ae95869 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -10,6 +10,7 @@
 #include "intel_connector.h"
 #include "intel_crtc.h"
 #include "intel_de.h"
+#include "intel_display_power.h"
 #include "intel_display_types.h"
 #include "intel_dpio_phy.h"
 #include "intel_fifo_underrun.h"
@@ -574,7 +575,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
 	intel_encoder->shutdown = intel_hdmi_encoder_shutdown;
 
 	intel_encoder->type = INTEL_OUTPUT_HDMI;
-	intel_encoder->power_domain = intel_port_to_power_domain(port);
+	intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
 	intel_encoder->port = port;
 	if (IS_CHERRYVIEW(dev_priv)) {
 		if (port == PORT_D)
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index d1abd00d4f4d..19bf717fd4cb 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -400,8 +400,8 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
 		intel_dsi->io_wakeref[port] =
 			intel_display_power_get(dev_priv,
 						port == PORT_A ?
-						POWER_DOMAIN_PORT_DDI_A_IO :
-						POWER_DOMAIN_PORT_DDI_B_IO);
 	}
 }
 
@@ -1426,8 +1426,8 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
 		wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
 		intel_display_power_put(dev_priv,
 					port == PORT_A ?
- POWER_DOMAIN_PORT_DDI_A_IO : - POWER_DOMAIN_PORT_DDI_B_IO, + POWER_DOMAIN_PORT_DDI_IO_A : + POWER_DOMAIN_PORT_DDI_IO_B, wakeref); } diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 81949c36ab96..0c5638f5b72b 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -186,10 +186,14 @@ static const struct { .min_size = sizeof(struct bdb_edp), }, { .section_id = BDB_LVDS_OPTIONS, .min_size = sizeof(struct bdb_lvds_options), }, + /* + * BDB_LVDS_LFP_DATA depends on BDB_LVDS_LFP_DATA_PTRS, + * so keep the two ordered. + */ { .section_id = BDB_LVDS_LFP_DATA_PTRS, .min_size = sizeof(struct bdb_lvds_lfp_data_ptrs), }, { .section_id = BDB_LVDS_LFP_DATA, - .min_size = sizeof(struct bdb_lvds_lfp_data), }, + .min_size = 0, /* special case */ }, { .section_id = BDB_LVDS_BACKLIGHT, .min_size = sizeof(struct bdb_lfp_backlight_data), }, { .section_id = BDB_LFP_POWER, @@ -204,6 +208,23 @@ static const struct { .min_size = sizeof(struct bdb_generic_dtd), }, }; +static size_t lfp_data_min_size(struct drm_i915_private *i915) +{ + const struct bdb_lvds_lfp_data_ptrs *ptrs; + size_t size; + + ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); + if (!ptrs) + return 0; + + size = sizeof(struct bdb_lvds_lfp_data); + if (ptrs->panel_name.table_size) + size = max(size, ptrs->panel_name.offset + + sizeof(struct bdb_lvds_lfp_data_tail)); + + return size; +} + static bool validate_lfp_data_ptrs(const void *bdb, const struct bdb_lvds_lfp_data_ptrs *ptrs) { @@ -311,16 +332,144 @@ static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block) return validate_lfp_data_ptrs(bdb, ptrs); } +static const void *find_fp_timing_terminator(const u8 *data, int size) +{ + int i; + + for (i = 0; i < size - 1; i++) { + if (data[i] == 0xff && data[i+1] == 0xff) + return &data[i]; + } + + return NULL; +} + +static int make_lfp_data_ptr(struct lvds_lfp_data_ptr_table *table, + int table_size, int total_size) +{ + if (total_size < table_size) + return total_size; + + table->table_size = table_size; + table->offset = total_size - table_size; + + return total_size - table_size; +} + +static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next, + const struct lvds_lfp_data_ptr_table *prev, + int size) +{ + next->table_size = prev->table_size; + next->offset = prev->offset + size; +} + +static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, + const void *bdb) +{ + int i, size, table_size, block_size, offset; + const void *t0, *t1, *block; + struct bdb_lvds_lfp_data_ptrs *ptrs; + void *ptrs_block; + + block = find_raw_section(bdb, BDB_LVDS_LFP_DATA); + if (!block) + return NULL; + + drm_dbg_kms(&i915->drm, "Generating LFP data table pointers\n"); + + block_size = get_blocksize(block); + + size = block_size; + t0 = find_fp_timing_terminator(block, size); + if (!t0) + return NULL; + + size -= t0 - block - 2; + t1 = find_fp_timing_terminator(t0 + 2, size); + if (!t1) + return NULL; + + size = t1 - t0; + if (size * 16 > block_size) + return NULL; + + ptrs_block = kzalloc(sizeof(*ptrs) + 3, GFP_KERNEL); + if (!ptrs_block) + return NULL; + + *(u8 *)(ptrs_block + 0) = BDB_LVDS_LFP_DATA_PTRS; + *(u16 *)(ptrs_block + 1) = sizeof(*ptrs); + ptrs = ptrs_block + 3; + + table_size = sizeof(struct lvds_pnp_id); + size = make_lfp_data_ptr(&ptrs->ptr[0].panel_pnp_id, table_size, size); + + table_size = sizeof(struct lvds_dvo_timing); + size = make_lfp_data_ptr(&ptrs->ptr[0].dvo_timing, table_size, size); + + table_size = t0 - block + 
2; + size = make_lfp_data_ptr(&ptrs->ptr[0].fp_timing, table_size, size); + + if (ptrs->ptr[0].fp_timing.table_size) + ptrs->lvds_entries++; + if (ptrs->ptr[0].dvo_timing.table_size) + ptrs->lvds_entries++; + if (ptrs->ptr[0].panel_pnp_id.table_size) + ptrs->lvds_entries++; + + if (size != 0 || ptrs->lvds_entries != 3) { + kfree(ptrs); + return NULL; + } + + size = t1 - t0; + for (i = 1; i < 16; i++) { + next_lfp_data_ptr(&ptrs->ptr[i].fp_timing, &ptrs->ptr[i-1].fp_timing, size); + next_lfp_data_ptr(&ptrs->ptr[i].dvo_timing, &ptrs->ptr[i-1].dvo_timing, size); + next_lfp_data_ptr(&ptrs->ptr[i].panel_pnp_id, &ptrs->ptr[i-1].panel_pnp_id, size); + } + + size = t1 - t0; + table_size = sizeof(struct lvds_lfp_panel_name); + + if (16 * (size + table_size) <= block_size) { + ptrs->panel_name.table_size = table_size; + ptrs->panel_name.offset = size * 16; + } + + offset = block - bdb; + + for (i = 0; i < 16; i++) { + ptrs->ptr[i].fp_timing.offset += offset; + ptrs->ptr[i].dvo_timing.offset += offset; + ptrs->ptr[i].panel_pnp_id.offset += offset; + } + + if (ptrs->panel_name.table_size) + ptrs->panel_name.offset += offset; + + return ptrs_block; +} + static void init_bdb_block(struct drm_i915_private *i915, const void *bdb, enum bdb_block_id section_id, size_t min_size) { struct bdb_block_entry *entry; + void *temp_block = NULL; const void *block; size_t block_size; block = find_raw_section(bdb, section_id); + + /* Modern VBTs lack the LFP data table pointers block, make one up */ + if (!block && section_id == BDB_LVDS_LFP_DATA_PTRS) { + temp_block = generate_lfp_data_ptrs(i915, bdb); + if (temp_block) + block = temp_block + 3; + } if (!block) return; @@ -331,12 +480,16 @@ init_bdb_block(struct drm_i915_private *i915, entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3), GFP_KERNEL); - if (!entry) + if (!entry) { + kfree(temp_block); return; + } entry->section_id = section_id; memcpy(entry->data, block - 3, block_size + 3); + kfree(temp_block); + drm_dbg_kms(&i915->drm, "Found BDB block %d (size %zu, min size %zu)\n", section_id, block_size, min_size); @@ -359,6 +512,9 @@ static void init_bdb_blocks(struct drm_i915_private *i915, enum bdb_block_id section_id = bdb_blocks[i].section_id; size_t min_size = bdb_blocks[i].min_size; + if (section_id == BDB_LVDS_LFP_DATA) + min_size = lfp_data_min_size(i915); + init_bdb_block(i915, bdb, section_id, min_size); } } @@ -429,6 +585,94 @@ get_lvds_fp_timing(const struct bdb_lvds_lfp_data *data, return (const void *)data + ptrs->ptr[index].fp_timing.offset; } +static const struct bdb_lvds_lfp_data_tail * +get_lfp_data_tail(const struct bdb_lvds_lfp_data *data, + const struct bdb_lvds_lfp_data_ptrs *ptrs) +{ + if (ptrs->panel_name.table_size) + return (const void *)data + ptrs->panel_name.offset; + else + return NULL; +} + +static int opregion_get_panel_type(struct drm_i915_private *i915) +{ + return intel_opregion_get_panel_type(i915); +} + +static int vbt_get_panel_type(struct drm_i915_private *i915) +{ + const struct bdb_lvds_options *lvds_options; + + lvds_options = find_section(i915, BDB_LVDS_OPTIONS); + if (!lvds_options) + return -1; + + if (lvds_options->panel_type > 0xf) { + drm_dbg_kms(&i915->drm, "Invalid VBT panel type 0x%x\n", + lvds_options->panel_type); + return -1; + } + + return lvds_options->panel_type; +} + +static int fallback_get_panel_type(struct drm_i915_private *i915) +{ + return 0; +} + +enum panel_type { + PANEL_TYPE_OPREGION, + PANEL_TYPE_VBT, + PANEL_TYPE_FALLBACK, +}; + +static int get_panel_type(struct 
drm_i915_private *i915) +{ + struct { + const char *name; + int (*get_panel_type)(struct drm_i915_private *i915); + int panel_type; + } panel_types[] = { + [PANEL_TYPE_OPREGION] = { + .name = "OpRegion", + .get_panel_type = opregion_get_panel_type, + }, + [PANEL_TYPE_VBT] = { + .name = "VBT", + .get_panel_type = vbt_get_panel_type, + }, + [PANEL_TYPE_FALLBACK] = { + .name = "fallback", + .get_panel_type = fallback_get_panel_type, + }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(panel_types); i++) { + panel_types[i].panel_type = panel_types[i].get_panel_type(i915); + + drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf); + + if (panel_types[i].panel_type >= 0) + drm_dbg_kms(&i915->drm, "Panel type (%s): %d\n", + panel_types[i].name, panel_types[i].panel_type); + } + + if (panel_types[PANEL_TYPE_OPREGION].panel_type >= 0) + i = PANEL_TYPE_OPREGION; + else if (panel_types[PANEL_TYPE_VBT].panel_type >= 0) + i = PANEL_TYPE_VBT; + else + i = PANEL_TYPE_FALLBACK; + + drm_dbg_kms(&i915->drm, "Selected panel type (%s): %d\n", + panel_types[i].name, panel_types[i].panel_type); + + return panel_types[i].panel_type; +} + /* Parse general panel options */ static void parse_panel_options(struct drm_i915_private *i915) @@ -436,7 +680,6 @@ parse_panel_options(struct drm_i915_private *i915) const struct bdb_lvds_options *lvds_options; int panel_type; int drrs_mode; - int ret; lvds_options = find_section(i915, BDB_LVDS_OPTIONS); if (!lvds_options) @@ -444,23 +687,7 @@ parse_panel_options(struct drm_i915_private *i915) i915->vbt.lvds_dither = lvds_options->pixel_dither; - ret = intel_opregion_get_panel_type(i915); - if (ret >= 0) { - drm_WARN_ON(&i915->drm, ret > 0xf); - panel_type = ret; - drm_dbg_kms(&i915->drm, "Panel type: %d (OpRegion)\n", - panel_type); - } else { - if (lvds_options->panel_type > 0xf) { - drm_dbg_kms(&i915->drm, - "Invalid VBT panel type 0x%x\n", - lvds_options->panel_type); - return; - } - panel_type = lvds_options->panel_type; - drm_dbg_kms(&i915->drm, "Panel type: %d (VBT)\n", - panel_type); - } + panel_type = get_panel_type(i915); i915->vbt.panel_type = panel_type; @@ -489,25 +716,16 @@ parse_panel_options(struct drm_i915_private *i915) } } -/* Try to find integrated panel timing data */ static void -parse_lfp_panel_dtd(struct drm_i915_private *i915) +parse_lfp_panel_dtd(struct drm_i915_private *i915, + const struct bdb_lvds_lfp_data *lvds_lfp_data, + const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs) { - const struct bdb_lvds_lfp_data *lvds_lfp_data; - const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; const struct lvds_dvo_timing *panel_dvo_timing; const struct lvds_fp_timing *fp_timing; struct drm_display_mode *panel_fixed_mode; int panel_type = i915->vbt.panel_type; - lvds_lfp_data = find_section(i915, BDB_LVDS_LFP_DATA); - if (!lvds_lfp_data) - return; - - lvds_lfp_data_ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); - if (!lvds_lfp_data_ptrs) - return; - panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, lvds_lfp_data_ptrs, panel_type); @@ -539,6 +757,38 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915) } static void +parse_lfp_data(struct drm_i915_private *i915) +{ + const struct bdb_lvds_lfp_data *data; + const struct bdb_lvds_lfp_data_tail *tail; + const struct bdb_lvds_lfp_data_ptrs *ptrs; + int panel_type = i915->vbt.panel_type; + + ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); + if (!ptrs) + return; + + data = find_section(i915, BDB_LVDS_LFP_DATA); + if (!data) + return; + + if (!i915->vbt.lfp_lvds_vbt_mode) + parse_lfp_panel_dtd(i915, 
data, ptrs); + + tail = get_lfp_data_tail(data, ptrs); + if (!tail) + return; + + if (i915->vbt.version >= 188) { + i915->vbt.seamless_drrs_min_refresh_rate = + tail->seamless_drrs_min_refresh_rate[panel_type]; + drm_dbg_kms(&i915->drm, + "Seamless DRRS min refresh rate: %d Hz\n", + i915->vbt.seamless_drrs_min_refresh_rate); + } +} + +static void parse_generic_dtd(struct drm_i915_private *i915) { const struct bdb_generic_dtd *generic_dtd; @@ -546,6 +796,17 @@ parse_generic_dtd(struct drm_i915_private *i915) struct drm_display_mode *panel_fixed_mode; int num_dtd; + /* + * Older VBTs provided DTD information for internal displays through + * the "LFP panel tables" block (42). As of VBT revision 229 the + * DTD information should be provided via a newer "generic DTD" + * block (58). Just to be safe, we'll try the new generic DTD block + * first on VBT >= 229, but still fall back to trying the old LFP + * block if that fails. + */ + if (i915->vbt.version < 229) + return; + generic_dtd = find_section(i915, BDB_GENERIC_DTD); if (!generic_dtd) return; @@ -617,23 +878,6 @@ parse_generic_dtd(struct drm_i915_private *i915) } static void -parse_panel_dtd(struct drm_i915_private *i915) -{ - /* - * Older VBTs provided provided DTD information for internal displays - * through the "LFP panel DTD" block (42). As of VBT revision 229, - * that block is now deprecated and DTD information should be provided - * via a newer "generic DTD" block (58). Just to be safe, we'll - * try the new generic DTD block first on VBT >= 229, but still fall - * back to trying the old LFP block if that fails. - */ - if (i915->vbt.version >= 229) - parse_generic_dtd(i915); - if (!i915->vbt.lfp_lvds_vbt_mode) - parse_lfp_panel_dtd(i915); -} - -static void parse_lfp_backlight(struct drm_i915_private *i915) { const struct bdb_lfp_backlight_data *backlight_data; @@ -2709,7 +2953,8 @@ void intel_bios_init(struct drm_i915_private *i915) parse_general_features(i915); parse_general_definitions(i915); parse_panel_options(i915); - parse_panel_dtd(i915); + parse_generic_dtd(i915); + parse_lfp_data(i915); parse_lfp_backlight(i915); parse_sdvo_panel_data(i915); parse_driver_features(i915); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index dd02afaac43f..9e6fa59eabba 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -40,6 +40,7 @@ #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" +#include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_link_training.h" @@ -4364,7 +4365,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->get_power_domains = intel_ddi_get_power_domains; encoder->type = INTEL_OUTPUT_DDI; - encoder->power_domain = intel_port_to_power_domain(port); + encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port); encoder->port = port; encoder->cloneable = 0; encoder->pipe_mask = ~0; @@ -4492,8 +4493,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) } drm_WARN_ON(&dev_priv->drm, port > PORT_I); - dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO + - port - PORT_A; + dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port); if (init_dp) { if (!intel_ddi_init_dp_connector(dig_port)) diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index 94e64661b4fd..85f58dd3df72 100644 --- 
a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -1673,7 +1673,9 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder) encoder->get_buf_trans = skl_get_buf_trans; } else if (IS_BROADWELL(i915)) { encoder->get_buf_trans = bdw_get_buf_trans; - } else { + } else if (IS_HASWELL(i915)) { encoder->get_buf_trans = hsw_get_buf_trans; + } else { + MISSING_CASE(INTEL_INFO(i915)->platform); } } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 5fab9fb1d2f5..806d50b302ab 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -51,6 +51,7 @@ #include "display/intel_crt.h" #include "display/intel_ddi.h" #include "display/intel_display_debugfs.h" +#include "display/intel_display_power.h" #include "display/intel_dp.h" #include "display/intel_dp_mst.h" #include "display/intel_dpll.h" @@ -2157,153 +2158,82 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) return TC_PORT_1 + port - PORT_C; } -enum intel_display_power_domain intel_port_to_power_domain(enum port port) -{ - switch (port) { - case PORT_A: - return POWER_DOMAIN_PORT_DDI_A_LANES; - case PORT_B: - return POWER_DOMAIN_PORT_DDI_B_LANES; - case PORT_C: - return POWER_DOMAIN_PORT_DDI_C_LANES; - case PORT_D: - return POWER_DOMAIN_PORT_DDI_D_LANES; - case PORT_E: - return POWER_DOMAIN_PORT_DDI_E_LANES; - case PORT_F: - return POWER_DOMAIN_PORT_DDI_F_LANES; - case PORT_G: - return POWER_DOMAIN_PORT_DDI_G_LANES; - case PORT_H: - return POWER_DOMAIN_PORT_DDI_H_LANES; - case PORT_I: - return POWER_DOMAIN_PORT_DDI_I_LANES; - default: - MISSING_CASE(port); - return POWER_DOMAIN_PORT_OTHER; - } -} - enum intel_display_power_domain intel_aux_power_domain(struct intel_digital_port *dig_port) { - if (intel_tc_port_in_tbt_alt_mode(dig_port)) { - switch (dig_port->aux_ch) { - case AUX_CH_C: - return POWER_DOMAIN_AUX_C_TBT; - case AUX_CH_D: - return POWER_DOMAIN_AUX_D_TBT; - case AUX_CH_E: - return POWER_DOMAIN_AUX_E_TBT; - case AUX_CH_F: - return POWER_DOMAIN_AUX_F_TBT; - case AUX_CH_G: - return POWER_DOMAIN_AUX_G_TBT; - case AUX_CH_H: - return POWER_DOMAIN_AUX_H_TBT; - case AUX_CH_I: - return POWER_DOMAIN_AUX_I_TBT; - default: - MISSING_CASE(dig_port->aux_ch); - return POWER_DOMAIN_AUX_C_TBT; - } - } + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - return intel_legacy_aux_to_power_domain(dig_port->aux_ch); -} + if (intel_tc_port_in_tbt_alt_mode(dig_port)) + return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch); -/* - * Converts aux_ch to power_domain without caring about TBT ports for that use - * intel_aux_power_domain() - */ -enum intel_display_power_domain -intel_legacy_aux_to_power_domain(enum aux_ch aux_ch) -{ - switch (aux_ch) { - case AUX_CH_A: - return POWER_DOMAIN_AUX_A; - case AUX_CH_B: - return POWER_DOMAIN_AUX_B; - case AUX_CH_C: - return POWER_DOMAIN_AUX_C; - case AUX_CH_D: - return POWER_DOMAIN_AUX_D; - case AUX_CH_E: - return POWER_DOMAIN_AUX_E; - case AUX_CH_F: - return POWER_DOMAIN_AUX_F; - case AUX_CH_G: - return POWER_DOMAIN_AUX_G; - case AUX_CH_H: - return POWER_DOMAIN_AUX_H; - case AUX_CH_I: - return POWER_DOMAIN_AUX_I; - default: - MISSING_CASE(aux_ch); - return POWER_DOMAIN_AUX_A; - } + return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); } -static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) +static void get_crtc_power_domains(struct intel_crtc_state 
*crtc_state, + struct intel_power_domain_mask *mask) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; struct drm_encoder *encoder; enum pipe pipe = crtc->pipe; - u64 mask; + + bitmap_zero(mask->bits, POWER_DOMAIN_NUM); if (!crtc_state->hw.active) - return 0; + return; - mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe)); - mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder)); + set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits); + set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits); if (crtc_state->pch_pfit.enabled || crtc_state->pch_pfit.force_thru) - mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); + set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits); drm_for_each_encoder_mask(encoder, &dev_priv->drm, crtc_state->uapi.encoder_mask) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - mask |= BIT_ULL(intel_encoder->power_domain); + set_bit(intel_encoder->power_domain, mask->bits); } if (HAS_DDI(dev_priv) && crtc_state->has_audio) - mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO); + set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits); if (crtc_state->shared_dpll) - mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE); + set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits); if (crtc_state->dsc.compression_enable) - mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder)); - - return mask; + set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits); } -static u64 -modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state) +static void +modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state, + struct intel_power_domain_mask *old_domains) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain domain; - u64 domains, new_domains, old_domains; + struct intel_power_domain_mask domains, new_domains; - domains = get_crtc_power_domains(crtc_state); + get_crtc_power_domains(crtc_state, &domains); - new_domains = domains & ~crtc->enabled_power_domains.mask; - old_domains = crtc->enabled_power_domains.mask & ~domains; + bitmap_andnot(new_domains.bits, + domains.bits, + crtc->enabled_power_domains.mask.bits, + POWER_DOMAIN_NUM); + bitmap_andnot(old_domains->bits, + crtc->enabled_power_domains.mask.bits, + domains.bits, + POWER_DOMAIN_NUM); - for_each_power_domain(domain, new_domains) + for_each_power_domain(domain, &new_domains) intel_display_power_get_in_set(dev_priv, &crtc->enabled_power_domains, domain); - - return old_domains; } static void modeset_put_crtc_power_domains(struct intel_crtc *crtc, - u64 domains) + struct intel_power_domain_mask *domains) { intel_display_power_put_mask_in_set(to_i915(crtc->base.dev), &crtc->enabled_power_domains, @@ -4974,9 +4904,12 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, mode_changed && !crtc_state->hw.active) crtc_state->update_wm_post = true; - if (mode_changed && crtc_state->hw.enable && - !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) { - ret = intel_dpll_crtc_compute_clock(crtc_state); + if (mode_changed) { + ret = intel_dpll_crtc_compute_clock(state, crtc); + if (ret) + return ret; + + ret = intel_dpll_crtc_get_shared_dpll(state, crtc); if (ret) return ret; } @@ -6969,8 +6902,9 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = 
to_i915(crtc->base.dev); - struct drm_display_mode adjusted_mode = - crtc_state->hw.adjusted_mode; + struct drm_display_mode adjusted_mode; + + drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode); if (crtc_state->vrr.enable) { adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax; @@ -7028,14 +6962,10 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) static void intel_modeset_clear_plls(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int i; - if (!dev_priv->dpll_funcs) - return; - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (!intel_crtc_needs_modeset(new_crtc_state)) continue; @@ -8505,7 +8435,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; - u64 put_domains[I915_MAX_PIPES] = {}; + struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {}; intel_wakeref_t wakeref = 0; int i; @@ -8522,9 +8452,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) new_crtc_state, i) { if (intel_crtc_needs_modeset(new_crtc_state) || new_crtc_state->update_pipe) { - - put_domains[crtc->pipe] = - modeset_get_crtc_power_domains(new_crtc_state); + modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]); } } @@ -8624,7 +8552,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { intel_post_plane_update(state, crtc); - modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]); + modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]); intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); @@ -9737,7 +9665,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) } intel_plane_possible_crtcs_init(i915); - intel_shared_dpll_init(dev); + intel_shared_dpll_init(i915); intel_fdi_pll_freq_update(i915); intel_update_czclk(i915); @@ -9844,9 +9772,6 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) PLL_REF_INPUT_DREFCLK | DPLL_VCO_ENABLE; - intel_de_write(dev_priv, FP0(pipe), fp); - intel_de_write(dev_priv, FP1(pipe), fp); - intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); @@ -9855,6 +9780,9 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); + intel_de_write(dev_priv, FP0(pipe), fp); + intel_de_write(dev_priv, FP1(pipe), fp); + /* * Apparently we need to have VGA mode enabled prior to changing * the P1/P2 dividers. 
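The drm_mode_init() change in intel_crtc_update_active_timings() above replaces a plain struct assignment. struct drm_display_mode embeds a list_head, so a raw copy would also duplicate the source mode's list pointers, while drm_mode_init() copies the timing fields and gives the destination a fresh list head. The hazard in generic form (struct item and copy_item() are illustrative, not from the patch):

#include <linux/list.h>

struct item {
	struct list_head node;	/* links the item into some list */
	int payload;
};

static void copy_item(struct item *dst, const struct item *src)
{
	*dst = *src;			/* dst->node now aliases src's neighbours */
	INIT_LIST_HEAD(&dst->node);	/* re-init, as drm_mode_init() does for modes */
}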
Otherwise the DPLL will keep using the old @@ -10465,11 +10393,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev, for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); - u64 put_domains; + struct intel_power_domain_mask put_domains; - put_domains = modeset_get_crtc_power_domains(crtc_state); - if (drm_WARN_ON(dev, put_domains)) - modeset_put_crtc_power_domains(crtc, put_domains); + modeset_get_crtc_power_domains(crtc_state, &put_domains); + if (drm_WARN_ON(dev, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM))) + modeset_put_crtc_power_domains(crtc, &put_domains); } intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 867fa248f042..187910d94ec6 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -635,11 +635,9 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); -enum intel_display_power_domain intel_port_to_power_domain(enum port port); +enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port); enum intel_display_power_domain intel_aux_power_domain(struct intel_digital_port *dig_port); -enum intel_display_power_domain -intel_legacy_aux_to_power_domain(enum aux_ch aux_ch); void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 6a5695008f7c..1d9bd5808849 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -9,26 +9,27 @@ #include "i915_irq.h" #include "intel_cdclk.h" #include "intel_combo_phy.h" -#include "intel_combo_phy_regs.h" -#include "intel_crt.h" #include "intel_de.h" #include "intel_display_power.h" +#include "intel_display_power_map.h" #include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_dmc.h" -#include "intel_dpio_phy.h" -#include "intel_dpll.h" -#include "intel_hotplug.h" #include "intel_mchbar_regs.h" #include "intel_pch_refclk.h" #include "intel_pcode.h" #include "intel_pm.h" -#include "intel_pps.h" #include "intel_snps_phy.h" -#include "intel_tc.h" -#include "intel_vga.h" #include "vlv_sideband.h" +#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \ + for_each_power_well(__dev_priv, __power_well) \ + for_each_if(test_bit((__domain), (__power_well)->domains.bits)) + +#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \ + for_each_power_well_reverse(__dev_priv, __power_well) \ + for_each_if(test_bit((__domain), (__power_well)->domains.bits)) + const char * intel_display_power_domain_str(enum intel_display_power_domain domain) { @@ -43,14 +44,14 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "PIPE_C"; case POWER_DOMAIN_PIPE_D: return "PIPE_D"; - case POWER_DOMAIN_PIPE_A_PANEL_FITTER: - return "PIPE_A_PANEL_FITTER"; - case POWER_DOMAIN_PIPE_B_PANEL_FITTER: - return "PIPE_B_PANEL_FITTER"; - case POWER_DOMAIN_PIPE_C_PANEL_FITTER: - return "PIPE_C_PANEL_FITTER"; - case POWER_DOMAIN_PIPE_D_PANEL_FITTER: - return 
"PIPE_D_PANEL_FITTER"; + case POWER_DOMAIN_PIPE_PANEL_FITTER_A: + return "PIPE_PANEL_FITTER_A"; + case POWER_DOMAIN_PIPE_PANEL_FITTER_B: + return "PIPE_PANEL_FITTER_B"; + case POWER_DOMAIN_PIPE_PANEL_FITTER_C: + return "PIPE_PANEL_FITTER_C"; + case POWER_DOMAIN_PIPE_PANEL_FITTER_D: + return "PIPE_PANEL_FITTER_D"; case POWER_DOMAIN_TRANSCODER_A: return "TRANSCODER_A"; case POWER_DOMAIN_TRANSCODER_B: @@ -67,42 +68,54 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "TRANSCODER_DSI_C"; case POWER_DOMAIN_TRANSCODER_VDSC_PW2: return "TRANSCODER_VDSC_PW2"; - case POWER_DOMAIN_PORT_DDI_A_LANES: - return "PORT_DDI_A_LANES"; - case POWER_DOMAIN_PORT_DDI_B_LANES: - return "PORT_DDI_B_LANES"; - case POWER_DOMAIN_PORT_DDI_C_LANES: - return "PORT_DDI_C_LANES"; - case POWER_DOMAIN_PORT_DDI_D_LANES: - return "PORT_DDI_D_LANES"; - case POWER_DOMAIN_PORT_DDI_E_LANES: - return "PORT_DDI_E_LANES"; - case POWER_DOMAIN_PORT_DDI_F_LANES: - return "PORT_DDI_F_LANES"; - case POWER_DOMAIN_PORT_DDI_G_LANES: - return "PORT_DDI_G_LANES"; - case POWER_DOMAIN_PORT_DDI_H_LANES: - return "PORT_DDI_H_LANES"; - case POWER_DOMAIN_PORT_DDI_I_LANES: - return "PORT_DDI_I_LANES"; - case POWER_DOMAIN_PORT_DDI_A_IO: - return "PORT_DDI_A_IO"; - case POWER_DOMAIN_PORT_DDI_B_IO: - return "PORT_DDI_B_IO"; - case POWER_DOMAIN_PORT_DDI_C_IO: - return "PORT_DDI_C_IO"; - case POWER_DOMAIN_PORT_DDI_D_IO: - return "PORT_DDI_D_IO"; - case POWER_DOMAIN_PORT_DDI_E_IO: - return "PORT_DDI_E_IO"; - case POWER_DOMAIN_PORT_DDI_F_IO: - return "PORT_DDI_F_IO"; - case POWER_DOMAIN_PORT_DDI_G_IO: - return "PORT_DDI_G_IO"; - case POWER_DOMAIN_PORT_DDI_H_IO: - return "PORT_DDI_H_IO"; - case POWER_DOMAIN_PORT_DDI_I_IO: - return "PORT_DDI_I_IO"; + case POWER_DOMAIN_PORT_DDI_LANES_A: + return "PORT_DDI_LANES_A"; + case POWER_DOMAIN_PORT_DDI_LANES_B: + return "PORT_DDI_LANES_B"; + case POWER_DOMAIN_PORT_DDI_LANES_C: + return "PORT_DDI_LANES_C"; + case POWER_DOMAIN_PORT_DDI_LANES_D: + return "PORT_DDI_LANES_D"; + case POWER_DOMAIN_PORT_DDI_LANES_E: + return "PORT_DDI_LANES_E"; + case POWER_DOMAIN_PORT_DDI_LANES_F: + return "PORT_DDI_LANES_F"; + case POWER_DOMAIN_PORT_DDI_LANES_TC1: + return "PORT_DDI_LANES_TC1"; + case POWER_DOMAIN_PORT_DDI_LANES_TC2: + return "PORT_DDI_LANES_TC2"; + case POWER_DOMAIN_PORT_DDI_LANES_TC3: + return "PORT_DDI_LANES_TC3"; + case POWER_DOMAIN_PORT_DDI_LANES_TC4: + return "PORT_DDI_LANES_TC4"; + case POWER_DOMAIN_PORT_DDI_LANES_TC5: + return "PORT_DDI_LANES_TC5"; + case POWER_DOMAIN_PORT_DDI_LANES_TC6: + return "PORT_DDI_LANES_TC6"; + case POWER_DOMAIN_PORT_DDI_IO_A: + return "PORT_DDI_IO_A"; + case POWER_DOMAIN_PORT_DDI_IO_B: + return "PORT_DDI_IO_B"; + case POWER_DOMAIN_PORT_DDI_IO_C: + return "PORT_DDI_IO_C"; + case POWER_DOMAIN_PORT_DDI_IO_D: + return "PORT_DDI_IO_D"; + case POWER_DOMAIN_PORT_DDI_IO_E: + return "PORT_DDI_IO_E"; + case POWER_DOMAIN_PORT_DDI_IO_F: + return "PORT_DDI_IO_F"; + case POWER_DOMAIN_PORT_DDI_IO_TC1: + return "PORT_DDI_IO_TC1"; + case POWER_DOMAIN_PORT_DDI_IO_TC2: + return "PORT_DDI_IO_TC2"; + case POWER_DOMAIN_PORT_DDI_IO_TC3: + return "PORT_DDI_IO_TC3"; + case POWER_DOMAIN_PORT_DDI_IO_TC4: + return "PORT_DDI_IO_TC4"; + case POWER_DOMAIN_PORT_DDI_IO_TC5: + return "PORT_DDI_IO_TC5"; + case POWER_DOMAIN_PORT_DDI_IO_TC6: + return "PORT_DDI_IO_TC6"; case POWER_DOMAIN_PORT_DSI: return "PORT_DSI"; case POWER_DOMAIN_PORT_CRT: @@ -127,28 +140,32 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "AUX_E"; case POWER_DOMAIN_AUX_F: return "AUX_F"; - 
case POWER_DOMAIN_AUX_G: - return "AUX_G"; - case POWER_DOMAIN_AUX_H: - return "AUX_H"; - case POWER_DOMAIN_AUX_I: - return "AUX_I"; + case POWER_DOMAIN_AUX_USBC1: + return "AUX_USBC1"; + case POWER_DOMAIN_AUX_USBC2: + return "AUX_USBC2"; + case POWER_DOMAIN_AUX_USBC3: + return "AUX_USBC3"; + case POWER_DOMAIN_AUX_USBC4: + return "AUX_USBC4"; + case POWER_DOMAIN_AUX_USBC5: + return "AUX_USBC5"; + case POWER_DOMAIN_AUX_USBC6: + return "AUX_USBC6"; case POWER_DOMAIN_AUX_IO_A: return "AUX_IO_A"; - case POWER_DOMAIN_AUX_C_TBT: - return "AUX_C_TBT"; - case POWER_DOMAIN_AUX_D_TBT: - return "AUX_D_TBT"; - case POWER_DOMAIN_AUX_E_TBT: - return "AUX_E_TBT"; - case POWER_DOMAIN_AUX_F_TBT: - return "AUX_F_TBT"; - case POWER_DOMAIN_AUX_G_TBT: - return "AUX_G_TBT"; - case POWER_DOMAIN_AUX_H_TBT: - return "AUX_H_TBT"; - case POWER_DOMAIN_AUX_I_TBT: - return "AUX_I_TBT"; + case POWER_DOMAIN_AUX_TBT1: + return "AUX_TBT1"; + case POWER_DOMAIN_AUX_TBT2: + return "AUX_TBT2"; + case POWER_DOMAIN_AUX_TBT3: + return "AUX_TBT3"; + case POWER_DOMAIN_AUX_TBT4: + return "AUX_TBT4"; + case POWER_DOMAIN_AUX_TBT5: + return "AUX_TBT5"; + case POWER_DOMAIN_AUX_TBT6: + return "AUX_TBT6"; case POWER_DOMAIN_GMBUS: return "GMBUS"; case POWER_DOMAIN_INIT: @@ -190,7 +207,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, is_enabled = true; - for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) { + for_each_power_domain_well_reverse(dev_priv, power_well, domain) { if (intel_power_well_is_always_on(power_well)) continue; @@ -235,604 +252,6 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, return ret; } -/* - * Starting with Haswell, we have a "Power Down Well" that can be turned off - * when not needed anymore. We have 4 registers that can request the power well - * to be enabled, and it will only be disabled if none of the registers is - * requesting it to be enabled. - */ -static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, - u8 irq_pipe_mask, bool has_vga) -{ - if (has_vga) - intel_vga_reset_io_mem(dev_priv); - - if (irq_pipe_mask) - gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); -} - -static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, - u8 irq_pipe_mask) -{ - if (irq_pipe_mask) - gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); -} - -#define ICL_AUX_PW_TO_CH(pw_idx) \ - ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) - -#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ - ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) - -static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well) -{ - int pw_idx = power_well->desc->hsw.idx; - - return power_well->desc->hsw.is_tc_tbt ? 
ICL_TBT_AUX_PW_TO_CH(pw_idx) : - ICL_AUX_PW_TO_CH(pw_idx); -} - -static struct intel_digital_port * -aux_ch_to_digital_port(struct drm_i915_private *dev_priv, - enum aux_ch aux_ch) -{ - struct intel_digital_port *dig_port = NULL; - struct intel_encoder *encoder; - - for_each_intel_encoder(&dev_priv->drm, encoder) { - /* We'll check the MST primary port */ - if (encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - dig_port = enc_to_dig_port(encoder); - if (!dig_port) - continue; - - if (dig_port->aux_ch != aux_ch) { - dig_port = NULL; - continue; - } - - break; - } - - return dig_port; -} - -static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915, - const struct i915_power_well *power_well) -{ - enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); - struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch); - - return intel_port_to_phy(i915, dig_port->base.port); -} - -static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, - bool timeout_expected) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - int enable_delay = power_well->desc->hsw.fixed_enable_delay; - - /* - * For some power wells we're not supposed to watch the status bit for - * an ack, but rather just wait a fixed amount of time and then - * proceed. This is only used on DG2. - */ - if (IS_DG2(dev_priv) && enable_delay) { - usleep_range(enable_delay, 2 * enable_delay); - return; - } - - /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ - if (intel_de_wait_for_set(dev_priv, regs->driver, - HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) { - drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n", - intel_power_well_name(power_well)); - - drm_WARN_ON(&dev_priv->drm, !timeout_expected); - - } -} - -static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, - const struct i915_power_well_regs *regs, - int pw_idx) -{ - u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); - u32 ret; - - ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0; - ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0; - if (regs->kvmr.reg) - ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0; - ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0; - - return ret; -} - -static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - bool disabled; - u32 reqs; - - /* - * Bspec doesn't require waiting for PWs to get disabled, but still do - * this for paranoia. The known cases where a PW will be forced on: - * - a KVMR request on any power well via the KVMR request register - * - a DMC request on PW1 and MISC_IO power wells via the BIOS and - * DEBUG request registers - * Skip the wait in case any of the request bits are set and print a - * diagnostic message. 
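The wait loop being removed from this file, hsw_wait_for_power_well_disable(), polls for the well to go down but gives up as soon as another requester (KVMR, DMC via the BIOS/DEBUG registers) still holds a request bit, since the well can then never turn off. A generic sketch of that poll-unless-blocked pattern, with a hypothetical pw_read() accessor and register layout:

#include <linux/delay.h>
#include <linux/types.h>

u32 pw_read(u32 reg);	/* hypothetical MMIO read */

/* Poll for @state_bit to clear, but bail out early if @req_mask shows
 * that some other agent is still forcing the well on. */
static bool wait_for_well_down(u32 state_reg, u32 state_bit,
			       u32 req_reg, u32 req_mask)
{
	int retries = 10;

	while (retries--) {
		if (!(pw_read(state_reg) & state_bit))
			return true;		/* well is down */
		if (pw_read(req_reg) & req_mask)
			return false;		/* forced on, waiting is pointless */
		usleep_range(1, 2);
	}

	return false;				/* timed out */
}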
- */ - wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) & - HSW_PWR_WELL_CTL_STATE(pw_idx))) || - (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); - if (disabled) - return; - - drm_dbg_kms(&dev_priv->drm, - "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", - intel_power_well_name(power_well), - !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); -} - -static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, - enum skl_power_gate pg) -{ - /* Timeout 5us for PG#0, for other PGs 1us */ - drm_WARN_ON(&dev_priv->drm, - intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS, - SKL_FUSE_PG_DIST_STATUS(pg), 1)); -} - -static void hsw_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - u32 val; - - if (power_well->desc->hsw.has_fuses) { - enum skl_power_gate pg; - - pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : - SKL_PW_CTL_IDX_TO_PG(pw_idx); - - /* Wa_16013190616:adlp */ - if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1) - intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC); - - /* - * For PW1 we have to wait both for the PW0/PG0 fuse state - * before enabling the power well and PW1/PG1's own fuse - * state after the enabling. For all other power wells with - * fuses we only have to wait for that PW/PG's fuse state - * after the enabling. - */ - if (pg == SKL_PG1) - gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); - } - - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val | HSW_PWR_WELL_CTL_REQ(pw_idx)); - - hsw_wait_for_power_well_enable(dev_priv, power_well, false); - - if (power_well->desc->hsw.has_fuses) { - enum skl_power_gate pg; - - pg = DISPLAY_VER(dev_priv) >= 11 ? 
ICL_PW_CTL_IDX_TO_PG(pw_idx) : - SKL_PW_CTL_IDX_TO_PG(pw_idx); - gen9_wait_for_power_well_fuses(dev_priv, pg); - } - - hsw_power_well_post_enable(dev_priv, - power_well->desc->hsw.irq_pipe_mask, - power_well->desc->hsw.has_vga); -} - -static void hsw_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - u32 val; - - hsw_power_well_pre_disable(dev_priv, - power_well->desc->hsw.irq_pipe_mask); - - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); - hsw_wait_for_power_well_disable(dev_priv, power_well); -} - -static void -icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); - u32 val; - - drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); - - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val | HSW_PWR_WELL_CTL_REQ(pw_idx)); - - if (DISPLAY_VER(dev_priv) < 12) { - val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); - intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), - val | ICL_LANE_ENABLE_AUX); - } - - hsw_wait_for_power_well_enable(dev_priv, power_well, false); - - /* Display WA #1178: icl */ - if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && - !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { - val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx)); - val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; - intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val); - } -} - -static void -icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); - u32 val; - - drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); - - val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); - intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), - val & ~ICL_LANE_ENABLE_AUX); - - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); - - hsw_wait_for_power_well_disable(dev_priv, power_well); -} - -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) - -static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, - struct intel_digital_port *dig_port) -{ - if (drm_WARN_ON(&dev_priv->drm, !dig_port)) - return; - - if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) - return; - - drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port)); -} - -#else - -static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, - struct intel_digital_port *dig_port) -{ -} - -#endif - -#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1) - -static void icl_tc_cold_exit(struct drm_i915_private *i915) -{ - int ret, tries = 0; - - while (1) { - ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0, - 250, 1); - if (ret != -EAGAIN || ++tries == 3) - break; - msleep(1); - } - - /* Spec states that TC cold exit can take up to 1ms to complete */ - if (!ret) - 
msleep(1); - - /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */ - drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" : - "succeeded"); -} - -static void -icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); - struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - bool is_tbt = power_well->desc->hsw.is_tc_tbt; - bool timeout_expected; - u32 val; - - icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); - - val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch)); - val &= ~DP_AUX_CH_CTL_TBT_IO; - if (is_tbt) - val |= DP_AUX_CH_CTL_TBT_IO; - intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val); - - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx)); - - /* - * An AUX timeout is expected if the TBT DP tunnel is down, - * or need to enable AUX on a legacy TypeC port as part of the TC-cold - * exit sequence. - */ - timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port); - if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) - icl_tc_cold_exit(dev_priv); - - hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected); - - if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) { - enum tc_port tc_port; - - tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx); - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x2)); - - if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port), - DKL_CMN_UC_DW27_UC_HEALTH, 1)) - drm_warn(&dev_priv->drm, - "Timeout waiting TC uC health\n"); - } -} - -static void -icl_aux_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); - - if (intel_phy_is_tc(dev_priv, phy)) - return icl_tc_phy_aux_power_well_enable(dev_priv, power_well); - else if (IS_ICELAKE(dev_priv)) - return icl_combo_phy_aux_power_well_enable(dev_priv, - power_well); - else - return hsw_power_well_enable(dev_priv, power_well); -} - -static void -icl_aux_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); - - if (intel_phy_is_tc(dev_priv, phy)) - return hsw_power_well_disable(dev_priv, power_well); - else if (IS_ICELAKE(dev_priv)) - return icl_combo_phy_aux_power_well_disable(dev_priv, - power_well); - else - return hsw_power_well_disable(dev_priv, power_well); -} - -/* - * We should only use the power well if we explicitly asked the hardware to - * enable it, so check if it's enabled and also check if we've requested it to - * be enabled. - */ -static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - enum i915_power_well_id id = power_well->desc->id; - int pw_idx = power_well->desc->hsw.idx; - u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | - HSW_PWR_WELL_CTL_STATE(pw_idx); - u32 val; - - val = intel_de_read(dev_priv, regs->driver); - - /* - * On GEN9 big core due to a DMC bug the driver's request bits for PW1 - * and the MISC_IO PW will be not restored, so check instead for the - * BIOS's own request bits, which are forced-on for these power wells - * when exiting DC5/6. 
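hsw_power_well_enabled(), which this hunk deletes, reports a well as enabled only when the driver's request bit and the hardware's state (ack) bit are both set, folding in the BIOS request register on gen9 big-core parts where the DMC bug described above loses the driver's bits. The request-and-ack check in condensed form (well_enabled() is an illustrative name):

#include <linux/types.h>

static bool well_enabled(u32 ctl, u32 req_bit, u32 state_bit)
{
	/* enabled only if software requested it and hardware acked it */
	return (ctl & (req_bit | state_bit)) == (req_bit | state_bit);
}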
- */ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) && - (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) - val |= intel_de_read(dev_priv, regs->bios); - - return (val & mask) == mask; -} - -static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) -{ - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9), - "DC9 already programmed to be enabled.\n"); - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, DC_STATE_EN) & - DC_STATE_EN_UPTO_DC5, - "DC5 still not disabled to enable DC9.\n"); - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) & - HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), - "Power well 2 on.\n"); - drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), - "Interrupts not disabled yet.\n"); - - /* - * TODO: check for the following to verify the conditions to enter DC9 - * state are satisfied: - * 1] Check relevant display engine registers to verify if mode set - * disable sequence was followed. - * 2] Check if display uninitialize sequence is initialized. - */ -} - -static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) -{ - drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), - "Interrupts not disabled yet.\n"); - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, DC_STATE_EN) & - DC_STATE_EN_UPTO_DC5, - "DC5 still not disabled.\n"); - - /* - * TODO: check for the following to verify DC9 state was indeed - * entered before programming to disable it: - * 1] Check relevant display engine registers to verify if mode - * set disable sequence was followed. - * 2] Check if display uninitialize sequence is initialized. - */ -} - -static void gen9_write_dc_state(struct drm_i915_private *dev_priv, - u32 state) -{ - int rewrites = 0; - int rereads = 0; - u32 v; - - intel_de_write(dev_priv, DC_STATE_EN, state); - - /* It has been observed that disabling the dc6 state sometimes - * doesn't stick and dmc keeps returning old value. Make sure - * the write really sticks enough times and also force rewrite until - * we are confident that state is exactly what we want. 
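gen9_write_dc_state(), whose body follows, hardens a single register write against the DMC firmware reverting it: re-read, re-write on mismatch, and only trust the value once it has survived several consecutive re-reads. Stripped to its essentials, with hypothetical mmio_read32()/mmio_write32() accessors standing in for the i915 ones:

#include <linux/errno.h>
#include <linux/types.h>

u32 mmio_read32(u32 offset);		/* hypothetical accessors */
void mmio_write32(u32 offset, u32 val);

static int write_until_stable(u32 offset, u32 val)
{
	int rewrites = 0, rereads = 0;

	mmio_write32(offset, val);
	do {
		if (mmio_read32(offset) != val) {
			mmio_write32(offset, val);	/* firmware undid it */
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			return 0;	/* stable across several re-reads */
		}
	} while (rewrites < 100);

	return -EIO;	/* value keeps getting reverted */
}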
- */ - do { - v = intel_de_read(dev_priv, DC_STATE_EN); - - if (v != state) { - intel_de_write(dev_priv, DC_STATE_EN, state); - rewrites++; - rereads = 0; - } else if (rereads++ > 5) { - break; - } - - } while (rewrites < 100); - - if (v != state) - drm_err(&dev_priv->drm, - "Writing dc state to 0x%x failed, now 0x%x\n", - state, v); - - /* Most of the times we need one retry, avoid spam */ - if (rewrites > 1) - drm_dbg_kms(&dev_priv->drm, - "Rewrote dc state to 0x%x %d times\n", - state, rewrites); -} - -static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) -{ - u32 mask; - - mask = DC_STATE_EN_UPTO_DC5; - - if (DISPLAY_VER(dev_priv) >= 12) - mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 - | DC_STATE_EN_DC9; - else if (DISPLAY_VER(dev_priv) == 11) - mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - mask |= DC_STATE_EN_DC9; - else - mask |= DC_STATE_EN_UPTO_DC6; - - return mask; -} - -static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) -{ - u32 val; - - if (!HAS_DISPLAY(dev_priv)) - return; - - val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv); - - drm_dbg_kms(&dev_priv->drm, - "Resetting DC state tracking from %02x to %02x\n", - dev_priv->dmc.dc_state, val); - dev_priv->dmc.dc_state = val; -} - -/** - * gen9_set_dc_state - set target display C power state - * @dev_priv: i915 device instance - * @state: target DC power state - * - DC_STATE_DISABLE - * - DC_STATE_EN_UPTO_DC5 - * - DC_STATE_EN_UPTO_DC6 - * - DC_STATE_EN_DC9 - * - * Signal to DMC firmware/HW the target DC power state passed in @state. - * DMC/HW can turn off individual display clocks and power rails when entering - * a deeper DC power state (higher in number) and turns these back when exiting - * that state to a shallower power state (lower in number). The HW will decide - * when to actually enter a given state on an on-demand basis, for instance - * depending on the active state of display pipes. The state of display - * registers backed by affected power rails are saved/restored as needed. - * - * Based on the above enabling a deeper DC power state is asynchronous wrt. - * enabling it. Disabling a deeper power state is synchronous: for instance - * setting %DC_STATE_DISABLE won't complete until all HW resources are turned - * back on and register state is restored. This is guaranteed by the MMIO write - * to DC_STATE_EN blocking until the state is restored. 
- */ -static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) -{ - u32 val; - u32 mask; - - if (!HAS_DISPLAY(dev_priv)) - return; - - if (drm_WARN_ON_ONCE(&dev_priv->drm, - state & ~dev_priv->dmc.allowed_dc_mask)) - state &= dev_priv->dmc.allowed_dc_mask; - - val = intel_de_read(dev_priv, DC_STATE_EN); - mask = gen9_dc_mask(dev_priv); - drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n", - val & mask, state); - - /* Check if DMC is ignoring our DC state requests */ - if ((val & mask) != dev_priv->dmc.dc_state) - drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", - dev_priv->dmc.dc_state, val & mask); - - val &= ~mask; - val |= state; - - gen9_write_dc_state(dev_priv, val); - - dev_priv->dmc.dc_state = val & mask; -} - static u32 sanitize_target_dc_state(struct drm_i915_private *dev_priv, u32 target_dc_state) @@ -858,53 +277,6 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv, return target_dc_state; } -static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) -{ - drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n"); - gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO); -} - -static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) -{ - u32 val; - - drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); - val = intel_de_read(dev_priv, DC_STATE_EN); - val &= ~DC_STATE_DC3CO_STATUS; - intel_de_write(dev_priv, DC_STATE_EN, val); - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - /* - * Delay of 200us DC3CO Exit time B.Spec 49196 - */ - usleep_range(200, 210); -} - -static void bxt_enable_dc9(struct drm_i915_private *dev_priv) -{ - assert_can_enable_dc9(dev_priv); - - drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n"); - /* - * Power sequencer reset is not needed on - * platforms with South Display Engine on PCH, - * because PPS registers are always on. - */ - if (!HAS_PCH_SPLIT(dev_priv)) - intel_pps_reset_all(dev_priv); - gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); -} - -static void bxt_disable_dc9(struct drm_i915_private *dev_priv) -{ - assert_can_disable_dc9(dev_priv); - - drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n"); - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - - intel_pps_unlock_regs_wa(dev_priv); -} - /** * intel_display_power_set_target_dc_state - Set target dc state. 
* @dev_priv: i915 device @@ -949,916 +321,15 @@ unlock: mutex_unlock(&power_domains->lock); } -static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) -{ - enum i915_power_well_id high_pg; - - /* Power wells at this level and above must be disabled for DC5 entry */ - if (DISPLAY_VER(dev_priv) == 12) - high_pg = ICL_DISP_PW_3; - else - high_pg = SKL_DISP_PW_2; - - drm_WARN_ONCE(&dev_priv->drm, - intel_display_power_well_is_enabled(dev_priv, high_pg), - "Power wells above platform's DC5 limit still enabled.\n"); - - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, DC_STATE_EN) & - DC_STATE_EN_UPTO_DC5), - "DC5 already programmed to be enabled.\n"); - assert_rpm_wakelock_held(&dev_priv->runtime_pm); - - assert_dmc_loaded(dev_priv); -} - -static void gen9_enable_dc5(struct drm_i915_private *dev_priv) -{ - assert_can_enable_dc5(dev_priv); - - drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n"); - - /* Wa Display #1183: skl,kbl,cfl */ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, - intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); - - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); -} - -static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) -{ - drm_WARN_ONCE(&dev_priv->drm, - intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, - "Backlight is not disabled.\n"); - drm_WARN_ONCE(&dev_priv->drm, - (intel_de_read(dev_priv, DC_STATE_EN) & - DC_STATE_EN_UPTO_DC6), - "DC6 already programmed to be enabled.\n"); - - assert_dmc_loaded(dev_priv); -} - -static void skl_enable_dc6(struct drm_i915_private *dev_priv) -{ - assert_can_enable_dc6(dev_priv); - - drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n"); - - /* Wa Display #1183: skl,kbl,cfl */ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, - intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); - - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); -} - -static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->ops->regs; - int pw_idx = power_well->desc->hsw.idx; - u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); - u32 bios_req = intel_de_read(dev_priv, regs->bios); - - /* Take over the request bit if set by BIOS. 
- */
-	if (bios_req & mask) {
-		u32 drv_req = intel_de_read(dev_priv, regs->driver);
-
-		if (!(drv_req & mask))
-			intel_de_write(dev_priv, regs->driver, drv_req | mask);
-		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
-	}
-}
-
-static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
-					   struct i915_power_well *power_well)
-{
-	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
-}
-
-static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
-					    struct i915_power_well *power_well)
-{
-	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
-}
-
-static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
-					    struct i915_power_well *power_well)
-{
-	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
-}
-
-static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
-{
-	struct i915_power_well *power_well;
-
-	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
-	if (intel_power_well_refcount(power_well) > 0)
-		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
-
-	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
-	if (intel_power_well_refcount(power_well) > 0)
-		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
-
-	if (IS_GEMINILAKE(dev_priv)) {
-		power_well = lookup_power_well(dev_priv,
-					       GLK_DISP_PW_DPIO_CMN_C);
-		if (intel_power_well_refcount(power_well) > 0)
-			bxt_ddi_phy_verify_state(dev_priv,
-						 power_well->desc->bxt.phy);
-	}
-}
-
-static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
-					   struct i915_power_well *power_well)
-{
-	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
-		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
-}
-
-static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
-{
-	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
-	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
-
-	drm_WARN(&dev_priv->drm,
-		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
-		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
-		 hw_enabled_dbuf_slices,
-		 enabled_dbuf_slices);
-}
-
-static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
-{
-	struct intel_cdclk_config cdclk_config = {};
-
-	if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
-		tgl_disable_dc3co(dev_priv);
-		return;
-	}
-
-	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-	if (!HAS_DISPLAY(dev_priv))
-		return;
-
-	intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
-	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
-	drm_WARN_ON(&dev_priv->drm,
-		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
-					      &cdclk_config));
-
-	gen9_assert_dbuf_enabled(dev_priv);
-
-	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
-		bxt_verify_ddi_phy_power_wells(dev_priv);
-
-	if (DISPLAY_VER(dev_priv) >= 11)
-		/*
-		 * DMC retains HW context only for port A, the other combo
-		 * PHY's HW context for port B is lost after DC transitions,
-		 * so we need to restore it manually.
- */ - intel_combo_phy_init(dev_priv); -} - -static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - gen9_disable_dc_states(dev_priv); -} - -static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - if (!intel_dmc_has_payload(dev_priv)) - return; - - switch (dev_priv->dmc.target_dc_state) { - case DC_STATE_EN_DC3CO: - tgl_enable_dc3co(dev_priv); - break; - case DC_STATE_EN_UPTO_DC6: - skl_enable_dc6(dev_priv); - break; - case DC_STATE_EN_UPTO_DC5: - gen9_enable_dc5(dev_priv); - break; - } -} - -static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ -} - -static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ -} - -static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - return true; -} - -static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) - i830_enable_pipe(dev_priv, PIPE_A); - if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) - i830_enable_pipe(dev_priv, PIPE_B); -} - -static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - i830_disable_pipe(dev_priv, PIPE_B); - i830_disable_pipe(dev_priv, PIPE_A); -} - -static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE && - intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; -} - -static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - if (intel_power_well_refcount(power_well) > 0) - i830_pipes_power_well_enable(dev_priv, power_well); - else - i830_pipes_power_well_disable(dev_priv, power_well); -} - -static void vlv_set_power_well(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, bool enable) -{ - int pw_idx = power_well->desc->vlv.idx; - u32 mask; - u32 state; - u32 ctrl; - - mask = PUNIT_PWRGT_MASK(pw_idx); - state = enable ? 
PUNIT_PWRGT_PWR_ON(pw_idx) : - PUNIT_PWRGT_PWR_GATE(pw_idx); - - vlv_punit_get(dev_priv); - -#define COND \ - ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) - - if (COND) - goto out; - - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); - ctrl &= ~mask; - ctrl |= state; - vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); - - if (wait_for(COND, 100)) - drm_err(&dev_priv->drm, - "timeout setting power well state %08x (%08x)\n", - state, - vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); - -#undef COND - -out: - vlv_punit_put(dev_priv); -} - -static void vlv_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_set_power_well(dev_priv, power_well, true); -} - -static void vlv_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_set_power_well(dev_priv, power_well, false); -} - -static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - int pw_idx = power_well->desc->vlv.idx; - bool enabled = false; - u32 mask; - u32 state; - u32 ctrl; - - mask = PUNIT_PWRGT_MASK(pw_idx); - ctrl = PUNIT_PWRGT_PWR_ON(pw_idx); - - vlv_punit_get(dev_priv); - - state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; - /* - * We only ever set the power-on and power-gate states, anything - * else is unexpected. - */ - drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) && - state != PUNIT_PWRGT_PWR_GATE(pw_idx)); - if (state == ctrl) - enabled = true; - - /* - * A transient state at this point would mean some unexpected party - * is poking at the power controls too. - */ - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; - drm_WARN_ON(&dev_priv->drm, ctrl != state); - - vlv_punit_put(dev_priv); - - return enabled; -} - -static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) -{ - u32 val; - - /* - * On driver load, a pipe may be active and driving a DSI display. - * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck - * (and never recovering) in this case. intel_dsi_post_disable() will - * clear it when we turn off the display. - */ - val = intel_de_read(dev_priv, DSPCLK_GATE_D); - val &= DPOUNIT_CLOCK_GATE_DISABLE; - val |= VRHUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D, val); - - /* - * Disable trickle feed and enable pnd deadline calculation - */ - intel_de_write(dev_priv, MI_ARB_VLV, - MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); - intel_de_write(dev_priv, CBR1_VLV, 0); - - drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0); - intel_de_write(dev_priv, RAWCLK_FREQ_VLV, - DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, - 1000)); -} - -static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) -{ - struct intel_encoder *encoder; - enum pipe pipe; - - /* - * Enable the CRI clock source so we can get at the - * display and the reference clock for VGA - * hotplug / manual detection. Supposedly DSI also - * needs the ref clock up and running. - * - * CHV DPLL B/C have some issues if VGA mode is enabled. 
- */ - for_each_pipe(dev_priv, pipe) { - u32 val = intel_de_read(dev_priv, DPLL(pipe)); - - val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; - if (pipe != PIPE_A) - val |= DPLL_INTEGRATED_CRI_CLK_VLV; - - intel_de_write(dev_priv, DPLL(pipe), val); - } - - vlv_init_display_clock_gating(dev_priv); - - spin_lock_irq(&dev_priv->irq_lock); - valleyview_enable_display_irqs(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); - - /* - * During driver initialization/resume we can avoid restoring the - * part of the HW/SW state that will be inited anyway explicitly. - */ - if (dev_priv->power_domains.initializing) - return; - - intel_hpd_init(dev_priv); - intel_hpd_poll_disable(dev_priv); - - /* Re-enable the ADPA, if we have one */ - for_each_intel_encoder(&dev_priv->drm, encoder) { - if (encoder->type == INTEL_OUTPUT_ANALOG) - intel_crt_reset(&encoder->base); - } - - intel_vga_redisable_power_on(dev_priv); - - intel_pps_unlock_regs_wa(dev_priv); -} - -static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) -{ - spin_lock_irq(&dev_priv->irq_lock); - valleyview_disable_display_irqs(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); - - /* make sure we're done processing display irqs */ - intel_synchronize_irq(dev_priv); - - intel_pps_reset_all(dev_priv); - - /* Prevent us from re-enabling polling on accident in late suspend */ - if (!dev_priv->drm.dev->power.is_suspended) - intel_hpd_poll_enable(dev_priv); -} - -static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_set_power_well(dev_priv, power_well, true); - - vlv_display_power_well_init(dev_priv); -} - -static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_display_power_well_deinit(dev_priv); - - vlv_set_power_well(dev_priv, power_well, false); -} - -static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - /* since ref/cri clock was enabled */ - udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ - - vlv_set_power_well(dev_priv, power_well, true); - - /* - * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - - * 6. De-assert cmn_reset/side_reset. Same as VLV X0. - * a. GUnit 0x2110 bit[0] set to 1 (def 0) - * b. The other bits such as sfr settings / modesel may all - * be set to 0. - * - * This should only be done on init and resume from S3 with - * both PLLs disabled, or we risk losing DPIO and PLL - * synchronization. 
- */
-	intel_de_write(dev_priv, DPIO_CTL,
-		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
-					    struct i915_power_well *power_well)
-{
-	enum pipe pipe;
-
-	for_each_pipe(dev_priv, pipe)
-		assert_pll_disabled(dev_priv, pipe);
-
-	/* Assert common reset */
-	intel_de_write(dev_priv, DPIO_CTL,
-		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
-
-	vlv_set_power_well(dev_priv, power_well, false);
-}
-
 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
-
-#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
-
-static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
-{
-	struct i915_power_well *cmn_bc =
-		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
-	struct i915_power_well *cmn_d =
-		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
-	u32 phy_control = dev_priv->chv_phy_control;
-	u32 phy_status = 0;
-	u32 phy_status_mask = 0xffffffff;
-
-	/*
-	 * The BIOS can leave the PHY in some weird state
-	 * where it doesn't fully power down some parts.
-	 * Disable the asserts until the PHY has been fully
-	 * reset (ie. the power well has been disabled at
-	 * least once).
-	 */
-	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
-		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
-				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
-				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
-				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
-				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
-				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
-
-	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
-		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
-				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
-				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
-
-	if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
-		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
-
-		/* this assumes override is only used to enable lanes */
-		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
-			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
-
-		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
-			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
-
-		/* CL1 is on whenever anything is on in either channel */
-		if (BITS_SET(phy_control,
-			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
-			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
-			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
-
-		/*
-		 * The DPLLB check accounts for the pipe B + port A usage
-		 * with CL2 powered up but all the lanes in the second channel
-		 * powered down.
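The BITS_SET() helper removed above tests that all of the requested bits are set, not just any of them; a self-contained sanity check of those semantics (standalone sketch, not part of the patch):

#include <assert.h>
#include <stdint.h>

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

int main(void)
{
	uint32_t val = 0xf0;

	assert(BITS_SET(val, 0x30));	/* all requested bits present */
	assert(!BITS_SET(val, 0x18));	/* only some present -> false */
	assert(!BITS_SET(val, 0x0f));	/* none present -> false */
	return 0;
}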
- */
-		if (BITS_SET(phy_control,
-			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
-		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
-			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
-
-		if (BITS_SET(phy_control,
-			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
-			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
-		if (BITS_SET(phy_control,
-			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
-			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
-
-		if (BITS_SET(phy_control,
-			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
-			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
-		if (BITS_SET(phy_control,
-			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
-			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
-	}
-
-	if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
-		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
-
-		/* this assumes override is only used to enable lanes */
-		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
-			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
-
-		if (BITS_SET(phy_control,
-			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
-			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
-
-		if (BITS_SET(phy_control,
-			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
-			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
-		if (BITS_SET(phy_control,
-			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
-			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
-	}
-
-	phy_status &= phy_status_mask;
-
-	/*
-	 * The PHY may be busy with some initial calibration and whatnot,
-	 * so the power state can take a while to actually change.
-	 */
-	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
-				       phy_status_mask, phy_status, 10))
-		drm_err(&dev_priv->drm,
-			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
-			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
-			phy_status, dev_priv->chv_phy_control);
-}
-
-#undef BITS_SET
-
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
-					   struct i915_power_well *power_well)
-{
-	enum dpio_phy phy;
-	enum pipe pipe;
-	u32 tmp;
-
-	drm_WARN_ON_ONCE(&dev_priv->drm,
-			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
-			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
-
-	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
-		pipe = PIPE_A;
-		phy = DPIO_PHY0;
-	} else {
-		pipe = PIPE_C;
-		phy = DPIO_PHY1;
-	}
-
-	/* since ref/cri clock was enabled */
-	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-	vlv_set_power_well(dev_priv, power_well, true);
-
-	/* Poll for phypwrgood signal */
-	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
-				  PHY_POWERGOOD(phy), 1))
-		drm_err(&dev_priv->drm, "Display PHY %d is not powered up\n",
-			phy);
-
-	vlv_dpio_get(dev_priv);
-
-	/* Enable dynamic power down */
-	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
-	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
-		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
-	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
-
-	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
-		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
-		tmp |= DPIO_DYNPWRDOWNEN_CH1;
-		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
-	} else {
-		/*
-		 * Force the non-existing CL2 off. BXT does this
-		 * too, so maybe it saves some power even though
-		 * CL2 doesn't exist?
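The PHY_POWERGOOD wait above is a bounded poll; a minimal sketch of that poll-with-timeout shape, assuming hypothetical read_status()/sleep_us() helpers rather than the driver's intel_de_wait_for_set():

#include <stdint.h>

/* Hypothetical helpers; the driver uses intel_de_wait_for_set() instead. */
extern uint32_t read_status(void);
extern void sleep_us(unsigned int us);

/* Returns 0 once all 'bits' are set, or -1 on timeout. */
static int wait_for_bits_set(uint32_t bits, unsigned int timeout_us)
{
	unsigned int elapsed = 0;

	while ((read_status() & bits) != bits) {
		if (elapsed >= timeout_us)
			return -1;
		sleep_us(10);
		elapsed += 10;
	}

	return 0;
}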
- */
-		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
-		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
-		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
-	}
-
-	vlv_dpio_put(dev_priv);
-
-	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
-	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
-		       dev_priv->chv_phy_control);
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
-		    phy, dev_priv->chv_phy_control);
-
-	assert_chv_phy_status(dev_priv);
-}
-
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
-					    struct i915_power_well *power_well)
-{
-	enum dpio_phy phy;
-
-	drm_WARN_ON_ONCE(&dev_priv->drm,
-			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
-			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
-
-	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
-		phy = DPIO_PHY0;
-		assert_pll_disabled(dev_priv, PIPE_A);
-		assert_pll_disabled(dev_priv, PIPE_B);
-	} else {
-		phy = DPIO_PHY1;
-		assert_pll_disabled(dev_priv, PIPE_C);
-	}
-
-	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
-	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
-		       dev_priv->chv_phy_control);
-
-	vlv_set_power_well(dev_priv, power_well, false);
-
-	drm_dbg_kms(&dev_priv->drm,
-		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
-		    phy, dev_priv->chv_phy_control);
-
-	/* PHY is fully reset now, so we can enable the PHY state asserts */
-	dev_priv->chv_phy_assert[phy] = true;
-
-	assert_chv_phy_status(dev_priv);
-}
-
-static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
-				     enum dpio_channel ch, bool override, unsigned int mask)
-{
-	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
-	u32 reg, val, expected, actual;
-
-	/*
-	 * The BIOS can leave the PHY in some weird state
-	 * where it doesn't fully power down some parts.
-	 * Disable the asserts until the PHY has been fully
-	 * reset (ie. the power well has been disabled at
-	 * least once).
-	 */
-	if (!dev_priv->chv_phy_assert[phy])
-		return;
-
-	if (ch == DPIO_CH0)
-		reg = _CHV_CMN_DW0_CH0;
-	else
-		reg = _CHV_CMN_DW6_CH1;
-
-	vlv_dpio_get(dev_priv);
-	val = vlv_dpio_read(dev_priv, pipe, reg);
-	vlv_dpio_put(dev_priv);
-
-	/*
-	 * This assumes !override is only used when the port is disabled.
-	 * All lanes should power down even without the override when
-	 * the port is disabled.
-	 */
-	if (!override || mask == 0xf) {
-		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
-		/*
-		 * If CH1 common lane is not active anymore
-		 * (eg. for pipe B DPLL) the entire channel will
-		 * shut down, which causes the common lane registers
-		 * to read as 0. That means we can't actually check
-		 * the lane power down status bits, but as the entire
-		 * register reads as 0 it's a good indication that the
-		 * channel is indeed entirely powered down.
-		 */
-		if (ch == DPIO_CH1 && val == 0)
-			expected = 0;
-	} else if (mask != 0x0) {
-		expected = DPIO_ANYDL_POWERDOWN;
-	} else {
-		expected = 0;
-	}
-
-	if (ch == DPIO_CH0)
-		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
-	else
-		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
-	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
-
-	drm_WARN(&dev_priv->drm, actual != expected,
-		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d.
(0x%x = 0x%08x)\n", - !!(actual & DPIO_ALLDL_POWERDOWN), - !!(actual & DPIO_ANYDL_POWERDOWN), - !!(expected & DPIO_ALLDL_POWERDOWN), - !!(expected & DPIO_ANYDL_POWERDOWN), - reg, val); -} - -bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, - enum dpio_channel ch, bool override) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - bool was_override; - - mutex_lock(&power_domains->lock); - - was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - - if (override == was_override) - goto out; - - if (override) - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - else - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); - - drm_dbg_kms(&dev_priv->drm, - "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", - phy, ch, dev_priv->chv_phy_control); - - assert_chv_phy_status(dev_priv); - -out: - mutex_unlock(&power_domains->lock); - - return was_override; -} - -void chv_phy_powergate_lanes(struct intel_encoder *encoder, - bool override, unsigned int mask) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_power_domains *power_domains = &dev_priv->power_domains; - enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); - enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); - - mutex_lock(&power_domains->lock); - - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); - - if (override) - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - else - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); - - drm_dbg_kms(&dev_priv->drm, - "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", - phy, ch, mask, dev_priv->chv_phy_control); - - assert_chv_phy_status(dev_priv); - - assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); - - mutex_unlock(&power_domains->lock); -} - -static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum pipe pipe = PIPE_A; - bool enabled; - u32 state, ctrl; - - vlv_punit_get(dev_priv); - - state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); - /* - * We only ever set the power-on and power-gate states, anything - * else is unexpected. - */ - drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && - state != DP_SSS_PWR_GATE(pipe)); - enabled = state == DP_SSS_PWR_ON(pipe); - - /* - * A transient state at this point would mean some unexpected party - * is poking at the power controls too. - */ - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); - drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); - - vlv_punit_put(dev_priv); - - return enabled; -} - -static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, - bool enable) -{ - enum pipe pipe = PIPE_A; - u32 state; - u32 ctrl; - - state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); - - vlv_punit_get(dev_priv); - -#define COND \ - ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) - - if (COND) - goto out; - - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); - ctrl &= ~DP_SSC_MASK(pipe); - ctrl |= enable ? 
DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); - vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); - - if (wait_for(COND, 100)) - drm_err(&dev_priv->drm, - "timeout setting power well state %08x (%08x)\n", - state, - vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); - -#undef COND - -out: - vlv_punit_put(dev_priv); -} - -static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); -} - -static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - chv_set_pipe_power_well(dev_priv, power_well, true); - - vlv_display_power_well_init(dev_priv); -} - -static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) +static void __async_put_domains_mask(struct i915_power_domains *power_domains, + struct intel_power_domain_mask *mask) { - vlv_display_power_well_deinit(dev_priv); - - chv_set_pipe_power_well(dev_priv, power_well, false); -} - -static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) -{ - return power_domains->async_put_domains[0] | - power_domains->async_put_domains[1]; + bitmap_or(mask->bits, + power_domains->async_put_domains[0].bits, + power_domains->async_put_domains[1].bits, + POWER_DOMAIN_NUM); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) @@ -1869,8 +340,11 @@ assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, power_domains); - return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] & - power_domains->async_put_domains[1]); + + return !drm_WARN_ON(&i915->drm, + bitmap_intersects(power_domains->async_put_domains[0].bits, + power_domains->async_put_domains[1].bits, + POWER_DOMAIN_NUM)); } static bool @@ -1879,14 +353,17 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains) struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, power_domains); + struct intel_power_domain_mask async_put_mask; enum intel_display_power_domain domain; bool err = false; err |= !assert_async_put_domain_masks_disjoint(power_domains); - err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref != - !!__async_put_domains_mask(power_domains)); + __async_put_domains_mask(power_domains, &async_put_mask); + err |= drm_WARN_ON(&i915->drm, + !!power_domains->async_put_wakeref != + !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM)); - for_each_power_domain(domain, __async_put_domains_mask(power_domains)) + for_each_power_domain(domain, &async_put_mask) err |= drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); @@ -1894,14 +371,14 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains) } static void print_power_domains(struct i915_power_domains *power_domains, - const char *prefix, u64 mask) + const char *prefix, struct intel_power_domain_mask *mask) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, power_domains); enum intel_display_power_domain domain; - drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask)); + drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM)); for_each_power_domain(domain, mask) drm_dbg(&i915->drm, "%s use_count %d\n", intel_display_power_domain_str(domain), @@ -1919,9 +396,9 @@ print_async_put_domains_state(struct i915_power_domains 
*power_domains) power_domains->async_put_wakeref); print_power_domains(power_domains, "async_put_domains[0]", - power_domains->async_put_domains[0]); + &power_domains->async_put_domains[0]); print_power_domains(power_domains, "async_put_domains[1]", - power_domains->async_put_domains[1]); + &power_domains->async_put_domains[1]); } static void @@ -1945,11 +422,13 @@ verify_async_put_domains_state(struct i915_power_domains *power_domains) #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ -static u64 async_put_domains_mask(struct i915_power_domains *power_domains) +static void async_put_domains_mask(struct i915_power_domains *power_domains, + struct intel_power_domain_mask *mask) + { assert_async_put_domain_masks_disjoint(power_domains); - return __async_put_domains_mask(power_domains); + __async_put_domains_mask(power_domains, mask); } static void @@ -1958,8 +437,8 @@ async_put_domains_clear_domain(struct i915_power_domains *power_domains, { assert_async_put_domain_masks_disjoint(power_domains); - power_domains->async_put_domains[0] &= ~BIT_ULL(domain); - power_domains->async_put_domains[1] &= ~BIT_ULL(domain); + clear_bit(domain, power_domains->async_put_domains[0].bits); + clear_bit(domain, power_domains->async_put_domains[1].bits); } static bool @@ -1967,16 +446,19 @@ intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct intel_power_domain_mask async_put_mask; bool ret = false; - if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain))) + async_put_domains_mask(power_domains, &async_put_mask); + if (!test_bit(domain, async_put_mask.bits)) goto out_verify; async_put_domains_clear_domain(power_domains, domain); ret = true; - if (async_put_domains_mask(power_domains)) + async_put_domains_mask(power_domains, &async_put_mask); + if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM)) goto out_verify; cancel_delayed_work(&power_domains->async_put_work); @@ -1998,7 +480,7 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv, if (intel_display_power_grab_async_put_ref(dev_priv, domain)) return; - for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) + for_each_power_domain_well(dev_priv, power_well, domain) intel_power_well_get(dev_priv, power_well); power_domains->domain_use_count[domain]++; @@ -2079,20 +561,22 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv, struct i915_power_domains *power_domains; struct i915_power_well *power_well; const char *name = intel_display_power_domain_str(domain); + struct intel_power_domain_mask async_put_mask; power_domains = &dev_priv->power_domains; drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], "Use count on domain %s is already zero\n", name); + async_put_domains_mask(power_domains, &async_put_mask); drm_WARN(&dev_priv->drm, - async_put_domains_mask(power_domains) & BIT_ULL(domain), + test_bit(domain, async_put_mask.bits), "Async disabling of domain %s is pending\n", name); power_domains->domain_use_count[domain]--; - for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) + for_each_power_domain_well_reverse(dev_priv, power_well, domain) intel_power_well_put(dev_priv, power_well); } @@ -2121,7 +605,8 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains, } static void -release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) +release_async_put_domains(struct i915_power_domains 
*power_domains, + struct intel_power_domain_mask *mask) { struct drm_i915_private *dev_priv = container_of(power_domains, struct drm_i915_private, @@ -2169,12 +654,15 @@ intel_display_power_put_async_work(struct work_struct *work) goto out_verify; release_async_put_domains(power_domains, - power_domains->async_put_domains[0]); + &power_domains->async_put_domains[0]); /* Requeue the work if more domains were async put meanwhile. */ - if (power_domains->async_put_domains[1]) { - power_domains->async_put_domains[0] = - fetch_and_zero(&power_domains->async_put_domains[1]); + if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) { + bitmap_copy(power_domains->async_put_domains[0].bits, + power_domains->async_put_domains[1].bits, + POWER_DOMAIN_NUM); + bitmap_zero(power_domains->async_put_domains[1].bits, + POWER_DOMAIN_NUM); queue_async_put_domains_work(power_domains, fetch_and_zero(&new_work_wakeref)); } else { @@ -2226,9 +714,9 @@ void __intel_display_power_put_async(struct drm_i915_private *i915, /* Let a pending work requeue itself or queue a new one. */ if (power_domains->async_put_wakeref) { - power_domains->async_put_domains[1] |= BIT_ULL(domain); + set_bit(domain, power_domains->async_put_domains[1].bits); } else { - power_domains->async_put_domains[0] |= BIT_ULL(domain); + set_bit(domain, power_domains->async_put_domains[0].bits); queue_async_put_domains_work(power_domains, fetch_and_zero(&work_wakeref)); } @@ -2259,6 +747,7 @@ out_verify: void intel_display_power_flush_work(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->power_domains; + struct intel_power_domain_mask async_put_mask; intel_wakeref_t work_wakeref; mutex_lock(&power_domains->lock); @@ -2267,8 +756,8 @@ void intel_display_power_flush_work(struct drm_i915_private *i915) if (!work_wakeref) goto out_verify; - release_async_put_domains(power_domains, - async_put_domains_mask(power_domains)); + async_put_domains_mask(power_domains, &async_put_mask); + release_async_put_domains(power_domains, &async_put_mask); cancel_delayed_work(&power_domains->async_put_work); out_verify: @@ -2347,13 +836,13 @@ intel_display_power_get_in_set(struct drm_i915_private *i915, { intel_wakeref_t __maybe_unused wf; - drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); + drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits)); wf = intel_display_power_get(i915, domain); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) power_domain_set->wakerefs[domain] = wf; #endif - power_domain_set->mask |= BIT_ULL(domain); + set_bit(domain, power_domain_set->mask.bits); } bool @@ -2363,7 +852,7 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, { intel_wakeref_t wf; - drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); + drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits)); wf = intel_display_power_get_if_enabled(i915, domain); if (!wf) @@ -2372,7 +861,7 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) power_domain_set->wakerefs[domain] = wf; #endif - power_domain_set->mask |= BIT_ULL(domain); + set_bit(domain, power_domain_set->mask.bits); return true; } @@ -2380,11 +869,12 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, void intel_display_power_put_mask_in_set(struct drm_i915_private *i915, struct intel_display_power_domain_set *power_domain_set, - u64 mask) + struct intel_power_domain_mask *mask) { enum 
intel_display_power_domain domain; - drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask); + drm_WARN_ON(&i915->drm, + !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM)); for_each_power_domain(domain, mask) { intel_wakeref_t __maybe_unused wf = -1; @@ -2393,2363 +883,10 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); #endif intel_display_power_put(i915, domain, wf); - power_domain_set->mask &= ~BIT_ULL(domain); + clear_bit(domain, power_domain_set->mask.bits); } } -#define I830_PIPES_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define CHV_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - 
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define HSW_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define BDW_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - 
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) -#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) -#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) -#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -/* - * ICL PW_0/PG_0 domains (HW/DMC control): - * - PCI - * - clocks except port PLL - * - central power except FBC - * - shared functions except pipe interrupts, pipe MBUS, DBUF registers - * ICL PW_1/PG_1 domains (HW/DMC control): - * - DBUF function - * - PIPE_A and its planes, except VGA - * - transcoder EDP + PSR - * - transcoder DSI - * - DDI_A - * - FBC - */ -#define ICL_PW_4_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - /* VDSC/joining */ -#define ICL_PW_3_POWER_DOMAINS ( \ - ICL_PW_4_POWER_DOMAINS | \ - 
BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_AUX_E) | \ - BIT_ULL(POWER_DOMAIN_AUX_F) | \ - BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ - BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - /* - * - transcoder WD - * - KVMR (HW control) - */ -#define ICL_PW_2_POWER_DOMAINS ( \ - ICL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - /* - * - KVMR (HW control) - */ -#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - ICL_PW_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_DC_OFF) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define ICL_DDI_IO_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) -#define ICL_DDI_IO_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) -#define ICL_DDI_IO_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) -#define ICL_DDI_IO_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) -#define ICL_DDI_IO_E_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) -#define ICL_DDI_IO_F_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) - -#define ICL_AUX_A_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_A)) -#define ICL_AUX_B_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_B)) -#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_C)) -#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_D)) -#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_E)) -#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_F)) -#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) -#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) -#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) -#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) - -#define TGL_PW_5_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_D) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ - BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define TGL_PW_4_POWER_DOMAINS ( \ - TGL_PW_5_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define TGL_PW_3_POWER_DOMAINS ( \ - TGL_PW_4_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - 
BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define TGL_PW_2_POWER_DOMAINS ( \ - TGL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - TGL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) -#define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) -#define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) -#define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) -#define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5) -#define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6) - -#define TGL_AUX_A_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_A)) -#define TGL_AUX_B_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_B)) -#define TGL_AUX_C_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_C)) - -#define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) -#define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) -#define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) -#define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) -#define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5) -#define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6) - -#define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) -#define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) -#define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) -#define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) -#define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5) -#define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6) - -#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ - BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) - -#define RKL_PW_4_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define RKL_PW_3_POWER_DOMAINS ( \ - RKL_PW_4_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - 
BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -/* - * There is no PW_2/PG_2 on RKL. - * - * RKL PW_1/PG_1 domains (under HW/DMC control): - * - DBUF function (note: registers are in PW0) - * - PIPE_A and its planes and VDSC/joining, except VGA - * - transcoder A - * - DDI_A and DDI_B - * - FBC - * - * RKL PW_0/PG_0 domains (under HW/DMC control): - * - PCI - * - clocks except port PLL - * - shared functions: - * * interrupts except pipe interrupts - * * MBus except PIPE_MBUS_DBOX_CTL - * * DBUF registers - * - central power except FBC - * - top-level GTC (DDI-level GTC is in the well associated with the DDI) - */ - -#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - RKL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -/* - * DG1 onwards Audio MMIO/VERBS lies in PG0 power well. - */ -#define DG1_PW_3_POWER_DOMAINS ( \ - TGL_PW_4_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define DG1_PW_2_POWER_DOMAINS ( \ - DG1_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define DG1_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - DG1_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -/* - * XE_LPD Power Domains - * - * Previous platforms required that PG(n-1) be enabled before PG(n). That - * dependency chain turns into a dependency tree on XE_LPD: - * - * PG0 - * | - * --PG1-- - * / \ - * PGA --PG2-- - * / | \ - * PGB PGC PGD - * - * Power wells must be enabled from top to bottom and disabled from bottom - * to top. This allows pipes to be power gated independently. 
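One valid total order over the XE_LPD tree above can be written down explicitly; a small sketch assuming hypothetical pw_enable()/pw_disable() helpers (PG0 omitted, as it is under HW/DMC control):

extern void pw_enable(const char *name);
extern void pw_disable(const char *name);

/* Parents before children on the way up; exact reverse on the way down. */
static const char * const xelpd_bringup_order[] = {
	"PG1", "PGA", "PG2", "PGB", "PGC", "PGD",
};

static void xelpd_power_up_all(void)
{
	for (int i = 0; i < 6; i++)
		pw_enable(xelpd_bringup_order[i]);
}

static void xelpd_power_down_all(void)
{
	for (int i = 5; i >= 0; i--)
		pw_disable(xelpd_bringup_order[i]);
}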
- */ - -#define XELPD_PW_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_D) | \ - BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define XELPD_PW_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define XELPD_PW_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define XELPD_PW_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define XELPD_PW_2_POWER_DOMAINS ( \ - XELPD_PW_B_POWER_DOMAINS | \ - XELPD_PW_C_POWER_DOMAINS | \ - XELPD_PW_D_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \ - BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ - BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -/* - * XELPD PW_1/PG_1 domains (under HW/DMC control): - * - DBUF function (registers are in PW0) - * - Transcoder A - * - DDI_A and DDI_B - * - * XELPD PW_0/PG_0 domains (under HW/DMC control): - * - PCI - * - Clocks except port PLL - * - Shared functions: - * * interrupts except pipe interrupts - * * MBus except PIPE_MBUS_DBOX_CTL - * * DBUF registers - * - Central power except FBC - * - Top-level GTC (DDI-level GTC is in the well associated with the DDI) - */ - -#define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - XELPD_PW_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) -#define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) -#define XELPD_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) -#define XELPD_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) -#define XELPD_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) -#define XELPD_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) - -#define XELPD_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) -#define XELPD_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) -#define XELPD_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) -#define XELPD_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) - -#define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD) -#define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD) -#define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) -#define XELPD_DDI_IO_TC2_POWER_DOMAINS 
BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) -#define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) -#define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) - -static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = i9xx_always_on_power_well_noop, - .disable = i9xx_always_on_power_well_noop, - .is_enabled = i9xx_always_on_power_well_enabled, -}; - -static const struct i915_power_well_ops chv_pipe_power_well_ops = { - .sync_hw = chv_pipe_power_well_sync_hw, - .enable = chv_pipe_power_well_enable, - .disable = chv_pipe_power_well_disable, - .is_enabled = chv_pipe_power_well_enabled, -}; - -static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = chv_dpio_cmn_power_well_enable, - .disable = chv_dpio_cmn_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static const struct i915_power_well_desc i9xx_always_on_power_well[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, -}; - -static const struct i915_power_well_ops i830_pipes_power_well_ops = { - .sync_hw = i830_pipes_power_well_sync_hw, - .enable = i830_pipes_power_well_enable, - .disable = i830_pipes_power_well_disable, - .is_enabled = i830_pipes_power_well_enabled, -}; - -static const struct i915_power_well_desc i830_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "pipes", - .domains = I830_PIPES_POWER_DOMAINS, - .ops = &i830_pipes_power_well_ops, - .id = DISP_PW_ID_NONE, - }, -}; - -static const struct i915_power_well_regs hsw_power_well_regs = { - .bios = HSW_PWR_WELL_CTL1, - .driver = HSW_PWR_WELL_CTL2, - .kvmr = HSW_PWR_WELL_CTL3, - .debug = HSW_PWR_WELL_CTL4, -}; - -static const struct i915_power_well_ops hsw_power_well_ops = { - .regs = &hsw_power_well_regs, - .sync_hw = hsw_power_well_sync_hw, - .enable = hsw_power_well_enable, - .disable = hsw_power_well_disable, - .is_enabled = hsw_power_well_enabled, -}; - -static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = gen9_dc_off_power_well_enable, - .disable = gen9_dc_off_power_well_disable, - .is_enabled = gen9_dc_off_power_well_enabled, -}; - -static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = bxt_dpio_cmn_power_well_enable, - .disable = bxt_dpio_cmn_power_well_disable, - .is_enabled = bxt_dpio_cmn_power_well_enabled, -}; - -static const struct i915_power_well_desc hsw_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "display", - .domains = HSW_DISPLAY_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = HSW_DISP_PW_GLOBAL, - { - .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, - .hsw.has_vga = true, - }, - }, -}; - -static const struct i915_power_well_desc bdw_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "display", - .domains = BDW_DISPLAY_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = HSW_DISP_PW_GLOBAL, - { - .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, - 
.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - }, - }, -}; - -static const struct i915_power_well_ops vlv_display_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = vlv_display_power_well_enable, - .disable = vlv_display_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = vlv_dpio_cmn_power_well_enable, - .disable = vlv_dpio_cmn_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static const struct i915_power_well_ops vlv_dpio_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = vlv_power_well_enable, - .disable = vlv_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static const struct i915_power_well_desc vlv_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "display", - .domains = VLV_DISPLAY_POWER_DOMAINS, - .ops = &vlv_display_power_well_ops, - .id = VLV_DISP_PW_DISP2D, - { - .vlv.idx = PUNIT_PWGT_IDX_DISP2D, - }, - }, - { - .name = "dpio-tx-b-01", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, - }, - }, - { - .name = "dpio-tx-b-23", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, - }, - }, - { - .name = "dpio-tx-c-01", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, - }, - }, - { - .name = "dpio-tx-c-23", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, - }, - }, - { - .name = "dpio-common", - .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, - .ops = &vlv_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, - }, - }, -}; - -static const struct i915_power_well_desc chv_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "display", - /* - * Pipe A power well is the new disp2d well. Pipe B and C - * power wells don't actually exist. Pipe A power well is - * required for any pipe to work. 
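 *
 * (Illustrative consequence of the above: with the disp2d-style well as
 * the only real well, the CHV_DISPLAY_POWER_DOMAINS mask attached to it
 * presumably has to cover pipes A, B and C alike, since there is no
 * per-pipe well for B or C to fall back on.)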
- */ - .domains = CHV_DISPLAY_POWER_DOMAINS, - .ops = &chv_pipe_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "dpio-common-bc", - .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, - .ops = &chv_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, - }, - }, - { - .name = "dpio-common-d", - .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, - .ops = &chv_dpio_cmn_power_well_ops, - .id = CHV_DISP_PW_DPIO_CMN_D, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, - }, - }, -}; - -static const struct i915_power_well_desc skl_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "MISC IO power well", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_MISC_IO, - { - .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, - }, - }, - { - .name = "DC off", - .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_2, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A/E IO power well", - .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, - }, - }, - { - .name = "DDI B IO power well", - .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_B, - }, - }, - { - .name = "DDI C IO power well", - .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_C, - }, - }, - { - .name = "DDI D IO power well", - .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_D, - }, - }, -}; - -static const struct i915_power_well_desc bxt_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_2, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "dpio-common-a", - .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DISP_PW_DPIO_CMN_A, - { - .bxt.phy = DPIO_PHY1, - }, - }, - { - .name = "dpio-common-bc", - .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = 
VLV_DISP_PW_DPIO_CMN_BC, - { - .bxt.phy = DPIO_PHY0, - }, - }, -}; - -static const struct i915_power_well_desc glk_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = SKL_PW_CTL_IDX_PW_2, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "dpio-common-a", - .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DISP_PW_DPIO_CMN_A, - { - .bxt.phy = DPIO_PHY1, - }, - }, - { - .name = "dpio-common-b", - .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .bxt.phy = DPIO_PHY0, - }, - }, - { - .name = "dpio-common-c", - .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = GLK_DISP_PW_DPIO_CMN_C, - { - .bxt.phy = DPIO_PHY2, - }, - }, - { - .name = "AUX A", - .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = GLK_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = GLK_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX C", - .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = GLK_PW_CTL_IDX_AUX_C, - }, - }, - { - .name = "DDI A IO power well", - .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = GLK_PW_CTL_IDX_DDI_A, - }, - }, - { - .name = "DDI B IO power well", - .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_B, - }, - }, - { - .name = "DDI C IO power well", - .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = SKL_PW_CTL_IDX_DDI_C, - }, - }, -}; - -static const struct i915_power_well_regs icl_aux_power_well_regs = { - .bios = ICL_PWR_WELL_CTL_AUX1, - .driver = ICL_PWR_WELL_CTL_AUX2, - .debug = ICL_PWR_WELL_CTL_AUX4, -}; - -static const struct i915_power_well_ops icl_aux_power_well_ops = { - .regs = &icl_aux_power_well_regs, - .sync_hw = hsw_power_well_sync_hw, - .enable = icl_aux_power_well_enable, - .disable = icl_aux_power_well_disable, - .is_enabled = hsw_power_well_enabled, -}; - -static const struct i915_power_well_regs icl_ddi_power_well_regs = { - .bios = ICL_PWR_WELL_CTL_DDI1, - .driver = ICL_PWR_WELL_CTL_DDI2, - .debug = ICL_PWR_WELL_CTL_DDI4, -}; - -static const struct i915_power_well_ops icl_ddi_power_well_ops = { - .regs = &icl_ddi_power_well_regs, - .sync_hw = hsw_power_well_sync_hw, - .enable = hsw_power_well_enable, - .disable = hsw_power_well_disable, - .is_enabled = hsw_power_well_enabled, -}; - -static const struct i915_power_well_desc icl_power_wells[] = { - { - .name = "always-on", - 
.always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = ICL_PW_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_fuses = true, - }, - }, - { - .name = "power well 3", - .domains = ICL_PW_3_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_3, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A IO", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - }, - }, - { - .name = "DDI B IO", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - }, - }, - { - .name = "DDI C IO", - .domains = ICL_DDI_IO_C_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_C, - }, - }, - { - .name = "DDI D IO", - .domains = ICL_DDI_IO_D_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_D, - }, - }, - { - .name = "DDI E IO", - .domains = ICL_DDI_IO_E_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_E, - }, - }, - { - .name = "DDI F IO", - .domains = ICL_DDI_IO_F_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_F, - }, - }, - { - .name = "AUX A", - .domains = ICL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = ICL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX C TC1", - .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_C, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX D TC2", - .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_D, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX E TC3", - .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_E, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX F TC4", - .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_F, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX C TBT1", - .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX D TBT2", - .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, - .hsw.is_tc_tbt 
= true, - }, - }, - { - .name = "AUX E TBT3", - .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX F TBT4", - .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "power well 4", - .domains = ICL_PW_4_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), - }, - }, -}; - -static void -tgl_tc_cold_request(struct drm_i915_private *i915, bool block) -{ - u8 tries = 0; - int ret; - - while (1) { - u32 low_val; - u32 high_val = 0; - - if (block) - low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; - else - low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ; - - /* - * The spec states that we should time out the request after 200us, - * but the function below will time out after 500us - */ - ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val); - if (ret == 0) { - if (block && - (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) - ret = -EIO; - else - break; - } - - if (++tries == 3) - break; - - msleep(1); - } - - if (ret) - drm_err(&i915->drm, "TC cold %sblock failed\n", - block ? "" : "un"); - else - drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", - block ? "" : "un"); -} - -static void -tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915, - struct i915_power_well *power_well) -{ - tgl_tc_cold_request(i915, true); -} - -static void -tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915, - struct i915_power_well *power_well) -{ - tgl_tc_cold_request(i915, false); -} - -static void -tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915, - struct i915_power_well *power_well) -{ - if (intel_power_well_refcount(power_well) > 0) - tgl_tc_cold_off_power_well_enable(i915, power_well); - else - tgl_tc_cold_off_power_well_disable(i915, power_well); -} - -static bool -tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - /* - * Not the correct implementation, but there is no way to just read it - * from PCODE, so return the refcount to avoid state mismatch errors - */ - return intel_power_well_refcount(power_well); -} - -static const struct i915_power_well_ops tgl_tc_cold_off_ops = { - .sync_hw = tgl_tc_cold_off_power_well_sync_hw, - .enable = tgl_tc_cold_off_power_well_enable, - .disable = tgl_tc_cold_off_power_well_disable, - .is_enabled = tgl_tc_cold_off_power_well_is_enabled, -}; - -static const struct i915_power_well_desc tgl_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = TGL_PW_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_fuses = true, - }, - }, - { - .name = "power well 3", - .domains = 
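/*
 * Editor's sketch of how the TC-cold helpers above are meant to be driven
 * (hypothetical direct calls shown for clarity; the real driver only
 * reaches them through the "TC cold off" power well ops and the power
 * domain refcounting, never by hand):
 *
 *	tgl_tc_cold_request(i915, true);   // BLOCK_REQ, retried up to 3 times
 *	... safely access TC port registers while TC-cold is blocked ...
 *	tgl_tc_cold_request(i915, false);  // UNBLOCK_REQ once done
 */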
TGL_PW_3_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_3, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A IO", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - } - }, - { - .name = "DDI B IO", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - } - }, - { - .name = "DDI C IO", - .domains = ICL_DDI_IO_C_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_C, - } - }, - { - .name = "DDI IO TC1", - .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, - }, - }, - { - .name = "DDI IO TC2", - .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, - }, - }, - { - .name = "DDI IO TC3", - .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, - }, - }, - { - .name = "DDI IO TC4", - .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, - }, - }, - { - .name = "DDI IO TC5", - .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, - }, - }, - { - .name = "DDI IO TC6", - .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, - }, - }, - { - .name = "TC cold off", - .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, - .ops = &tgl_tc_cold_off_ops, - .id = TGL_DISP_PW_TC_COLD_OFF, - }, - { - .name = "AUX A", - .domains = TGL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = TGL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX C", - .domains = TGL_AUX_C_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_C, - }, - }, - { - .name = "AUX USBC1", - .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX USBC2", - .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX USBC3", - .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX USBC4", - .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX USBC5", - .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX USBC6", - .domains = 
TGL_AUX_IO_USBC6_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX TBT1", - .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT2", - .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT3", - .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT4", - .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT5", - .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT6", - .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "power well 4", - .domains = TGL_PW_4_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), - } - }, - { - .name = "power well 5", - .domains = TGL_PW_5_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_PW_5, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_D), - }, - }, -}; - -static const struct i915_power_well_desc rkl_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 3", - .domains = RKL_PW_3_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_3, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "power well 4", - .domains = RKL_PW_4_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), - } - }, - { - .name = "DDI A IO", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - } - }, - { - .name = "DDI B IO", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - } - }, - { - .name = "DDI IO TC1", - .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, - }, - }, - { - .name = "DDI IO TC2", - .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, - .ops = 
&icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, - }, - }, - { - .name = "AUX A", - .domains = ICL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = ICL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX USBC1", - .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - }, - }, - { - .name = "AUX USBC2", - .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, - }, - }, -}; - -static const struct i915_power_well_desc dg1_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = DG1_PW_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_fuses = true, - }, - }, - { - .name = "power well 3", - .domains = DG1_PW_3_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_3, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A IO", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - } - }, - { - .name = "DDI B IO", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - } - }, - { - .name = "DDI IO TC1", - .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, - }, - }, - { - .name = "DDI IO TC2", - .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, - }, - }, - { - .name = "AUX A", - .domains = TGL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = TGL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX USBC1", - .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX USBC2", - .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "power well 4", - .domains = TGL_PW_4_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), - } - }, - { - .name 
= "power well 5", - .domains = TGL_PW_5_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_PW_5, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_D), - }, - }, -}; - -static const struct i915_power_well_desc xelpd_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_DC_OFF, - }, - { - .name = "power well 2", - .domains = XELPD_PW_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "power well A", - .domains = XELPD_PW_A_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_PW_A, - .hsw.irq_pipe_mask = BIT(PIPE_A), - .hsw.has_fuses = true, - }, - }, - { - .name = "power well B", - .domains = XELPD_PW_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_PW_B, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_fuses = true, - }, - }, - { - .name = "power well C", - .domains = XELPD_PW_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_PW_C, - .hsw.irq_pipe_mask = BIT(PIPE_C), - .hsw.has_fuses = true, - }, - }, - { - .name = "power well D", - .domains = XELPD_PW_D_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_PW_D, - .hsw.irq_pipe_mask = BIT(PIPE_D), - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A IO", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - } - }, - { - .name = "DDI B IO", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - } - }, - { - .name = "DDI C IO", - .domains = ICL_DDI_IO_C_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_DDI_C, - } - }, - { - .name = "DDI IO D_XELPD", - .domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_DDI_D, - } - }, - { - .name = "DDI IO E_XELPD", - .domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_DDI_E, - } - }, - { - .name = "DDI IO TC1", - .domains = XELPD_DDI_IO_TC1_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, - } - }, - { - .name = "DDI IO TC2", - .domains = XELPD_DDI_IO_TC2_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, - } - }, - { - .name = "DDI IO TC3", - .domains = XELPD_DDI_IO_TC3_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, - } - }, - { - .name = "DDI IO TC4", - .domains = XELPD_DDI_IO_TC4_POWER_DOMAINS, - .ops = &icl_ddi_power_well_ops, - .id = DISP_PW_ID_NONE, - { 
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, - } - }, - { - .name = "AUX A", - .domains = ICL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - .hsw.fixed_enable_delay = 600, - }, - }, - { - .name = "AUX B", - .domains = ICL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - .hsw.fixed_enable_delay = 600, - }, - }, - { - .name = "AUX C", - .domains = TGL_AUX_C_IO_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = ICL_PW_CTL_IDX_AUX_C, - .hsw.fixed_enable_delay = 600, - }, - }, - { - .name = "AUX D_XELPD", - .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_AUX_D, - .hsw.fixed_enable_delay = 600, - }, - }, - { - .name = "AUX E_XELPD", - .domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = XELPD_PW_CTL_IDX_AUX_E, - }, - }, - { - .name = "AUX USBC1", - .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, - .hsw.fixed_enable_delay = 600, - }, - }, - { - .name = "AUX USBC2", - .domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, - }, - }, - { - .name = "AUX USBC3", - .domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, - }, - }, - { - .name = "AUX USBC4", - .domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, - }, - }, - { - .name = "AUX TBT1", - .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT2", - .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT3", - .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT4", - .domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS, - .ops = &icl_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, - .hsw.is_tc_tbt = true, - }, - }, -}; - static int sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, int disable_power_well) @@ -4828,57 +965,6 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, return mask; } -static int -__set_power_wells(struct i915_power_domains *power_domains, - const struct i915_power_well_desc *power_well_descs, - int power_well_descs_sz, u64 skip_mask) -{ - struct drm_i915_private *i915 = container_of(power_domains, - struct drm_i915_private, - power_domains); - u64 power_well_ids = 0; - int power_well_count = 0; - int i, plt_idx = 0; - - for (i = 0; i < power_well_descs_sz; i++) - if (!(BIT_ULL(power_well_descs[i].id) & skip_mask)) - power_well_count++; - - power_domains->power_well_count = power_well_count; - power_domains->power_wells = - kcalloc(power_well_count, - sizeof(*power_domains->power_wells), - GFP_KERNEL); - if 
(!power_domains->power_wells) - return -ENOMEM; - - for (i = 0; i < power_well_descs_sz; i++) { - enum i915_power_well_id id = power_well_descs[i].id; - - if (BIT_ULL(id) & skip_mask) - continue; - - power_domains->power_wells[plt_idx++].desc = - &power_well_descs[i]; - - if (id == DISP_PW_ID_NONE) - continue; - - drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8); - drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id)); - power_well_ids |= BIT_ULL(id); - } - - return 0; -} - -#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \ - __set_power_wells(power_domains, __power_well_descs, \ - ARRAY_SIZE(__power_well_descs), skip_mask) - -#define set_power_wells(power_domains, __power_well_descs) \ - set_power_wells_mask(power_domains, __power_well_descs, 0) - /** * intel_power_domains_init - initializes the power domain structures * @dev_priv: i915 device instance @@ -4889,7 +975,6 @@ __set_power_wells(struct i915_power_domains *power_domains, int intel_power_domains_init(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; - int err; dev_priv->params.disable_power_well = sanitize_disable_power_well_option(dev_priv, @@ -4900,54 +985,12 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) dev_priv->dmc.target_dc_state = sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); - BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); - mutex_init(&power_domains->lock); INIT_DELAYED_WORK(&power_domains->async_put_work, intel_display_power_put_async_work); - /* - * The enabling order will be from lower to higher indexed wells, - * the disabling order is reversed. - */ - if (!HAS_DISPLAY(dev_priv)) { - power_domains->power_well_count = 0; - err = 0; - } else if (DISPLAY_VER(dev_priv) >= 13) { - err = set_power_wells(power_domains, xelpd_power_wells); - } else if (IS_DG1(dev_priv)) { - err = set_power_wells(power_domains, dg1_power_wells); - } else if (IS_ALDERLAKE_S(dev_priv)) { - err = set_power_wells_mask(power_domains, tgl_power_wells, - BIT_ULL(TGL_DISP_PW_TC_COLD_OFF)); - } else if (IS_ROCKETLAKE(dev_priv)) { - err = set_power_wells(power_domains, rkl_power_wells); - } else if (DISPLAY_VER(dev_priv) == 12) { - err = set_power_wells(power_domains, tgl_power_wells); - } else if (DISPLAY_VER(dev_priv) == 11) { - err = set_power_wells(power_domains, icl_power_wells); - } else if (IS_GEMINILAKE(dev_priv)) { - err = set_power_wells(power_domains, glk_power_wells); - } else if (IS_BROXTON(dev_priv)) { - err = set_power_wells(power_domains, bxt_power_wells); - } else if (DISPLAY_VER(dev_priv) == 9) { - err = set_power_wells(power_domains, skl_power_wells); - } else if (IS_CHERRYVIEW(dev_priv)) { - err = set_power_wells(power_domains, chv_power_wells); - } else if (IS_BROADWELL(dev_priv)) { - err = set_power_wells(power_domains, bdw_power_wells); - } else if (IS_HASWELL(dev_priv)) { - err = set_power_wells(power_domains, hsw_power_wells); - } else if (IS_VALLEYVIEW(dev_priv)) { - err = set_power_wells(power_domains, vlv_power_wells); - } else if (IS_I830(dev_priv)) { - err = set_power_wells(power_domains, i830_power_wells); - } else { - err = set_power_wells(power_domains, i9xx_always_on_power_well); - } - - return err; + return intel_display_power_map_init(power_domains); } /** @@ -4958,7 +1001,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) */ void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) { - kfree(dev_priv->power_domains.power_wells); + 
intel_display_power_map_cleanup(&dev_priv->power_domains); } static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) @@ -6229,3 +2272,209 @@ void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m mutex_unlock(&power_domains->lock); } + +struct intel_ddi_port_domains { + enum port port_start; + enum port port_end; + enum aux_ch aux_ch_start; + enum aux_ch aux_ch_end; + + enum intel_display_power_domain ddi_lanes; + enum intel_display_power_domain ddi_io; + enum intel_display_power_domain aux_legacy_usbc; + enum intel_display_power_domain aux_tbt; +}; + +static const struct intel_ddi_port_domains +i9xx_port_domains[] = { + { + .port_start = PORT_A, + .port_end = PORT_F, + .aux_ch_start = AUX_CH_A, + .aux_ch_end = AUX_CH_F, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, + .aux_legacy_usbc = POWER_DOMAIN_AUX_A, + .aux_tbt = POWER_DOMAIN_INVALID, + }, +}; + +static const struct intel_ddi_port_domains +d11_port_domains[] = { + { + .port_start = PORT_A, + .port_end = PORT_B, + .aux_ch_start = AUX_CH_A, + .aux_ch_end = AUX_CH_B, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, + .aux_legacy_usbc = POWER_DOMAIN_AUX_A, + .aux_tbt = POWER_DOMAIN_INVALID, + }, { + .port_start = PORT_C, + .port_end = PORT_F, + .aux_ch_start = AUX_CH_C, + .aux_ch_end = AUX_CH_F, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C, + .aux_legacy_usbc = POWER_DOMAIN_AUX_C, + .aux_tbt = POWER_DOMAIN_AUX_TBT1, + }, +}; + +static const struct intel_ddi_port_domains +d12_port_domains[] = { + { + .port_start = PORT_A, + .port_end = PORT_C, + .aux_ch_start = AUX_CH_A, + .aux_ch_end = AUX_CH_C, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, + .aux_legacy_usbc = POWER_DOMAIN_AUX_A, + .aux_tbt = POWER_DOMAIN_INVALID, + }, { + .port_start = PORT_TC1, + .port_end = PORT_TC6, + .aux_ch_start = AUX_CH_USBC1, + .aux_ch_end = AUX_CH_USBC6, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, + .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, + .aux_tbt = POWER_DOMAIN_AUX_TBT1, + }, +}; + +static const struct intel_ddi_port_domains +d13_port_domains[] = { + { + .port_start = PORT_A, + .port_end = PORT_C, + .aux_ch_start = AUX_CH_A, + .aux_ch_end = AUX_CH_C, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, + .aux_legacy_usbc = POWER_DOMAIN_AUX_A, + .aux_tbt = POWER_DOMAIN_INVALID, + }, { + .port_start = PORT_TC1, + .port_end = PORT_TC4, + .aux_ch_start = AUX_CH_USBC1, + .aux_ch_end = AUX_CH_USBC4, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, + .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, + .aux_tbt = POWER_DOMAIN_AUX_TBT1, + }, { + .port_start = PORT_D_XELPD, + .port_end = PORT_E_XELPD, + .aux_ch_start = AUX_CH_D_XELPD, + .aux_ch_end = AUX_CH_E_XELPD, + + .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D, + .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D, + .aux_legacy_usbc = POWER_DOMAIN_AUX_D, + .aux_tbt = POWER_DOMAIN_INVALID, + }, +}; + +static void +intel_port_domains_for_platform(struct drm_i915_private *i915, + const struct intel_ddi_port_domains **domains, + int *domains_size) +{ + if (DISPLAY_VER(i915) >= 13) { + *domains = d13_port_domains; + *domains_size = ARRAY_SIZE(d13_port_domains); + } else if (DISPLAY_VER(i915) >= 12) { + *domains = d12_port_domains; + *domains_size = ARRAY_SIZE(d12_port_domains); + } else if (DISPLAY_VER(i915) 
>= 11) { + *domains = d11_port_domains; + *domains_size = ARRAY_SIZE(d11_port_domains); + } else { + *domains = i9xx_port_domains; + *domains_size = ARRAY_SIZE(i9xx_port_domains); + } +} + +static const struct intel_ddi_port_domains * +intel_port_domains_for_port(struct drm_i915_private *i915, enum port port) +{ + const struct intel_ddi_port_domains *domains; + int domains_size; + int i; + + intel_port_domains_for_platform(i915, &domains, &domains_size); + for (i = 0; i < domains_size; i++) + if (port >= domains[i].port_start && port <= domains[i].port_end) + return &domains[i]; + + return NULL; +} + +enum intel_display_power_domain +intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port) +{ + const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); + + if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID) + return POWER_DOMAIN_PORT_DDI_IO_A; + + return domains->ddi_io + port - domains->port_start; +} + +enum intel_display_power_domain +intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port) +{ + const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); + + if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID) + return POWER_DOMAIN_PORT_DDI_LANES_A; + + return domains->ddi_lanes + port - domains->port_start; +} + +static const struct intel_ddi_port_domains * +intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch) +{ + const struct intel_ddi_port_domains *domains; + int domains_size; + int i; + + intel_port_domains_for_platform(i915, &domains, &domains_size); + for (i = 0; i < domains_size; i++) + if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end) + return &domains[i]; + + return NULL; +} + +enum intel_display_power_domain +intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) +{ + const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); + + if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID) + return POWER_DOMAIN_AUX_A; + + return domains->aux_legacy_usbc + aux_ch - domains->aux_ch_start; +} + +enum intel_display_power_domain +intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) +{ + const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); + + if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID) + return POWER_DOMAIN_AUX_TBT1; + + return domains->aux_tbt + aux_ch - domains->aux_ch_start; +} diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index ced384b0a165..7136ea3f233e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -8,8 +8,10 @@ #include "intel_runtime_pm.h" +enum aux_ch; enum dpio_channel; enum dpio_phy; +enum port; struct drm_i915_private; struct i915_power_well; struct intel_encoder; @@ -25,10 +27,10 @@ enum intel_display_power_domain { POWER_DOMAIN_PIPE_B, POWER_DOMAIN_PIPE_C, POWER_DOMAIN_PIPE_D, - POWER_DOMAIN_PIPE_A_PANEL_FITTER, - POWER_DOMAIN_PIPE_B_PANEL_FITTER, - POWER_DOMAIN_PIPE_C_PANEL_FITTER, - POWER_DOMAIN_PIPE_D_PANEL_FITTER, + POWER_DOMAIN_PIPE_PANEL_FITTER_A, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_C, + POWER_DOMAIN_PIPE_PANEL_FITTER_D, POWER_DOMAIN_TRANSCODER_A, 
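/*
 * Worked example of the offset arithmetic used by the helpers above
 * (editor's note, values read off the d12_port_domains table): on a
 * DISPLAY_VER() == 12 platform, PORT_TC3 falls in the PORT_TC1..PORT_TC6
 * range, so intel_display_power_ddi_io_domain() returns
 * POWER_DOMAIN_PORT_DDI_IO_TC1 + (PORT_TC3 - PORT_TC1), i.e.
 * POWER_DOMAIN_PORT_DDI_IO_TC3; the AUX helpers apply the same pattern
 * relative to aux_ch_start.
 */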
POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, @@ -40,46 +42,34 @@ enum intel_display_power_domain { /* VDSC/joining for eDP/DSI transcoder (ICL) or pipe A (TGL) */ POWER_DOMAIN_TRANSCODER_VDSC_PW2, - POWER_DOMAIN_PORT_DDI_A_LANES, - POWER_DOMAIN_PORT_DDI_B_LANES, - POWER_DOMAIN_PORT_DDI_C_LANES, - POWER_DOMAIN_PORT_DDI_D_LANES, - POWER_DOMAIN_PORT_DDI_E_LANES, - POWER_DOMAIN_PORT_DDI_F_LANES, - POWER_DOMAIN_PORT_DDI_G_LANES, - POWER_DOMAIN_PORT_DDI_H_LANES, - POWER_DOMAIN_PORT_DDI_I_LANES, - - POWER_DOMAIN_PORT_DDI_LANES_TC1 = POWER_DOMAIN_PORT_DDI_D_LANES, /* tgl+ */ + POWER_DOMAIN_PORT_DDI_LANES_A, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_PORT_DDI_LANES_D, + POWER_DOMAIN_PORT_DDI_LANES_E, + POWER_DOMAIN_PORT_DDI_LANES_F, + + POWER_DOMAIN_PORT_DDI_LANES_TC1, POWER_DOMAIN_PORT_DDI_LANES_TC2, POWER_DOMAIN_PORT_DDI_LANES_TC3, POWER_DOMAIN_PORT_DDI_LANES_TC4, POWER_DOMAIN_PORT_DDI_LANES_TC5, POWER_DOMAIN_PORT_DDI_LANES_TC6, - POWER_DOMAIN_PORT_DDI_LANES_D_XELPD = POWER_DOMAIN_PORT_DDI_LANES_TC5, /* XELPD */ - POWER_DOMAIN_PORT_DDI_LANES_E_XELPD, - - POWER_DOMAIN_PORT_DDI_A_IO, - POWER_DOMAIN_PORT_DDI_B_IO, - POWER_DOMAIN_PORT_DDI_C_IO, - POWER_DOMAIN_PORT_DDI_D_IO, - POWER_DOMAIN_PORT_DDI_E_IO, - POWER_DOMAIN_PORT_DDI_F_IO, - POWER_DOMAIN_PORT_DDI_G_IO, - POWER_DOMAIN_PORT_DDI_H_IO, - POWER_DOMAIN_PORT_DDI_I_IO, + POWER_DOMAIN_PORT_DDI_IO_A, + POWER_DOMAIN_PORT_DDI_IO_B, + POWER_DOMAIN_PORT_DDI_IO_C, + POWER_DOMAIN_PORT_DDI_IO_D, + POWER_DOMAIN_PORT_DDI_IO_E, + POWER_DOMAIN_PORT_DDI_IO_F, - POWER_DOMAIN_PORT_DDI_IO_TC1 = POWER_DOMAIN_PORT_DDI_D_IO, /* tgl+ */ + POWER_DOMAIN_PORT_DDI_IO_TC1, POWER_DOMAIN_PORT_DDI_IO_TC2, POWER_DOMAIN_PORT_DDI_IO_TC3, POWER_DOMAIN_PORT_DDI_IO_TC4, POWER_DOMAIN_PORT_DDI_IO_TC5, POWER_DOMAIN_PORT_DDI_IO_TC6, - POWER_DOMAIN_PORT_DDI_IO_D_XELPD = POWER_DOMAIN_PORT_DDI_IO_TC5, /* XELPD */ - POWER_DOMAIN_PORT_DDI_IO_E_XELPD, - POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_PORT_OTHER, @@ -92,30 +82,17 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_D, POWER_DOMAIN_AUX_E, POWER_DOMAIN_AUX_F, - POWER_DOMAIN_AUX_G, - POWER_DOMAIN_AUX_H, - POWER_DOMAIN_AUX_I, - POWER_DOMAIN_AUX_USBC1 = POWER_DOMAIN_AUX_D, /* tgl+ */ + POWER_DOMAIN_AUX_USBC1, POWER_DOMAIN_AUX_USBC2, POWER_DOMAIN_AUX_USBC3, POWER_DOMAIN_AUX_USBC4, POWER_DOMAIN_AUX_USBC5, POWER_DOMAIN_AUX_USBC6, - POWER_DOMAIN_AUX_D_XELPD = POWER_DOMAIN_AUX_USBC5, /* XELPD */ - POWER_DOMAIN_AUX_E_XELPD, - POWER_DOMAIN_AUX_IO_A, - POWER_DOMAIN_AUX_C_TBT, - POWER_DOMAIN_AUX_D_TBT, - POWER_DOMAIN_AUX_E_TBT, - POWER_DOMAIN_AUX_F_TBT, - POWER_DOMAIN_AUX_G_TBT, - POWER_DOMAIN_AUX_H_TBT, - POWER_DOMAIN_AUX_I_TBT, - - POWER_DOMAIN_AUX_TBT1 = POWER_DOMAIN_AUX_D_TBT, /* tgl+ */ + + POWER_DOMAIN_AUX_TBT1, POWER_DOMAIN_AUX_TBT2, POWER_DOMAIN_AUX_TBT3, POWER_DOMAIN_AUX_TBT4, @@ -130,15 +107,20 @@ enum intel_display_power_domain { POWER_DOMAIN_INIT, POWER_DOMAIN_NUM, + POWER_DOMAIN_INVALID = POWER_DOMAIN_NUM, }; #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ - ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) + ((pipe) + POWER_DOMAIN_PIPE_PANEL_FITTER_A) #define POWER_DOMAIN_TRANSCODER(tran) \ ((tran) == TRANSCODER_EDP ? 
POWER_DOMAIN_TRANSCODER_EDP : \ (tran) + POWER_DOMAIN_TRANSCODER_A) +struct intel_power_domain_mask { + DECLARE_BITMAP(bits, POWER_DOMAIN_NUM); +}; + struct i915_power_domains { /* * Power wells needed for initialization at driver init and suspend @@ -156,41 +138,21 @@ struct i915_power_domains { struct delayed_work async_put_work; intel_wakeref_t async_put_wakeref; - u64 async_put_domains[2]; + struct intel_power_domain_mask async_put_domains[2]; struct i915_power_well *power_wells; }; struct intel_display_power_domain_set { - u64 mask; + struct intel_power_domain_mask mask; #ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM intel_wakeref_t wakerefs[POWER_DOMAIN_NUM]; #endif }; -#define for_each_power_domain(domain, mask) \ - for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ - for_each_if(BIT_ULL(domain) & (mask)) - -#define for_each_power_well(__dev_priv, __power_well) \ - for ((__power_well) = (__dev_priv)->power_domains.power_wells; \ - (__power_well) - (__dev_priv)->power_domains.power_wells < \ - (__dev_priv)->power_domains.power_well_count; \ - (__power_well)++) - -#define for_each_power_well_reverse(__dev_priv, __power_well) \ - for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ - (__dev_priv)->power_domains.power_well_count - 1; \ - (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ - (__power_well)--) - -#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \ - for_each_power_well(__dev_priv, __power_well) \ - for_each_if((__power_well)->desc->domains & (__domain_mask)) - -#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \ - for_each_power_well_reverse(__dev_priv, __power_well) \ - for_each_if((__power_well)->desc->domains & (__domain_mask)) +#define for_each_power_domain(__domain, __mask) \ + for ((__domain) = 0; (__domain) < POWER_DOMAIN_NUM; (__domain)++) \ + for_each_if(test_bit((__domain), (__mask)->bits)) int intel_power_domains_init(struct drm_i915_private *dev_priv); void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); @@ -271,17 +233,26 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, void intel_display_power_put_mask_in_set(struct drm_i915_private *i915, struct intel_display_power_domain_set *power_domain_set, - u64 mask); + struct intel_power_domain_mask *mask); static inline void intel_display_power_put_all_in_set(struct drm_i915_private *i915, struct intel_display_power_domain_set *power_domain_set) { - intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask); + intel_display_power_put_mask_in_set(i915, power_domain_set, &power_domain_set->mask); } void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m); +enum intel_display_power_domain +intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port); +enum intel_display_power_domain +intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port); +enum intel_display_power_domain +intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch); +enum intel_display_power_domain +intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch); + /* * FIXME: We should probably switch this to a 0-based scheme to be consistent * with how we now name/number DBUF_CTL instances. 
@@ -305,9 +276,4 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \ intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0) -void chv_phy_powergate_lanes(struct intel_encoder *encoder, - bool override, unsigned int mask); -bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, - enum dpio_channel ch, bool override); - #endif /* __INTEL_DISPLAY_POWER_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c new file mode 100644 index 000000000000..97b367f39f35 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -0,0 +1,1501 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_reg.h" + +#include "vlv_sideband_reg.h" + +#include "intel_display_power_map.h" +#include "intel_display_power_well.h" + +#define __LIST_INLINE_ELEMS(__elem_type, ...) \ + ((__elem_type[]) { __VA_ARGS__ }) + +#define __LIST(__elems) { \ + .list = __elems, \ + .count = ARRAY_SIZE(__elems), \ +} + +#define I915_PW_DOMAINS(...) \ + (const struct i915_power_domain_list) \ + __LIST(__LIST_INLINE_ELEMS(const enum intel_display_power_domain, __VA_ARGS__)) + +#define I915_DECL_PW_DOMAINS(__name, ...) \ + static const struct i915_power_domain_list __name = I915_PW_DOMAINS(__VA_ARGS__) + +/* Zero-length list assigns all power domains, a NULL list assigns none. */ +#define I915_PW_DOMAINS_NONE NULL +#define I915_PW_DOMAINS_ALL /* zero-length list */ + +#define I915_PW_INSTANCES(...) \ + (const struct i915_power_well_instance_list) \ + __LIST(__LIST_INLINE_ELEMS(const struct i915_power_well_instance, __VA_ARGS__)) + +#define I915_PW(_name, _domain_list, ...) 
\ + { .name = _name, .domain_list = _domain_list, ## __VA_ARGS__ } + + +struct i915_power_well_desc_list { + const struct i915_power_well_desc *list; + u8 count; +}; + +#define I915_PW_DESCRIPTORS(x) __LIST(x) + + +I915_DECL_PW_DOMAINS(i9xx_pwdoms_always_on, I915_PW_DOMAINS_ALL); + +static const struct i915_power_well_desc i9xx_power_wells_always_on[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("always-on", &i9xx_pwdoms_always_on), + ), + .ops = &i9xx_always_on_power_well_ops, + .always_on = true, + }, +}; + +static const struct i915_power_well_desc_list i9xx_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), +}; + +I915_DECL_PW_DOMAINS(i830_pwdoms_pipes, + POWER_DOMAIN_PIPE_A, + POWER_DOMAIN_PIPE_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_A, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_TRANSCODER_A, + POWER_DOMAIN_TRANSCODER_B, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc i830_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("pipes", &i830_pwdoms_pipes), + ), + .ops = &i830_pipes_power_well_ops, + }, +}; + +static const struct i915_power_well_desc_list i830_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(i830_power_wells_main), +}; + +I915_DECL_PW_DOMAINS(hsw_pwdoms_display, + POWER_DOMAIN_PIPE_B, + POWER_DOMAIN_PIPE_C, + POWER_DOMAIN_PIPE_PANEL_FITTER_A, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_C, + POWER_DOMAIN_TRANSCODER_A, + POWER_DOMAIN_TRANSCODER_B, + POWER_DOMAIN_TRANSCODER_C, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_PORT_DDI_LANES_D, + POWER_DOMAIN_PORT_CRT, /* DDI E */ + POWER_DOMAIN_VGA, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUDIO_PLAYBACK, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc hsw_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("display", &hsw_pwdoms_display, + .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, + .id = HSW_DISP_PW_GLOBAL), + ), + .ops = &hsw_power_well_ops, + .has_vga = true, + }, +}; + +static const struct i915_power_well_desc_list hsw_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(hsw_power_wells_main), +}; + +I915_DECL_PW_DOMAINS(bdw_pwdoms_display, + POWER_DOMAIN_PIPE_B, + POWER_DOMAIN_PIPE_C, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_C, + POWER_DOMAIN_TRANSCODER_A, + POWER_DOMAIN_TRANSCODER_B, + POWER_DOMAIN_TRANSCODER_C, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_PORT_DDI_LANES_D, + POWER_DOMAIN_PORT_CRT, /* DDI E */ + POWER_DOMAIN_VGA, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUDIO_PLAYBACK, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc bdw_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("display", &bdw_pwdoms_display, + .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, + .id = HSW_DISP_PW_GLOBAL), + ), + .ops = &hsw_power_well_ops, + .has_vga = true, + .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + }, +}; + +static const struct i915_power_well_desc_list bdw_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(bdw_power_wells_main), +}; + +I915_DECL_PW_DOMAINS(vlv_pwdoms_display, + POWER_DOMAIN_DISPLAY_CORE, + POWER_DOMAIN_PIPE_A, + POWER_DOMAIN_PIPE_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_A, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_TRANSCODER_A, + POWER_DOMAIN_TRANSCODER_B, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + 
POWER_DOMAIN_PORT_DSI, + POWER_DOMAIN_PORT_CRT, + POWER_DOMAIN_VGA, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUDIO_PLAYBACK, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_GMBUS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_cmn_bc, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_PORT_CRT, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_tx_bc_lanes, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc vlv_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("display", &vlv_pwdoms_display, + .vlv.idx = PUNIT_PWGT_IDX_DISP2D, + .id = VLV_DISP_PW_DISP2D), + ), + .ops = &vlv_display_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("dpio-tx-b-01", &vlv_pwdoms_dpio_tx_bc_lanes, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01), + I915_PW("dpio-tx-b-23", &vlv_pwdoms_dpio_tx_bc_lanes, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23), + I915_PW("dpio-tx-c-01", &vlv_pwdoms_dpio_tx_bc_lanes, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01), + I915_PW("dpio-tx-c-23", &vlv_pwdoms_dpio_tx_bc_lanes, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23), + ), + .ops = &vlv_dpio_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("dpio-common", &vlv_pwdoms_dpio_cmn_bc, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, + .id = VLV_DISP_PW_DPIO_CMN_BC), + ), + .ops = &vlv_dpio_cmn_power_well_ops, + }, +}; + +static const struct i915_power_well_desc_list vlv_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(vlv_power_wells_main), +}; + +I915_DECL_PW_DOMAINS(chv_pwdoms_display, + POWER_DOMAIN_DISPLAY_CORE, + POWER_DOMAIN_PIPE_A, + POWER_DOMAIN_PIPE_B, + POWER_DOMAIN_PIPE_C, + POWER_DOMAIN_PIPE_PANEL_FITTER_A, + POWER_DOMAIN_PIPE_PANEL_FITTER_B, + POWER_DOMAIN_PIPE_PANEL_FITTER_C, + POWER_DOMAIN_TRANSCODER_A, + POWER_DOMAIN_TRANSCODER_B, + POWER_DOMAIN_TRANSCODER_C, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_PORT_DDI_LANES_D, + POWER_DOMAIN_PORT_DSI, + POWER_DOMAIN_VGA, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUDIO_PLAYBACK, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_AUX_D, + POWER_DOMAIN_GMBUS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_bc, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_d, + POWER_DOMAIN_PORT_DDI_LANES_D, + POWER_DOMAIN_AUX_D, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc chv_power_wells_main[] = { + { + /* + * Pipe A power well is the new disp2d well. Pipe B and C + * power wells don't actually exist. Pipe A power well is + * required for any pipe to work. 
+ */ + .instances = &I915_PW_INSTANCES( + I915_PW("display", &chv_pwdoms_display), + ), + .ops = &chv_pipe_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("dpio-common-bc", &chv_pwdoms_dpio_cmn_bc, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, + .id = VLV_DISP_PW_DPIO_CMN_BC), + I915_PW("dpio-common-d", &chv_pwdoms_dpio_cmn_d, + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, + .id = CHV_DISP_PW_DPIO_CMN_D), + ), + .ops = &chv_dpio_cmn_power_well_ops, + }, +}; + +static const struct i915_power_well_desc_list chv_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(chv_power_wells_main), +}; + +#define SKL_PW_2_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ + POWER_DOMAIN_TRANSCODER_A, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_TRANSCODER_C, \ + POWER_DOMAIN_PORT_DDI_LANES_B, \ + POWER_DOMAIN_PORT_DDI_LANES_C, \ + POWER_DOMAIN_PORT_DDI_LANES_D, \ + POWER_DOMAIN_PORT_DDI_LANES_E, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_MMIO, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_B, \ + POWER_DOMAIN_AUX_C, \ + POWER_DOMAIN_AUX_D + +I915_DECL_PW_DOMAINS(skl_pwdoms_pw_2, + SKL_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(skl_pwdoms_dc_off, + SKL_PW_2_POWER_DOMAINS, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_GT_IRQ, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_a_e, + POWER_DOMAIN_PORT_DDI_IO_A, + POWER_DOMAIN_PORT_DDI_IO_E, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_b, + POWER_DOMAIN_PORT_DDI_IO_B, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_c, + POWER_DOMAIN_PORT_DDI_IO_C, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_d, + POWER_DOMAIN_PORT_DDI_IO_D, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc skl_power_wells_pw_1[] = { + { + /* Handled by the DMC firmware */ + .instances = &I915_PW_INSTANCES( + I915_PW("PW_1", I915_PW_DOMAINS_NONE, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, + .id = SKL_DISP_PW_1), + ), + .ops = &hsw_power_well_ops, + .always_on = true, + .has_fuses = true, + }, +}; + +static const struct i915_power_well_desc skl_power_wells_main[] = { + { + /* Handled by the DMC firmware */ + .instances = &I915_PW_INSTANCES( + I915_PW("MISC_IO", I915_PW_DOMAINS_NONE, + .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, + .id = SKL_DISP_PW_MISC_IO), + ), + .ops = &hsw_power_well_ops, + .always_on = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &skl_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), + .ops = &gen9_dc_off_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &skl_pwdoms_pw_2, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), + .ops = &hsw_power_well_ops, + .has_vga = true, + .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("DDI_IO_A_E", &skl_pwdoms_ddi_io_a_e, .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E), + I915_PW("DDI_IO_B", &skl_pwdoms_ddi_io_b, .hsw.idx = SKL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_C", &skl_pwdoms_ddi_io_c, .hsw.idx = SKL_PW_CTL_IDX_DDI_C), + I915_PW("DDI_IO_D", &skl_pwdoms_ddi_io_d, .hsw.idx = SKL_PW_CTL_IDX_DDI_D), + ), + .ops = &hsw_power_well_ops, + }, +}; + +static const struct i915_power_well_desc_list skl_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(skl_power_wells_pw_1), + I915_PW_DESCRIPTORS(skl_power_wells_main), +}; + +#define 
BXT_PW_2_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ + POWER_DOMAIN_TRANSCODER_A, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_TRANSCODER_C, \ + POWER_DOMAIN_PORT_DDI_LANES_B, \ + POWER_DOMAIN_PORT_DDI_LANES_C, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_MMIO, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_B, \ + POWER_DOMAIN_AUX_C + +I915_DECL_PW_DOMAINS(bxt_pwdoms_pw_2, + BXT_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(bxt_pwdoms_dc_off, + BXT_PW_2_POWER_DOMAINS, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_GMBUS, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_GT_IRQ, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_a, + POWER_DOMAIN_PORT_DDI_LANES_A, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_bc, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc bxt_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &bxt_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), + .ops = &gen9_dc_off_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &bxt_pwdoms_pw_2, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), + .ops = &hsw_power_well_ops, + .has_vga = true, + .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("dpio-common-a", &bxt_pwdoms_dpio_cmn_a, + .bxt.phy = DPIO_PHY1, + .id = BXT_DISP_PW_DPIO_CMN_A), + I915_PW("dpio-common-bc", &bxt_pwdoms_dpio_cmn_bc, + .bxt.phy = DPIO_PHY0, + .id = VLV_DISP_PW_DPIO_CMN_BC), + ), + .ops = &bxt_dpio_cmn_power_well_ops, + }, +}; + +static const struct i915_power_well_desc_list bxt_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(skl_power_wells_pw_1), + I915_PW_DESCRIPTORS(bxt_power_wells_main), +}; + +#define GLK_PW_2_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ + POWER_DOMAIN_TRANSCODER_A, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_TRANSCODER_C, \ + POWER_DOMAIN_PORT_DDI_LANES_B, \ + POWER_DOMAIN_PORT_DDI_LANES_C, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_MMIO, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_B, \ + POWER_DOMAIN_AUX_C + +I915_DECL_PW_DOMAINS(glk_pwdoms_pw_2, + GLK_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(glk_pwdoms_dc_off, + GLK_PW_2_POWER_DOMAINS, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_GMBUS, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_GT_IRQ, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_a, POWER_DOMAIN_PORT_DDI_IO_A); +I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_b, POWER_DOMAIN_PORT_DDI_IO_B); +I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_c, POWER_DOMAIN_PORT_DDI_IO_C); + +I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_a, + POWER_DOMAIN_PORT_DDI_LANES_A, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_b, + POWER_DOMAIN_PORT_DDI_LANES_B, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_c, + POWER_DOMAIN_PORT_DDI_LANES_C, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(glk_pwdoms_aux_a, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_IO_A, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(glk_pwdoms_aux_b, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_INIT); + 
+I915_DECL_PW_DOMAINS(glk_pwdoms_aux_c, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc glk_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &glk_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), + .ops = &gen9_dc_off_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &glk_pwdoms_pw_2, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), + .ops = &hsw_power_well_ops, + .has_vga = true, + .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("dpio-common-a", &glk_pwdoms_dpio_cmn_a, + .bxt.phy = DPIO_PHY1, + .id = BXT_DISP_PW_DPIO_CMN_A), + I915_PW("dpio-common-b", &glk_pwdoms_dpio_cmn_b, + .bxt.phy = DPIO_PHY0, + .id = VLV_DISP_PW_DPIO_CMN_BC), + I915_PW("dpio-common-c", &glk_pwdoms_dpio_cmn_c, + .bxt.phy = DPIO_PHY2, + .id = GLK_DISP_PW_DPIO_CMN_C), + ), + .ops = &bxt_dpio_cmn_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &glk_pwdoms_aux_a, .hsw.idx = GLK_PW_CTL_IDX_AUX_A), + I915_PW("AUX_B", &glk_pwdoms_aux_b, .hsw.idx = GLK_PW_CTL_IDX_AUX_B), + I915_PW("AUX_C", &glk_pwdoms_aux_c, .hsw.idx = GLK_PW_CTL_IDX_AUX_C), + I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = GLK_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = SKL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = SKL_PW_CTL_IDX_DDI_C), + ), + .ops = &hsw_power_well_ops, + }, +}; + +static const struct i915_power_well_desc_list glk_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(skl_power_wells_pw_1), + I915_PW_DESCRIPTORS(glk_power_wells_main), +}; + +/* + * ICL PW_0/PG_0 domains (HW/DMC control): + * - PCI + * - clocks except port PLL + * - central power except FBC + * - shared functions except pipe interrupts, pipe MBUS, DBUF registers + * ICL PW_1/PG_1 domains (HW/DMC control): + * - DBUF function + * - PIPE_A and its planes, except VGA + * - transcoder EDP + PSR + * - transcoder DSI + * - DDI_A + * - FBC + */ +#define ICL_PW_4_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C + +I915_DECL_PW_DOMAINS(icl_pwdoms_pw_4, + ICL_PW_4_POWER_DOMAINS, + POWER_DOMAIN_INIT); + /* VDSC/joining */ + +#define ICL_PW_3_POWER_DOMAINS \ + ICL_PW_4_POWER_DOMAINS, \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_TRANSCODER_A, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_TRANSCODER_C, \ + POWER_DOMAIN_PORT_DDI_LANES_B, \ + POWER_DOMAIN_PORT_DDI_LANES_C, \ + POWER_DOMAIN_PORT_DDI_LANES_D, \ + POWER_DOMAIN_PORT_DDI_LANES_E, \ + POWER_DOMAIN_PORT_DDI_LANES_F, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_MMIO, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_B, \ + POWER_DOMAIN_AUX_C, \ + POWER_DOMAIN_AUX_D, \ + POWER_DOMAIN_AUX_E, \ + POWER_DOMAIN_AUX_F, \ + POWER_DOMAIN_AUX_TBT1, \ + POWER_DOMAIN_AUX_TBT2, \ + POWER_DOMAIN_AUX_TBT3, \ + POWER_DOMAIN_AUX_TBT4 + +I915_DECL_PW_DOMAINS(icl_pwdoms_pw_3, + ICL_PW_3_POWER_DOMAINS, + POWER_DOMAIN_INIT); + /* + * - transcoder WD + * - KVMR (HW control) + */ + +#define ICL_PW_2_POWER_DOMAINS \ + ICL_PW_3_POWER_DOMAINS, \ + POWER_DOMAIN_TRANSCODER_VDSC_PW2 + +I915_DECL_PW_DOMAINS(icl_pwdoms_pw_2, + ICL_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); + /* + * - KVMR (HW control) + */ + +I915_DECL_PW_DOMAINS(icl_pwdoms_dc_off, + ICL_PW_2_POWER_DOMAINS, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_DC_OFF, + POWER_DOMAIN_INIT); + 
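Before the per-port declarations continue below, a note on the pattern in use: I915_PW_DOMAINS() stores its arguments in an anonymous C99 compound literal and derives the element count with ARRAY_SIZE(), and the ICL_PW_4/PW_3/PW_2_POWER_DOMAINS macros nest, so each higher power well's domain list textually contains every domain of the wells below it. A reduced, self-contained illustration of the same compound-literal trick, using hypothetical names outside the driver:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct domain_list {
	const int *list;
	unsigned char count;
};

/*
 * Same shape as __LIST_INLINE_ELEMS()/__LIST() above: the compound
 * literal provides the storage and, via sizeof, the element count.
 */
#define ELEMS(...) ((const int[]) { __VA_ARGS__ })
#define DOMAINS(...) \
	{ .list = ELEMS(__VA_ARGS__), .count = ARRAY_SIZE(ELEMS(__VA_ARGS__)) }

#define PW_4_DOMAINS 40, 41		/* innermost well */
#define PW_3_DOMAINS PW_4_DOMAINS, 30	/* superset of PW_4's list */

static const struct domain_list pw_3 = DOMAINS(PW_3_DOMAINS);

int main(void)
{
	printf("pw_3 carries %d domains\n", pw_3.count);	/* prints 3 */
	return 0;
}
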
+I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_d, POWER_DOMAIN_PORT_DDI_IO_D); +I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_e, POWER_DOMAIN_PORT_DDI_IO_E); +I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_f, POWER_DOMAIN_PORT_DDI_IO_F); + +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_a, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_IO_A); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_b, POWER_DOMAIN_AUX_B); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_c, POWER_DOMAIN_AUX_C); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_d, POWER_DOMAIN_AUX_D); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_e, POWER_DOMAIN_AUX_E); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_f, POWER_DOMAIN_AUX_F); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt1, POWER_DOMAIN_AUX_TBT1); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt2, POWER_DOMAIN_AUX_TBT2); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt3, POWER_DOMAIN_AUX_TBT3); +I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT4); + +static const struct i915_power_well_desc icl_power_wells_pw_1[] = { + { + /* Handled by the DMC firmware */ + .instances = &I915_PW_INSTANCES( + I915_PW("PW_1", I915_PW_DOMAINS_NONE, + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .id = SKL_DISP_PW_1), + ), + .ops = &hsw_power_well_ops, + .always_on = true, + .has_fuses = true, + }, +}; + +static const struct i915_power_well_desc icl_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &icl_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), + .ops = &gen9_dc_off_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &icl_pwdoms_pw_2, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), + .ops = &hsw_power_well_ops, + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_3", &icl_pwdoms_pw_3, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .id = ICL_DISP_PW_3), + ), + .ops = &hsw_power_well_ops, + .has_vga = true, + .irq_pipe_mask = BIT(PIPE_B), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), + I915_PW("DDI_IO_D", &icl_pwdoms_ddi_io_d, .hsw.idx = ICL_PW_CTL_IDX_DDI_D), + I915_PW("DDI_IO_E", &icl_pwdoms_ddi_io_e, .hsw.idx = ICL_PW_CTL_IDX_DDI_E), + I915_PW("DDI_IO_F", &icl_pwdoms_ddi_io_f, .hsw.idx = ICL_PW_CTL_IDX_DDI_F), + ), + .ops = &icl_ddi_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), + I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), + I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), + I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = ICL_PW_CTL_IDX_AUX_D), + I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = ICL_PW_CTL_IDX_AUX_E), + I915_PW("AUX_F", &icl_pwdoms_aux_f, .hsw.idx = ICL_PW_CTL_IDX_AUX_F), + ), + .ops = &icl_aux_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1), + I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2), + I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3), + I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4), + ), + .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_4", &icl_pwdoms_pw_4, + .hsw.idx = ICL_PW_CTL_IDX_PW_4), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_C), + .has_fuses = true, + 
}, +}; + +static const struct i915_power_well_desc_list icl_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(icl_power_wells_main), +}; + +#define TGL_PW_5_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_D, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_D, \ + POWER_DOMAIN_TRANSCODER_D + +I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_5, + TGL_PW_5_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +#define TGL_PW_4_POWER_DOMAINS \ + TGL_PW_5_POWER_DOMAINS, \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ + POWER_DOMAIN_TRANSCODER_C + +I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_4, + TGL_PW_4_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +#define TGL_PW_3_POWER_DOMAINS \ + TGL_PW_4_POWER_DOMAINS, \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_PORT_DDI_LANES_TC1, \ + POWER_DOMAIN_PORT_DDI_LANES_TC2, \ + POWER_DOMAIN_PORT_DDI_LANES_TC3, \ + POWER_DOMAIN_PORT_DDI_LANES_TC4, \ + POWER_DOMAIN_PORT_DDI_LANES_TC5, \ + POWER_DOMAIN_PORT_DDI_LANES_TC6, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_MMIO, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_USBC1, \ + POWER_DOMAIN_AUX_USBC2, \ + POWER_DOMAIN_AUX_USBC3, \ + POWER_DOMAIN_AUX_USBC4, \ + POWER_DOMAIN_AUX_USBC5, \ + POWER_DOMAIN_AUX_USBC6, \ + POWER_DOMAIN_AUX_TBT1, \ + POWER_DOMAIN_AUX_TBT2, \ + POWER_DOMAIN_AUX_TBT3, \ + POWER_DOMAIN_AUX_TBT4, \ + POWER_DOMAIN_AUX_TBT5, \ + POWER_DOMAIN_AUX_TBT6 + +I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_3, + TGL_PW_3_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_2, + TGL_PW_3_POWER_DOMAINS, + POWER_DOMAIN_TRANSCODER_VDSC_PW2, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(tgl_pwdoms_dc_off, + TGL_PW_3_POWER_DOMAINS, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc1, POWER_DOMAIN_PORT_DDI_IO_TC1); +I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc2, POWER_DOMAIN_PORT_DDI_IO_TC2); +I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc3, POWER_DOMAIN_PORT_DDI_IO_TC3); +I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc4, POWER_DOMAIN_PORT_DDI_IO_TC4); +I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc5, POWER_DOMAIN_PORT_DDI_IO_TC5); +I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc6, POWER_DOMAIN_PORT_DDI_IO_TC6); + +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc1, POWER_DOMAIN_AUX_USBC1); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc2, POWER_DOMAIN_AUX_USBC2); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc3, POWER_DOMAIN_AUX_USBC3); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc4, POWER_DOMAIN_AUX_USBC4); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc5, POWER_DOMAIN_AUX_USBC5); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc6, POWER_DOMAIN_AUX_USBC6); + +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt5, POWER_DOMAIN_AUX_TBT5); +I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt6, POWER_DOMAIN_AUX_TBT6); + +I915_DECL_PW_DOMAINS(tgl_pwdoms_tc_cold_off, + POWER_DOMAIN_AUX_USBC1, + POWER_DOMAIN_AUX_USBC2, + POWER_DOMAIN_AUX_USBC3, + POWER_DOMAIN_AUX_USBC4, + POWER_DOMAIN_AUX_USBC5, + POWER_DOMAIN_AUX_USBC6, + POWER_DOMAIN_AUX_TBT1, + POWER_DOMAIN_AUX_TBT2, + POWER_DOMAIN_AUX_TBT3, + POWER_DOMAIN_AUX_TBT4, + POWER_DOMAIN_AUX_TBT5, + POWER_DOMAIN_AUX_TBT6, + POWER_DOMAIN_TC_COLD_OFF); + +static const struct i915_power_well_desc tgl_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &tgl_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), + .ops = &gen9_dc_off_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", 
&tgl_pwdoms_pw_2, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), + .ops = &hsw_power_well_ops, + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_3", &tgl_pwdoms_pw_3, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .id = ICL_DISP_PW_3), + ), + .ops = &hsw_power_well_ops, + .has_vga = true, + .irq_pipe_mask = BIT(PIPE_B), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), + I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), + I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), + I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3), + I915_PW("DDI_IO_TC4", &tgl_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4), + I915_PW("DDI_IO_TC5", &tgl_pwdoms_ddi_io_tc5, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5), + I915_PW("DDI_IO_TC6", &tgl_pwdoms_ddi_io_tc6, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6), + ), + .ops = &icl_ddi_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_4", &tgl_pwdoms_pw_4, + .hsw.idx = ICL_PW_CTL_IDX_PW_4), + ), + .ops = &hsw_power_well_ops, + .has_fuses = true, + .irq_pipe_mask = BIT(PIPE_C), + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_5", &tgl_pwdoms_pw_5, + .hsw.idx = TGL_PW_CTL_IDX_PW_5), + ), + .ops = &hsw_power_well_ops, + .has_fuses = true, + .irq_pipe_mask = BIT(PIPE_D), + }, +}; + +static const struct i915_power_well_desc tgl_power_wells_tc_cold_off[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("TC_cold_off", &tgl_pwdoms_tc_cold_off, + .id = TGL_DISP_PW_TC_COLD_OFF), + ), + .ops = &tgl_tc_cold_off_ops, + }, +}; + +static const struct i915_power_well_desc tgl_power_wells_aux[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), + I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), + I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), + I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), + I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), + I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3), + I915_PW("AUX_USBC4", &tgl_pwdoms_aux_usbc4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4), + I915_PW("AUX_USBC5", &tgl_pwdoms_aux_usbc5, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5), + I915_PW("AUX_USBC6", &tgl_pwdoms_aux_usbc6, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6), + ), + .ops = &icl_aux_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1), + I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2), + I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3), + I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4), + I915_PW("AUX_TBT5", &tgl_pwdoms_aux_tbt5, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5), + I915_PW("AUX_TBT6", &tgl_pwdoms_aux_tbt6, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6), + ), + .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, + }, +}; + +static const struct i915_power_well_desc_list tgl_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(tgl_power_wells_main), + 
I915_PW_DESCRIPTORS(tgl_power_wells_tc_cold_off), + I915_PW_DESCRIPTORS(tgl_power_wells_aux), +}; + +static const struct i915_power_well_desc_list adls_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(tgl_power_wells_main), + I915_PW_DESCRIPTORS(tgl_power_wells_aux), +}; + +#define RKL_PW_4_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ + POWER_DOMAIN_TRANSCODER_C + +I915_DECL_PW_DOMAINS(rkl_pwdoms_pw_4, + RKL_PW_4_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +#define RKL_PW_3_POWER_DOMAINS \ + RKL_PW_4_POWER_DOMAINS, \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_PORT_DDI_LANES_TC1, \ + POWER_DOMAIN_PORT_DDI_LANES_TC2, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_MMIO, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_USBC1, \ + POWER_DOMAIN_AUX_USBC2 + +I915_DECL_PW_DOMAINS(rkl_pwdoms_pw_3, + RKL_PW_3_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +/* + * There is no PW_2/PG_2 on RKL. + * + * RKL PW_1/PG_1 domains (under HW/DMC control): + * - DBUF function (note: registers are in PW0) + * - PIPE_A and its planes and VDSC/joining, except VGA + * - transcoder A + * - DDI_A and DDI_B + * - FBC + * + * RKL PW_0/PG_0 domains (under HW/DMC control): + * - PCI + * - clocks except port PLL + * - shared functions: + * * interrupts except pipe interrupts + * * MBus except PIPE_MBUS_DBOX_CTL + * * DBUF registers + * - central power except FBC + * - top-level GTC (DDI-level GTC is in the well associated with the DDI) + */ + +I915_DECL_PW_DOMAINS(rkl_pwdoms_dc_off, + RKL_PW_3_POWER_DOMAINS, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc rkl_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &rkl_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), + .ops = &gen9_dc_off_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_3", &rkl_pwdoms_pw_3, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .id = ICL_DISP_PW_3), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_B), + .has_vga = true, + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_4", &rkl_pwdoms_pw_4, + .hsw.idx = ICL_PW_CTL_IDX_PW_4), + ), + .ops = &hsw_power_well_ops, + .has_fuses = true, + .irq_pipe_mask = BIT(PIPE_C), + }, +}; + +static const struct i915_power_well_desc rkl_power_wells_ddi_aux[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), + I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), + ), + .ops = &icl_ddi_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), + I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), + I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), + I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), + ), + .ops = &icl_aux_power_well_ops, + }, +}; + +static const struct i915_power_well_desc_list rkl_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(rkl_power_wells_main), + 
I915_PW_DESCRIPTORS(rkl_power_wells_ddi_aux), +}; + +/* + * DG1 onwards Audio MMIO/VERBS lies in PG0 power well. + */ +#define DG1_PW_3_POWER_DOMAINS \ + TGL_PW_4_POWER_DOMAINS, \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_TRANSCODER_B, \ + POWER_DOMAIN_PORT_DDI_LANES_TC1, \ + POWER_DOMAIN_PORT_DDI_LANES_TC2, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_USBC1, \ + POWER_DOMAIN_AUX_USBC2 + +I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_3, + DG1_PW_3_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(dg1_pwdoms_dc_off, + DG1_PW_3_POWER_DOMAINS, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_2, + DG1_PW_3_POWER_DOMAINS, + POWER_DOMAIN_TRANSCODER_VDSC_PW2, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc dg1_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &dg1_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), + .ops = &gen9_dc_off_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &dg1_pwdoms_pw_2, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), + .ops = &hsw_power_well_ops, + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_3", &dg1_pwdoms_pw_3, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .id = ICL_DISP_PW_3), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_B), + .has_vga = true, + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_4", &tgl_pwdoms_pw_4, + .hsw.idx = ICL_PW_CTL_IDX_PW_4), + ), + .ops = &hsw_power_well_ops, + .has_fuses = true, + .irq_pipe_mask = BIT(PIPE_C), + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_5", &tgl_pwdoms_pw_5, + .hsw.idx = TGL_PW_CTL_IDX_PW_5), + ), + .ops = &hsw_power_well_ops, + .has_fuses = true, + .irq_pipe_mask = BIT(PIPE_D), + }, +}; + +static const struct i915_power_well_desc_list dg1_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(dg1_power_wells_main), + I915_PW_DESCRIPTORS(rkl_power_wells_ddi_aux), +}; + +/* + * XE_LPD Power Domains + * + * Previous platforms required that PG(n-1) be enabled before PG(n). That + * dependency chain turns into a dependency tree on XE_LPD: + * + * PG0 + * | + * --PG1-- + * / \ + * PGA --PG2-- + * / | \ + * PGB PGC PGD + * + * Power wells must be enabled from top to bottom and disabled from bottom + * to top. This allows pipes to be power gated independently. 
+ */ + +#define XELPD_PW_D_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_D, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_D, \ + POWER_DOMAIN_TRANSCODER_D + +I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_d, + XELPD_PW_D_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +#define XELPD_PW_C_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_C, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_C, \ + POWER_DOMAIN_TRANSCODER_C + +I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_c, + XELPD_PW_C_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +#define XELPD_PW_B_POWER_DOMAINS \ + POWER_DOMAIN_PIPE_B, \ + POWER_DOMAIN_PIPE_PANEL_FITTER_B, \ + POWER_DOMAIN_TRANSCODER_B + +I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_b, + XELPD_PW_B_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a, + POWER_DOMAIN_PIPE_A, + POWER_DOMAIN_PIPE_PANEL_FITTER_A, + POWER_DOMAIN_INIT); + +#define XELPD_PW_2_POWER_DOMAINS \ + XELPD_PW_B_POWER_DOMAINS, \ + XELPD_PW_C_POWER_DOMAINS, \ + XELPD_PW_D_POWER_DOMAINS, \ + POWER_DOMAIN_PORT_DDI_LANES_C, \ + POWER_DOMAIN_PORT_DDI_LANES_D, \ + POWER_DOMAIN_PORT_DDI_LANES_E, \ + POWER_DOMAIN_PORT_DDI_LANES_TC1, \ + POWER_DOMAIN_PORT_DDI_LANES_TC2, \ + POWER_DOMAIN_PORT_DDI_LANES_TC3, \ + POWER_DOMAIN_PORT_DDI_LANES_TC4, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_AUX_C, \ + POWER_DOMAIN_AUX_D, \ + POWER_DOMAIN_AUX_E, \ + POWER_DOMAIN_AUX_USBC1, \ + POWER_DOMAIN_AUX_USBC2, \ + POWER_DOMAIN_AUX_USBC3, \ + POWER_DOMAIN_AUX_USBC4, \ + POWER_DOMAIN_AUX_TBT1, \ + POWER_DOMAIN_AUX_TBT2, \ + POWER_DOMAIN_AUX_TBT3, \ + POWER_DOMAIN_AUX_TBT4 + +I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_2, + XELPD_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +/* + * XELPD PW_1/PG_1 domains (under HW/DMC control): + * - DBUF function (registers are in PW0) + * - Transcoder A + * - DDI_A and DDI_B + * + * XELPD PW_0/PW_1 domains (under HW/DMC control): + * - PCI + * - Clocks except port PLL + * - Shared functions: + * * interrupts except pipe interrupts + * * MBus except PIPE_MBUS_DBOX_CTL + * * DBUF registers + * - Central power except FBC + * - Top-level GTC (DDI-level GTC is in the well associated with the DDI) + */ + +I915_DECL_PW_DOMAINS(xelpd_pwdoms_dc_off, + XELPD_PW_2_POWER_DOMAINS, + POWER_DOMAIN_PORT_DSI, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_INIT); + +static const struct i915_power_well_desc xelpd_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &xelpd_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), + .ops = &gen9_dc_off_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &xelpd_pwdoms_pw_2, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), + .ops = &hsw_power_well_ops, + .has_vga = true, + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_A", &xelpd_pwdoms_pw_a, + .hsw.idx = XELPD_PW_CTL_IDX_PW_A), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_A), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_B", &xelpd_pwdoms_pw_b, + .hsw.idx = XELPD_PW_CTL_IDX_PW_B), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_B), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_C", &xelpd_pwdoms_pw_c, + .hsw.idx = XELPD_PW_CTL_IDX_PW_C), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_C), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_D", &xelpd_pwdoms_pw_d, + .hsw.idx = XELPD_PW_CTL_IDX_PW_D), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_D), + .has_fuses = true, 
+ }, { + .instances = &I915_PW_INSTANCES( + I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A), + I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B), + I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C), + I915_PW("DDI_IO_D", &icl_pwdoms_ddi_io_d, .hsw.idx = XELPD_PW_CTL_IDX_DDI_D), + I915_PW("DDI_IO_E", &icl_pwdoms_ddi_io_e, .hsw.idx = XELPD_PW_CTL_IDX_DDI_E), + I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1), + I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2), + I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3), + I915_PW("DDI_IO_TC4", &tgl_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4), + ), + .ops = &icl_ddi_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A), + I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B), + I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C), + I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = XELPD_PW_CTL_IDX_AUX_D), + I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = XELPD_PW_CTL_IDX_AUX_E), + I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1), + I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2), + I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3), + I915_PW("AUX_USBC4", &tgl_pwdoms_aux_usbc4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4), + ), + .ops = &icl_aux_power_well_ops, + .fixed_enable_delay = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1), + I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2), + I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3), + I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4), + ), + .ops = &icl_aux_power_well_ops, + .is_tc_tbt = true, + }, +}; + +static const struct i915_power_well_desc_list xelpd_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(xelpd_power_wells_main), +}; + +static void init_power_well_domains(const struct i915_power_well_instance *inst, + struct i915_power_well *power_well) +{ + int j; + + if (!inst->domain_list) + return; + + if (inst->domain_list->count == 0) { + bitmap_fill(power_well->domains.bits, POWER_DOMAIN_NUM); + + return; + } + + for (j = 0; j < inst->domain_list->count; j++) + set_bit(inst->domain_list->list[j], power_well->domains.bits); +} + +#define for_each_power_well_instance_in_desc_list(_desc_list, _desc_count, _desc, _inst) \ + for ((_desc) = (_desc_list); (_desc) - (_desc_list) < (_desc_count); (_desc)++) \ + for ((_inst) = (_desc)->instances->list; \ + (_inst) - (_desc)->instances->list < (_desc)->instances->count; \ + (_inst)++) + +#define for_each_power_well_instance(_desc_list, _desc_count, _descs, _desc, _inst) \ + for ((_descs) = (_desc_list); \ + (_descs) - (_desc_list) < (_desc_count); \ + (_descs)++) \ + for_each_power_well_instance_in_desc_list((_descs)->list, (_descs)->count, \ + (_desc), (_inst)) + +static int +__set_power_wells(struct i915_power_domains *power_domains, + const struct i915_power_well_desc_list *power_well_descs, + int power_well_descs_sz) +{ + struct drm_i915_private *i915 = container_of(power_domains, + struct drm_i915_private, + power_domains); + u64 
power_well_ids = 0; + const struct i915_power_well_desc_list *desc_list; + const struct i915_power_well_desc *desc; + const struct i915_power_well_instance *inst; + int power_well_count = 0; + int plt_idx = 0; + + for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc_list, desc, inst) + power_well_count++; + + power_domains->power_well_count = power_well_count; + power_domains->power_wells = + kcalloc(power_well_count, + sizeof(*power_domains->power_wells), + GFP_KERNEL); + if (!power_domains->power_wells) + return -ENOMEM; + + for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc_list, desc, inst) { + struct i915_power_well *pw = &power_domains->power_wells[plt_idx]; + enum i915_power_well_id id = inst->id; + + pw->desc = desc; + drm_WARN_ON(&i915->drm, + overflows_type(inst - desc->instances->list, pw->instance_idx)); + pw->instance_idx = inst - desc->instances->list; + + init_power_well_domains(inst, pw); + + plt_idx++; + + if (id == DISP_PW_ID_NONE) + continue; + + drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8); + drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id)); + power_well_ids |= BIT_ULL(id); + } + + return 0; +} + +#define set_power_wells(power_domains, __power_well_descs) \ + __set_power_wells(power_domains, __power_well_descs, \ + ARRAY_SIZE(__power_well_descs)) + +/** + * intel_display_power_map_init - initialize power domain -> power well mappings + * @power_domains: power domain state + * + * Creates all the power wells for the current platform, initializes the + * dynamic state for them and initializes the mapping of each power well to + * all the power domains the power well belongs to. + */ +int intel_display_power_map_init(struct i915_power_domains *power_domains) +{ + struct drm_i915_private *i915 = container_of(power_domains, + struct drm_i915_private, + power_domains); + /* + * The enabling order will be from lower to higher indexed wells, + * the disabling order is reversed. 
+ */ + if (!HAS_DISPLAY(i915)) { + power_domains->power_well_count = 0; + return 0; + } + + if (DISPLAY_VER(i915) >= 13) + return set_power_wells(power_domains, xelpd_power_wells); + else if (IS_DG1(i915)) + return set_power_wells(power_domains, dg1_power_wells); + else if (IS_ALDERLAKE_S(i915)) + return set_power_wells(power_domains, adls_power_wells); + else if (IS_ROCKETLAKE(i915)) + return set_power_wells(power_domains, rkl_power_wells); + else if (DISPLAY_VER(i915) == 12) + return set_power_wells(power_domains, tgl_power_wells); + else if (DISPLAY_VER(i915) == 11) + return set_power_wells(power_domains, icl_power_wells); + else if (IS_GEMINILAKE(i915)) + return set_power_wells(power_domains, glk_power_wells); + else if (IS_BROXTON(i915)) + return set_power_wells(power_domains, bxt_power_wells); + else if (DISPLAY_VER(i915) == 9) + return set_power_wells(power_domains, skl_power_wells); + else if (IS_CHERRYVIEW(i915)) + return set_power_wells(power_domains, chv_power_wells); + else if (IS_BROADWELL(i915)) + return set_power_wells(power_domains, bdw_power_wells); + else if (IS_HASWELL(i915)) + return set_power_wells(power_domains, hsw_power_wells); + else if (IS_VALLEYVIEW(i915)) + return set_power_wells(power_domains, vlv_power_wells); + else if (IS_I830(i915)) + return set_power_wells(power_domains, i830_power_wells); + else + return set_power_wells(power_domains, i9xx_power_wells); +} + +/** + * intel_display_power_map_cleanup - clean up power domain -> power well mappings + * @power_domains: power domain state + * + * Cleans up all the state that was initialized by intel_display_power_map_init(). + */ +void intel_display_power_map_cleanup(struct i915_power_domains *power_domains) +{ + kfree(power_domains->power_wells); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.h b/drivers/gpu/drm/i915/display/intel_display_power_map.h new file mode 100644 index 000000000000..da8f7055a44c --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_DISPLAY_POWER_MAP_H__ +#define __INTEL_DISPLAY_POWER_MAP_H__ + +struct i915_power_domains; + +int intel_display_power_map_init(struct i915_power_domains *power_domains); +void intel_display_power_map_cleanup(struct i915_power_domains *power_domains); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 2a0fb9d9c60f..5be18eb94042 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -4,7 +4,65 @@ */ #include "i915_drv.h" +#include "i915_irq.h" +#include "intel_combo_phy.h" +#include "intel_combo_phy_regs.h" +#include "intel_crt.h" +#include "intel_de.h" #include "intel_display_power_well.h" +#include "intel_display_types.h" +#include "intel_dmc.h" +#include "intel_dpio_phy.h" +#include "intel_dpll.h" +#include "intel_hotplug.h" +#include "intel_pcode.h" +#include "intel_pm.h" +#include "intel_pps.h" +#include "intel_tc.h" +#include "intel_vga.h" +#include "vlv_sideband.h" +#include "vlv_sideband_reg.h" + +struct i915_power_well_regs { + i915_reg_t bios; + i915_reg_t driver; + i915_reg_t kvmr; + i915_reg_t debug; +}; + +struct i915_power_well_ops { + const struct i915_power_well_regs *regs; + /* + * Synchronize the well's hw state to match the current sw state, for + * example enable/disable it based on the current refcount. 
Called + * during driver init and resume time, possibly after first calling + * the enable/disable handlers. + */ + void (*sync_hw)(struct drm_i915_private *i915, + struct i915_power_well *power_well); + /* + * Enable the well and resources that depend on it (for example + * interrupts located on the well). Called after the 0->1 refcount + * transition. + */ + void (*enable)(struct drm_i915_private *i915, + struct i915_power_well *power_well); + /* + * Disable the well and resources that depend on it. Called after + * the 1->0 refcount transition. + */ + void (*disable)(struct drm_i915_private *i915, + struct i915_power_well *power_well); + /* Returns the hw enabled state. */ + bool (*is_enabled)(struct drm_i915_private *i915, + struct i915_power_well *power_well); +}; + +static const struct i915_power_well_instance * +i915_power_well_instance(const struct i915_power_well *power_well) +{ + return &power_well->desc->instances->list[power_well->instance_idx]; +} struct i915_power_well * lookup_power_well(struct drm_i915_private *i915, @@ -13,7 +71,7 @@ lookup_power_well(struct drm_i915_private *i915, struct i915_power_well *power_well; for_each_power_well(i915, power_well) - if (power_well->desc->id == power_well_id) + if (i915_power_well_instance(power_well)->id == power_well_id) return power_well; /* @@ -32,7 +90,7 @@ lookup_power_well(struct drm_i915_private *i915, void intel_power_well_enable(struct drm_i915_private *i915, struct i915_power_well *power_well) { - drm_dbg_kms(&i915->drm, "enabling %s\n", power_well->desc->name); + drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well)); power_well->desc->ops->enable(i915, power_well); power_well->hw_enabled = true; } @@ -40,7 +98,7 @@ void intel_power_well_enable(struct drm_i915_private *i915, void intel_power_well_disable(struct drm_i915_private *i915, struct i915_power_well *power_well) { - drm_dbg_kms(&i915->drm, "disabling %s\n", power_well->desc->name); + drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well)); power_well->hw_enabled = false; power_well->desc->ops->disable(i915, power_well); } @@ -65,7 +123,7 @@ void intel_power_well_put(struct drm_i915_private *i915, { drm_WARN(&i915->drm, !power_well->count, "Use count on power well %s is already zero", - power_well->desc->name); + i915_power_well_instance(power_well)->name); if (!--power_well->count) intel_power_well_disable(i915, power_well); @@ -99,15 +157,1756 @@ bool intel_power_well_is_always_on(struct i915_power_well *power_well) const char *intel_power_well_name(struct i915_power_well *power_well) { - return power_well->desc->name; + return i915_power_well_instance(power_well)->name; } -u64 intel_power_well_domains(struct i915_power_well *power_well) +struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well) { - return power_well->desc->domains; + return &power_well->domains; } int intel_power_well_refcount(struct i915_power_well *power_well) { return power_well->count; } + +/* + * Starting with Haswell, we have a "Power Down Well" that can be turned off + * when not needed anymore. We have 4 registers that can request the power well + * to be enabled, and it will only be disabled if none of the registers is + * requesting it to be enabled. 
+ */ +static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, + u8 irq_pipe_mask, bool has_vga) +{ + if (has_vga) + intel_vga_reset_io_mem(dev_priv); + + if (irq_pipe_mask) + gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); +} + +static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, + u8 irq_pipe_mask) +{ + if (irq_pipe_mask) + gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); +} + +#define ICL_AUX_PW_TO_CH(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) + +#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) + +static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well) +{ + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; + + return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) : + ICL_AUX_PW_TO_CH(pw_idx); +} + +static struct intel_digital_port * +aux_ch_to_digital_port(struct drm_i915_private *dev_priv, + enum aux_ch aux_ch) +{ + struct intel_digital_port *dig_port = NULL; + struct intel_encoder *encoder; + + for_each_intel_encoder(&dev_priv->drm, encoder) { + /* We'll check the MST primary port */ + if (encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + dig_port = enc_to_dig_port(encoder); + if (!dig_port) + continue; + + if (dig_port->aux_ch != aux_ch) { + dig_port = NULL; + continue; + } + + break; + } + + return dig_port; +} + +static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915, + const struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); + struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch); + + return intel_port_to_phy(i915, dig_port->base.port); +} + +static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, + bool timeout_expected) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; + + /* + * For some power wells we're not supposed to watch the status bit for + * an ack, but rather just wait a fixed amount of time and then + * proceed. This is only used on DG2. + */ + if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) { + usleep_range(600, 1200); + return; + } + + /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ + if (intel_de_wait_for_set(dev_priv, regs->driver, + HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) { + drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n", + intel_power_well_name(power_well)); + + drm_WARN_ON(&dev_priv->drm, !timeout_expected); + + } +} + +static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, + const struct i915_power_well_regs *regs, + int pw_idx) +{ + u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); + u32 ret; + + ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0; + ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0; + if (regs->kvmr.reg) + ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0; + ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0; + + return ret; +} + +static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; + bool disabled; + u32 reqs; + + /* + * Bspec doesn't require waiting for PWs to get disabled, but still do + * this for paranoia. 
The known cases where a PW will be forced on: + * - a KVMR request on any power well via the KVMR request register + * - a DMC request on PW1 and MISC_IO power wells via the BIOS and + * DEBUG request registers + * Skip the wait in case any of the request bits are set and print a + * diagnostic message. + */ + wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) & + HSW_PWR_WELL_CTL_STATE(pw_idx))) || + (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); + if (disabled) + return; + + drm_dbg_kms(&dev_priv->drm, + "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", + intel_power_well_name(power_well), + !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); +} + +static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, + enum skl_power_gate pg) +{ + /* Timeout 5us for PG#0, for other PGs 1us */ + drm_WARN_ON(&dev_priv->drm, + intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS, + SKL_FUSE_PG_DIST_STATUS(pg), 1)); +} + +static void hsw_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; + u32 val; + + if (power_well->desc->has_fuses) { + enum skl_power_gate pg; + + pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : + SKL_PW_CTL_IDX_TO_PG(pw_idx); + + /* Wa_16013190616:adlp */ + if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1) + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC); + + /* + * For PW1 we have to wait both for the PW0/PG0 fuse state + * before enabling the power well and PW1/PG1's own fuse + * state after the enabling. For all other power wells with + * fuses we only have to wait for that PW/PG's fuse state + * after the enabling. + */ + if (pg == SKL_PG1) + gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); + } + + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val | HSW_PWR_WELL_CTL_REQ(pw_idx)); + + hsw_wait_for_power_well_enable(dev_priv, power_well, false); + + if (power_well->desc->has_fuses) { + enum skl_power_gate pg; + + pg = DISPLAY_VER(dev_priv) >= 11 ? 
ICL_PW_CTL_IDX_TO_PG(pw_idx) : + SKL_PW_CTL_IDX_TO_PG(pw_idx); + gen9_wait_for_power_well_fuses(dev_priv, pg); + } + + hsw_power_well_post_enable(dev_priv, + power_well->desc->irq_pipe_mask, + power_well->desc->has_vga); +} + +static void hsw_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; + u32 val; + + hsw_power_well_pre_disable(dev_priv, + power_well->desc->irq_pipe_mask); + + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); + hsw_wait_for_power_well_disable(dev_priv, power_well); +} + +static void +icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; + enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); + u32 val; + + drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); + + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val | HSW_PWR_WELL_CTL_REQ(pw_idx)); + + if (DISPLAY_VER(dev_priv) < 12) { + val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); + intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), + val | ICL_LANE_ENABLE_AUX); + } + + hsw_wait_for_power_well_enable(dev_priv, power_well, false); + + /* Display WA #1178: icl */ + if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && + !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { + val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx)); + val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; + intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val); + } +} + +static void +icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; + enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); + u32 val; + + drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); + + val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); + intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), + val & ~ICL_LANE_ENABLE_AUX); + + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); + + hsw_wait_for_power_well_disable(dev_priv, power_well); +} + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + +static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, + struct intel_digital_port *dig_port) +{ + if (drm_WARN_ON(&dev_priv->drm, !dig_port)) + return; + + if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) + return; + + drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port)); +} + +#else + +static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, + struct intel_digital_port *dig_port) +{ +} + +#endif + +#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1) + +static void icl_tc_cold_exit(struct drm_i915_private *i915) +{ + int ret, tries = 0; + + while (1) { + ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0, + 250, 1); + if (ret != -EAGAIN || ++tries == 3) + break; + msleep(1); + } + + /* Spec states that TC cold exit can take 
up to 1ms to complete */ + if (!ret) + msleep(1); + + /* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */ + drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" : + "succeeded"); +} + +static void +icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well); + struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + bool is_tbt = power_well->desc->is_tc_tbt; + bool timeout_expected; + u32 val; + + icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); + + val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch)); + val &= ~DP_AUX_CH_CTL_TBT_IO; + if (is_tbt) + val |= DP_AUX_CH_CTL_TBT_IO; + intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val); + + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val | HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx)); + + /* + * An AUX timeout is expected if the TBT DP tunnel is down, + * or when we need to enable AUX on a legacy TypeC port as part of + * the TC-cold exit sequence. + */ + timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port); + if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) + icl_tc_cold_exit(dev_priv); + + hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected); + + if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) { + enum tc_port tc_port; + + tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx); + intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), + HIP_INDEX_VAL(tc_port, 0x2)); + + if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port), + DKL_CMN_UC_DW27_UC_HEALTH, 1)) + drm_warn(&dev_priv->drm, + "Timeout waiting for TC uC health\n"); + } +} + +static void +icl_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); + + if (intel_phy_is_tc(dev_priv, phy)) + return icl_tc_phy_aux_power_well_enable(dev_priv, power_well); + else if (IS_ICELAKE(dev_priv)) + return icl_combo_phy_aux_power_well_enable(dev_priv, + power_well); + else + return hsw_power_well_enable(dev_priv, power_well); +} + +static void +icl_aux_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); + + if (intel_phy_is_tc(dev_priv, phy)) + return hsw_power_well_disable(dev_priv, power_well); + else if (IS_ICELAKE(dev_priv)) + return icl_combo_phy_aux_power_well_disable(dev_priv, + power_well); + else + return hsw_power_well_disable(dev_priv, power_well); +} + +/* + * We should only use the power well if we explicitly asked the hardware to + * enable it, so check if it's enabled and also check if we've requested it to + * be enabled.
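+ *
+ * (A sketch, not from Bspec: with REQ and STATE being this well's
+ * HSW_PWR_WELL_CTL_REQ()/HSW_PWR_WELL_CTL_STATE() bits in the driver
+ * register, the check below amounts to
+ *   (val & (REQ | STATE)) == (REQ | STATE)
+ * i.e. both our request and the hw ack must be set.)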
+ */ +static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + enum i915_power_well_id id = i915_power_well_instance(power_well)->id; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; + u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | + HSW_PWR_WELL_CTL_STATE(pw_idx); + u32 val; + + val = intel_de_read(dev_priv, regs->driver); + + /* + * On GEN9 big core due to a DMC bug the driver's request bits for PW1 + * and the MISC_IO PW will not be restored, so check instead for the + * BIOS's own request bits, which are forced-on for these power wells + * when exiting DC5/6. + */ + if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) && + (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) + val |= intel_de_read(dev_priv, regs->bios); + + return (val & mask) == mask; +} + +static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) +{ + drm_WARN_ONCE(&dev_priv->drm, + (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9), + "DC9 already programmed to be enabled.\n"); + drm_WARN_ONCE(&dev_priv->drm, + intel_de_read(dev_priv, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC5, + "DC5 still not disabled to enable DC9.\n"); + drm_WARN_ONCE(&dev_priv->drm, + intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) & + HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), + "Power well 2 on.\n"); + drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), + "Interrupts not disabled yet.\n"); + + /* + * TODO: check for the following to verify the conditions to enter DC9 + * state are satisfied: + * 1] Check relevant display engine registers to verify if mode set + * disable sequence was followed. + * 2] Check if display uninitialize sequence is initialized. + */ +} + +static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) +{ + drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), + "Interrupts not disabled yet.\n"); + drm_WARN_ONCE(&dev_priv->drm, + intel_de_read(dev_priv, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC5, + "DC5 still not disabled.\n"); + + /* + * TODO: check for the following to verify DC9 state was indeed + * entered before programming to disable it: + * 1] Check relevant display engine registers to verify if mode + * set disable sequence was followed. + * 2] Check if display uninitialize sequence is initialized. + */ +} + +static void gen9_write_dc_state(struct drm_i915_private *dev_priv, + u32 state) +{ + int rewrites = 0; + int rereads = 0; + u32 v; + + intel_de_write(dev_priv, DC_STATE_EN, state); + + /* It has been observed that disabling the dc6 state sometimes + * doesn't stick and the DMC keeps returning the old value. Make sure + * the write really sticks enough times and also force a rewrite until + * we are confident that state is exactly what we want.
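+	 *
+	 * (Sketch of the retry policy implemented below: rewrite whenever a
+	 * readback mismatches, up to 100 rewrites in total, and declare
+	 * success only after more than 5 consecutive matching reads.)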
+ */ + do { + v = intel_de_read(dev_priv, DC_STATE_EN); + + if (v != state) { + intel_de_write(dev_priv, DC_STATE_EN, state); + rewrites++; + rereads = 0; + } else if (rereads++ > 5) { + break; + } + + } while (rewrites < 100); + + if (v != state) + drm_err(&dev_priv->drm, + "Writing dc state to 0x%x failed, now 0x%x\n", + state, v); + + /* Most of the time we need only one retry; avoid spam */ + if (rewrites > 1) + drm_dbg_kms(&dev_priv->drm, + "Rewrote dc state to 0x%x %d times\n", + state, rewrites); +} + +static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) +{ + u32 mask; + + mask = DC_STATE_EN_UPTO_DC5; + + if (DISPLAY_VER(dev_priv) >= 12) + mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 + | DC_STATE_EN_DC9; + else if (DISPLAY_VER(dev_priv) == 11) + mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; + else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + mask |= DC_STATE_EN_DC9; + else + mask |= DC_STATE_EN_UPTO_DC6; + + return mask; +} + +void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) +{ + u32 val; + + if (!HAS_DISPLAY(dev_priv)) + return; + + val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv); + + drm_dbg_kms(&dev_priv->drm, + "Resetting DC state tracking from %02x to %02x\n", + dev_priv->dmc.dc_state, val); + dev_priv->dmc.dc_state = val; +} + +/** + * gen9_set_dc_state - set target display C power state + * @dev_priv: i915 device instance + * @state: target DC power state + * - DC_STATE_DISABLE + * - DC_STATE_EN_UPTO_DC5 + * - DC_STATE_EN_UPTO_DC6 + * - DC_STATE_EN_DC9 + * + * Signal to DMC firmware/HW the target DC power state passed in @state. + * DMC/HW can turn off individual display clocks and power rails when entering + * a deeper DC power state (higher in number) and turns these back on when + * exiting that state to a shallower power state (lower in number). The HW + * will decide when to actually enter a given state on an on-demand basis, for + * instance depending on the active state of display pipes. The state of + * display registers backed by affected power rails is saved/restored as + * needed. + * + * Based on the above, enabling a deeper DC power state is asynchronous wrt. + * the request to enable it. Disabling a deeper power state is synchronous: + * for instance setting %DC_STATE_DISABLE won't complete until all HW + * resources are turned back on and register state is restored. This is + * guaranteed by the MMIO write + * to DC_STATE_EN blocking until the state is restored.
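+ *
+ * (An illustration matching the code below, not extra Bspec behaviour:
+ * the update is a masked read-modify-write of DC_STATE_EN,
+ *   val = (val & ~gen9_dc_mask(dev_priv)) | state;
+ * with any mismatch between the cached and read-back state reported
+ * before the write.)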
+ */ +void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) +{ + u32 val; + u32 mask; + + if (!HAS_DISPLAY(dev_priv)) + return; + + if (drm_WARN_ON_ONCE(&dev_priv->drm, + state & ~dev_priv->dmc.allowed_dc_mask)) + state &= dev_priv->dmc.allowed_dc_mask; + + val = intel_de_read(dev_priv, DC_STATE_EN); + mask = gen9_dc_mask(dev_priv); + drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n", + val & mask, state); + + /* Check if DMC is ignoring our DC state requests */ + if ((val & mask) != dev_priv->dmc.dc_state) + drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", + dev_priv->dmc.dc_state, val & mask); + + val &= ~mask; + val |= state; + + gen9_write_dc_state(dev_priv, val); + + dev_priv->dmc.dc_state = val & mask; +} + +static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) +{ + drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n"); + gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO); +} + +static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) +{ + u32 val; + + drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); + val = intel_de_read(dev_priv, DC_STATE_EN); + val &= ~DC_STATE_DC3CO_STATUS; + intel_de_write(dev_priv, DC_STATE_EN, val); + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + /* + * Delay of 200us for the DC3CO exit time, per Bspec 49196 + */ + usleep_range(200, 210); +} + +static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) +{ + enum i915_power_well_id high_pg; + + /* Power wells at this level and above must be disabled for DC5 entry */ + if (DISPLAY_VER(dev_priv) == 12) + high_pg = ICL_DISP_PW_3; + else + high_pg = SKL_DISP_PW_2; + + drm_WARN_ONCE(&dev_priv->drm, + intel_display_power_well_is_enabled(dev_priv, high_pg), + "Power wells above platform's DC5 limit still enabled.\n"); + + drm_WARN_ONCE(&dev_priv->drm, + (intel_de_read(dev_priv, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC5), + "DC5 already programmed to be enabled.\n"); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); + + assert_dmc_loaded(dev_priv); +} + +void gen9_enable_dc5(struct drm_i915_private *dev_priv) +{ + assert_can_enable_dc5(dev_priv); + + drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n"); + + /* Wa Display #1183: skl,kbl,cfl */ + if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) + intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, + intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); + + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); +} + +static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) +{ + drm_WARN_ONCE(&dev_priv->drm, + intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, + "Backlight is not disabled.\n"); + drm_WARN_ONCE(&dev_priv->drm, + (intel_de_read(dev_priv, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC6), + "DC6 already programmed to be enabled.\n"); + + assert_dmc_loaded(dev_priv); +} + +void skl_enable_dc6(struct drm_i915_private *dev_priv) +{ + assert_can_enable_dc6(dev_priv); + + drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n"); + + /* Wa Display #1183: skl,kbl,cfl */ + if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) + intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, + intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); + + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); +} + +void bxt_enable_dc9(struct drm_i915_private *dev_priv) +{ + assert_can_enable_dc9(dev_priv); + + drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n"); + /* + * Power sequencer reset is not needed on + * platforms with South Display Engine on PCH, + * because PPS registers are always on.
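+	 * (Hence the HAS_PCH_SPLIT() check below: only platforms without
+	 * the south display engine on the PCH run intel_pps_reset_all()
+	 * before requesting DC9.)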
+ */ + if (!HAS_PCH_SPLIT(dev_priv)) + intel_pps_reset_all(dev_priv); + gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); +} + +void bxt_disable_dc9(struct drm_i915_private *dev_priv) +{ + assert_can_disable_dc9(dev_priv); + + drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n"); + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + intel_pps_unlock_regs_wa(dev_priv); +} + +static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->ops->regs; + int pw_idx = i915_power_well_instance(power_well)->hsw.idx; + u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); + u32 bios_req = intel_de_read(dev_priv, regs->bios); + + /* Take over the request bit if set by BIOS. */ + if (bios_req & mask) { + u32 drv_req = intel_de_read(dev_priv, regs->driver); + + if (!(drv_req & mask)) + intel_de_write(dev_priv, regs->driver, drv_req | mask); + intel_de_write(dev_priv, regs->bios, bios_req & ~mask); + } +} + +static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy); +} + +static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy); +} + +static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy); +} + +static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *power_well; + + power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); + if (intel_power_well_refcount(power_well) > 0) + bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + + power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); + if (intel_power_well_refcount(power_well) > 0) + bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy); + + if (IS_GEMINILAKE(dev_priv)) { + power_well = lookup_power_well(dev_priv, + GLK_DISP_PW_DPIO_CMN_C); + if (intel_power_well_refcount(power_well) > 0) + bxt_ddi_phy_verify_state(dev_priv, + i915_power_well_instance(power_well)->bxt.phy); + } +} + +static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && + (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); +} + +static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) +{ + u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv); + u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices; + + drm_WARN(&dev_priv->drm, + hw_enabled_dbuf_slices != enabled_dbuf_slices, + "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n", + hw_enabled_dbuf_slices, + enabled_dbuf_slices); +} + +void gen9_disable_dc_states(struct drm_i915_private *dev_priv) +{ + struct intel_cdclk_config cdclk_config = {}; + + if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) { + tgl_disable_dc3co(dev_priv); + return; + } + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + if (!HAS_DISPLAY(dev_priv)) + return; + + intel_cdclk_get_cdclk(dev_priv, &cdclk_config); + /* Can't read out voltage_level so can't use intel_cdclk_changed() */ + drm_WARN_ON(&dev_priv->drm, +
intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, + &cdclk_config)); + + gen9_assert_dbuf_enabled(dev_priv); + + if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + bxt_verify_ddi_phy_power_wells(dev_priv); + + if (DISPLAY_VER(dev_priv) >= 11) + /* + * DMC retains HW context only for port A, the other combo + * PHY's HW context for port B is lost after DC transitions, + * so we need to restore it manually. + */ + intel_combo_phy_init(dev_priv); +} + +static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + gen9_disable_dc_states(dev_priv); +} + +static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (!intel_dmc_has_payload(dev_priv)) + return; + + switch (dev_priv->dmc.target_dc_state) { + case DC_STATE_EN_DC3CO: + tgl_enable_dc3co(dev_priv); + break; + case DC_STATE_EN_UPTO_DC6: + skl_enable_dc6(dev_priv); + break; + case DC_STATE_EN_UPTO_DC5: + gen9_enable_dc5(dev_priv); + break; + } +} + +static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ +} + +static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ +} + +static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return true; +} + +static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) + i830_enable_pipe(dev_priv, PIPE_A); + if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) + i830_enable_pipe(dev_priv, PIPE_B); +} + +static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + i830_disable_pipe(dev_priv, PIPE_B); + i830_disable_pipe(dev_priv, PIPE_A); +} + +static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE && + intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; +} + +static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (intel_power_well_refcount(power_well) > 0) + i830_pipes_power_well_enable(dev_priv, power_well); + else + i830_pipes_power_well_disable(dev_priv, power_well); +} + +static void vlv_set_power_well(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, bool enable) +{ + int pw_idx = i915_power_well_instance(power_well)->vlv.idx; + u32 mask; + u32 state; + u32 ctrl; + + mask = PUNIT_PWRGT_MASK(pw_idx); + state = enable ? 
PUNIT_PWRGT_PWR_ON(pw_idx) : + PUNIT_PWRGT_PWR_GATE(pw_idx); + + vlv_punit_get(dev_priv); + +#define COND \ + ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) + + if (COND) + goto out; + + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); + ctrl &= ~mask; + ctrl |= state; + vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); + + if (wait_for(COND, 100)) + drm_err(&dev_priv->drm, + "timeout setting power well state %08x (%08x)\n", + state, + vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); + +#undef COND + +out: + vlv_punit_put(dev_priv); +} + +static void vlv_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, true); +} + +static void vlv_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, false); +} + +static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int pw_idx = i915_power_well_instance(power_well)->vlv.idx; + bool enabled = false; + u32 mask; + u32 state; + u32 ctrl; + + mask = PUNIT_PWRGT_MASK(pw_idx); + ctrl = PUNIT_PWRGT_PWR_ON(pw_idx); + + vlv_punit_get(dev_priv); + + state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; + /* + * We only ever set the power-on and power-gate states, anything + * else is unexpected. + */ + drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) && + state != PUNIT_PWRGT_PWR_GATE(pw_idx)); + if (state == ctrl) + enabled = true; + + /* + * A transient state at this point would mean some unexpected party + * is poking at the power controls too. + */ + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; + drm_WARN_ON(&dev_priv->drm, ctrl != state); + + vlv_punit_put(dev_priv); + + return enabled; +} + +static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) +{ + u32 val; + + /* + * On driver load, a pipe may be active and driving a DSI display. + * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck + * (and never recovering) in this case. intel_dsi_post_disable() will + * clear it when we turn off the display. + */ + val = intel_de_read(dev_priv, DSPCLK_GATE_D); + val &= DPOUNIT_CLOCK_GATE_DISABLE; + val |= VRHUNIT_CLOCK_GATE_DISABLE; + intel_de_write(dev_priv, DSPCLK_GATE_D, val); + + /* + * Disable trickle feed and enable pnd deadline calculation + */ + intel_de_write(dev_priv, MI_ARB_VLV, + MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); + intel_de_write(dev_priv, CBR1_VLV, 0); + + drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0); + intel_de_write(dev_priv, RAWCLK_FREQ_VLV, + DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, + 1000)); +} + +static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) +{ + struct intel_encoder *encoder; + enum pipe pipe; + + /* + * Enable the CRI clock source so we can get at the + * display and the reference clock for VGA + * hotplug / manual detection. Supposedly DSI also + * needs the ref clock up and running. + * + * CHV DPLL B/C have some issues if VGA mode is enabled. 
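+	 *
+	 * (Sketch of what the loop below programs into each DPLL:
+	 * DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS on every pipe, plus
+	 * DPLL_INTEGRATED_CRI_CLK_VLV on all pipes except pipe A.)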
+ */ + for_each_pipe(dev_priv, pipe) { + u32 val = intel_de_read(dev_priv, DPLL(pipe)); + + val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; + if (pipe != PIPE_A) + val |= DPLL_INTEGRATED_CRI_CLK_VLV; + + intel_de_write(dev_priv, DPLL(pipe), val); + } + + vlv_init_display_clock_gating(dev_priv); + + spin_lock_irq(&dev_priv->irq_lock); + valleyview_enable_display_irqs(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + /* + * During driver initialization/resume we can avoid restoring the + * part of the HW/SW state that will be initialized explicitly anyway. + */ + if (dev_priv->power_domains.initializing) + return; + + intel_hpd_init(dev_priv); + intel_hpd_poll_disable(dev_priv); + + /* Re-enable the ADPA, if we have one */ + for_each_intel_encoder(&dev_priv->drm, encoder) { + if (encoder->type == INTEL_OUTPUT_ANALOG) + intel_crt_reset(&encoder->base); + } + + intel_vga_redisable_power_on(dev_priv); + + intel_pps_unlock_regs_wa(dev_priv); +} + +static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->irq_lock); + valleyview_disable_display_irqs(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + /* make sure we're done processing display irqs */ + intel_synchronize_irq(dev_priv); + + intel_pps_reset_all(dev_priv); + + /* Prevent us from re-enabling polling by accident in late suspend */ + if (!dev_priv->drm.dev->power.is_suspended) + intel_hpd_poll_enable(dev_priv); +} + +static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, true); + + vlv_display_power_well_init(dev_priv); +} + +static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_display_power_well_deinit(dev_priv); + + vlv_set_power_well(dev_priv, power_well, false); +} + +static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + /* since ref/cri clock was enabled */ + udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ + + vlv_set_power_well(dev_priv, power_well, true); + + /* + * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - + * 6. De-assert cmn_reset/side_reset. Same as VLV X0. + * a. GUnit 0x2110 bit[0] set to 1 (def 0) + * b. The other bits such as sfr settings / modesel may all + * be set to 0. + * + * This should only be done on init and resume from S3 with + * both PLLs disabled, or we risk losing DPIO and PLL + * synchronization. + */ + intel_de_write(dev_priv, DPIO_CTL, + intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST); +} + +static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) + assert_pll_disabled(dev_priv, pipe); + + /* Assert common reset */ + intel_de_write(dev_priv, DPIO_CTL, + intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST); + + vlv_set_power_well(dev_priv, power_well, false); +} + +#define BITS_SET(val, bits) (((val) & (bits)) == (bits)) + +static void assert_chv_phy_status(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *cmn_bc = + lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); + struct i915_power_well *cmn_d = + lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); + u32 phy_control = dev_priv->chv_phy_control; + u32 phy_status = 0; + u32 phy_status_mask = 0xffffffff; + + /* + * The BIOS can leave the PHY in some weird state + * where it doesn't fully power down some parts.
+ * Disable the asserts until the PHY has been fully + * reset (ie. the power well has been disabled at + * least once). + */ + if (!dev_priv->chv_phy_assert[DPIO_PHY0]) + phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | + PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); + + if (!dev_priv->chv_phy_assert[DPIO_PHY1]) + phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); + + if (intel_power_well_is_enabled(dev_priv, cmn_bc)) { + phy_status |= PHY_POWERGOOD(DPIO_PHY0); + + /* this assumes override is only used to enable lanes */ + if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) + phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); + + if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) + phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); + + /* CL1 is on whenever anything is on in either channel */ + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | + PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) + phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); + + /* + * The DPLLB check accounts for the pipe B + port A usage + * with CL2 powered up but all the lanes in the second channel + * powered down. + */ + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && + (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) + phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); + + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); + + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); + } + + if (intel_power_well_is_enabled(dev_priv, cmn_d)) { + phy_status |= PHY_POWERGOOD(DPIO_PHY1); + + /* this assumes override is only used to enable lanes */ + if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) + phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); + + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) + phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); + + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); + } + + phy_status &= phy_status_mask; + + /* + * The PHY may be busy with some initial calibration and whatnot, + * so the power state can take a while to actually change. 
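+	 *
+	 * (The wait below therefore polls DISPLAY_PHY_STATUS, masked by
+	 * phy_status_mask, for up to 10 ms before complaining about a
+	 * mismatch.)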
+ */ + if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS, + phy_status_mask, phy_status, 10)) + drm_err(&dev_priv->drm, + "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", + intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, + phy_status, dev_priv->chv_phy_control); +} + +#undef BITS_SET + +static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum i915_power_well_id id = i915_power_well_instance(power_well)->id; + enum dpio_phy phy; + enum pipe pipe; + u32 tmp; + + drm_WARN_ON_ONCE(&dev_priv->drm, + id != VLV_DISP_PW_DPIO_CMN_BC && + id != CHV_DISP_PW_DPIO_CMN_D); + + if (id == VLV_DISP_PW_DPIO_CMN_BC) { + pipe = PIPE_A; + phy = DPIO_PHY0; + } else { + pipe = PIPE_C; + phy = DPIO_PHY1; + } + + /* since ref/cri clock was enabled */ + udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ + vlv_set_power_well(dev_priv, power_well, true); + + /* Poll for phypwrgood signal */ + if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS, + PHY_POWERGOOD(phy), 1)) + drm_err(&dev_priv->drm, "Display PHY %d is not powered up\n", + phy); + + vlv_dpio_get(dev_priv); + + /* Enable dynamic power down */ + tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28); + tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | + DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); + + if (id == VLV_DISP_PW_DPIO_CMN_BC) { + tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); + tmp |= DPIO_DYNPWRDOWNEN_CH1; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); + } else { + /* + * Force the non-existing CL2 off. BXT does this + * too, so maybe it saves some power even though + * CL2 doesn't exist? + */ + tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); + tmp |= DPIO_CL2_LDOFUSE_PWRENB; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); + } + + vlv_dpio_put(dev_priv); + + dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); + + drm_dbg_kms(&dev_priv->drm, + "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", + phy, dev_priv->chv_phy_control); + + assert_chv_phy_status(dev_priv); +} + +static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum i915_power_well_id id = i915_power_well_instance(power_well)->id; + enum dpio_phy phy; + + drm_WARN_ON_ONCE(&dev_priv->drm, + id != VLV_DISP_PW_DPIO_CMN_BC && + id != CHV_DISP_PW_DPIO_CMN_D); + + if (id == VLV_DISP_PW_DPIO_CMN_BC) { + phy = DPIO_PHY0; + assert_pll_disabled(dev_priv, PIPE_A); + assert_pll_disabled(dev_priv, PIPE_B); + } else { + phy = DPIO_PHY1; + assert_pll_disabled(dev_priv, PIPE_C); + } + + dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); + + vlv_set_power_well(dev_priv, power_well, false); + + drm_dbg_kms(&dev_priv->drm, + "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", + phy, dev_priv->chv_phy_control); + + /* PHY is fully reset now, so we can enable the PHY state asserts */ + dev_priv->chv_phy_assert[phy] = true; + + assert_chv_phy_status(dev_priv); +} + +static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, + enum dpio_channel ch, bool override, unsigned int mask) +{ + enum pipe pipe = phy == DPIO_PHY0 ?
PIPE_A : PIPE_C; + u32 reg, val, expected, actual; + + /* + * The BIOS can leave the PHY in some weird state + * where it doesn't fully power down some parts. + * Disable the asserts until the PHY has been fully + * reset (ie. the power well has been disabled at + * least once). + */ + if (!dev_priv->chv_phy_assert[phy]) + return; + + if (ch == DPIO_CH0) + reg = _CHV_CMN_DW0_CH0; + else + reg = _CHV_CMN_DW6_CH1; + + vlv_dpio_get(dev_priv); + val = vlv_dpio_read(dev_priv, pipe, reg); + vlv_dpio_put(dev_priv); + + /* + * This assumes !override is only used when the port is disabled. + * All lanes should power down even without the override when + * the port is disabled. + */ + if (!override || mask == 0xf) { + expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; + /* + * If CH1 common lane is not active anymore + * (eg. for pipe B DPLL) the entire channel will + * shut down, which causes the common lane registers + * to read as 0. That means we can't actually check + * the lane power down status bits, but as the entire + * register reads as 0 it's a good indication that the + * channel is indeed entirely powered down. + */ + if (ch == DPIO_CH1 && val == 0) + expected = 0; + } else if (mask != 0x0) { + expected = DPIO_ANYDL_POWERDOWN; + } else { + expected = 0; + } + + if (ch == DPIO_CH0) + actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; + else + actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; + actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; + + drm_WARN(&dev_priv->drm, actual != expected, + "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", + !!(actual & DPIO_ALLDL_POWERDOWN), + !!(actual & DPIO_ANYDL_POWERDOWN), + !!(expected & DPIO_ALLDL_POWERDOWN), + !!(expected & DPIO_ANYDL_POWERDOWN), + reg, val); +} + +bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, + enum dpio_channel ch, bool override) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + bool was_override; + + mutex_lock(&power_domains->lock); + + was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + + if (override == was_override) + goto out; + + if (override) + dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + else + dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); + + drm_dbg_kms(&dev_priv->drm, + "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", + phy, ch, dev_priv->chv_phy_control); + + assert_chv_phy_status(dev_priv); + +out: + mutex_unlock(&power_domains->lock); + + return was_override; +} + +void chv_phy_powergate_lanes(struct intel_encoder *encoder, + bool override, unsigned int mask) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct i915_power_domains *power_domains = &dev_priv->power_domains; + enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); + enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); + + mutex_lock(&power_domains->lock); + + dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); + dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); + + if (override) + dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + else + dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); + + drm_dbg_kms(&dev_priv->drm, + "Power gating DPIO PHY%d CH%d lanes 0x%x
(PHY_CONTROL=0x%08x)\n", + phy, ch, mask, dev_priv->chv_phy_control); + + assert_chv_phy_status(dev_priv); + + assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); + + mutex_unlock(&power_domains->lock); +} + +static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum pipe pipe = PIPE_A; + bool enabled; + u32 state, ctrl; + + vlv_punit_get(dev_priv); + + state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); + /* + * We only ever set the power-on and power-gate states, anything + * else is unexpected. + */ + drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && + state != DP_SSS_PWR_GATE(pipe)); + enabled = state == DP_SSS_PWR_ON(pipe); + + /* + * A transient state at this point would mean some unexpected party + * is poking at the power controls too. + */ + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); + drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); + + vlv_punit_put(dev_priv); + + return enabled; +} + +static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, + bool enable) +{ + enum pipe pipe = PIPE_A; + u32 state; + u32 ctrl; + + state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); + + vlv_punit_get(dev_priv); + +#define COND \ + ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) + + if (COND) + goto out; + + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); + ctrl &= ~DP_SSC_MASK(pipe); + ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); + + if (wait_for(COND, 100)) + drm_err(&dev_priv->drm, + "timeout setting power well state %08x (%08x)\n", + state, + vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); + +#undef COND + +out: + vlv_punit_put(dev_priv); +} + +static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); +} + +static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + chv_set_pipe_power_well(dev_priv, power_well, true); + + vlv_display_power_well_init(dev_priv); +} + +static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_display_power_well_deinit(dev_priv); + + chv_set_pipe_power_well(dev_priv, power_well, false); +} + +static void +tgl_tc_cold_request(struct drm_i915_private *i915, bool block) +{ + u8 tries = 0; + int ret; + + while (1) { + u32 low_val; + u32 high_val = 0; + + if (block) + low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; + else + low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ; + + /* + * Spec states that we should timeout the request after 200us + * but the function below will timeout after 500us + */ + ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val); + if (ret == 0) { + if (block && + (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) + ret = -EIO; + else + break; + } + + if (++tries == 3) + break; + + msleep(1); + } + + if (ret) + drm_err(&i915->drm, "TC cold %sblock failed\n", + block ? "" : "un"); + else + drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", + block ? 
"" : "un"); +} + +static void +tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915, + struct i915_power_well *power_well) +{ + tgl_tc_cold_request(i915, true); +} + +static void +tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915, + struct i915_power_well *power_well) +{ + tgl_tc_cold_request(i915, false); +} + +static void +tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915, + struct i915_power_well *power_well) +{ + if (intel_power_well_refcount(power_well) > 0) + tgl_tc_cold_off_power_well_enable(i915, power_well); + else + tgl_tc_cold_off_power_well_disable(i915, power_well); +} + +static bool +tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + /* + * Not the correctly implementation but there is no way to just read it + * from PCODE, so returning count to avoid state mismatch errors + */ + return intel_power_well_refcount(power_well); +} + + +const struct i915_power_well_ops i9xx_always_on_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = i9xx_always_on_power_well_noop, + .disable = i9xx_always_on_power_well_noop, + .is_enabled = i9xx_always_on_power_well_enabled, +}; + +const struct i915_power_well_ops chv_pipe_power_well_ops = { + .sync_hw = chv_pipe_power_well_sync_hw, + .enable = chv_pipe_power_well_enable, + .disable = chv_pipe_power_well_disable, + .is_enabled = chv_pipe_power_well_enabled, +}; + +const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = chv_dpio_cmn_power_well_enable, + .disable = chv_dpio_cmn_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +const struct i915_power_well_ops i830_pipes_power_well_ops = { + .sync_hw = i830_pipes_power_well_sync_hw, + .enable = i830_pipes_power_well_enable, + .disable = i830_pipes_power_well_disable, + .is_enabled = i830_pipes_power_well_enabled, +}; + +static const struct i915_power_well_regs hsw_power_well_regs = { + .bios = HSW_PWR_WELL_CTL1, + .driver = HSW_PWR_WELL_CTL2, + .kvmr = HSW_PWR_WELL_CTL3, + .debug = HSW_PWR_WELL_CTL4, +}; + +const struct i915_power_well_ops hsw_power_well_ops = { + .regs = &hsw_power_well_regs, + .sync_hw = hsw_power_well_sync_hw, + .enable = hsw_power_well_enable, + .disable = hsw_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + +const struct i915_power_well_ops gen9_dc_off_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = gen9_dc_off_power_well_enable, + .disable = gen9_dc_off_power_well_disable, + .is_enabled = gen9_dc_off_power_well_enabled, +}; + +const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = bxt_dpio_cmn_power_well_enable, + .disable = bxt_dpio_cmn_power_well_disable, + .is_enabled = bxt_dpio_cmn_power_well_enabled, +}; + +const struct i915_power_well_ops vlv_display_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = vlv_display_power_well_enable, + .disable = vlv_display_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = vlv_dpio_cmn_power_well_enable, + .disable = vlv_dpio_cmn_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +const struct i915_power_well_ops vlv_dpio_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = vlv_power_well_enable, + .disable = vlv_power_well_disable, + 
.is_enabled = vlv_power_well_enabled, +}; + +static const struct i915_power_well_regs icl_aux_power_well_regs = { + .bios = ICL_PWR_WELL_CTL_AUX1, + .driver = ICL_PWR_WELL_CTL_AUX2, + .debug = ICL_PWR_WELL_CTL_AUX4, +}; + +const struct i915_power_well_ops icl_aux_power_well_ops = { + .regs = &icl_aux_power_well_regs, + .sync_hw = hsw_power_well_sync_hw, + .enable = icl_aux_power_well_enable, + .disable = icl_aux_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + +static const struct i915_power_well_regs icl_ddi_power_well_regs = { + .bios = ICL_PWR_WELL_CTL_DDI1, + .driver = ICL_PWR_WELL_CTL_DDI2, + .debug = ICL_PWR_WELL_CTL_DDI4, +}; + +const struct i915_power_well_ops icl_ddi_power_well_ops = { + .regs = &icl_ddi_power_well_regs, + .sync_hw = hsw_power_well_sync_hw, + .enable = hsw_power_well_enable, + .disable = hsw_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + +const struct i915_power_well_ops tgl_tc_cold_off_ops = { + .sync_hw = tgl_tc_cold_off_power_well_sync_hw, + .enable = tgl_tc_cold_off_power_well_enable, + .disable = tgl_tc_cold_off_power_well_disable, + .is_enabled = tgl_tc_cold_off_power_well_is_enabled, +}; diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index 9a3756fdcf7f..d0624642dcb6 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h @@ -8,10 +8,23 @@ #include <linux/types.h> #include "intel_display.h" +#include "intel_display_power.h" struct drm_i915_private; struct i915_power_well; +#define for_each_power_well(__dev_priv, __power_well) \ + for ((__power_well) = (__dev_priv)->power_domains.power_wells; \ + (__power_well) - (__dev_priv)->power_domains.power_wells < \ + (__dev_priv)->power_domains.power_well_count; \ + (__power_well)++) + +#define for_each_power_well_reverse(__dev_priv, __power_well) \ + for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ + (__dev_priv)->power_domains.power_well_count - 1; \ + (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ + (__power_well)--) + /* * i915_power_well_id: * @@ -20,7 +33,7 @@ struct i915_power_well; * wells must be assigned DISP_PW_ID_NONE. */ enum i915_power_well_id { - DISP_PW_ID_NONE, + DISP_PW_ID_NONE = 0, /* must be kept zero */ VLV_DISP_PW_DISP2D, BXT_DISP_PW_DPIO_CMN_A, @@ -36,45 +49,13 @@ enum i915_power_well_id { TGL_DISP_PW_TC_COLD_OFF, }; -struct i915_power_well_regs { - i915_reg_t bios; - i915_reg_t driver; - i915_reg_t kvmr; - i915_reg_t debug; -}; - -struct i915_power_well_ops { - const struct i915_power_well_regs *regs; - /* - * Synchronize the well's hw state to match the current sw state, for - * example enable/disable it based on the current refcount. Called - * during driver init and resume time, possibly after first calling - * the enable/disable handlers. - */ - void (*sync_hw)(struct drm_i915_private *i915, - struct i915_power_well *power_well); - /* - * Enable the well and resources that depend on it (for example - * interrupts located on the well). Called after the 0->1 refcount - * transition. - */ - void (*enable)(struct drm_i915_private *i915, - struct i915_power_well *power_well); - /* - * Disable the well and resources that depend on it. Called after - * the 1->0 refcount transition. - */ - void (*disable)(struct drm_i915_private *i915, - struct i915_power_well *power_well); - /* Returns the hw enabled state. 
*/ - bool (*is_enabled)(struct drm_i915_private *i915, - struct i915_power_well *power_well); -}; - -struct i915_power_well_desc { +struct i915_power_well_instance { const char *name; - bool always_on; - u64 domains; + const struct i915_power_domain_list { + const enum intel_display_power_domain *list; + u8 count; + } *domain_list; + /* unique identifier for this power well */ enum i915_power_well_id id; /* @@ -98,33 +79,45 @@ struct i915_power_well_desc { * control/status registers. */ u8 idx; - /* Mask of pipes whose IRQ logic is backed by the pw */ - u8 irq_pipe_mask; - /* - * Instead of waiting for the status bit to ack enables, - * just wait a specific amount of time and then consider - * the well enabled. - */ - u16 fixed_enable_delay; - /* The pw is backing the VGA functionality */ - bool has_vga:1; - bool has_fuses:1; - /* - * The pw is for an ICL+ TypeC PHY port in - * Thunderbolt mode. - */ - bool is_tc_tbt:1; } hsw; }; +}; + +struct i915_power_well_desc { const struct i915_power_well_ops *ops; + const struct i915_power_well_instance_list { + const struct i915_power_well_instance *list; + u8 count; + } *instances; + + /* Mask of pipes whose IRQ logic is backed by the pw */ + u16 irq_pipe_mask:4; + u16 always_on:1; + /* + * Instead of waiting for the status bit to ack enables, + * just wait a specific amount of time and then consider + * the well enabled. + */ + u16 fixed_enable_delay:1; + /* The pw is backing the VGA functionality */ + u16 has_vga:1; + u16 has_fuses:1; + /* + * The pw is for an ICL+ TypeC PHY port in + * Thunderbolt mode. + */ + u16 is_tc_tbt:1; }; struct i915_power_well { const struct i915_power_well_desc *desc; + struct intel_power_domain_mask domains; /* power well enable/disable usage count */ int count; /* cached hw enabled state */ bool hw_enabled; + /* index into desc->instances->list */ + u8 instance_idx; }; struct i915_power_well *lookup_power_well(struct drm_i915_private *i915, @@ -147,7 +140,34 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); bool intel_power_well_is_always_on(struct i915_power_well *power_well); const char *intel_power_well_name(struct i915_power_well *power_well); -u64 intel_power_well_domains(struct i915_power_well *power_well); +struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well); int intel_power_well_refcount(struct i915_power_well *power_well); +void chv_phy_powergate_lanes(struct intel_encoder *encoder, + bool override, unsigned int mask); +bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, + enum dpio_channel ch, bool override); + +void gen9_enable_dc5(struct drm_i915_private *dev_priv); +void skl_enable_dc6(struct drm_i915_private *dev_priv); +void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv); +void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state); +void gen9_disable_dc_states(struct drm_i915_private *dev_priv); +void bxt_enable_dc9(struct drm_i915_private *dev_priv); +void bxt_disable_dc9(struct drm_i915_private *dev_priv); + +extern const struct i915_power_well_ops i9xx_always_on_power_well_ops; +extern const struct i915_power_well_ops chv_pipe_power_well_ops; +extern const struct i915_power_well_ops chv_dpio_cmn_power_well_ops; +extern const struct i915_power_well_ops i830_pipes_power_well_ops; +extern const struct i915_power_well_ops hsw_power_well_ops; +extern const struct i915_power_well_ops gen9_dc_off_power_well_ops; +extern const struct
i915_power_well_ops bxt_dpio_cmn_power_well_ops; +extern const struct i915_power_well_ops vlv_display_power_well_ops; +extern const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops; +extern const struct i915_power_well_ops vlv_dpio_power_well_ops; +extern const struct i915_power_well_ops icl_aux_power_well_ops; +extern const struct i915_power_well_ops icl_ddi_power_well_ops; +extern const struct i915_power_well_ops tgl_tc_cold_off_ops; + #endif diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 26f9e2b748e4..9feaf1a589f3 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -82,19 +82,8 @@ static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *i915 = to_i915(encoder->base.dev); int ret; - if (intel_dp_is_edp(intel_dp)) - return false; - - /* - * Detecting LTTPRs must be avoided on platforms with an AUX timeout - * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1). - */ - if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915)) - return false; - ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd, intel_dp->lttpr_common_caps); if (ret < 0) @@ -197,13 +186,25 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI */ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) { - u8 dpcd[DP_RECEIVER_CAP_SIZE]; - int lttpr_count; + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + int lttpr_count = 0; - if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd)) - return -EIO; + /* + * Detecting LTTPRs must be avoided on platforms with an AUX timeout + * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1). 
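+	 *
+	 * (In practice the gate below reduces to: not eDP, display version
+	 * 10 or newer, and not GLK, since GLK's AUX timeout period is below
+	 * the 3.2ms requirement.)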
+ */ + if (!intel_dp_is_edp(intel_dp) && + (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) { + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + + if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV)) + return -EIO; - lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd); + if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd)) + return -EIO; + + lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd); + } /* * The DPTX shall read the DPRX caps after LTTPR detection, so re-read diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 44edeb2e55c0..cc6abe761f5e 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -24,6 +24,7 @@ #include "intel_ddi.h" #include "intel_ddi_buf_trans.h" #include "intel_de.h" +#include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dpio_phy.h" diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 95b9d327ed4d..6eef0b8a91eb 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -18,7 +18,10 @@ #include "vlv_sideband.h" struct intel_dpll_funcs { - int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state); + int (*crtc_compute_clock)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + int (*crtc_get_shared_dpll)(struct intel_atomic_state *state, + struct intel_crtc *crtc); }; struct intel_limit { @@ -759,8 +762,8 @@ chv_find_best_dpll(const struct intel_limit *limit, bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, struct dpll *best_clock) { - int refclk = 100000; const struct intel_limit *limit = &intel_limits_bxt; + int refclk = 100000; return chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, @@ -927,32 +930,48 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state, crtc_state->dpll_hw_state.dpll = dpll; } -static int hsw_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int hsw_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_atomic_state *state = - to_intel_atomic_state(crtc_state->uapi.state); + return 0; +} + +static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); - - if (IS_DG2(dev_priv)) - return intel_mpllb_calc_state(crtc_state, encoder); + int ret; if (DISPLAY_VER(dev_priv) < 11 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0; - if (!intel_reserve_shared_dplls(state, crtc, encoder)) { + ret = intel_reserve_shared_dplls(state, crtc, encoder); + if (ret) { drm_dbg_kms(&dev_priv->drm, "failed to find PLL for pipe %c\n", pipe_name(crtc->pipe)); - return -EINVAL; + return ret; } return 0; } +static int dg2_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_encoder *encoder = + intel_get_crtc_new_encoder(state, crtc_state); + + return intel_mpllb_calc_state(crtc_state, encoder); +} + static bool 
ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor) { return dpll->m < factor * dpll->n; @@ -1068,18 +1087,15 @@ static void ilk_compute_dpll(struct intel_crtc_state *crtc_state, crtc_state->dpll_hw_state.dpll = dpll; } -static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int ilk_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_atomic_state *state = - to_intel_atomic_state(crtc_state->uapi.state); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 120000; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ if (!crtc_state->has_pch_encoder) return 0; @@ -1118,11 +1134,27 @@ static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state) ilk_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); - if (!intel_reserve_shared_dplls(state, crtc, NULL)) { + return 0; +} + +static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + int ret; + + /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ + if (!crtc_state->has_pch_encoder) + return 0; + + ret = intel_reserve_shared_dplls(state, crtc, NULL); + if (ret) { drm_dbg_kms(&dev_priv->drm, "failed to find PLL for pipe %c\n", pipe_name(crtc->pipe)); - return -EINVAL; + return ret; } return 0; @@ -1163,14 +1195,14 @@ void chv_compute_dpll(struct intel_crtc_state *crtc_state) (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; } -static int chv_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int chv_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - int refclk = 100000; + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit = &intel_limits_chv; - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); + int refclk = 100000; if (!crtc_state->clock_set && !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, @@ -1184,14 +1216,14 @@ static int chv_crtc_compute_clock(struct intel_crtc_state *crtc_state) return 0; } -static int vlv_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int vlv_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - int refclk = 100000; + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit = &intel_limits_vlv; - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); + int refclk = 100000; if (!crtc_state->clock_set && !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, @@ -1205,16 +1237,15 @@ static int vlv_crtc_compute_clock(struct intel_crtc_state *crtc_state) return 0; } -static int 
g4x_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int g4x_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 96000; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; @@ -1251,16 +1282,15 @@ static int g4x_crtc_compute_clock(struct intel_crtc_state *crtc_state) return 0; } -static int pnv_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int pnv_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 96000; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; @@ -1288,16 +1318,15 @@ static int pnv_crtc_compute_clock(struct intel_crtc_state *crtc_state) return 0; } -static int i9xx_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 96000; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; @@ -1325,16 +1354,15 @@ static int i9xx_crtc_compute_clock(struct intel_crtc_state *crtc_state) return 0; } -static int i8xx_crtc_compute_clock(struct intel_crtc_state *crtc_state) +static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 48000; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; @@ -1364,12 +1392,18 @@ static int i8xx_crtc_compute_clock(struct intel_crtc_state *crtc_state) return 0; } +static const struct intel_dpll_funcs dg2_dpll_funcs = { + .crtc_compute_clock = dg2_crtc_compute_clock, +}; + static const struct intel_dpll_funcs hsw_dpll_funcs = { .crtc_compute_clock = hsw_crtc_compute_clock, + .crtc_get_shared_dpll = hsw_crtc_get_shared_dpll, 
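/*
 * Note that the vfunc tables gain an optional .crtc_get_shared_dpll member;
 * GMCH-era platforms whose PLLs are per-pipe simply leave it NULL. A generic
 * sketch of dispatching such an optional hook (function name hypothetical;
 * the NULL check mirrors the wrapper added below):
 */
static int example_get_shared_dpll(struct drm_i915_private *i915,
				   struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	if (!i915->dpll_funcs->crtc_get_shared_dpll)
		return 0;	/* nothing to reserve on this platform */

	return i915->dpll_funcs->crtc_get_shared_dpll(state, crtc);
}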
}; static const struct intel_dpll_funcs ilk_dpll_funcs = { .crtc_compute_clock = ilk_crtc_compute_clock, + .crtc_get_shared_dpll = ilk_crtc_get_shared_dpll, }; static const struct intel_dpll_funcs chv_dpll_funcs = { @@ -1396,18 +1430,54 @@ static const struct intel_dpll_funcs i8xx_dpll_funcs = { .crtc_compute_clock = i8xx_crtc_compute_clock, }; -int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state) +int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state)); + + if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll)) + return 0; - return i915->dpll_funcs->crtc_compute_clock(crtc_state); + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + + if (!crtc_state->hw.enable) + return 0; + + return i915->dpll_funcs->crtc_compute_clock(state, crtc); +} + +int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state)); + + if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll)) + return 0; + + if (!crtc_state->hw.enable) + return 0; + + if (!i915->dpll_funcs->crtc_get_shared_dpll) + return 0; + + return i915->dpll_funcs->crtc_get_shared_dpll(state, crtc); } void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv) { - if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv)) + if (IS_DG2(dev_priv)) + dev_priv->dpll_funcs = &dg2_dpll_funcs; + else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv)) dev_priv->dpll_funcs = &hsw_dpll_funcs; else if (HAS_PCH_SPLIT(dev_priv)) dev_priv->dpll_funcs = &ilk_dpll_funcs; diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h index 69b06a9e473e..bbc30542f29f 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.h +++ b/drivers/gpu/drm/i915/display/intel_dpll.h @@ -10,12 +10,16 @@ struct dpll; struct drm_i915_private; +struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; enum pipe; void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv); -int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state); +int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, + struct intel_crtc *crtc); +int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc); int vlv_calc_dpll_params(int refclk, struct dpll *clock); int pnv_calc_dpll_params(int refclk, struct dpll *clock); int i9xx_calc_dpll_params(int refclk, struct dpll *clock); diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index b7071da4b7e5..22f55574a35c 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -90,9 +90,9 @@ struct intel_shared_dpll_funcs { struct intel_dpll_mgr { const struct dpll_info *dpll_info; - bool (*get_dplls)(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder); + int (*get_dplls)(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct 
intel_encoder *encoder); void (*put_dplls)(struct intel_atomic_state *state, struct intel_crtc *crtc); void (*update_active_dpll)(struct intel_atomic_state *state, @@ -514,9 +514,9 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, udelay(200); } -static bool ibx_get_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int ibx_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -541,7 +541,7 @@ static bool ibx_get_dpll(struct intel_atomic_state *state, } if (!pll) - return false; + return -EINVAL; /* reference the pll */ intel_reference_shared_dpll(state, crtc, @@ -549,7 +549,7 @@ static bool ibx_get_dpll(struct intel_atomic_state *state, crtc_state->shared_dpll = pll; - return true; + return 0; } static void ibx_dump_hw_state(struct drm_i915_private *dev_priv, @@ -584,7 +584,7 @@ static const struct intel_dpll_mgr pch_pll_mgr = { }; static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; @@ -1060,16 +1060,13 @@ static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915, return link_clock * 2; } -static bool hsw_get_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int hsw_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - struct intel_shared_dpll *pll; - - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); + struct intel_shared_dpll *pll = NULL; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) pll = hsw_ddi_wrpll_get_dpll(state, crtc); @@ -1077,18 +1074,16 @@ static bool hsw_get_dpll(struct intel_atomic_state *state, pll = hsw_ddi_lcpll_get_dpll(crtc_state); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) pll = hsw_ddi_spll_get_dpll(state, crtc); - else - return false; if (!pll) - return false; + return -EINVAL; intel_reference_shared_dpll(state, crtc, pll, &crtc_state->dpll_hw_state); crtc_state->shared_dpll = pll; - return true; + return 0; } static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915) @@ -1493,7 +1488,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params, params->dco_integer * MHz(1)) * 0x8000, MHz(1)); } -static bool +static int skl_ddi_calculate_wrpll(int clock /* in Hz */, int ref_clock, struct skl_wrpll_params *wrpll_params) @@ -1552,7 +1547,7 @@ skip_remaining_dividers: if (!ctx.p) { DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock); - return false; + return -EINVAL; } /* @@ -1564,14 +1559,15 @@ skip_remaining_dividers: skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock, ctx.central_freq, p0, p1, p2); - return true; + return 0; } -static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) +static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct skl_wrpll_params wrpll_params = {}; u32 ctrl1, cfgcr1, cfgcr2; + int ret; /* * See comment in intel_dpll_hw_state to understand why we always use 0 @@ -1581,10 +1577,10 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); - if 
(!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, - i915->dpll.ref_clks.nssc, - &wrpll_params)) - return false; + ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, + i915->dpll.ref_clks.nssc, &wrpll_params); + if (ret) + return ret; cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | @@ -1596,13 +1592,11 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | wrpll_params.central_freq; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - crtc_state->dpll_hw_state.ctrl1 = ctrl1; crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; - return true; + + return 0; } static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, @@ -1676,7 +1670,7 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, return dco_freq / (p0 * p1 * p2 * 5); } -static bool +static int skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { u32 ctrl1; @@ -1708,12 +1702,9 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) break; } - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - crtc_state->dpll_hw_state.ctrl1 = ctrl1; - return true; + return 0; } static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915, @@ -1750,33 +1741,23 @@ static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915, return link_clock * 2; } -static bool skl_get_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int skl_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; - bool bret; - - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - bret = skl_ddi_hdmi_pll_dividers(crtc_state); - if (!bret) { - drm_dbg_kms(&i915->drm, - "Could not get HDMI pll dividers.\n"); - return false; - } - } else if (intel_crtc_has_dp_encoder(crtc_state)) { - bret = skl_ddi_dp_set_dpll_hw_state(crtc_state); - if (!bret) { - drm_dbg_kms(&i915->drm, - "Could not set DP dpll HW state.\n"); - return false; - } - } else { - return false; - } + int ret; + + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + ret = skl_ddi_hdmi_pll_dividers(crtc_state); + else if (intel_crtc_has_dp_encoder(crtc_state)) + ret = skl_ddi_dp_set_dpll_hw_state(crtc_state); + else + ret = -EINVAL; + if (ret) + return ret; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) pll = intel_find_shared_dpll(state, crtc, @@ -1789,14 +1770,14 @@ static bool skl_get_dpll(struct intel_atomic_state *state, BIT(DPLL_ID_SKL_DPLL2) | BIT(DPLL_ID_SKL_DPLL1)); if (!pll) - return false; + return -EINVAL; intel_reference_shared_dpll(state, crtc, pll, &crtc_state->dpll_hw_state); crtc_state->shared_dpll = pll; - return true; + return 0; } static int skl_ddi_pll_get_freq(struct drm_i915_private *i915, @@ -2095,7 +2076,7 @@ static const struct dpll bxt_dp_clk_val[] = { { .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ }, }; -static bool +static int bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state, struct dpll *clk_div) { @@ -2111,12 +2092,12 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state, drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n", crtc_state->port_clock, pipe_name(crtc->pipe)); - 
return false; + return -EINVAL; } drm_WARN_ON(&i915->drm, clk_div->m1 != 2); - return true; + return 0; } static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state, @@ -2139,8 +2120,8 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state, clk_div->dot != crtc_state->port_clock); } -static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, - const struct dpll *clk_div) +static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, + const struct dpll *clk_div) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state; @@ -2149,8 +2130,6 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, u32 prop_coef, int_coef, gain_ctl, targ_cnt; u32 lanestagger; - memset(dpll_hw_state, 0, sizeof(*dpll_hw_state)); - if (vco >= 6200000 && vco <= 6700000) { prop_coef = 4; int_coef = 9; @@ -2169,7 +2148,7 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, targ_cnt = 9; } else { drm_err(&i915->drm, "Invalid VCO\n"); - return false; + return -EINVAL; } if (clock > 270000) @@ -2206,10 +2185,10 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger; - return true; + return 0; } -static bool +static int bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { struct dpll clk_div = {}; @@ -2219,7 +2198,7 @@ bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); } -static bool +static int bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { struct dpll clk_div = {}; @@ -2246,23 +2225,25 @@ static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915, return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock); } -static bool bxt_get_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int bxt_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum intel_dpll_id id; + int ret; - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && - !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state)) - return false; - - if (intel_crtc_has_dp_encoder(crtc_state) && - !bxt_ddi_dp_set_dpll_hw_state(crtc_state)) - return false; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + ret = bxt_ddi_hdmi_set_dpll_hw_state(crtc_state); + else if (intel_crtc_has_dp_encoder(crtc_state)) + ret = bxt_ddi_dp_set_dpll_hw_state(crtc_state); + else + ret = -EINVAL; + if (ret) + return ret; /* 1:1 mapping between ports and PLLs */ id = (enum intel_dpll_id) encoder->port; @@ -2276,7 +2257,7 @@ static bool bxt_get_dpll(struct intel_atomic_state *state, crtc_state->shared_dpll = pll; - return true; + return 0; } static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915) @@ -2513,8 +2494,8 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = { /* the following params are unused */ }; -static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, - struct skl_wrpll_params *pll_params) +static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, + struct skl_wrpll_params *pll_params) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 
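/*
 * The skl/bxt hunks above all follow the same conversion recipe: helpers
 * that used to return true/false now return 0 or a negative errno, and the
 * per-output dispatch collapses into a single error path instead of
 * duplicated "if (!ok) return false" branches. Sketch of the shape
 * (helper names hypothetical):
 */
static int example_set_pll_state(struct intel_crtc_state *crtc_state)
{
	int ret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		ret = example_hdmi_pll_dividers(crtc_state);
	else if (intel_crtc_has_dp_encoder(crtc_state))
		ret = example_dp_pll_state(crtc_state);
	else
		ret = -EINVAL;

	return ret;	/* caller forwards the precise errno */
}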
const struct icl_combo_pll_params *params = @@ -2527,16 +2508,16 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) { if (clock == params[i].clock) { *pll_params = params[i].wrpll; - return true; + return 0; } } MISSING_CASE(clock); - return false; + return -EINVAL; } -static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, - struct skl_wrpll_params *pll_params) +static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, + struct skl_wrpll_params *pll_params) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); @@ -2568,7 +2549,7 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, } } - return true; + return 0; } static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915, @@ -2598,7 +2579,7 @@ static int icl_wrpll_ref_clock(struct drm_i915_private *i915) return ref_clock; } -static bool +static int icl_calc_wrpll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *wrpll_params) { @@ -2633,13 +2614,13 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state, } if (best_div == 0) - return false; + return -EINVAL; icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv); icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv, kdiv); - return true; + return 0; } static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915, @@ -2709,8 +2690,6 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915, { u32 dco_fraction = pll_params->dco_fraction; - memset(pll_state, 0, sizeof(*pll_state)); - if (ehl_combo_pll_div_frac_wa_needed(i915)) dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2); @@ -2731,10 +2710,10 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915, pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val); } -static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, - u32 *target_dco_khz, - struct intel_dpll_hw_state *state, - bool is_dkl) +static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, + u32 *target_dco_khz, + struct intel_dpll_hw_state *state, + bool is_dkl) { static const u8 div1_vals[] = { 7, 5, 3, 2 }; u32 dco_min_freq, dco_max_freq; @@ -2800,19 +2779,19 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, hsdiv | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2); - return true; + return 0; } } - return false; + return -EINVAL; } /* * The specification for this function uses real numbers, so the math had to be * adapted to integer-only calculation, that's why it looks so different. 
*/ -static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, - struct intel_dpll_hw_state *pll_state) +static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, + struct intel_dpll_hw_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int refclk_khz = dev_priv->dpll.ref_clks.nssc; @@ -2826,14 +2805,14 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, bool use_ssc = false; bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI); bool is_dkl = DISPLAY_VER(dev_priv) >= 12; + int ret; - memset(pll_state, 0, sizeof(*pll_state)); - - if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz, - pll_state, is_dkl)) { + ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz, + pll_state, is_dkl); + if (ret) { drm_dbg_kms(&dev_priv->drm, "Failed to find divisors for clock %d\n", clock); - return false; + return ret; } m1div = 2; @@ -2848,7 +2827,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, drm_dbg_kms(&dev_priv->drm, "Failed to find mdiv for clock %d\n", clock); - return false; + return -EINVAL; } } m2div_rem = dco_khz % (refclk_khz * m1div); @@ -2875,7 +2854,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, break; default: MISSING_CASE(refclk_khz); - return false; + return -EINVAL; } /* @@ -3018,7 +2997,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask; } - return true; + return 0; } static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv, @@ -3140,9 +3119,9 @@ static u32 intel_get_hti_plls(struct drm_i915_private *i915) return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state); } -static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int icl_get_combo_phy_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -3160,11 +3139,10 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, else ret = icl_calc_dp_combo_pll(crtc_state, &pll_params); - if (!ret) { + if (ret) { drm_dbg_kms(&dev_priv->drm, "Could not calculate combo PHY PLL state.\n"); - - return false; + return ret; } icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); @@ -3209,7 +3187,7 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, drm_dbg_kms(&dev_priv->drm, "No combo PHY PLL found for [ENCODER:%d:%s]\n", encoder->base.base.id, encoder->base.name); - return false; + return -EINVAL; } intel_reference_shared_dpll(state, crtc, @@ -3217,12 +3195,12 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, icl_update_active_dpll(state, crtc, encoder); - return true; + return 0; } -static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int icl_get_tc_phy_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = @@ -3230,12 +3208,14 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, struct skl_wrpll_params pll_params = { }; struct icl_port_dpll *port_dpll; enum intel_dpll_id dpll_id; + int ret; port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; - if 
(!icl_calc_tbt_pll(crtc_state, &pll_params)) { + ret = icl_calc_tbt_pll(crtc_state, &pll_params); + if (ret) { drm_dbg_kms(&dev_priv->drm, "Could not calculate TBT PLL state.\n"); - return false; + return ret; } icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); @@ -3245,14 +3225,15 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, BIT(DPLL_ID_ICL_TBTPLL)); if (!port_dpll->pll) { drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n"); - return false; + return -EINVAL; } intel_reference_shared_dpll(state, crtc, port_dpll->pll, &port_dpll->hw_state); port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY]; - if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) { + ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state); + if (ret) { drm_dbg_kms(&dev_priv->drm, "Could not calculate MG PHY PLL state.\n"); goto err_unreference_tbt_pll; @@ -3264,6 +3245,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, &port_dpll->hw_state, BIT(dpll_id)); if (!port_dpll->pll) { + ret = -EINVAL; drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n"); goto err_unreference_tbt_pll; } @@ -3272,18 +3254,18 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, icl_update_active_dpll(state, crtc, encoder); - return true; + return 0; err_unreference_tbt_pll: port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; intel_unreference_shared_dpll(state, crtc, port_dpll->pll); - return false; + return ret; } -static bool icl_get_dplls(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static int icl_get_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); @@ -3295,7 +3277,7 @@ static bool icl_get_dplls(struct intel_atomic_state *state, MISSING_CASE(phy); - return false; + return -EINVAL; } static void icl_put_dplls(struct intel_atomic_state *state, @@ -4081,13 +4063,12 @@ static const struct intel_dpll_mgr adlp_pll_mgr = { /** * intel_shared_dpll_init - Initialize shared DPLLs - * @dev: drm device + * @dev_priv: i915 device * - * Initialize shared DPLLs for @dev. + * Initialize shared DPLLs for @dev_priv. */ -void intel_shared_dpll_init(struct drm_device *dev) +void intel_shared_dpll_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); const struct intel_dpll_mgr *dpll_mgr = NULL; const struct dpll_info *dpll_info; int i; @@ -4126,7 +4107,7 @@ void intel_shared_dpll_init(struct drm_device *dev) dpll_info = dpll_mgr->dpll_info; for (i = 0; dpll_info[i].name; i++) { - drm_WARN_ON(dev, i != dpll_info[i].id); + drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id); dev_priv->dpll.shared_dplls[i].info = &dpll_info[i]; } @@ -4154,17 +4135,18 @@ void intel_shared_dpll_init(struct drm_device *dev) * intel_release_shared_dplls(). * * Returns: - * True if all required DPLLs were successfully reserved. + * 0 if all required DPLLs were successfully reserved, + * negative error code otherwise. 
*/ -bool intel_reserve_shared_dplls(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +int intel_reserve_shared_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) - return false; + return -EINVAL; return dpll_mgr->get_dplls(state, crtc, encoder); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index ba2fdfce1579..f7c96a1f13c8 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -37,7 +37,6 @@ __a > __b ? (__a - __b) : (__b - __a); }) enum tc_port; -struct drm_device; struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; @@ -337,9 +336,9 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, bool state); #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) -bool intel_reserve_shared_dplls(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder); +int intel_reserve_shared_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); void intel_release_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc); void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state, @@ -356,7 +355,7 @@ bool intel_dpll_get_hw_state(struct drm_i915_private *i915, void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_shared_dpll_swap_state(struct intel_atomic_state *state); -void intel_shared_dpll_init(struct drm_device *dev); +void intel_shared_dpll_init(struct drm_i915_private *dev_priv); void intel_dpll_update_ref_clks(struct drm_i915_private *dev_priv); void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv); void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index ff303c7d3a57..bbdc34a23d54 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -811,6 +811,14 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc) fbc->funcs->program_cfb(fbc); } +static void intel_fbc_program_workarounds(struct intel_fbc *fbc) +{ + /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,dg2,adlp */ + if (DISPLAY_VER(fbc->i915) >= 11) + intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0, + DPFC_CHICKEN_FORCE_SLB_INVALIDATION); +} + static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; @@ -1086,7 +1094,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, */ if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) { plane_state->no_fbc_reason = "PSR2 enabled"; - return false; + return 0; } if (!pixel_format_is_valid(plane_state)) { @@ -1112,7 +1120,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && fb->format->has_alpha) { plane_state->no_fbc_reason = "per-pixel alpha not supported"; - return false; + return 0; } if (!intel_fbc_hw_tracking_covers_screen(plane_state)) { @@ -1128,7 +1136,7 @@ static int 
intel_fbc_check_plane(struct intel_atomic_state *state, if (DISPLAY_VER(i915) >= 9 && plane_state->view.color_plane[0].y & 3) { plane_state->no_fbc_reason = "plane start Y offset misaligned"; - return false; + return 0; } /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ @@ -1136,7 +1144,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, (plane_state->view.color_plane[0].y + (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) { plane_state->no_fbc_reason = "plane end Y offset misaligned"; - return false; + return 0; } /* WaFbcExceedCdClockThreshold:hsw,bdw */ @@ -1462,6 +1470,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state, intel_fbc_update_state(state, crtc, plane); + intel_fbc_program_workarounds(fbc); intel_fbc_program_cfb(fbc); } diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 03398feb6676..d1d1b59102d6 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -75,13 +75,17 @@ const struct drm_display_mode * intel_panel_downclock_mode(struct intel_connector *connector, const struct drm_display_mode *adjusted_mode) { + struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *fixed_mode, *best_mode = NULL; - int vrefresh = drm_mode_vrefresh(adjusted_mode); + int min_vrefresh = i915->vbt.seamless_drrs_min_refresh_rate; + int max_vrefresh = drm_mode_vrefresh(adjusted_mode); /* pick the fixed_mode with the lowest refresh rate */ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) { - if (drm_mode_vrefresh(fixed_mode) < vrefresh) { - vrefresh = drm_mode_vrefresh(fixed_mode); + int vrefresh = drm_mode_vrefresh(fixed_mode); + + if (vrefresh >= min_vrefresh && vrefresh < max_vrefresh) { + max_vrefresh = vrefresh; best_mode = fixed_mode; } } diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 64bd4ca0edd4..5a598dd06039 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -6,6 +6,7 @@ #include "g4x_dp.h" #include "i915_drv.h" #include "intel_de.h" +#include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dpll.h" diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 8ec7c161284b..06db407e2749 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -1353,6 +1353,9 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0); intel_dp->psr.enabled = false; + intel_dp->psr.psr2_enabled = false; + intel_dp->psr.psr2_sel_fetch_enabled = false; + intel_dp->psr.psr2_sel_fetch_cff_enabled = false; } /** diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index fc037c027ea5..b8b822ea3755 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "i915_reg.h" #include "intel_display.h" +#include "intel_display_power_map.h" #include "intel_display_types.h" #include "intel_dp_mst.h" #include "intel_tc.h" @@ -61,10 +62,12 @@ bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port) static enum intel_display_power_domain tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode) { + struct drm_i915_private *i915 = 
to_i915(dig_port->base.base.dev); + if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port)) return POWER_DOMAIN_TC_COLD_OFF; - return intel_legacy_aux_to_power_domain(dig_port->aux_ch); + return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); } static intel_wakeref_t diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index e4a11c3e3f3e..4b98bab3b890 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -735,7 +735,7 @@ struct lvds_lfp_data_ptr { } __packed; struct bdb_lvds_lfp_data_ptrs { - u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ + u8 lvds_entries; struct lvds_lfp_data_ptr ptr[16]; struct lvds_lfp_data_ptr_table panel_name; /* 156-163? */ } __packed; @@ -769,6 +769,11 @@ struct lvds_pnp_id { u8 mfg_year; } __packed; +/* + * For reference only. fp_timing has variable size so + * the data must be accessed using the data table pointers. + * Do not use this directly! + */ struct lvds_lfp_data_entry { struct lvds_fp_timing fp_timing; struct lvds_dvo_timing dvo_timing; @@ -783,6 +788,23 @@ struct lvds_lfp_panel_name { u8 name[13]; } __packed; +struct lvds_lfp_black_border { + u8 top; /* 227 */ + u8 bottom; /* 227 */ + u8 left; /* 238 */ + u8 right; /* 238 */ +} __packed; + +struct bdb_lvds_lfp_data_tail { + struct lvds_lfp_panel_name panel_name[16]; /* 156-163? */ + u16 scaling_enable; /* 187 */ + u8 seamless_drrs_min_refresh_rate[16]; /* 188 */ + u8 pixel_overlap_count[16]; /* 208 */ + struct lvds_lfp_black_border black_border[16]; /* 227 */ + u16 dual_lfp_port_sync_enable; /* 231 */ + u16 gpu_dithering_for_banding_artifacts; /* 245 */ +} __packed; + /* * Block 43 - LFP Backlight Control Data Block */ diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index ea8324abc784..1699f644298e 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -1,9 +1,25 @@ # SPDX-License-Identifier: GPL-2.0 -GVT_DIR := gvt -GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \ - interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \ - execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \ - fb_decoder.o dmabuf.o page_track.o -ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/ -i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) +kvmgt-$(CONFIG_DRM_I915_GVT) += \ + gvt/aperture_gm.o \ + gvt/cfg_space.o \ + gvt/cmd_parser.o \ + gvt/debugfs.o \ + gvt/display.o \ + gvt/dmabuf.o \ + gvt/edid.o \ + gvt/execlist.o \ + gvt/fb_decoder.o \ + gvt/firmware.o \ + gvt/gtt.o \ + gvt/handlers.o \ + gvt/interrupt.o \ + gvt/kvmgt.o \ + gvt/mmio.o \ + gvt/mmio_context.o \ + gvt/opregion.o \ + gvt/page_track.o \ + gvt/sched_policy.o \ + gvt/scheduler.o \ + gvt/trace_points.o \ + gvt/vgpu.o diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index b490e3db2e38..dad3a6054335 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -129,60 +129,16 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static int map_aperture(struct intel_vgpu *vgpu, bool map) +static void map_aperture(struct intel_vgpu *vgpu, bool map) { - phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu); - unsigned long aperture_sz = vgpu_aperture_sz(vgpu); - u64 first_gfn; - u64 val; - int ret; - - if (map == 
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked) - return 0; - - val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2]; - if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) - val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2); - else - val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2); - - first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT; - - ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn, - aperture_pa >> PAGE_SHIFT, - aperture_sz >> PAGE_SHIFT, - map); - if (ret) - return ret; - - vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map; - return 0; + if (map != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked) + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map; } -static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap) +static void trap_gttmmio(struct intel_vgpu *vgpu, bool trap) { - u64 start, end; - u64 val; - int ret; - - if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked) - return 0; - - val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0]; - if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) - start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); - else - start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); - - start &= ~GENMASK(3, 0); - end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1; - - ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap); - if (ret) - return ret; - - vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap; - return 0; + if (trap != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked) + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap; } static int emulate_pci_command_write(struct intel_vgpu *vgpu, @@ -191,26 +147,17 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, u8 old = vgpu_cfg_space(vgpu)[offset]; u8 new = *(u8 *)p_data; u8 changed = old ^ new; - int ret; vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); if (!(changed & PCI_COMMAND_MEMORY)) return 0; if (old & PCI_COMMAND_MEMORY) { - ret = trap_gttmmio(vgpu, false); - if (ret) - return ret; - ret = map_aperture(vgpu, false); - if (ret) - return ret; + trap_gttmmio(vgpu, false); + map_aperture(vgpu, false); } else { - ret = trap_gttmmio(vgpu, true); - if (ret) - return ret; - ret = map_aperture(vgpu, true); - if (ret) - return ret; + trap_gttmmio(vgpu, true); + map_aperture(vgpu, true); } return 0; @@ -230,13 +177,12 @@ static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu, return 0; } -static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, +static void emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 new = *(u32 *)(p_data); bool lo = IS_ALIGNED(offset, 8); u64 size; - int ret = 0; bool mmio_enabled = vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY; struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar; @@ -259,14 +205,14 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, * Untrap the BAR, since guest hasn't configured a * valid GPA */ - ret = trap_gttmmio(vgpu, false); + trap_gttmmio(vgpu, false); break; case PCI_BASE_ADDRESS_2: case PCI_BASE_ADDRESS_3: size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1); intel_vgpu_write_pci_bar(vgpu, offset, size >> (lo ? 
0 : 32), lo); - ret = map_aperture(vgpu, false); + map_aperture(vgpu, false); break; default: /* Unimplemented BARs */ @@ -282,19 +228,18 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, */ trap_gttmmio(vgpu, false); intel_vgpu_write_pci_bar(vgpu, offset, new, lo); - ret = trap_gttmmio(vgpu, mmio_enabled); + trap_gttmmio(vgpu, mmio_enabled); break; case PCI_BASE_ADDRESS_2: case PCI_BASE_ADDRESS_3: map_aperture(vgpu, false); intel_vgpu_write_pci_bar(vgpu, offset, new, lo); - ret = map_aperture(vgpu, mmio_enabled); + map_aperture(vgpu, mmio_enabled); break; default: intel_vgpu_write_pci_bar(vgpu, offset, new, lo); } } - return ret; } /** @@ -336,8 +281,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5: if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; - return emulate_pci_bar_write(vgpu, offset, p_data, bytes); - + emulate_pci_bar_write(vgpu, offset, p_data, bytes); + break; case INTEL_GVT_PCI_SWSCI: if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 2459213b6c87..b9eb75a2b400 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1011,7 +1011,7 @@ static int cmd_reg_handler(struct parser_exec_state *s, if (GRAPHICS_VER(s->engine->i915) == 9 && intel_gvt_mmio_is_sr_in_ctx(gvt, offset) && !strncmp(cmd, "lri", 3)) { - intel_gvt_hypervisor_read_gpa(s->vgpu, + intel_gvt_read_gpa(s->vgpu, s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); /* check inhibit context */ if (ctx_sr_ctl & 1) { @@ -1775,7 +1775,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm, copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ? 
I915_GTT_PAGE_SIZE - offset : end_gma - gma; - intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len); + intel_gvt_read_gpa(vgpu, gpa, va + len, copy_len); len += copy_len; gma += copy_len; diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index c95c25d2addb..01e54b45c5c1 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -29,7 +29,7 @@ */ #include <linux/dma-buf.h> -#include <linux/vfio.h> +#include <linux/mdev.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane.h> @@ -42,24 +42,6 @@ #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12)) -static int vgpu_pin_dma_address(struct intel_vgpu *vgpu, - unsigned long size, - dma_addr_t dma_addr) -{ - int ret = 0; - - if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr)) - ret = -EINVAL; - - return ret; -} - -static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu, - dma_addr_t dma_addr) -{ - intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr); -} - static int vgpu_gem_get_pages( struct drm_i915_gem_object *obj) { @@ -95,7 +77,7 @@ static int vgpu_gem_get_pages( for_each_sg(st->sgl, sg, page_num, i) { dma_addr_t dma_addr = GEN8_DECODE_PTE(readq(&gtt_entries[i])); - if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) { + if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) { ret = -EINVAL; goto out; } @@ -114,7 +96,7 @@ out: for_each_sg(st->sgl, sg, i, j) { dma_addr = sg_dma_address(sg); if (dma_addr) - vgpu_unpin_dma_address(vgpu, dma_addr); + intel_gvt_dma_unmap_guest_page(vgpu, dma_addr); } sg_free_table(st); kfree(st); @@ -136,7 +118,7 @@ static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj, int i; for_each_sg(pages->sgl, sg, fb_info->size, i) - vgpu_unpin_dma_address(vgpu, + intel_gvt_dma_unmap_guest_page(vgpu, sg_dma_address(sg)); } @@ -157,7 +139,6 @@ static void dmabuf_gem_object_free(struct kref *kref) dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj == obj) { list_del(pos); - intel_gvt_hypervisor_put_vfio_device(vgpu); idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); kfree(dmabuf_obj->info); @@ -491,14 +472,6 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args) kref_init(&dmabuf_obj->kref); - mutex_lock(&vgpu->dmabuf_lock); - if (intel_gvt_hypervisor_get_vfio_device(vgpu)) { - gvt_vgpu_err("get vfio device failed\n"); - mutex_unlock(&vgpu->dmabuf_lock); - goto out_free_info; - } - mutex_unlock(&vgpu->dmabuf_lock); - update_fb_info(gfx_plane_info, &fb_info); INIT_LIST_HEAD(&dmabuf_obj->list); @@ -603,7 +576,6 @@ void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu) dmabuf_obj->vgpu = NULL; idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); - intel_gvt_hypervisor_put_vfio_device(vgpu); list_del(pos); /* dmabuf_obj might be freed in dmabuf_obj_put */ diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 66d354c4195b..274c6ef42400 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -159,12 +159,12 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist, hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, vgpu->hws_pga[execlist->engine->id]); if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) { - intel_gvt_hypervisor_write_gpa(vgpu, - hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8, - status, 8); - intel_gvt_hypervisor_write_gpa(vgpu, - hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4, - &write_pointer, 4); + intel_gvt_write_gpa(vgpu, + hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4
+ write_pointer * 8, + status, 8); + intel_gvt_write_gpa(vgpu, + hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4, + &write_pointer, 4); } gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n", diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 1a8274a3f4b1..54fe442238c6 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -66,22 +66,16 @@ static struct bin_attribute firmware_attr = { .mmap = NULL, }; -static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data) -{ - *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore, - _MMIO(offset)); - return 0; -} - static int expose_firmware_sysfs(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; - struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); + struct drm_i915_private *i915 = gvt->gt->i915; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct gvt_firmware_header *h; void *firmware; void *p; unsigned long size, crc32_start; - int i, ret; + int ret; size = sizeof(*h) + info->mmio_size + info->cfg_space_size; firmware = vzalloc(size); @@ -99,17 +93,16 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) p = firmware + h->cfg_space_offset; - for (i = 0; i < h->cfg_space_size; i += 4) - pci_read_config_dword(pdev, i, p + i); - - memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size); + memcpy(gvt->firmware.cfg_space, i915->vgpu.initial_cfg_space, + info->cfg_space_size); + memcpy(p, gvt->firmware.cfg_space, info->cfg_space_size); p = firmware + h->mmio_offset; - /* Take a snapshot of hw mmio registers. */ - intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p); + memcpy(gvt->firmware.mmio, i915->vgpu.initial_mmio, + info->mmio_size); - memcpy(gvt->firmware.mmio, p, info->mmio_size); + memcpy(p, gvt->firmware.mmio, info->mmio_size); crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start); diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index d4082f4b9be1..9c5cc2800975 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -49,6 +49,22 @@ static bool enable_out_of_sync = false; static int preallocated_oos_pages = 8192; +static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) +{ + struct kvm *kvm = vgpu->kvm; + int idx; + bool ret; + + if (!vgpu->attached) + return false; + + idx = srcu_read_lock(&kvm->srcu); + ret = kvm_is_visible_gfn(kvm, gfn); + srcu_read_unlock(&kvm->srcu, idx); + + return ret; +} + /* * validate a gm address and related range size, * translate it to host gm address @@ -314,7 +330,7 @@ static inline int gtt_get_entry64(void *pt, return -EINVAL; if (hypervisor_access) { - ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa + + ret = intel_gvt_read_gpa(vgpu, gpa + (index << info->gtt_entry_size_shift), &e->val64, 8); if (WARN_ON(ret)) @@ -339,7 +355,7 @@ static inline int gtt_set_entry64(void *pt, return -EINVAL; if (hypervisor_access) { - ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa + + ret = intel_gvt_write_gpa(vgpu, gpa + (index << info->gtt_entry_size_shift), &e->val64, 8); if (WARN_ON(ret)) @@ -997,7 +1013,7 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt, if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn) return; - intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); + intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); } static 
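/*
 * intel_gvt_is_valid_gfn() above consults KVM's memslots directly instead
 * of going through the removed hypervisor ops. Memslots are protected by
 * kvm->srcu, hence the read-side lock around the lookup. Generic form of
 * that access pattern (sketch; function name hypothetical, needs
 * <linux/kvm_host.h>):
 */
static bool example_gfn_is_visible(struct kvm *kvm, gfn_t gfn)
{
	bool visible;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);	/* pin the memslot array */
	visible = kvm_is_visible_gfn(kvm, gfn);
	srcu_read_unlock(&kvm->srcu, idx);

	return visible;
}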
int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt) @@ -1162,15 +1178,16 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *entry) { const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; - unsigned long pfn; + kvm_pfn_t pfn; if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M)) return 0; - pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry)); - if (pfn == INTEL_GVT_INVALID_ADDR) + if (!vgpu->attached) + return -EINVAL; + pfn = gfn_to_pfn(vgpu->kvm, ops->get_pfn(entry)); + if (is_error_noslot_pfn(pfn)) return -EINVAL; - return PageTransHuge(pfn_to_page(pfn)); } @@ -1195,8 +1212,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, return PTR_ERR(sub_spt); for_each_shadow_entry(sub_spt, &sub_se, sub_index) { - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, - start_gfn + sub_index, PAGE_SIZE, &dma_addr); + ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index, + PAGE_SIZE, &dma_addr); if (ret) { ppgtt_invalidate_spt(spt); return ret; @@ -1241,8 +1258,8 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu, ops->set_64k_splited(&entry); for (i = 0; i < GTT_64K_PTE_STRIDE; i++) { - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, - start_gfn + i, PAGE_SIZE, &dma_addr); + ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i, + PAGE_SIZE, &dma_addr); if (ret) return ret; @@ -1296,8 +1313,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, } /* direct shadow */ - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size, - &dma_addr); + ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr); if (ret) return -ENXIO; @@ -1331,7 +1347,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) ppgtt_set_shadow_entry(spt, &se, i); } else { gfn = ops->get_pfn(&ge); - if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) { + if (!intel_gvt_is_valid_gfn(vgpu, gfn)) { ops->set_pfn(&se, gvt->gtt.scratch_mfn); ppgtt_set_shadow_entry(spt, &se, i); continue; @@ -1497,7 +1513,7 @@ static int attach_oos_page(struct intel_vgpu_oos_page *oos_page, struct intel_gvt *gvt = spt->vgpu->gvt; int ret; - ret = intel_gvt_hypervisor_read_gpa(spt->vgpu, + ret = intel_gvt_read_gpa(spt->vgpu, spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, oos_page->mem, I915_GTT_PAGE_SIZE); if (ret) @@ -2228,8 +2244,7 @@ static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, pfn = pte_ops->get_pfn(entry); if (pfn != vgpu->gvt->gtt.scratch_mfn) - intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, - pfn << PAGE_SHIFT); + intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); } static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, @@ -2315,13 +2330,13 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, /* one PTE update may be issued in multiple writes and the * first write may not construct a valid gfn */ - if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) { + if (!intel_gvt_is_valid_gfn(vgpu, gfn)) { ops->set_pfn(&m, gvt->gtt.scratch_mfn); goto out; } - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, - PAGE_SIZE, &dma_addr); + ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, + &dma_addr); if (ret) { gvt_vgpu_err("fail to populate guest ggtt entry\n"); /* guest driver may read/write the entry when partial diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c deleted file mode 100644 index f0b69e4dcb52..000000000000 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ /dev/null @@ -1,340 +0,0 @@ -/* - * 
Copyright(c) 2011-2016 Intel Corporation. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * Authors: - * Kevin Tian <kevin.tian@intel.com> - * Eddie Dong <eddie.dong@intel.com> - * - * Contributors: - * Niu Bing <bing.niu@intel.com> - * Zhi Wang <zhi.a.wang@intel.com> - * - */ - -#include <linux/types.h> -#include <linux/kthread.h> - -#include "i915_drv.h" -#include "intel_gvt.h" -#include "gvt.h" -#include <linux/vfio.h> -#include <linux/mdev.h> - -struct intel_gvt_host intel_gvt_host; - -static const char * const supported_hypervisors[] = { - [INTEL_GVT_HYPERVISOR_XEN] = "XEN", - [INTEL_GVT_HYPERVISOR_KVM] = "KVM", -}; - -static const struct intel_gvt_ops intel_gvt_ops = { - .emulate_cfg_read = intel_vgpu_emulate_cfg_read, - .emulate_cfg_write = intel_vgpu_emulate_cfg_write, - .emulate_mmio_read = intel_vgpu_emulate_mmio_read, - .emulate_mmio_write = intel_vgpu_emulate_mmio_write, - .vgpu_create = intel_gvt_create_vgpu, - .vgpu_destroy = intel_gvt_destroy_vgpu, - .vgpu_release = intel_gvt_release_vgpu, - .vgpu_reset = intel_gvt_reset_vgpu, - .vgpu_activate = intel_gvt_activate_vgpu, - .vgpu_deactivate = intel_gvt_deactivate_vgpu, - .vgpu_query_plane = intel_vgpu_query_plane, - .vgpu_get_dmabuf = intel_vgpu_get_dmabuf, - .write_protect_handler = intel_vgpu_page_track_handler, - .emulate_hotplug = intel_vgpu_emulate_hotplug, -}; - -static void init_device_info(struct intel_gvt *gvt) -{ - struct intel_gvt_device_info *info = &gvt->device_info; - struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); - - info->max_support_vgpus = 8; - info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; - info->mmio_size = 2 * 1024 * 1024; - info->mmio_bar = 0; - info->gtt_start_offset = 8 * 1024 * 1024; - info->gtt_entry_size = 8; - info->gtt_entry_size_shift = 3; - info->gmadr_bytes_in_cmd = 8; - info->max_surface_size = 36 * 1024 * 1024; - info->msi_cap_offset = pdev->msi_cap; -} - -static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt) -{ - struct intel_vgpu *vgpu; - int id; - - mutex_lock(&gvt->lock); - idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) { - if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id, - (void *)&gvt->service_request)) { - if (vgpu->active) - intel_vgpu_emulate_vblank(vgpu); - } - } - mutex_unlock(&gvt->lock); -} - -static int gvt_service_thread(void *data) -{ - struct intel_gvt *gvt = (struct intel_gvt *)data; - int ret; - - gvt_dbg_core("service thread start\n"); - - 
while (!kthread_should_stop()) { - ret = wait_event_interruptible(gvt->service_thread_wq, - kthread_should_stop() || gvt->service_request); - - if (kthread_should_stop()) - break; - - if (WARN_ONCE(ret, "service thread is waken up by signal.\n")) - continue; - - intel_gvt_test_and_emulate_vblank(gvt); - - if (test_bit(INTEL_GVT_REQUEST_SCHED, - (void *)&gvt->service_request) || - test_bit(INTEL_GVT_REQUEST_EVENT_SCHED, - (void *)&gvt->service_request)) { - intel_gvt_schedule(gvt); - } - } - - return 0; -} - -static void clean_service_thread(struct intel_gvt *gvt) -{ - kthread_stop(gvt->service_thread); -} - -static int init_service_thread(struct intel_gvt *gvt) -{ - init_waitqueue_head(&gvt->service_thread_wq); - - gvt->service_thread = kthread_run(gvt_service_thread, - gvt, "gvt_service_thread"); - if (IS_ERR(gvt->service_thread)) { - gvt_err("fail to start service thread.\n"); - return PTR_ERR(gvt->service_thread); - } - return 0; -} - -/** - * intel_gvt_clean_device - clean a GVT device - * @i915: i915 private - * - * This function is called at the driver unloading stage, to free the - * resources owned by a GVT device. - * - */ -void intel_gvt_clean_device(struct drm_i915_private *i915) -{ - struct intel_gvt *gvt = fetch_and_zero(&i915->gvt); - - if (drm_WARN_ON(&i915->drm, !gvt)) - return; - - intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); - intel_gvt_clean_vgpu_types(gvt); - - intel_gvt_debugfs_clean(gvt); - clean_service_thread(gvt); - intel_gvt_clean_cmd_parser(gvt); - intel_gvt_clean_sched_policy(gvt); - intel_gvt_clean_workload_scheduler(gvt); - intel_gvt_clean_gtt(gvt); - intel_gvt_free_firmware(gvt); - intel_gvt_clean_mmio_info(gvt); - idr_destroy(&gvt->vgpu_idr); - - kfree(i915->gvt); -} - -/** - * intel_gvt_init_device - initialize a GVT device - * @i915: drm i915 private data - * - * This function is called at the initialization stage, to initialize - * necessary GVT components. - * - * Returns: - * Zero on success, negative error code if failed. 
- * - */ -int intel_gvt_init_device(struct drm_i915_private *i915) -{ - struct intel_gvt *gvt; - struct intel_vgpu *vgpu; - int ret; - - if (drm_WARN_ON(&i915->drm, i915->gvt)) - return -EEXIST; - - gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL); - if (!gvt) - return -ENOMEM; - - gvt_dbg_core("init gvt device\n"); - - idr_init_base(&gvt->vgpu_idr, 1); - spin_lock_init(&gvt->scheduler.mmio_context_lock); - mutex_init(&gvt->lock); - mutex_init(&gvt->sched_lock); - gvt->gt = to_gt(i915); - i915->gvt = gvt; - - init_device_info(gvt); - - ret = intel_gvt_setup_mmio_info(gvt); - if (ret) - goto out_clean_idr; - - intel_gvt_init_engine_mmio_context(gvt); - - ret = intel_gvt_load_firmware(gvt); - if (ret) - goto out_clean_mmio_info; - - ret = intel_gvt_init_irq(gvt); - if (ret) - goto out_free_firmware; - - ret = intel_gvt_init_gtt(gvt); - if (ret) - goto out_free_firmware; - - ret = intel_gvt_init_workload_scheduler(gvt); - if (ret) - goto out_clean_gtt; - - ret = intel_gvt_init_sched_policy(gvt); - if (ret) - goto out_clean_workload_scheduler; - - ret = intel_gvt_init_cmd_parser(gvt); - if (ret) - goto out_clean_sched_policy; - - ret = init_service_thread(gvt); - if (ret) - goto out_clean_cmd_parser; - - ret = intel_gvt_init_vgpu_types(gvt); - if (ret) - goto out_clean_thread; - - vgpu = intel_gvt_create_idle_vgpu(gvt); - if (IS_ERR(vgpu)) { - ret = PTR_ERR(vgpu); - gvt_err("failed to create idle vgpu\n"); - goto out_clean_types; - } - gvt->idle_vgpu = vgpu; - - intel_gvt_debugfs_init(gvt); - - gvt_dbg_core("gvt device initialization is done\n"); - intel_gvt_host.dev = i915->drm.dev; - intel_gvt_host.initialized = true; - return 0; - -out_clean_types: - intel_gvt_clean_vgpu_types(gvt); -out_clean_thread: - clean_service_thread(gvt); -out_clean_cmd_parser: - intel_gvt_clean_cmd_parser(gvt); -out_clean_sched_policy: - intel_gvt_clean_sched_policy(gvt); -out_clean_workload_scheduler: - intel_gvt_clean_workload_scheduler(gvt); -out_clean_gtt: - intel_gvt_clean_gtt(gvt); -out_free_firmware: - intel_gvt_free_firmware(gvt); -out_clean_mmio_info: - intel_gvt_clean_mmio_info(gvt); -out_clean_idr: - idr_destroy(&gvt->vgpu_idr); - kfree(gvt); - i915->gvt = NULL; - return ret; -} - -int -intel_gvt_pm_resume(struct intel_gvt *gvt) -{ - intel_gvt_restore_fence(gvt); - intel_gvt_restore_mmio(gvt); - intel_gvt_restore_ggtt(gvt); - return 0; -} - -int -intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m) -{ - int ret; - void *gvt; - - if (!intel_gvt_host.initialized) - return -ENODEV; - - if (m->type != INTEL_GVT_HYPERVISOR_KVM && - m->type != INTEL_GVT_HYPERVISOR_XEN) - return -EINVAL; - - /* Get a reference for device model module */ - if (!try_module_get(THIS_MODULE)) - return -ENODEV; - - intel_gvt_host.mpt = m; - intel_gvt_host.hypervisor_type = m->type; - gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt; - - ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt, - &intel_gvt_ops); - if (ret < 0) { - gvt_err("Failed to init %s hypervisor module\n", - supported_hypervisors[intel_gvt_host.hypervisor_type]); - module_put(THIS_MODULE); - return -ENODEV; - } - gvt_dbg_core("Running with hypervisor %s in host mode\n", - supported_hypervisors[intel_gvt_host.hypervisor_type]); - return 0; -} -EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor); - -void -intel_gvt_unregister_hypervisor(void) -{ - void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt; - intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt); - module_put(THIS_MODULE); -} -EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor); 
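(An aside on the code being deleted above: intel_gvt_init_device() is a textbook instance of the kernel's goto-based unwind pattern. Resources are acquired step by step, and a failure jumps to the label that tears down, in reverse order, exactly what has been set up so far. A minimal standalone sketch of the idiom follows; setup_a()/setup_b() and their teardowns are illustrative placeholders, not kernel APIs.)

#include <errno.h>
#include <stdio.h>

/* Placeholder init/teardown steps -- not kernel APIs. */
static int setup_a(void) { puts("setup_a"); return 0; }
static void teardown_a(void) { puts("teardown_a"); }
static int setup_b(void) { puts("setup_b"); return -ENOMEM; /* simulated failure */ }

static int init_device(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = setup_b();
	if (ret)
		goto out_teardown_a;	/* undo setup_a, then bail */

	return 0;

out_teardown_a:
	teardown_a();
	return ret;
}

int main(void)
{
	return init_device() ? 1 : 0;
}

(Keeping the labels in strict reverse order of acquisition lets each later step reuse the earlier steps' cleanup chain, which is why the out_clean_* ladder in intel_gvt_init_device() unwinds cleanly from any failure point.)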
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 0ebffc327528..03ecffc2ba56 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -34,11 +34,13 @@
 #define _GVT_H_
 
 #include <uapi/linux/pci_regs.h>
+#include <linux/kvm_host.h>
+#include <linux/vfio.h>
 
 #include "i915_drv.h"
+#include "intel_gvt.h"
 
 #include "debug.h"
-#include "hypercall.h"
 #include "mmio.h"
 #include "reg.h"
 #include "interrupt.h"
@@ -56,15 +58,6 @@
 
 #define GVT_MAX_VGPU 8
 
-struct intel_gvt_host {
-	struct device *dev;
-	bool initialized;
-	int hypervisor_type;
-	const struct intel_gvt_mpt *mpt;
-};
-
-extern struct intel_gvt_host intel_gvt_host;
-
 /* Describe per-platform limitations. */
 struct intel_gvt_device_info {
 	u32 max_support_vgpus;
@@ -176,12 +169,14 @@ struct intel_vgpu_submission {
 	} last_ctx[I915_NUM_ENGINES];
 };
 
+#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
+
 struct intel_vgpu {
 	struct intel_gvt *gvt;
 	struct mutex vgpu_lock;
 	int id;
-	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
 	bool active;
+	bool attached;
 	bool pv_notified;
 	bool failsafe;
 	unsigned int resetting_eng;
@@ -209,21 +204,40 @@ struct intel_vgpu {
 
 	struct dentry *debugfs;
 
-	/* Hypervisor-specific device state. */
-	void *vdev;
-
 	struct list_head dmabuf_obj_list_head;
 	struct mutex dmabuf_lock;
 	struct idr object_idr;
 	struct intel_vgpu_vblank_timer vblank_timer;
 
 	u32 scan_nonprivbb;
-};
 
-static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
-{
-	return vgpu->vdev;
-}
+	struct vfio_device vfio_device;
+	struct vfio_region *region;
+	int num_regions;
+	struct eventfd_ctx *intx_trigger;
+	struct eventfd_ctx *msi_trigger;
+
+	/*
+	 * Two caches are used to avoid mapping duplicated pages (e.g.
+	 * scratch pages). This helps to reduce dma setup overhead.
+ */ + struct rb_root gfn_cache; + struct rb_root dma_addr_cache; + unsigned long nr_cache_entries; + struct mutex cache_lock; + + struct notifier_block iommu_notifier; + struct notifier_block group_notifier; + struct kvm *kvm; + struct work_struct release_work; + atomic_t released; + struct vfio_group *vfio_group; + + struct kvm_page_track_notifier_node track_node; +#define NR_BKT (1 << 18) + struct hlist_head ptable[NR_BKT]; +#undef NR_BKT +}; /* validating GM healthy status*/ #define vgpu_is_vm_unhealthy(ret_val) \ @@ -272,7 +286,7 @@ struct intel_gvt_mmio { /* Value of command write of this reg needs to be patched */ #define F_CMD_WRITE_PATCH (1 << 8) - const struct gvt_mmio_block *mmio_block; + struct gvt_mmio_block *mmio_block; unsigned int num_mmio_block; DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); @@ -428,7 +442,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); #define RING_CTX_SIZE 320 struct intel_vgpu_creation_params { - __u64 handle; __u64 low_gm_sz; /* in MB */ __u64 high_gm_sz; /* in MB */ __u64 fence_sz; @@ -496,6 +509,9 @@ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu); void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu); +int intel_gvt_set_opregion(struct intel_vgpu *vgpu); +int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num); + /* validating GM functions */ #define vgpu_gmadr_is_aperture(vgpu, gmadr) \ ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \ @@ -557,30 +573,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu); int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload); void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason); - -struct intel_gvt_ops { - int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *, - unsigned int); - int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *, - unsigned int); - int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *, - unsigned int); - int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *, - unsigned int); - struct intel_vgpu *(*vgpu_create)(struct intel_gvt *, - struct intel_vgpu_type *); - void (*vgpu_destroy)(struct intel_vgpu *vgpu); - void (*vgpu_release)(struct intel_vgpu *vgpu); - void (*vgpu_reset)(struct intel_vgpu *); - void (*vgpu_activate)(struct intel_vgpu *); - void (*vgpu_deactivate)(struct intel_vgpu *); - int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *); - int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int); - int (*write_protect_handler)(struct intel_vgpu *, u64, void *, - unsigned int); - void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected); -}; - +void intel_vgpu_detach_regions(struct intel_vgpu *vgpu); enum { GVT_FAILSAFE_UNSUPPORTED_GUEST, @@ -724,13 +717,54 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch( return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH; } +/** + * intel_gvt_read_gpa - copy data from GPA to host data buffer + * @vgpu: a vGPU + * @gpa: guest physical address + * @buf: host data buffer + * @len: data length + * + * Returns: + * Zero on success, negative error code if failed. 
+ */ +static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, + void *buf, unsigned long len) +{ + if (!vgpu->attached) + return -ESRCH; + return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false); +} + +/** + * intel_gvt_write_gpa - copy data from host data buffer to GPA + * @vgpu: a vGPU + * @gpa: guest physical address + * @buf: host data buffer + * @len: data length + * + * Returns: + * Zero on success, negative error code if failed. + */ +static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu, + unsigned long gpa, void *buf, unsigned long len) +{ + if (!vgpu->attached) + return -ESRCH; + return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true); +} + void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_init(struct intel_gvt *gvt); void intel_gvt_debugfs_clean(struct intel_gvt *gvt); -int intel_gvt_pm_resume(struct intel_gvt *gvt); +int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn); +int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn); +int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr); +int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, + unsigned long size, dma_addr_t *dma_addr); +void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, + dma_addr_t dma_addr); #include "trace.h" -#include "mpt.h" #endif diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 57b0f4977760..beea5895e499 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -72,7 +72,7 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) return 0; } -bool intel_gvt_match_device(struct intel_gvt *gvt, +static bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device) { return intel_gvt_get_device_type(gvt) & device; @@ -102,12 +102,11 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, return NULL; } -static int new_mmio_info(struct intel_gvt *gvt, - u32 offset, u16 flags, u32 size, - u32 addr_mask, u32 ro_mask, u32 device, - gvt_mmio_func read, gvt_mmio_func write) +static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size, + u16 flags, u32 addr_mask, u32 ro_mask, u32 device, + gvt_mmio_func read, gvt_mmio_func write) { - struct intel_gvt_mmio_info *info, *p; + struct intel_gvt_mmio_info *p; u32 start, end, i; if (!intel_gvt_match_device(gvt, device)) @@ -120,32 +119,18 @@ static int new_mmio_info(struct intel_gvt *gvt, end = offset + size; for (i = start; i < end; i += 4) { - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return -ENOMEM; - - info->offset = i; - p = intel_gvt_find_mmio_info(gvt, info->offset); - if (p) { - WARN(1, "dup mmio definition offset %x\n", - info->offset); - kfree(info); - - /* We return -EEXIST here to make GVT-g load fail. - * So duplicated MMIO can be found as soon as - * possible. - */ - return -EEXIST; + p = intel_gvt_find_mmio_info(gvt, i); + if (!p) { + WARN(1, "assign a handler to a non-tracked mmio %x\n", + i); + return -ENODEV; } - - info->ro_mask = ro_mask; - info->device = device; - info->read = read ? read : intel_vgpu_default_mmio_read; - info->write = write ? 
write : intel_vgpu_default_mmio_write; - gvt->mmio.mmio_attribute[info->offset / 4] = flags; - INIT_HLIST_NODE(&info->node); - hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset); - gvt->mmio.num_tracked_mmio++; + p->ro_mask = ro_mask; + gvt->mmio.mmio_attribute[i / 4] = flags; + if (read) + p->read = read; + if (write) + p->write = write; } return 0; } @@ -2143,15 +2128,12 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, } #define MMIO_F(reg, s, f, am, rm, d, r, w) do { \ - ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \ - f, s, am, rm, d, r, w); \ + ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \ + s, f, am, rm, d, r, w); \ if (ret) \ return ret; \ } while (0) -#define MMIO_D(reg, d) \ - MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL) - #define MMIO_DH(reg, d, r, w) \ MMIO_F(reg, 4, 0, 0, 0, d, r, w) @@ -2176,9 +2158,6 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \ } while (0) -#define MMIO_RING_D(prefix, d) \ - MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL) - #define MMIO_RING_DFH(prefix, d, f, r, w) \ MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w) @@ -2202,7 +2181,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler); MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(SDEISR, D_ALL); MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL); @@ -2230,7 +2208,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL); MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL); MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL); - MMIO_D(GEN7_CXT_SIZE, D_ALL); MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL); MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL); @@ -2284,257 +2261,32 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); /* display */ - MMIO_F(_MMIO(0x60220), 0x20, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_D(_MMIO(0x602a0), D_ALL); - - MMIO_D(_MMIO(0x65050), D_ALL); - MMIO_D(_MMIO(0x650b4), D_ALL); - - MMIO_D(_MMIO(0xc4040), D_ALL); - MMIO_D(DERRMR, D_ALL); - - MMIO_D(PIPEDSL(PIPE_A), D_ALL); - MMIO_D(PIPEDSL(PIPE_B), D_ALL); - MMIO_D(PIPEDSL(PIPE_C), D_ALL); - MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL); - MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write); - - MMIO_D(PIPESTAT(PIPE_A), D_ALL); - MMIO_D(PIPESTAT(PIPE_B), D_ALL); - MMIO_D(PIPESTAT(PIPE_C), D_ALL); - MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL); - - MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL); - MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL); - MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL); - MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL); - - MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL); - MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL); - MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL); - MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL); - - MMIO_D(CURCNTR(PIPE_A), D_ALL); - MMIO_D(CURCNTR(PIPE_B), D_ALL); - MMIO_D(CURCNTR(PIPE_C), D_ALL); - - MMIO_D(CURPOS(PIPE_A), D_ALL); - MMIO_D(CURPOS(PIPE_B), D_ALL); - MMIO_D(CURPOS(PIPE_C), D_ALL); - - MMIO_D(CURBASE(PIPE_A), D_ALL); - MMIO_D(CURBASE(PIPE_B), D_ALL); - MMIO_D(CURBASE(PIPE_C), D_ALL); - - MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL); - MMIO_D(CUR_FBC_CTL(PIPE_B), 
D_ALL); - MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL); - - MMIO_D(_MMIO(0x700ac), D_ALL); - MMIO_D(_MMIO(0x710ac), D_ALL); - MMIO_D(_MMIO(0x720ac), D_ALL); - - MMIO_D(_MMIO(0x70090), D_ALL); - MMIO_D(_MMIO(0x70094), D_ALL); - MMIO_D(_MMIO(0x70098), D_ALL); - MMIO_D(_MMIO(0x7009c), D_ALL); - - MMIO_D(DSPCNTR(PIPE_A), D_ALL); - MMIO_D(DSPADDR(PIPE_A), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_A), D_ALL); - MMIO_D(DSPPOS(PIPE_A), D_ALL); - MMIO_D(DSPSIZE(PIPE_A), D_ALL); MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_A), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL); MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(DSPCNTR(PIPE_B), D_ALL); - MMIO_D(DSPADDR(PIPE_B), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_B), D_ALL); - MMIO_D(DSPPOS(PIPE_B), D_ALL); - MMIO_D(DSPSIZE(PIPE_B), D_ALL); MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_B), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL); MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(DSPCNTR(PIPE_C), D_ALL); - MMIO_D(DSPADDR(PIPE_C), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_C), D_ALL); - MMIO_D(DSPPOS(PIPE_C), D_ALL); - MMIO_D(DSPSIZE(PIPE_C), D_ALL); MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_C), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL); MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(SPRCTL(PIPE_A), D_ALL); - MMIO_D(SPRLINOFF(PIPE_A), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_A), D_ALL); - MMIO_D(SPRPOS(PIPE_A), D_ALL); - MMIO_D(SPRSIZE(PIPE_A), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_A), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_A), D_ALL); MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_A), D_ALL); - MMIO_D(SPROFFSET(PIPE_A), D_ALL); - MMIO_D(SPRSCALE(PIPE_A), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL); MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(SPRCTL(PIPE_B), D_ALL); - MMIO_D(SPRLINOFF(PIPE_B), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_B), D_ALL); - MMIO_D(SPRPOS(PIPE_B), D_ALL); - MMIO_D(SPRSIZE(PIPE_B), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_B), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_B), D_ALL); MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_B), D_ALL); - MMIO_D(SPROFFSET(PIPE_B), D_ALL); - MMIO_D(SPRSCALE(PIPE_B), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL); MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL, reg50080_mmio_write); - - MMIO_D(SPRCTL(PIPE_C), D_ALL); - MMIO_D(SPRLINOFF(PIPE_C), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_C), D_ALL); - MMIO_D(SPRPOS(PIPE_C), D_ALL); - MMIO_D(SPRSIZE(PIPE_C), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_C), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_C), D_ALL); MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_C), D_ALL); - MMIO_D(SPROFFSET(PIPE_C), D_ALL); - MMIO_D(SPRSCALE(PIPE_C), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL); MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL, reg50080_mmio_write); - MMIO_D(HTOTAL(TRANSCODER_A), D_ALL); - MMIO_D(HBLANK(TRANSCODER_A), D_ALL); - MMIO_D(HSYNC(TRANSCODER_A), D_ALL); - MMIO_D(VTOTAL(TRANSCODER_A), D_ALL); - MMIO_D(VBLANK(TRANSCODER_A), D_ALL); - MMIO_D(VSYNC(TRANSCODER_A), D_ALL); - MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL); - MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL); - MMIO_D(PIPESRC(TRANSCODER_A), D_ALL); - - MMIO_D(HTOTAL(TRANSCODER_B), D_ALL); - MMIO_D(HBLANK(TRANSCODER_B), D_ALL); - MMIO_D(HSYNC(TRANSCODER_B), D_ALL); - MMIO_D(VTOTAL(TRANSCODER_B), D_ALL); - 
MMIO_D(VBLANK(TRANSCODER_B), D_ALL); - MMIO_D(VSYNC(TRANSCODER_B), D_ALL); - MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL); - MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL); - MMIO_D(PIPESRC(TRANSCODER_B), D_ALL); - - MMIO_D(HTOTAL(TRANSCODER_C), D_ALL); - MMIO_D(HBLANK(TRANSCODER_C), D_ALL); - MMIO_D(HSYNC(TRANSCODER_C), D_ALL); - MMIO_D(VTOTAL(TRANSCODER_C), D_ALL); - MMIO_D(VBLANK(TRANSCODER_C), D_ALL); - MMIO_D(VSYNC(TRANSCODER_C), D_ALL); - MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL); - MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL); - MMIO_D(PIPESRC(TRANSCODER_C), D_ALL); - - MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL); - MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL); - MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL); - MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL); - MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL); - MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL); - MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL); - MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL); - - MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL); - MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL); - - MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL); - MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL); - - MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL); - MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL); - - MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL); - MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL); - - MMIO_D(PF_CTL(PIPE_A), D_ALL); - MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL); - MMIO_D(PF_WIN_POS(PIPE_A), D_ALL); - MMIO_D(PF_VSCALE(PIPE_A), D_ALL); - MMIO_D(PF_HSCALE(PIPE_A), D_ALL); - - MMIO_D(PF_CTL(PIPE_B), D_ALL); - MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL); - MMIO_D(PF_WIN_POS(PIPE_B), D_ALL); - MMIO_D(PF_VSCALE(PIPE_B), D_ALL); - MMIO_D(PF_HSCALE(PIPE_B), D_ALL); - - MMIO_D(PF_CTL(PIPE_C), D_ALL); - MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL); - MMIO_D(PF_WIN_POS(PIPE_C), D_ALL); - MMIO_D(PF_VSCALE(PIPE_C), D_ALL); - MMIO_D(PF_HSCALE(PIPE_C), D_ALL); - - MMIO_D(WM0_PIPE_ILK(PIPE_A), D_ALL); - MMIO_D(WM0_PIPE_ILK(PIPE_B), D_ALL); - MMIO_D(WM0_PIPE_ILK(PIPE_C), D_ALL); - MMIO_D(WM1_LP_ILK, D_ALL); - MMIO_D(WM2_LP_ILK, D_ALL); - MMIO_D(WM3_LP_ILK, D_ALL); - MMIO_D(WM1S_LP_ILK, D_ALL); - MMIO_D(WM2S_LP_IVB, D_ALL); - MMIO_D(WM3S_LP_IVB, D_ALL); - - MMIO_D(BLC_PWM_CPU_CTL2, D_ALL); - MMIO_D(BLC_PWM_CPU_CTL, D_ALL); - MMIO_D(BLC_PWM_PCH_CTL1, D_ALL); - MMIO_D(BLC_PWM_PCH_CTL2, D_ALL); - - MMIO_D(_MMIO(0x48268), D_ALL); - MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read, gmbus_mmio_write); MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, 
D_ALL, NULL, NULL); MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL, dp_aux_ch_ctl_mmio_write); @@ -2557,74 +2309,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status); MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status); MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status); - - MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A), D_ALL); - - MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B), D_ALL); - MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B), D_ALL); - - MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2), D_ALL); - MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2), D_ALL); - - MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL); - MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL); - MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL); - - MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL); - MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL); - MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL); - - MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL); - MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL); - MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL); - - MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL); - MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL); - MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL); - - MMIO_D(_MMIO(_FDI_RXA_MISC), D_ALL); - MMIO_D(_MMIO(_FDI_RXB_MISC), D_ALL); - MMIO_D(_MMIO(_FDI_RXA_TUSIZE1), D_ALL); - MMIO_D(_MMIO(_FDI_RXA_TUSIZE2), D_ALL); - MMIO_D(_MMIO(_FDI_RXB_TUSIZE1), D_ALL); - MMIO_D(_MMIO(_FDI_RXB_TUSIZE2), D_ALL); - MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write); - MMIO_D(PCH_PP_DIVISOR, D_ALL); - MMIO_D(PCH_PP_STATUS, D_ALL); - MMIO_D(PCH_LVDS, D_ALL); - MMIO_D(_MMIO(_PCH_DPLL_A), D_ALL); - MMIO_D(_MMIO(_PCH_DPLL_B), D_ALL); - MMIO_D(_MMIO(_PCH_FPA0), D_ALL); - MMIO_D(_MMIO(_PCH_FPA1), D_ALL); - MMIO_D(_MMIO(_PCH_FPB0), D_ALL); - MMIO_D(_MMIO(_PCH_FPB1), D_ALL); - MMIO_D(PCH_DREF_CONTROL, D_ALL); - MMIO_D(PCH_RAWCLK_FREQ, D_ALL); - MMIO_D(PCH_DPLL_SEL, D_ALL); - - MMIO_D(_MMIO(0x61208), D_ALL); - MMIO_D(_MMIO(0x6120c), D_ALL); - MMIO_D(PCH_PP_ON_DELAYS, D_ALL); - MMIO_D(PCH_PP_OFF_DELAYS, D_ALL); - MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL); MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL); MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL); @@ -2640,143 +2325,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) NULL, NULL); MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write); - MMIO_D(FUSE_STRAP, D_ALL); - MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL); - - MMIO_D(DISP_ARB_CTL, D_ALL); - MMIO_D(DISP_ARB_CTL2, D_ALL); - - MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL); - MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL); - MMIO_D(ILK_DSPCLK_GATE_D, D_ALL); - - MMIO_D(SOUTH_CHICKEN1, D_ALL); MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write); - MMIO_D(_MMIO(_TRANSA_CHICKEN1), D_ALL); - MMIO_D(_MMIO(_TRANSB_CHICKEN1), D_ALL); - MMIO_D(SOUTH_DSPCLK_GATE_D, 
D_ALL); - MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL); - MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL); - - MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A), D_ALL); - MMIO_D(ILK_FBC_RT_BASE, D_ALL); - - MMIO_D(IPS_CTL, D_ALL); - - MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL); - - MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL); - - MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL); - MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL); - - MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL); - MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL); - MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL); - MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL); - MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL); - MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL); - MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(_MMIO(0x60110), D_ALL); - MMIO_D(_MMIO(0x61110), D_ALL); - MMIO_F(_MMIO(0x70400), 0x40, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x71400), 0x40, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x72400), 0x40, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x70440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x71440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x72440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x7044c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - - MMIO_D(WM_LINETIME(PIPE_A), D_ALL); - MMIO_D(WM_LINETIME(PIPE_B), D_ALL); - MMIO_D(WM_LINETIME(PIPE_C), D_ALL); - MMIO_D(SPLL_CTL, D_ALL); - MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL); - MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_A), 
D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL); - MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL); - MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL); - MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL); - MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL); - - MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL); - MMIO_D(_MMIO(0x46508), D_ALL); - - MMIO_D(_MMIO(0x49080), D_ALL); - MMIO_D(_MMIO(0x49180), D_ALL); - MMIO_D(_MMIO(0x49280), D_ALL); - - MMIO_F(_MMIO(0x49090), 0x14, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x49190), 0x14, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x49290), 0x14, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(GAMMA_MODE(PIPE_A), D_ALL); - MMIO_D(GAMMA_MODE(PIPE_B), D_ALL); - MMIO_D(GAMMA_MODE(PIPE_C), D_ALL); - - MMIO_D(PIPE_MULT(PIPE_A), D_ALL); - MMIO_D(PIPE_MULT(PIPE_B), D_ALL); - MMIO_D(PIPE_MULT(PIPE_C), D_ALL); - - MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL); - MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL); - MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL); - MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL); - MMIO_D(SBI_ADDR, D_ALL); MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL); MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write); - MMIO_D(PIXCLK_GATE, D_ALL); MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL, dp_aux_ch_ctl_mmio_write); @@ -2799,65 +2351,18 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write); MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL); - MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x64e60), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x64eC0), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x64f20), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - MMIO_F(_MMIO(0x64f80), 0x50, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL); - MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL); - MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL); - MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL); MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL); MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL); MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL); - MMIO_D(_MMIO(_TRANSA_MSA_MISC), D_ALL); - MMIO_D(_MMIO(_TRANSB_MSA_MISC), D_ALL); - MMIO_D(_MMIO(_TRANSC_MSA_MISC), D_ALL); - MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC), D_ALL); - MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL); - MMIO_D(FORCEWAKE_ACK, D_ALL); - MMIO_D(GEN6_GT_CORE_STATUS, D_ALL); - MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL); MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL); - MMIO_D(ECOBUS, D_ALL); MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL); MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL); - MMIO_D(GEN6_RPNSWREQ, D_ALL); - MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL); - MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL); - MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL); - MMIO_D(GEN6_RPSTAT1, D_ALL); - MMIO_D(GEN6_RP_CONTROL, D_ALL); - MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL); - MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL); - MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL); - MMIO_D(GEN6_RP_CUR_UP, D_ALL); - MMIO_D(GEN6_RP_PREV_UP, D_ALL); - MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL); - MMIO_D(GEN6_RP_CUR_DOWN, D_ALL); - MMIO_D(GEN6_RP_PREV_DOWN, D_ALL); - MMIO_D(GEN6_RP_UP_EI, D_ALL); - MMIO_D(GEN6_RP_DOWN_EI, D_ALL); - MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL); - MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL); - MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, 
D_ALL); - MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL); - MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL); - MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL); - MMIO_D(GEN6_RC_SLEEP, D_ALL); - MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL); - MMIO_D(GEN6_RC6_THRESHOLD, D_ALL); - MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL); - MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL); - MMIO_D(GEN6_PMINTRMSK, D_ALL); MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write); @@ -2865,97 +2370,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write); - MMIO_D(RSTDBYCTL, D_ALL); - MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write); MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write); MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write); - MMIO_D(TILECTL, D_ALL); - - MMIO_D(GEN6_UCGCTL1, D_ALL); - MMIO_D(GEN6_UCGCTL2, D_ALL); - - MMIO_F(_MMIO(0x4f000), 0x90, 0, 0, 0, D_ALL, NULL, NULL); - - MMIO_D(GEN6_PCODE_DATA, D_ALL); - MMIO_D(_MMIO(0x13812c), D_ALL); MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL); - MMIO_D(HSW_EDRAM_CAP, D_ALL); - MMIO_D(HSW_IDICR, D_ALL); MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL); - MMIO_D(_MMIO(0x3c), D_ALL); - MMIO_D(_MMIO(0x860), D_ALL); - MMIO_D(ECOSKPD(RENDER_RING_BASE), D_ALL); - MMIO_D(_MMIO(0x121d0), D_ALL); - MMIO_D(ECOSKPD(BLT_RING_BASE), D_ALL); - MMIO_D(_MMIO(0x41d0), D_ALL); - MMIO_D(GAC_ECO_BITS, D_ALL); - MMIO_D(_MMIO(0x6200), D_ALL); - MMIO_D(_MMIO(0x6204), D_ALL); - MMIO_D(_MMIO(0x6208), D_ALL); - MMIO_D(_MMIO(0x7118), D_ALL); - MMIO_D(_MMIO(0x7180), D_ALL); - MMIO_D(_MMIO(0x7408), D_ALL); - MMIO_D(_MMIO(0x7c00), D_ALL); MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write); - MMIO_D(_MMIO(0x911c), D_ALL); - MMIO_D(_MMIO(0x9120), D_ALL); MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL); - MMIO_D(GAB_CTL, D_ALL); - MMIO_D(_MMIO(0x48800), D_ALL); - MMIO_D(_MMIO(0xce044), D_ALL); - MMIO_D(_MMIO(0xe6500), D_ALL); - MMIO_D(_MMIO(0xe6504), D_ALL); - MMIO_D(_MMIO(0xe6600), D_ALL); - MMIO_D(_MMIO(0xe6604), D_ALL); - MMIO_D(_MMIO(0xe6700), D_ALL); - MMIO_D(_MMIO(0xe6704), D_ALL); - MMIO_D(_MMIO(0xe6800), D_ALL); - MMIO_D(_MMIO(0xe6804), D_ALL); - MMIO_D(PCH_GMBUS4, D_ALL); - MMIO_D(PCH_GMBUS5, D_ALL); - - MMIO_D(_MMIO(0x902c), D_ALL); - MMIO_D(_MMIO(0xec008), D_ALL); - MMIO_D(_MMIO(0xec00c), D_ALL); - MMIO_D(_MMIO(0xec008 + 0x18), D_ALL); - MMIO_D(_MMIO(0xec00c + 0x18), D_ALL); - MMIO_D(_MMIO(0xec008 + 0x18 * 2), D_ALL); - MMIO_D(_MMIO(0xec00c + 0x18 * 2), D_ALL); - MMIO_D(_MMIO(0xec008 + 0x18 * 3), D_ALL); - MMIO_D(_MMIO(0xec00c + 0x18 * 3), D_ALL); - MMIO_D(_MMIO(0xec408), D_ALL); - MMIO_D(_MMIO(0xec40c), D_ALL); - MMIO_D(_MMIO(0xec408 + 0x18), D_ALL); - MMIO_D(_MMIO(0xec40c + 0x18), D_ALL); - MMIO_D(_MMIO(0xec408 + 0x18 * 2), D_ALL); - MMIO_D(_MMIO(0xec40c + 0x18 * 2), D_ALL); - MMIO_D(_MMIO(0xec408 + 0x18 * 3), D_ALL); - MMIO_D(_MMIO(0xec40c + 0x18 * 3), D_ALL); - MMIO_D(_MMIO(0xfc810), D_ALL); - MMIO_D(_MMIO(0xfc81c), D_ALL); - MMIO_D(_MMIO(0xfc828), D_ALL); - MMIO_D(_MMIO(0xfc834), D_ALL); - MMIO_D(_MMIO(0xfcc00), D_ALL); - MMIO_D(_MMIO(0xfcc0c), D_ALL); - MMIO_D(_MMIO(0xfcc18), D_ALL); - MMIO_D(_MMIO(0xfcc24), D_ALL); - MMIO_D(_MMIO(0xfd000), D_ALL); - MMIO_D(_MMIO(0xfd00c), D_ALL); - MMIO_D(_MMIO(0xfd018), D_ALL); - MMIO_D(_MMIO(0xfd024), D_ALL); - MMIO_D(_MMIO(0xfd034), D_ALL); - MMIO_DH(FPGA_DBG, 
D_ALL, NULL, fpga_dbg_mmio_write); - MMIO_D(_MMIO(0x2054), D_ALL); - MMIO_D(_MMIO(0x12054), D_ALL); - MMIO_D(_MMIO(0x22054), D_ALL); - MMIO_D(_MMIO(0x1a054), D_ALL); - - MMIO_D(_MMIO(0x44070), D_ALL); MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL); @@ -2963,8 +2388,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL); MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL); - MMIO_D(_MMIO(0x2b00), D_BDW_PLUS); - MMIO_D(_MMIO(0x2360), D_BDW_PLUS); MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); @@ -3012,28 +2435,23 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) static int init_bdw_mmio_info(struct intel_gvt *gvt) { - struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS); MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS); MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS); MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS); MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); @@ -3041,7 +2459,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS); MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); @@ -3049,7 +2466,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS); MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); @@ -3057,22 +2473,18 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS); MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS); MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS); MMIO_DH(GEN8_PCU_IMR, 
D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); - MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS); MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, intel_vgpu_reg_master_irq_handler); @@ -3107,21 +2519,8 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); #undef RING_REG - MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS); - MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS); - MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS); - MMIO_D(_MMIO(0x1c1d0), D_BDW_PLUS); - MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS); - MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS); - MMIO_D(_MMIO(0x1c054), D_BDW_PLUS); - MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write); - MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT); - MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); - - MMIO_D(GAMTARBMODE, D_BDW_PLUS); - #define RING_REG(base) _MMIO((base) + 0x270) MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL); #undef RING_REG @@ -3130,24 +2529,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); - MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS); - MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS); - MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS); - - MMIO_D(WM_MISC, D_BDW); - MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW); - - MMIO_D(_MMIO(0x6671c), D_BDW_PLUS); - MMIO_D(_MMIO(0x66c00), D_BDW_PLUS); - MMIO_D(_MMIO(0x66c04), D_BDW_PLUS); - - MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS); - - MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS); - MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS); - MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS); - - MMIO_D(_MMIO(0xfdc), D_BDW_PLUS); MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, @@ -3159,27 +2540,14 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL); - MMIO_D(_MMIO(0xb110), D_BDW); - MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS); MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0, D_BDW_PLUS, NULL, force_nonpriv_write); - MMIO_D(_MMIO(0x44484), D_BDW_PLUS); - MMIO_D(_MMIO(0x4448c), D_BDW_PLUS); - MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL); - MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS); MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL); - MMIO_D(_MMIO(0x110000), D_BDW_PLUS); - - MMIO_D(_MMIO(0x48400), D_BDW_PLUS); - - MMIO_D(_MMIO(0x6e570), D_BDW_PLUS); - MMIO_D(_MMIO(0x65f10), D_BDW_PLUS); - MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); @@ -3219,30 +2587,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS); MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write); MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); - MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS); - MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); - MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(MMCD_MISC_CTRL, 
D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL); - MMIO_D(DC_STATE_EN, D_SKL_PLUS); - MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS); - MMIO_D(CDCLK_CTL, D_SKL_PLUS); MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write); MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write); - MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS); - MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS); - MMIO_D(DPLL_CTRL1, D_SKL_PLUS); - MMIO_D(DPLL_CTRL2, D_SKL_PLUS); MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL); MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); @@ -3285,22 +2638,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); @@ -3362,30 +2699,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL); MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL); - MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS); - MMIO_D(_MMIO(0x72380), D_SKL_PLUS); - MMIO_D(_MMIO(0x7239c), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS); - - MMIO_D(DMC_SSP_BASE, D_SKL_PLUS); - MMIO_D(DMC_HTP_SKL, D_SKL_PLUS); - MMIO_D(DMC_LAST_WRITE, D_SKL_PLUS); - MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_D(SKL_DFSM, D_SKL_PLUS); - MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS); - MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, NULL, NULL); MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_D(RPM_CONFIG0, D_SKL_PLUS); - MMIO_D(_MMIO(0xd08), D_SKL_PLUS); - MMIO_D(RC6_LOCATION, D_SKL_PLUS); MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, @@ -3402,40 +2722,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE, NULL, gen9_trtt_chicken_write); - MMIO_D(_MMIO(0x46430), D_SKL_PLUS); - - MMIO_D(_MMIO(0x46520), D_SKL_PLUS); - - MMIO_D(_MMIO(0xc403c), D_SKL_PLUS); MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); - MMIO_D(_MMIO(0x65900), 
D_SKL_PLUS); - MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS); - MMIO_D(_MMIO(0x4068), D_SKL_PLUS); - MMIO_D(_MMIO(0x67054), D_SKL_PLUS); - MMIO_D(_MMIO(0x6e560), D_SKL_PLUS); - MMIO_D(_MMIO(0x6e554), D_SKL_PLUS); - MMIO_D(_MMIO(0x2b20), D_SKL_PLUS); - MMIO_D(_MMIO(0x65f00), D_SKL_PLUS); - MMIO_D(_MMIO(0x65f08), D_SKL_PLUS); - MMIO_D(_MMIO(0x320f0), D_SKL_PLUS); - - MMIO_D(_MMIO(0x70034), D_SKL_PLUS); - MMIO_D(_MMIO(0x71034), D_SKL_PLUS); - MMIO_D(_MMIO(0x72034), D_SKL_PLUS); - - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS); - - MMIO_D(_MMIO(0x44500), D_SKL_PLUS); #define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4) MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, csfe_chicken1_mmio_write); @@ -3446,7 +2735,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) NULL, NULL); MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL); - MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT); MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); return 0; @@ -3454,43 +2742,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) static int init_bxt_mmio_info(struct intel_gvt *gvt) { - struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; - MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL); - - MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT); - MMIO_D(GEN7_ROW_INSTDONE, D_BXT); - MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT); - MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT); - MMIO_D(ERROR_GEN6, D_BXT); - MMIO_D(DONE_REG, D_BXT); - MMIO_D(EIR, D_BXT); - MMIO_D(PGTBL_ER, D_BXT); - MMIO_D(_MMIO(0x4194), D_BXT); - MMIO_D(_MMIO(0x4294), D_BXT); - MMIO_D(_MMIO(0x4494), D_BXT); - - MMIO_RING_D(RING_PSMI_CTL, D_BXT); - MMIO_RING_D(RING_DMA_FADD, D_BXT); - MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT); - MMIO_RING_D(RING_IPEHR, D_BXT); - MMIO_RING_D(RING_INSTPS, D_BXT); - MMIO_RING_D(RING_BBADDR_UDW, D_BXT); - MMIO_RING_D(RING_BBSTATE, D_BXT); - MMIO_RING_D(RING_IPEIR, D_BXT); - - MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL); - MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write); - MMIO_D(BXT_RP_STATE_CAP, D_BXT); MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT, NULL, bxt_phy_ctl_family_write); MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT, NULL, bxt_phy_ctl_family_write); - MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT); - MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT); - MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT); MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT, NULL, bxt_port_pll_enable_write); MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT, @@ -3498,128 +2756,19 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL, bxt_port_pll_enable_write); - MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT); - MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT); - - MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), 
D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT); - MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT); - - MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT, NULL, bxt_pcs_dw12_grp_write); - MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT, bxt_port_tx_dw3_read, NULL); - MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT); - - MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT, NULL, bxt_pcs_dw12_grp_write); - MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT, bxt_port_tx_dw3_read, NULL); - MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT); - - MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), 
D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT); MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT, NULL, bxt_pcs_dw12_grp_write); - MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT, bxt_port_tx_dw3_read, NULL); - MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT); - MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT); - MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT); - - MMIO_D(BXT_DE_PLL_CTL, D_BXT); MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write); - MMIO_D(BXT_DSI_PLL_CTL, D_BXT); - MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); - - MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); - MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT); - - MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); - MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); - MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); - - MMIO_D(RC6_CTX_BASE, D_BXT); - - MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT); - MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT); - MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT); - MMIO_D(GEN6_GFXPAUSE, D_BXT); MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL); @@ -3639,17 +2788,14 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) return 0; } -static const struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, - unsigned int offset) +static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, + unsigned int offset) { - unsigned long device = intel_gvt_get_device_type(gvt); - const struct gvt_mmio_block *block = gvt->mmio.mmio_block; + struct gvt_mmio_block *block = gvt->mmio.mmio_block; int num = gvt->mmio.num_mmio_block; int i; for (i = 0; i < num; i++, block++) { - if (!(device & block->device)) - continue; if (offset >= i915_mmio_reg_offset(block->offset) && offset < i915_mmio_reg_offset(block->offset) + block->size) return block; @@ -3674,23 +2820,117 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node) kfree(e); + kfree(gvt->mmio.mmio_block); + gvt->mmio.mmio_block = NULL; + gvt->mmio.num_mmio_block = 0; + vfree(gvt->mmio.mmio_attribute); gvt->mmio.mmio_attribute = NULL; } -/* Special MMIO blocks. registers in MMIO block ranges should not be command - * accessible (should have no F_CMD_ACCESS flag). 
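The lookup above is now a plain range scan: the intel_gvt_get_device_type() mask test is gone because the block array is built at init time from the running device's own MMIO table, so every entry is already valid for this hardware. A minimal user-space sketch of the same range test follows; the offsets are made up for illustration and the struct is simplified (the real gvt_mmio_block also carries an i915_reg_t offset and read/write handlers).

#include <stdio.h>

/* Simplified stand-in for struct gvt_mmio_block. */
struct mmio_block {
	unsigned int offset;
	unsigned int size;
};

/* Same linear range test as the patched find_mmio_block(). */
static struct mmio_block *find_block(struct mmio_block *blocks, int num,
				     unsigned int offset)
{
	int i;

	for (i = 0; i < num; i++) {
		if (offset >= blocks[i].offset &&
		    offset < blocks[i].offset + blocks[i].size)
			return &blocks[i];
	}
	return NULL;
}

int main(void)
{
	/* Illustrative ranges; 0x78000 mirrors the PVINFO page. */
	struct mmio_block blocks[] = {
		{ 0x78000, 0x1000 },
		{ 0x4a000, 0x3000 },
	};

	printf("hit:  %p\n", (void *)find_block(blocks, 2, 0x78010));
	printf("miss: %p\n", (void *)find_block(blocks, 2, 0x70000));
	return 0;
}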
- * otherwise, need to update cmd_reg_handler in cmd_parser.c
- */
-static const struct gvt_mmio_block mmio_blocks[] = {
-	{D_SKL_PLUS, _MMIO(DMC_MMIO_START_RANGE), 0x3000, NULL, NULL},
-	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
-	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
-		pvinfo_mmio_read, pvinfo_mmio_write},
-	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
+static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
+		       u32 size)
+{
+	struct intel_gvt *gvt = iter->data;
+	struct intel_gvt_mmio_info *info, *p;
+	u32 start, end, i;
+
+	if (WARN_ON(!IS_ALIGNED(offset, 4)))
+		return -EINVAL;
+
+	start = offset;
+	end = offset + size;
+
+	for (i = start; i < end; i += 4) {
+		p = intel_gvt_find_mmio_info(gvt, i);
+		if (p) {
+			WARN(1, "dup mmio definition offset %x\n",
+			     p->offset);
+
+			/* We return -EEXIST here to make GVT-g load fail,
+			 * so that a duplicated MMIO definition is caught
+			 * as early as possible.
+			 */
+			return -EEXIST;
+		}
+
+		info = kzalloc(sizeof(*info), GFP_KERNEL);
+		if (!info)
+			return -ENOMEM;
+
+		info->offset = i;
+		info->read = intel_vgpu_default_mmio_read;
+		info->write = intel_vgpu_default_mmio_write;
+		INIT_HLIST_NODE(&info->node);
+		hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
+		gvt->mmio.num_tracked_mmio++;
+	}
+	return 0;
+}
+
+static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter,
+			     u32 offset, u32 size)
+{
+	struct intel_gvt *gvt = iter->data;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	void *ret;
+
+	ret = krealloc(block,
+		       (gvt->mmio.num_mmio_block + 1) * sizeof(*block),
+		       GFP_KERNEL);
+	if (!ret)
+		return -ENOMEM;
+
+	gvt->mmio.mmio_block = block = ret;
+
+	block += gvt->mmio.num_mmio_block;
+
+	memset(block, 0, sizeof(*block));
+
+	block->offset = _MMIO(offset);
+	block->size = size;
+
+	gvt->mmio.num_mmio_block++;
+
+	return 0;
+}
+
+static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset,
+			  u32 size)
+{
+	if (size < 1024 || offset == i915_mmio_reg_offset(GEN9_GFX_MOCS(0)))
+		return handle_mmio(iter, offset, size);
+	else
+		return handle_mmio_block(iter, offset, size);
+}
+
+static int init_mmio_info(struct intel_gvt *gvt)
+{
+	struct intel_gvt_mmio_table_iter iter = {
+		.i915 = gvt->gt->i915,
+		.data = gvt,
+		.handle_mmio_cb = handle_mmio_cb,
+	};
+
+	return intel_gvt_iterate_mmio_table(&iter);
+}
+
+static int init_mmio_block_handlers(struct intel_gvt *gvt)
+{
+	struct gvt_mmio_block *block;
+
+	block = find_mmio_block(gvt, VGT_PVINFO_PAGE);
+	if (!block) {
+		WARN(1, "fail to assign handlers to mmio block %x\n",
+		     i915_mmio_reg_offset(gvt->mmio.mmio_block->offset));
+		return -ENODEV;
+	}
+
+	block->read = pvinfo_mmio_read;
+	block->write = pvinfo_mmio_write;
+
+	return 0;
+}
 
 /**
  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
@@ -3713,6 +2953,14 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 	if (!gvt->mmio.mmio_attribute)
 		return -ENOMEM;
 
+	ret = init_mmio_info(gvt);
+	if (ret)
+		goto err;
+
+	ret = init_mmio_block_handlers(gvt);
+	if (ret)
+		goto err;
+
 	ret = init_generic_mmio_info(gvt);
 	if (ret)
 		goto err;
@@ -3743,9 +2991,6 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 			goto err;
 	}
 
-	gvt->mmio.mmio_block = mmio_blocks;
-	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
-
 	return 0;
 err:
 	intel_gvt_clean_mmio_info(gvt);
@@ -3765,7 +3010,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
 	int
(*handler)(struct intel_gvt *gvt, u32 offset, void *data), void *data) { - const struct gvt_mmio_block *block = gvt->mmio.mmio_block; + struct gvt_mmio_block *block = gvt->mmio.mmio_block; struct intel_gvt_mmio_info *e; int i, j, ret; @@ -3781,9 +3026,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, continue; for (j = 0; j < block->size; j += 4) { - ret = handler(gvt, - i915_mmio_reg_offset(block->offset) + j, - data); + ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data); if (ret) return ret; } @@ -3883,7 +3126,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_mmio_info *mmio_info; - const struct gvt_mmio_block *mmio_block; + struct gvt_mmio_block *mmio_block; gvt_mmio_func func; int ret; diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h deleted file mode 100644 index f33e3cbd0439..000000000000 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * Authors: - * Eddie Dong <eddie.dong@intel.com> - * Dexuan Cui - * Jike Song <jike.song@intel.com> - * - * Contributors: - * Zhi Wang <zhi.a.wang@intel.com> - * - */ - -#ifndef _GVT_HYPERCALL_H_ -#define _GVT_HYPERCALL_H_ - -#include <linux/types.h> - -struct device; - -enum hypervisor_type { - INTEL_GVT_HYPERVISOR_XEN = 0, - INTEL_GVT_HYPERVISOR_KVM, -}; - -/* - * Specific GVT-g MPT modules function collections. Currently GVT-g supports - * both Xen and KVM by providing dedicated hypervisor-related MPT modules. 
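The registration path added above replaces the hand-maintained per-device MMIO_D tables: intel_gvt_iterate_mmio_table() walks the common i915 table and handle_mmio_cb() decides, entry by entry, whether to track individual registers or record one coarse block. A standalone sketch of that split rule; the 0xc800 offset for GEN9_GFX_MOCS(0) and the sample sizes are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Assumed offset of GEN9_GFX_MOCS(0), mirroring the special case in
 * handle_mmio_cb(). */
#define GFX_MOCS_0	0xc800

/* handle_mmio_cb()'s split: small ranges (and the MOCS range, despite
 * its size) are tracked register by register; larger ranges become a
 * single coarse block. */
static bool tracked_per_register(unsigned int offset, unsigned int size)
{
	return size < 1024 || offset == GFX_MOCS_0;
}

int main(void)
{
	printf("%d\n", tracked_per_register(0x2358, 4));	  /* 1: one register */
	printf("%d\n", tracked_per_register(GFX_MOCS_0, 2048));	  /* 1: MOCS exception */
	printf("%d\n", tracked_per_register(0x78000, 0x1000));	  /* 0: coarse block */
	return 0;
}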
- */ -struct intel_gvt_mpt { - enum hypervisor_type type; - int (*host_init)(struct device *dev, void *gvt, const void *ops); - void (*host_exit)(struct device *dev, void *gvt); - int (*attach_vgpu)(void *vgpu, unsigned long *handle); - void (*detach_vgpu)(void *vgpu); - int (*inject_msi)(unsigned long handle, u32 addr, u16 data); - unsigned long (*from_virt_to_mfn)(void *p); - int (*enable_page_track)(unsigned long handle, u64 gfn); - int (*disable_page_track)(unsigned long handle, u64 gfn); - int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf, - unsigned long len); - int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf, - unsigned long len); - unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn); - - int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn, - unsigned long size, dma_addr_t *dma_addr); - void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr); - - int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr); - - int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn, - unsigned long mfn, unsigned int nr, bool map); - int (*set_trap_area)(unsigned long handle, u64 start, u64 end, - bool map); - int (*set_opregion)(void *vgpu); - int (*set_edid)(void *vgpu, int port_num); - int (*get_vfio_device)(void *vgpu); - void (*put_vfio_device)(void *vgpu); - bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn); -}; - -#endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 228f623d466d..a6b2021b665f 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -29,6 +29,8 @@ * */ +#include <linux/eventfd.h> + #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" @@ -397,9 +399,45 @@ static void init_irq_map(struct intel_gvt_irq *irq) } /* =======================vEvent injection===================== */ + +#define MSI_CAP_CONTROL(offset) (offset + 2) +#define MSI_CAP_ADDRESS(offset) (offset + 4) +#define MSI_CAP_DATA(offset) (offset + 8) +#define MSI_CAP_EN 0x1 + static int inject_virtual_interrupt(struct intel_vgpu *vgpu) { - return intel_gvt_hypervisor_inject_msi(vgpu); + unsigned long offset = vgpu->gvt->device_info.msi_cap_offset; + u16 control, data; + u32 addr; + + control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset)); + addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset)); + data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset)); + + /* Do not generate MSI if MSIEN is disabled */ + if (!(control & MSI_CAP_EN)) + return 0; + + if (WARN(control & GENMASK(15, 1), "only support one MSI format\n")) + return -EINVAL; + + trace_inject_msi(vgpu->id, addr, data); + + /* + * When guest is powered off, msi_trigger is set to NULL, but vgpu's + * config and mmio register isn't restored to default during guest + * poweroff. If this vgpu is still used in next vm, this vgpu's pipe + * may be enabled, then once this vgpu is active, it will get inject + * vblank interrupt request. But msi_trigger is null until msi is + * enabled by guest. so if msi_trigger is null, success is still + * returned and don't inject interrupt into guest. 
+ */ + if (!vgpu->attached) + return -ESRCH; + if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1) + return -EFAULT; + return 0; } static void propagate_event(struct intel_gvt_irq *irq, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 057ec4490104..0787ba5c301f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1,7 +1,7 @@ /* * KVMGT - the implementation of Intel mediated pass-through framework for KVM * - * Copyright(c) 2014-2016 Intel Corporation. All rights reserved. + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -26,6 +26,11 @@ * Kevin Tian <kevin.tian@intel.com> * Jike Song <jike.song@intel.com> * Xiaoguang Chen <xiaoguang.chen@intel.com> + * Eddie Dong <eddie.dong@intel.com> + * + * Contributors: + * Niu Bing <bing.niu@intel.com> + * Zhi Wang <zhi.a.wang@intel.com> */ #include <linux/init.h> @@ -39,8 +44,6 @@ #include <linux/spinlock.h> #include <linux/eventfd.h> #include <linux/uuid.h> -#include <linux/kvm_host.h> -#include <linux/vfio.h> #include <linux/mdev.h> #include <linux/debugfs.h> @@ -49,9 +52,11 @@ #include <drm/drm_edid.h> #include "i915_drv.h" +#include "intel_gvt.h" #include "gvt.h" -static const struct intel_gvt_ops *intel_gvt_ops; +MODULE_IMPORT_NS(DMA_BUF); +MODULE_IMPORT_NS(I915_GVT); /* helper macros copied from vfio-pci */ #define VFIO_PCI_OFFSET_SHIFT 40 @@ -90,16 +95,6 @@ struct kvmgt_pgfn { struct hlist_node hnode; }; -#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries" -struct kvmgt_guest_info { - struct kvm *kvm; - struct intel_vgpu *vgpu; - struct kvm_page_track_notifier_node track_node; -#define NR_BKT (1 << 18) - struct hlist_head ptable[NR_BKT]; -#undef NR_BKT -}; - struct gvt_dma { struct intel_vgpu *vgpu; struct rb_node gfn_node; @@ -110,41 +105,15 @@ struct gvt_dma { struct kref ref; }; -struct kvmgt_vdev { - struct intel_vgpu *vgpu; - struct mdev_device *mdev; - struct vfio_region *region; - int num_regions; - struct eventfd_ctx *intx_trigger; - struct eventfd_ctx *msi_trigger; +#define vfio_dev_to_vgpu(vfio_dev) \ + container_of((vfio_dev), struct intel_vgpu, vfio_device) - /* - * Two caches are used to avoid mapping duplicated pages (eg. - * scratch pages). This help to reduce dma setup overhead. 
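The rewritten inject_virtual_interrupt() no longer goes through an MPT hook: it parses the virtual MSI capability straight out of the vGPU's config space (control word at +2, address at +4, data at +8) and signals the KVM irqfd with eventfd_signal(). A user-space sketch of just the parsing step, assuming a little-endian layout and a made-up capability offset of 0x90.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same layout the injection path reads, relative to msi_cap_offset. */
#define MSI_CAP_CONTROL(off)	((off) + 2)
#define MSI_CAP_ADDRESS(off)	((off) + 4)
#define MSI_CAP_DATA(off)	((off) + 8)
#define MSI_CAP_EN		0x1

/* Pull control/address/data out of a raw config-space buffer and
 * honour the MSI enable bit; returns 1 if an MSI should be injected. */
static int msi_parse(const uint8_t *cfg, unsigned long off,
		     uint32_t *addr, uint16_t *data)
{
	uint16_t control;

	memcpy(&control, cfg + MSI_CAP_CONTROL(off), sizeof(control));
	if (!(control & MSI_CAP_EN))
		return 0;	/* MSI disabled: nothing to inject */

	memcpy(addr, cfg + MSI_CAP_ADDRESS(off), sizeof(*addr));
	memcpy(data, cfg + MSI_CAP_DATA(off), sizeof(*data));
	return 1;
}

int main(void)
{
	uint8_t cfg[256] = {0};
	uint32_t addr;
	uint16_t data;

	cfg[0x90 + 2] = MSI_CAP_EN;	/* control: MSI enabled */
	cfg[0x90 + 4] = 0xee;		/* address, low byte */
	cfg[0x90 + 8] = 0x42;		/* data, low byte */

	if (msi_parse(cfg, 0x90, &addr, &data))
		printf("inject MSI addr=%#x data=%#x\n", addr, data);
	return 0;
}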
- */ - struct rb_root gfn_cache; - struct rb_root dma_addr_cache; - unsigned long nr_cache_entries; - struct mutex cache_lock; - - struct notifier_block iommu_notifier; - struct notifier_block group_notifier; - struct kvm *kvm; - struct work_struct release_work; - atomic_t released; - struct vfio_device *vfio_device; - struct vfio_group *vfio_group; -}; - -static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu) -{ - return intel_vgpu_vdev(vgpu); -} - -static inline bool handle_valid(unsigned long handle) -{ - return !!(handle & ~0xff); -} +static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, + const u8 *val, int len, + struct kvm_page_track_notifier_node *node); +static void kvmgt_page_track_flush_slot(struct kvm *kvm, + struct kvm_memory_slot *slot, + struct kvm_page_track_notifier_node *node); static ssize_t available_instances_show(struct mdev_type *mtype, struct mdev_type_attribute *attr, @@ -259,15 +228,12 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) } } -static int kvmgt_guest_init(struct mdev_device *mdev); static void intel_vgpu_release_work(struct work_struct *work); -static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); int total_pages; int npage; int ret; @@ -277,7 +243,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, for (npage = 0; npage < total_pages; npage++) { unsigned long cur_gfn = gfn + npage; - ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1); + ret = vfio_group_unpin_pages(vgpu->vfio_group, &cur_gfn, 1); drm_WARN_ON(&i915->drm, ret != 1); } } @@ -286,7 +252,6 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, struct page **page) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned long base_pfn = 0; int total_pages; int npage; @@ -301,7 +266,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long cur_gfn = gfn + npage; unsigned long pfn; - ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1, + ret = vfio_group_pin_pages(vgpu->vfio_group, &cur_gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); if (ret != 1) { gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n", @@ -368,7 +333,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu, dma_addr_t dma_addr) { - struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node; + struct rb_node *node = vgpu->dma_addr_cache.rb_node; struct gvt_dma *itr; while (node) { @@ -386,7 +351,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu, static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) { - struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node; + struct rb_node *node = vgpu->gfn_cache.rb_node; struct gvt_dma *itr; while (node) { @@ -407,7 +372,6 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, { struct gvt_dma *new, *itr; struct rb_node **link, *parent = NULL; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL); if (!new) @@ -420,7 +384,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kref_init(&new->ref); /* gfn_cache maps gfn to 
struct gvt_dma. */ - link = &vdev->gfn_cache.rb_node; + link = &vgpu->gfn_cache.rb_node; while (*link) { parent = *link; itr = rb_entry(parent, struct gvt_dma, gfn_node); @@ -431,11 +395,11 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, link = &parent->rb_right; } rb_link_node(&new->gfn_node, parent, link); - rb_insert_color(&new->gfn_node, &vdev->gfn_cache); + rb_insert_color(&new->gfn_node, &vgpu->gfn_cache); /* dma_addr_cache maps dma addr to struct gvt_dma. */ parent = NULL; - link = &vdev->dma_addr_cache.rb_node; + link = &vgpu->dma_addr_cache.rb_node; while (*link) { parent = *link; itr = rb_entry(parent, struct gvt_dma, dma_addr_node); @@ -446,59 +410,54 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, link = &parent->rb_right; } rb_link_node(&new->dma_addr_node, parent, link); - rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache); + rb_insert_color(&new->dma_addr_node, &vgpu->dma_addr_cache); - vdev->nr_cache_entries++; + vgpu->nr_cache_entries++; return 0; } static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu, struct gvt_dma *entry) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - - rb_erase(&entry->gfn_node, &vdev->gfn_cache); - rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache); + rb_erase(&entry->gfn_node, &vgpu->gfn_cache); + rb_erase(&entry->dma_addr_node, &vgpu->dma_addr_cache); kfree(entry); - vdev->nr_cache_entries--; + vgpu->nr_cache_entries--; } static void gvt_cache_destroy(struct intel_vgpu *vgpu) { struct gvt_dma *dma; struct rb_node *node = NULL; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); for (;;) { - mutex_lock(&vdev->cache_lock); - node = rb_first(&vdev->gfn_cache); + mutex_lock(&vgpu->cache_lock); + node = rb_first(&vgpu->gfn_cache); if (!node) { - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); break; } dma = rb_entry(node, struct gvt_dma, gfn_node); gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size); __gvt_cache_remove_entry(vgpu, dma); - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); } } static void gvt_cache_init(struct intel_vgpu *vgpu) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - - vdev->gfn_cache = RB_ROOT; - vdev->dma_addr_cache = RB_ROOT; - vdev->nr_cache_entries = 0; - mutex_init(&vdev->cache_lock); + vgpu->gfn_cache = RB_ROOT; + vgpu->dma_addr_cache = RB_ROOT; + vgpu->nr_cache_entries = 0; + mutex_init(&vgpu->cache_lock); } -static void kvmgt_protect_table_init(struct kvmgt_guest_info *info) +static void kvmgt_protect_table_init(struct intel_vgpu *info) { hash_init(info->ptable); } -static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info) +static void kvmgt_protect_table_destroy(struct intel_vgpu *info) { struct kvmgt_pgfn *p; struct hlist_node *tmp; @@ -511,7 +470,7 @@ static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info) } static struct kvmgt_pgfn * -__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn) +__kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p, *res = NULL; @@ -525,8 +484,7 @@ __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn) return res; } -static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info, - gfn_t gfn) +static bool kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p; @@ -534,7 +492,7 @@ static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info, return !!p; } -static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn) +static void 
kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p; @@ -549,8 +507,7 @@ static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn) hash_add(info->ptable, &p->hnode, gfn); } -static void kvmgt_protect_table_del(struct kvmgt_guest_info *info, - gfn_t gfn) +static void kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn) { struct kvmgt_pgfn *p; @@ -564,18 +521,17 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info, static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf, size_t count, loff_t *ppos, bool iswrite) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; - void *base = vdev->region[i].data; + void *base = vgpu->region[i].data; loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; - if (pos >= vdev->region[i].size || iswrite) { + if (pos >= vgpu->region[i].size || iswrite) { gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n"); return -EINVAL; } - count = min(count, (size_t)(vdev->region[i].size - pos)); + count = min(count, (size_t)(vgpu->region[i].size - pos)); memcpy(buf, base + pos, count); return count; @@ -617,9 +573,9 @@ static int handle_edid_regs(struct intel_vgpu *vgpu, gvt_vgpu_err("invalid EDID blob\n"); return -EINVAL; } - intel_gvt_ops->emulate_hotplug(vgpu, true); + intel_vgpu_emulate_hotplug(vgpu, true); } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN) - intel_gvt_ops->emulate_hotplug(vgpu, false); + intel_vgpu_emulate_hotplug(vgpu, false); else { gvt_vgpu_err("invalid EDID link state %d\n", regs->link_state); @@ -668,8 +624,7 @@ static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf, int ret; unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; - struct vfio_edid_region *region = - (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data; + struct vfio_edid_region *region = vgpu->region[i].data; loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; if (pos < region->vfio_edid_regs.edid_offset) { @@ -701,44 +656,27 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu, const struct intel_vgpu_regops *ops, size_t size, u32 flags, void *data) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); struct vfio_region *region; - region = krealloc(vdev->region, - (vdev->num_regions + 1) * sizeof(*region), + region = krealloc(vgpu->region, + (vgpu->num_regions + 1) * sizeof(*region), GFP_KERNEL); if (!region) return -ENOMEM; - vdev->region = region; - vdev->region[vdev->num_regions].type = type; - vdev->region[vdev->num_regions].subtype = subtype; - vdev->region[vdev->num_regions].ops = ops; - vdev->region[vdev->num_regions].size = size; - vdev->region[vdev->num_regions].flags = flags; - vdev->region[vdev->num_regions].data = data; - vdev->num_regions++; + vgpu->region = region; + vgpu->region[vgpu->num_regions].type = type; + vgpu->region[vgpu->num_regions].subtype = subtype; + vgpu->region[vgpu->num_regions].ops = ops; + vgpu->region[vgpu->num_regions].size = size; + vgpu->region[vgpu->num_regions].flags = flags; + vgpu->region[vgpu->num_regions].data = data; + vgpu->num_regions++; return 0; } -static int kvmgt_get_vfio_device(void *p_vgpu) +int intel_gvt_set_opregion(struct intel_vgpu *vgpu) { - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - - vdev->vfio_device = vfio_device_get_from_dev( - mdev_dev(vdev->mdev)); - if (!vdev->vfio_device) { - gvt_vgpu_err("failed to get vfio device\n"); - return -ENODEV; - } - return 0; -} - 
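intel_vgpu_register_reg() above grows the per-vGPU array of VFIO device-specific regions (OpRegion, EDID) one slot at a time with krealloc(); handle_mmio_block() in handlers.c uses the same pattern. A user-space sketch of the growth idiom with realloc() standing in for krealloc(); the type and field names are illustrative, and the real struct also carries ops and flags.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct region {
	unsigned int type;
	unsigned int subtype;
	size_t size;
	void *data;
};

struct dev_state {
	struct region *region;
	int num_regions;
};

/* Grow-by-one, as in intel_vgpu_register_reg(): on allocation failure
 * the old array is left intact, exactly like krealloc(). */
static int register_region(struct dev_state *s, unsigned int type,
			   unsigned int subtype, size_t size, void *data)
{
	struct region *tmp;

	tmp = realloc(s->region, (s->num_regions + 1) * sizeof(*tmp));
	if (!tmp)
		return -1;

	s->region = tmp;
	memset(&s->region[s->num_regions], 0, sizeof(*tmp));
	s->region[s->num_regions].type = type;
	s->region[s->num_regions].subtype = subtype;
	s->region[s->num_regions].size = size;
	s->region[s->num_regions].data = data;
	s->num_regions++;
	return 0;
}

int main(void)
{
	struct dev_state s = { NULL, 0 };

	register_region(&s, 1, 1, 8192, NULL);	/* e.g. an OpRegion slot */
	register_region(&s, 1, 2, 384, NULL);	/* e.g. an EDID slot */
	printf("%d regions\n", s.num_regions);
	free(s.region);
	return 0;
}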
- -static int kvmgt_set_opregion(void *p_vgpu) -{ - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; void *base; int ret; @@ -764,9 +702,8 @@ static int kvmgt_set_opregion(void *p_vgpu) return ret; } -static int kvmgt_set_edid(void *p_vgpu, int port_num) +int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num) { - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); struct vfio_edid_region *base; int ret; @@ -794,71 +731,11 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num) return ret; } -static void kvmgt_put_vfio_device(void *vgpu) -{ - struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu); - - if (WARN_ON(!vdev->vfio_device)) - return; - - vfio_device_put(vdev->vfio_device); -} - -static int intel_vgpu_create(struct mdev_device *mdev) -{ - struct intel_vgpu *vgpu = NULL; - struct intel_vgpu_type *type; - struct device *pdev; - struct intel_gvt *gvt; - int ret; - - pdev = mdev_parent_dev(mdev); - gvt = kdev_to_i915(pdev)->gvt; - - type = &gvt->types[mdev_get_type_group_id(mdev)]; - if (!type) { - ret = -EINVAL; - goto out; - } - - vgpu = intel_gvt_ops->vgpu_create(gvt, type); - if (IS_ERR_OR_NULL(vgpu)) { - ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); - gvt_err("failed to create intel vgpu: %d\n", ret); - goto out; - } - - INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work); - - kvmgt_vdev(vgpu)->mdev = mdev; - mdev_set_drvdata(mdev, vgpu); - - gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", - dev_name(mdev_dev(mdev))); - ret = 0; - -out: - return ret; -} - -static int intel_vgpu_remove(struct mdev_device *mdev) -{ - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - - if (handle_valid(vgpu->handle)) - return -EBUSY; - - intel_gvt_ops->vgpu_destroy(vgpu); - return 0; -} - static int intel_vgpu_iommu_notifier(struct notifier_block *nb, unsigned long action, void *data) { - struct kvmgt_vdev *vdev = container_of(nb, - struct kvmgt_vdev, - iommu_notifier); - struct intel_vgpu *vgpu = vdev->vgpu; + struct intel_vgpu *vgpu = + container_of(nb, struct intel_vgpu, iommu_notifier); if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) { struct vfio_iommu_type1_dma_unmap *unmap = data; @@ -868,7 +745,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, iov_pfn = unmap->iova >> PAGE_SHIFT; end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE; - mutex_lock(&vdev->cache_lock); + mutex_lock(&vgpu->cache_lock); for (; iov_pfn < end_iov_pfn; iov_pfn++) { entry = __gvt_cache_find_gfn(vgpu, iov_pfn); if (!entry) @@ -878,7 +755,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, entry->size); __gvt_cache_remove_entry(vgpu, entry); } - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); } return NOTIFY_OK; @@ -887,35 +764,54 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, static int intel_vgpu_group_notifier(struct notifier_block *nb, unsigned long action, void *data) { - struct kvmgt_vdev *vdev = container_of(nb, - struct kvmgt_vdev, - group_notifier); + struct intel_vgpu *vgpu = + container_of(nb, struct intel_vgpu, group_notifier); /* the only action we care about */ if (action == VFIO_GROUP_NOTIFY_SET_KVM) { - vdev->kvm = data; + vgpu->kvm = data; if (!data) - schedule_work(&vdev->release_work); + schedule_work(&vgpu->release_work); } return NOTIFY_OK; } -static int intel_vgpu_open_device(struct mdev_device *mdev) +static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - 
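intel_vgpu_group_notifier() in this hunk is how the driver learns which KVM instance owns the device: VFIO fires VFIO_GROUP_NOTIFY_SET_KVM with the kvm pointer when a VM binds the group and with NULL when it unbinds, which schedules the deferred release. A minimal sketch of that callback shape; the constant and the release stub are stand-ins, not the driver's symbols.

#include <stdio.h>

/* Stand-in for VFIO_GROUP_NOTIFY_SET_KVM. */
#define GROUP_NOTIFY_SET_KVM	1UL

struct vgpu_state {
	void *kvm;
};

/* Stub for schedule_work(&vgpu->release_work). */
static void schedule_release(struct vgpu_state *v)
{
	(void)v;
	printf("release scheduled\n");
}

/* Record the KVM pointer on bind; schedule deferred release on
 * unbind (data == NULL), as intel_vgpu_group_notifier() does. */
static int group_notifier(struct vgpu_state *v, unsigned long action,
			  void *data)
{
	if (action == GROUP_NOTIFY_SET_KVM) {
		v->kvm = data;
		if (!data)
			schedule_release(v);
	}
	return 0;
}

int main(void)
{
	struct vgpu_state v = { 0 };
	int kvm_dummy;

	group_notifier(&v, GROUP_NOTIFY_SET_KVM, &kvm_dummy); /* VM binds */
	group_notifier(&v, GROUP_NOTIFY_SET_KVM, NULL);       /* VM goes away */
	return 0;
}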
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); + struct intel_vgpu *itr; + int id; + bool ret = false; + + mutex_lock(&vgpu->gvt->lock); + for_each_active_vgpu(vgpu->gvt, itr, id) { + if (!itr->attached) + continue; + + if (vgpu->kvm == itr->kvm) { + ret = true; + goto out; + } + } +out: + mutex_unlock(&vgpu->gvt->lock); + return ret; +} + +static int intel_vgpu_open_device(struct vfio_device *vfio_dev) +{ + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned long events; int ret; struct vfio_group *vfio_group; - vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier; - vdev->group_notifier.notifier_call = intel_vgpu_group_notifier; + vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier; + vgpu->group_notifier.notifier_call = intel_vgpu_group_notifier; events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; - ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, - &vdev->iommu_notifier); + ret = vfio_register_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, &events, + &vgpu->iommu_notifier); if (ret != 0) { gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n", ret); @@ -923,117 +819,129 @@ static int intel_vgpu_open_device(struct mdev_device *mdev) } events = VFIO_GROUP_NOTIFY_SET_KVM; - ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, - &vdev->group_notifier); + ret = vfio_register_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, &events, + &vgpu->group_notifier); if (ret != 0) { gvt_vgpu_err("vfio_register_notifier for group failed: %d\n", ret); goto undo_iommu; } - vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev)); + vfio_group = + vfio_group_get_external_user_from_dev(vgpu->vfio_device.dev); if (IS_ERR_OR_NULL(vfio_group)) { ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group); gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n"); goto undo_register; } - vdev->vfio_group = vfio_group; + vgpu->vfio_group = vfio_group; - /* Take a module reference as mdev core doesn't take - * a reference for vendor driver. 
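__kvmgt_vgpu_exist() above enforces that one KVM instance drives at most one vGPU: open_device returns -EEXIST if any other attached vGPU already holds the same kvm pointer. The check reduces to a linear scan, sketched here with illustrative types (the driver walks an idr under gvt->lock rather than a flat array).

#include <stdbool.h>
#include <stdio.h>

struct vgpu {
	bool attached;
	void *kvm;
};

/* The __kvmgt_vgpu_exist() test: is this KVM instance already bound
 * to some attached vGPU? */
static bool kvm_already_bound(const struct vgpu *vgpus, int n, void *kvm)
{
	int i;

	for (i = 0; i < n; i++) {
		if (vgpus[i].attached && vgpus[i].kvm == kvm)
			return true;
	}
	return false;
}

int main(void)
{
	int vm_a, vm_b;
	struct vgpu vgpus[] = {
		{ true,  &vm_a },
		{ false, &vm_b },	/* not attached: does not count */
	};

	printf("%d\n", kvm_already_bound(vgpus, 2, &vm_a));	/* 1 */
	printf("%d\n", kvm_already_bound(vgpus, 2, &vm_b));	/* 0 */
	return 0;
}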
- */ - if (!try_module_get(THIS_MODULE)) { - ret = -ENODEV; + ret = -EEXIST; + if (vgpu->attached) + goto undo_group; + + ret = -ESRCH; + if (!vgpu->kvm || vgpu->kvm->mm != current->mm) { + gvt_vgpu_err("KVM is required to use Intel vGPU\n"); goto undo_group; } - ret = kvmgt_guest_init(mdev); - if (ret) + ret = -EEXIST; + if (__kvmgt_vgpu_exist(vgpu)) goto undo_group; - intel_gvt_ops->vgpu_activate(vgpu); + vgpu->attached = true; + kvm_get_kvm(vgpu->kvm); - atomic_set(&vdev->released, 0); - return ret; + kvmgt_protect_table_init(vgpu); + gvt_cache_init(vgpu); + + vgpu->track_node.track_write = kvmgt_page_track_write; + vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot; + kvm_page_track_register_notifier(vgpu->kvm, &vgpu->track_node); + + debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs, + &vgpu->nr_cache_entries); + + intel_gvt_activate_vgpu(vgpu); + + atomic_set(&vgpu->released, 0); + return 0; undo_group: - vfio_group_put_external_user(vdev->vfio_group); - vdev->vfio_group = NULL; + vfio_group_put_external_user(vgpu->vfio_group); + vgpu->vfio_group = NULL; undo_register: - vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, - &vdev->group_notifier); + vfio_unregister_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, + &vgpu->group_notifier); undo_iommu: - vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, - &vdev->iommu_notifier); + vfio_unregister_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, + &vgpu->iommu_notifier); out: return ret; } static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); struct eventfd_ctx *trigger; - trigger = vdev->msi_trigger; + trigger = vgpu->msi_trigger; if (trigger) { eventfd_ctx_put(trigger); - vdev->msi_trigger = NULL; + vgpu->msi_trigger = NULL; } } static void __intel_vgpu_release(struct intel_vgpu *vgpu) { - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); struct drm_i915_private *i915 = vgpu->gvt->gt->i915; - struct kvmgt_guest_info *info; int ret; - if (!handle_valid(vgpu->handle)) + if (!vgpu->attached) return; - if (atomic_cmpxchg(&vdev->released, 0, 1)) + if (atomic_cmpxchg(&vgpu->released, 0, 1)) return; - intel_gvt_ops->vgpu_release(vgpu); + intel_gvt_release_vgpu(vgpu); - ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY, - &vdev->iommu_notifier); + ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_IOMMU_NOTIFY, + &vgpu->iommu_notifier); drm_WARN(&i915->drm, ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); - ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY, - &vdev->group_notifier); + ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_GROUP_NOTIFY, + &vgpu->group_notifier); drm_WARN(&i915->drm, ret, "vfio_unregister_notifier for group failed: %d\n", ret); - /* dereference module reference taken at open */ - module_put(THIS_MODULE); + debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs)); - info = (struct kvmgt_guest_info *)vgpu->handle; - kvmgt_guest_exit(info); + kvm_page_track_unregister_notifier(vgpu->kvm, &vgpu->track_node); + kvm_put_kvm(vgpu->kvm); + kvmgt_protect_table_destroy(vgpu); + gvt_cache_destroy(vgpu); intel_vgpu_release_msi_eventfd_ctx(vgpu); - vfio_group_put_external_user(vdev->vfio_group); + vfio_group_put_external_user(vgpu->vfio_group); - vdev->kvm = NULL; - vgpu->handle = 0; + vgpu->kvm = NULL; + vgpu->attached = false; } -static void intel_vgpu_close_device(struct mdev_device *mdev) +static void intel_vgpu_close_device(struct vfio_device 
*vfio_dev) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - - __intel_vgpu_release(vgpu); + __intel_vgpu_release(vfio_dev_to_vgpu(vfio_dev)); } static void intel_vgpu_release_work(struct work_struct *work) { - struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev, - release_work); + struct intel_vgpu *vgpu = + container_of(work, struct intel_vgpu, release_work); - __intel_vgpu_release(vdev->vgpu); + __intel_vgpu_release(vgpu); } static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar) @@ -1070,10 +978,10 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off, int ret; if (is_write) - ret = intel_gvt_ops->emulate_mmio_write(vgpu, + ret = intel_vgpu_emulate_mmio_write(vgpu, bar_start + off, buf, count); else - ret = intel_gvt_ops->emulate_mmio_read(vgpu, + ret = intel_vgpu_emulate_mmio_read(vgpu, bar_start + off, buf, count); return ret; } @@ -1111,17 +1019,15 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, return 0; } -static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, +static ssize_t intel_vgpu_rw(struct intel_vgpu *vgpu, char *buf, size_t count, loff_t *ppos, bool is_write) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; int ret = -EINVAL; - if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) { + if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) { gvt_vgpu_err("invalid index: %u\n", index); return -EINVAL; } @@ -1129,10 +1035,10 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, switch (index) { case VFIO_PCI_CONFIG_REGION_INDEX: if (is_write) - ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos, + ret = intel_vgpu_emulate_cfg_write(vgpu, pos, buf, count); else - ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos, + ret = intel_vgpu_emulate_cfg_read(vgpu, pos, buf, count); break; case VFIO_PCI_BAR0_REGION_INDEX: @@ -1150,20 +1056,19 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, case VFIO_PCI_ROM_REGION_INDEX: break; default: - if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) + if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) return -EINVAL; index -= VFIO_PCI_NUM_REGIONS; - return vdev->region[index].ops->rw(vgpu, buf, count, + return vgpu->region[index].ops->rw(vgpu, buf, count, ppos, is_write); } return ret == 0 ? 
count : ret; } -static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos) +static bool gtt_entry(struct intel_vgpu *vgpu, loff_t *ppos) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); struct intel_gvt *gvt = vgpu->gvt; int offset; @@ -1180,9 +1085,10 @@ static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos) true : false; } -static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, +static ssize_t intel_vgpu_read(struct vfio_device *vfio_dev, char __user *buf, size_t count, loff_t *ppos) { + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned int done = 0; int ret; @@ -1191,10 +1097,10 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, /* Only support GGTT entry 8 bytes read */ if (count >= 8 && !(*ppos % 8) && - gtt_entry(mdev, ppos)) { + gtt_entry(vgpu, ppos)) { u64 val; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; @@ -1206,7 +1112,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, } else if (count >= 4 && !(*ppos % 4)) { u32 val; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; @@ -1218,7 +1124,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, } else if (count >= 2 && !(*ppos % 2)) { u16 val; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; @@ -1230,7 +1136,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, } else { u8 val; - ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos, + ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos, false); if (ret <= 0) goto read_err; @@ -1253,10 +1159,11 @@ read_err: return -EFAULT; } -static ssize_t intel_vgpu_write(struct mdev_device *mdev, +static ssize_t intel_vgpu_write(struct vfio_device *vfio_dev, const char __user *buf, size_t count, loff_t *ppos) { + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned int done = 0; int ret; @@ -1265,13 +1172,13 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, /* Only support GGTT entry 8 bytes write */ if (count >= 8 && !(*ppos % 8) && - gtt_entry(mdev, ppos)) { + gtt_entry(vgpu, ppos)) { u64 val; if (copy_from_user(&val, buf, sizeof(val))) goto write_err; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; @@ -1283,7 +1190,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, if (copy_from_user(&val, buf, sizeof(val))) goto write_err; - ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; @@ -1295,7 +1202,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, if (copy_from_user(&val, buf, sizeof(val))) goto write_err; - ret = intel_vgpu_rw(mdev, (char *)&val, + ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; @@ -1307,7 +1214,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, if (copy_from_user(&val, buf, sizeof(val))) goto write_err; - ret = intel_vgpu_rw(mdev, &val, sizeof(val), + ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos, true); if (ret <= 0) goto write_err; @@ -1326,13 +1233,14 @@ 
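intel_vgpu_read() and intel_vgpu_write() above split each user buffer into naturally aligned accesses, preferring the widest size that fits: 8 bytes only for GGTT entries, then 4, 2, and 1. The selection ladder on its own, as a runnable sketch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Widest naturally aligned access for the current position; 8-byte
 * accesses are reserved for GGTT entries, as in the hunks above. */
static unsigned int access_size(uint64_t pos, size_t left, int is_gtt)
{
	if (left >= 8 && !(pos % 8) && is_gtt)
		return 8;
	if (left >= 4 && !(pos % 4))
		return 4;
	if (left >= 2 && !(pos % 2))
		return 2;
	return 1;
}

int main(void)
{
	printf("%u\n", access_size(0x100000, 8, 1));	/* 8: GGTT entry */
	printf("%u\n", access_size(0x4, 8, 0));		/* 4 */
	printf("%u\n", access_size(0x2, 3, 0));		/* 2 */
	printf("%u\n", access_size(0x1, 3, 0));		/* 1 */
	return 0;
}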
write_err: return -EFAULT; } -static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) +static int intel_vgpu_mmap(struct vfio_device *vfio_dev, + struct vm_area_struct *vma) { + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned int index; u64 virtaddr; unsigned long req_size, pgoff, req_start; pgprot_t pg_prot; - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); if (index >= VFIO_PCI_ROM_REGION_INDEX) @@ -1407,7 +1315,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu, gvt_vgpu_err("eventfd_ctx_fdget failed\n"); return PTR_ERR(trigger); } - kvmgt_vdev(vgpu)->msi_trigger = trigger; + vgpu->msi_trigger = trigger; } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count) intel_vgpu_release_msi_eventfd_ctx(vgpu); @@ -1455,11 +1363,10 @@ static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags, return func(vgpu, index, start, count, flags, data); } -static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, +static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd, unsigned long arg) { - struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned long minsz; gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd); @@ -1478,7 +1385,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, info.flags = VFIO_DEVICE_FLAGS_PCI; info.flags |= VFIO_DEVICE_FLAGS_RESET; info.num_regions = VFIO_PCI_NUM_REGIONS + - vdev->num_regions; + vgpu->num_regions; info.num_irqs = VFIO_PCI_NUM_IRQS; return copy_to_user((void __user *)arg, &info, minsz) ? @@ -1569,22 +1476,22 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, .header.version = 1 }; if (info.index >= VFIO_PCI_NUM_REGIONS + - vdev->num_regions) + vgpu->num_regions) return -EINVAL; info.index = array_index_nospec(info.index, VFIO_PCI_NUM_REGIONS + - vdev->num_regions); + vgpu->num_regions); i = info.index - VFIO_PCI_NUM_REGIONS; info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); - info.size = vdev->region[i].size; - info.flags = vdev->region[i].flags; + info.size = vgpu->region[i].size; + info.flags = vgpu->region[i].flags; - cap_type.type = vdev->region[i].type; - cap_type.subtype = vdev->region[i].subtype; + cap_type.type = vgpu->region[i].type; + cap_type.subtype = vgpu->region[i].subtype; ret = vfio_info_add_capability(&caps, &cap_type.header, @@ -1700,7 +1607,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, return ret; } else if (cmd == VFIO_DEVICE_RESET) { - intel_gvt_ops->vgpu_reset(vgpu); + intel_gvt_reset_vgpu(vgpu); return 0; } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) { struct vfio_device_gfx_plane_info dmabuf; @@ -1713,7 +1620,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, if (dmabuf.argsz < minsz) return -EINVAL; - ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf); + ret = intel_vgpu_query_plane(vgpu, &dmabuf); if (ret != 0) return ret; @@ -1721,14 +1628,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, -EFAULT : 0; } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) { __u32 dmabuf_id; - __s32 dmabuf_fd; if (get_user(dmabuf_id, (__u32 __user *)arg)) return -EFAULT; - - dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id); - return dmabuf_fd; - + return intel_vgpu_get_dmabuf(vgpu, dmabuf_id); } return -ENOTTY; @@ -1738,14 +1641,9 @@ static ssize_t 
vgpu_id_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct mdev_device *mdev = mdev_from_dev(dev); + struct intel_vgpu *vgpu = dev_get_drvdata(dev); - if (mdev) { - struct intel_vgpu *vgpu = (struct intel_vgpu *) - mdev_get_drvdata(mdev); - return sprintf(buf, "%d\n", vgpu->id); - } - return sprintf(buf, "\n"); + return sprintf(buf, "%d\n", vgpu->id); } static DEVICE_ATTR_RO(vgpu_id); @@ -1765,57 +1663,78 @@ static const struct attribute_group *intel_vgpu_groups[] = { NULL, }; -static struct mdev_parent_ops intel_vgpu_ops = { - .mdev_attr_groups = intel_vgpu_groups, - .create = intel_vgpu_create, - .remove = intel_vgpu_remove, - - .open_device = intel_vgpu_open_device, - .close_device = intel_vgpu_close_device, - - .read = intel_vgpu_read, - .write = intel_vgpu_write, - .mmap = intel_vgpu_mmap, - .ioctl = intel_vgpu_ioctl, +static const struct vfio_device_ops intel_vgpu_dev_ops = { + .open_device = intel_vgpu_open_device, + .close_device = intel_vgpu_close_device, + .read = intel_vgpu_read, + .write = intel_vgpu_write, + .mmap = intel_vgpu_mmap, + .ioctl = intel_vgpu_ioctl, }; -static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) +static int intel_vgpu_probe(struct mdev_device *mdev) { + struct device *pdev = mdev_parent_dev(mdev); + struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt; + struct intel_vgpu_type *type; + struct intel_vgpu *vgpu; int ret; - ret = intel_gvt_init_vgpu_type_groups((struct intel_gvt *)gvt); - if (ret) - return ret; + type = &gvt->types[mdev_get_type_group_id(mdev)]; + if (!type) + return -EINVAL; - intel_gvt_ops = ops; - intel_vgpu_ops.supported_type_groups = gvt_vgpu_type_groups; + vgpu = intel_gvt_create_vgpu(gvt, type); + if (IS_ERR(vgpu)) { + gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu)); + return PTR_ERR(vgpu); + } - ret = mdev_register_device(dev, &intel_vgpu_ops); - if (ret) - intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt); + INIT_WORK(&vgpu->release_work, intel_vgpu_release_work); + vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev, + &intel_vgpu_dev_ops); - return ret; + dev_set_drvdata(&mdev->dev, vgpu); + ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device); + if (ret) { + intel_gvt_destroy_vgpu(vgpu); + return ret; + } + + gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", + dev_name(mdev_dev(mdev))); + return 0; } -static void kvmgt_host_exit(struct device *dev, void *gvt) +static void intel_vgpu_remove(struct mdev_device *mdev) { - mdev_unregister_device(dev); - intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt); -} + struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev); -static int kvmgt_page_track_add(unsigned long handle, u64 gfn) + if (WARN_ON_ONCE(vgpu->attached)) + return; + intel_gvt_destroy_vgpu(vgpu); +} + +static struct mdev_driver intel_vgpu_mdev_driver = { + .driver = { + .name = "intel_vgpu_mdev", + .owner = THIS_MODULE, + .dev_groups = intel_vgpu_groups, + }, + .probe = intel_vgpu_probe, + .remove = intel_vgpu_remove, + .supported_type_groups = gvt_vgpu_type_groups, +}; + +int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) { - struct kvmgt_guest_info *info; - struct kvm *kvm; + struct kvm *kvm = info->kvm; struct kvm_memory_slot *slot; int idx; - if (!handle_valid(handle)) + if (!info->attached) return -ESRCH; - info = (struct kvmgt_guest_info *)handle; - kvm = info->kvm; - idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); if (!slot) { @@ -1837,19 +1756,15 @@ out: return 0; } -static int 
kvmgt_page_track_remove(unsigned long handle, u64 gfn) +int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) { - struct kvmgt_guest_info *info; - struct kvm *kvm; + struct kvm *kvm = info->kvm; struct kvm_memory_slot *slot; int idx; - if (!handle_valid(handle)) + if (!info->attached) return 0; - info = (struct kvmgt_guest_info *)handle; - kvm = info->kvm; - idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); if (!slot) { @@ -1875,11 +1790,11 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *val, int len, struct kvm_page_track_notifier_node *node) { - struct kvmgt_guest_info *info = container_of(node, - struct kvmgt_guest_info, track_node); + struct intel_vgpu *info = + container_of(node, struct intel_vgpu, track_node); if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa))) - intel_gvt_ops->write_protect_handler(info->vgpu, gpa, + intel_vgpu_page_track_handler(info, gpa, (void *)val, len); } @@ -1889,8 +1804,8 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, { int i; gfn_t gfn; - struct kvmgt_guest_info *info = container_of(node, - struct kvmgt_guest_info, track_node); + struct intel_vgpu *info = + container_of(node, struct intel_vgpu, track_node); write_lock(&kvm->mmu_lock); for (i = 0; i < slot->npages; i++) { @@ -1904,182 +1819,32 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, write_unlock(&kvm->mmu_lock); } -static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) -{ - struct intel_vgpu *itr; - struct kvmgt_guest_info *info; - int id; - bool ret = false; - - mutex_lock(&vgpu->gvt->lock); - for_each_active_vgpu(vgpu->gvt, itr, id) { - if (!handle_valid(itr->handle)) - continue; - - info = (struct kvmgt_guest_info *)itr->handle; - if (kvm && kvm == info->kvm) { - ret = true; - goto out; - } - } -out: - mutex_unlock(&vgpu->gvt->lock); - return ret; -} - -static int kvmgt_guest_init(struct mdev_device *mdev) -{ - struct kvmgt_guest_info *info; - struct intel_vgpu *vgpu; - struct kvmgt_vdev *vdev; - struct kvm *kvm; - - vgpu = mdev_get_drvdata(mdev); - if (handle_valid(vgpu->handle)) - return -EEXIST; - - vdev = kvmgt_vdev(vgpu); - kvm = vdev->kvm; - if (!kvm || kvm->mm != current->mm) { - gvt_vgpu_err("KVM is required to use Intel vGPU\n"); - return -ESRCH; - } - - if (__kvmgt_vgpu_exist(vgpu, kvm)) - return -EEXIST; - - info = vzalloc(sizeof(struct kvmgt_guest_info)); - if (!info) - return -ENOMEM; - - vgpu->handle = (unsigned long)info; - info->vgpu = vgpu; - info->kvm = kvm; - kvm_get_kvm(info->kvm); - - kvmgt_protect_table_init(info); - gvt_cache_init(vgpu); - - info->track_node.track_write = kvmgt_page_track_write; - info->track_node.track_flush_slot = kvmgt_page_track_flush_slot; - kvm_page_track_register_notifier(kvm, &info->track_node); - - debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs, - &vdev->nr_cache_entries); - return 0; -} - -static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) -{ - debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, - info->vgpu->debugfs)); - - kvm_page_track_unregister_notifier(info->kvm, &info->track_node); - kvm_put_kvm(info->kvm); - kvmgt_protect_table_destroy(info); - gvt_cache_destroy(info->vgpu); - vfree(info); - - return true; -} - -static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle) -{ - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; - - vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL); - - if (!vgpu->vdev) - return -ENOMEM; - - kvmgt_vdev(vgpu)->vgpu = vgpu; - - return 0; -} - -static void 
kvmgt_detach_vgpu(void *p_vgpu) +void intel_vgpu_detach_regions(struct intel_vgpu *vgpu) { int i; - struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; - struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - if (!vdev->region) + if (!vgpu->region) return; - for (i = 0; i < vdev->num_regions; i++) - if (vdev->region[i].ops->release) - vdev->region[i].ops->release(vgpu, - &vdev->region[i]); - vdev->num_regions = 0; - kfree(vdev->region); - vdev->region = NULL; - - kfree(vdev); + for (i = 0; i < vgpu->num_regions; i++) + if (vgpu->region[i].ops->release) + vgpu->region[i].ops->release(vgpu, + &vgpu->region[i]); + vgpu->num_regions = 0; + kfree(vgpu->region); + vgpu->region = NULL; } -static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) -{ - struct kvmgt_guest_info *info; - struct intel_vgpu *vgpu; - struct kvmgt_vdev *vdev; - - if (!handle_valid(handle)) - return -ESRCH; - - info = (struct kvmgt_guest_info *)handle; - vgpu = info->vgpu; - vdev = kvmgt_vdev(vgpu); - - /* - * When guest is poweroff, msi_trigger is set to NULL, but vgpu's - * config and mmio register isn't restored to default during guest - * poweroff. If this vgpu is still used in next vm, this vgpu's pipe - * may be enabled, then once this vgpu is active, it will get inject - * vblank interrupt request. But msi_trigger is null until msi is - * enabled by guest. so if msi_trigger is null, success is still - * returned and don't inject interrupt into guest. - */ - if (vdev->msi_trigger == NULL) - return 0; - - if (eventfd_signal(vdev->msi_trigger, 1) == 1) - return 0; - - return -EFAULT; -} - -static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) -{ - struct kvmgt_guest_info *info; - kvm_pfn_t pfn; - - if (!handle_valid(handle)) - return INTEL_GVT_INVALID_ADDR; - - info = (struct kvmgt_guest_info *)handle; - - pfn = gfn_to_pfn(info->kvm, gfn); - if (is_error_noslot_pfn(pfn)) - return INTEL_GVT_INVALID_ADDR; - - return pfn; -} - -static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, +int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { - struct intel_vgpu *vgpu; - struct kvmgt_vdev *vdev; struct gvt_dma *entry; int ret; - if (!handle_valid(handle)) + if (!vgpu->attached) return -EINVAL; - vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; - vdev = kvmgt_vdev(vgpu); - - mutex_lock(&vdev->cache_lock); + mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_gfn(vgpu, gfn); if (!entry) { @@ -2107,36 +1872,31 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, *dma_addr = entry->dma_addr; } - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); return 0; err_unmap: gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size); err_unlock: - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); return ret; } -static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) +int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr) { - struct kvmgt_guest_info *info; - struct kvmgt_vdev *vdev; struct gvt_dma *entry; int ret = 0; - if (!handle_valid(handle)) + if (!vgpu->attached) return -ENODEV; - info = (struct kvmgt_guest_info *)handle; - vdev = kvmgt_vdev(info->vgpu); - - mutex_lock(&vdev->cache_lock); - entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); + mutex_lock(&vgpu->cache_lock); + entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); if (entry) kref_get(&entry->ref); else ret = -ENOMEM; - mutex_unlock(&vdev->cache_lock); 
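The DMA map/pin/unmap paths in this area share one lifecycle: intel_gvt_dma_map_guest_page() creates a cache entry with kref_init(), intel_gvt_dma_pin_guest_page() takes an extra reference, and intel_gvt_dma_unmap_guest_page() drops one, with __gvt_dma_release() unmapping and freeing on the final put. The same protocol with a plain counter standing in for struct kref.

#include <stdio.h>

struct dma_entry {
	unsigned long gfn;
	unsigned long long dma_addr;
	int ref;	/* stand-in for struct kref */
};

static void entry_get(struct dma_entry *e)
{
	e->ref++;
}

/* Returns 1 when the last reference is dropped; the kernel side then
 * calls gvt_dma_unmap_page() and removes the rb-tree nodes. */
static int entry_put(struct dma_entry *e)
{
	if (--e->ref == 0) {
		printf("unmap gfn %#lx\n", e->gfn);
		return 1;
	}
	return 0;
}

int main(void)
{
	/* ref starts at 1, as after kref_init() in the map path. */
	struct dma_entry e = { 0x1234, 0xabcd000, 1 };

	entry_get(&e);	/* intel_gvt_dma_pin_guest_page() */
	entry_put(&e);	/* one user gone, entry survives */
	entry_put(&e);	/* last user: prints "unmap gfn 0x1234" */
	return 0;
}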
+ mutex_unlock(&vgpu->cache_lock); return ret; } @@ -2150,109 +1910,290 @@ static void __gvt_dma_release(struct kref *ref) __gvt_cache_remove_entry(entry->vgpu, entry); } -static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) +void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu, + dma_addr_t dma_addr) { - struct intel_vgpu *vgpu; - struct kvmgt_vdev *vdev; struct gvt_dma *entry; - if (!handle_valid(handle)) + if (!vgpu->attached) return; - vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; - vdev = kvmgt_vdev(vgpu); - - mutex_lock(&vdev->cache_lock); + mutex_lock(&vgpu->cache_lock); entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); if (entry) kref_put(&entry->ref, __gvt_dma_release); - mutex_unlock(&vdev->cache_lock); + mutex_unlock(&vgpu->cache_lock); } -static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, - void *buf, unsigned long len, bool write) +static void init_device_info(struct intel_gvt *gvt) { - struct kvmgt_guest_info *info; + struct intel_gvt_device_info *info = &gvt->device_info; + struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev); - if (!handle_valid(handle)) - return -ESRCH; + info->max_support_vgpus = 8; + info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; + info->mmio_size = 2 * 1024 * 1024; + info->mmio_bar = 0; + info->gtt_start_offset = 8 * 1024 * 1024; + info->gtt_entry_size = 8; + info->gtt_entry_size_shift = 3; + info->gmadr_bytes_in_cmd = 8; + info->max_surface_size = 36 * 1024 * 1024; + info->msi_cap_offset = pdev->msi_cap; +} - info = (struct kvmgt_guest_info *)handle; +static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + int id; - return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group, - gpa, buf, len, write); + mutex_lock(&gvt->lock); + idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) { + if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id, + (void *)&gvt->service_request)) { + if (vgpu->active) + intel_vgpu_emulate_vblank(vgpu); + } + } + mutex_unlock(&gvt->lock); } -static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa, - void *buf, unsigned long len) +static int gvt_service_thread(void *data) { - return kvmgt_rw_gpa(handle, gpa, buf, len, false); + struct intel_gvt *gvt = (struct intel_gvt *)data; + int ret; + + gvt_dbg_core("service thread start\n"); + + while (!kthread_should_stop()) { + ret = wait_event_interruptible(gvt->service_thread_wq, + kthread_should_stop() || gvt->service_request); + + if (kthread_should_stop()) + break; + + if (WARN_ONCE(ret, "service thread is waken up by signal.\n")) + continue; + + intel_gvt_test_and_emulate_vblank(gvt); + + if (test_bit(INTEL_GVT_REQUEST_SCHED, + (void *)&gvt->service_request) || + test_bit(INTEL_GVT_REQUEST_EVENT_SCHED, + (void *)&gvt->service_request)) { + intel_gvt_schedule(gvt); + } + } + + return 0; } -static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa, - void *buf, unsigned long len) +static void clean_service_thread(struct intel_gvt *gvt) { - return kvmgt_rw_gpa(handle, gpa, buf, len, true); + kthread_stop(gvt->service_thread); } -static unsigned long kvmgt_virt_to_pfn(void *addr) +static int init_service_thread(struct intel_gvt *gvt) { - return PFN_DOWN(__pa(addr)); + init_waitqueue_head(&gvt->service_thread_wq); + + gvt->service_thread = kthread_run(gvt_service_thread, + gvt, "gvt_service_thread"); + if (IS_ERR(gvt->service_thread)) { + gvt_err("fail to start service thread.\n"); + return PTR_ERR(gvt->service_thread); + } + return 0; } -static bool 
kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) +/** + * intel_gvt_clean_device - clean a GVT device + * @i915: i915 private + * + * This function is called at the driver unloading stage, to free the + * resources owned by a GVT device. + * + */ +static void intel_gvt_clean_device(struct drm_i915_private *i915) { - struct kvmgt_guest_info *info; - struct kvm *kvm; - int idx; - bool ret; + struct intel_gvt *gvt = fetch_and_zero(&i915->gvt); - if (!handle_valid(handle)) - return false; + if (drm_WARN_ON(&i915->drm, !gvt)) + return; - info = (struct kvmgt_guest_info *)handle; - kvm = info->kvm; + mdev_unregister_device(i915->drm.dev); + intel_gvt_cleanup_vgpu_type_groups(gvt); + intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); + intel_gvt_clean_vgpu_types(gvt); - idx = srcu_read_lock(&kvm->srcu); - ret = kvm_is_visible_gfn(kvm, gfn); - srcu_read_unlock(&kvm->srcu, idx); + intel_gvt_debugfs_clean(gvt); + clean_service_thread(gvt); + intel_gvt_clean_cmd_parser(gvt); + intel_gvt_clean_sched_policy(gvt); + intel_gvt_clean_workload_scheduler(gvt); + intel_gvt_clean_gtt(gvt); + intel_gvt_free_firmware(gvt); + intel_gvt_clean_mmio_info(gvt); + idr_destroy(&gvt->vgpu_idr); + + kfree(i915->gvt); +} + +/** + * intel_gvt_init_device - initialize a GVT device + * @i915: drm i915 private data + * + * This function is called at the initialization stage, to initialize + * necessary GVT components. + * + * Returns: + * Zero on success, negative error code if failed. + * + */ +static int intel_gvt_init_device(struct drm_i915_private *i915) +{ + struct intel_gvt *gvt; + struct intel_vgpu *vgpu; + int ret; + + if (drm_WARN_ON(&i915->drm, i915->gvt)) + return -EEXIST; + + gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL); + if (!gvt) + return -ENOMEM; + + gvt_dbg_core("init gvt device\n"); + + idr_init_base(&gvt->vgpu_idr, 1); + spin_lock_init(&gvt->scheduler.mmio_context_lock); + mutex_init(&gvt->lock); + mutex_init(&gvt->sched_lock); + gvt->gt = to_gt(i915); + i915->gvt = gvt; + + init_device_info(gvt); + + ret = intel_gvt_setup_mmio_info(gvt); + if (ret) + goto out_clean_idr; + + intel_gvt_init_engine_mmio_context(gvt); + + ret = intel_gvt_load_firmware(gvt); + if (ret) + goto out_clean_mmio_info; + + ret = intel_gvt_init_irq(gvt); + if (ret) + goto out_free_firmware; + + ret = intel_gvt_init_gtt(gvt); + if (ret) + goto out_free_firmware; + ret = intel_gvt_init_workload_scheduler(gvt); + if (ret) + goto out_clean_gtt; + + ret = intel_gvt_init_sched_policy(gvt); + if (ret) + goto out_clean_workload_scheduler; + + ret = intel_gvt_init_cmd_parser(gvt); + if (ret) + goto out_clean_sched_policy; + + ret = init_service_thread(gvt); + if (ret) + goto out_clean_cmd_parser; + + ret = intel_gvt_init_vgpu_types(gvt); + if (ret) + goto out_clean_thread; + + vgpu = intel_gvt_create_idle_vgpu(gvt); + if (IS_ERR(vgpu)) { + ret = PTR_ERR(vgpu); + gvt_err("failed to create idle vgpu\n"); + goto out_clean_types; + } + gvt->idle_vgpu = vgpu; + + intel_gvt_debugfs_init(gvt); + + ret = intel_gvt_init_vgpu_type_groups(gvt); + if (ret) + goto out_destroy_idle_vgpu; + + ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_driver); + if (ret) + goto out_cleanup_vgpu_type_groups; + + gvt_dbg_core("gvt device initialization is done\n"); + return 0; + +out_cleanup_vgpu_type_groups: + intel_gvt_cleanup_vgpu_type_groups(gvt); +out_destroy_idle_vgpu: + intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); + intel_gvt_debugfs_clean(gvt); +out_clean_types: + intel_gvt_clean_vgpu_types(gvt); +out_clean_thread: + 
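The out_* ladder that continues below is the kernel's standard goto-unwind idiom: every label undoes exactly one successful step, in the reverse order of the calls in intel_gvt_init_device() above. A compact sketch of the convention, using placeholder step helpers:

int step_a(void), step_b(void), step_c(void);	/* placeholders */
void undo_step_a(void), undo_step_b(void);

static int demo_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;	/* nothing to undo yet */
	ret = step_b();
	if (ret)
		goto undo_a;
	ret = step_c();
	if (ret)
		goto undo_b;
	return 0;

undo_b:				/* mirror image of the setup order */
	undo_step_b();
undo_a:
	undo_step_a();
	return ret;
}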
clean_service_thread(gvt); +out_clean_cmd_parser: + intel_gvt_clean_cmd_parser(gvt); +out_clean_sched_policy: + intel_gvt_clean_sched_policy(gvt); +out_clean_workload_scheduler: + intel_gvt_clean_workload_scheduler(gvt); +out_clean_gtt: + intel_gvt_clean_gtt(gvt); +out_free_firmware: + intel_gvt_free_firmware(gvt); +out_clean_mmio_info: + intel_gvt_clean_mmio_info(gvt); +out_clean_idr: + idr_destroy(&gvt->vgpu_idr); + kfree(gvt); + i915->gvt = NULL; return ret; } -static const struct intel_gvt_mpt kvmgt_mpt = { - .type = INTEL_GVT_HYPERVISOR_KVM, - .host_init = kvmgt_host_init, - .host_exit = kvmgt_host_exit, - .attach_vgpu = kvmgt_attach_vgpu, - .detach_vgpu = kvmgt_detach_vgpu, - .inject_msi = kvmgt_inject_msi, - .from_virt_to_mfn = kvmgt_virt_to_pfn, - .enable_page_track = kvmgt_page_track_add, - .disable_page_track = kvmgt_page_track_remove, - .read_gpa = kvmgt_read_gpa, - .write_gpa = kvmgt_write_gpa, - .gfn_to_mfn = kvmgt_gfn_to_pfn, - .dma_map_guest_page = kvmgt_dma_map_guest_page, - .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page, - .dma_pin_guest_page = kvmgt_dma_pin_guest_page, - .set_opregion = kvmgt_set_opregion, - .set_edid = kvmgt_set_edid, - .get_vfio_device = kvmgt_get_vfio_device, - .put_vfio_device = kvmgt_put_vfio_device, - .is_valid_gfn = kvmgt_is_valid_gfn, +static void intel_gvt_pm_resume(struct drm_i915_private *i915) +{ + struct intel_gvt *gvt = i915->gvt; + + intel_gvt_restore_fence(gvt); + intel_gvt_restore_mmio(gvt); + intel_gvt_restore_ggtt(gvt); +} + +static const struct intel_vgpu_ops intel_gvt_vgpu_ops = { + .init_device = intel_gvt_init_device, + .clean_device = intel_gvt_clean_device, + .pm_resume = intel_gvt_pm_resume, }; static int __init kvmgt_init(void) { - if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0) - return -ENODEV; - return 0; + int ret; + + ret = intel_gvt_set_ops(&intel_gvt_vgpu_ops); + if (ret) + return ret; + + ret = mdev_register_driver(&intel_vgpu_mdev_driver); + if (ret) + intel_gvt_clear_ops(&intel_gvt_vgpu_ops); + return ret; } static void __exit kvmgt_exit(void) { - intel_gvt_unregister_hypervisor(); + mdev_unregister_driver(&intel_vgpu_mdev_driver); + intel_gvt_clear_ops(&intel_gvt_vgpu_ops); } module_init(kvmgt_init); diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 5db0ef83d522..9acc00505fde 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -139,7 +139,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, } if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) { - ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes); + ret = intel_gvt_read_gpa(vgpu, pa, p_data, bytes); goto out; } @@ -215,7 +215,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, } if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) { - ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes); + ret = intel_gvt_write_gpa(vgpu, pa, p_data, bytes); goto out; } diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 7c26af39fbfc..bba154e38705 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -72,7 +72,6 @@ struct intel_gvt_mmio_info { const struct intel_engine_cs * intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg); unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt); -bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device); int intel_gvt_setup_mmio_info(struct intel_gvt *gvt); void intel_gvt_clean_mmio_info(struct 
intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h deleted file mode 100644 index e6c5a792a49a..000000000000 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ /dev/null @@ -1,400 +0,0 @@ -/* - * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * Authors: - * Eddie Dong <eddie.dong@intel.com> - * Dexuan Cui - * Jike Song <jike.song@intel.com> - * - * Contributors: - * Zhi Wang <zhi.a.wang@intel.com> - * - */ - -#ifndef _GVT_MPT_H_ -#define _GVT_MPT_H_ - -#include "gvt.h" - -/** - * DOC: Hypervisor Service APIs for GVT-g Core Logic - * - * This is the glue layer between specific hypervisor MPT modules and GVT-g core - * logic. Each kind of hypervisor MPT module provides a collection of function - * callbacks and will be attached to GVT host when the driver is loading. - * GVT-g core logic will call these APIs to request specific services from - * hypervisor. - */ - -/** - * intel_gvt_hypervisor_host_init - init GVT-g host side - * - * Returns: - * Zero on success, negative error code if failed - */ -static inline int intel_gvt_hypervisor_host_init(struct device *dev, - void *gvt, const void *ops) -{ - if (!intel_gvt_host.mpt->host_init) - return -ENODEV; - - return intel_gvt_host.mpt->host_init(dev, gvt, ops); -} - -/** - * intel_gvt_hypervisor_host_exit - exit GVT-g host side - */ -static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) -{ - /* optional to provide */ - if (!intel_gvt_host.mpt->host_exit) - return; - - intel_gvt_host.mpt->host_exit(dev, gvt); -} - -/** - * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU - * related stuffs inside hypervisor. - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu) -{ - /* optional to provide */ - if (!intel_gvt_host.mpt->attach_vgpu) - return 0; - - return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle); -} - -/** - * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU - * related stuffs inside hypervisor. - * - * Returns: - * Zero on success, negative error code if failed. 
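Every helper in the mpt.h being deleted here follows the same shape: a static inline that dereferences the registered intel_gvt_host.mpt table and forwards an opaque handle. With KVMGT as the only remaining backend, the series replaces each trampoline with a direct call. A condensed before/after, using the file's read_gpa wrapper (which appears further below) as the example; the "after" prototype is the one this series adds to gvt.h:

/* before: indirect call through the MPT ops table */
static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
		unsigned long gpa, void *buf, unsigned long len)
{
	return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
}

/* after: one implementation, called directly by the core */
int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
		       void *buf, unsigned long len);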
- */ -static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu) -{ - /* optional to provide */ - if (!intel_gvt_host.mpt->detach_vgpu) - return; - - intel_gvt_host.mpt->detach_vgpu(vgpu); -} - -#define MSI_CAP_CONTROL(offset) (offset + 2) -#define MSI_CAP_ADDRESS(offset) (offset + 4) -#define MSI_CAP_DATA(offset) (offset + 8) -#define MSI_CAP_EN 0x1 - -/** - * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu) -{ - unsigned long offset = vgpu->gvt->device_info.msi_cap_offset; - u16 control, data; - u32 addr; - int ret; - - control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset)); - addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset)); - data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset)); - - /* Do not generate MSI if MSIEN is disable */ - if (!(control & MSI_CAP_EN)) - return 0; - - if (WARN(control & GENMASK(15, 1), "only support one MSI format\n")) - return -EINVAL; - - trace_inject_msi(vgpu->id, addr, data); - - ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data); - if (ret) - return ret; - return 0; -} - -/** - * intel_gvt_hypervisor_set_wp_page - translate a host VA into MFN - * @p: host kernel virtual address - * - * Returns: - * MFN on success, INTEL_GVT_INVALID_ADDR if failed. - */ -static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p) -{ - return intel_gvt_host.mpt->from_virt_to_mfn(p); -} - -/** - * intel_gvt_hypervisor_enable_page_track - track a guest page - * @vgpu: a vGPU - * @gfn: the gfn of guest - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_enable_page_track( - struct intel_vgpu *vgpu, unsigned long gfn) -{ - return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn); -} - -/** - * intel_gvt_hypervisor_disable_page_track - untrack a guest page - * @vgpu: a vGPU - * @gfn: the gfn of guest - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_disable_page_track( - struct intel_vgpu *vgpu, unsigned long gfn) -{ - return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn); -} - -/** - * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer - * @vgpu: a vGPU - * @gpa: guest physical address - * @buf: host data buffer - * @len: data length - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu, - unsigned long gpa, void *buf, unsigned long len) -{ - return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len); -} - -/** - * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA - * @vgpu: a vGPU - * @gpa: guest physical address - * @buf: host data buffer - * @len: data length - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu, - unsigned long gpa, void *buf, unsigned long len) -{ - return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len); -} - -/** - * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN - * @vgpu: a vGPU - * @gpfn: guest pfn - * - * Returns: - * MFN on success, INTEL_GVT_INVALID_ADDR if failed. 
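intel_gvt_hypervisor_inject_msi() above hard-codes the MSI capability layout it expects in the vGPU's config space: message control at +2, address at +4, data at +8, with bit 0 of control gating delivery. The same parse as a standalone helper over a raw config-space snapshot (the demo_* names are illustrative, not from the patch):

#include <linux/types.h>

#define DEMO_MSI_CTRL(off)	((off) + 2)
#define DEMO_MSI_ADDR(off)	((off) + 4)
#define DEMO_MSI_DATA(off)	((off) + 8)
#define DEMO_MSI_EN		0x1

/* cfg: snapshot of the device's PCI config space; off: MSI cap offset */
static bool demo_msi_message(const u8 *cfg, unsigned long off,
			     u32 *addr, u16 *data)
{
	u16 ctrl = *(const u16 *)(cfg + DEMO_MSI_CTRL(off));

	if (!(ctrl & DEMO_MSI_EN))
		return false;		/* MSIEN clear: nothing to inject */

	*addr = *(const u32 *)(cfg + DEMO_MSI_ADDR(off));
	*data = *(const u16 *)(cfg + DEMO_MSI_DATA(off));
	return true;
}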
- */ -static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn( - struct intel_vgpu *vgpu, unsigned long gfn) -{ - return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn); -} - -/** - * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page - * @vgpu: a vGPU - * @gfn: guest pfn - * @size: page size - * @dma_addr: retrieve allocated dma addr - * - * Returns: - * 0 on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_dma_map_guest_page( - struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, - dma_addr_t *dma_addr) -{ - return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size, - dma_addr); -} - -/** - * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page - * @vgpu: a vGPU - * @dma_addr: the mapped dma addr - */ -static inline void intel_gvt_hypervisor_dma_unmap_guest_page( - struct intel_vgpu *vgpu, dma_addr_t dma_addr) -{ - intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr); -} - -/** - * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf - * @vgpu: a vGPU - * @dma_addr: guest dma addr - * - * Returns: - * 0 on success, negative error code if failed. - */ -static inline int -intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu, - dma_addr_t dma_addr) -{ - return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr); -} - -/** - * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN - * @vgpu: a vGPU - * @gfn: guest PFN - * @mfn: host PFN - * @nr: amount of PFNs - * @map: map or unmap - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_map_gfn_to_mfn( - struct intel_vgpu *vgpu, unsigned long gfn, - unsigned long mfn, unsigned int nr, - bool map) -{ - /* a MPT implementation could have MMIO mapped elsewhere */ - if (!intel_gvt_host.mpt->map_gfn_to_mfn) - return 0; - - return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr, - map); -} - -/** - * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region - * @vgpu: a vGPU - * @start: the beginning of the guest physical address region - * @end: the end of the guest physical address region - * @map: map or unmap - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_set_trap_area( - struct intel_vgpu *vgpu, u64 start, u64 end, bool map) -{ - /* a MPT implementation could have MMIO trapped elsewhere */ - if (!intel_gvt_host.mpt->set_trap_area) - return 0; - - return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map); -} - -/** - * intel_gvt_hypervisor_set_opregion - Set opregion for guest - * @vgpu: a vGPU - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu) -{ - if (!intel_gvt_host.mpt->set_opregion) - return 0; - - return intel_gvt_host.mpt->set_opregion(vgpu); -} - -/** - * intel_gvt_hypervisor_set_edid - Set EDID region for guest - * @vgpu: a vGPU - * @port_num: display port number - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu, - int port_num) -{ - if (!intel_gvt_host.mpt->set_edid) - return 0; - - return intel_gvt_host.mpt->set_edid(vgpu, port_num); -} - -/** - * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count - * @vgpu: a vGPU - * - * Returns: - * Zero on success, negative error code if failed. 
- */ -static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu) -{ - if (!intel_gvt_host.mpt->get_vfio_device) - return 0; - - return intel_gvt_host.mpt->get_vfio_device(vgpu); -} - -/** - * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count - * @vgpu: a vGPU - * - * Returns: - * Zero on success, negative error code if failed. - */ -static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu) -{ - if (!intel_gvt_host.mpt->put_vfio_device) - return; - - intel_gvt_host.mpt->put_vfio_device(vgpu); -} - -/** - * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn - * @vgpu: a vGPU - * @gfn: guest PFN - * - * Returns: - * true on valid gfn, false on not. - */ -static inline bool intel_gvt_hypervisor_is_valid_gfn( - struct intel_vgpu *vgpu, unsigned long gfn) -{ - if (!intel_gvt_host.mpt->is_valid_gfn) - return true; - - return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn); -} - -int intel_gvt_register_hypervisor(const struct intel_gvt_mpt *); -void intel_gvt_unregister_hypervisor(void); - -#endif /* _GVT_MPT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 33569b910ed5..d2bed466540a 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -255,33 +255,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu) return 0; } -static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) -{ - u64 mfn; - int i, ret; - - for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) { - mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va - + i * PAGE_SIZE); - if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("fail to get MFN from VA\n"); - return -EINVAL; - } - ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, - vgpu_opregion(vgpu)->gfn[i], - mfn, 1, map); - if (ret) { - gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n", - ret); - return ret; - } - } - - vgpu_opregion(vgpu)->mapped = map; - - return 0; -} - /** * intel_vgpu_opregion_base_write_handler - Opregion base register write handler * @@ -294,34 +267,13 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa) { - int i, ret = 0; + int i; gvt_dbg_core("emulate opregion from kernel\n"); - switch (intel_gvt_host.hypervisor_type) { - case INTEL_GVT_HYPERVISOR_KVM: - for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) - vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; - break; - case INTEL_GVT_HYPERVISOR_XEN: - /** - * Wins guest on Xengt will write this register twice: xen - * hvmloader and windows graphic driver. 
- */ - if (vgpu_opregion(vgpu)->mapped) - map_vgpu_opregion(vgpu, false); - - for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) - vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; - - ret = map_vgpu_opregion(vgpu, true); - break; - default: - ret = -EINVAL; - gvt_vgpu_err("not supported hypervisor\n"); - } - - return ret; + for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) + vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; + return 0; } /** @@ -336,12 +288,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) if (!vgpu_opregion(vgpu)->va) return; - if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { - if (vgpu_opregion(vgpu)->mapped) - map_vgpu_opregion(vgpu, false); - } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) { - /* Guest opregion is released by VFIO */ - } + /* Guest opregion is released by VFIO */ free_pages((unsigned long)vgpu_opregion(vgpu)->va, get_order(INTEL_GVT_OPREGION_SIZE)); @@ -470,39 +417,22 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) u64 scic_pa = 0, parm_pa = 0; int ret; - switch (intel_gvt_host.hypervisor_type) { - case INTEL_GVT_HYPERVISOR_XEN: - scic = *((u32 *)vgpu_opregion(vgpu)->va + - INTEL_GVT_OPREGION_SCIC); - parm = *((u32 *)vgpu_opregion(vgpu)->va + - INTEL_GVT_OPREGION_PARM); - break; - case INTEL_GVT_HYPERVISOR_KVM: - scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + - INTEL_GVT_OPREGION_SCIC; - parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + - INTEL_GVT_OPREGION_PARM; - - ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa, - &scic, sizeof(scic)); - if (ret) { - gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", - ret, scic_pa, sizeof(scic)); - return ret; - } - - ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa, - &parm, sizeof(parm)); - if (ret) { - gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", - ret, scic_pa, sizeof(scic)); - return ret; - } + scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + + INTEL_GVT_OPREGION_SCIC; + parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + + INTEL_GVT_OPREGION_PARM; + ret = intel_gvt_read_gpa(vgpu, scic_pa, &scic, sizeof(scic)); + if (ret) { + gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; + } - break; - default: - gvt_vgpu_err("not supported hypervisor\n"); - return -EINVAL; + ret = intel_gvt_read_gpa(vgpu, parm_pa, &parm, sizeof(parm)); + if (ret) { + gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; } if (!(swsci & SWSCI_SCI_SELECT)) { @@ -535,34 +465,18 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) parm = 0; out: - switch (intel_gvt_host.hypervisor_type) { - case INTEL_GVT_HYPERVISOR_XEN: - *((u32 *)vgpu_opregion(vgpu)->va + - INTEL_GVT_OPREGION_SCIC) = scic; - *((u32 *)vgpu_opregion(vgpu)->va + - INTEL_GVT_OPREGION_PARM) = parm; - break; - case INTEL_GVT_HYPERVISOR_KVM: - ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa, - &scic, sizeof(scic)); - if (ret) { - gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", - ret, scic_pa, sizeof(scic)); - return ret; - } - - ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa, - &parm, sizeof(parm)); - if (ret) { - gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", - ret, scic_pa, sizeof(scic)); - return ret; - } + ret = intel_gvt_write_gpa(vgpu, scic_pa, &scic, sizeof(scic)); + if (ret) { + gvt_vgpu_err("guest opregion write error %d, gpa 
0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; + } - break; - default: - gvt_vgpu_err("not supported hypervisor\n"); - return -EINVAL; + ret = intel_gvt_write_gpa(vgpu, parm_pa, &parm, sizeof(parm)); + if (ret) { + gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; } return 0; diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c index 84856022528e..3375b51c75f1 100644 --- a/drivers/gpu/drm/i915/gvt/page_track.c +++ b/drivers/gpu/drm/i915/gvt/page_track.c @@ -87,7 +87,7 @@ void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu, track = radix_tree_delete(&vgpu->page_track_tree, gfn); if (track) { if (track->tracked) - intel_gvt_hypervisor_disable_page_track(vgpu, gfn); + intel_gvt_page_track_remove(vgpu, gfn); kfree(track); } } @@ -112,7 +112,7 @@ int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) if (track->tracked) return 0; - ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn); + ret = intel_gvt_page_track_add(vgpu, gfn); if (ret) return ret; track->tracked = true; @@ -139,7 +139,7 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) if (!track->tracked) return 0; - ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn); + ret = intel_gvt_page_track_remove(vgpu, gfn); if (ret) return ret; track->tracked = false; @@ -172,7 +172,7 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, if (unlikely(vgpu->failsafe)) { /* Remove write protection to prevent furture traps. */ - intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT); + intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT); } else { ret = page_track->handler(page_track, gpa, data, bytes); if (ret) diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 7d666d34f9ff..d8216c63c39a 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -132,6 +132,13 @@ #define RING_GFX_MODE(base) _MMIO((base) + 0x29c) #define VF_GUARDBAND _MMIO(0x83a4) - #define BCS_TILE_REGISTER_VAL_OFFSET (0x43*4) + +/* XXX FIXME i915 has changed PP_XXX definition */ +#define PCH_PP_STATUS _MMIO(0xc7200) +#define PCH_PP_CONTROL _MMIO(0xc7204) +#define PCH_PP_ON_DELAYS _MMIO(0xc7208) +#define PCH_PP_OFF_DELAYS _MMIO(0xc720c) +#define PCH_PP_DIVISOR _MMIO(0xc7210) + #endif diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 679476da0640..d6fe94cd0fdb 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -150,10 +150,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) sr_oa_regs(workload, (u32 *)shadow_ring_context, true); #define COPY_REG(name) \ - intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ + intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \ + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) #define COPY_REG_MASKED(name) {\ - intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ + intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \ + RING_CTX_OFF(name.val),\ &shadow_ring_context->name.val, 4);\ shadow_ring_context->name.val |= 0xffff << 16;\ @@ -167,7 +167,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) COPY_REG(rcs_indirect_ctx); COPY_REG(rcs_indirect_ctx_offset); } else if (workload->engine->id == BCS0) - intel_gvt_hypervisor_read_gpa(vgpu, + intel_gvt_read_gpa(vgpu, workload->ring_context_gpa + BCS_TILE_REGISTER_VAL_OFFSET, 
(void *)shadow_ring_context + @@ -178,7 +178,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) /* don't copy Ring Context (the first 0x50 dwords), * only copy the Engine Context part from guest */ - intel_gvt_hypervisor_read_gpa(vgpu, + intel_gvt_read_gpa(vgpu, workload->ring_context_gpa + RING_CTX_SIZE, (void *)shadow_ring_context + @@ -245,7 +245,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) continue; read: - intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size); + intel_gvt_read_gpa(vgpu, gpa_base, dst, gpa_size); gpa_base = context_gpa; gpa_size = I915_GTT_PAGE_SIZE; dst = context_base + (i << I915_GTT_PAGE_SHIFT); @@ -911,8 +911,7 @@ static void update_guest_pdps(struct intel_vgpu *vgpu, gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val); for (i = 0; i < 8; i++) - intel_gvt_hypervisor_write_gpa(vgpu, - gpa + i * 8, &pdp[7 - i], 4); + intel_gvt_write_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4); } static __maybe_unused bool @@ -1007,13 +1006,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload) continue; write: - intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size); + intel_gvt_write_gpa(vgpu, gpa_base, src, gpa_size); gpa_base = context_gpa; gpa_size = I915_GTT_PAGE_SIZE; src = context_base + (i << I915_GTT_PAGE_SHIFT); } - intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + + intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4); shadow_ring_context = (void *) ctx->lrc_reg_state; @@ -1028,7 +1027,7 @@ write: } #define COPY_REG(name) \ - intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \ + intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) COPY_REG(ctx_ctrl); @@ -1036,7 +1035,7 @@ write: #undef COPY_REG - intel_gvt_hypervisor_write_gpa(vgpu, + intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + sizeof(*shadow_ring_context), (void *)shadow_ring_context + @@ -1573,7 +1572,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu, gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val); for (i = 0; i < 8; i++) - intel_gvt_hypervisor_read_gpa(vgpu, + intel_gvt_read_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4); } @@ -1644,10 +1643,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, return ERR_PTR(-EINVAL); } - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ring_header.val), &head, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ring_tail.val), &tail, 4); guest_head = head; @@ -1674,11 +1673,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, gvt_dbg_el("ring %s begin a new workload\n", engine->name); /* record some ring buffer register values for scan and shadow */ - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(rb_start.val), &start, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(rb_ctrl.val), &ctl, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4); if (!intel_gvt_ggtt_validate_range(vgpu, start, @@ -1701,9 +1700,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, workload->rb_ctl = ctl; if (engine->id == RCS0) { - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + 
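Throughout scheduler.c the conversion is mechanical: each COPY_REG expansion is a 4-byte guest-memory access at ring_context_gpa plus a field offset, now issued through intel_gvt_read_gpa()/intel_gvt_write_gpa() instead of the hypervisor wrappers. A one-function sketch of such an access, assuming gvt.h is in scope (demo_* name is illustrative):

/* copy a single 32-bit ring-context field out of guest memory */
static int demo_read_ctx_field(struct intel_vgpu *vgpu, u64 ctx_gpa,
			       u32 field_off, u32 *out)
{
	return intel_gvt_read_gpa(vgpu, ctx_gpa + field_off, out, 4);
}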
intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4); - intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + + intel_gvt_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4); workload->wa_ctx.indirect_ctx.guest_gma = diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h index 6d787750d279..020f1aa28322 100644 --- a/drivers/gpu/drm/i915/gvt/trace.h +++ b/drivers/gpu/drm/i915/gvt/trace.h @@ -377,7 +377,7 @@ TRACE_EVENT(render_mmio, /* This part must be out of protection */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/gvt #define TRACE_INCLUDE_FILE trace #include <trace/define_trace.h> diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 8dddd0a940a1..46da19b3225d 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -293,7 +293,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_clean_opregion(vgpu); intel_vgpu_reset_ggtt(vgpu, true); intel_vgpu_clean_gtt(vgpu); - intel_gvt_hypervisor_detach_vgpu(vgpu); + intel_vgpu_detach_regions(vgpu); intel_vgpu_free_resource(vgpu); intel_vgpu_clean_mmio(vgpu); intel_vgpu_dmabuf_cleanup(vgpu); @@ -370,8 +370,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu *vgpu; int ret; - gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n", - param->handle, param->low_gm_sz, param->high_gm_sz, + gvt_dbg_core("low %llu MB high %llu MB fence %llu\n", + param->low_gm_sz, param->high_gm_sz, param->fence_sz); vgpu = vzalloc(sizeof(*vgpu)); @@ -384,7 +384,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, goto out_free_vgpu; vgpu->id = ret; - vgpu->handle = param->handle; vgpu->gvt = gvt; vgpu->sched_ctl.weight = param->weight; mutex_init(&vgpu->vgpu_lock); @@ -405,13 +404,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, populate_pvinfo_page(vgpu); - ret = intel_gvt_hypervisor_attach_vgpu(vgpu); - if (ret) - goto out_clean_vgpu_resource; - ret = intel_vgpu_init_gtt(vgpu); if (ret) - goto out_detach_hypervisor_vgpu; + goto out_clean_vgpu_resource; ret = intel_vgpu_init_opregion(vgpu); if (ret) @@ -431,14 +426,14 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, intel_gvt_debugfs_add_vgpu(vgpu); - ret = intel_gvt_hypervisor_set_opregion(vgpu); + ret = intel_gvt_set_opregion(vgpu); if (ret) goto out_clean_sched_policy; if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv)) - ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B); + ret = intel_gvt_set_edid(vgpu, PORT_B); else - ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); + ret = intel_gvt_set_edid(vgpu, PORT_D); if (ret) goto out_clean_sched_policy; @@ -454,8 +449,6 @@ out_clean_opregion: intel_vgpu_clean_opregion(vgpu); out_clean_gtt: intel_vgpu_clean_gtt(vgpu); -out_detach_hypervisor_vgpu: - intel_gvt_hypervisor_detach_vgpu(vgpu); out_clean_vgpu_resource: intel_vgpu_free_resource(vgpu); out_clean_vgpu_mmio: @@ -483,7 +476,6 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu_creation_params param; struct intel_vgpu *vgpu; - param.handle = 0; param.primary = 1; param.low_gm_sz = type->low_gm_size; param.high_gm_sz = type->high_gm_size; diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 3ffb617d75c9..90b0ce5051af 100644 --- 
a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -465,11 +465,6 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) pci_dev_put(dev_priv->bridge_dev); } -static void intel_sanitize_options(struct drm_i915_private *dev_priv) -{ - intel_gvt_sanitize_options(dev_priv); -} - /** * i915_set_dma_info - set all relevant PCI dma info as configured for the * platform @@ -563,8 +558,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) } } - intel_sanitize_options(dev_priv); - /* needs to be done before ggtt probe */ intel_dram_edram_detect(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3ed9021c615d..00d7eeae33bd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -327,6 +327,7 @@ struct intel_vbt_data { bool override_afc_startup; u8 override_afc_startup_val; + u8 seamless_drrs_min_refresh_rate; enum drrs_type drrs_type; struct { @@ -401,6 +402,9 @@ struct i915_virtual_gpu { struct mutex lock; /* serialises sending of g2v_notify command pkts */ bool active; u32 caps; + u32 *initial_mmio; + u8 *initial_cfg_space; + struct list_head entry; }; struct i915_selftest_stash { @@ -1068,9 +1072,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_DG2_G12(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12) #define IS_ADLS_RPLS(dev_priv) \ - IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S) + IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL) #define IS_ADLP_N(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N) +#define IS_ADLP_RPLP(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev_priv) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 987bdeb090a5..acf688b698c3 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -1060,7 +1060,6 @@ static const struct intel_device_info xehpsdv_info = { BIT(VCS0) | BIT(VCS2) | \ BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3) -__maybe_unused static const struct intel_device_info dg2_info = { DG2_FEATURES, XE_LPD_FEATURES, @@ -1176,6 +1175,8 @@ static const struct pci_device_id pciidlist[] = { INTEL_ADLN_IDS(&adl_p_info), INTEL_DG1_IDS(&dg1_info), INTEL_RPLS_IDS(&adl_s_info), + INTEL_RPLP_IDS(&adl_p_info), + INTEL_DG2_IDS(&dg2_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 98bb53226d6b..9ccb67eec1bd 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1404,6 +1404,7 @@ #define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */ #define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */ #define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */ +#define DPFC_CHICKEN_FORCE_SLB_INVALIDATION REG_BIT(13) /* icl+ */ #define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */ #define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268) diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index b0e62a411534..7eb893666595 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -182,8 +182,21 @@ static const u16 subplatform_n_ids[] = { INTEL_ADLN_IDS(0), }; -static const u16 subplatform_rpls_ids[] = { 
+static const u16 subplatform_rpl_ids[] = { INTEL_RPLS_IDS(0), + INTEL_RPLP_IDS(0), +}; + +static const u16 subplatform_g10_ids[] = { + INTEL_DG2_G10_IDS(0), +}; + +static const u16 subplatform_g11_ids[] = { + INTEL_DG2_G11_IDS(0), +}; + +static const u16 subplatform_g12_ids[] = { + INTEL_DG2_G12_IDS(0), }; static bool find_devid(u16 id, const u16 *p, unsigned int num) @@ -228,9 +241,18 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915) } else if (find_devid(devid, subplatform_n_ids, ARRAY_SIZE(subplatform_n_ids))) { mask = BIT(INTEL_SUBPLATFORM_N); - } else if (find_devid(devid, subplatform_rpls_ids, - ARRAY_SIZE(subplatform_rpls_ids))) { - mask = BIT(INTEL_SUBPLATFORM_RPL_S); + } else if (find_devid(devid, subplatform_rpl_ids, + ARRAY_SIZE(subplatform_rpl_ids))) { + mask = BIT(INTEL_SUBPLATFORM_RPL); + } else if (find_devid(devid, subplatform_g10_ids, + ARRAY_SIZE(subplatform_g10_ids))) { + mask = BIT(INTEL_SUBPLATFORM_G10); + } else if (find_devid(devid, subplatform_g11_ids, + ARRAY_SIZE(subplatform_g11_ids))) { + mask = BIT(INTEL_SUBPLATFORM_G11); + } else if (find_devid(devid, subplatform_g12_ids, + ARRAY_SIZE(subplatform_g12_ids))) { + mask = BIT(INTEL_SUBPLATFORM_G12); } GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK); diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index ec0b8095e7fa..e7d2cf7d65c8 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -115,11 +115,16 @@ enum intel_platform { #define INTEL_SUBPLATFORM_G11 1 #define INTEL_SUBPLATFORM_G12 2 -/* ADL-S */ -#define INTEL_SUBPLATFORM_RPL_S 0 +/* ADL */ +#define INTEL_SUBPLATFORM_RPL 0 /* ADL-P */ -#define INTEL_SUBPLATFORM_N 0 +/* + * As #define INTEL_SUBPLATFORM_RPL 0 will apply + * here too, SUBPLATFORM_N will have different + * bit set + */ +#define INTEL_SUBPLATFORM_N 1 enum intel_ppgtt_type { INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE, diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index cf6e98962d82..e98b6d69a91a 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -24,7 +24,10 @@ #include "i915_drv.h" #include "i915_vgpu.h" #include "intel_gvt.h" -#include "gvt/gvt.h" +#include "gem/i915_gem_dmabuf.h" +#include "gt/intel_context.h" +#include "gt/intel_ring.h" +#include "gt/shmem_utils.h" /** * DOC: Intel GVT-g host support @@ -41,6 +44,10 @@ * doc is available on https://01.org/group/2230/documentation-list. */ +static LIST_HEAD(intel_gvt_devices); +static const struct intel_vgpu_ops *intel_gvt_ops; +static DEFINE_MUTEX(intel_gvt_mutex); + static bool is_supported_device(struct drm_i915_private *dev_priv) { if (IS_BROADWELL(dev_priv)) @@ -59,32 +66,162 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) return false; } -/** - * intel_gvt_sanitize_options - sanitize GVT related options - * @dev_priv: drm i915 private data - * - * This function is called at the i915 options sanitize stage. 
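Subplatform detection stays a linear scan: each table above lists the PCI device IDs of one subplatform, and the first table containing the probed ID selects the mask bit. The lookup helper is short enough to restate in full (this mirrors find_devid() in this file; the demo_ prefix is illustrative):

static bool demo_find_devid(u16 id, const u16 *p, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (p[i] == id)		/* tables are tiny; O(n) is fine */
			return true;
	}
	return false;
}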
- */ -void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) +static void free_initial_hw_state(struct drm_i915_private *dev_priv) +{ + struct i915_virtual_gpu *vgpu = &dev_priv->vgpu; + + vfree(vgpu->initial_mmio); + vgpu->initial_mmio = NULL; + + kfree(vgpu->initial_cfg_space); + vgpu->initial_cfg_space = NULL; +} + +static void save_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset, + u32 size) +{ + struct drm_i915_private *dev_priv = iter->i915; + u32 *mmio, i; + + for (i = offset; i < offset + size; i += 4) { + mmio = iter->data + i; + *mmio = intel_uncore_read_notrace(to_gt(dev_priv)->uncore, + _MMIO(i)); + } +} + +static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, + u32 offset, u32 size) +{ + if (WARN_ON(!IS_ALIGNED(offset, 4))) + return -EINVAL; + + save_mmio(iter, offset, size); + return 0; +} + +static int save_initial_hw_state(struct drm_i915_private *dev_priv) +{ + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct i915_virtual_gpu *vgpu = &dev_priv->vgpu; + struct intel_gvt_mmio_table_iter iter; + void *mem; + int i, ret; + + mem = kzalloc(PCI_CFG_SPACE_EXP_SIZE, GFP_KERNEL); + if (!mem) + return -ENOMEM; + + vgpu->initial_cfg_space = mem; + + for (i = 0; i < PCI_CFG_SPACE_EXP_SIZE; i += 4) + pci_read_config_dword(pdev, i, mem + i); + + mem = vzalloc(2 * SZ_1M); + if (!mem) { + ret = -ENOMEM; + goto err_mmio; + } + + vgpu->initial_mmio = mem; + + iter.i915 = dev_priv; + iter.data = vgpu->initial_mmio; + iter.handle_mmio_cb = handle_mmio; + + ret = intel_gvt_iterate_mmio_table(&iter); + if (ret) + goto err_iterate; + + return 0; + +err_iterate: + vfree(vgpu->initial_mmio); + vgpu->initial_mmio = NULL; +err_mmio: + kfree(vgpu->initial_cfg_space); + vgpu->initial_cfg_space = NULL; + + return ret; +} + +static void intel_gvt_init_device(struct drm_i915_private *dev_priv) { - if (!dev_priv->params.enable_gvt) + if (!dev_priv->params.enable_gvt) { + drm_dbg(&dev_priv->drm, + "GVT-g is disabled by kernel params\n"); return; + } if (intel_vgpu_active(dev_priv)) { drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n"); - goto bail; + return; } if (!is_supported_device(dev_priv)) { drm_info(&dev_priv->drm, "Unsupported device. 
GVT-g is disabled\n"); - goto bail; + return; + } + + if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) { + drm_err(&dev_priv->drm, + "Graphics virtualization is not yet supported with GuC submission\n"); + return; } - return; -bail: - dev_priv->params.enable_gvt = 0; + if (save_initial_hw_state(dev_priv)) { + drm_dbg(&dev_priv->drm, "Failed to save initial HW state\n"); + return; + } + + if (intel_gvt_ops->init_device(dev_priv)) + drm_dbg(&dev_priv->drm, "Fail to init GVT device\n"); +} + +static void intel_gvt_clean_device(struct drm_i915_private *dev_priv) +{ + if (dev_priv->gvt) + intel_gvt_ops->clean_device(dev_priv); + free_initial_hw_state(dev_priv); +} + +int intel_gvt_set_ops(const struct intel_vgpu_ops *ops) +{ + struct drm_i915_private *dev_priv; + + mutex_lock(&intel_gvt_mutex); + if (intel_gvt_ops) { + mutex_unlock(&intel_gvt_mutex); + return -EINVAL; + } + intel_gvt_ops = ops; + + list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry) + intel_gvt_init_device(dev_priv); + mutex_unlock(&intel_gvt_mutex); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(intel_gvt_set_ops, I915_GVT); + +void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops) +{ + struct drm_i915_private *dev_priv; + + mutex_lock(&intel_gvt_mutex); + if (intel_gvt_ops != ops) { + mutex_unlock(&intel_gvt_mutex); + return; + } + + list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry) + intel_gvt_clean_device(dev_priv); + + intel_gvt_ops = NULL; + mutex_unlock(&intel_gvt_mutex); } +EXPORT_SYMBOL_NS_GPL(intel_gvt_clear_ops, I915_GVT); /** * intel_gvt_init - initialize GVT components @@ -98,41 +235,18 @@ bail: */ int intel_gvt_init(struct drm_i915_private *dev_priv) { - int ret; - if (i915_inject_probe_failure(dev_priv)) return -ENODEV; - if (!dev_priv->params.enable_gvt) { - drm_dbg(&dev_priv->drm, - "GVT-g is disabled by kernel params\n"); - return 0; - } - - if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) { - drm_err(&dev_priv->drm, - "i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n"); - return -EIO; - } - - ret = intel_gvt_init_device(dev_priv); - if (ret) { - drm_dbg(&dev_priv->drm, "Fail to init GVT device\n"); - goto bail; - } - - return 0; + mutex_lock(&intel_gvt_mutex); + list_add_tail(&dev_priv->vgpu.entry, &intel_gvt_devices); + if (intel_gvt_ops) + intel_gvt_init_device(dev_priv); + mutex_unlock(&intel_gvt_mutex); -bail: - dev_priv->params.enable_gvt = 0; return 0; } -static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) -{ - return dev_priv->gvt; -} - /** * intel_gvt_driver_remove - cleanup GVT components when i915 driver is * unbinding @@ -143,10 +257,10 @@ static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) */ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) { - if (!intel_gvt_active(dev_priv)) - return; - + mutex_lock(&intel_gvt_mutex); intel_gvt_clean_device(dev_priv); + list_del(&dev_priv->vgpu.entry); + mutex_unlock(&intel_gvt_mutex); } /** @@ -159,6 +273,50 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) */ void intel_gvt_resume(struct drm_i915_private *dev_priv) { - if (intel_gvt_active(dev_priv)) - intel_gvt_pm_resume(dev_priv->gvt); + mutex_lock(&intel_gvt_mutex); + if (dev_priv->gvt) + intel_gvt_ops->pm_resume(dev_priv); + mutex_unlock(&intel_gvt_mutex); } + +/* + * Exported here so that the exports only get created when GVT support is + * actually enabled. 
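intel_gvt_set_ops()/intel_gvt_clear_ops() above implement a two-sided rendezvous: i915 probes queue devices on intel_gvt_devices, the kvmgt module publishes its ops table, and whichever side arrives second performs the device init. A self-contained sketch of that pattern with illustrative demo_* names (only the mutex/list/ops shape is taken from the patch):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct demo_dev { struct list_head entry; };
struct demo_ops { void (*init)(struct demo_dev *dev); };

static DEFINE_MUTEX(demo_lock);
static LIST_HEAD(demo_devices);
static const struct demo_ops *demo_ops;	/* NULL until the module loads */

/* driver probe path: record the device; init now if ops already exist */
static void demo_add_device(struct demo_dev *dev)
{
	mutex_lock(&demo_lock);
	list_add_tail(&dev->entry, &demo_devices);
	if (demo_ops)
		demo_ops->init(dev);
	mutex_unlock(&demo_lock);
}

/* module load path: publish ops, then catch up on earlier devices */
static int demo_set_ops(const struct demo_ops *ops)
{
	struct demo_dev *dev;

	mutex_lock(&demo_lock);
	if (demo_ops) {
		mutex_unlock(&demo_lock);
		return -EINVAL;		/* only one provider at a time */
	}
	demo_ops = ops;
	list_for_each_entry(dev, &demo_devices, entry)
		demo_ops->init(dev);
	mutex_unlock(&demo_lock);
	return 0;
}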
+ */ +EXPORT_SYMBOL_NS_GPL(i915_gem_object_alloc, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_create_shmem, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_init, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_ggtt_pin_ww, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_pin_map, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_object_set_to_cpu_domain, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__i915_gem_object_flush_map, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__i915_gem_object_set_pages, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_gtt_insert, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_prime_export, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_init, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_backoff, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_fini, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_ppgtt_create, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_request_add, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_request_create, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_request_wait, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_reserve_fence, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_unreserve_fence, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_vm_release, I915_GVT); +EXPORT_SYMBOL_NS_GPL(_i915_vma_move_to_active, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_context_create, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT); +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) +EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put, I915_GVT); +#endif +EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT); +EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, I915_GVT); +EXPORT_SYMBOL_NS_GPL(shmem_pin_map, I915_GVT); +EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, I915_GVT); +EXPORT_SYMBOL_NS_GPL(__px_dma, I915_GVT); +EXPORT_SYMBOL_NS_GPL(i915_fence_ops, I915_GVT); diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h index d7d3fb6186fd..eb2a2be252ca 100644 --- a/drivers/gpu/drm/i915/intel_gvt.h +++ b/drivers/gpu/drm/i915/intel_gvt.h @@ -24,16 +24,34 @@ #ifndef _INTEL_GVT_H_ #define _INTEL_GVT_H_ +#include <linux/types.h> + struct drm_i915_private; #ifdef CONFIG_DRM_I915_GVT + +struct intel_gvt_mmio_table_iter { + struct drm_i915_private *i915; + void *data; + int (*handle_mmio_cb)(struct intel_gvt_mmio_table_iter *iter, + u32 offset, u32 size); +}; + int intel_gvt_init(struct drm_i915_private *dev_priv); void intel_gvt_driver_remove(struct drm_i915_private *dev_priv); -int intel_gvt_init_device(struct drm_i915_private *dev_priv); -void intel_gvt_clean_device(struct drm_i915_private *dev_priv); int intel_gvt_init_host(void); -void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv); void intel_gvt_resume(struct drm_i915_private *dev_priv); +int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter); + +struct intel_vgpu_ops { + int (*init_device)(struct drm_i915_private *dev_priv); + void (*clean_device)(struct drm_i915_private *dev_priv); + void (*pm_resume)(struct drm_i915_private *i915); +}; + +int intel_gvt_set_ops(const struct intel_vgpu_ops *ops); +void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops); + #else static inline int intel_gvt_init(struct drm_i915_private *dev_priv) { @@ -44,12 +62,16 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) { } -static inline void 
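The intel_gvt_mmio_table_iter declared above decouples the register table from its consumers: every registered range is funneled through handle_mmio_cb, so the same table serves both the initial hw-state snapshot earlier in this patch and any other walker. A hedged sketch of an alternative consumer (demo_* names are illustrative; the struct fields and iterate call match the header above):

#include "intel_gvt.h"

/* tally how many MMIO bytes the table covers; non-zero return aborts */
static int demo_count_cb(struct intel_gvt_mmio_table_iter *iter,
			 u32 offset, u32 size)
{
	u32 *total = iter->data;	/* caller-owned accumulator */

	*total += size;
	return 0;
}

static u32 demo_count_mmio(struct drm_i915_private *i915)
{
	u32 total = 0;
	struct intel_gvt_mmio_table_iter iter = {
		.i915 = i915,
		.data = &total,
		.handle_mmio_cb = demo_count_cb,
	};

	intel_gvt_iterate_mmio_table(&iter);
	return total;
}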
intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) +static inline void intel_gvt_resume(struct drm_i915_private *dev_priv) { } -static inline void intel_gvt_resume(struct drm_i915_private *dev_priv) +struct intel_gvt_mmio_table_iter { +}; + +static inline int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter) { + return 0; } #endif diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c new file mode 100644 index 000000000000..72dac1718f3e --- /dev/null +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -0,0 +1,1292 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include "display/intel_dmc_regs.h" +#include "display/vlv_dsi_pll_regs.h" +#include "gt/intel_gt_regs.h" +#include "gvt/gvt.h" +#include "i915_drv.h" +#include "i915_pvinfo.h" +#include "i915_reg.h" +#include "intel_gvt.h" +#include "intel_mchbar_regs.h" + +#define MMIO_F(reg, s) do { \ + int ret; \ + ret = iter->handle_mmio_cb(iter, i915_mmio_reg_offset(reg), s); \ + if (ret) \ + return ret; \ +} while (0) + +#define MMIO_D(reg) MMIO_F(reg, 4) + +#define MMIO_RING_F(prefix, s) do { \ + MMIO_F(prefix(RENDER_RING_BASE), s); \ + MMIO_F(prefix(BLT_RING_BASE), s); \ + MMIO_F(prefix(GEN6_BSD_RING_BASE), s); \ + MMIO_F(prefix(VEBOX_RING_BASE), s); \ + if (HAS_ENGINE(to_gt(iter->i915), VCS1)) \ + MMIO_F(prefix(GEN8_BSD2_RING_BASE), s); \ +} while (0) + +#define MMIO_RING_D(prefix) \ + MMIO_RING_F(prefix, 4) + +static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *dev_priv = iter->i915; + + MMIO_RING_D(RING_IMR); + MMIO_D(SDEIMR); + MMIO_D(SDEIER); + MMIO_D(SDEIIR); + MMIO_D(SDEISR); + MMIO_RING_D(RING_HWSTAM); + MMIO_D(BSD_HWS_PGA_GEN7); + MMIO_D(BLT_HWS_PGA_GEN7); + MMIO_D(VEBOX_HWS_PGA_GEN7); + +#define RING_REG(base) _MMIO((base) + 0x28) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x134) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x6c) + MMIO_RING_D(RING_REG); +#undef RING_REG + MMIO_D(_MMIO(0x2148)); + MMIO_D(CCID(RENDER_RING_BASE)); + MMIO_D(_MMIO(0x12198)); + MMIO_D(GEN7_CXT_SIZE); + MMIO_RING_D(RING_TAIL); + MMIO_RING_D(RING_HEAD); + MMIO_RING_D(RING_CTL); + MMIO_RING_D(RING_ACTHD); + MMIO_RING_D(RING_START); + + /* RING MODE */ +#define RING_REG(base) _MMIO((base) + 0x29c) + MMIO_RING_D(RING_REG); +#undef RING_REG + + MMIO_RING_D(RING_MI_MODE); + MMIO_RING_D(RING_INSTPM); + MMIO_RING_D(RING_TIMESTAMP); + MMIO_RING_D(RING_TIMESTAMP_UDW); + MMIO_D(GEN7_GT_MODE); + MMIO_D(CACHE_MODE_0_GEN7); + MMIO_D(CACHE_MODE_1); + MMIO_D(CACHE_MODE_0); + MMIO_D(_MMIO(0x2124)); + MMIO_D(_MMIO(0x20dc)); + MMIO_D(_3D_CHICKEN3); + MMIO_D(_MMIO(0x2088)); + MMIO_D(FF_SLICE_CS_CHICKEN2); + MMIO_D(_MMIO(0x2470)); + MMIO_D(GAM_ECOCHK); + MMIO_D(GEN7_COMMON_SLICE_CHICKEN1); + MMIO_D(COMMON_SLICE_CHICKEN2); + MMIO_D(_MMIO(0x9030)); + MMIO_D(_MMIO(0x20a0)); + MMIO_D(_MMIO(0x2420)); + MMIO_D(_MMIO(0x2430)); + MMIO_D(_MMIO(0x2434)); + MMIO_D(_MMIO(0x2438)); + MMIO_D(_MMIO(0x243c)); + MMIO_D(_MMIO(0x7018)); + MMIO_D(HALF_SLICE_CHICKEN3); + MMIO_D(GEN7_HALF_SLICE_CHICKEN1); + /* display */ + MMIO_F(_MMIO(0x60220), 0x20); + MMIO_D(_MMIO(0x602a0)); + MMIO_D(_MMIO(0x65050)); + MMIO_D(_MMIO(0x650b4)); + MMIO_D(_MMIO(0xc4040)); + MMIO_D(DERRMR); + MMIO_D(PIPEDSL(PIPE_A)); + MMIO_D(PIPEDSL(PIPE_B)); + MMIO_D(PIPEDSL(PIPE_C)); + MMIO_D(PIPEDSL(_PIPE_EDP)); + MMIO_D(PIPECONF(PIPE_A)); + MMIO_D(PIPECONF(PIPE_B)); + 
MMIO_D(PIPECONF(PIPE_C)); + MMIO_D(PIPECONF(_PIPE_EDP)); + MMIO_D(PIPESTAT(PIPE_A)); + MMIO_D(PIPESTAT(PIPE_B)); + MMIO_D(PIPESTAT(PIPE_C)); + MMIO_D(PIPESTAT(_PIPE_EDP)); + MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A)); + MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B)); + MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C)); + MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP)); + MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A)); + MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B)); + MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C)); + MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP)); + MMIO_D(CURCNTR(PIPE_A)); + MMIO_D(CURCNTR(PIPE_B)); + MMIO_D(CURCNTR(PIPE_C)); + MMIO_D(CURPOS(PIPE_A)); + MMIO_D(CURPOS(PIPE_B)); + MMIO_D(CURPOS(PIPE_C)); + MMIO_D(CURBASE(PIPE_A)); + MMIO_D(CURBASE(PIPE_B)); + MMIO_D(CURBASE(PIPE_C)); + MMIO_D(CUR_FBC_CTL(PIPE_A)); + MMIO_D(CUR_FBC_CTL(PIPE_B)); + MMIO_D(CUR_FBC_CTL(PIPE_C)); + MMIO_D(_MMIO(0x700ac)); + MMIO_D(_MMIO(0x710ac)); + MMIO_D(_MMIO(0x720ac)); + MMIO_D(_MMIO(0x70090)); + MMIO_D(_MMIO(0x70094)); + MMIO_D(_MMIO(0x70098)); + MMIO_D(_MMIO(0x7009c)); + MMIO_D(DSPCNTR(PIPE_A)); + MMIO_D(DSPADDR(PIPE_A)); + MMIO_D(DSPSTRIDE(PIPE_A)); + MMIO_D(DSPPOS(PIPE_A)); + MMIO_D(DSPSIZE(PIPE_A)); + MMIO_D(DSPSURF(PIPE_A)); + MMIO_D(DSPOFFSET(PIPE_A)); + MMIO_D(DSPSURFLIVE(PIPE_A)); + MMIO_D(REG_50080(PIPE_A, PLANE_PRIMARY)); + MMIO_D(DSPCNTR(PIPE_B)); + MMIO_D(DSPADDR(PIPE_B)); + MMIO_D(DSPSTRIDE(PIPE_B)); + MMIO_D(DSPPOS(PIPE_B)); + MMIO_D(DSPSIZE(PIPE_B)); + MMIO_D(DSPSURF(PIPE_B)); + MMIO_D(DSPOFFSET(PIPE_B)); + MMIO_D(DSPSURFLIVE(PIPE_B)); + MMIO_D(REG_50080(PIPE_B, PLANE_PRIMARY)); + MMIO_D(DSPCNTR(PIPE_C)); + MMIO_D(DSPADDR(PIPE_C)); + MMIO_D(DSPSTRIDE(PIPE_C)); + MMIO_D(DSPPOS(PIPE_C)); + MMIO_D(DSPSIZE(PIPE_C)); + MMIO_D(DSPSURF(PIPE_C)); + MMIO_D(DSPOFFSET(PIPE_C)); + MMIO_D(DSPSURFLIVE(PIPE_C)); + MMIO_D(REG_50080(PIPE_C, PLANE_PRIMARY)); + MMIO_D(SPRCTL(PIPE_A)); + MMIO_D(SPRLINOFF(PIPE_A)); + MMIO_D(SPRSTRIDE(PIPE_A)); + MMIO_D(SPRPOS(PIPE_A)); + MMIO_D(SPRSIZE(PIPE_A)); + MMIO_D(SPRKEYVAL(PIPE_A)); + MMIO_D(SPRKEYMSK(PIPE_A)); + MMIO_D(SPRSURF(PIPE_A)); + MMIO_D(SPRKEYMAX(PIPE_A)); + MMIO_D(SPROFFSET(PIPE_A)); + MMIO_D(SPRSCALE(PIPE_A)); + MMIO_D(SPRSURFLIVE(PIPE_A)); + MMIO_D(REG_50080(PIPE_A, PLANE_SPRITE0)); + MMIO_D(SPRCTL(PIPE_B)); + MMIO_D(SPRLINOFF(PIPE_B)); + MMIO_D(SPRSTRIDE(PIPE_B)); + MMIO_D(SPRPOS(PIPE_B)); + MMIO_D(SPRSIZE(PIPE_B)); + MMIO_D(SPRKEYVAL(PIPE_B)); + MMIO_D(SPRKEYMSK(PIPE_B)); + MMIO_D(SPRSURF(PIPE_B)); + MMIO_D(SPRKEYMAX(PIPE_B)); + MMIO_D(SPROFFSET(PIPE_B)); + MMIO_D(SPRSCALE(PIPE_B)); + MMIO_D(SPRSURFLIVE(PIPE_B)); + MMIO_D(REG_50080(PIPE_B, PLANE_SPRITE0)); + MMIO_D(SPRCTL(PIPE_C)); + MMIO_D(SPRLINOFF(PIPE_C)); + MMIO_D(SPRSTRIDE(PIPE_C)); + MMIO_D(SPRPOS(PIPE_C)); + MMIO_D(SPRSIZE(PIPE_C)); + MMIO_D(SPRKEYVAL(PIPE_C)); + MMIO_D(SPRKEYMSK(PIPE_C)); + MMIO_D(SPRSURF(PIPE_C)); + MMIO_D(SPRKEYMAX(PIPE_C)); + MMIO_D(SPROFFSET(PIPE_C)); + MMIO_D(SPRSCALE(PIPE_C)); + MMIO_D(SPRSURFLIVE(PIPE_C)); + MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0)); + MMIO_D(HTOTAL(TRANSCODER_A)); + MMIO_D(HBLANK(TRANSCODER_A)); + MMIO_D(HSYNC(TRANSCODER_A)); + MMIO_D(VTOTAL(TRANSCODER_A)); + MMIO_D(VBLANK(TRANSCODER_A)); + MMIO_D(VSYNC(TRANSCODER_A)); + MMIO_D(BCLRPAT(TRANSCODER_A)); + MMIO_D(VSYNCSHIFT(TRANSCODER_A)); + MMIO_D(PIPESRC(TRANSCODER_A)); + MMIO_D(HTOTAL(TRANSCODER_B)); + MMIO_D(HBLANK(TRANSCODER_B)); + MMIO_D(HSYNC(TRANSCODER_B)); + MMIO_D(VTOTAL(TRANSCODER_B)); + MMIO_D(VBLANK(TRANSCODER_B)); + MMIO_D(VSYNC(TRANSCODER_B)); + MMIO_D(BCLRPAT(TRANSCODER_B)); + MMIO_D(VSYNCSHIFT(TRANSCODER_B)); + MMIO_D(PIPESRC(TRANSCODER_B)); + 
MMIO_D(HTOTAL(TRANSCODER_C)); + MMIO_D(HBLANK(TRANSCODER_C)); + MMIO_D(HSYNC(TRANSCODER_C)); + MMIO_D(VTOTAL(TRANSCODER_C)); + MMIO_D(VBLANK(TRANSCODER_C)); + MMIO_D(VSYNC(TRANSCODER_C)); + MMIO_D(BCLRPAT(TRANSCODER_C)); + MMIO_D(VSYNCSHIFT(TRANSCODER_C)); + MMIO_D(PIPESRC(TRANSCODER_C)); + MMIO_D(HTOTAL(TRANSCODER_EDP)); + MMIO_D(HBLANK(TRANSCODER_EDP)); + MMIO_D(HSYNC(TRANSCODER_EDP)); + MMIO_D(VTOTAL(TRANSCODER_EDP)); + MMIO_D(VBLANK(TRANSCODER_EDP)); + MMIO_D(VSYNC(TRANSCODER_EDP)); + MMIO_D(BCLRPAT(TRANSCODER_EDP)); + MMIO_D(VSYNCSHIFT(TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_M1(TRANSCODER_A)); + MMIO_D(PIPE_DATA_N1(TRANSCODER_A)); + MMIO_D(PIPE_DATA_M2(TRANSCODER_A)); + MMIO_D(PIPE_DATA_N2(TRANSCODER_A)); + MMIO_D(PIPE_LINK_M1(TRANSCODER_A)); + MMIO_D(PIPE_LINK_N1(TRANSCODER_A)); + MMIO_D(PIPE_LINK_M2(TRANSCODER_A)); + MMIO_D(PIPE_LINK_N2(TRANSCODER_A)); + MMIO_D(PIPE_DATA_M1(TRANSCODER_B)); + MMIO_D(PIPE_DATA_N1(TRANSCODER_B)); + MMIO_D(PIPE_DATA_M2(TRANSCODER_B)); + MMIO_D(PIPE_DATA_N2(TRANSCODER_B)); + MMIO_D(PIPE_LINK_M1(TRANSCODER_B)); + MMIO_D(PIPE_LINK_N1(TRANSCODER_B)); + MMIO_D(PIPE_LINK_M2(TRANSCODER_B)); + MMIO_D(PIPE_LINK_N2(TRANSCODER_B)); + MMIO_D(PIPE_DATA_M1(TRANSCODER_C)); + MMIO_D(PIPE_DATA_N1(TRANSCODER_C)); + MMIO_D(PIPE_DATA_M2(TRANSCODER_C)); + MMIO_D(PIPE_DATA_N2(TRANSCODER_C)); + MMIO_D(PIPE_LINK_M1(TRANSCODER_C)); + MMIO_D(PIPE_LINK_N1(TRANSCODER_C)); + MMIO_D(PIPE_LINK_M2(TRANSCODER_C)); + MMIO_D(PIPE_LINK_N2(TRANSCODER_C)); + MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP)); + MMIO_D(PF_CTL(PIPE_A)); + MMIO_D(PF_WIN_SZ(PIPE_A)); + MMIO_D(PF_WIN_POS(PIPE_A)); + MMIO_D(PF_VSCALE(PIPE_A)); + MMIO_D(PF_HSCALE(PIPE_A)); + MMIO_D(PF_CTL(PIPE_B)); + MMIO_D(PF_WIN_SZ(PIPE_B)); + MMIO_D(PF_WIN_POS(PIPE_B)); + MMIO_D(PF_VSCALE(PIPE_B)); + MMIO_D(PF_HSCALE(PIPE_B)); + MMIO_D(PF_CTL(PIPE_C)); + MMIO_D(PF_WIN_SZ(PIPE_C)); + MMIO_D(PF_WIN_POS(PIPE_C)); + MMIO_D(PF_VSCALE(PIPE_C)); + MMIO_D(PF_HSCALE(PIPE_C)); + MMIO_D(WM0_PIPE_ILK(PIPE_A)); + MMIO_D(WM0_PIPE_ILK(PIPE_B)); + MMIO_D(WM0_PIPE_ILK(PIPE_C)); + MMIO_D(WM1_LP_ILK); + MMIO_D(WM2_LP_ILK); + MMIO_D(WM3_LP_ILK); + MMIO_D(WM1S_LP_ILK); + MMIO_D(WM2S_LP_IVB); + MMIO_D(WM3S_LP_IVB); + MMIO_D(BLC_PWM_CPU_CTL2); + MMIO_D(BLC_PWM_CPU_CTL); + MMIO_D(BLC_PWM_PCH_CTL1); + MMIO_D(BLC_PWM_PCH_CTL2); + MMIO_D(_MMIO(0x48268)); + MMIO_F(PCH_GMBUS0, 4 * 4); + MMIO_F(PCH_GPIO_BASE, 6 * 4); + MMIO_F(_MMIO(0xe4f00), 0x28); + MMIO_D(_MMIO(_PCH_TRANSACONF)); + MMIO_D(_MMIO(_PCH_TRANSBCONF)); + MMIO_D(FDI_RX_IIR(PIPE_A)); + MMIO_D(FDI_RX_IIR(PIPE_B)); + MMIO_D(FDI_RX_IIR(PIPE_C)); + MMIO_D(FDI_RX_IMR(PIPE_A)); + MMIO_D(FDI_RX_IMR(PIPE_B)); + MMIO_D(FDI_RX_IMR(PIPE_C)); + MMIO_D(FDI_RX_CTL(PIPE_A)); + MMIO_D(FDI_RX_CTL(PIPE_B)); + MMIO_D(FDI_RX_CTL(PIPE_C)); + MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A)); + MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A)); + MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A)); + MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A)); + MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A)); + MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A)); + MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A)); + MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B)); + MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B)); + MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B)); + MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B)); + MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B)); + MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B)); + 
MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B)); + MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1)); + MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1)); + MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2)); + MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2)); + MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1)); + MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1)); + MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2)); + MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2)); + MMIO_D(TRANS_DP_CTL(PIPE_A)); + MMIO_D(TRANS_DP_CTL(PIPE_B)); + MMIO_D(TRANS_DP_CTL(PIPE_C)); + MMIO_D(TVIDEO_DIP_CTL(PIPE_A)); + MMIO_D(TVIDEO_DIP_DATA(PIPE_A)); + MMIO_D(TVIDEO_DIP_GCP(PIPE_A)); + MMIO_D(TVIDEO_DIP_CTL(PIPE_B)); + MMIO_D(TVIDEO_DIP_DATA(PIPE_B)); + MMIO_D(TVIDEO_DIP_GCP(PIPE_B)); + MMIO_D(TVIDEO_DIP_CTL(PIPE_C)); + MMIO_D(TVIDEO_DIP_DATA(PIPE_C)); + MMIO_D(TVIDEO_DIP_GCP(PIPE_C)); + MMIO_D(_MMIO(_FDI_RXA_MISC)); + MMIO_D(_MMIO(_FDI_RXB_MISC)); + MMIO_D(_MMIO(_FDI_RXA_TUSIZE1)); + MMIO_D(_MMIO(_FDI_RXA_TUSIZE2)); + MMIO_D(_MMIO(_FDI_RXB_TUSIZE1)); + MMIO_D(_MMIO(_FDI_RXB_TUSIZE2)); + MMIO_D(PCH_PP_CONTROL); + MMIO_D(PCH_PP_DIVISOR); + MMIO_D(PCH_PP_STATUS); + MMIO_D(PCH_LVDS); + MMIO_D(_MMIO(_PCH_DPLL_A)); + MMIO_D(_MMIO(_PCH_DPLL_B)); + MMIO_D(_MMIO(_PCH_FPA0)); + MMIO_D(_MMIO(_PCH_FPA1)); + MMIO_D(_MMIO(_PCH_FPB0)); + MMIO_D(_MMIO(_PCH_FPB1)); + MMIO_D(PCH_DREF_CONTROL); + MMIO_D(PCH_RAWCLK_FREQ); + MMIO_D(PCH_DPLL_SEL); + MMIO_D(_MMIO(0x61208)); + MMIO_D(_MMIO(0x6120c)); + MMIO_D(PCH_PP_ON_DELAYS); + MMIO_D(PCH_PP_OFF_DELAYS); + MMIO_D(_MMIO(0xe651c)); + MMIO_D(_MMIO(0xe661c)); + MMIO_D(_MMIO(0xe671c)); + MMIO_D(_MMIO(0xe681c)); + MMIO_D(_MMIO(0xe6c04)); + MMIO_D(_MMIO(0xe6e1c)); + MMIO_D(PCH_PORT_HOTPLUG); + MMIO_D(LCPLL_CTL); + MMIO_D(FUSE_STRAP); + MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL); + MMIO_D(DISP_ARB_CTL); + MMIO_D(DISP_ARB_CTL2); + MMIO_D(ILK_DISPLAY_CHICKEN1); + MMIO_D(ILK_DISPLAY_CHICKEN2); + MMIO_D(ILK_DSPCLK_GATE_D); + MMIO_D(SOUTH_CHICKEN1); + MMIO_D(SOUTH_CHICKEN2); + MMIO_D(_MMIO(_TRANSA_CHICKEN1)); + MMIO_D(_MMIO(_TRANSB_CHICKEN1)); + MMIO_D(SOUTH_DSPCLK_GATE_D); + MMIO_D(_MMIO(_TRANSA_CHICKEN2)); + MMIO_D(_MMIO(_TRANSB_CHICKEN2)); + MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A)); + MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A)); + MMIO_D(ILK_FBC_RT_BASE); + MMIO_D(IPS_CTL); + MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A)); + MMIO_D(PIPE_CSC_MODE(PIPE_A)); + MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A)); + MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A)); + MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A)); + MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A)); + MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A)); + MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A)); + MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B)); + MMIO_D(PIPE_CSC_MODE(PIPE_B)); + MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B)); + MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B)); + MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B)); + MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B)); + MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B)); + MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B)); + MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C)); + MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C)); + MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C)); + MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C)); + MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C)); + 
MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C)); + MMIO_D(PIPE_CSC_MODE(PIPE_C)); + MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C)); + MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C)); + MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C)); + MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C)); + MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C)); + MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C)); + MMIO_D(PREC_PAL_INDEX(PIPE_A)); + MMIO_D(PREC_PAL_DATA(PIPE_A)); + MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3); + MMIO_D(PREC_PAL_INDEX(PIPE_B)); + MMIO_D(PREC_PAL_DATA(PIPE_B)); + MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3); + MMIO_D(PREC_PAL_INDEX(PIPE_C)); + MMIO_D(PREC_PAL_DATA(PIPE_C)); + MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3); + MMIO_D(_MMIO(0x60110)); + MMIO_D(_MMIO(0x61110)); + MMIO_F(_MMIO(0x70400), 0x40); + MMIO_F(_MMIO(0x71400), 0x40); + MMIO_F(_MMIO(0x72400), 0x40); + MMIO_D(WM_LINETIME(PIPE_A)); + MMIO_D(WM_LINETIME(PIPE_B)); + MMIO_D(WM_LINETIME(PIPE_C)); + MMIO_D(SPLL_CTL); + MMIO_D(_MMIO(_WRPLL_CTL1)); + MMIO_D(_MMIO(_WRPLL_CTL2)); + MMIO_D(PORT_CLK_SEL(PORT_A)); + MMIO_D(PORT_CLK_SEL(PORT_B)); + MMIO_D(PORT_CLK_SEL(PORT_C)); + MMIO_D(PORT_CLK_SEL(PORT_D)); + MMIO_D(PORT_CLK_SEL(PORT_E)); + MMIO_D(TRANS_CLK_SEL(TRANSCODER_A)); + MMIO_D(TRANS_CLK_SEL(TRANSCODER_B)); + MMIO_D(TRANS_CLK_SEL(TRANSCODER_C)); + MMIO_D(HSW_NDE_RSTWRN_OPT); + MMIO_D(_MMIO(0x46508)); + MMIO_D(_MMIO(0x49080)); + MMIO_D(_MMIO(0x49180)); + MMIO_D(_MMIO(0x49280)); + MMIO_F(_MMIO(0x49090), 0x14); + MMIO_F(_MMIO(0x49190), 0x14); + MMIO_F(_MMIO(0x49290), 0x14); + MMIO_D(GAMMA_MODE(PIPE_A)); + MMIO_D(GAMMA_MODE(PIPE_B)); + MMIO_D(GAMMA_MODE(PIPE_C)); + MMIO_D(PIPE_MULT(PIPE_A)); + MMIO_D(PIPE_MULT(PIPE_B)); + MMIO_D(PIPE_MULT(PIPE_C)); + MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A)); + MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B)); + MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C)); + MMIO_D(SFUSE_STRAP); + MMIO_D(SBI_ADDR); + MMIO_D(SBI_DATA); + MMIO_D(SBI_CTL_STAT); + MMIO_D(PIXCLK_GATE); + MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4); + MMIO_D(DDI_BUF_CTL(PORT_A)); + MMIO_D(DDI_BUF_CTL(PORT_B)); + MMIO_D(DDI_BUF_CTL(PORT_C)); + MMIO_D(DDI_BUF_CTL(PORT_D)); + MMIO_D(DDI_BUF_CTL(PORT_E)); + MMIO_D(DP_TP_CTL(PORT_A)); + MMIO_D(DP_TP_CTL(PORT_B)); + MMIO_D(DP_TP_CTL(PORT_C)); + MMIO_D(DP_TP_CTL(PORT_D)); + MMIO_D(DP_TP_CTL(PORT_E)); + MMIO_D(DP_TP_STATUS(PORT_A)); + MMIO_D(DP_TP_STATUS(PORT_B)); + MMIO_D(DP_TP_STATUS(PORT_C)); + MMIO_D(DP_TP_STATUS(PORT_D)); + MMIO_D(DP_TP_STATUS(PORT_E)); + MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50); + MMIO_F(_MMIO(0x64e60), 0x50); + MMIO_F(_MMIO(0x64eC0), 0x50); + MMIO_F(_MMIO(0x64f20), 0x50); + MMIO_F(_MMIO(0x64f80), 0x50); + MMIO_D(HSW_AUD_CFG(PIPE_A)); + MMIO_D(HSW_AUD_PIN_ELD_CP_VLD); + MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A)); + MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_A)); + MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_B)); + MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_C)); + MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_EDP)); + MMIO_D(_MMIO(_TRANSA_MSA_MISC)); + MMIO_D(_MMIO(_TRANSB_MSA_MISC)); + MMIO_D(_MMIO(_TRANSC_MSA_MISC)); + MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC)); + MMIO_D(FORCEWAKE); + MMIO_D(FORCEWAKE_ACK); + MMIO_D(GEN6_GT_CORE_STATUS); + MMIO_D(GEN6_GT_THREAD_STATUS_REG); + MMIO_D(GTFIFODBG); + MMIO_D(GTFIFOCTL); + MMIO_D(ECOBUS); + MMIO_D(GEN6_RC_CONTROL); + MMIO_D(GEN6_RC_STATE); + MMIO_D(GEN6_RPNSWREQ); + MMIO_D(GEN6_RC_VIDEO_FREQ); + MMIO_D(GEN6_RP_DOWN_TIMEOUT); + MMIO_D(GEN6_RP_INTERRUPT_LIMITS); + MMIO_D(GEN6_RPSTAT1); + MMIO_D(GEN6_RP_CONTROL); + MMIO_D(GEN6_RP_UP_THRESHOLD); + MMIO_D(GEN6_RP_DOWN_THRESHOLD); + MMIO_D(GEN6_RP_CUR_UP_EI); + MMIO_D(GEN6_RP_CUR_UP); + MMIO_D(GEN6_RP_PREV_UP); + MMIO_D(GEN6_RP_CUR_DOWN_EI); + 
MMIO_D(GEN6_RP_CUR_DOWN); + MMIO_D(GEN6_RP_PREV_DOWN); + MMIO_D(GEN6_RP_UP_EI); + MMIO_D(GEN6_RP_DOWN_EI); + MMIO_D(GEN6_RP_IDLE_HYSTERSIS); + MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT); + MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT); + MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT); + MMIO_D(GEN6_RC_EVALUATION_INTERVAL); + MMIO_D(GEN6_RC_IDLE_HYSTERSIS); + MMIO_D(GEN6_RC_SLEEP); + MMIO_D(GEN6_RC1e_THRESHOLD); + MMIO_D(GEN6_RC6_THRESHOLD); + MMIO_D(GEN6_RC6p_THRESHOLD); + MMIO_D(GEN6_RC6pp_THRESHOLD); + MMIO_D(GEN6_PMINTRMSK); + + MMIO_D(RSTDBYCTL); + MMIO_D(GEN6_GDRST); + MMIO_F(FENCE_REG_GEN6_LO(0), 0x80); + MMIO_D(CPU_VGACNTRL); + MMIO_D(TILECTL); + MMIO_D(GEN6_UCGCTL1); + MMIO_D(GEN6_UCGCTL2); + MMIO_F(_MMIO(0x4f000), 0x90); + MMIO_D(GEN6_PCODE_DATA); + MMIO_D(_MMIO(0x13812c)); + MMIO_D(GEN7_ERR_INT); + MMIO_D(HSW_EDRAM_CAP); + MMIO_D(HSW_IDICR); + MMIO_D(GFX_FLSH_CNTL_GEN6); + MMIO_D(_MMIO(0x3c)); + MMIO_D(_MMIO(0x860)); + MMIO_D(ECOSKPD(RENDER_RING_BASE)); + MMIO_D(_MMIO(0x121d0)); + MMIO_D(ECOSKPD(BLT_RING_BASE)); + MMIO_D(_MMIO(0x41d0)); + MMIO_D(GAC_ECO_BITS); + MMIO_D(_MMIO(0x6200)); + MMIO_D(_MMIO(0x6204)); + MMIO_D(_MMIO(0x6208)); + MMIO_D(_MMIO(0x7118)); + MMIO_D(_MMIO(0x7180)); + MMIO_D(_MMIO(0x7408)); + MMIO_D(_MMIO(0x7c00)); + MMIO_D(GEN6_MBCTL); + MMIO_D(_MMIO(0x911c)); + MMIO_D(_MMIO(0x9120)); + MMIO_D(GEN7_UCGCTL4); + MMIO_D(GAB_CTL); + MMIO_D(_MMIO(0x48800)); + MMIO_D(_MMIO(0xce044)); + MMIO_D(_MMIO(0xe6500)); + MMIO_D(_MMIO(0xe6504)); + MMIO_D(_MMIO(0xe6600)); + MMIO_D(_MMIO(0xe6604)); + MMIO_D(_MMIO(0xe6700)); + MMIO_D(_MMIO(0xe6704)); + MMIO_D(_MMIO(0xe6800)); + MMIO_D(_MMIO(0xe6804)); + MMIO_D(PCH_GMBUS4); + MMIO_D(PCH_GMBUS5); + MMIO_D(_MMIO(0x902c)); + MMIO_D(_MMIO(0xec008)); + MMIO_D(_MMIO(0xec00c)); + MMIO_D(_MMIO(0xec008 + 0x18)); + MMIO_D(_MMIO(0xec00c + 0x18)); + MMIO_D(_MMIO(0xec008 + 0x18 * 2)); + MMIO_D(_MMIO(0xec00c + 0x18 * 2)); + MMIO_D(_MMIO(0xec008 + 0x18 * 3)); + MMIO_D(_MMIO(0xec00c + 0x18 * 3)); + MMIO_D(_MMIO(0xec408)); + MMIO_D(_MMIO(0xec40c)); + MMIO_D(_MMIO(0xec408 + 0x18)); + MMIO_D(_MMIO(0xec40c + 0x18)); + MMIO_D(_MMIO(0xec408 + 0x18 * 2)); + MMIO_D(_MMIO(0xec40c + 0x18 * 2)); + MMIO_D(_MMIO(0xec408 + 0x18 * 3)); + MMIO_D(_MMIO(0xec40c + 0x18 * 3)); + MMIO_D(_MMIO(0xfc810)); + MMIO_D(_MMIO(0xfc81c)); + MMIO_D(_MMIO(0xfc828)); + MMIO_D(_MMIO(0xfc834)); + MMIO_D(_MMIO(0xfcc00)); + MMIO_D(_MMIO(0xfcc0c)); + MMIO_D(_MMIO(0xfcc18)); + MMIO_D(_MMIO(0xfcc24)); + MMIO_D(_MMIO(0xfd000)); + MMIO_D(_MMIO(0xfd00c)); + MMIO_D(_MMIO(0xfd018)); + MMIO_D(_MMIO(0xfd024)); + MMIO_D(_MMIO(0xfd034)); + MMIO_D(FPGA_DBG); + MMIO_D(_MMIO(0x2054)); + MMIO_D(_MMIO(0x12054)); + MMIO_D(_MMIO(0x22054)); + MMIO_D(_MMIO(0x1a054)); + MMIO_D(_MMIO(0x44070)); + MMIO_D(_MMIO(0x2178)); + MMIO_D(_MMIO(0x217c)); + MMIO_D(_MMIO(0x12178)); + MMIO_D(_MMIO(0x1217c)); + MMIO_F(_MMIO(0x5200), 32); + MMIO_F(_MMIO(0x5240), 32); + MMIO_F(_MMIO(0x5280), 16); + MMIO_D(BCS_SWCTRL); + MMIO_F(HS_INVOCATION_COUNT, 8); + MMIO_F(DS_INVOCATION_COUNT, 8); + MMIO_F(IA_VERTICES_COUNT, 8); + MMIO_F(IA_PRIMITIVES_COUNT, 8); + MMIO_F(VS_INVOCATION_COUNT, 8); + MMIO_F(GS_INVOCATION_COUNT, 8); + MMIO_F(GS_PRIMITIVES_COUNT, 8); + MMIO_F(CL_INVOCATION_COUNT, 8); + MMIO_F(CL_PRIMITIVES_COUNT, 8); + MMIO_F(PS_INVOCATION_COUNT, 8); + MMIO_F(PS_DEPTH_COUNT, 8); + MMIO_D(ARB_MODE); + MMIO_RING_D(RING_BBADDR); + MMIO_D(_MMIO(0x2220)); + MMIO_D(_MMIO(0x12220)); + MMIO_D(_MMIO(0x22220)); + MMIO_RING_D(RING_SYNC_1); + MMIO_RING_D(RING_SYNC_0); + MMIO_D(GUC_STATUS); + + MMIO_F(_MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000); + 
MMIO_F(_MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE); + MMIO_F(LGC_PALETTE(PIPE_A, 0), 1024); + MMIO_F(LGC_PALETTE(PIPE_B, 0), 1024); + MMIO_F(LGC_PALETTE(PIPE_C, 0), 1024); + + return 0; +} + +static int iterate_bdw_only_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + MMIO_D(HSW_PWR_WELL_CTL1); + MMIO_D(HSW_PWR_WELL_CTL2); + MMIO_D(HSW_PWR_WELL_CTL3); + MMIO_D(HSW_PWR_WELL_CTL4); + MMIO_D(HSW_PWR_WELL_CTL5); + MMIO_D(HSW_PWR_WELL_CTL6); + + MMIO_D(WM_MISC); + MMIO_D(_MMIO(_SRD_CTL_EDP)); + + MMIO_D(_MMIO(0xb1f0)); + MMIO_D(_MMIO(0xb1c0)); + MMIO_D(_MMIO(0xb100)); + MMIO_D(_MMIO(0xb10c)); + MMIO_D(_MMIO(0xb110)); + MMIO_D(_MMIO(0x83a4)); + MMIO_D(_MMIO(0x8430)); + MMIO_D(_MMIO(0x2248)); + MMIO_D(FORCEWAKE_ACK_HSW); + + return 0; +} + +static int iterate_bdw_plus_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *dev_priv = iter->i915; + + MMIO_D(GEN8_GT_IMR(0)); + MMIO_D(GEN8_GT_IER(0)); + MMIO_D(GEN8_GT_IIR(0)); + MMIO_D(GEN8_GT_ISR(0)); + MMIO_D(GEN8_GT_IMR(1)); + MMIO_D(GEN8_GT_IER(1)); + MMIO_D(GEN8_GT_IIR(1)); + MMIO_D(GEN8_GT_ISR(1)); + MMIO_D(GEN8_GT_IMR(2)); + MMIO_D(GEN8_GT_IER(2)); + MMIO_D(GEN8_GT_IIR(2)); + MMIO_D(GEN8_GT_ISR(2)); + MMIO_D(GEN8_GT_IMR(3)); + MMIO_D(GEN8_GT_IER(3)); + MMIO_D(GEN8_GT_IIR(3)); + MMIO_D(GEN8_GT_ISR(3)); + MMIO_D(GEN8_DE_PIPE_IMR(PIPE_A)); + MMIO_D(GEN8_DE_PIPE_IER(PIPE_A)); + MMIO_D(GEN8_DE_PIPE_IIR(PIPE_A)); + MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A)); + MMIO_D(GEN8_DE_PIPE_IMR(PIPE_B)); + MMIO_D(GEN8_DE_PIPE_IER(PIPE_B)); + MMIO_D(GEN8_DE_PIPE_IIR(PIPE_B)); + MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B)); + MMIO_D(GEN8_DE_PIPE_IMR(PIPE_C)); + MMIO_D(GEN8_DE_PIPE_IER(PIPE_C)); + MMIO_D(GEN8_DE_PIPE_IIR(PIPE_C)); + MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C)); + MMIO_D(GEN8_DE_PORT_IMR); + MMIO_D(GEN8_DE_PORT_IER); + MMIO_D(GEN8_DE_PORT_IIR); + MMIO_D(GEN8_DE_PORT_ISR); + MMIO_D(GEN8_DE_MISC_IMR); + MMIO_D(GEN8_DE_MISC_IER); + MMIO_D(GEN8_DE_MISC_IIR); + MMIO_D(GEN8_DE_MISC_ISR); + MMIO_D(GEN8_PCU_IMR); + MMIO_D(GEN8_PCU_IER); + MMIO_D(GEN8_PCU_IIR); + MMIO_D(GEN8_PCU_ISR); + MMIO_D(GEN8_MASTER_IRQ); + MMIO_RING_D(RING_ACTHD_UDW); + +#define RING_REG(base) _MMIO((base) + 0xd0) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x230) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x234) + MMIO_RING_F(RING_REG, 8); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x244) + MMIO_RING_D(RING_REG); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x370) + MMIO_RING_F(RING_REG, 48); +#undef RING_REG + +#define RING_REG(base) _MMIO((base) + 0x3a0) + MMIO_RING_D(RING_REG); +#undef RING_REG + + MMIO_D(PIPEMISC(PIPE_A)); + MMIO_D(PIPEMISC(PIPE_B)); + MMIO_D(PIPEMISC(PIPE_C)); + MMIO_D(_MMIO(0x1c1d0)); + MMIO_D(GEN6_MBCUNIT_SNPCR); + MMIO_D(GEN7_MISCCPCTL); + MMIO_D(_MMIO(0x1c054)); + MMIO_D(GEN6_PCODE_MAILBOX); + if (!IS_BROXTON(dev_priv)) + MMIO_D(GEN8_PRIVATE_PAT_LO); + MMIO_D(GEN8_PRIVATE_PAT_HI); + MMIO_D(GAMTARBMODE); + +#define RING_REG(base) _MMIO((base) + 0x270) + MMIO_RING_F(RING_REG, 32); +#undef RING_REG + + MMIO_RING_D(RING_HWS_PGA); + MMIO_D(HDC_CHICKEN0); + MMIO_D(CHICKEN_PIPESL_1(PIPE_A)); + MMIO_D(CHICKEN_PIPESL_1(PIPE_B)); + MMIO_D(CHICKEN_PIPESL_1(PIPE_C)); + MMIO_D(_MMIO(0x6671c)); + MMIO_D(_MMIO(0x66c00)); + MMIO_D(_MMIO(0x66c04)); + MMIO_D(HSW_GTT_CACHE_EN); + MMIO_D(GEN8_EU_DISABLE0); + MMIO_D(GEN8_EU_DISABLE1); + MMIO_D(GEN8_EU_DISABLE2); + MMIO_D(_MMIO(0xfdc)); + MMIO_D(GEN8_ROW_CHICKEN); + MMIO_D(GEN7_ROW_CHICKEN2); + MMIO_D(GEN8_UCGCTL6); + 
MMIO_D(GEN8_L3SQCREG4); + MMIO_D(GEN9_SCRATCH_LNCF1); + MMIO_F(_MMIO(0x24d0), 48); + MMIO_D(_MMIO(0x44484)); + MMIO_D(_MMIO(0x4448c)); + MMIO_D(GEN8_L3_LRA_1_GPGPU); + MMIO_D(_MMIO(0x110000)); + MMIO_D(_MMIO(0x48400)); + MMIO_D(_MMIO(0x6e570)); + MMIO_D(_MMIO(0x65f10)); + MMIO_D(_MMIO(0xe194)); + MMIO_D(_MMIO(0xe188)); + MMIO_D(HALF_SLICE_CHICKEN2); + MMIO_D(_MMIO(0x2580)); + MMIO_D(_MMIO(0xe220)); + MMIO_D(_MMIO(0xe230)); + MMIO_D(_MMIO(0xe240)); + MMIO_D(_MMIO(0xe260)); + MMIO_D(_MMIO(0xe270)); + MMIO_D(_MMIO(0xe280)); + MMIO_D(_MMIO(0xe2a0)); + MMIO_D(_MMIO(0xe2b0)); + MMIO_D(_MMIO(0xe2c0)); + MMIO_D(_MMIO(0x21f0)); + MMIO_D(GEN8_GAMW_ECO_DEV_RW_IA); + MMIO_D(_MMIO(0x215c)); + MMIO_F(_MMIO(0x2290), 8); + MMIO_D(_MMIO(0x2b00)); + MMIO_D(_MMIO(0x2360)); + MMIO_D(_MMIO(0x1c17c)); + MMIO_D(_MMIO(0x1c178)); + MMIO_D(_MMIO(0x4260)); + MMIO_D(_MMIO(0x4264)); + MMIO_D(_MMIO(0x4268)); + MMIO_D(_MMIO(0x426c)); + MMIO_D(_MMIO(0x4270)); + MMIO_D(_MMIO(0x4094)); + MMIO_D(_MMIO(0x22178)); + MMIO_D(_MMIO(0x1a178)); + MMIO_D(_MMIO(0x1a17c)); + MMIO_D(_MMIO(0x2217c)); + MMIO_D(EDP_PSR_IMR); + MMIO_D(EDP_PSR_IIR); + MMIO_D(_MMIO(0xe4cc)); + MMIO_D(GEN7_SC_INSTDONE); + + return 0; +} + +static int iterate_pre_skl_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + MMIO_D(FORCEWAKE_MT); + + MMIO_D(PCH_ADPA); + MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4); + MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4); + MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4); + + MMIO_F(_MMIO(0x70440), 0xc); + MMIO_F(_MMIO(0x71440), 0xc); + MMIO_F(_MMIO(0x72440), 0xc); + MMIO_F(_MMIO(0x7044c), 0xc); + MMIO_F(_MMIO(0x7144c), 0xc); + MMIO_F(_MMIO(0x7244c), 0xc); + + return 0; +} + +static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *dev_priv = iter->i915; + + MMIO_D(FORCEWAKE_RENDER_GEN9); + MMIO_D(FORCEWAKE_ACK_RENDER_GEN9); + MMIO_D(FORCEWAKE_GT_GEN9); + MMIO_D(FORCEWAKE_ACK_GT_GEN9); + MMIO_D(FORCEWAKE_MEDIA_GEN9); + MMIO_D(FORCEWAKE_ACK_MEDIA_GEN9); + MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4); + MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4); + MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4); + MMIO_D(HSW_PWR_WELL_CTL1); + MMIO_D(HSW_PWR_WELL_CTL2); + MMIO_D(DBUF_CTL_S(0)); + MMIO_D(GEN9_PG_ENABLE); + MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS); + MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS); + MMIO_D(GEN9_GAMT_ECO_REG_RW_IA); + MMIO_D(MMCD_MISC_CTRL); + MMIO_D(CHICKEN_PAR1_1); + MMIO_D(DC_STATE_EN); + MMIO_D(DC_STATE_DEBUG); + MMIO_D(CDCLK_CTL); + MMIO_D(LCPLL1_CTL); + MMIO_D(LCPLL2_CTL); + MMIO_D(_MMIO(_DPLL1_CFGCR1)); + MMIO_D(_MMIO(_DPLL2_CFGCR1)); + MMIO_D(_MMIO(_DPLL3_CFGCR1)); + MMIO_D(_MMIO(_DPLL1_CFGCR2)); + MMIO_D(_MMIO(_DPLL2_CFGCR2)); + MMIO_D(_MMIO(_DPLL3_CFGCR2)); + MMIO_D(DPLL_CTRL1); + MMIO_D(DPLL_CTRL2); + MMIO_D(DPLL_STATUS); + MMIO_D(SKL_PS_WIN_POS(PIPE_A, 0)); + MMIO_D(SKL_PS_WIN_POS(PIPE_A, 1)); + MMIO_D(SKL_PS_WIN_POS(PIPE_B, 0)); + MMIO_D(SKL_PS_WIN_POS(PIPE_B, 1)); + MMIO_D(SKL_PS_WIN_POS(PIPE_C, 0)); + MMIO_D(SKL_PS_WIN_POS(PIPE_C, 1)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_A, 0)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_A, 1)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_B, 0)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_B, 1)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_C, 0)); + MMIO_D(SKL_PS_WIN_SZ(PIPE_C, 1)); + MMIO_D(SKL_PS_CTRL(PIPE_A, 0)); + MMIO_D(SKL_PS_CTRL(PIPE_A, 1)); + MMIO_D(SKL_PS_CTRL(PIPE_B, 0)); + MMIO_D(SKL_PS_CTRL(PIPE_B, 1)); + MMIO_D(SKL_PS_CTRL(PIPE_C, 0)); + MMIO_D(SKL_PS_CTRL(PIPE_C, 1)); + MMIO_D(PLANE_BUF_CFG(PIPE_A, 0)); + MMIO_D(PLANE_BUF_CFG(PIPE_A, 1)); + MMIO_D(PLANE_BUF_CFG(PIPE_A, 2)); + MMIO_D(PLANE_BUF_CFG(PIPE_A, 3)); + 
MMIO_D(PLANE_BUF_CFG(PIPE_B, 0)); + MMIO_D(PLANE_BUF_CFG(PIPE_B, 1)); + MMIO_D(PLANE_BUF_CFG(PIPE_B, 2)); + MMIO_D(PLANE_BUF_CFG(PIPE_B, 3)); + MMIO_D(PLANE_BUF_CFG(PIPE_C, 0)); + MMIO_D(PLANE_BUF_CFG(PIPE_C, 1)); + MMIO_D(PLANE_BUF_CFG(PIPE_C, 2)); + MMIO_D(PLANE_BUF_CFG(PIPE_C, 3)); + MMIO_D(CUR_BUF_CFG(PIPE_A)); + MMIO_D(CUR_BUF_CFG(PIPE_B)); + MMIO_D(CUR_BUF_CFG(PIPE_C)); + MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8); + MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8); + MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8); + MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8); + MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8); + MMIO_D(PLANE_WM_TRANS(PIPE_A, 0)); + MMIO_D(PLANE_WM_TRANS(PIPE_A, 1)); + MMIO_D(PLANE_WM_TRANS(PIPE_A, 2)); + MMIO_D(PLANE_WM_TRANS(PIPE_B, 0)); + MMIO_D(PLANE_WM_TRANS(PIPE_B, 1)); + MMIO_D(PLANE_WM_TRANS(PIPE_B, 2)); + MMIO_D(PLANE_WM_TRANS(PIPE_C, 0)); + MMIO_D(PLANE_WM_TRANS(PIPE_C, 1)); + MMIO_D(PLANE_WM_TRANS(PIPE_C, 2)); + MMIO_D(CUR_WM_TRANS(PIPE_A)); + MMIO_D(CUR_WM_TRANS(PIPE_B)); + MMIO_D(CUR_WM_TRANS(PIPE_C)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 0)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 1)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 2)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 3)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 0)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 1)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 2)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 3)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 0)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 1)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 2)); + MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 3)); + MMIO_D(_MMIO(_REG_701C0(PIPE_A, 1))); + MMIO_D(_MMIO(_REG_701C0(PIPE_A, 2))); + MMIO_D(_MMIO(_REG_701C0(PIPE_A, 3))); + MMIO_D(_MMIO(_REG_701C0(PIPE_A, 4))); + MMIO_D(_MMIO(_REG_701C0(PIPE_B, 1))); + MMIO_D(_MMIO(_REG_701C0(PIPE_B, 2))); + MMIO_D(_MMIO(_REG_701C0(PIPE_B, 3))); + MMIO_D(_MMIO(_REG_701C0(PIPE_B, 4))); + MMIO_D(_MMIO(_REG_701C0(PIPE_C, 1))); + MMIO_D(_MMIO(_REG_701C0(PIPE_C, 2))); + MMIO_D(_MMIO(_REG_701C0(PIPE_C, 3))); + MMIO_D(_MMIO(_REG_701C0(PIPE_C, 4))); + MMIO_D(_MMIO(_REG_701C4(PIPE_A, 1))); + MMIO_D(_MMIO(_REG_701C4(PIPE_A, 2))); + MMIO_D(_MMIO(_REG_701C4(PIPE_A, 3))); + MMIO_D(_MMIO(_REG_701C4(PIPE_A, 4))); + MMIO_D(_MMIO(_REG_701C4(PIPE_B, 1))); + MMIO_D(_MMIO(_REG_701C4(PIPE_B, 2))); + MMIO_D(_MMIO(_REG_701C4(PIPE_B, 3))); + MMIO_D(_MMIO(_REG_701C4(PIPE_B, 4))); + MMIO_D(_MMIO(_REG_701C4(PIPE_C, 1))); + MMIO_D(_MMIO(_REG_701C4(PIPE_C, 2))); + MMIO_D(_MMIO(_REG_701C4(PIPE_C, 3))); + MMIO_D(_MMIO(_REG_701C4(PIPE_C, 4))); + MMIO_D(_MMIO(_PLANE_CTL_3_A)); + MMIO_D(_MMIO(_PLANE_CTL_3_B)); + MMIO_D(_MMIO(0x72380)); + MMIO_D(_MMIO(0x7239c)); + MMIO_D(_MMIO(_PLANE_SURF_3_A)); + MMIO_D(_MMIO(_PLANE_SURF_3_B)); + MMIO_D(DMC_SSP_BASE); + MMIO_D(DMC_HTP_SKL); + MMIO_D(DMC_LAST_WRITE); + MMIO_D(BDW_SCRATCH1); + MMIO_D(SKL_DFSM); + MMIO_D(DISPIO_CR_TX_BMU_CR0); + MMIO_F(GEN9_GFX_MOCS(0), 0x7f8); + MMIO_F(GEN7_L3CNTLREG2, 0x80); + MMIO_D(RPM_CONFIG0); + MMIO_D(_MMIO(0xd08)); + MMIO_D(RC6_LOCATION); + MMIO_D(GEN7_FF_SLICE_CS_CHICKEN1); + MMIO_D(GEN9_CS_DEBUG_MODE1); + /* TRTT */ + MMIO_D(TRVATTL3PTRDW(0)); + MMIO_D(TRVATTL3PTRDW(1)); + MMIO_D(TRVATTL3PTRDW(2)); + MMIO_D(TRVATTL3PTRDW(3)); + MMIO_D(TRVADR); + MMIO_D(TRTTE); + MMIO_D(_MMIO(0x4dfc)); + MMIO_D(_MMIO(0x46430)); + MMIO_D(_MMIO(0x46520)); + MMIO_D(_MMIO(0xc403c)); + 
MMIO_D(GEN8_GARBCNTL); + MMIO_D(DMA_CTRL); + MMIO_D(_MMIO(0x65900)); + MMIO_D(GEN6_STOLEN_RESERVED); + MMIO_D(_MMIO(0x4068)); + MMIO_D(_MMIO(0x67054)); + MMIO_D(_MMIO(0x6e560)); + MMIO_D(_MMIO(0x6e554)); + MMIO_D(_MMIO(0x2b20)); + MMIO_D(_MMIO(0x65f00)); + MMIO_D(_MMIO(0x65f08)); + MMIO_D(_MMIO(0x320f0)); + MMIO_D(_MMIO(0x70034)); + MMIO_D(_MMIO(0x71034)); + MMIO_D(_MMIO(0x72034)); + MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A))); + MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B))); + MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C))); + MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A))); + MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B))); + MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C))); + MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A))); + MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B))); + MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C))); + MMIO_D(_MMIO(0x44500)); +#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4) + MMIO_RING_D(CSFE_CHICKEN1_REG); +#undef CSFE_CHICKEN1_REG + MMIO_D(GEN8_HDC_CHICKEN1); + MMIO_D(GEN9_WM_CHICKEN3); + + if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) + MMIO_D(GAMT_CHKN_BIT_REG); + if (!IS_BROXTON(dev_priv)) + MMIO_D(GEN9_CTX_PREEMPT_REG); + MMIO_F(_MMIO(DMC_MMIO_START_RANGE), 0x3000); + return 0; +} + +static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *dev_priv = iter->i915; + + MMIO_F(_MMIO(0x80000), 0x3000); + MMIO_D(GEN7_SAMPLER_INSTDONE); + MMIO_D(GEN7_ROW_INSTDONE); + MMIO_D(GEN8_FAULT_TLB_DATA0); + MMIO_D(GEN8_FAULT_TLB_DATA1); + MMIO_D(ERROR_GEN6); + MMIO_D(DONE_REG); + MMIO_D(EIR); + MMIO_D(PGTBL_ER); + MMIO_D(_MMIO(0x4194)); + MMIO_D(_MMIO(0x4294)); + MMIO_D(_MMIO(0x4494)); + MMIO_RING_D(RING_PSMI_CTL); + MMIO_RING_D(RING_DMA_FADD); + MMIO_RING_D(RING_DMA_FADD_UDW); + MMIO_RING_D(RING_IPEHR); + MMIO_RING_D(RING_INSTPS); + MMIO_RING_D(RING_BBADDR_UDW); + MMIO_RING_D(RING_BBSTATE); + MMIO_RING_D(RING_IPEIR); + MMIO_F(SOFT_SCRATCH(0), 16 * 4); + MMIO_D(BXT_P_CR_GT_DISP_PWRON); + MMIO_D(BXT_RP_STATE_CAP); + MMIO_D(BXT_PHY_CTL_FAMILY(DPIO_PHY0)); + MMIO_D(BXT_PHY_CTL_FAMILY(DPIO_PHY1)); + MMIO_D(BXT_PHY_CTL(PORT_A)); + MMIO_D(BXT_PHY_CTL(PORT_B)); + MMIO_D(BXT_PHY_CTL(PORT_C)); + MMIO_D(BXT_PORT_PLL_ENABLE(PORT_A)); + MMIO_D(BXT_PORT_PLL_ENABLE(PORT_B)); + MMIO_D(BXT_PORT_PLL_ENABLE(PORT_C)); + MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0)); + MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0)); + MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0)); + MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0)); + MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1)); + MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1)); + MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1)); + MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1)); + MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1)); + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0)); + 
MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10)); + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10)); + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2)); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9)); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10)); + MMIO_D(BXT_DE_PLL_CTL); + MMIO_D(BXT_DE_PLL_ENABLE); + MMIO_D(BXT_DSI_PLL_CTL); + MMIO_D(BXT_DSI_PLL_ENABLE); + MMIO_D(GEN9_CLKGATE_DIS_0); + MMIO_D(GEN9_CLKGATE_DIS_4); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A)); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B)); + 
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C)); + MMIO_D(RC6_CTX_BASE); + MMIO_D(GEN8_PUSHBUS_CONTROL); + MMIO_D(GEN8_PUSHBUS_ENABLE); + MMIO_D(GEN8_PUSHBUS_SHIFT); + MMIO_D(GEN6_GFXPAUSE); + MMIO_D(GEN8_L3SQCREG1); + MMIO_D(GEN8_L3CNTLREG); + MMIO_D(_MMIO(0x20D8)); + MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40); + MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40); + MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40); + MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40); + MMIO_D(GEN9_CTX_PREEMPT_REG); + MMIO_D(GEN8_PRIVATE_PAT_LO); + + return 0; +} + +/** + * intel_gvt_iterate_mmio_table - Iterate the GVT MMIO table + * @iter: the iterator + * + * This function is called to iterate over the GVT MMIO table when i915 is + * taking a snapshot of the HW and GVT is building its MMIO tracking table. + */ +int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter) +{ + struct drm_i915_private *i915 = iter->i915; + int ret; + + ret = iterate_generic_mmio(iter); + if (ret) + goto err; + + if (IS_BROADWELL(i915)) { + ret = iterate_bdw_only_mmio(iter); + if (ret) + goto err; + ret = iterate_bdw_plus_mmio(iter); + if (ret) + goto err; + ret = iterate_pre_skl_mmio(iter); + if (ret) + goto err; + } else if (IS_SKYLAKE(i915) || + IS_KABYLAKE(i915) || + IS_COFFEELAKE(i915) || + IS_COMETLAKE(i915)) { + ret = iterate_bdw_plus_mmio(iter); + if (ret) + goto err; + ret = iterate_skl_plus_mmio(iter); + if (ret) + goto err; + } else if (IS_BROXTON(i915)) { + ret = iterate_bdw_plus_mmio(iter); + if (ret) + goto err; + ret = iterate_skl_plus_mmio(iter); + if (ret) + goto err; + ret = iterate_bxt_mmio(iter); + if (ret) + goto err; + } + + return 0; +err: + return ret; +} +EXPORT_SYMBOL_NS_GPL(intel_gvt_iterate_mmio_table, I915_GVT); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 594ab59e4991..ee0047fdc95d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5475,6 +5475,25 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, } blocks = fixed16_to_u32_round_up(selected_result) + 1; + /* + * Let blocks be at minimum equivalent to plane_blocks_per_line, + * as there will be at least one line in the lines configuration. + * This is a workaround for FIFO underruns observed with resolutions + * like 4k 60 Hz in single-channel DRAM configurations. + * + * As per Bspec 49325, if the ddb allocation can hold at least + * one plane_blocks_per_line, we should have selected method2 in + * the logic above. Assuming that modern platforms have enough dbuf + * and that method2 guarantees blocks equivalent to at least one + * line, clamp blocks to plane_blocks_per_line. + * + * TODO: Revisit the logic when we have a better understanding of + * the DRAM channels' impact on the level 0 memory latency and the + * relevant wm calculations. + */ + if (skl_wm_has_lines(dev_priv, level)) + blocks = max(blocks, + fixed16_to_u32_round_up(wp->plane_blocks_per_line)); lines = div_round_up_fixed16(selected_result, wp->plane_blocks_per_line);
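To make the new clamp concrete, here is a rough standalone illustration with invented numbers (not taken from Bspec or any real platform); it models the kernel's uint_fixed16_t values as plain doubles instead of the fixed-point helpers, so it only mirrors the arithmetic of the hunk above, not the driver code itself.

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Invented example values, purely illustrative. */
	double selected_result = 13.2;       /* blocks picked by the wm method */
	double plane_blocks_per_line = 20.5; /* ddb blocks in one plane line */

	/* Mirrors: blocks = fixed16_to_u32_round_up(selected_result) + 1; */
	unsigned int blocks = (unsigned int)ceil(selected_result) + 1; /* 15 */

	/* The workaround: never allocate less than one full line. */
	if (blocks < (unsigned int)ceil(plane_blocks_per_line))
		blocks = (unsigned int)ceil(plane_blocks_per_line); /* now 21 */

	printf("blocks = %u\n", blocks);
	return 0;
}

With these numbers, the method-selected result (15 blocks) cannot hold even one full line (21 blocks), which is exactly the FIFO-underrun case the added max() guards against.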
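Stepping back to intel_gvt_mmio_table.c: a minimal sketch of how a consumer might drive the exported iterator is shown below. It assumes the full intel_gvt_mmio_table_iter (not the empty !GVT stub) carries the i915 pointer, an opaque data cookie and the handle_mmio_cb() hook that the MMIO_F() macro invokes; count_mmio_cb() and count_tracked_dwords() are hypothetical names, not part of this patch.

/* Hypothetical callback: tally how many dwords the table tracks. */
static int count_mmio_cb(struct intel_gvt_mmio_table_iter *iter,
			 u32 offset, u32 size)
{
	u32 *num = iter->data;	/* assumed opaque cookie field */

	*num += size / 4;	/* MMIO_D() reports 4-byte entries */
	return 0;
}

static int count_tracked_dwords(struct drm_i915_private *i915, u32 *num)
{
	struct intel_gvt_mmio_table_iter iter;

	*num = 0;
	iter.i915 = i915;
	iter.data = num;
	iter.handle_mmio_cb = count_mmio_cb;

	/* Walks the generic and per-platform tables, stops on first error. */
	return intel_gvt_iterate_mmio_table(&iter);
}

A two-pass consumer (count, allocate, then fill) matches how a tracking table would typically be built on top of a callback iterator like this, which is what lets the GVT module construct its table without i915 exporting the register list directly.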