author     Mauro Carvalho Chehab <mchehab@osg.samsung.com>  2015-04-27 16:32:45 +0300
committer  Mauro Carvalho Chehab <mchehab@osg.samsung.com>  2015-04-27 16:32:45 +0300
commit     b3e5ced63e051e8f911b795ac5b06229a5328f7b (patch)
tree       e63badb76509839ec948431859255923b6e2a09c /drivers/gpu/drm/i915/i915_debugfs.c
parent     e183201b9e917daf2530b637b2f34f1d5afb934d (diff)
parent     b787f68c36d49bb1d9236f403813641efa74a031 (diff)
download   linux-b3e5ced63e051e8f911b795ac5b06229a5328f7b.tar.xz
Merge tag 'v4.1-rc1' into patchwork
Linux 4.1-rc1
* tag 'v4.1-rc1': (11651 commits)
Linux 4.1-rc1
x86_64, asm: Work around AMD SYSRET SS descriptor attribute issue
v4l: xilinx: fix for include file movement
platform/chrome: chromeos_laptop - instantiate Atmel at primary address
RCU pathwalk breakage when running into a symlink overmounting something
fix I_DIO_WAKEUP definition
direct-io: only inc/dec inode->i_dio_count for file systems
fs/9p: fix readdir()
Btrfs: prevent list corruption during free space cache processing
toshiba_acpi: Do not register vendor backlight when acpi_video bl is available
x86: fix special __probe_kernel_write() tail zeroing case
crypto: img-hash - CRYPTO_DEV_IMGTEC_HASH should depend on HAS_DMA
crypto: x86/sha512_ssse3 - fixup for asm function prototype change
nios2: rework cache
nios2: Add types.h header required for __u32 type
ALSA: hda - fix headset mic detection problem for one more machine
eth: bf609 eth clock: add pclk clock for stmmac driver probe
blackfin: Wire up missing syscalls
Btrfs: fix inode cache writeout
ACPI / scan: Add a scan handler for PRP0001
...
Diffstat (limited to 'drivers/gpu/drm/i915/i915_debugfs.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  356
1 file changed, 280 insertions, 76 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 96e811fe24ca..007c7d7d8295 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -139,10 +139,11 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
 		if (vma->pin_count > 0)
 			pin_count++;
-		seq_printf(m, " (pinned x %d)", pin_count);
+	}
+	seq_printf(m, " (pinned x %d)", pin_count);
 	if (obj->pin_display)
 		seq_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -152,12 +153,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 			seq_puts(m, " (pp");
 		else
 			seq_puts(m, " (g");
-		seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)",
+		seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
 			   vma->node.start, vma->node.size,
 			   vma->ggtt_view.type);
 	}
 	if (obj->stolen)
-		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
+		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
 	if (obj->pin_mappable || obj->fault_mappable) {
 		char s[3], *t = s;
 		if (obj->pin_mappable)
@@ -580,7 +581,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
 				   work->flip_queued_vblank,
 				   work->flip_ready_vblank,
-				   drm_vblank_count(dev, crtc->pipe));
+				   drm_crtc_vblank_count(&crtc->base));
 			if (work->enable_stall_check)
 				seq_puts(m, "Stall check enabled, ");
 			else
@@ -1089,7 +1090,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "Current P-state: %d\n",
 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
 	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
-		   IS_BROADWELL(dev)) {
+		   IS_BROADWELL(dev) || IS_GEN9(dev)) {
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1108,11 +1109,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
 		reqf = I915_READ(GEN6_RPNSWREQ);
-		reqf &= ~GEN6_TURBO_DISABLE;
-		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-			reqf >>= 24;
-		else
-			reqf >>= 25;
+		if (IS_GEN9(dev))
+			reqf >>= 23;
+		else {
+			reqf &= ~GEN6_TURBO_DISABLE;
+			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+				reqf >>= 24;
+			else
+				reqf >>= 25;
+		}
 		reqf = intel_gpu_freq(dev_priv, reqf);
 
 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
@@ -1126,7 +1131,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
-		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		if (IS_GEN9(dev))
+			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
 		else
 			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1152,7 +1159,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 		seq_printf(m, "Render p-state ratio: %d\n",
-			   (gt_perf_status & 0xff00) >> 8);
+			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
 		seq_printf(m, "Render p-state VID: %d\n",
 			   gt_perf_status & 0xff);
 		seq_printf(m, "Render p-state limit: %d\n",
@@ -1177,19 +1184,25 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			   GEN6_CURBSYTAVG_MASK);
 
 		max_freq = (rp_state_cap & 0xff0000) >> 16;
+		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
 		max_freq = (rp_state_cap & 0xff00) >> 8;
+		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
 		max_freq = rp_state_cap & 0xff;
+		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+		seq_printf(m, "Idle freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
 	} else if (IS_VALLEYVIEW(dev)) {
 		u32 freq_sts;
 
@@ -1204,6 +1217,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "min GPU freq: %d MHz\n",
 			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
 
+		seq_printf(m, "idle GPU freq: %d MHz\n",
+			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+
 		seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
 			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
 
@@ -1778,11 +1794,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 	ifbdev = dev_priv->fbdev;
 	fb = to_intel_framebuffer(ifbdev->helper.fb);
 
-	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
 		   fb->base.width,
 		   fb->base.height,
 		   fb->base.depth,
 		   fb->base.bits_per_pixel,
+		   fb->base.modifier[0],
 		   atomic_read(&fb->base.refcount.refcount));
 	describe_obj(m, fb->obj);
 	seq_putc(m, '\n');
@@ -1793,11 +1810,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 		if (ifbdev && &fb->base == ifbdev->helper.fb)
 			continue;
 
-		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
 			   fb->base.width,
 			   fb->base.height,
 			   fb->base.depth,
 			   fb->base.bits_per_pixel,
+			   fb->base.modifier[0],
 			   atomic_read(&fb->base.refcount.refcount));
 		describe_obj(m, fb->obj);
 		seq_putc(m, '\n');
@@ -1828,18 +1846,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	if (ret)
 		return ret;
 
-	if (dev_priv->ips.pwrctx) {
-		seq_puts(m, "power context ");
-		describe_obj(m, dev_priv->ips.pwrctx);
-		seq_putc(m, '\n');
-	}
-
-	if (dev_priv->ips.renderctx) {
-		seq_puts(m, "render context ");
-		describe_obj(m, dev_priv->ips.renderctx);
-		seq_putc(m, '\n');
-	}
-
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
 		if (!i915.enable_execlists &&
 		    ctx->legacy_hw_ctx.rcs_state == NULL)
@@ -2183,7 +2189,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 		seq_puts(m, "aliasing PPGTT:\n");
-		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
 
 		ppgtt->debug_dump(ppgtt, m);
 	}
@@ -2243,6 +2249,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	enum pipe pipe;
 	bool enabled = false;
 
+	if (!HAS_PSR(dev)) {
+		seq_puts(m, "PSR not supported\n");
+		return 0;
+	}
+
 	intel_runtime_pm_get(dev_priv);
 
 	mutex_lock(&dev_priv->psr.lock);
@@ -2255,17 +2266,15 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	seq_printf(m, "Re-enable work scheduled: %s\n",
 		   yesno(work_busy(&dev_priv->psr.work.work)));
 
-	if (HAS_PSR(dev)) {
-		if (HAS_DDI(dev))
-			enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
-		else {
-			for_each_pipe(dev_priv, pipe) {
-				stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
-					VLV_EDP_PSR_CURR_STATE_MASK;
-				if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
-				    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
-					enabled = true;
-			}
+	if (HAS_DDI(dev))
+		enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+	else {
+		for_each_pipe(dev_priv, pipe) {
+			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
+				VLV_EDP_PSR_CURR_STATE_MASK;
+			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+				enabled = true;
 		}
 	}
 	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
@@ -2282,7 +2291,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 		   yesno((bool)dev_priv->psr.link_standby));
 
 	/* CHV PSR has no kind of performance counter */
-	if (HAS_PSR(dev) && HAS_DDI(dev)) {
+	if (HAS_DDI(dev)) {
 		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
 			EDP_PSR_PERF_CNT_MASK;
 
@@ -2305,8 +2314,7 @@ static int i915_sink_crc(struct seq_file *m, void *data)
 	u8 crc[6];
 
 	drm_modeset_lock_all(dev);
-	list_for_each_entry(connector, &dev->mode_config.connector_list,
-			    base.head) {
+	for_each_intel_connector(dev, connector) {
 		if (connector->base.dpms != DRM_MODE_DPMS_ON)
 			continue;
 
@@ -2674,7 +2682,8 @@ static int i915_display_info(struct seq_file *m, void *unused)
 			active = cursor_position(dev, crtc->pipe, &x, &y);
 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
 				   yesno(crtc->cursor_base),
-				   x, y, crtc->cursor_width, crtc->cursor_height,
+				   x, y, crtc->base.cursor->state->crtc_w,
+				   crtc->base.cursor->state->crtc_h,
 				   crtc->cursor_addr, yesno(active));
 		}
 
@@ -2850,7 +2859,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 	for_each_pipe(dev_priv, pipe) {
 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
 
-		for_each_plane(pipe, plane) {
+		for_each_plane(dev_priv, pipe, plane) {
 			entry = &ddb->plane[pipe][plane];
 			seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
 				   entry->start, entry->end,
@@ -2867,6 +2876,115 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static void drrs_status_per_crtc(struct seq_file *m,
+		struct drm_device *dev, struct intel_crtc *intel_crtc)
+{
+	struct intel_encoder *intel_encoder;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_drrs *drrs = &dev_priv->drrs;
+	int vrefresh = 0;
+
+	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
+		/* Encoder connected on this CRTC */
+		switch (intel_encoder->type) {
+		case INTEL_OUTPUT_EDP:
+			seq_puts(m, "eDP:\n");
+			break;
+		case INTEL_OUTPUT_DSI:
+			seq_puts(m, "DSI:\n");
+			break;
+		case INTEL_OUTPUT_HDMI:
+			seq_puts(m, "HDMI:\n");
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			seq_puts(m, "DP:\n");
+			break;
+		default:
+			seq_printf(m, "Other encoder (id=%d).\n",
+				   intel_encoder->type);
+			return;
+		}
+	}
+
+	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
+		seq_puts(m, "\tVBT: DRRS_type: Static");
+	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
+		seq_puts(m, "\tVBT: DRRS_type: Seamless");
+	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
+		seq_puts(m, "\tVBT: DRRS_type: None");
+	else
+		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
+
+	seq_puts(m, "\n\n");
+
+	if (intel_crtc->config->has_drrs) {
+		struct intel_panel *panel;
+
+		mutex_lock(&drrs->mutex);
+		/* DRRS Supported */
+		seq_puts(m, "\tDRRS Supported: Yes\n");
+
+		/* disable_drrs() will make drrs->dp NULL */
+		if (!drrs->dp) {
+			seq_puts(m, "Idleness DRRS: Disabled");
+			mutex_unlock(&drrs->mutex);
+			return;
+		}
+
+		panel = &drrs->dp->attached_connector->panel;
+		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
+			   drrs->busy_frontbuffer_bits);
+
+		seq_puts(m, "\n\t\t");
+		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
+			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
+			vrefresh = panel->fixed_mode->vrefresh;
+		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
+			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
+			vrefresh = panel->downclock_mode->vrefresh;
+		} else {
+			seq_printf(m, "DRRS_State: Unknown(%d)\n",
+				   drrs->refresh_rate_type);
+			mutex_unlock(&drrs->mutex);
+			return;
+		}
+		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
+
+		seq_puts(m, "\n\t\t");
+		mutex_unlock(&drrs->mutex);
+	} else {
+		/* DRRS not supported. Print the VBT parameter*/
+		seq_puts(m, "\tDRRS Supported : No");
+	}
+	seq_puts(m, "\n");
+}
+
+static int i915_drrs_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct intel_crtc *intel_crtc;
+	int active_crtc_cnt = 0;
+
+	for_each_intel_crtc(dev, intel_crtc) {
+		drm_modeset_lock(&intel_crtc->base.mutex, NULL);
+
+		if (intel_crtc->active) {
+			active_crtc_cnt++;
+			seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
+
+			drrs_status_per_crtc(m, dev, intel_crtc);
+		}
+
+		drm_modeset_unlock(&intel_crtc->base.mutex);
+	}
+
+	if (!active_crtc_cnt)
+		seq_puts(m, "No active crtc found\n");
+
+	return 0;
+}
+
 struct pipe_crc_info {
 	const char *name;
 	struct drm_device *dev;
@@ -4189,7 +4307,7 @@ i915_max_freq_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 rp_state_cap, hw_max, hw_min;
+	u32 hw_max, hw_min;
 	int ret;
 
 	if (INTEL_INFO(dev)->gen < 6)
@@ -4206,18 +4324,10 @@ i915_max_freq_set(void *data, u64 val)
 	/*
 	 * Turbo will still be enabled, but won't go above the set value.
 	 */
-	if (IS_VALLEYVIEW(dev)) {
-		val = intel_freq_opcode(dev_priv, val);
-
-		hw_max = dev_priv->rps.max_freq;
-		hw_min = dev_priv->rps.min_freq;
-	} else {
-		val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(dev_priv, val);
 
-		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.max_freq;
-		hw_min = (rp_state_cap >> 16) & 0xff;
-	}
+	hw_max = dev_priv->rps.max_freq;
+	hw_min = dev_priv->rps.min_freq;
 
 	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4226,10 +4336,7 @@ i915_max_freq_set(void *data, u64 val)
 
 	dev_priv->rps.max_freq_softlimit = val;
 
-	if (IS_VALLEYVIEW(dev))
-		valleyview_set_rps(dev, val);
-	else
-		gen6_set_rps(dev, val);
+	intel_set_rps(dev, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -4267,7 +4374,7 @@ i915_min_freq_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 rp_state_cap, hw_max, hw_min;
+	u32 hw_max, hw_min;
 	int ret;
 
 	if (INTEL_INFO(dev)->gen < 6)
@@ -4284,18 +4391,10 @@ i915_min_freq_set(void *data, u64 val)
 	/*
 	 * Turbo will still be enabled, but won't go below the set value.
 	 */
-	if (IS_VALLEYVIEW(dev)) {
-		val = intel_freq_opcode(dev_priv, val);
-
-		hw_max = dev_priv->rps.max_freq;
-		hw_min = dev_priv->rps.min_freq;
-	} else {
-		val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(dev_priv, val);
 
-		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.max_freq;
-		hw_min = (rp_state_cap >> 16) & 0xff;
-	}
+	hw_max = dev_priv->rps.max_freq;
+	hw_min = dev_priv->rps.min_freq;
 
 	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4304,10 +4403,7 @@ i915_min_freq_set(void *data, u64 val)
 
 	dev_priv->rps.min_freq_softlimit = val;
 
-	if (IS_VALLEYVIEW(dev))
-		valleyview_set_rps(dev, val);
-	else
-		gen6_set_rps(dev, val);
+	intel_set_rps(dev, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -4374,6 +4470,112 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
 			i915_cache_sharing_get, i915_cache_sharing_set,
 			"%llu\n");
 
+static int i915_sseu_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;
+
+	if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev))
+		return -ENODEV;
+
+	seq_puts(m, "SSEU Device Info\n");
+	seq_printf(m, " Available Slice Total: %u\n",
+		   INTEL_INFO(dev)->slice_total);
+	seq_printf(m, " Available Subslice Total: %u\n",
+		   INTEL_INFO(dev)->subslice_total);
+	seq_printf(m, " Available Subslice Per Slice: %u\n",
+		   INTEL_INFO(dev)->subslice_per_slice);
+	seq_printf(m, " Available EU Total: %u\n",
+		   INTEL_INFO(dev)->eu_total);
+	seq_printf(m, " Available EU Per Subslice: %u\n",
+		   INTEL_INFO(dev)->eu_per_subslice);
+	seq_printf(m, " Has Slice Power Gating: %s\n",
+		   yesno(INTEL_INFO(dev)->has_slice_pg));
+	seq_printf(m, " Has Subslice Power Gating: %s\n",
+		   yesno(INTEL_INFO(dev)->has_subslice_pg));
+	seq_printf(m, " Has EU Power Gating: %s\n",
+		   yesno(INTEL_INFO(dev)->has_eu_pg));
+
+	seq_puts(m, "SSEU Device Status\n");
+	if (IS_CHERRYVIEW(dev)) {
+		const int ss_max = 2;
+		int ss;
+		u32 sig1[ss_max], sig2[ss_max];
+
+		sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
+		sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
+		sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
+		sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
+
+		for (ss = 0; ss < ss_max; ss++) {
+			unsigned int eu_cnt;
+
+			if (sig1[ss] & CHV_SS_PG_ENABLE)
+				/* skip disabled subslice */
+				continue;
+
+			s_tot = 1;
+			ss_per++;
+			eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
+				 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
+				 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
+				 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
+			eu_tot += eu_cnt;
+			eu_per = max(eu_per, eu_cnt);
+		}
+		ss_tot = ss_per;
+	} else if (IS_SKYLAKE(dev)) {
+		const int s_max = 3, ss_max = 4;
+		int s, ss;
+		u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
+
+		s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK);
+		s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK);
+		s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK);
+		eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK);
+		eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK);
+		eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK);
+		eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK);
+		eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK);
+		eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK);
+		eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+			     GEN9_PGCTL_SSA_EU19_ACK |
+			     GEN9_PGCTL_SSA_EU210_ACK |
+			     GEN9_PGCTL_SSA_EU311_ACK;
+		eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+			     GEN9_PGCTL_SSB_EU19_ACK |
+			     GEN9_PGCTL_SSB_EU210_ACK |
+			     GEN9_PGCTL_SSB_EU311_ACK;
+
+		for (s = 0; s < s_max; s++) {
+			if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+				/* skip disabled slice */
+				continue;
+
+			s_tot++;
+			ss_per = INTEL_INFO(dev)->subslice_per_slice;
+			ss_tot += ss_per;
+			for (ss = 0; ss < ss_max; ss++) {
+				unsigned int eu_cnt;
+
+				eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
+						       eu_mask[ss%2]);
+				eu_tot += eu_cnt;
+				eu_per = max(eu_per, eu_cnt);
+			}
+		}
+	}
+	seq_printf(m, " Enabled Slice Total: %u\n", s_tot);
+	seq_printf(m, " Enabled Subslice Total: %u\n", ss_tot);
+	seq_printf(m, " Enabled Subslice Per Slice: %u\n", ss_per);
+	seq_printf(m, " Enabled EU Total: %u\n", eu_tot);
+	seq_printf(m, " Enabled EU Per Subslice: %u\n", eu_per);
+
+	return 0;
+}
+
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
@@ -4487,6 +4689,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
 	{"i915_wa_registers", i915_wa_registers, 0},
 	{"i915_ddb_info", i915_ddb_info, 0},
+	{"i915_sseu_status", i915_sseu_status, 0},
+	{"i915_drrs_status", i915_drrs_status, 0},
 };
 
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)