Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c |   1
 drivers/gpu/drm/i915/gvt/kvmgt.c                 |  51
 drivers/gpu/drm/i915/gvt/mmio_context.c          |   1
 drivers/gpu/drm/i915/gvt/trace.h                 |   2
 drivers/gpu/drm/i915/i915_drv.c                  |  14
 drivers/gpu/drm/i915/i915_drv.h                  |   2
 drivers/gpu/drm/i915/i915_gem_context.c          |   2
 drivers/gpu/drm/i915/i915_oa_cflgt3.c            |   4
 drivers/gpu/drm/i915/i915_oa_cnl.c               |   4
 drivers/gpu/drm/i915/i915_pmu.c                  | 231
 drivers/gpu/drm/i915/i915_pmu.h                  |   6
 drivers/gpu/drm/i915/intel_bios.c                | 105
 drivers/gpu/drm/i915/intel_breadcrumbs.c         |  29
 drivers/gpu/drm/i915/intel_cdclk.c               |   8
 drivers/gpu/drm/i915/intel_engine_cs.c           |  24
 drivers/gpu/drm/i915/intel_ringbuffer.h          |  14
 drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c |   6
 17 files changed, 350 insertions(+), 154 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index e2c3c5ec42d1..c53095b3b0fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -568,6 +568,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
         /* HG _PR3 doesn't seem to work on this A+A weston board */
         { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
         { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+        { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
         { 0, 0, 0, 0, 0 },
 };
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 909499b73d03..021f722e2481 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -733,6 +733,25 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
         return ret == 0 ? count : ret;
 }
 
+static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
+{
+        struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+        struct intel_gvt *gvt = vgpu->gvt;
+        int offset;
+
+        /* Only allow MMIO GGTT entry access */
+        if (index != PCI_BASE_ADDRESS_0)
+                return false;
+
+        offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
+                intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
+
+        return (offset >= gvt->device_info.gtt_start_offset &&
+                offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
+                        true : false;
+}
+
 static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
                         size_t count, loff_t *ppos)
 {
@@ -742,7 +761,21 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
         while (count) {
                 size_t filled;
 
-                if (count >= 4 && !(*ppos % 4)) {
+                /* Only support GGTT entry 8 bytes read */
+                if (count >= 8 && !(*ppos % 8) &&
+                        gtt_entry(mdev, ppos)) {
+                        u64 val;
+
+                        ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+                                        ppos, false);
+                        if (ret <= 0)
+                                goto read_err;
+
+                        if (copy_to_user(buf, &val, sizeof(val)))
+                                goto read_err;
+
+                        filled = 8;
+                } else if (count >= 4 && !(*ppos % 4)) {
                         u32 val;
 
                         ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
@@ -802,7 +835,21 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
         while (count) {
                 size_t filled;
 
-                if (count >= 4 && !(*ppos % 4)) {
+                /* Only support GGTT entry 8 bytes write */
+                if (count >= 8 && !(*ppos % 8) &&
+                        gtt_entry(mdev, ppos)) {
+                        u64 val;
+
+                        if (copy_from_user(&val, buf, sizeof(val)))
+                                goto write_err;
+
+                        ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+                                        ppos, true);
+                        if (ret <= 0)
+                                goto write_err;
+
+                        filled = 8;
+                } else if (count >= 4 && !(*ppos % 4)) {
                         u32 val;
 
                         if (copy_from_user(&val, buf, sizeof(val)))
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 73ad6e90e49d..256f1bb522b7 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -118,6 +118,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
         {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
         {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
         {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+        {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
         {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
         {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
         {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
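A note on the kvmgt.c hunks above: the vfio read/write loops dispatch on the widest naturally aligned access still available, now trying an 8-byte access first (so a GGTT PTE is updated atomically rather than as two 4-byte halves), then falling back to 4/2/1 bytes. Below is a minimal stand-alone sketch of that same dispatch pattern; chunked_copy and its parameters are invented for illustration and are not driver API:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /*
     * Copy "count" bytes that logically start at device offset "pos",
     * using the widest naturally aligned chunk each iteration:
     * 8 bytes first, then 4, 2 and finally 1.
     */
    static size_t chunked_copy(uint8_t *dst, const uint8_t *src,
                               size_t count, uint64_t pos)
    {
            size_t done = 0;

            while (count) {
                    size_t filled;

                    if (count >= 8 && !(pos % 8))
                            filled = 8;
                    else if (count >= 4 && !(pos % 4))
                            filled = 4;
                    else if (count >= 2 && !(pos % 2))
                            filled = 2;
                    else
                            filled = 1;

                    memcpy(dst + done, src + done, filled);
                    pos += filled;
                    count -= filled;
                    done += filled;
            }

            return done;
    }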
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 7a2511538f34..736bd2bc5127 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -333,7 +333,7 @@ TRACE_EVENT(render_mmio,
         TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,
                  unsigned int old_val, unsigned int new_val),
 
-        TP_ARGS(old_id, new_id, action, reg, new_val, old_val),
+        TP_ARGS(old_id, new_id, action, reg, old_val, new_val),
 
         TP_STRUCT__entry(
                 __field(int, old_id)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 173d0095e3b2..2f5209de0391 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1433,19 +1433,7 @@ void i915_driver_unload(struct drm_device *dev)
 
         intel_modeset_cleanup(dev);
 
-        /*
-         * free the memory space allocated for the child device
-         * config parsed from VBT
-         */
-        if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
-                kfree(dev_priv->vbt.child_dev);
-                dev_priv->vbt.child_dev = NULL;
-                dev_priv->vbt.child_dev_num = 0;
-        }
-        kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
-        dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
-        kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
-        dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+        intel_bios_cleanup(dev_priv);
 
         vga_switcheroo_unregister_client(pdev);
         vga_client_register(pdev, NULL, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a42deebedb0f..d307429a5ae0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1349,6 +1349,7 @@ struct intel_vbt_data {
                 u32 size;
                 u8 *data;
                 const u8 *sequence[MIPI_SEQ_MAX];
+                u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
         } dsi;
 
         int crt_ddc_pin;
@@ -3657,6 +3658,7 @@ extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
 
 /* intel_bios.c */
 void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_cleanup(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 648e7536ff51..0c963fcf31ff 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -803,7 +803,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 
         case I915_CONTEXT_PARAM_PRIORITY:
                 {
-                        int priority = args->value;
+                        s64 priority = args->value;
 
                         if (args->size)
                                 ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
index 42ff06fe54a3..792facdb6702 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
 void
 i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
 {
-        strncpy(dev_priv->perf.oa.test_config.uuid,
+        strlcpy(dev_priv->perf.oa.test_config.uuid,
                 "577e8e2c-3fa0-4875-8743-3538d585e3b0",
-                UUID_STRING_LEN);
+                sizeof(dev_priv->perf.oa.test_config.uuid));
         dev_priv->perf.oa.test_config.id = 1;
 
         dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
index ff0ac3627cc4..ba9140c87cc0 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -96,9 +96,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
 void
 i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
 {
-        strncpy(dev_priv->perf.oa.test_config.uuid,
+        strlcpy(dev_priv->perf.oa.test_config.uuid,
                 "db41edd4-d8e7-4730-ad11-b9a2d6833503",
-                UUID_STRING_LEN);
+                sizeof(dev_priv->perf.oa.test_config.uuid));
         dev_priv->perf.oa.test_config.id = 1;
 
         dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
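Both i915_oa_*.c hunks above make the same fix: UUID_STRING_LEN is 36 and the UUID literals are exactly 36 characters, so strncpy() filled the destination without ever writing a NUL terminator, whereas strlcpy() bounded by sizeof() always terminates. A user-space illustration of the difference, assuming (as the sizeof() in the patch implies) the uuid field is UUID_STRING_LEN + 1 bytes; standard C's snprintf() stands in for the kernel's strlcpy():

    #include <stdio.h>
    #include <string.h>

    #define UUID_STRING_LEN 36

    struct test_config {
            char uuid[UUID_STRING_LEN + 1];
    };

    int main(void)
    {
            struct test_config cfg;
            const char *id = "db41edd4-d8e7-4730-ad11-b9a2d6833503";

            /* Unsafe: copies all 36 bytes, never writes the terminator. */
            strncpy(cfg.uuid, id, UUID_STRING_LEN);

            /* Safe equivalent of the strlcpy() fix: always terminated. */
            snprintf(cfg.uuid, sizeof(cfg.uuid), "%s", id);
            printf("%s\n", cfg.uuid);
            return 0;
    }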
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 55a8a1e29424..0e9b98c32b62 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -285,26 +285,41 @@ static u64 count_interrupts(struct drm_i915_private *i915)
         return sum;
 }
 
-static void i915_pmu_event_destroy(struct perf_event *event)
+static void engine_event_destroy(struct perf_event *event)
 {
-        WARN_ON(event->parent);
+        struct drm_i915_private *i915 =
+                container_of(event->pmu, typeof(*i915), pmu.base);
+        struct intel_engine_cs *engine;
+
+        engine = intel_engine_lookup_user(i915,
+                                          engine_event_class(event),
+                                          engine_event_instance(event));
+        if (WARN_ON_ONCE(!engine))
+                return;
+
+        if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
+            intel_engine_supports_stats(engine))
+                intel_disable_engine_stats(engine);
 }
 
-static int engine_event_init(struct perf_event *event)
+static void i915_pmu_event_destroy(struct perf_event *event)
 {
-        struct drm_i915_private *i915 =
-                container_of(event->pmu, typeof(*i915), pmu.base);
+        WARN_ON(event->parent);
 
-        if (!intel_engine_lookup_user(i915, engine_event_class(event),
-                                      engine_event_instance(event)))
-                return -ENODEV;
+        if (is_engine_event(event))
+                engine_event_destroy(event);
+}
 
-        switch (engine_event_sample(event)) {
+static int
+engine_event_status(struct intel_engine_cs *engine,
+                    enum drm_i915_pmu_engine_sample sample)
+{
+        switch (sample) {
         case I915_SAMPLE_BUSY:
         case I915_SAMPLE_WAIT:
                 break;
         case I915_SAMPLE_SEMA:
-                if (INTEL_GEN(i915) < 6)
+                if (INTEL_GEN(engine->i915) < 6)
                         return -ENODEV;
                 break;
         default:
@@ -314,6 +329,30 @@ static int engine_event_init(struct perf_event *event)
         return 0;
 }
 
+static int engine_event_init(struct perf_event *event)
+{
+        struct drm_i915_private *i915 =
+                container_of(event->pmu, typeof(*i915), pmu.base);
+        struct intel_engine_cs *engine;
+        u8 sample;
+        int ret;
+
+        engine = intel_engine_lookup_user(i915, engine_event_class(event),
+                                          engine_event_instance(event));
+        if (!engine)
+                return -ENODEV;
+
+        sample = engine_event_sample(event);
+        ret = engine_event_status(engine, sample);
+        if (ret)
+                return ret;
+
+        if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
+                ret = intel_enable_engine_stats(engine);
+
+        return ret;
+}
+
 static int i915_pmu_event_init(struct perf_event *event)
 {
         struct drm_i915_private *i915 =
@@ -370,7 +409,94 @@ static int i915_pmu_event_init(struct perf_event *event)
         return 0;
 }
 
-static u64 __i915_pmu_event_read(struct perf_event *event)
+static u64 __get_rc6(struct drm_i915_private *i915)
+{
+        u64 val;
+
+        val = intel_rc6_residency_ns(i915,
+                                     IS_VALLEYVIEW(i915) ?
+                                     VLV_GT_RENDER_RC6 :
+                                     GEN6_GT_GFX_RC6);
+
+        if (HAS_RC6p(i915))
+                val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
+
+        if (HAS_RC6pp(i915))
+                val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
+
+        return val;
+}
+
+static u64 get_rc6(struct drm_i915_private *i915, bool locked)
+{
+#if IS_ENABLED(CONFIG_PM)
+        unsigned long flags;
+        u64 val;
+
+        if (intel_runtime_pm_get_if_in_use(i915)) {
+                val = __get_rc6(i915);
+                intel_runtime_pm_put(i915);
+
+                /*
+                 * If we are coming back from being runtime suspended we must
+                 * be careful not to report a larger value than returned
+                 * previously.
+                 */
+
+                if (!locked)
+                        spin_lock_irqsave(&i915->pmu.lock, flags);
+
+                if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+                        i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+                        i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
+                } else {
+                        val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+                }
+
+                if (!locked)
+                        spin_unlock_irqrestore(&i915->pmu.lock, flags);
+        } else {
+                struct pci_dev *pdev = i915->drm.pdev;
+                struct device *kdev = &pdev->dev;
+                unsigned long flags2;
+
+                /*
+                 * We are runtime suspended.
+                 *
+                 * Report the delta from when the device was suspended to now,
+                 * on top of the last known real value, as the approximated RC6
+                 * counter value.
+                 */
+                if (!locked)
+                        spin_lock_irqsave(&i915->pmu.lock, flags);
+
+                spin_lock_irqsave(&kdev->power.lock, flags2);
+
+                if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+                        i915->pmu.suspended_jiffies_last =
+                                                kdev->power.suspended_jiffies;
+
+                val = kdev->power.suspended_jiffies -
+                      i915->pmu.suspended_jiffies_last;
+                val += jiffies - kdev->power.accounting_timestamp;
+
+                spin_unlock_irqrestore(&kdev->power.lock, flags2);
+
+                val = jiffies_to_nsecs(val);
+                val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+                i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+
+                if (!locked)
+                        spin_unlock_irqrestore(&i915->pmu.lock, flags);
+        }
+
+        return val;
+#else
+        return __get_rc6(i915);
+#endif
+}
+
+static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
 {
         struct drm_i915_private *i915 =
                 container_of(event->pmu, typeof(*i915), pmu.base);
@@ -387,7 +513,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
                 if (WARN_ON_ONCE(!engine)) {
                         /* Do nothing */
                 } else if (sample == I915_SAMPLE_BUSY &&
-                           engine->pmu.busy_stats) {
+                           intel_engine_supports_stats(engine)) {
                         val = ktime_to_ns(intel_engine_get_busy_time(engine));
                 } else {
                         val = engine->pmu.sample[sample].cur;
@@ -408,18 +534,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
                         val = count_interrupts(i915);
                         break;
                 case I915_PMU_RC6_RESIDENCY:
-                        intel_runtime_pm_get(i915);
-                        val = intel_rc6_residency_ns(i915,
-                                                     IS_VALLEYVIEW(i915) ?
-                                                     VLV_GT_RENDER_RC6 :
-                                                     GEN6_GT_GFX_RC6);
-                        if (HAS_RC6p(i915))
-                                val += intel_rc6_residency_ns(i915,
-                                                              GEN6_GT_GFX_RC6p);
-                        if (HAS_RC6pp(i915))
-                                val += intel_rc6_residency_ns(i915,
-                                                              GEN6_GT_GFX_RC6pp);
-                        intel_runtime_pm_put(i915);
+                        val = get_rc6(i915, locked);
                         break;
                 }
         }
@@ -434,7 +549,7 @@ static void i915_pmu_event_read(struct perf_event *event)
 
 again:
         prev = local64_read(&hwc->prev_count);
-        new = __i915_pmu_event_read(event);
+        new = __i915_pmu_event_read(event, false);
 
         if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
                 goto again;
@@ -442,12 +557,6 @@ again:
         local64_add(new - prev, &event->count);
 }
 
-static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
-{
-        return intel_engine_supports_stats(engine) &&
-               (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
-}
-
 static void i915_pmu_enable(struct perf_event *event)
 {
         struct drm_i915_private *i915 =
@@ -487,21 +596,7 @@ static void i915_pmu_enable(struct perf_event *event)
                 GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
                 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
 
-                if (engine->pmu.enable_count[sample]++ == 0) {
-                        /*
-                         * Enable engine busy stats tracking if needed or
-                         * alternatively cancel the scheduled disable.
-                         *
-                         * If the delayed disable was pending, cancel it and
-                         * in this case do not enable since it already is.
-                         */
-                        if (engine_needs_busy_stats(engine) &&
-                            !engine->pmu.busy_stats) {
-                                engine->pmu.busy_stats = true;
-                                if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
-                                        intel_enable_engine_stats(engine);
-                        }
-                }
+                engine->pmu.enable_count[sample]++;
         }
 
         /*
@@ -509,19 +604,11 @@ static void i915_pmu_enable(struct perf_event *event)
          * for all listeners. Even when the event was already enabled and has
          * an existing non-zero value.
          */
-        local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+        local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));
 
         spin_unlock_irqrestore(&i915->pmu.lock, flags);
 }
 
-static void __disable_busy_stats(struct work_struct *work)
-{
-        struct intel_engine_cs *engine =
-               container_of(work, typeof(*engine), pmu.disable_busy_stats.work);
-
-        intel_disable_engine_stats(engine);
-}
-
 static void i915_pmu_disable(struct perf_event *event)
 {
         struct drm_i915_private *i915 =
@@ -545,26 +632,8 @@ static void i915_pmu_disable(struct perf_event *event)
                  * Decrement the reference count and clear the enabled
                  * bitmask when the last listener on an event goes away.
                  */
-                if (--engine->pmu.enable_count[sample] == 0) {
+                if (--engine->pmu.enable_count[sample] == 0)
                         engine->pmu.enable &= ~BIT(sample);
-                        if (!engine_needs_busy_stats(engine) &&
-                            engine->pmu.busy_stats) {
-                                engine->pmu.busy_stats = false;
-                                /*
-                                 * We request a delayed disable to handle the
-                                 * rapid on/off cycles on events, which can
-                                 * happen when tools like perf stat start, in a
-                                 * nicer way.
-                                 *
-                                 * In addition, this also helps with busy stats
-                                 * accuracy with background CPU offline/online
-                                 * migration events.
-                                 */
-                                queue_delayed_work(system_wq,
-                                                   &engine->pmu.disable_busy_stats,
-                                                   round_jiffies_up_relative(HZ));
-                        }
-                }
         }
 
         GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
@@ -797,8 +866,6 @@ static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
 
 void i915_pmu_register(struct drm_i915_private *i915)
 {
-        struct intel_engine_cs *engine;
-        enum intel_engine_id id;
         int ret;
 
         if (INTEL_GEN(i915) <= 2) {
@@ -820,10 +887,6 @@ void i915_pmu_register(struct drm_i915_private *i915)
         hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
         i915->pmu.timer.function = i915_sample;
 
-        for_each_engine(engine, i915, id)
-                INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
-                                  __disable_busy_stats);
-
         ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
         if (ret)
                 goto err;
@@ -843,9 +906,6 @@ err:
 
 void i915_pmu_unregister(struct drm_i915_private *i915)
 {
-        struct intel_engine_cs *engine;
-        enum intel_engine_id id;
-
         if (!i915->pmu.base.event_init)
                 return;
 
@@ -853,11 +913,6 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
 
         hrtimer_cancel(&i915->pmu.timer);
 
-        for_each_engine(engine, i915, id) {
-                GEM_BUG_ON(engine->pmu.busy_stats);
-                flush_delayed_work(&engine->pmu.disable_busy_stats);
-        }
-
         i915_pmu_unregister_cpuhp_state(i915);
 
         perf_pmu_unregister(&i915->pmu.base);
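On the get_rc6() rework above: the PMU never lets the reported RC6 residency go backwards across runtime suspend. A fresh hardware read replaces the cached value only when it is at least what was already reported; otherwise the previous estimate is returned again. A reduced stand-alone sketch of that clamp follows; rc6_report and rc6_state are invented names, and the real code additionally builds the estimate from suspended jiffies under the PMU lock:

    #include <stdint.h>

    struct rc6_state {
            uint64_t last_reported; /* highest value handed to userspace */
    };

    /* Return a monotonically non-decreasing view of a counter that can
     * appear to jump backwards after the device resumes. */
    static uint64_t rc6_report(struct rc6_state *s, uint64_t hw_value)
    {
            if (hw_value >= s->last_reported)
                    s->last_reported = hw_value;    /* trust the fresh read */

            return s->last_reported;    /* else repeat the old value */
    }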
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 40c154d13565..bb62df15afa4 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -27,6 +27,8 @@
 enum {
         __I915_SAMPLE_FREQ_ACT = 0,
         __I915_SAMPLE_FREQ_REQ,
+        __I915_SAMPLE_RC6,
+        __I915_SAMPLE_RC6_ESTIMATED,
         __I915_NUM_PMU_SAMPLERS
 };
 
@@ -94,6 +96,10 @@ struct i915_pmu {
          * struct intel_engine_cs.
          */
         struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
+        /**
+         * @suspended_jiffies_last: Cached suspend time from PM core.
+         */
+        unsigned long suspended_jiffies_last;
 };
 
 #ifdef CONFIG_PERF_EVENTS
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f7f771749e48..b49a2df44430 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -947,6 +947,86 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
         return 0;
 }
 
+/*
+ * Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
+ * skip all delay + gpio operands and stop at the first DSI packet op.
+ */
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
+{
+        const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+        int index, len;
+
+        if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+                return 0;
+
+        /* index = 1 to skip sequence byte */
+        for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) {
+                switch (data[index]) {
+                case MIPI_SEQ_ELEM_SEND_PKT:
+                        return index == 1 ? 0 : index;
+                case MIPI_SEQ_ELEM_DELAY:
+                        len = 5; /* 1 byte for operand + uint32 */
+                        break;
+                case MIPI_SEQ_ELEM_GPIO:
+                        len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */
+                        break;
+                default:
+                        return 0;
+                }
+        }
+
+        return 0;
+}
+
+/*
+ * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
+ * The deassert must be done before calling intel_dsi_device_ready, so for
+ * these devices we split the init OTP sequence into a deassert sequence and
+ * the actual init OTP part.
+ */
+static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
+{
+        u8 *init_otp;
+        int len;
+
+        /* Limit this to VLV for now. */
+        if (!IS_VALLEYVIEW(dev_priv))
+                return;
+
+        /* Limit this to v1 vid-mode sequences */
+        if (dev_priv->vbt.dsi.config->is_cmd_mode ||
+            dev_priv->vbt.dsi.seq_version != 1)
+                return;
+
+        /* Only do this if there are otp and assert seqs and no deassert seq */
+        if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+            !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+            dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+                return;
+
+        /* The deassert-sequence ends at the first DSI packet */
+        len = get_init_otp_deassert_fragment_len(dev_priv);
+        if (!len)
+                return;
+
+        DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+
+        /* Copy the fragment, update seq byte and terminate it */
+        init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+        dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+        if (!dev_priv->vbt.dsi.deassert_seq)
+                return;
+        dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+        dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+        /* Use the copy for deassert */
+        dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+                dev_priv->vbt.dsi.deassert_seq;
+        /* Replace the last byte of the fragment with init OTP seq byte */
+        init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
+        /* And make MIPI_SEQ_INIT_OTP point to it */
+        dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+}
+
 static void
 parse_mipi_sequence(struct drm_i915_private *dev_priv,
                     const struct bdb_header *bdb)
@@ -1016,6 +1096,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
         dev_priv->vbt.dsi.size = seq_size;
         dev_priv->vbt.dsi.seq_version = sequence->version;
 
+        fixup_mipi_sequences(dev_priv);
+
         DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
         return;
 
@@ -1589,6 +1671,29 @@ out:
 }
 
 /**
+ * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
+ * @dev_priv: i915 device instance
+ */
+void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+{
+        kfree(dev_priv->vbt.child_dev);
+        dev_priv->vbt.child_dev = NULL;
+        dev_priv->vbt.child_dev_num = 0;
+        kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+        dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+        kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+        dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+        kfree(dev_priv->vbt.dsi.data);
+        dev_priv->vbt.dsi.data = NULL;
+        kfree(dev_priv->vbt.dsi.pps);
+        dev_priv->vbt.dsi.pps = NULL;
+        kfree(dev_priv->vbt.dsi.config);
+        dev_priv->vbt.dsi.config = NULL;
+        kfree(dev_priv->vbt.dsi.deassert_seq);
+        dev_priv->vbt.dsi.deassert_seq = NULL;
+}
+
+/**
  * intel_bios_is_tv_present - is integrated TV present in VBT
  * @dev_priv: i915 device instance
  *
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index bd40fea16b4f..f54ddda9fdad 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -594,29 +594,16 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
         spin_unlock_irq(&b->rb_lock);
 }
 
-static bool signal_valid(const struct drm_i915_gem_request *request)
-{
-        return intel_wait_check_request(&request->signaling.wait, request);
-}
-
 static bool signal_complete(const struct drm_i915_gem_request *request)
 {
         if (!request)
                 return false;
 
-        /* If another process served as the bottom-half it may have already
-         * signalled that this wait is already completed.
-         */
-        if (intel_wait_complete(&request->signaling.wait))
-                return signal_valid(request);
-
-        /* Carefully check if the request is complete, giving time for the
+        /*
+         * Carefully check if the request is complete, giving time for the
          * seqno to be visible or if the GPU hung.
          */
-        if (__i915_request_irq_complete(request))
-                return true;
-
-        return false;
+        return __i915_request_irq_complete(request);
 }
 
 static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
@@ -659,9 +646,13 @@ static int intel_breadcrumbs_signaler(void *arg)
                         request = i915_gem_request_get_rcu(request);
                         rcu_read_unlock();
                         if (signal_complete(request)) {
-                                local_bh_disable();
-                                dma_fence_signal(&request->fence);
-                                local_bh_enable(); /* kick start the tasklets */
+                                if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                                              &request->fence.flags)) {
+                                        local_bh_disable();
+                                        dma_fence_signal(&request->fence);
+                                        GEM_BUG_ON(!i915_gem_request_completed(request));
+                                        local_bh_enable(); /* kick start the tasklets */
+                                }
 
                                 spin_lock_irq(&b->rb_lock);
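Back on the intel_bios.c hunks: a v1 MIPI sequence is a flat byte stream of (opcode, operands) elements, so the deassert fragment is found by walking delay and GPIO elements until the first DSI packet element. A stand-alone walker in the same spirit; the opcode names and values here are invented for the sketch, the real ones come from the VBT definitions:

    #include <stddef.h>
    #include <stdint.h>

    enum { ELEM_END = 0, ELEM_SEND_PKT = 1, ELEM_DELAY = 2, ELEM_GPIO = 3 };

    /*
     * Return the length of the leading delay/GPIO fragment of a sequence,
     * i.e. the index of the first SEND_PKT element, or 0 if the stream
     * starts with a packet, ends first, or contains an unknown element.
     */
    static size_t deassert_fragment_len(const uint8_t *data)
    {
            size_t index, len;

            /* index = 1 skips the leading sequence-number byte */
            for (index = 1; data[index] != ELEM_END; index += len) {
                    switch (data[index]) {
                    case ELEM_SEND_PKT:
                            return index == 1 ? 0 : index;
                    case ELEM_DELAY:
                            len = 5;    /* op byte + u32 delay */
                            break;
                    case ELEM_GPIO:
                            len = 3;    /* op byte + gpio_nr + value */
                            break;
                    default:
                            return 0;
                    }
            }

            return 0;
    }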
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 5dc118f26b51..1704c8897afd 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1952,6 +1952,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
         if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
                 min_cdclk = max(2 * 96000, min_cdclk);
 
+        /*
+         * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+         * than 320000 kHz.
+         */
+        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
+            IS_VALLEYVIEW(dev_priv))
+                min_cdclk = max(320000, min_cdclk);
+
         if (min_cdclk > dev_priv->max_cdclk_freq) {
                 DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
                               min_cdclk, dev_priv->max_cdclk_freq);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index d790bdc227ff..fa960cfd2764 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1458,7 +1458,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
         struct drm_i915_private *dev_priv = engine->i915;
         bool idle = true;
 
-        intel_runtime_pm_get(dev_priv);
+        /* If the whole device is asleep, the engine must be idle */
+        if (!intel_runtime_pm_get_if_in_use(dev_priv))
+                return true;
 
         /* First check that no commands are left in the ring */
         if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
@@ -1943,16 +1945,22 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
  */
 int intel_enable_engine_stats(struct intel_engine_cs *engine)
 {
+        struct intel_engine_execlists *execlists = &engine->execlists;
         unsigned long flags;
+        int err = 0;
 
         if (!intel_engine_supports_stats(engine))
                 return -ENODEV;
 
+        tasklet_disable(&execlists->tasklet);
         spin_lock_irqsave(&engine->stats.lock, flags);
-        if (engine->stats.enabled == ~0)
-                goto busy;
+
+        if (unlikely(engine->stats.enabled == ~0)) {
+                err = -EBUSY;
+                goto unlock;
+        }
+
         if (engine->stats.enabled++ == 0) {
-                struct intel_engine_execlists *execlists = &engine->execlists;
                 const struct execlist_port *port = execlists->port;
                 unsigned int num_ports = execlists_num_ports(execlists);
 
@@ -1967,14 +1975,12 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
                 if (engine->stats.active)
                         engine->stats.start = engine->stats.enabled_at;
         }
-        spin_unlock_irqrestore(&engine->stats.lock, flags);
-
-        return 0;
 
-busy:
+unlock:
         spin_unlock_irqrestore(&engine->stats.lock, flags);
+        tasklet_enable(&execlists->tasklet);
 
-        return -EBUSY;
+        return err;
 }
 
 static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
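The intel_enable_engine_stats() rework above funnels both exits through a single unlock path and keeps the execlists tasklet quiesced while the initial busy state is sampled. The same first-user-initializes, refcounted-enable shape, reduced to a user-space sketch with a pthread mutex standing in for the spinlock plus tasklet_disable() pair (all names invented):

    #include <errno.h>
    #include <pthread.h>

    struct stats {
            pthread_mutex_t lock;
            unsigned int enabled;   /* reference count; ~0u is saturated */
    };

    /* First enable snapshots the current state; later calls only
     * take another reference. Single unlock path for all exits. */
    static int stats_enable(struct stats *s)
    {
            int err = 0;

            pthread_mutex_lock(&s->lock);

            if (s->enabled == ~0u) {
                    err = -EBUSY;   /* refcount would overflow */
                    goto unlock;
            }

            if (s->enabled++ == 0) {
                    /* ... sample the initial busy state here ... */
            }

    unlock:
            pthread_mutex_unlock(&s->lock);
            return err;
    }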
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c5ff203e42d6..a0e7a6c2a57c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -366,20 +366,6 @@ struct intel_engine_cs {
                  */
 #define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
                 struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
-                /**
-                 * @busy_stats: Has enablement of engine stats tracking been
-                 * requested.
-                 */
-                bool busy_stats;
-                /**
-                 * @disable_busy_stats: Work item for busy stats disabling.
-                 *
-                 * Same as with @enable_busy_stats action, with the difference
-                 * that we delay it in case there are rapid enable-disable
-                 * actions, which can happen during tool startup (like perf
-                 * stat).
-                 */
-                struct delayed_work disable_busy_stats;
         } pmu;
 
         /*
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index bf62303571b3..3695cde669f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -301,7 +301,7 @@ nvkm_therm_attr_set(struct nvkm_therm *therm,
 void
 nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
 {
-        if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
+        if (!therm || !therm->func->clkgate_enable || !therm->clkgating_enabled)
                 return;
 
         nvkm_debug(&therm->subdev,
@@ -312,7 +312,7 @@ nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
 void
 nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend)
 {
-        if (!therm->func->clkgate_fini || !therm->clkgating_enabled)
+        if (!therm || !therm->func->clkgate_fini || !therm->clkgating_enabled)
                 return;
 
         nvkm_debug(&therm->subdev,
@@ -395,7 +395,7 @@ void
 nvkm_therm_clkgate_init(struct nvkm_therm *therm,
                         const struct nvkm_therm_clkgate_pack *p)
 {
-        if (!therm->func->clkgate_init || !therm->clkgating_enabled)
+        if (!therm || !therm->func->clkgate_init || !therm->clkgating_enabled)
                 return;
 
         therm->func->clkgate_init(therm, p);
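The nouveau hunks rely on C's short-circuit evaluation of &&: putting the !therm test first guarantees therm->func is never dereferenced on boards that lack a therm subdevice. The guard pattern in isolation, with types invented for the sketch:

    #include <stdbool.h>

    struct therm_func { void (*clkgate_enable)(void); };
    struct therm { const struct therm_func *func; bool clkgating_enabled; };

    static void clkgate_enable(struct therm *therm)
    {
            /* NULL check must come first: && stops before the dereference. */
            if (!therm || !therm->func->clkgate_enable || !therm->clkgating_enabled)
                    return;

            therm->func->clkgate_enable();
    }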