author     Daniel Vetter <daniel.vetter@ffwll.ch>    2014-09-11 16:46:53 +0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2014-09-11 16:46:53 +0400
commit     336879b1da97fffc097f77c6d6f818660f2826f0
tree       4ddb4d1c5d2b67fb096c72e41d2a03b01a605041 /drivers/gpu/drm/i915
parent     3d3cbd84300e7be1e53083cac0f6f9c12978ecb4
parent     fdcaa1dbb7c6ed419b10fb8cdb5001ab0a00538f
Merge remote-tracking branch 'airlied/drm-next' into topic/vblank-rework
Dave asked me to do the backmerge before sending him the revised pull
request, so here we go. Nothing fancy in the conflicts, just a few
things changed right next to each other.
Conflicts:
drivers/gpu/drm/drm_irq.c
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
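For context, the backmerge described above follows the usual drm workflow: merge the maintainer's branch back into the topic branch, resolve the conflict by hand, and record the merge. A minimal sketch of that sequence, assuming a remote named 'airlied' as in the branch reference above (the exact commands the author ran are not recorded here):

    git fetch airlied
    git checkout topic/vblank-rework
    git merge airlied/drm-next          # stops on the conflict in drm_irq.c
    $EDITOR drivers/gpu/drm/drm_irq.c   # resolve the conflicting hunks by hand
    git add drivers/gpu/drm/drm_irq.c
    git commit --signoff                # conclude the merge with a Signed-off-by line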
Diffstat (limited to 'drivers/gpu/drm/i915')
40 files changed, 4735 insertions, 1381 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 91bd167e1cb7..c1dd485aeb6c 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -31,6 +31,7 @@ i915-y += i915_cmd_parser.o \ i915_gpu_error.o \ i915_irq.o \ i915_trace_points.o \ + intel_lrc.o \ intel_ringbuffer.o \ intel_uncore.o diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index dea99d92fb4a..c45856bcc8b9 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -842,8 +842,6 @@ finish: */ bool i915_needs_cmd_parser(struct intel_engine_cs *ring) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; - if (!ring->needs_cmd_parser) return false; @@ -852,7 +850,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring) * disabled. That will cause all of the parser's PPGTT checks to * fail. For now, disable parsing when PPGTT is off. */ - if (!dev_priv->mm.aliasing_ppgtt) + if (USES_PPGTT(ring->dev)) return false; return (i915.enable_cmd_parser == 1); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 9e737b771c40..6c82bdaa0822 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -333,7 +333,7 @@ static int per_file_stats(int id, void *ptr, void *data) } ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base); - if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv) + if (ppgtt->file_priv != stats->file_priv) continue; if (obj->ring) /* XXX per-vma statistic */ @@ -703,6 +703,12 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } for_each_pipe(pipe) { + if (!intel_display_power_enabled(dev_priv, + POWER_DOMAIN_PIPE(pipe))) { + seq_printf(m, "Pipe %c power disabled\n", + pipe_name(pipe)); + continue; + } seq_printf(m, "Pipe %c IMR:\t%08x\n", pipe_name(pipe), I915_READ(GEN8_DE_PIPE_IMR(pipe))); @@ -1433,6 +1439,47 @@ static int i915_fbc_status(struct seq_file *m, void *unused) return 0; } +static int i915_fbc_fc_get(void *data, u64 *val) +{ + struct drm_device *dev = data; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) + return -ENODEV; + + drm_modeset_lock_all(dev); + *val = dev_priv->fbc.false_color; + drm_modeset_unlock_all(dev); + + return 0; +} + +static int i915_fbc_fc_set(void *data, u64 val) +{ + struct drm_device *dev = data; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg; + + if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) + return -ENODEV; + + drm_modeset_lock_all(dev); + + reg = I915_READ(ILK_DPFC_CONTROL); + dev_priv->fbc.false_color = val; + + I915_WRITE(ILK_DPFC_CONTROL, val ? 
+ (reg | FBC_CTL_FALSE_COLOR) : + (reg & ~FBC_CTL_FALSE_COLOR)); + + drm_modeset_unlock_all(dev); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops, + i915_fbc_fc_get, i915_fbc_fc_set, + "%llu\n"); + static int i915_ips_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; @@ -1630,6 +1677,14 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) return 0; } +static void describe_ctx_ringbuf(struct seq_file *m, + struct intel_ringbuffer *ringbuf) +{ + seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)", + ringbuf->space, ringbuf->head, ringbuf->tail, + ringbuf->last_retired_head); +} + static int i915_context_status(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; @@ -1656,16 +1711,168 @@ static int i915_context_status(struct seq_file *m, void *unused) } list_for_each_entry(ctx, &dev_priv->context_list, link) { - if (ctx->legacy_hw_ctx.rcs_state == NULL) + if (!i915.enable_execlists && + ctx->legacy_hw_ctx.rcs_state == NULL) continue; seq_puts(m, "HW context "); describe_ctx(m, ctx); - for_each_ring(ring, dev_priv, i) + for_each_ring(ring, dev_priv, i) { if (ring->default_context == ctx) - seq_printf(m, "(default context %s) ", ring->name); + seq_printf(m, "(default context %s) ", + ring->name); + } + + if (i915.enable_execlists) { + seq_putc(m, '\n'); + for_each_ring(ring, dev_priv, i) { + struct drm_i915_gem_object *ctx_obj = + ctx->engine[i].state; + struct intel_ringbuffer *ringbuf = + ctx->engine[i].ringbuf; + + seq_printf(m, "%s: ", ring->name); + if (ctx_obj) + describe_obj(m, ctx_obj); + if (ringbuf) + describe_ctx_ringbuf(m, ringbuf); + seq_putc(m, '\n'); + } + } else { + describe_obj(m, ctx->legacy_hw_ctx.rcs_state); + } + + seq_putc(m, '\n'); + } + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int i915_dump_lrc(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *ring; + struct intel_context *ctx; + int ret, i; + + if (!i915.enable_execlists) { + seq_printf(m, "Logical Ring Contexts are disabled\n"); + return 0; + } + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + list_for_each_entry(ctx, &dev_priv->context_list, link) { + for_each_ring(ring, dev_priv, i) { + struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state; + + if (ring->default_context == ctx) + continue; + + if (ctx_obj) { + struct page *page = i915_gem_object_get_page(ctx_obj, 1); + uint32_t *reg_state = kmap_atomic(page); + int j; + + seq_printf(m, "CONTEXT: %s %u\n", ring->name, + intel_execlists_ctx_id(ctx_obj)); + + for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { + seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n", + i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4), + reg_state[j], reg_state[j + 1], + reg_state[j + 2], reg_state[j + 3]); + } + kunmap_atomic(reg_state); + + seq_putc(m, '\n'); + } + } + } + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int i915_execlists(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *ring; + u32 status_pointer; + u8 read_pointer; + u8 write_pointer; + u32 status; + u32 ctx_id; + struct list_head *cursor; + int ring_id, i; + int ret; + + 
if (!i915.enable_execlists) { + seq_puts(m, "Logical Ring Contexts are disabled\n"); + return 0; + } + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + for_each_ring(ring, dev_priv, ring_id) { + struct intel_ctx_submit_request *head_req = NULL; + int count = 0; + unsigned long flags; + + seq_printf(m, "%s\n", ring->name); + + status = I915_READ(RING_EXECLIST_STATUS(ring)); + ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4); + seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n", + status, ctx_id); + + status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); + seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer); + + read_pointer = ring->next_context_status_buffer; + write_pointer = status_pointer & 0x07; + if (read_pointer > write_pointer) + write_pointer += 6; + seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n", + read_pointer, write_pointer); + + for (i = 0; i < 6; i++) { + status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i); + ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4); + + seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n", + i, status, ctx_id); + } + + spin_lock_irqsave(&ring->execlist_lock, flags); + list_for_each(cursor, &ring->execlist_queue) + count++; + head_req = list_first_entry_or_null(&ring->execlist_queue, + struct intel_ctx_submit_request, execlist_link); + spin_unlock_irqrestore(&ring->execlist_lock, flags); + + seq_printf(m, "\t%d requests in queue\n", count); + if (head_req) { + struct drm_i915_gem_object *ctx_obj; + + ctx_obj = head_req->ctx->engine[ring_id].state; + seq_printf(m, "\tHead request id: %u\n", + intel_execlists_ctx_id(ctx_obj)); + seq_printf(m, "\tHead request tail: %u\n", + head_req->tail); + } - describe_obj(m, ctx->legacy_hw_ctx.rcs_state); seq_putc(m, '\n'); } @@ -1774,7 +1981,13 @@ static int per_file_ctx(int id, void *ptr, void *data) { struct intel_context *ctx = ptr; struct seq_file *m = data; - struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx); + struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; + + if (!ppgtt) { + seq_printf(m, " no ppgtt for context %d\n", + ctx->user_handle); + return 0; + } if (i915_gem_context_is_default(ctx)) seq_puts(m, " default context:\n"); @@ -1834,8 +2047,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); ppgtt->debug_dump(ppgtt, m); - } else - return; + } list_for_each_entry_reverse(file, &dev->filelist, lhead) { struct drm_i915_file_private *file_priv = file->driver_priv; @@ -2667,8 +2879,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, *source = INTEL_PIPE_CRC_SOURCE_PIPE; drm_modeset_lock_all(dev); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { if (!encoder->base.crtc) continue; @@ -3923,6 +4134,8 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_opregion", i915_opregion, 0}, {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, {"i915_context_status", i915_context_status, 0}, + {"i915_dump_lrc", i915_dump_lrc, 0}, + {"i915_execlists", i915_execlists, 0}, {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, {"i915_swizzle_info", i915_swizzle_info, 0}, {"i915_ppgtt_info", i915_ppgtt_info, 0}, @@ -3957,6 +4170,7 @@ static const struct i915_debugfs_files { {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, + 
{"i915_fbc_false_color", &i915_fbc_fc_fops}, }; void intel_display_crc_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2e7f03ad5ee2..689c3326636f 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -196,7 +196,7 @@ static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init) struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int ret; - master_priv->sarea = drm_getsarea(dev); + master_priv->sarea = drm_legacy_getsarea(dev); if (master_priv->sarea) { master_priv->sarea_priv = (drm_i915_sarea_t *) ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); @@ -999,7 +999,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = HAS_WT(dev); break; case I915_PARAM_HAS_ALIASING_PPGTT: - value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev); + value = USES_PPGTT(dev); break; case I915_PARAM_HAS_WAIT_TIMEOUT: value = 1; @@ -1350,8 +1350,6 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_irq; - INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); - intel_modeset_gem_init(dev); /* Always safe in the mode setting case. */ @@ -1388,7 +1386,6 @@ cleanup_gem: i915_gem_cleanup_ringbuffer(dev); i915_gem_context_fini(dev); mutex_unlock(&dev->struct_mutex); - WARN_ON(dev_priv->mm.aliasing_ppgtt); cleanup_irq: drm_irq_uninstall(dev); cleanup_gem_stolen: @@ -1603,9 +1600,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev->dev_private = dev_priv; dev_priv->dev = dev; - /* copy initial configuration to dev_priv->info */ + /* Setup the write-once "constant" device info */ device_info = (struct intel_device_info *)&dev_priv->info; - *device_info = *info; + memcpy(device_info, info, sizeof(dev_priv->info)); + device_info->device_id = dev->pdev->device; spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); @@ -1817,7 +1815,7 @@ out_mtrrfree: arch_phys_wc_del(dev_priv->gtt.mtrr); io_mapping_free(dev_priv->gtt.mappable); out_gtt: - dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); + i915_global_gtt_cleanup(dev); out_regs: intel_uncore_fini(dev); pci_iounmap(dev->pdev, dev_priv->regs); @@ -1864,7 +1862,6 @@ int i915_driver_unload(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_fbdev_fini(dev); intel_modeset_cleanup(dev); - cancel_work_sync(&dev_priv->console_resume_work); /* * free the memory space allocated for the child device @@ -1897,7 +1894,6 @@ int i915_driver_unload(struct drm_device *dev) mutex_lock(&dev->struct_mutex); i915_gem_cleanup_ringbuffer(dev); i915_gem_context_fini(dev); - WARN_ON(dev_priv->mm.aliasing_ppgtt); mutex_unlock(&dev->struct_mutex); i915_gem_cleanup_stolen(dev); @@ -1905,8 +1901,6 @@ int i915_driver_unload(struct drm_device *dev) i915_free_hws(dev); } - WARN_ON(!list_empty(&dev_priv->vm_list)); - drm_vblank_cleanup(dev); intel_teardown_gmbus(dev); @@ -1916,7 +1910,7 @@ int i915_driver_unload(struct drm_device *dev) destroy_workqueue(dev_priv->wq); pm_qos_remove_request(&dev_priv->pm_qos); - dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); + i915_global_gtt_cleanup(dev); intel_uncore_fini(dev); if (dev_priv->regs != NULL) @@ -1981,6 +1975,9 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) i915_gem_context_close(dev, file); i915_gem_release(dev, file); mutex_unlock(&dev->struct_mutex); + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + intel_modeset_preclose(dev, file); 
} void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6c4b25ce8bb0..cdd95956811d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -481,6 +481,14 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) if (i915.semaphores >= 0) return i915.semaphores; + /* TODO: make semaphores and Execlists play nicely together */ + if (i915.enable_execlists) + return false; + + /* Until we get further testing... */ + if (IS_GEN8(dev)) + return false; + #ifdef CONFIG_INTEL_IOMMU /* Enable semaphores on SNB when IO remapping is off */ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) @@ -490,6 +498,40 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) return true; } +void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->irq_lock); + + dev_priv->long_hpd_port_mask = 0; + dev_priv->short_hpd_port_mask = 0; + dev_priv->hpd_event_bits = 0; + + spin_unlock_irq(&dev_priv->irq_lock); + + cancel_work_sync(&dev_priv->dig_port_work); + cancel_work_sync(&dev_priv->hotplug_work); + cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work); +} + +static void intel_suspend_encoders(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct drm_encoder *encoder; + + drm_modeset_lock_all(dev); + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + + if (intel_encoder->suspend) + intel_encoder->suspend(intel_encoder); + } + drm_modeset_unlock_all(dev); +} + +static int intel_suspend_complete(struct drm_i915_private *dev_priv); +static int intel_resume_prepare(struct drm_i915_private *dev_priv, + bool rpm_resume); + static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -534,6 +576,9 @@ static int i915_drm_freeze(struct drm_device *dev) flush_delayed_work(&dev_priv->rps.delayed_resume_work); intel_runtime_pm_disable_interrupts(dev); + intel_hpd_cancel_work(dev_priv); + + intel_suspend_encoders(dev_priv); intel_suspend_gt_powersave(dev); @@ -554,9 +599,7 @@ static int i915_drm_freeze(struct drm_device *dev) intel_uncore_forcewake_reset(dev, false); intel_opregion_fini(dev); - console_lock(); - intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED); - console_unlock(); + intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); dev_priv->suspend_count++; @@ -595,30 +638,20 @@ int i915_suspend(struct drm_device *dev, pm_message_t state) return 0; } -void intel_console_resume(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, - console_resume_work); - struct drm_device *dev = dev_priv->dev; - - console_lock(); - intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING); - console_unlock(); -} - static int i915_drm_thaw_early(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + int ret; - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) - hsw_disable_pc8(dev_priv); + ret = intel_resume_prepare(dev_priv, false); + if (ret) + DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret); intel_uncore_early_sanitize(dev, true); intel_uncore_sanitize(dev); intel_power_domains_init_hw(dev_priv); - return 0; + return ret; } static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) @@ -677,17 +710,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool 
restore_gtt_mappings) intel_opregion_init(dev); - /* - * The console lock can be pretty contented on resume due - * to all the printk activity. Try to keep it out of the hot - * path of resume if possible. - */ - if (console_trylock()) { - intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING); - console_unlock(); - } else { - schedule_work(&dev_priv->console_resume_work); - } + intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); mutex_lock(&dev_priv->modeset_restore_lock); dev_priv->modeset_restore = MODESET_DONE; @@ -904,6 +927,7 @@ static int i915_pm_suspend_late(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); struct drm_i915_private *dev_priv = drm_dev->dev_private; + int ret; /* * We have a suspedn ordering issue with the snd-hda driver also @@ -917,13 +941,16 @@ static int i915_pm_suspend_late(struct device *dev) if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev)) - hsw_enable_pc8(dev_priv); + ret = intel_suspend_complete(dev_priv); - pci_disable_device(pdev); - pci_set_power_state(pdev, PCI_D3hot); + if (ret) + DRM_ERROR("Suspend complete failed: %d\n", ret); + else { + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + } - return 0; + return ret; } static int i915_pm_resume_early(struct device *dev) @@ -979,23 +1006,26 @@ static int i915_pm_poweroff(struct device *dev) return i915_drm_freeze(drm_dev); } -static int hsw_runtime_suspend(struct drm_i915_private *dev_priv) +static int hsw_suspend_complete(struct drm_i915_private *dev_priv) { hsw_enable_pc8(dev_priv); return 0; } -static int snb_runtime_resume(struct drm_i915_private *dev_priv) +static int snb_resume_prepare(struct drm_i915_private *dev_priv, + bool rpm_resume) { struct drm_device *dev = dev_priv->dev; - intel_init_pch_refclk(dev); + if (rpm_resume) + intel_init_pch_refclk(dev); return 0; } -static int hsw_runtime_resume(struct drm_i915_private *dev_priv) +static int hsw_resume_prepare(struct drm_i915_private *dev_priv, + bool rpm_resume) { hsw_disable_pc8(dev_priv); @@ -1291,7 +1321,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); } -static int vlv_runtime_suspend(struct drm_i915_private *dev_priv) +static int vlv_suspend_complete(struct drm_i915_private *dev_priv) { u32 mask; int err; @@ -1331,7 +1361,8 @@ err1: return err; } -static int vlv_runtime_resume(struct drm_i915_private *dev_priv) +static int vlv_resume_prepare(struct drm_i915_private *dev_priv, + bool rpm_resume) { struct drm_device *dev = dev_priv->dev; int err; @@ -1356,8 +1387,10 @@ static int vlv_runtime_resume(struct drm_i915_private *dev_priv) vlv_check_no_gt_access(dev_priv); - intel_init_clock_gating(dev); - i915_gem_restore_fences(dev); + if (rpm_resume) { + intel_init_clock_gating(dev); + i915_gem_restore_fences(dev); + } return ret; } @@ -1372,7 +1405,9 @@ static int intel_runtime_suspend(struct device *device) if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) return -ENODEV; - WARN_ON(!HAS_RUNTIME_PM(dev)); + if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) + return -ENODEV; + assert_force_wake_inactive(dev_priv); DRM_DEBUG_KMS("Suspending device\n"); @@ -1409,17 +1444,7 @@ static int intel_runtime_suspend(struct device *device) cancel_work_sync(&dev_priv->rps.work); intel_runtime_pm_disable_interrupts(dev); - if (IS_GEN6(dev)) { - ret = 0; - } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { - ret = 
hsw_runtime_suspend(dev_priv); - } else if (IS_VALLEYVIEW(dev)) { - ret = vlv_runtime_suspend(dev_priv); - } else { - ret = -ENODEV; - WARN_ON(1); - } - + ret = intel_suspend_complete(dev_priv); if (ret) { DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); intel_runtime_pm_restore_interrupts(dev); @@ -1450,24 +1475,15 @@ static int intel_runtime_resume(struct device *device) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - WARN_ON(!HAS_RUNTIME_PM(dev)); + if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) + return -ENODEV; DRM_DEBUG_KMS("Resuming device\n"); intel_opregion_notify_adapter(dev, PCI_D0); dev_priv->pm.suspended = false; - if (IS_GEN6(dev)) { - ret = snb_runtime_resume(dev_priv); - } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { - ret = hsw_runtime_resume(dev_priv); - } else if (IS_VALLEYVIEW(dev)) { - ret = vlv_runtime_resume(dev_priv); - } else { - WARN_ON(1); - ret = -ENODEV; - } - + ret = intel_resume_prepare(dev_priv, true); /* * No point of rolling back things in case of an error, as the best * we can do is to hope that things will still work (and disable RPM). @@ -1486,6 +1502,48 @@ static int intel_runtime_resume(struct device *device) return ret; } +/* + * This function implements common functionality of runtime and system + * suspend sequence. + */ +static int intel_suspend_complete(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + int ret; + + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + ret = hsw_suspend_complete(dev_priv); + else if (IS_VALLEYVIEW(dev)) + ret = vlv_suspend_complete(dev_priv); + else + ret = 0; + + return ret; +} + +/* + * This function implements common functionality of runtime and system + * resume sequence. Variable rpm_resume used for implementing different + * code paths. 
+ */ +static int intel_resume_prepare(struct drm_i915_private *dev_priv, + bool rpm_resume) +{ + struct drm_device *dev = dev_priv->dev; + int ret; + + if (IS_GEN6(dev)) + ret = snb_resume_prepare(dev_priv, rpm_resume); + else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + ret = hsw_resume_prepare(dev_priv, rpm_resume); + else if (IS_VALLEYVIEW(dev)) + ret = vlv_resume_prepare(dev_priv, rpm_resume); + else + ret = 0; + + return ret; +} + static const struct dev_pm_ops i915_pm_ops = { .suspend = i915_pm_suspend, .suspend_late = i915_pm_suspend_late, @@ -1535,6 +1593,7 @@ static struct drm_driver driver = { .lastclose = i915_driver_lastclose, .preclose = i915_driver_preclose, .postclose = i915_driver_postclose, + .set_busid = drm_pci_set_busid, /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ .suspend = i915_suspend, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5de27f9b8c26..bcf8783dbc2e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -35,6 +35,7 @@ #include "i915_reg.h" #include "intel_bios.h" #include "intel_ringbuffer.h" +#include "intel_lrc.h" #include "i915_gem_gtt.h" #include <linux/io-mapping.h> #include <linux/i2c.h> @@ -53,7 +54,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20140620" +#define DRIVER_DATE "20140822" enum pipe { INVALID_PIPE = -1, @@ -171,6 +172,11 @@ enum hpd_pin { #define for_each_intel_crtc(dev, intel_crtc) \ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) +#define for_each_intel_encoder(dev, intel_encoder) \ + list_for_each_entry(intel_encoder, \ + &(dev)->mode_config.encoder_list, \ + base.head) + #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ if ((intel_encoder)->base.crtc == (__crtc)) @@ -197,10 +203,13 @@ enum intel_dpll_id { #define I915_NUM_PLLS 2 struct intel_dpll_hw_state { + /* i9xx, pch plls */ uint32_t dpll; uint32_t dpll_md; uint32_t fp0; uint32_t fp1; + + /* hsw, bdw */ uint32_t wrpll; }; @@ -314,6 +323,7 @@ struct drm_i915_error_state { u32 eir; u32 pgtbl_er; u32 ier; + u32 gtier[4]; u32 ccid; u32 derrmr; u32 forcewake; @@ -386,6 +396,7 @@ struct drm_i915_error_state { pid_t pid; char comm[TASK_COMM_LEN]; } ring[I915_NUM_RINGS]; + struct drm_i915_error_buffer { u32 size; u32 name; @@ -404,6 +415,7 @@ struct drm_i915_error_state { } **active_bo, **pinned_bo; u32 *active_bo_count, *pinned_bo_count; + u32 vm_count; }; struct intel_connector; @@ -549,6 +561,7 @@ struct intel_uncore { struct intel_device_info { u32 display_mmio_offset; + u16 device_id; u8 num_pipes:3; u8 num_sprites[I915_MAX_PIPES]; u8 gen; @@ -613,13 +626,20 @@ struct intel_context { uint8_t remap_slice; struct drm_i915_file_private *file_priv; struct i915_ctx_hang_stats hang_stats; - struct i915_address_space *vm; + struct i915_hw_ppgtt *ppgtt; + /* Legacy ring buffer submission */ struct { struct drm_i915_gem_object *rcs_state; bool initialized; } legacy_hw_ctx; + /* Execlists */ + struct { + struct drm_i915_gem_object *state; + struct intel_ringbuffer *ringbuf; + } engine[I915_NUM_RINGS]; + struct list_head link; }; @@ -633,6 +653,8 @@ struct i915_fbc { struct drm_mm_node compressed_fb; struct drm_mm_node *compressed_llb; + bool false_color; + struct intel_fbc_work { struct delayed_work work; struct drm_crtc *crtc; @@ -972,7 +994,7 @@ struct intel_ilk_power_mgmt { unsigned long last_time1; unsigned long chipset_power; u64 last_count2; - 
struct timespec last_time2; + u64 last_time2; unsigned long gfx_power; u8 corr; @@ -1226,6 +1248,12 @@ enum modeset_restore { }; struct ddi_vbt_port_info { + /* + * This is an index in the HDMI/DVI DDI buffer translation table. + * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't + * populate this field. + */ +#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff uint8_t hdmi_level_shift; uint8_t supports_dvi:1; @@ -1457,7 +1485,7 @@ struct drm_i915_private { } hpd_mark; } hpd_stats[HPD_NUM_PINS]; u32 hpd_event_bits; - struct timer_list hotplug_reenable_timer; + struct delayed_work hotplug_reenable_work; struct i915_fbc fbc; struct i915_drrs drrs; @@ -1560,14 +1588,9 @@ struct drm_i915_private { #ifdef CONFIG_DRM_I915_FBDEV /* list of fbdev register on this device */ struct intel_fbdev *fbdev; + struct work_struct fbdev_suspend_work; #endif - /* - * The console may be contended at resume, but we don't - * want it to block on it. - */ - struct work_struct console_resume_work; - struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; @@ -1619,6 +1642,20 @@ struct drm_i915_private { /* Old ums support infrastructure, same warning applies. */ struct i915_ums_state ums; + /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ + struct { + int (*do_execbuf)(struct drm_device *dev, struct drm_file *file, + struct intel_engine_cs *ring, + struct intel_context *ctx, + struct drm_i915_gem_execbuffer2 *args, + struct list_head *vmas, + struct drm_i915_gem_object *batch_obj, + u64 exec_start, u32 flags); + int (*init_rings)(struct drm_device *dev); + void (*cleanup_ring)(struct intel_engine_cs *ring); + void (*stop_ring)(struct intel_engine_cs *ring); + } gt; + /* * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch * will be rejected. Instead look for a better place. @@ -1760,13 +1797,6 @@ struct drm_i915_gem_object { * Only honoured if hardware has relevant pte bit */ unsigned long gt_ro:1; - - /* - * Is the GPU currently using a fence to access this buffer, - */ - unsigned int pending_fenced_gpu_access:1; - unsigned int fenced_gpu_access:1; - unsigned int cache_level:3; unsigned int has_aliasing_ppgtt_mapping:1; @@ -1970,51 +2000,63 @@ struct drm_i915_cmd_table { int count; }; -#define INTEL_INFO(dev) (&to_i915(dev)->info) - -#define IS_I830(dev) ((dev)->pdev->device == 0x3577) -#define IS_845G(dev) ((dev)->pdev->device == 0x2562) +/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. 
*/ +#define __I915__(p) ({ \ + struct drm_i915_private *__p; \ + if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \ + __p = (struct drm_i915_private *)p; \ + else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \ + __p = to_i915((struct drm_device *)p); \ + else \ + BUILD_BUG(); \ + __p; \ +}) +#define INTEL_INFO(p) (&__I915__(p)->info) +#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) + +#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) +#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) -#define IS_I865G(dev) ((dev)->pdev->device == 0x2572) +#define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572) #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) -#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592) -#define IS_I945G(dev) ((dev)->pdev->device == 0x2772) +#define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592) +#define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772) #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) -#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42) +#define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42) #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) -#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001) -#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011) +#define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001) +#define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011) #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) -#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046) +#define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046) #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) -#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \ - (dev)->pdev->device == 0x0152 || \ - (dev)->pdev->device == 0x015a) -#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \ - (dev)->pdev->device == 0x0106 || \ - (dev)->pdev->device == 0x010A) +#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \ + INTEL_DEVID(dev) == 0x0152 || \ + INTEL_DEVID(dev) == 0x015a) +#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \ + INTEL_DEVID(dev) == 0x0106 || \ + INTEL_DEVID(dev) == 0x010A) #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ - ((dev)->pdev->device & 0xFF00) == 0x0C00) + (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ - (((dev)->pdev->device & 0xf) == 0x2 || \ - ((dev)->pdev->device & 0xf) == 0x6 || \ - ((dev)->pdev->device & 0xf) == 0xe)) + ((INTEL_DEVID(dev) & 0xf) == 0x2 || \ + (INTEL_DEVID(dev) & 0xf) == 0x6 || \ + (INTEL_DEVID(dev) & 0xf) == 0xe)) #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ - ((dev)->pdev->device & 0xFF00) == 0x0A00) + (INTEL_DEVID(dev) & 0xFF00) == 0x0A00) #define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ - ((dev)->pdev->device & 0x00F0) == 0x0020) + (INTEL_DEVID(dev) & 0x00F0) == 0x0020) /* ULX machines are also considered ULT. 
*/ -#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \ - (dev)->pdev->device == 0x0A1E) +#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \ + INTEL_DEVID(dev) == 0x0A1E) #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) /* @@ -2046,10 +2088,11 @@ struct drm_i915_cmd_table { #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) +#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8) #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6) #define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev)) -#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false) -#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true) +#define USES_PPGTT(dev) (i915.enable_ppgtt) +#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2) #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) @@ -2133,6 +2176,7 @@ struct i915_params { int enable_rc6; int enable_fbc; int enable_ppgtt; + int enable_execlists; int enable_psr; unsigned int preliminary_hw_support; int disable_power_well; @@ -2177,8 +2221,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); - -extern void intel_console_resume(struct work_struct *work); +void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); /* i915_irq.c */ void i915_queue_hangcheck(struct drm_device *dev); @@ -2227,6 +2270,20 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +void i915_gem_execbuffer_move_to_active(struct list_head *vmas, + struct intel_engine_cs *ring); +void i915_gem_execbuffer_retire_commands(struct drm_device *dev, + struct drm_file *file, + struct intel_engine_cs *ring, + struct drm_i915_gem_object *obj); +int i915_gem_ringbuffer_submission(struct drm_device *dev, + struct drm_file *file, + struct intel_engine_cs *ring, + struct intel_context *ctx, + struct drm_i915_gem_execbuffer2 *args, + struct list_head *vmas, + struct drm_i915_gem_object *batch_obj, + u64 exec_start, u32 flags); int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_execbuffer2(struct drm_device *dev, void *data, @@ -2379,6 +2436,7 @@ void i915_gem_reset(struct drm_device *dev); bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); int __must_check i915_gem_init(struct drm_device *dev); +int i915_gem_init_rings(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice); void i915_gem_init_swizzling(struct drm_device *dev); @@ -2449,7 +2507,7 @@ static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) { } /* Some GGTT VM helpers */ -#define obj_to_ggtt(obj) \ +#define i915_obj_to_ggtt(obj) \ (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) static inline bool i915_is_ggtt(struct i915_address_space *vm) { @@ -2458,21 +2516,30 @@ static inline bool i915_is_ggtt(struct i915_address_space *vm) return vm == ggtt; } +static inline struct i915_hw_ppgtt * 
+i915_vm_to_ppgtt(struct i915_address_space *vm) +{ + WARN_ON(i915_is_ggtt(vm)); + + return container_of(vm, struct i915_hw_ppgtt, base); +} + + static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) { - return i915_gem_obj_bound(obj, obj_to_ggtt(obj)); + return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj)); } static inline unsigned long i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj) { - return i915_gem_obj_offset(obj, obj_to_ggtt(obj)); + return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj)); } static inline unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) { - return i915_gem_obj_size(obj, obj_to_ggtt(obj)); + return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj)); } static inline int __must_check @@ -2480,7 +2547,8 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, uint32_t alignment, unsigned flags) { - return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL); + return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj), + alignment, flags | PIN_GLOBAL); } static inline int @@ -2492,7 +2560,6 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj); /* i915_gem_context.c */ -#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base) int __must_check i915_gem_context_init(struct drm_device *dev); void i915_gem_context_fini(struct drm_device *dev); void i915_gem_context_reset(struct drm_device *dev); @@ -2504,6 +2571,8 @@ int i915_switch_context(struct intel_engine_cs *ring, struct intel_context * i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); void i915_gem_context_free(struct kref *ctx_ref); +struct drm_i915_gem_object * +i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); static inline void i915_gem_context_reference(struct intel_context *ctx) { kref_get(&ctx->ref); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index dcd8d7b42552..f1bb69377a35 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1149,16 +1149,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv) static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno, unsigned reset_counter, bool interruptible, - struct timespec *timeout, + s64 *timeout, struct drm_i915_file_private *file_priv) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; const bool irq_test_in_progress = ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); - struct timespec before, now; DEFINE_WAIT(wait); unsigned long timeout_expire; + s64 before, now; int ret; WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled"); @@ -1166,7 +1166,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno, if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) return 0; - timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0; + timeout_expire = timeout ? 
jiffies + nsecs_to_jiffies((u64)*timeout) : 0; if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) { gen6_rps_boost(dev_priv); @@ -1181,7 +1181,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno, /* Record current time in case interrupted by signal, or wedged */ trace_i915_gem_request_wait_begin(ring, seqno); - getrawmonotonic(&before); + before = ktime_get_raw_ns(); for (;;) { struct timer_list timer; @@ -1230,7 +1230,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno, destroy_timer_on_stack(&timer); } } - getrawmonotonic(&now); + now = ktime_get_raw_ns(); trace_i915_gem_request_wait_end(ring, seqno); if (!irq_test_in_progress) @@ -1239,10 +1239,9 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno, finish_wait(&ring->irq_queue, &wait); if (timeout) { - struct timespec sleep_time = timespec_sub(now, before); - *timeout = timespec_sub(*timeout, sleep_time); - if (!timespec_valid(timeout)) /* i.e. negative time remains */ - set_normalized_timespec(timeout, 0, 0); + s64 tres = *timeout - (now - before); + + *timeout = tres < 0 ? 0 : tres; } return ret; @@ -2161,8 +2160,6 @@ static void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, struct intel_engine_cs *ring) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; u32 seqno = intel_ring_get_seqno(ring); BUG_ON(ring == NULL); @@ -2181,19 +2178,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, list_move_tail(&obj->ring_list, &ring->active_list); obj->last_read_seqno = seqno; - - if (obj->fenced_gpu_access) { - obj->last_fenced_seqno = seqno; - - /* Bump MRU to take account of the delayed flush */ - if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_fence_reg *reg; - - reg = &dev_priv->fence_regs[obj->fence_reg]; - list_move_tail(®->lru_list, - &dev_priv->mm.fence_list); - } - } } void i915_vma_move_to_active(struct i915_vma *vma, @@ -2229,7 +2213,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) obj->base.write_domain = 0; obj->last_fenced_seqno = 0; - obj->fenced_gpu_access = false; obj->active = 0; drm_gem_object_unreference(&obj->base); @@ -2327,10 +2310,21 @@ int __i915_add_request(struct intel_engine_cs *ring, { struct drm_i915_private *dev_priv = ring->dev->dev_private; struct drm_i915_gem_request *request; + struct intel_ringbuffer *ringbuf; u32 request_ring_position, request_start; int ret; - request_start = intel_ring_get_tail(ring->buffer); + request = ring->preallocated_lazy_request; + if (WARN_ON(request == NULL)) + return -ENOMEM; + + if (i915.enable_execlists) { + struct intel_context *ctx = request->ctx; + ringbuf = ctx->engine[ring->id].ringbuf; + } else + ringbuf = ring->buffer; + + request_start = intel_ring_get_tail(ringbuf); /* * Emit any outstanding flushes - execbuf can fail to emit the flush * after having emitted the batchbuffer command. Hence we need to fix @@ -2338,24 +2332,32 @@ int __i915_add_request(struct intel_engine_cs *ring, * is that the flush _must_ happen before the next request, no matter * what. 
*/ - ret = intel_ring_flush_all_caches(ring); - if (ret) - return ret; - - request = ring->preallocated_lazy_request; - if (WARN_ON(request == NULL)) - return -ENOMEM; + if (i915.enable_execlists) { + ret = logical_ring_flush_all_caches(ringbuf); + if (ret) + return ret; + } else { + ret = intel_ring_flush_all_caches(ring); + if (ret) + return ret; + } /* Record the position of the start of the request so that * should we detect the updated seqno part-way through the * GPU processing the request, we never over-estimate the * position of the head. */ - request_ring_position = intel_ring_get_tail(ring->buffer); + request_ring_position = intel_ring_get_tail(ringbuf); - ret = ring->add_request(ring); - if (ret) - return ret; + if (i915.enable_execlists) { + ret = ring->emit_request(ringbuf); + if (ret) + return ret; + } else { + ret = ring->add_request(ring); + if (ret) + return ret; + } request->seqno = intel_ring_get_seqno(ring); request->ring = ring; @@ -2370,12 +2372,14 @@ int __i915_add_request(struct intel_engine_cs *ring, */ request->batch_obj = obj; - /* Hold a reference to the current context so that we can inspect - * it later in case a hangcheck error event fires. - */ - request->ctx = ring->last_context; - if (request->ctx) - i915_gem_context_reference(request->ctx); + if (!i915.enable_execlists) { + /* Hold a reference to the current context so that we can inspect + * it later in case a hangcheck error event fires. + */ + request->ctx = ring->last_context; + if (request->ctx) + i915_gem_context_reference(request->ctx); + } request->emitted_jiffies = jiffies; list_add_tail(&request->list, &ring->request_list); @@ -2546,6 +2550,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, i915_gem_free_request(request); } + while (!list_empty(&ring->execlist_queue)) { + struct intel_ctx_submit_request *submit_req; + + submit_req = list_first_entry(&ring->execlist_queue, + struct intel_ctx_submit_request, + execlist_link); + list_del(&submit_req->execlist_link); + intel_runtime_pm_put(dev_priv); + i915_gem_context_unreference(submit_req->ctx); + kfree(submit_req); + } + /* These may not have been flush before the reset, do so now */ kfree(ring->preallocated_lazy_request); ring->preallocated_lazy_request = NULL; @@ -2630,6 +2646,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; + struct intel_ringbuffer *ringbuf; request = list_first_entry(&ring->request_list, struct drm_i915_gem_request, @@ -2639,12 +2656,24 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) break; trace_i915_gem_request_retire(ring, request->seqno); + + /* This is one of the few common intersection points + * between legacy ringbuffer submission and execlists: + * we need to tell them apart in order to find the correct + * ringbuffer to which the request belongs to. + */ + if (i915.enable_execlists) { + struct intel_context *ctx = request->ctx; + ringbuf = ctx->engine[ring->id].ringbuf; + } else + ringbuf = ring->buffer; + /* We know the GPU must have read the request to have * sent us the seqno + interrupt, so use the position * of tail of the request to update the last known position * of the GPU head. 
*/ - ring->buffer->last_retired_head = request->tail; + ringbuf->last_retired_head = request->tail; i915_gem_free_request(request); } @@ -2757,16 +2786,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) struct drm_i915_gem_wait *args = data; struct drm_i915_gem_object *obj; struct intel_engine_cs *ring = NULL; - struct timespec timeout_stack, *timeout = NULL; unsigned reset_counter; u32 seqno = 0; int ret = 0; - if (args->timeout_ns >= 0) { - timeout_stack = ns_to_timespec(args->timeout_ns); - timeout = &timeout_stack; - } - ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; @@ -2791,9 +2814,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) goto out; /* Do this after OLR check to make sure we make forward progress polling - * on this IOCTL with a 0 timeout (like busy ioctl) + * on this IOCTL with a timeout <=0 (like busy ioctl) */ - if (!args->timeout_ns) { + if (args->timeout_ns <= 0) { ret = -ETIME; goto out; } @@ -2802,10 +2825,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); mutex_unlock(&dev->struct_mutex); - ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv); - if (timeout) - args->timeout_ns = timespec_to_ns(timeout); - return ret; + return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns, + file->driver_priv); out: drm_gem_object_unreference(&obj->base); @@ -2928,9 +2949,8 @@ int i915_vma_unbind(struct i915_vma *vma) vma->unbind_vma(vma); list_del_init(&vma->mm_list); - /* Avoid an unnecessary call to unbind on rebind. */ if (i915_is_ggtt(vma->vm)) - obj->map_and_fenceable = true; + obj->map_and_fenceable = false; drm_mm_remove_node(&vma->node); i915_gem_vma_destroy(vma); @@ -3175,7 +3195,6 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) obj->last_fenced_seqno = 0; } - obj->fenced_gpu_access = false; return 0; } @@ -3282,6 +3301,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) return 0; } } else if (enable) { + if (WARN_ON(!obj->map_and_fenceable)) + return -EINVAL; + reg = i915_find_fence_reg(dev); if (IS_ERR(reg)) return PTR_ERR(reg); @@ -3592,11 +3614,12 @@ int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct i915_vma *vma = i915_gem_obj_to_ggtt(obj); uint32_t old_write_domain, old_read_domains; int ret; /* Not valid to be called on unbound objects. 
*/ - if (!i915_gem_obj_bound_any(obj)) + if (vma == NULL) return -EINVAL; if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) @@ -3638,13 +3661,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) old_write_domain); /* And bump the LRU for this access */ - if (i915_gem_object_is_inactive(obj)) { - struct i915_vma *vma = i915_gem_obj_to_ggtt(obj); - if (vma) - list_move_tail(&vma->mm_list, - &dev_priv->gtt.base.inactive_list); - - } + if (i915_gem_object_is_inactive(obj)) + list_move_tail(&vma->mm_list, + &dev_priv->gtt.base.inactive_list); return 0; } @@ -3808,9 +3827,6 @@ static bool is_pin_display(struct drm_i915_gem_object *obj) { struct i915_vma *vma; - if (list_empty(&obj->vma_list)) - return false; - vma = i915_gem_obj_to_ggtt(obj); if (!vma) return false; @@ -4337,8 +4353,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, obj->fence_reg = I915_FENCE_REG_NONE; obj->madv = I915_MADV_WILLNEED; - /* Avoid an unnecessary call to unbind on the first bind. */ - obj->map_and_fenceable = true; i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); } @@ -4499,12 +4513,18 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, void i915_gem_vma_destroy(struct i915_vma *vma) { + struct i915_address_space *vm = NULL; WARN_ON(vma->node.allocated); /* Keep the vma as a placeholder in the execbuffer reservation lists */ if (!list_empty(&vma->exec_list)) return; + vm = vma->vm; + + if (!i915_is_ggtt(vm)) + i915_ppgtt_put(i915_vm_to_ppgtt(vm)); + list_del(&vma->vma_link); kfree(vma); @@ -4518,7 +4538,7 @@ i915_gem_stop_ringbuffers(struct drm_device *dev) int i; for_each_ring(ring, dev_priv, i) - intel_stop_ring_buffer(ring); + dev_priv->gt.stop_ring(ring); } int @@ -4554,7 +4574,7 @@ i915_gem_suspend(struct drm_device *dev) del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); cancel_delayed_work_sync(&dev_priv->mm.retire_work); - cancel_delayed_work_sync(&dev_priv->mm.idle_work); + flush_delayed_work(&dev_priv->mm.idle_work); return 0; @@ -4635,7 +4655,7 @@ intel_enable_blt(struct drm_device *dev) return true; } -static int i915_gem_init_rings(struct drm_device *dev) +int i915_gem_init_rings(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; @@ -4718,7 +4738,7 @@ i915_gem_init_hw(struct drm_device *dev) i915_gem_init_swizzling(dev); - ret = i915_gem_init_rings(dev); + ret = dev_priv->gt.init_rings(dev); if (ret) return ret; @@ -4736,6 +4756,14 @@ i915_gem_init_hw(struct drm_device *dev) if (ret && ret != -EIO) { DRM_ERROR("Context enable failed %d\n", ret); i915_gem_cleanup_ringbuffer(dev); + + return ret; + } + + ret = i915_ppgtt_init_hw(dev); + if (ret && ret != -EIO) { + DRM_ERROR("PPGTT enable failed %d\n", ret); + i915_gem_cleanup_ringbuffer(dev); } return ret; @@ -4746,6 +4774,9 @@ int i915_gem_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int ret; + i915.enable_execlists = intel_sanitize_enable_execlists(dev, + i915.enable_execlists); + mutex_lock(&dev->struct_mutex); if (IS_VALLEYVIEW(dev)) { @@ -4756,7 +4787,24 @@ int i915_gem_init(struct drm_device *dev) DRM_DEBUG_DRIVER("allow wake ack timed out\n"); } - i915_gem_init_userptr(dev); + if (!i915.enable_execlists) { + dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission; + dev_priv->gt.init_rings = i915_gem_init_rings; + dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer; + dev_priv->gt.stop_ring = intel_stop_ring_buffer; + } else { + dev_priv->gt.do_execbuf = intel_execlists_submission; + 
dev_priv->gt.init_rings = intel_logical_rings_init; + dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup; + dev_priv->gt.stop_ring = intel_logical_ring_stop; + } + + ret = i915_gem_init_userptr(dev); + if (ret) { + mutex_unlock(&dev->struct_mutex); + return ret; + } + i915_gem_init_global_gtt(dev); ret = i915_gem_context_init(dev); @@ -4791,7 +4839,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) int i; for_each_ring(ring, dev_priv, i) - intel_cleanup_ring_buffer(ring); + dev_priv->gt.cleanup_ring(ring); } int @@ -5103,9 +5151,7 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, struct drm_i915_private *dev_priv = o->base.dev->dev_private; struct i915_vma *vma; - if (!dev_priv->mm.aliasing_ppgtt || - vm == &dev_priv->mm.aliasing_ppgtt->base) - vm = &dev_priv->gtt.base; + WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); list_for_each_entry(vma, &o->vma_list, vma_link) { if (vma->vm == vm) @@ -5146,9 +5192,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, struct drm_i915_private *dev_priv = o->base.dev->dev_private; struct i915_vma *vma; - if (!dev_priv->mm.aliasing_ppgtt || - vm == &dev_priv->mm.aliasing_ppgtt->base) - vm = &dev_priv->gtt.base; + WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); BUG_ON(list_empty(&o->vma_list)); @@ -5253,14 +5297,8 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) { struct i915_vma *vma; - /* This WARN has probably outlived its usefulness (callers already - * WARN if they don't find the GGTT vma they expect). When removing, - * remember to remove the pre-check in is_pin_display() as well */ - if (WARN_ON(list_empty(&obj->vma_list))) - return NULL; - vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link); - if (vma->vm != obj_to_ggtt(obj)) + if (vma->vm != i915_obj_to_ggtt(obj)) return NULL; return vma; diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 3b99390e467a..9683e62ec61a 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -96,50 +96,6 @@ #define GEN6_CONTEXT_ALIGN (64<<10) #define GEN7_CONTEXT_ALIGN 4096 -static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) -{ - struct drm_device *dev = ppgtt->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_address_space *vm = &ppgtt->base; - - if (ppgtt == dev_priv->mm.aliasing_ppgtt || - (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) { - ppgtt->base.cleanup(&ppgtt->base); - return; - } - - /* - * Make sure vmas are unbound before we take down the drm_mm - * - * FIXME: Proper refcounting should take care of this, this shouldn't be - * needed at all. 
- */ - if (!list_empty(&vm->active_list)) { - struct i915_vma *vma; - - list_for_each_entry(vma, &vm->active_list, mm_list) - if (WARN_ON(list_empty(&vma->vma_link) || - list_is_singular(&vma->vma_link))) - break; - - i915_gem_evict_vm(&ppgtt->base, true); - } else { - i915_gem_retire_requests(dev); - i915_gem_evict_vm(&ppgtt->base, false); - } - - ppgtt->base.cleanup(&ppgtt->base); -} - -static void ppgtt_release(struct kref *kref) -{ - struct i915_hw_ppgtt *ppgtt = - container_of(kref, struct i915_hw_ppgtt, ref); - - do_ppgtt_cleanup(ppgtt); - kfree(ppgtt); -} - static size_t get_context_alignment(struct drm_device *dev) { if (IS_GEN6(dev)) @@ -179,24 +135,20 @@ static int get_context_size(struct drm_device *dev) void i915_gem_context_free(struct kref *ctx_ref) { struct intel_context *ctx = container_of(ctx_ref, - typeof(*ctx), ref); - struct i915_hw_ppgtt *ppgtt = NULL; + typeof(*ctx), ref); - if (ctx->legacy_hw_ctx.rcs_state) { - /* We refcount even the aliasing PPGTT to keep the code symmetric */ - if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev)) - ppgtt = ctx_to_ppgtt(ctx); - } + if (i915.enable_execlists) + intel_lr_context_free(ctx); + + i915_ppgtt_put(ctx->ppgtt); - if (ppgtt) - kref_put(&ppgtt->ref, ppgtt_release); if (ctx->legacy_hw_ctx.rcs_state) drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base); list_del(&ctx->link); kfree(ctx); } -static struct drm_i915_gem_object * +struct drm_i915_gem_object * i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) { struct drm_i915_gem_object *obj; @@ -226,29 +178,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) return obj; } -static struct i915_hw_ppgtt * -create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx) -{ - struct i915_hw_ppgtt *ppgtt; - int ret; - - ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); - if (!ppgtt) - return ERR_PTR(-ENOMEM); - - ret = i915_gem_init_ppgtt(dev, ppgtt); - if (ret) { - kfree(ppgtt); - return ERR_PTR(ret); - } - - ppgtt->ctx = ctx; - return ppgtt; -} - static struct intel_context * __create_hw_context(struct drm_device *dev, - struct drm_i915_file_private *file_priv) + struct drm_i915_file_private *file_priv) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_context *ctx; @@ -301,11 +233,9 @@ err_out: */ static struct intel_context * i915_gem_create_context(struct drm_device *dev, - struct drm_i915_file_private *file_priv, - bool create_vm) + struct drm_i915_file_private *file_priv) { const bool is_global_default_ctx = file_priv == NULL; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_context *ctx; int ret = 0; @@ -331,34 +261,18 @@ i915_gem_create_context(struct drm_device *dev, } } - if (create_vm) { - struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx); + if (USES_FULL_PPGTT(dev)) { + struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv); if (IS_ERR_OR_NULL(ppgtt)) { DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); ret = PTR_ERR(ppgtt); goto err_unpin; - } else - ctx->vm = &ppgtt->base; - - /* This case is reserved for the global default context and - * should only happen once. */ - if (is_global_default_ctx) { - if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) { - ret = -EEXIST; - goto err_unpin; - } - - dev_priv->mm.aliasing_ppgtt = ppgtt; } - } else if (USES_PPGTT(dev)) { - /* For platforms which only have aliasing PPGTT, we fake the - * address space and refcounting. 
*/ - ctx->vm = &dev_priv->mm.aliasing_ppgtt->base; - kref_get(&dev_priv->mm.aliasing_ppgtt->ref); - } else - ctx->vm = &dev_priv->gtt.base; + + ctx->ppgtt = ppgtt; + } return ctx; @@ -417,7 +331,11 @@ int i915_gem_context_init(struct drm_device *dev) if (WARN_ON(dev_priv->ring[RCS].default_context)) return 0; - if (HAS_HW_CONTEXTS(dev)) { + if (i915.enable_execlists) { + /* NB: intentionally left blank. We will allocate our own + * backing objects as we need them, thank you very much */ + dev_priv->hw_context_size = 0; + } else if (HAS_HW_CONTEXTS(dev)) { dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); if (dev_priv->hw_context_size > (1<<20)) { DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n", @@ -426,18 +344,23 @@ int i915_gem_context_init(struct drm_device *dev) } } - ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev)); + ctx = i915_gem_create_context(dev, NULL); if (IS_ERR(ctx)) { DRM_ERROR("Failed to create default global context (error %ld)\n", PTR_ERR(ctx)); return PTR_ERR(ctx); } - /* NB: RCS will hold a ref for all rings */ - for (i = 0; i < I915_NUM_RINGS; i++) - dev_priv->ring[i].default_context = ctx; + for (i = 0; i < I915_NUM_RINGS; i++) { + struct intel_engine_cs *ring = &dev_priv->ring[i]; - DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake"); + /* NB: RCS will hold a ref for all rings */ + ring->default_context = ctx; + } + + DRM_DEBUG_DRIVER("%s context support initialized\n", + i915.enable_execlists ? "LR" : + dev_priv->hw_context_size ? "HW" : "fake"); return 0; } @@ -489,13 +412,6 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv) struct intel_engine_cs *ring; int ret, i; - /* This is the only place the aliasing PPGTT gets enabled, which means - * it has to happen before we bail on reset */ - if (dev_priv->mm.aliasing_ppgtt) { - struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - ppgtt->enable(ppgtt); - } - /* FIXME: We should make this work, even in reset */ if (i915_reset_in_progress(&dev_priv->gpu_error)) return 0; @@ -527,7 +443,7 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) idr_init(&file_priv->context_idr); mutex_lock(&dev->struct_mutex); - ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev)); + ctx = i915_gem_create_context(dev, file_priv); mutex_unlock(&dev->struct_mutex); if (IS_ERR(ctx)) { @@ -614,7 +530,6 @@ static int do_switch(struct intel_engine_cs *ring, { struct drm_i915_private *dev_priv = ring->dev->dev_private; struct intel_context *from = ring->last_context; - struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to); u32 hw_flags = 0; bool uninitialized = false; int ret, i; @@ -642,8 +557,8 @@ static int do_switch(struct intel_engine_cs *ring, */ from = ring->last_context; - if (USES_FULL_PPGTT(ring->dev)) { - ret = ppgtt->switch_mm(ppgtt, ring, false); + if (to->ppgtt) { + ret = to->ppgtt->switch_mm(to->ppgtt, ring, false); if (ret) goto unpin_out; } @@ -766,9 +681,9 @@ int i915_switch_context(struct intel_engine_cs *ring, return do_switch(ring, to); } -static bool hw_context_enabled(struct drm_device *dev) +static bool contexts_enabled(struct drm_device *dev) { - return to_i915(dev)->hw_context_size; + return i915.enable_execlists || to_i915(dev)->hw_context_size; } int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, @@ -779,14 +694,14 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct intel_context *ctx; int ret; - if (!hw_context_enabled(dev)) + if 
(!contexts_enabled(dev)) return -ENODEV; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev)); + ctx = i915_gem_create_context(dev, file_priv); mutex_unlock(&dev->struct_mutex); if (IS_ERR(ctx)) return PTR_ERR(ctx); diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 580aa42443ed..82a1f4b57778 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -237,7 +237,8 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev, return ERR_PTR(ret); } - return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags); + return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags, + NULL); } static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2dd19da6b4b3..1a0611bb576b 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -35,6 +35,7 @@ #define __EXEC_OBJECT_HAS_PIN (1<<31) #define __EXEC_OBJECT_HAS_FENCE (1<<30) +#define __EXEC_OBJECT_NEEDS_MAP (1<<29) #define __EXEC_OBJECT_NEEDS_BIAS (1<<28) #define BATCH_OFFSET_BIAS (256*1024) @@ -94,7 +95,6 @@ eb_lookup_vmas(struct eb_vmas *eb, struct i915_address_space *vm, struct drm_file *file) { - struct drm_i915_private *dev_priv = vm->dev->dev_private; struct drm_i915_gem_object *obj; struct list_head objects; int i, ret; @@ -129,20 +129,6 @@ eb_lookup_vmas(struct eb_vmas *eb, i = 0; while (!list_empty(&objects)) { struct i915_vma *vma; - struct i915_address_space *bind_vm = vm; - - if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT && - USES_FULL_PPGTT(vm->dev)) { - ret = -EINVAL; - goto err; - } - - /* If we have secure dispatch, or the userspace assures us that - * they know what they're doing, use the GGTT VM. - */ - if (((args->flags & I915_EXEC_SECURE) && - (i == (args->buffer_count - 1)))) - bind_vm = &dev_priv->gtt.base; obj = list_first_entry(&objects, struct drm_i915_gem_object, @@ -156,7 +142,7 @@ eb_lookup_vmas(struct eb_vmas *eb, * from the (obj, vm) we don't run the risk of creating * duplicated vmas for the same vm. */ - vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm); + vma = i915_gem_obj_lookup_or_create_vma(obj, vm); if (IS_ERR(vma)) { DRM_DEBUG("Failed to lookup VMA\n"); ret = PTR_ERR(vma); @@ -307,7 +293,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint64_t delta = reloc->delta + target_offset; - uint32_t __iomem *reloc_entry; + uint64_t offset; void __iomem *reloc_page; int ret; @@ -320,25 +306,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, return ret; /* Map the page containing the relocation we're going to perform. 
*/ - reloc->offset += i915_gem_obj_ggtt_offset(obj); + offset = i915_gem_obj_ggtt_offset(obj); + offset += reloc->offset; reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, - reloc->offset & PAGE_MASK); - reloc_entry = (uint32_t __iomem *) - (reloc_page + offset_in_page(reloc->offset)); - iowrite32(lower_32_bits(delta), reloc_entry); + offset & PAGE_MASK); + iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset)); if (INTEL_INFO(dev)->gen >= 8) { - reloc_entry += 1; + offset += sizeof(uint32_t); - if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) { + if (offset_in_page(offset) == 0) { io_mapping_unmap_atomic(reloc_page); - reloc_page = io_mapping_map_atomic_wc( - dev_priv->gtt.mappable, - reloc->offset + sizeof(uint32_t)); - reloc_entry = reloc_page; + reloc_page = + io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + offset); } - iowrite32(upper_32_bits(delta), reloc_entry); + iowrite32(upper_32_bits(delta), + reloc_page + offset_in_page(offset)); } io_mapping_unmap_atomic(reloc_page); @@ -535,34 +520,18 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb) } static int -need_reloc_mappable(struct i915_vma *vma) -{ - struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; - return entry->relocation_count && !use_cpu_reloc(vma->obj) && - i915_is_ggtt(vma->vm); -} - -static int i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, struct intel_engine_cs *ring, bool *need_reloc) { struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; - bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; - bool need_fence; uint64_t flags; int ret; flags = 0; - - need_fence = - has_fenced_gpu_access && - entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj->tiling_mode != I915_TILING_NONE; - if (need_fence || need_reloc_mappable(vma)) + if (entry->flags & __EXEC_OBJECT_NEEDS_MAP) flags |= PIN_MAPPABLE; - if (entry->flags & EXEC_OBJECT_NEEDS_GTT) flags |= PIN_GLOBAL; if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) @@ -574,17 +543,13 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, entry->flags |= __EXEC_OBJECT_HAS_PIN; - if (has_fenced_gpu_access) { - if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { - ret = i915_gem_object_get_fence(obj); - if (ret) - return ret; - - if (i915_gem_object_pin_fence(obj)) - entry->flags |= __EXEC_OBJECT_HAS_FENCE; + if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { + ret = i915_gem_object_get_fence(obj); + if (ret) + return ret; - obj->pending_fenced_gpu_access = true; - } + if (i915_gem_object_pin_fence(obj)) + entry->flags |= __EXEC_OBJECT_HAS_FENCE; } if (entry->offset != vma->node.start) { @@ -601,26 +566,40 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, } static bool -eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access) +need_reloc_mappable(struct i915_vma *vma) { struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; - struct drm_i915_gem_object *obj = vma->obj; - bool need_fence, need_mappable; - need_fence = - has_fenced_gpu_access && - entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj->tiling_mode != I915_TILING_NONE; - need_mappable = need_fence || need_reloc_mappable(vma); + if (entry->relocation_count == 0) + return false; + + if (!i915_is_ggtt(vma->vm)) + return false; + + /* See also use_cpu_reloc() */ + if (HAS_LLC(vma->obj->base.dev)) + return false; - WARN_ON((need_mappable || need_fence) && + if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU) + return false; + + return true; +} + +static bool +eb_vma_misplaced(struct i915_vma *vma) +{ + struct 
drm_i915_gem_exec_object2 *entry = vma->exec_entry; + struct drm_i915_gem_object *obj = vma->obj; + + WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !i915_is_ggtt(vma->vm)); if (entry->alignment && vma->node.start & (entry->alignment - 1)) return true; - if (need_mappable && !obj->map_and_fenceable) + if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable) return true; if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && @@ -642,9 +621,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; int retry; - if (list_empty(vmas)) - return 0; - i915_gem_retire_requests_ring(ring); vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; @@ -658,20 +634,21 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, obj = vma->obj; entry = vma->exec_entry; + if (!has_fenced_gpu_access) + entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; need_fence = - has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; need_mappable = need_fence || need_reloc_mappable(vma); - if (need_mappable) + if (need_mappable) { + entry->flags |= __EXEC_OBJECT_NEEDS_MAP; list_move(&vma->exec_list, &ordered_vmas); - else + } else list_move_tail(&vma->exec_list, &ordered_vmas); obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND; obj->base.pending_write_domain = 0; - obj->pending_fenced_gpu_access = false; } list_splice(&ordered_vmas, vmas); @@ -696,7 +673,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, if (!drm_mm_node_allocated(&vma->node)) continue; - if (eb_vma_misplaced(vma, has_fenced_gpu_access)) + if (eb_vma_misplaced(vma)) ret = i915_vma_unbind(vma); else ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); @@ -744,9 +721,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, int i, total, ret; unsigned count = args->buffer_count; - if (WARN_ON(list_empty(&eb->vmas))) - return 0; - vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm; /* We may process another execbuffer during the unlock... 
*/ @@ -890,18 +864,24 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) } static int -validate_exec_list(struct drm_i915_gem_exec_object2 *exec, +validate_exec_list(struct drm_device *dev, + struct drm_i915_gem_exec_object2 *exec, int count) { - int i; unsigned relocs_total = 0; unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry); + unsigned invalid_flags; + int i; + + invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; + if (USES_FULL_PPGTT(dev)) + invalid_flags |= EXEC_OBJECT_NEEDS_GTT; for (i = 0; i < count; i++) { char __user *ptr = to_user_ptr(exec[i].relocs_ptr); int length; /* limited by fault_in_pages_readable() */ - if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) + if (exec[i].flags & invalid_flags) return -EINVAL; /* First check for malicious input causing overflow in @@ -951,16 +931,26 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, return ERR_PTR(-EIO); } + if (i915.enable_execlists && !ctx->engine[ring->id].state) { + int ret = intel_lr_context_deferred_create(ctx, ring); + if (ret) { + DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret); + return ERR_PTR(ret); + } + } + return ctx; } -static void +void i915_gem_execbuffer_move_to_active(struct list_head *vmas, struct intel_engine_cs *ring) { + u32 seqno = intel_ring_get_seqno(ring); struct i915_vma *vma; list_for_each_entry(vma, vmas, exec_list) { + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; struct drm_i915_gem_object *obj = vma->obj; u32 old_read = obj->base.read_domains; u32 old_write = obj->base.write_domain; @@ -969,24 +959,31 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, if (obj->base.write_domain == 0) obj->base.pending_read_domains |= obj->base.read_domains; obj->base.read_domains = obj->base.pending_read_domains; - obj->fenced_gpu_access = obj->pending_fenced_gpu_access; i915_vma_move_to_active(vma, ring); if (obj->base.write_domain) { obj->dirty = 1; - obj->last_write_seqno = intel_ring_get_seqno(ring); + obj->last_write_seqno = seqno; intel_fb_obj_invalidate(obj, ring); /* update for the implicit flush after a batch */ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; } + if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { + obj->last_fenced_seqno = seqno; + if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { + struct drm_i915_private *dev_priv = to_i915(ring->dev); + list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, + &dev_priv->mm.fence_list); + } + } trace_i915_gem_object_change_domain(obj, old_read, old_write); } } -static void +void i915_gem_execbuffer_retire_commands(struct drm_device *dev, struct drm_file *file, struct intel_engine_cs *ring, @@ -1026,14 +1023,14 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, return 0; } -static int -legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, - struct intel_engine_cs *ring, - struct intel_context *ctx, - struct drm_i915_gem_execbuffer2 *args, - struct list_head *vmas, - struct drm_i915_gem_object *batch_obj, - u64 exec_start, u32 flags) +int +i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, + struct intel_engine_cs *ring, + struct intel_context *ctx, + struct drm_i915_gem_execbuffer2 *args, + struct list_head *vmas, + struct drm_i915_gem_object *batch_obj, + u64 exec_start, u32 flags) { struct drm_clip_rect *cliprects = NULL; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1254,13 +1251,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (!i915_gem_check_execbuffer(args)) return 
-EINVAL; - ret = validate_exec_list(exec, args->buffer_count); + ret = validate_exec_list(dev, exec, args->buffer_count); if (ret) return ret; flags = 0; if (args->flags & I915_EXEC_SECURE) { - if (!drm_is_master(file) || !capable(CAP_SYS_ADMIN)) + if (!file->is_master || !capable(CAP_SYS_ADMIN)) return -EPERM; flags |= I915_DISPATCH_SECURE; @@ -1318,8 +1315,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_gem_context_reference(ctx); - vm = ctx->vm; - if (!USES_FULL_PPGTT(dev)) + if (ctx->ppgtt) + vm = &ctx->ppgtt->base; + else vm = &dev_priv->gtt.base; eb = eb_create(args); @@ -1369,7 +1367,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ret = i915_parse_cmds(ring, batch_obj, args->batch_start_offset, - drm_is_master(file)); + file->is_master); if (ret) goto err; @@ -1386,25 +1384,36 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure * batch" bit. Hence we need to pin secure batches into the global gtt. * hsw should have this fixed, but bdw mucks it up again. */ - if (flags & I915_DISPATCH_SECURE && - !batch_obj->has_global_gtt_mapping) { - /* When we have multiple VMs, we'll need to make sure that we - * allocate space first */ - struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj); - BUG_ON(!vma); - vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND); - } + if (flags & I915_DISPATCH_SECURE) { + /* + * So on first glance it looks freaky that we pin the batch here + * outside of the reservation loop. But: + * - The batch is already pinned into the relevant ppgtt, so we + * already have the backing storage fully allocated. + * - No other BO uses the global gtt (well contexts, but meh), + * so we don't really have issues with mutliple objects not + * fitting due to fragmentation. + * So this is actually safe. + */ + ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0); + if (ret) + goto err; - if (flags & I915_DISPATCH_SECURE) exec_start += i915_gem_obj_ggtt_offset(batch_obj); - else + } else exec_start += i915_gem_obj_offset(batch_obj, vm); - ret = legacy_ringbuffer_submission(dev, file, ring, ctx, - args, &eb->vmas, batch_obj, exec_start, flags); - if (ret) - goto err; + ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args, + &eb->vmas, batch_obj, exec_start, flags); + /* + * FIXME: We crucially rely upon the active tracking for the (ppgtt) + * batch vma for correctness. For less ugly and less fragility this + * needs to be adjusted to also track the ggtt batch vma properly as + * active. 
+ */ + if (flags & I915_DISPATCH_SECURE) + i915_gem_object_ggtt_unpin(batch_obj); err: /* the request owns the ref now */ i915_gem_context_unreference(ctx); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 5188936bca0a..4db237065610 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -33,17 +33,6 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv); static void chv_setup_private_ppat(struct drm_i915_private *dev_priv); -bool intel_enable_ppgtt(struct drm_device *dev, bool full) -{ - if (i915.enable_ppgtt == 0) - return false; - - if (i915.enable_ppgtt == 1 && full) - return false; - - return true; -} - static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) { if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) @@ -78,7 +67,6 @@ static void ppgtt_bind_vma(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags); static void ppgtt_unbind_vma(struct i915_vma *vma); -static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt); static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, enum i915_cache_level level, @@ -403,9 +391,6 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); - list_del(&vm->global_link); - drm_mm_takedown(&vm->mm); - gen8_ppgtt_unmap_pages(ppgtt); gen8_ppgtt_free(ppgtt); } @@ -615,7 +600,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) kunmap_atomic(pd_vaddr); } - ppgtt->enable = gen8_ppgtt_enable; ppgtt->switch_mm = gen8_mm_switch; ppgtt->base.clear_range = gen8_ppgtt_clear_range; ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; @@ -836,39 +820,26 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, return 0; } -static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) +static void gen8_ppgtt_enable(struct drm_device *dev) { - struct drm_device *dev = ppgtt->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *ring; - int j, ret; + int j; + + /* In the case of execlists, PPGTT is enabled by the context descriptor + * and the PDPs are contained within the context itself. We don't + * need to do anything here. */ + if (i915.enable_execlists) + return; for_each_ring(ring, dev_priv, j) { I915_WRITE(RING_MODE_GEN7(ring), _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); - - /* We promise to do a switch later with FULL PPGTT. If this is - * aliasing, this is the one and only switch we'll do */ - if (USES_FULL_PPGTT(dev)) - continue; - - ret = ppgtt->switch_mm(ppgtt, ring, true); - if (ret) - goto err_out; } - - return 0; - -err_out: - for_each_ring(ring, dev_priv, j) - I915_WRITE(RING_MODE_GEN7(ring), - _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE)); - return ret; } -static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) +static void gen7_ppgtt_enable(struct drm_device *dev) { - struct drm_device *dev = ppgtt->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *ring; uint32_t ecochk, ecobits; @@ -887,31 +858,16 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) I915_WRITE(GAM_ECOCHK, ecochk); for_each_ring(ring, dev_priv, i) { - int ret; /* GFX_MODE is per-ring on gen7+ */ I915_WRITE(RING_MODE_GEN7(ring), _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); - - /* We promise to do a switch later with FULL PPGTT. 
If this is - * aliasing, this is the one and only switch we'll do */ - if (USES_FULL_PPGTT(dev)) - continue; - - ret = ppgtt->switch_mm(ppgtt, ring, true); - if (ret) - return ret; } - - return 0; } -static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) +static void gen6_ppgtt_enable(struct drm_device *dev) { - struct drm_device *dev = ppgtt->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; uint32_t ecochk, gab_ctl, ecobits; - int i; ecobits = I915_READ(GAC_ECO_BITS); I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | @@ -924,14 +880,6 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); - - for_each_ring(ring, dev_priv, i) { - int ret = ppgtt->switch_mm(ppgtt, ring, true); - if (ret) - return ret; - } - - return 0; } /* PPGTT support for Sandybdrige/Gen6 and later */ @@ -1029,8 +977,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); - list_del(&vm->global_link); - drm_mm_takedown(&ppgtt->base.mm); drm_mm_remove_node(&ppgtt->node); gen6_ppgtt_unmap_pages(ppgtt); @@ -1151,13 +1097,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; if (IS_GEN6(dev)) { - ppgtt->enable = gen6_ppgtt_enable; ppgtt->switch_mm = gen6_mm_switch; } else if (IS_HASWELL(dev)) { - ppgtt->enable = gen7_ppgtt_enable; ppgtt->switch_mm = hsw_mm_switch; } else if (IS_GEN7(dev)) { - ppgtt->enable = gen7_ppgtt_enable; ppgtt->switch_mm = gen7_mm_switch; } else BUG(); @@ -1188,39 +1131,108 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ppgtt->node.size >> 20, ppgtt->node.start / PAGE_SIZE); + gen6_write_pdes(ppgtt); + DRM_DEBUG("Adding PPGTT at offset %x\n", + ppgtt->pd_offset << 10); + return 0; } -int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) +static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) { struct drm_i915_private *dev_priv = dev->dev_private; - int ret = 0; ppgtt->base.dev = dev; ppgtt->base.scratch = dev_priv->gtt.base.scratch; if (INTEL_INFO(dev)->gen < 8) - ret = gen6_ppgtt_init(ppgtt); + return gen6_ppgtt_init(ppgtt); else if (IS_GEN8(dev)) - ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); + return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); else BUG(); +} +int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret = 0; - if (!ret) { - struct drm_i915_private *dev_priv = dev->dev_private; + ret = __hw_ppgtt_init(dev, ppgtt); + if (ret == 0) { kref_init(&ppgtt->ref); drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, ppgtt->base.total); i915_init_vm(dev_priv, &ppgtt->base); - if (INTEL_INFO(dev)->gen < 8) { - gen6_write_pdes(ppgtt); - DRM_DEBUG("Adding PPGTT at offset %x\n", - ppgtt->pd_offset << 10); + } + + return ret; +} + +int i915_ppgtt_init_hw(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *ring; + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + int i, ret = 0; + + if (!USES_PPGTT(dev)) + return 0; + + if (IS_GEN6(dev)) + gen6_ppgtt_enable(dev); + else if (IS_GEN7(dev)) + gen7_ppgtt_enable(dev); + else if (INTEL_INFO(dev)->gen >= 8) + gen8_ppgtt_enable(dev); + else + WARN_ON(1); + + if (ppgtt) { + for_each_ring(ring, dev_priv, i) 
{ + ret = ppgtt->switch_mm(ppgtt, ring, true); + if (ret != 0) + return ret; } } return ret; } +struct i915_hw_ppgtt * +i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) +{ + struct i915_hw_ppgtt *ppgtt; + int ret; + + ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); + if (!ppgtt) + return ERR_PTR(-ENOMEM); + + ret = i915_ppgtt_init(dev, ppgtt); + if (ret) { + kfree(ppgtt); + return ERR_PTR(ret); + } + + ppgtt->file_priv = fpriv; + + return ppgtt; +} + +void i915_ppgtt_release(struct kref *kref) +{ + struct i915_hw_ppgtt *ppgtt = + container_of(kref, struct i915_hw_ppgtt, ref); + + /* vmas should already be unbound */ + WARN_ON(!list_empty(&ppgtt->base.active_list)); + WARN_ON(!list_empty(&ppgtt->base.inactive_list)); + + list_del(&ppgtt->base.global_link); + drm_mm_takedown(&ppgtt->base.mm); + + ppgtt->base.cleanup(&ppgtt->base); + kfree(ppgtt); +} static void ppgtt_bind_vma(struct i915_vma *vma, @@ -1415,7 +1427,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; int i = 0; struct sg_page_iter sg_iter; - dma_addr_t addr = 0; + dma_addr_t addr = 0; /* shut up gcc */ for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { addr = sg_dma_address(sg_iter.sg) + @@ -1461,7 +1473,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; int i = 0; struct sg_page_iter sg_iter; - dma_addr_t addr; + dma_addr_t addr = 0; for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { addr = sg_page_iter_dma_address(&sg_iter); @@ -1475,9 +1487,10 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, * of NUMA access patterns. Therefore, even with the way we assume * hardware should work, we must keep this posting read for paranoia. */ - if (i != 0) - WARN_ON(readl(&gtt_entries[i-1]) != - vm->pte_encode(addr, level, true, flags)); + if (i != 0) { + unsigned long gtt = readl(&gtt_entries[i-1]); + WARN_ON(gtt != vm->pte_encode(addr, level, true, flags)); + } /* This next bit makes the above posting read even more important. We * want to flush the TLBs only after we're certain all the PTE updates @@ -1674,10 +1687,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node, } } -void i915_gem_setup_global_gtt(struct drm_device *dev, - unsigned long start, - unsigned long mappable_end, - unsigned long end) +int i915_gem_setup_global_gtt(struct drm_device *dev, + unsigned long start, + unsigned long mappable_end, + unsigned long end) { /* Let GEM Manage all of the aperture.
* @@ -1693,6 +1706,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, struct drm_mm_node *entry; struct drm_i915_gem_object *obj; unsigned long hole_start, hole_end; + int ret; BUG_ON(mappable_end > end); @@ -1704,14 +1718,16 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, /* Mark any preallocated objects as occupied */ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); - int ret; + DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", i915_gem_obj_ggtt_offset(obj), obj->base.size); WARN_ON(i915_gem_obj_ggtt_bound(obj)); ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node); - if (ret) - DRM_DEBUG_KMS("Reservation failed\n"); + if (ret) { + DRM_DEBUG_KMS("Reservation failed: %i\n", ret); + return ret; + } obj->has_global_gtt_mapping = 1; } @@ -1728,6 +1744,22 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, /* And finally clear the reserved guard page */ ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true); + + if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) { + struct i915_hw_ppgtt *ppgtt; + + ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); + if (!ppgtt) + return -ENOMEM; + + ret = __hw_ppgtt_init(dev, ppgtt); + if (ret != 0) + return ret; + + dev_priv->mm.aliasing_ppgtt = ppgtt; + } + + return 0; } void i915_gem_init_global_gtt(struct drm_device *dev) @@ -1741,6 +1773,25 @@ void i915_gem_init_global_gtt(struct drm_device *dev) i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); } +void i915_global_gtt_cleanup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_address_space *vm = &dev_priv->gtt.base; + + if (dev_priv->mm.aliasing_ppgtt) { + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + + ppgtt->base.cleanup(&ppgtt->base); + } + + if (drm_mm_initialized(&vm->mm)) { + drm_mm_takedown(&vm->mm); + list_del(&vm->global_link); + } + + vm->cleanup(vm); +} + static int setup_scratch_page(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2009,10 +2060,6 @@ static void gen6_gmch_remove(struct i915_address_space *vm) struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); - if (drm_mm_initialized(&vm->mm)) { - drm_mm_takedown(&vm->mm); - list_del(&vm->global_link); - } iounmap(gtt->gsm); teardown_scratch_page(vm->dev); } @@ -2045,10 +2092,6 @@ static int i915_gmch_probe(struct drm_device *dev, static void i915_gmch_remove(struct i915_address_space *vm) { - if (drm_mm_initialized(&vm->mm)) { - drm_mm_takedown(&vm->mm); - list_del(&vm->global_link); - } intel_gmch_remove(); } @@ -2163,5 +2206,8 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, if (!vma) vma = __i915_gem_vma_create(obj, vm); + if (!i915_is_ggtt(vm)) + i915_ppgtt_get(i915_vm_to_ppgtt(vm)); + return vma; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 8d6f7c18c404..6280648d4805 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -34,6 +34,8 @@ #ifndef __I915_GEM_GTT_H__ #define __I915_GEM_GTT_H__ +struct drm_i915_file_private; + typedef uint32_t gen6_gtt_pte_t; typedef uint64_t gen8_gtt_pte_t; typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; @@ -258,7 +260,7 @@ struct i915_hw_ppgtt { dma_addr_t *gen8_pt_dma_addr[4]; }; - struct intel_context *ctx; + struct drm_i915_file_private *file_priv; int (*enable)(struct i915_hw_ppgtt *ppgtt); int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, @@ -269,11 +271,26 @@ struct i915_hw_ppgtt { 
int i915_gem_gtt_init(struct drm_device *dev); void i915_gem_init_global_gtt(struct drm_device *dev); -void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, - unsigned long mappable_end, unsigned long end); - -bool intel_enable_ppgtt(struct drm_device *dev, bool full); -int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); +int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, + unsigned long mappable_end, unsigned long end); +void i915_global_gtt_cleanup(struct drm_device *dev); + + +int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); +int i915_ppgtt_init_hw(struct drm_device *dev); +void i915_ppgtt_release(struct kref *kref); +struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev, + struct drm_i915_file_private *fpriv); +static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt) +{ + if (ppgtt) + kref_get(&ppgtt->ref); +} +static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) +{ + if (ppgtt) + kref_put(&ppgtt->ref, i915_ppgtt_release); +} void i915_check_and_clear_faults(struct drm_device *dev); void i915_gem_suspend_gtt_mappings(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index cb150e8b4336..7e623bf097a1 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -376,7 +376,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, if (ret == 0) { obj->fence_dirty = - obj->fenced_gpu_access || + obj->last_fenced_seqno || obj->fence_reg != I915_FENCE_REG_NONE; obj->tiling_mode = args->tiling_mode; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 0b3f69439451..35e70d5d6282 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -192,10 +192,10 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, struct drm_i915_error_buffer *err, int count) { - err_printf(m, "%s [%d]:\n", name, count); + err_printf(m, " %s [%d]:\n", name, count); while (count--) { - err_printf(m, " %08x %8u %02x %02x %x %x", + err_printf(m, " %08x %8u %02x %02x %x %x", err->gtt_offset, err->size, err->read_domains, @@ -229,6 +229,8 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a) return "wait"; case HANGCHECK_ACTIVE: return "active"; + case HANGCHECK_ACTIVE_LOOP: + return "active (loop)"; case HANGCHECK_KICK: return "kick"; case HANGCHECK_HUNG: @@ -359,6 +361,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device); err_printf(m, "EIR: 0x%08x\n", error->eir); err_printf(m, "IER: 0x%08x\n", error->ier); + if (INTEL_INFO(dev)->gen >= 8) { + for (i = 0; i < 4; i++) + err_printf(m, "GTIER gt %d: 0x%08x\n", i, + error->gtier[i]); + } else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev)) + err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]); err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); @@ -385,15 +393,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, i915_ring_error_state(m, dev, &error->ring[i]); } - if (error->active_bo) + for (i = 0; i < error->vm_count; i++) { + err_printf(m, "vm[%d]\n", i); + print_error_buffers(m, "Active", - error->active_bo[0], - error->active_bo_count[0]); + error->active_bo[i], + error->active_bo_count[i]); - if (error->pinned_bo) 
print_error_buffers(m, "Pinned", - error->pinned_bo[0], - error->pinned_bo_count[0]); + error->pinned_bo[i], + error->pinned_bo_count[i]); + } for (i = 0; i < ARRAY_SIZE(error->ring); i++) { obj = error->ring[i].batchbuffer; @@ -636,13 +646,15 @@ unwind: (src)->base.size>>PAGE_SHIFT) static void capture_bo(struct drm_i915_error_buffer *err, - struct drm_i915_gem_object *obj) + struct i915_vma *vma) { + struct drm_i915_gem_object *obj = vma->obj; + err->size = obj->base.size; err->name = obj->base.name; err->rseqno = obj->last_read_seqno; err->wseqno = obj->last_write_seqno; - err->gtt_offset = i915_gem_obj_ggtt_offset(obj); + err->gtt_offset = vma->node.start; err->read_domains = obj->base.read_domains; err->write_domain = obj->base.write_domain; err->fence_reg = obj->fence_reg; @@ -666,7 +678,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err, int i = 0; list_for_each_entry(vma, head, mm_list) { - capture_bo(err++, vma->obj); + capture_bo(err++, vma); if (++i == count) break; } @@ -675,21 +687,27 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err, } static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, - int count, struct list_head *head) + int count, struct list_head *head, + struct i915_address_space *vm) { struct drm_i915_gem_object *obj; - int i = 0; + struct drm_i915_error_buffer * const first = err; + struct drm_i915_error_buffer * const last = err + count; list_for_each_entry(obj, head, global_list) { - if (!i915_gem_obj_is_pinned(obj)) - continue; + struct i915_vma *vma; - capture_bo(err++, obj); - if (++i == count) + if (err == last) break; + + list_for_each_entry(vma, &obj->vma_list, vma_link) + if (vma->vm == vm && vma->pin_count > 0) { + capture_bo(err++, vma); + break; + } } - return i; + return err - first; } /* Generate a semi-unique error code. The code is not meant to have meaning, The @@ -784,7 +802,8 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv, if (ring == to) continue; - signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4; + signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1)) + / 4; tmp = error->semaphore_obj->pages[0]; idx = intel_ring_sync_index(ring, to); @@ -958,6 +977,12 @@ static void i915_gem_record_rings(struct drm_device *dev, request = i915_gem_find_active_request(ring); if (request) { + struct i915_address_space *vm; + + vm = request->ctx && request->ctx->ppgtt ? + &request->ctx->ppgtt->base : + &dev_priv->gtt.base; + /* We need to copy these to an anonymous buffer * as the simplest method to avoid being overwritten * by userspace. @@ -965,9 +990,7 @@ static void i915_gem_record_rings(struct drm_device *dev, error->ring[i].batchbuffer = i915_error_object_create(dev_priv, request->batch_obj, - request->ctx ? 
- request->ctx->vm : - &dev_priv->gtt.base); + vm); if (HAS_BROKEN_CS_TLB(dev_priv->dev) && ring->scratch.obj) @@ -1040,9 +1063,14 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv, list_for_each_entry(vma, &vm->active_list, mm_list) i++; error->active_bo_count[ndx] = i; - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) - if (i915_gem_obj_is_pinned(obj)) - i++; + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + list_for_each_entry(vma, &obj->vma_list, vma_link) + if (vma->vm == vm && vma->pin_count > 0) { + i++; + break; + } + } error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; if (i) { @@ -1061,7 +1089,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv, error->pinned_bo_count[ndx] = capture_pinned_bo(pinned_bo, error->pinned_bo_count[ndx], - &dev_priv->mm.bound_list); + &dev_priv->mm.bound_list, vm); error->active_bo[ndx] = active_bo; error->pinned_bo[ndx] = pinned_bo; } @@ -1082,8 +1110,25 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count), GFP_ATOMIC); - list_for_each_entry(vm, &dev_priv->vm_list, global_link) - i915_gem_capture_vm(dev_priv, error, vm, i++); + if (error->active_bo == NULL || + error->pinned_bo == NULL || + error->active_bo_count == NULL || + error->pinned_bo_count == NULL) { + kfree(error->active_bo); + kfree(error->active_bo_count); + kfree(error->pinned_bo); + kfree(error->pinned_bo_count); + + error->active_bo = NULL; + error->active_bo_count = NULL; + error->pinned_bo = NULL; + error->pinned_bo_count = NULL; + } else { + list_for_each_entry(vm, &dev_priv->vm_list, global_link) + i915_gem_capture_vm(dev_priv, error, vm, i++); + + error->vm_count = cnt; + } } /* Capture all registers which don't fit into another category. */ @@ -1091,6 +1136,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { struct drm_device *dev = dev_priv->dev; + int i; /* General organization * 1. 
Registers specific to a single generation @@ -1102,7 +1148,8 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, /* 1: Registers specific to a single generation */ if (IS_VALLEYVIEW(dev)) { - error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); + error->gtier[0] = I915_READ(GTIER); + error->ier = I915_READ(VLV_IER); error->forcewake = I915_READ(FORCEWAKE_VLV); } @@ -1135,16 +1182,18 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, if (HAS_HW_CONTEXTS(dev)) error->ccid = I915_READ(CCID); - if (HAS_PCH_SPLIT(dev)) - error->ier = I915_READ(DEIER) | I915_READ(GTIER); - else { - if (IS_GEN2(dev)) - error->ier = I915_READ16(IER); - else - error->ier = I915_READ(IER); + if (INTEL_INFO(dev)->gen >= 8) { + error->ier = I915_READ(GEN8_DE_MISC_IER); + for (i = 0; i < 4; i++) + error->gtier[i] = I915_READ(GEN8_GT_IER(i)); + } else if (HAS_PCH_SPLIT(dev)) { + error->ier = I915_READ(DEIER); + error->gtier[0] = I915_READ(GTIER); + } else if (IS_GEN2(dev)) { + error->ier = I915_READ16(IER); + } else if (!IS_VALLEYVIEW(dev)) { + error->ier = I915_READ(IER); } - - /* 4: Everything else */ error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 20dd9e233fc6..7391697c25e7 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); - if (!intel_irqs_enabled(dev_priv)) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; if ((dev_priv->irq_mask & mask) != mask) { @@ -1189,8 +1189,8 @@ static void i915_hotplug_work_func(struct work_struct *work) * some connectors */ if (hpd_disabled) { drm_kms_helper_poll_enable(dev); - mod_timer(&dev_priv->hotplug_reenable_timer, - jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); + mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work, + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -1213,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_kms_helper_hotplug_event(dev); } -static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv) -{ - del_timer_sync(&dev_priv->hotplug_reenable_timer); -} - static void ironlake_rps_change_irq_handler(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1327,10 +1322,10 @@ static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, * @dev_priv: DRM device private * */ -static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) +static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) { u32 residency_C0_up = 0, residency_C0_down = 0; - u8 new_delay, adj; + int new_delay, adj; dev_priv->rps.ei_interrupt_count++; @@ -1632,6 +1627,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, struct drm_i915_private *dev_priv, u32 master_ctl) { + struct intel_engine_cs *ring; u32 rcs, bcs, vcs; uint32_t tmp = 0; irqreturn_t ret = IRQ_NONE; @@ -1641,12 +1637,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (tmp) { I915_WRITE(GEN8_GT_IIR(0), tmp); ret = IRQ_HANDLED; + rcs = tmp >> GEN8_RCS_IRQ_SHIFT; - bcs = tmp >> GEN8_BCS_IRQ_SHIFT; + ring = &dev_priv->ring[RCS]; if (rcs & GT_RENDER_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[RCS]); + notify_ring(dev, ring); + if (rcs & GT_CONTEXT_SWITCH_INTERRUPT) + 
intel_execlists_handle_ctx_events(ring); + + bcs = tmp >> GEN8_BCS_IRQ_SHIFT; + ring = &dev_priv->ring[BCS]; if (bcs & GT_RENDER_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[BCS]); + notify_ring(dev, ring); + if (bcs & GT_CONTEXT_SWITCH_INTERRUPT) + intel_execlists_handle_ctx_events(ring); } else DRM_ERROR("The master control interrupt lied (GT0)!\n"); } @@ -1656,12 +1660,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (tmp) { I915_WRITE(GEN8_GT_IIR(1), tmp); ret = IRQ_HANDLED; + vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; + ring = &dev_priv->ring[VCS]; if (vcs & GT_RENDER_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[VCS]); + notify_ring(dev, ring); + if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) + intel_execlists_handle_ctx_events(ring); + vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; + ring = &dev_priv->ring[VCS2]; if (vcs & GT_RENDER_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[VCS2]); + notify_ring(dev, ring); + if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) + intel_execlists_handle_ctx_events(ring); } else DRM_ERROR("The master control interrupt lied (GT1)!\n"); } @@ -1682,9 +1694,13 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (tmp) { I915_WRITE(GEN8_GT_IIR(3), tmp); ret = IRQ_HANDLED; + vcs = tmp >> GEN8_VECS_IRQ_SHIFT; + ring = &dev_priv->ring[VECS]; if (vcs & GT_RENDER_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[VECS]); + notify_ring(dev, ring); + if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) + intel_execlists_handle_ctx_events(ring); } else DRM_ERROR("The master control interrupt lied (GT3)!\n"); } @@ -1777,7 +1793,9 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; } - DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd); + DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", + port_name(port), + long_hpd ? "long" : "short"); /* for long HPD pulses we want to have the digital queue happen, but we still want HPD storm detection to function. 
*/ if (long_hpd) { @@ -1989,14 +2007,9 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) { - struct intel_crtc *crtc; - if (!drm_handle_vblank(dev, pipe)) return false; - crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); - wake_up(&crtc->vbl_wait); - return true; } @@ -3189,8 +3202,14 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd) struct drm_i915_private *dev_priv = dev->dev_private; u32 tmp; - if (ring->hangcheck.acthd != acthd) - return HANGCHECK_ACTIVE; + if (acthd != ring->hangcheck.acthd) { + if (acthd > ring->hangcheck.max_acthd) { + ring->hangcheck.max_acthd = acthd; + return HANGCHECK_ACTIVE; + } + + return HANGCHECK_ACTIVE_LOOP; + } if (IS_GEN2(dev)) return HANGCHECK_HUNG; @@ -3301,8 +3320,9 @@ static void i915_hangcheck_elapsed(unsigned long data) switch (ring->hangcheck.action) { case HANGCHECK_IDLE: case HANGCHECK_WAIT: - break; case HANGCHECK_ACTIVE: + break; + case HANGCHECK_ACTIVE_LOOP: ring->hangcheck.score += BUSY; break; case HANGCHECK_KICK: @@ -3322,6 +3342,8 @@ static void i915_hangcheck_elapsed(unsigned long data) */ if (ring->hangcheck.score > 0) ring->hangcheck.score--; + + ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; } ring->hangcheck.seqno = seqno; @@ -3518,18 +3540,17 @@ static void cherryview_irq_preinstall(struct drm_device *dev) static void ibx_hpd_irq_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *intel_encoder; u32 hotplug_irqs, hotplug, enabled_irqs = 0; if (HAS_PCH_IBX(dev)) { hotplug_irqs = SDE_HOTPLUG_MASK; - list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) + for_each_intel_encoder(dev, intel_encoder) if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; } else { hotplug_irqs = SDE_HOTPLUG_MASK_CPT; - list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) + for_each_intel_encoder(dev, intel_encoder) if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; } @@ -3783,12 +3804,17 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) /* These are interrupts we'll toggle with the ring mask register */ uint32_t gt_interrupts[] = { GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT | - GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, + GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | - GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | + GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 0, - GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT + GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT }; for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) @@ -3883,8 +3909,6 @@ static void gen8_irq_uninstall(struct drm_device *dev) if (!dev_priv) return; - intel_hpd_irq_uninstall(dev_priv); - gen8_irq_reset(dev); } @@ -3899,8 +3923,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev) I915_WRITE(VLV_MASTER_IER, 0); - intel_hpd_irq_uninstall(dev_priv); - for_each_pipe(pipe) 
I915_WRITE(PIPESTAT(pipe), 0xffff); @@ -3979,8 +4001,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev) if (!dev_priv) return; - intel_hpd_irq_uninstall(dev_priv); - ironlake_irq_reset(dev); } @@ -4351,8 +4371,6 @@ static void i915_irq_uninstall(struct drm_device * dev) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - intel_hpd_irq_uninstall(dev_priv); - if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -4448,7 +4466,6 @@ static int i965_irq_postinstall(struct drm_device *dev) static void i915_hpd_irq_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *intel_encoder; u32 hotplug_en; @@ -4459,7 +4476,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev) hotplug_en &= ~HOTPLUG_INT_EN_MASK; /* Note HDMI and DP share hotplug bits */ /* enable bits are the same for all generations */ - list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) + for_each_intel_encoder(dev, intel_encoder) if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; /* Programming the CRT detection parameters tends @@ -4589,8 +4606,6 @@ static void i965_irq_uninstall(struct drm_device * dev) if (!dev_priv) return; - intel_hpd_irq_uninstall(dev_priv); - I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -4606,14 +4621,18 @@ static void i965_irq_uninstall(struct drm_device * dev) I915_WRITE(IIR, I915_READ(IIR)); } -static void intel_hpd_irq_reenable(unsigned long data) +static void intel_hpd_irq_reenable(struct work_struct *work) { - struct drm_i915_private *dev_priv = (struct drm_i915_private *)data; + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), + hotplug_reenable_work.work); struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; unsigned long irqflags; int i; + intel_runtime_pm_get(dev_priv); + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { struct drm_connector *connector; @@ -4639,6 +4658,8 @@ static void intel_hpd_irq_reenable(unsigned long data) if (dev_priv->display.hpd_irq_setup) dev_priv->display.hpd_irq_setup(dev); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + intel_runtime_pm_put(dev_priv); } void intel_irq_init(struct drm_device *dev) @@ -4661,8 +4682,8 @@ void intel_irq_init(struct drm_device *dev) setup_timer(&dev_priv->gpu_error.hangcheck_timer, i915_hangcheck_elapsed, (unsigned long) dev); - setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable, - (unsigned long) dev_priv); + INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, + intel_hpd_irq_reenable); pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 62ee8308d682..139f490d464d 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -35,9 +35,10 @@ struct i915_params i915 __read_mostly = { .vbt_sdvo_panel_type = -1, .enable_rc6 = -1, .enable_fbc = -1, + .enable_execlists = 0, .enable_hangcheck = true, .enable_ppgtt = -1, - .enable_psr = 1, + .enable_psr = 0, .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), .disable_power_well = 1, .enable_ips = 1, @@ -118,8 +119,13 
@@ MODULE_PARM_DESC(enable_ppgtt, "Override PPGTT usage. " "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); +module_param_named(enable_execlists, i915.enable_execlists, int, 0400); +MODULE_PARM_DESC(enable_execlists, + "Override execlists usage. " + "(-1=auto, 0=disabled [default], 1=enabled)"); + module_param_named(enable_psr, i915.enable_psr, int, 0600); -MODULE_PARM_DESC(enable_psr, "Enable PSR (default: true)"); +MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600); MODULE_PARM_DESC(preliminary_hw_support, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index fe5c27630e95..203062e93452 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -272,6 +272,7 @@ #define MI_SEMAPHORE_POLL (1<<15) #define MI_SEMAPHORE_SAD_GTE_SDD (1<<12) #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) +#define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2) #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) #define MI_STORE_DWORD_INDEX_SHIFT 2 @@ -282,6 +283,7 @@ * address/value pairs. Don't overdue it, though, x <= 2^4 must hold! */ #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1) +#define MI_LRI_FORCE_POSTED (1<<12) #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1) #define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1) #define MI_SRM_LRM_GLOBAL_GTT (1<<22) @@ -497,10 +499,26 @@ #define BUNIT_REG_BISOC 0x11 #define PUNIT_REG_DSPFREQ 0x36 +#define DSPFREQSTAT_SHIFT_CHV 24 +#define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV) +#define DSPFREQGUAR_SHIFT_CHV 8 +#define DSPFREQGUAR_MASK_CHV (0x1f << DSPFREQGUAR_SHIFT_CHV) #define DSPFREQSTAT_SHIFT 30 #define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) #define DSPFREQGUAR_SHIFT 14 #define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) +#define _DP_SSC(val, pipe) ((val) << (2 * (pipe))) +#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe)) +#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe)) +#define DP_SSC_CLK_GATE(pipe) _DP_SSC(0x1, (pipe)) +#define DP_SSC_RESET(pipe) _DP_SSC(0x2, (pipe)) +#define DP_SSC_PWR_GATE(pipe) _DP_SSC(0x3, (pipe)) +#define _DP_SSS(val, pipe) ((val) << (2 * (pipe) + 16)) +#define DP_SSS_MASK(pipe) _DP_SSS(0x3, (pipe)) +#define DP_SSS_PWR_ON(pipe) _DP_SSS(0x0, (pipe)) +#define DP_SSS_CLK_GATE(pipe) _DP_SSS(0x1, (pipe)) +#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe)) +#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe)) /* See the PUNIT HAS v0.8 for the below bits */ enum punit_power_well { @@ -514,6 +532,11 @@ enum punit_power_well { PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9, PUNIT_POWER_WELL_DPIO_RX0 = 10, PUNIT_POWER_WELL_DPIO_RX1 = 11, + PUNIT_POWER_WELL_DPIO_CMN_D = 12, + /* FIXME: guesswork below */ + PUNIT_POWER_WELL_DPIO_TX_D_LANES_01 = 13, + PUNIT_POWER_WELL_DPIO_TX_D_LANES_23 = 14, + PUNIT_POWER_WELL_DPIO_RX2 = 15, PUNIT_POWER_WELL_NUM, }; @@ -834,8 +857,8 @@ enum punit_power_well { #define _VLV_TX_DW2_CH0 0x8288 #define _VLV_TX_DW2_CH1 0x8488 -#define DPIO_SWING_MARGIN_SHIFT 16 -#define DPIO_SWING_MARGIN_MASK (0xff << DPIO_SWING_MARGIN_SHIFT) +#define DPIO_SWING_MARGIN000_SHIFT 16 +#define DPIO_SWING_MARGIN000_MASK (0xff << DPIO_SWING_MARGIN000_SHIFT) #define DPIO_UNIQ_TRANS_SCALE_SHIFT 8 #define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1) @@ -843,12 +866,16 @@ enum punit_power_well { #define _VLV_TX_DW3_CH1 0x848c /* The following bit for CHV phy */ #define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27) +#define 
DPIO_SWING_MARGIN101_SHIFT 16 +#define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT) #define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1) #define _VLV_TX_DW4_CH0 0x8290 #define _VLV_TX_DW4_CH1 0x8490 #define DPIO_SWING_DEEMPH9P5_SHIFT 24 #define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT) +#define DPIO_SWING_DEEMPH6P0_SHIFT 16 +#define DPIO_SWING_DEEMPH6P0_MASK (0xff << DPIO_SWING_DEEMPH6P0_SHIFT) #define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1) #define _VLV_TX3_DW4_CH0 0x690 @@ -1060,6 +1087,7 @@ enum punit_power_well { #define RING_ACTHD_UDW(base) ((base)+0x5c) #define RING_NOPID(base) ((base)+0x94) #define RING_IMR(base) ((base)+0xa8) +#define RING_HWSTAM(base) ((base)+0x98) #define RING_TIMESTAMP(base) ((base)+0x358) #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 @@ -1376,6 +1404,7 @@ enum punit_power_well { #define GT_BSD_CS_ERROR_INTERRUPT (1 << 15) #define GT_BSD_USER_INTERRUPT (1 << 12) #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */ +#define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8) #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */ #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4) #define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3) @@ -1515,6 +1544,7 @@ enum punit_power_well { /* Framebuffer compression for Ironlake */ #define ILK_DPFC_CB_BASE 0x43200 #define ILK_DPFC_CONTROL 0x43208 +#define FBC_CTL_FALSE_COLOR (1<<10) /* The bit 28-8 is reserved */ #define DPFC_RESERVED (0x1FFFFF00) #define ILK_DPFC_RECOMP_CTL 0x4320c @@ -1671,12 +1701,9 @@ enum punit_power_well { #define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240) #define DPLL_PORTD_READY_MASK (0xf) #define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100) -#define PHY_COM_LANE_RESET_DEASSERT(phy, val) \ - ((phy == DPIO_PHY0) ? (val | 1) : (val | 2)) -#define PHY_COM_LANE_RESET_ASSERT(phy, val) \ - ((phy == DPIO_PHY0) ? (val & ~1) : (val & ~2)) +#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy)) #define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104) -#define PHY_POWERGOOD(phy) ((phy == DPIO_PHY0) ? (1<<31) : (1<<30)) +#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? 
(1<<31) : (1<<30)) /* * The i830 generation, in LVDS mode, defines P1 as the bit number set within @@ -3472,6 +3499,8 @@ enum punit_power_well { #define DP_LINK_TRAIN_OFF (3 << 28) #define DP_LINK_TRAIN_MASK (3 << 28) #define DP_LINK_TRAIN_SHIFT 28 +#define DP_LINK_TRAIN_PAT_3_CHV (1 << 14) +#define DP_LINK_TRAIN_MASK_CHV ((3 << 28)|(1<<14)) /* CPT Link training mode */ #define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) @@ -3728,7 +3757,6 @@ enum punit_power_well { #define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) #define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) #define PIPE_DPST_EVENT_STATUS (1UL<<7) -#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) #define PIPE_A_PSR_STATUS_VLV (1UL<<6) #define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) #define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) @@ -3838,73 +3866,151 @@ enum punit_power_well { #define DSPARB_BEND_SHIFT 9 /* on 855 */ #define DSPARB_AEND_SHIFT 0 +/* pnv/gen4/g4x/vlv/chv */ #define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) -#define DSPFW_SR_SHIFT 23 -#define DSPFW_SR_MASK (0x1ff<<23) -#define DSPFW_CURSORB_SHIFT 16 -#define DSPFW_CURSORB_MASK (0x3f<<16) -#define DSPFW_PLANEB_SHIFT 8 -#define DSPFW_PLANEB_MASK (0x7f<<8) -#define DSPFW_PLANEA_MASK (0x7f) +#define DSPFW_SR_SHIFT 23 +#define DSPFW_SR_MASK (0x1ff<<23) +#define DSPFW_CURSORB_SHIFT 16 +#define DSPFW_CURSORB_MASK (0x3f<<16) +#define DSPFW_PLANEB_SHIFT 8 +#define DSPFW_PLANEB_MASK (0x7f<<8) +#define DSPFW_PLANEB_MASK_VLV (0xff<<8) /* vlv/chv */ +#define DSPFW_PLANEA_SHIFT 0 +#define DSPFW_PLANEA_MASK (0x7f<<0) +#define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */ #define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038) -#define DSPFW_CURSORA_MASK 0x00003f00 -#define DSPFW_CURSORA_SHIFT 8 -#define DSPFW_PLANEC_MASK (0x7f) +#define DSPFW_FBC_SR_EN (1<<31) /* g4x */ +#define DSPFW_FBC_SR_SHIFT 28 +#define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */ +#define DSPFW_FBC_HPLL_SR_SHIFT 24 +#define DSPFW_FBC_HPLL_SR_MASK (0xf<<24) /* g4x */ +#define DSPFW_SPRITEB_SHIFT (16) +#define DSPFW_SPRITEB_MASK (0x7f<<16) /* g4x */ +#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */ +#define DSPFW_CURSORA_SHIFT 8 +#define DSPFW_CURSORA_MASK (0x3f<<8) +#define DSPFW_PLANEC_SHIFT_OLD 0 +#define DSPFW_PLANEC_MASK_OLD (0x7f<<0) /* pre-gen4 sprite C */ +#define DSPFW_SPRITEA_SHIFT 0 +#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */ +#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */ #define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c) -#define DSPFW_HPLL_SR_EN (1<<31) -#define DSPFW_CURSOR_SR_SHIFT 24 +#define DSPFW_HPLL_SR_EN (1<<31) #define PINEVIEW_SELF_REFRESH_EN (1<<30) +#define DSPFW_CURSOR_SR_SHIFT 24 #define DSPFW_CURSOR_SR_MASK (0x3f<<24) #define DSPFW_HPLL_CURSOR_SHIFT 16 #define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) -#define DSPFW_HPLL_SR_MASK (0x1ff) -#define DSPFW4 (dev_priv->info.display_mmio_offset + 0x70070) -#define DSPFW7 (dev_priv->info.display_mmio_offset + 0x7007c) +#define DSPFW_HPLL_SR_SHIFT 0 +#define DSPFW_HPLL_SR_MASK (0x1ff<<0) + +/* vlv/chv */ +#define DSPFW4 (VLV_DISPLAY_BASE + 0x70070) +#define DSPFW_SPRITEB_WM1_SHIFT 16 +#define DSPFW_SPRITEB_WM1_MASK (0xff<<16) +#define DSPFW_CURSORA_WM1_SHIFT 8 +#define DSPFW_CURSORA_WM1_MASK (0x3f<<8) +#define DSPFW_SPRITEA_WM1_SHIFT 0 +#define DSPFW_SPRITEA_WM1_MASK (0xff<<0) +#define DSPFW5 (VLV_DISPLAY_BASE + 0x70074) +#define DSPFW_PLANEB_WM1_SHIFT 24 +#define DSPFW_PLANEB_WM1_MASK (0xff<<24) +#define DSPFW_PLANEA_WM1_SHIFT 16 +#define DSPFW_PLANEA_WM1_MASK (0xff<<16) +#define DSPFW_CURSORB_WM1_SHIFT 8 +#define 
DSPFW_CURSORB_WM1_MASK (0x3f<<8) +#define DSPFW_CURSOR_SR_WM1_SHIFT 0 +#define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0) +#define DSPFW6 (VLV_DISPLAY_BASE + 0x70078) +#define DSPFW_SR_WM1_SHIFT 0 +#define DSPFW_SR_WM1_MASK (0x1ff<<0) +#define DSPFW7 (VLV_DISPLAY_BASE + 0x7007c) +#define DSPFW7_CHV (VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */ +#define DSPFW_SPRITED_WM1_SHIFT 24 +#define DSPFW_SPRITED_WM1_MASK (0xff<<24) +#define DSPFW_SPRITED_SHIFT 16 +#define DSPFW_SPRITED_MASK (0xff<<16) +#define DSPFW_SPRITEC_WM1_SHIFT 8 +#define DSPFW_SPRITEC_WM1_MASK (0xff<<8) +#define DSPFW_SPRITEC_SHIFT 0 +#define DSPFW_SPRITEC_MASK (0xff<<0) +#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8) +#define DSPFW_SPRITEF_WM1_SHIFT 24 +#define DSPFW_SPRITEF_WM1_MASK (0xff<<24) +#define DSPFW_SPRITEF_SHIFT 16 +#define DSPFW_SPRITEF_MASK (0xff<<16) +#define DSPFW_SPRITEE_WM1_SHIFT 8 +#define DSPFW_SPRITEE_WM1_MASK (0xff<<8) +#define DSPFW_SPRITEE_SHIFT 0 +#define DSPFW_SPRITEE_MASK (0xff<<0) +#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */ +#define DSPFW_PLANEC_WM1_SHIFT 24 +#define DSPFW_PLANEC_WM1_MASK (0xff<<24) +#define DSPFW_PLANEC_SHIFT 16 +#define DSPFW_PLANEC_MASK (0xff<<16) +#define DSPFW_CURSORC_WM1_SHIFT 8 +#define DSPFW_CURSORC_WM1_MASK (0x3f<<16) +#define DSPFW_CURSORC_SHIFT 0 +#define DSPFW_CURSORC_MASK (0x3f<<0) + +/* vlv/chv high order bits */ +#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064) +#define DSPFW_SR_HI_SHIFT 24 +#define DSPFW_SR_HI_MASK (1<<24) +#define DSPFW_SPRITEF_HI_SHIFT 23 +#define DSPFW_SPRITEF_HI_MASK (1<<23) +#define DSPFW_SPRITEE_HI_SHIFT 22 +#define DSPFW_SPRITEE_HI_MASK (1<<22) +#define DSPFW_PLANEC_HI_SHIFT 21 +#define DSPFW_PLANEC_HI_MASK (1<<21) +#define DSPFW_SPRITED_HI_SHIFT 20 +#define DSPFW_SPRITED_HI_MASK (1<<20) +#define DSPFW_SPRITEC_HI_SHIFT 16 +#define DSPFW_SPRITEC_HI_MASK (1<<16) +#define DSPFW_PLANEB_HI_SHIFT 12 +#define DSPFW_PLANEB_HI_MASK (1<<12) +#define DSPFW_SPRITEB_HI_SHIFT 8 +#define DSPFW_SPRITEB_HI_MASK (1<<8) +#define DSPFW_SPRITEA_HI_SHIFT 4 +#define DSPFW_SPRITEA_HI_MASK (1<<4) +#define DSPFW_PLANEA_HI_SHIFT 0 +#define DSPFW_PLANEA_HI_MASK (1<<0) +#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068) +#define DSPFW_SR_WM1_HI_SHIFT 24 +#define DSPFW_SR_WM1_HI_MASK (1<<24) +#define DSPFW_SPRITEF_WM1_HI_SHIFT 23 +#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23) +#define DSPFW_SPRITEE_WM1_HI_SHIFT 22 +#define DSPFW_SPRITEE_WM1_HI_MASK (1<<22) +#define DSPFW_PLANEC_WM1_HI_SHIFT 21 +#define DSPFW_PLANEC_WM1_HI_MASK (1<<21) +#define DSPFW_SPRITED_WM1_HI_SHIFT 20 +#define DSPFW_SPRITED_WM1_HI_MASK (1<<20) +#define DSPFW_SPRITEC_WM1_HI_SHIFT 16 +#define DSPFW_SPRITEC_WM1_HI_MASK (1<<16) +#define DSPFW_PLANEB_WM1_HI_SHIFT 12 +#define DSPFW_PLANEB_WM1_HI_MASK (1<<12) +#define DSPFW_SPRITEB_WM1_HI_SHIFT 8 +#define DSPFW_SPRITEB_WM1_HI_MASK (1<<8) +#define DSPFW_SPRITEA_WM1_HI_SHIFT 4 +#define DSPFW_SPRITEA_WM1_HI_MASK (1<<4) +#define DSPFW_PLANEA_WM1_HI_SHIFT 0 +#define DSPFW_PLANEA_WM1_HI_MASK (1<<0) /* drain latency register values*/ #define DRAIN_LATENCY_PRECISION_32 32 -#define DRAIN_LATENCY_PRECISION_16 16 -#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050) -#define DDL_CURSORA_PRECISION_32 (1<<31) -#define DDL_CURSORA_PRECISION_16 (0<<31) -#define DDL_CURSORA_SHIFT 24 -#define DDL_SPRITEB_PRECISION_32 (1<<23) -#define DDL_SPRITEB_PRECISION_16 (0<<23) -#define DDL_SPRITEB_SHIFT 16 -#define DDL_SPRITEA_PRECISION_32 (1<<15) -#define DDL_SPRITEA_PRECISION_16 (0<<15) -#define DDL_SPRITEA_SHIFT 8 -#define DDL_PLANEA_PRECISION_32 (1<<7) -#define DDL_PLANEA_PRECISION_16 (0<<7) 
-#define DDL_PLANEA_SHIFT 0 - -#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054) -#define DDL_CURSORB_PRECISION_32 (1<<31) -#define DDL_CURSORB_PRECISION_16 (0<<31) -#define DDL_CURSORB_SHIFT 24 -#define DDL_SPRITED_PRECISION_32 (1<<23) -#define DDL_SPRITED_PRECISION_16 (0<<23) -#define DDL_SPRITED_SHIFT 16 -#define DDL_SPRITEC_PRECISION_32 (1<<15) -#define DDL_SPRITEC_PRECISION_16 (0<<15) -#define DDL_SPRITEC_SHIFT 8 -#define DDL_PLANEB_PRECISION_32 (1<<7) -#define DDL_PLANEB_PRECISION_16 (0<<7) -#define DDL_PLANEB_SHIFT 0 - -#define VLV_DDL3 (VLV_DISPLAY_BASE + 0x70058) -#define DDL_CURSORC_PRECISION_32 (1<<31) -#define DDL_CURSORC_PRECISION_16 (0<<31) -#define DDL_CURSORC_SHIFT 24 -#define DDL_SPRITEF_PRECISION_32 (1<<23) -#define DDL_SPRITEF_PRECISION_16 (0<<23) -#define DDL_SPRITEF_SHIFT 16 -#define DDL_SPRITEE_PRECISION_32 (1<<15) -#define DDL_SPRITEE_PRECISION_16 (0<<15) -#define DDL_SPRITEE_SHIFT 8 -#define DDL_PLANEC_PRECISION_32 (1<<7) -#define DDL_PLANEC_PRECISION_16 (0<<7) -#define DDL_PLANEC_SHIFT 0 +#define DRAIN_LATENCY_PRECISION_64 64 +#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe)) +#define DDL_CURSOR_PRECISION_64 (1<<31) +#define DDL_CURSOR_PRECISION_32 (0<<31) +#define DDL_CURSOR_SHIFT 24 +#define DDL_SPRITE_PRECISION_64(sprite) (1<<(15+8*(sprite))) +#define DDL_SPRITE_PRECISION_32(sprite) (0<<(15+8*(sprite))) +#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite)) +#define DDL_PLANE_PRECISION_64 (1<<7) +#define DDL_PLANE_PRECISION_32 (0<<7) +#define DDL_PLANE_SHIFT 0 +#define DRAIN_LATENCY_MASK 0x7f /* FIFO watermark sizes etc */ #define G4X_FIFO_LINE_SIZE 64 @@ -4022,7 +4128,8 @@ enum punit_power_well { /* Old style CUR*CNTR flags (desktop 8xx) */ #define CURSOR_ENABLE 0x80000000 #define CURSOR_GAMMA_ENABLE 0x40000000 -#define CURSOR_STRIDE_MASK 0x30000000 +#define CURSOR_STRIDE_SHIFT 28 +#define CURSOR_STRIDE(x) ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */ #define CURSOR_PIPE_CSC_ENABLE (1<<24) #define CURSOR_FORMAT_SHIFT 24 #define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) @@ -4191,6 +4298,7 @@ enum punit_power_well { #define DVS_YUV_ORDER_UYVY (1<<16) #define DVS_YUV_ORDER_YVYU (2<<16) #define DVS_YUV_ORDER_VYUY (3<<16) +#define DVS_ROTATE_180 (1<<15) #define DVS_DEST_KEY (1<<2) #define DVS_TRICKLE_FEED_DISABLE (1<<14) #define DVS_TILED (1<<10) @@ -4261,6 +4369,7 @@ enum punit_power_well { #define SPRITE_YUV_ORDER_UYVY (1<<16) #define SPRITE_YUV_ORDER_YVYU (2<<16) #define SPRITE_YUV_ORDER_VYUY (3<<16) +#define SPRITE_ROTATE_180 (1<<15) #define SPRITE_TRICKLE_FEED_DISABLE (1<<14) #define SPRITE_INT_GAMMA_ENABLE (1<<13) #define SPRITE_TILED (1<<10) @@ -4334,6 +4443,7 @@ enum punit_power_well { #define SP_YUV_ORDER_UYVY (1<<16) #define SP_YUV_ORDER_YVYU (2<<16) #define SP_YUV_ORDER_VYUY (3<<16) +#define SP_ROTATE_180 (1<<15) #define SP_TILED (1<<10) #define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184) #define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188) @@ -5403,7 +5513,6 @@ enum punit_power_well { #define VLV_GTLC_ALLOWWAKEERR (1 << 1) #define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) #define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) -#define VLV_GTLC_SURVIVABILITY_REG 0x130098 #define FORCEWAKE_MT 0xa188 /* multi-threaded */ #define FORCEWAKE_KERNEL 0x1 #define FORCEWAKE_USER 0x2 diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 608ed302f24d..031c5657255d 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -878,7 +878,7 @@ err: /* error during parsing so set all pointers to null * 
because of partial parsing */ - memset(dev_priv->vbt.dsi.sequence, 0, MIPI_SEQ_MAX); + memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence)); } static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, @@ -976,12 +976,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, if (bdb->version >= 158) { /* The VBT HDMI level shift values match the table we have. */ hdmi_level_shift = child->raw[7] & 0xF; - if (hdmi_level_shift < 0xC) { - DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n", - port_name(port), - hdmi_level_shift); - info->hdmi_level_shift = hdmi_level_shift; - } + DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n", + port_name(port), + hdmi_level_shift); + info->hdmi_level_shift = hdmi_level_shift; } } @@ -1114,8 +1112,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; - /* Recommended BSpec default: 800mV 0dB. */ - info->hdmi_level_shift = 6; + info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN; info->supports_dvi = (port != PORT_A && port != PORT_E); info->supports_hdmi = info->supports_dvi; diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index b98667796337..905999bee2ac 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -802,7 +802,8 @@ struct mipi_config { u16 rsvd4; - u8 rsvd5[5]; + u8 rsvd5; + u32 target_burst_mode_freq; u32 dsi_ddr_clk; u32 bridge_ref_clk; diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 2efaf8e8d9c4..e8abfce40976 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -699,16 +699,21 @@ intel_crt_detect(struct drm_connector *connector, bool force) goto out; } + drm_modeset_acquire_init(&ctx, 0); + /* for pre-945g platforms use load detect */ if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; else status = intel_crt_load_detect(crt); - intel_release_load_detect_pipe(connector, &tmp, &ctx); + intel_release_load_detect_pipe(connector, &tmp); } else status = connector_status_unknown; + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + out: intel_display_power_put(dev_priv, power_domain); return status; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 5db0b5552e39..02d55843c78d 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -33,7 +33,7 @@ * automatically adapt to HDMI connections as well */ static const u32 hsw_ddi_translations_dp[] = { - 0x00FFFFFF, 0x0006000E, /* DP parameters */ + 0x00FFFFFF, 0x0006000E, 0x00D75FFF, 0x0005000A, 0x00C30FFF, 0x00040006, 0x80AAAFFF, 0x000B0000, @@ -45,7 +45,7 @@ static const u32 hsw_ddi_translations_dp[] = { }; static const u32 hsw_ddi_translations_fdi[] = { - 0x00FFFFFF, 0x0007000E, /* FDI parameters */ + 0x00FFFFFF, 0x0007000E, 0x00D75FFF, 0x000F000A, 0x00C30FFF, 0x00060006, 0x00AAAFFF, 0x001E0000, @@ -73,7 +73,7 @@ static const u32 hsw_ddi_translations_hdmi[] = { }; static const u32 bdw_ddi_translations_edp[] = { - 0x00FFFFFF, 0x00000012, /* eDP parameters */ + 0x00FFFFFF, 0x00000012, 0x00EBAFFF, 0x00020011, 0x00C71FFF, 0x0006000F, 0x00AAAFFF, 0x000E000A, @@ -82,11 +82,10 @@ static const u32 bdw_ddi_translations_edp[] = { 0x00BEEFFF, 0x000A000C, 0x00FFFFFF, 0x0005000F, 0x00DB6FFF, 0x000A000C, - 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ }; static 
const u32 bdw_ddi_translations_dp[] = { - 0x00FFFFFF, 0x0007000E, /* DP parameters */ + 0x00FFFFFF, 0x0007000E, 0x00D75FFF, 0x000E000A, 0x00BEFFFF, 0x00140006, 0x80B2CFFF, 0x001B0002, @@ -95,11 +94,10 @@ static const u32 bdw_ddi_translations_dp[] = { 0x80CB2FFF, 0x001B0002, 0x00F7DFFF, 0x00180004, 0x80D75FFF, 0x001B0002, - 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ }; static const u32 bdw_ddi_translations_fdi[] = { - 0x00FFFFFF, 0x0001000E, /* FDI parameters */ + 0x00FFFFFF, 0x0001000E, 0x00D75FFF, 0x0004000A, 0x00C30FFF, 0x00070006, 0x00AAAFFF, 0x000C0000, @@ -108,7 +106,20 @@ static const u32 bdw_ddi_translations_fdi[] = { 0x00C30FFF, 0x000C0000, 0x00FFFFFF, 0x00070006, 0x00D75FFF, 0x000C0000, - 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ +}; + +static const u32 bdw_ddi_translations_hdmi[] = { + /* Idx NT mV diff T mV diff db */ + 0x00FFFFFF, 0x0007000E, /* 0: 400 400 0 */ + 0x00D75FFF, 0x000E000A, /* 1: 400 600 3.5 */ + 0x00BEFFFF, 0x00140006, /* 2: 400 800 6 */ + 0x00FFFFFF, 0x0009000D, /* 3: 450 450 0 */ + 0x00FFFFFF, 0x000E000A, /* 4: 600 600 0 */ + 0x00D7FFFF, 0x00140006, /* 5: 600 800 2.5 */ + 0x80CB2FFF, 0x001B0002, /* 6: 600 1000 4.5 */ + 0x00FFFFFF, 0x00140006, /* 7: 800 800 0 */ + 0x80E79FFF, 0x001B0002, /* 8: 800 1000 2 */ + 0x80FFFFFF, 0x001B0002, /* 9: 1000 1000 0 */ }; enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) @@ -145,26 +156,36 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; u32 reg; - int i; + int i, n_hdmi_entries, hdmi_800mV_0dB; int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; const u32 *ddi_translations_fdi; const u32 *ddi_translations_dp; const u32 *ddi_translations_edp; + const u32 *ddi_translations_hdmi; const u32 *ddi_translations; if (IS_BROADWELL(dev)) { ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_dp = bdw_ddi_translations_dp; ddi_translations_edp = bdw_ddi_translations_edp; + ddi_translations_hdmi = bdw_ddi_translations_hdmi; + n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi) / 2; + hdmi_800mV_0dB = 7; } else if (IS_HASWELL(dev)) { ddi_translations_fdi = hsw_ddi_translations_fdi; ddi_translations_dp = hsw_ddi_translations_dp; ddi_translations_edp = hsw_ddi_translations_dp; + ddi_translations_hdmi = hsw_ddi_translations_hdmi; + n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi) / 2; + hdmi_800mV_0dB = 6; } else { WARN(1, "ddi translation table missing\n"); ddi_translations_edp = bdw_ddi_translations_dp; ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_dp = bdw_ddi_translations_dp; + ddi_translations_hdmi = bdw_ddi_translations_hdmi; + n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi) / 2; + hdmi_800mV_0dB = 7; } switch (port) { @@ -193,9 +214,15 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) I915_WRITE(reg, ddi_translations[i]); reg += 4; } + + /* Choose a good default if VBT is badly populated */ + if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN || + hdmi_level >= n_hdmi_entries) + hdmi_level = hdmi_800mV_0dB; + /* Entry 9 is for HDMI: */ for (i = 0; i < 2; i++) { - I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]); + I915_WRITE(reg, ddi_translations_hdmi[hdmi_level * 2 + i]); reg += 4; } } @@ -587,8 +614,8 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, return (refclk * n * 100) / (p * r); } -void intel_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_config 
*pipe_config) +static void hsw_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; int link_clock = 0; @@ -643,9 +670,15 @@ void intel_ddi_clock_get(struct intel_encoder *encoder, pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock; } +void intel_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + hsw_ddi_clock_get(encoder, pipe_config); +} + static void -intel_ddi_calculate_wrpll(int clock /* in Hz */, - unsigned *r2_out, unsigned *n2_out, unsigned *p_out) +hsw_ddi_calculate_wrpll(int clock /* in Hz */, + unsigned *r2_out, unsigned *n2_out, unsigned *p_out) { uint64_t freq2k; unsigned p, n2, r2; @@ -708,27 +741,17 @@ intel_ddi_calculate_wrpll(int clock /* in Hz */, *r2_out = best.r2; } -/* - * Tries to find a PLL for the CRTC. If it finds, it increases the refcount and - * stores it in intel_crtc->ddi_pll_sel, so other mode sets won't be able to - * steal the selected PLL. You need to call intel_ddi_pll_enable to actually - * enable the PLL. - */ -bool intel_ddi_pll_select(struct intel_crtc *intel_crtc) +static bool +hsw_ddi_pll_select(struct intel_crtc *intel_crtc, + struct intel_encoder *intel_encoder, + int clock) { - struct drm_crtc *crtc = &intel_crtc->base; - struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); - int type = intel_encoder->type; - int clock = intel_crtc->config.port_clock; - - intel_put_shared_dpll(intel_crtc); - - if (type == INTEL_OUTPUT_HDMI) { + if (intel_encoder->type == INTEL_OUTPUT_HDMI) { struct intel_shared_dpll *pll; uint32_t val; unsigned p, n2, r2; - intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); + hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL | WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | @@ -749,6 +772,25 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc) return true; } + +/* + * Tries to find a *shared* PLL for the CRTC and store it in + * intel_crtc->ddi_pll_sel. + * + * For private DPLLs, compute_config() should do the selection for us. This + * function should be folded into compute_config() eventually. 
+ */ +bool intel_ddi_pll_select(struct intel_crtc *intel_crtc) +{ + struct drm_crtc *crtc = &intel_crtc->base; + struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); + int clock = intel_crtc->config.port_clock; + + intel_put_shared_dpll(intel_crtc); + + return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock); +} + void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->dev->dev_private; @@ -1183,31 +1225,52 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) } } -int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) +static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv) +{ + uint32_t lcpll = I915_READ(LCPLL_CTL); + uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; + + if (lcpll & LCPLL_CD_SOURCE_FCLK) + return 800000; + else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) + return 450000; + else if (freq == LCPLL_CLK_FREQ_450) + return 450000; + else if (freq == LCPLL_CLK_FREQ_54O_BDW) + return 540000; + else if (freq == LCPLL_CLK_FREQ_337_5_BDW) + return 337500; + else + return 675000; +} + +static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; uint32_t lcpll = I915_READ(LCPLL_CTL); uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; - if (lcpll & LCPLL_CD_SOURCE_FCLK) { + if (lcpll & LCPLL_CD_SOURCE_FCLK) return 800000; - } else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) { + else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) return 450000; - } else if (freq == LCPLL_CLK_FREQ_450) { + else if (freq == LCPLL_CLK_FREQ_450) return 450000; - } else if (IS_HASWELL(dev)) { - if (IS_ULT(dev)) - return 337500; - else - return 540000; - } else { - if (freq == LCPLL_CLK_FREQ_54O_BDW) - return 540000; - else if (freq == LCPLL_CLK_FREQ_337_5_BDW) - return 337500; - else - return 675000; - } + else if (IS_ULT(dev)) + return 337500; + else + return 540000; +} + +int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + + if (IS_BROADWELL(dev)) + return bdw_get_cdclk_freq(dev_priv); + + /* Haswell */ + return hsw_get_cdclk_freq(dev_priv); } static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv, @@ -1248,10 +1311,8 @@ static const char * const hsw_ddi_pll_names[] = { "WRPLL 2", }; -void intel_ddi_pll_init(struct drm_device *dev) +static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t val = I915_READ(LCPLL_CTL); int i; dev_priv->num_shared_dpll = 2; @@ -1264,6 +1325,14 @@ void intel_ddi_pll_init(struct drm_device *dev) dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_pll_get_hw_state; } +} + +void intel_ddi_pll_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t val = I915_READ(LCPLL_CTL); + + hsw_shared_dplls_init(dev_priv); /* The LCPLL register should be turned on by the BIOS. For now let's * just check its state and print errors in case something is wrong. 
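/*
 * Illustrative sketch, not part of this commit: the hunks above split
 * intel_ddi_get_cdclk_freq() into hsw_ and bdw_ prefixed helpers behind
 * a thin platform dispatcher, so a later platform only needs one more
 * branch plus its own helper. IS_SKYLAKE() and skl_get_cdclk_freq()
 * below are hypothetical placeholders for such a follow-up, not code
 * from this patch; the Broadwell and Haswell calls are the helpers the
 * diff itself introduces.
 */
static int ddi_get_cdclk_freq_sketch(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_SKYLAKE(dev))		/* hypothetical future platform */
		return skl_get_cdclk_freq(dev_priv);

	if (IS_BROADWELL(dev))
		return bdw_get_cdclk_freq(dev_priv);

	/* Haswell */
	return hsw_get_cdclk_freq(dev_priv);
}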
@@ -1444,7 +1513,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; } - intel_ddi_clock_get(encoder, pipe_config); + hsw_ddi_clock_get(encoder, pipe_config); } static void intel_ddi_destroy(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index de40a44e0ca0..0b327ebb2d9e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -91,15 +91,16 @@ static int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *ifb, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj); -static void intel_dp_set_m_n(struct intel_crtc *crtc); static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); static void intel_set_pipe_timings(struct intel_crtc *intel_crtc); static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, - struct intel_link_m_n *m_n); + struct intel_link_m_n *m_n, + struct intel_link_m_n *m2_n2); static void ironlake_set_pipeconf(struct drm_crtc *crtc); static void haswell_set_pipeconf(struct drm_crtc *crtc); static void intel_set_pipe_csc(struct drm_crtc *crtc); static void vlv_prepare_pll(struct intel_crtc *crtc); +static void chv_prepare_pll(struct intel_crtc *crtc); static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe) { @@ -1519,34 +1520,6 @@ static void intel_init_dpio(struct drm_device *dev) } } -static void intel_reset_dpio(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_CHERRYVIEW(dev)) { - enum dpio_phy phy; - u32 val; - - for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) { - /* Poll for phypwrgood signal */ - if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & - PHY_POWERGOOD(phy), 1)) - DRM_ERROR("Display PHY %d is not power up\n", phy); - - /* - * Deassert common lane reset for PHY. - * - * This should only be done on init and resume from S3 - * with both PLLs disabled, or we risk losing DPIO and - * PLL synchronization. - */ - val = I915_READ(DISPLAY_PHY_CONTROL); - I915_WRITE(DISPLAY_PHY_CONTROL, - PHY_COM_LANE_RESET_DEASSERT(phy, val)); - } - } -} - static void vlv_enable_pll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -1718,7 +1691,7 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) assert_pipe_disabled(dev_priv, pipe); /* Set PLL en = 0 */ - val = DPLL_SSC_REF_CLOCK_CHV; + val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV; if (pipe != PIPE_A) val |= DPLL_INTEGRATED_CRI_CLK_VLV; I915_WRITE(DPLL(pipe), val); @@ -1812,7 +1785,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc) if (WARN_ON(pll->refcount == 0)) return; - DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n", + DRM_DEBUG_KMS("enable %s (active %d, on? 
%d) for crtc %d\n", pll->name, pll->active, pll->on, crtc->base.base.id); @@ -1830,7 +1803,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc) pll->on = true; } -void intel_disable_shared_dpll(struct intel_crtc *crtc) +static void intel_disable_shared_dpll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -2115,35 +2088,28 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv, /** * intel_enable_primary_hw_plane - enable the primary plane on a given pipe - * @dev_priv: i915 private structure - * @plane: plane to enable - * @pipe: pipe being fed + * @plane: plane to be enabled + * @crtc: crtc for the plane * - * Enable @plane on @pipe, making sure that @pipe is running first. + * Enable @plane on @crtc, making sure that the pipe is running first. */ -static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv, - enum plane plane, enum pipe pipe) +static void intel_enable_primary_hw_plane(struct drm_plane *plane, + struct drm_crtc *crtc) { - struct drm_device *dev = dev_priv->dev; - struct intel_crtc *intel_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - int reg; - u32 val; + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); /* If the pipe isn't enabled, we can't pump pixels and may hang */ - assert_pipe_enabled(dev_priv, pipe); + assert_pipe_enabled(dev_priv, intel_crtc->pipe); if (intel_crtc->primary_enabled) return; intel_crtc->primary_enabled = true; - reg = DSPCNTR(plane); - val = I915_READ(reg); - WARN_ON(val & DISPLAY_PLANE_ENABLE); - - I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); - intel_flush_primary_plane(dev_priv, plane); + dev_priv->display.update_primary_plane(crtc, plane->fb, + crtc->x, crtc->y); /* * BDW signals flip done immediately if the plane @@ -2156,31 +2122,27 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv, /** * intel_disable_primary_hw_plane - disable the primary hardware plane - * @dev_priv: i915 private structure - * @plane: plane to disable - * @pipe: pipe consuming the data + * @plane: plane to be disabled + * @crtc: crtc for the plane * - * Disable @plane; should be an independent operation. + * Disable @plane on @crtc, making sure that the pipe is running first. 
*/ -static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv, - enum plane plane, enum pipe pipe) +static void intel_disable_primary_hw_plane(struct drm_plane *plane, + struct drm_crtc *crtc) { - struct intel_crtc *intel_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - int reg; - u32 val; + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + assert_pipe_enabled(dev_priv, intel_crtc->pipe); if (!intel_crtc->primary_enabled) return; intel_crtc->primary_enabled = false; - reg = DSPCNTR(plane); - val = I915_READ(reg); - WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0); - - I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); - intel_flush_primary_plane(dev_priv, plane); + dev_priv->display.update_primary_plane(crtc, plane->fb, + crtc->x, crtc->y); } static bool need_vtd_wa(struct drm_device *dev) @@ -2421,12 +2383,35 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, int plane = intel_crtc->plane; unsigned long linear_offset; u32 dspcntr; - u32 reg; + u32 reg = DSPCNTR(plane); + + if (!intel_crtc->primary_enabled) { + I915_WRITE(reg, 0); + if (INTEL_INFO(dev)->gen >= 4) + I915_WRITE(DSPSURF(plane), 0); + else + I915_WRITE(DSPADDR(plane), 0); + POSTING_READ(reg); + return; + } + + dspcntr = DISPPLANE_GAMMA_ENABLE; + + dspcntr |= DISPLAY_PLANE_ENABLE; + + if (INTEL_INFO(dev)->gen < 4) { + if (intel_crtc->pipe == PIPE_B) + dspcntr |= DISPPLANE_SEL_PIPE_B; + + /* pipesrc and dspsize control the size that is scaled from, + * which should always be the user's requested size. + */ + I915_WRITE(DSPSIZE(plane), + ((intel_crtc->config.pipe_src_h - 1) << 16) | + (intel_crtc->config.pipe_src_w - 1)); + I915_WRITE(DSPPOS(plane), 0); + } - reg = DSPCNTR(plane); - dspcntr = I915_READ(reg); - /* Mask out pixel format bits in case we change it */ - dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; switch (fb->pixel_format) { case DRM_FORMAT_C8: dspcntr |= DISPPLANE_8BPP; @@ -2458,12 +2443,9 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, BUG(); } - if (INTEL_INFO(dev)->gen >= 4) { - if (obj->tiling_mode != I915_TILING_NONE) - dspcntr |= DISPPLANE_TILED; - else - dspcntr &= ~DISPPLANE_TILED; - } + if (INTEL_INFO(dev)->gen >= 4 && + obj->tiling_mode != I915_TILING_NONE) + dspcntr |= DISPPLANE_TILED; if (IS_G4X(dev)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; @@ -2507,12 +2489,22 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, int plane = intel_crtc->plane; unsigned long linear_offset; u32 dspcntr; - u32 reg; + u32 reg = DSPCNTR(plane); + + if (!intel_crtc->primary_enabled) { + I915_WRITE(reg, 0); + I915_WRITE(DSPSURF(plane), 0); + POSTING_READ(reg); + return; + } + + dspcntr = DISPPLANE_GAMMA_ENABLE; + + dspcntr |= DISPLAY_PLANE_ENABLE; + + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; - reg = DSPCNTR(plane); - dspcntr = I915_READ(reg); - /* Mask out pixel format bits in case we change it */ - dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; switch (fb->pixel_format) { case DRM_FORMAT_C8: dspcntr |= DISPPLANE_8BPP; @@ -2542,12 +2534,8 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, if (obj->tiling_mode != I915_TILING_NONE) dspcntr |= DISPPLANE_TILED; - else - dspcntr &= ~DISPPLANE_TILED; - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) - dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE; - else + if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; I915_WRITE(reg, dspcntr); @@ -3906,16 
+3894,14 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) static void intel_crtc_enable_planes(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; assert_vblank_disabled(crtc); drm_vblank_on(dev, pipe); - intel_enable_primary_hw_plane(dev_priv, plane, pipe); + intel_enable_primary_hw_plane(crtc->primary, crtc); intel_enable_planes(crtc); intel_crtc_update_cursor(crtc, true); intel_crtc_dpms_overlay(intel_crtc, true); @@ -3952,7 +3938,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc) intel_crtc_dpms_overlay(intel_crtc, false); intel_crtc_update_cursor(crtc, false); intel_disable_planes(crtc); - intel_disable_primary_hw_plane(dev_priv, plane, pipe); + intel_disable_primary_hw_plane(crtc->primary, crtc); /* * FIXME: Once we grow proper nuclear flip support out of this we need @@ -3973,7 +3959,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; - enum plane plane = intel_crtc->plane; WARN_ON(!crtc->enabled); @@ -3990,18 +3975,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config.has_pch_encoder) { intel_cpu_transcoder_set_m_n(intel_crtc, - &intel_crtc->config.fdi_m_n); + &intel_crtc->config.fdi_m_n, NULL); } ironlake_set_pipeconf(crtc); - /* Set up the display plane register */ - I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); - POSTING_READ(DSPCNTR(plane)); - - dev_priv->display.update_primary_plane(crtc, crtc->primary->fb, - crtc->x, crtc->y); - intel_crtc->active = true; intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); @@ -4086,7 +4064,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; - enum plane plane = intel_crtc->plane; WARN_ON(!crtc->enabled); @@ -4103,20 +4080,13 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config.has_pch_encoder) { intel_cpu_transcoder_set_m_n(intel_crtc, - &intel_crtc->config.fdi_m_n); + &intel_crtc->config.fdi_m_n, NULL); } haswell_set_pipeconf(crtc); intel_set_pipe_csc(crtc); - /* Set up the display plane register */ - I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE); - POSTING_READ(DSPCNTR(plane)); - - dev_priv->display.update_primary_plane(crtc, crtc->primary->fb, - crtc->x, crtc->y); - intel_crtc->active = true; intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); @@ -4539,12 +4509,57 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) vlv_update_cdclk(dev); } +static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val, cmd; + + WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq); + + switch (cdclk) { + case 400000: + cmd = 3; + break; + case 333333: + case 320000: + cmd = 2; + break; + case 266667: + cmd = 1; + break; + case 200000: + cmd = 0; + break; + default: + WARN_ON(1); + return; + } + + mutex_lock(&dev_priv->rps.hw_lock); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val &= ~DSPFREQGUAR_MASK_CHV; + val |= (cmd << DSPFREQGUAR_SHIFT_CHV); + vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & 
+ DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), + 50)) { + DRM_ERROR("timed out waiting for CDclk change\n"); + } + mutex_unlock(&dev_priv->rps.hw_lock); + + vlv_update_cdclk(dev); +} + static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, int max_pixclk) { int vco = valleyview_get_vco(dev_priv); int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000; + /* FIXME: Punit isn't quite ready yet */ + if (IS_CHERRYVIEW(dev_priv->dev)) + return 400000; + /* * Really only a few cases to deal with, as only 4 CDclks are supported: * 200MHz @@ -4607,21 +4622,23 @@ static void valleyview_modeset_global_resources(struct drm_device *dev) int max_pixclk = intel_mode_max_pixclk(dev_priv); int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); - if (req_cdclk != dev_priv->vlv_cdclk_freq) - valleyview_set_cdclk(dev, req_cdclk); + if (req_cdclk != dev_priv->vlv_cdclk_freq) { + if (IS_CHERRYVIEW(dev)) + cherryview_set_cdclk(dev, req_cdclk); + else + valleyview_set_cdclk(dev, req_cdclk); + } + modeset_update_crtc_power_domains(dev); } static void valleyview_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; bool is_dsi; - u32 dspcntr; WARN_ON(!crtc->enabled); @@ -4630,33 +4647,20 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI); - if (!is_dsi && !IS_CHERRYVIEW(dev)) - vlv_prepare_pll(intel_crtc); - - /* Set up the display plane register */ - dspcntr = DISPPLANE_GAMMA_ENABLE; + if (!is_dsi) { + if (IS_CHERRYVIEW(dev)) + chv_prepare_pll(intel_crtc); + else + vlv_prepare_pll(intel_crtc); + } if (intel_crtc->config.has_dp_encoder) intel_dp_set_m_n(intel_crtc); intel_set_pipe_timings(intel_crtc); - /* pipesrc and dspsize control the size that is scaled from, - * which should always be the user's requested size. - */ - I915_WRITE(DSPSIZE(plane), - ((intel_crtc->config.pipe_src_h - 1) << 16) | - (intel_crtc->config.pipe_src_w - 1)); - I915_WRITE(DSPPOS(plane), 0); - i9xx_set_pipeconf(intel_crtc); - I915_WRITE(DSPCNTR(plane), dspcntr); - POSTING_READ(DSPCNTR(plane)); - - dev_priv->display.update_primary_plane(crtc, crtc->primary->fb, - crtc->x, crtc->y); - intel_crtc->active = true; intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); @@ -4704,12 +4708,9 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc) static void i9xx_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; - u32 dspcntr; WARN_ON(!crtc->enabled); @@ -4718,35 +4719,13 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) i9xx_set_pll_dividers(intel_crtc); - /* Set up the display plane register */ - dspcntr = DISPPLANE_GAMMA_ENABLE; - - if (pipe == 0) - dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; - else - dspcntr |= DISPPLANE_SEL_PIPE_B; - if (intel_crtc->config.has_dp_encoder) intel_dp_set_m_n(intel_crtc); intel_set_pipe_timings(intel_crtc); - /* pipesrc and dspsize control the size that is scaled from, - * which should always be the user's requested size. 
- */ - I915_WRITE(DSPSIZE(plane), - ((intel_crtc->config.pipe_src_h - 1) << 16) | - (intel_crtc->config.pipe_src_w - 1)); - I915_WRITE(DSPPOS(plane), 0); - i9xx_set_pipeconf(intel_crtc); - I915_WRITE(DSPCNTR(plane), dspcntr); - POSTING_READ(DSPCNTR(plane)); - - dev_priv->display.update_primary_plane(crtc, crtc->primary->fb, - crtc->x, crtc->y); - intel_crtc->active = true; if (!IS_GEN2(dev)) @@ -5275,6 +5254,10 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev) u32 val; int divider; + /* FIXME: Punit isn't quite ready yet */ + if (IS_CHERRYVIEW(dev)) + return 400000; + mutex_lock(&dev_priv->dpio_lock); val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); mutex_unlock(&dev_priv->dpio_lock); @@ -5519,7 +5502,8 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, } static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, - struct intel_link_m_n *m_n) + struct intel_link_m_n *m_n, + struct intel_link_m_n *m2_n2) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -5531,6 +5515,18 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); + /* M2_N2 registers to be set only for gen < 8 (M2_N2 available + * for gen < 8) and if DRRS is supported (to make sure the + * registers are not unnecessarily accessed). + */ + if (m2_n2 && INTEL_INFO(dev)->gen < 8 && + crtc->config.has_drrs) { + I915_WRITE(PIPE_DATA_M2(transcoder), + TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); + I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); + I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); + I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); + } } else { I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); @@ -5539,12 +5535,13 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, } } -static void intel_dp_set_m_n(struct intel_crtc *crtc) +void intel_dp_set_m_n(struct intel_crtc *crtc) { if (crtc->config.has_pch_encoder) intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n); else - intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n); + intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n, + &crtc->config.dp_m2_n2); } static void vlv_update_pll(struct intel_crtc *crtc) @@ -5662,6 +5659,18 @@ static void vlv_prepare_pll(struct intel_crtc *crtc) static void chv_update_pll(struct intel_crtc *crtc) { + crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV | + DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | + DPLL_VCO_ENABLE; + if (crtc->pipe != PIPE_A) + crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; + + crtc->config.dpll_hw_state.dpll_md = + (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; +} + +static void chv_prepare_pll(struct intel_crtc *crtc) +{ struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int pipe = crtc->pipe; @@ -5671,15 +5680,6 @@ static void chv_update_pll(struct intel_crtc *crtc) u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; int refclk; - crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV | - DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | - DPLL_VCO_ENABLE; - if (pipe != PIPE_A) - crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; - - crtc->config.dpll_hw_state.dpll_md = - (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; - bestn = 
crtc->config.dpll.n; bestm2_frac = crtc->config.dpll.m2 & 0x3fffff; bestm1 = crtc->config.dpll.m1; @@ -6171,6 +6171,10 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, u32 mdiv; int refclk = 100000; + /* In case of MIPI DPLL will not even be used */ + if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)) + return; + mutex_lock(&dev_priv->dpio_lock); mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); mutex_unlock(&dev_priv->dpio_lock); @@ -6231,7 +6235,7 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc, crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1; val = I915_READ(DSPSTRIDE(pipe)); - crtc->base.primary->fb->pitches[0] = val & 0xffffff80; + crtc->base.primary->fb->pitches[0] = val & 0xffffffc0; aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, plane_config->tiled); @@ -6363,7 +6367,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, static void ironlake_init_pch_refclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; u32 val, final; bool has_lvds = false; @@ -6373,8 +6376,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) bool can_ssc = false; /* We need to take the global config into account */ - list_for_each_entry(encoder, &mode_config->encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { switch (encoder->type) { case INTEL_OUTPUT_LVDS: has_panel = true; @@ -6681,11 +6683,10 @@ static void lpt_disable_clkout_dp(struct drm_device *dev) static void lpt_init_pch_refclk(struct drm_device *dev) { - struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; bool has_vga = false; - list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { + for_each_intel_encoder(dev, encoder) { switch (encoder->type) { case INTEL_OUTPUT_ANALOG: has_vga = true; @@ -7141,7 +7142,8 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, enum transcoder transcoder, - struct intel_link_m_n *m_n) + struct intel_link_m_n *m_n, + struct intel_link_m_n *m2_n2) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -7155,6 +7157,20 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; + /* Read M2_N2 registers only for gen < 8 (M2_N2 available for + * gen < 8) and if DRRS is supported (to make sure the + * registers are not unnecessarily read). 
+ */ + if (m2_n2 && INTEL_INFO(dev)->gen < 8 && + crtc->config.has_drrs) { + m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); + m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); + m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) + & ~TU_SIZE_MASK; + m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); + m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) + & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; + } } else { m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); @@ -7173,14 +7189,15 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); else intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, - &pipe_config->dp_m_n); + &pipe_config->dp_m_n, + &pipe_config->dp_m2_n2); } static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config) { intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, - &pipe_config->fdi_m_n); + &pipe_config->fdi_m_n, NULL); } static void ironlake_get_pfit_config(struct intel_crtc *crtc, @@ -7251,7 +7268,7 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc, crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1; val = I915_READ(DSPSTRIDE(pipe)); - crtc->base.primary->fb->pitches[0] = val & 0xffffff80; + crtc->base.primary->fb->pitches[0] = val & 0xffffffc0; aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, plane_config->tiled); @@ -7611,6 +7628,22 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc, return 0; } +static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, + enum port port, + struct intel_crtc_config *pipe_config) +{ + pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); + + switch (pipe_config->ddi_pll_sel) { + case PORT_CLK_SEL_WRPLL1: + pipe_config->shared_dpll = DPLL_ID_WRPLL1; + break; + case PORT_CLK_SEL_WRPLL2: + pipe_config->shared_dpll = DPLL_ID_WRPLL2; + break; + } +} + static void haswell_get_ddi_port_state(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config) { @@ -7624,16 +7657,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; - pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); - - switch (pipe_config->ddi_pll_sel) { - case PORT_CLK_SEL_WRPLL1: - pipe_config->shared_dpll = DPLL_ID_WRPLL1; - break; - case PORT_CLK_SEL_WRPLL2: - pipe_config->shared_dpll = DPLL_ID_WRPLL2; - break; - } + haswell_get_ddi_pll(dev_priv, port, pipe_config); if (pipe_config->shared_dpll >= 0) { pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; @@ -8033,74 +8057,62 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - uint32_t cntl; + uint32_t cntl = 0, size = 0; - if (base != intel_crtc->cursor_base) { - /* On these chipsets we can only modify the base whilst - * the cursor is disabled. 
- */ - if (intel_crtc->cursor_cntl) { - I915_WRITE(_CURACNTR, 0); - POSTING_READ(_CURACNTR); - intel_crtc->cursor_cntl = 0; + if (base) { + unsigned int width = intel_crtc->cursor_width; + unsigned int height = intel_crtc->cursor_height; + unsigned int stride = roundup_pow_of_two(width) * 4; + + switch (stride) { + default: + WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n", + width, stride); + stride = 256; + /* fallthrough */ + case 256: + case 512: + case 1024: + case 2048: + break; } - I915_WRITE(_CURABASE, base); - POSTING_READ(_CURABASE); + cntl |= CURSOR_ENABLE | + CURSOR_GAMMA_ENABLE | + CURSOR_FORMAT_ARGB | + CURSOR_STRIDE(stride); + + size = (height << 12) | width; } - /* XXX width must be 64, stride 256 => 0x00 << 28 */ - cntl = 0; - if (base) - cntl = (CURSOR_ENABLE | - CURSOR_GAMMA_ENABLE | - CURSOR_FORMAT_ARGB); - if (intel_crtc->cursor_cntl != cntl) { - I915_WRITE(_CURACNTR, cntl); + if (intel_crtc->cursor_cntl != 0 && + (intel_crtc->cursor_base != base || + intel_crtc->cursor_size != size || + intel_crtc->cursor_cntl != cntl)) { + /* On these chipsets we can only modify the base/size/stride + * whilst the cursor is disabled. + */ + I915_WRITE(_CURACNTR, 0); POSTING_READ(_CURACNTR); - intel_crtc->cursor_cntl = cntl; + intel_crtc->cursor_cntl = 0; } -} -static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; - uint32_t cntl; + if (intel_crtc->cursor_base != base) + I915_WRITE(_CURABASE, base); - cntl = 0; - if (base) { - cntl = MCURSOR_GAMMA_ENABLE; - switch (intel_crtc->cursor_width) { - case 64: - cntl |= CURSOR_MODE_64_ARGB_AX; - break; - case 128: - cntl |= CURSOR_MODE_128_ARGB_AX; - break; - case 256: - cntl |= CURSOR_MODE_256_ARGB_AX; - break; - default: - WARN_ON(1); - return; - } - cntl |= pipe << 28; /* Connect to correct pipe */ + if (intel_crtc->cursor_size != size) { + I915_WRITE(CURSIZE, size); + intel_crtc->cursor_size = size; } + if (intel_crtc->cursor_cntl != cntl) { - I915_WRITE(CURCNTR(pipe), cntl); - POSTING_READ(CURCNTR(pipe)); + I915_WRITE(_CURACNTR, cntl); + POSTING_READ(_CURACNTR); intel_crtc->cursor_cntl = cntl; } - - /* and commit changes on next vblank */ - I915_WRITE(CURBASE(pipe), base); - POSTING_READ(CURBASE(pipe)); } -static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) +static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -8125,6 +8137,7 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) WARN_ON(1); return; } + cntl |= pipe << 28; /* Connect to correct pipe */ } if (IS_HASWELL(dev) || IS_BROADWELL(dev)) cntl |= CURSOR_PIPE_CSC_ENABLE; @@ -8184,15 +8197,50 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, I915_WRITE(CURPOS(pipe), pos); - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) - ivb_update_cursor(crtc, base); - else if (IS_845G(dev) || IS_I865G(dev)) + if (IS_845G(dev) || IS_I865G(dev)) i845_update_cursor(crtc, base); else i9xx_update_cursor(crtc, base); intel_crtc->cursor_base = base; } +static bool cursor_size_ok(struct drm_device *dev, + uint32_t width, uint32_t height) +{ + if (width == 0 || height == 0) + return false; + + /* + * 845g/865g are special in that they are only limited by + * the width of their cursors, the height is arbitrary up to + * the precision of the 
register. Everything else requires + * square cursors, limited to a few power-of-two sizes. + */ + if (IS_845G(dev) || IS_I865G(dev)) { + if ((width & 63) != 0) + return false; + + if (width > (IS_845G(dev) ? 64 : 512)) + return false; + + if (height > 1023) + return false; + } else { + switch (width | height) { + case 256: + case 128: + if (IS_GEN2(dev)) + return false; + case 64: + break; + default: + return false; + } + } + + return true; +} + /* * intel_crtc_cursor_set_obj - Set cursor to specified GEM object * @@ -8205,10 +8253,9 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, uint32_t width, uint32_t height) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; - unsigned old_width; + unsigned old_width, stride; uint32_t addr; int ret; @@ -8222,14 +8269,13 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, } /* Check for which cursor types we support */ - if (!((width == 64 && height == 64) || - (width == 128 && height == 128 && !IS_GEN2(dev)) || - (width == 256 && height == 256 && !IS_GEN2(dev)))) { + if (!cursor_size_ok(dev, width, height)) { DRM_DEBUG("Cursor dimension not supported\n"); return -EINVAL; } - if (obj->base.size < width * height * 4) { + stride = roundup_pow_of_two(width) * 4; + if (obj->base.size < stride * height) { DRM_DEBUG_KMS("buffer is too small\n"); ret = -ENOMEM; goto fail; @@ -8278,9 +8324,6 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, addr = obj->phys_handle->busaddr; } - if (IS_GEN2(dev)) - I915_WRITE(CURSIZE, (height << 12) | width); - finish: if (intel_crtc->cursor_bo) { if (!INTEL_INFO(dev)->cursor_needs_physical) @@ -8468,8 +8511,6 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, connector->base.id, connector->name, encoder->base.id, encoder->name); - drm_modeset_acquire_init(ctx, 0); - retry: ret = drm_modeset_lock(&config->connection_mutex, ctx); if (ret) @@ -8508,10 +8549,14 @@ retry: i++; if (!(encoder->possible_crtcs & (1 << i))) continue; - if (!possible_crtc->enabled) { - crtc = possible_crtc; - break; - } + if (possible_crtc->enabled) + continue; + /* This can occur when applying the pipe A quirk on resume. 
*/ + if (to_intel_crtc(possible_crtc)->new_enabled) + continue; + + crtc = possible_crtc; + break; } /* @@ -8580,15 +8625,11 @@ fail_unlock: goto retry; } - drm_modeset_drop_locks(ctx); - drm_modeset_acquire_fini(ctx); - return false; } void intel_release_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old, - struct drm_modeset_acquire_ctx *ctx) + struct intel_load_detect_pipe *old) { struct intel_encoder *intel_encoder = intel_attached_encoder(connector); @@ -8612,17 +8653,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, drm_framebuffer_unreference(old->release_fb); } - goto unlock; return; } /* Switch crtc and encoder back off if necessary */ if (old->dpms_mode != DRM_MODE_DPMS_ON) connector->funcs->dpms(connector, old->dpms_mode); - -unlock: - drm_modeset_drop_locks(ctx); - drm_modeset_acquire_fini(ctx); } static int i9xx_pll_refclk(struct drm_device *dev, @@ -9522,6 +9558,8 @@ static bool use_mmio_flip(struct intel_engine_cs *ring, return false; else if (i915.use_mmio_flip > 0) return true; + else if (i915.enable_execlists) + return true; else return ring != obj->ring; } @@ -9837,8 +9875,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev) to_intel_encoder(connector->base.encoder); } - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { encoder->new_crtc = to_intel_crtc(encoder->base.crtc); } @@ -9869,8 +9906,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev) connector->base.encoder = &connector->new_encoder->base; } - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { encoder->base.crtc = &encoder->new_crtc->base; } @@ -9997,6 +10033,15 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, pipe_config->dp_m_n.tu); + + DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n", + pipe_config->has_dp_encoder, + pipe_config->dp_m2_n2.gmch_m, + pipe_config->dp_m2_n2.gmch_n, + pipe_config->dp_m2_n2.link_m, + pipe_config->dp_m2_n2.link_n, + pipe_config->dp_m2_n2.tu); + DRM_DEBUG_KMS("requested mode:\n"); drm_mode_debug_printmodeline(&pipe_config->requested_mode); DRM_DEBUG_KMS("adjusted mode:\n"); @@ -10031,8 +10076,7 @@ static bool check_single_encoder_cloning(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct intel_encoder *source_encoder; - list_for_each_entry(source_encoder, - &dev->mode_config.encoder_list, base.head) { + for_each_intel_encoder(dev, source_encoder) { if (source_encoder->new_crtc != crtc) continue; @@ -10048,8 +10092,7 @@ static bool check_encoder_cloning(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct intel_encoder *encoder; - list_for_each_entry(encoder, - &dev->mode_config.encoder_list, base.head) { + for_each_intel_encoder(dev, encoder) { if (encoder->new_crtc != crtc) continue; @@ -10133,8 +10176,7 @@ encoder_retry: * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. 
*/ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { if (&encoder->new_crtc->base != crtc) continue; @@ -10212,8 +10254,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, 1 << connector->new_encoder->new_crtc->pipe; } - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { if (encoder->base.crtc == &encoder->new_crtc->base) continue; @@ -10287,8 +10328,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) struct intel_crtc *intel_crtc; struct drm_connector *connector; - list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, intel_encoder) { if (!intel_encoder->base.crtc) continue; @@ -10377,6 +10417,22 @@ intel_pipe_config_compare(struct drm_device *dev, return false; \ } +/* This is required for BDW+ where there is only one set of registers for + * switching between high and low RR. + * This macro can be used whenever a comparison has to be made between one + * hw state and multiple sw state variables. + */ +#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \ + if ((current_config->name != pipe_config->name) && \ + (current_config->alt_name != pipe_config->name)) { \ + DRM_ERROR("mismatch in " #name " " \ + "(expected %i or %i, found %i)\n", \ + current_config->name, \ + current_config->alt_name, \ + pipe_config->name); \ + return false; \ + } + #define PIPE_CONF_CHECK_FLAGS(name, mask) \ if ((current_config->name ^ pipe_config->name) & (mask)) { \ DRM_ERROR("mismatch in " #name "(" #mask ") " \ @@ -10409,11 +10465,28 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(fdi_m_n.tu); PIPE_CONF_CHECK_I(has_dp_encoder); - PIPE_CONF_CHECK_I(dp_m_n.gmch_m); - PIPE_CONF_CHECK_I(dp_m_n.gmch_n); - PIPE_CONF_CHECK_I(dp_m_n.link_m); - PIPE_CONF_CHECK_I(dp_m_n.link_n); - PIPE_CONF_CHECK_I(dp_m_n.tu); + + if (INTEL_INFO(dev)->gen < 8) { + PIPE_CONF_CHECK_I(dp_m_n.gmch_m); + PIPE_CONF_CHECK_I(dp_m_n.gmch_n); + PIPE_CONF_CHECK_I(dp_m_n.link_m); + PIPE_CONF_CHECK_I(dp_m_n.link_n); + PIPE_CONF_CHECK_I(dp_m_n.tu); + + if (current_config->has_drrs) { + PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m); + PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n); + PIPE_CONF_CHECK_I(dp_m2_n2.link_m); + PIPE_CONF_CHECK_I(dp_m2_n2.link_n); + PIPE_CONF_CHECK_I(dp_m2_n2.tu); + } + } else { + PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m); + PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n); + PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m); + PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n); + PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu); + } PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay); PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal); @@ -10499,6 +10572,7 @@ intel_pipe_config_compare(struct drm_device *dev, #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I +#undef PIPE_CONF_CHECK_I_ALT #undef PIPE_CONF_CHECK_FLAGS #undef PIPE_CONF_CHECK_CLOCK_FUZZY #undef PIPE_CONF_QUIRK @@ -10528,8 +10602,7 @@ check_encoder_state(struct drm_device *dev) struct intel_encoder *encoder; struct intel_connector *connector; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { bool enabled = false; bool active = false; enum pipe pipe, tracked_pipe; @@ -10608,8 +10681,7 @@ check_crtc_state(struct drm_device *dev) WARN(crtc->active && !crtc->base.enabled, "active crtc, but not enabled in sw tracking\n"); - 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { if (encoder->base.crtc != &crtc->base) continue; enabled = true; @@ -10631,8 +10703,7 @@ check_crtc_state(struct drm_device *dev) if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) active = crtc->active; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { enum pipe pipe; if (encoder->base.crtc != &crtc->base) continue; @@ -11000,7 +11071,7 @@ static void intel_set_config_restore_state(struct drm_device *dev, } count = 0; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + for_each_intel_encoder(dev, encoder) { encoder->new_crtc = to_intel_crtc(config->save_encoder_crtcs[count++]); } @@ -11159,8 +11230,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, } /* Check for any encoders that needs to be disabled. */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { int num_connectors = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, @@ -11193,9 +11263,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, for_each_intel_crtc(dev, crtc) { crtc->new_enabled = false; - list_for_each_entry(encoder, - &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { if (encoder->new_crtc == crtc) { crtc->new_enabled = true; break; @@ -11232,7 +11300,7 @@ static void disable_crtc_nofb(struct intel_crtc *crtc) connector->new_encoder = NULL; } - list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + for_each_intel_encoder(dev, encoder) { if (encoder->new_crtc == crtc) encoder->new_crtc = NULL; } @@ -11295,7 +11363,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set) ret = intel_set_mode(set->crtc, set->mode, set->x, set->y, set->fb); } else if (config->fb_changed) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); intel_crtc_wait_for_pending_flips(set->crtc); @@ -11309,8 +11376,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set) */ if (!intel_crtc->primary_enabled && ret == 0) { WARN_ON(!intel_crtc->active); - intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane, - intel_crtc->pipe); + intel_enable_primary_hw_plane(set->crtc->primary, set->crtc); } /* @@ -11463,8 +11529,6 @@ static int intel_primary_plane_disable(struct drm_plane *plane) { struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_crtc *intel_crtc; if (!plane->fb) @@ -11487,8 +11551,8 @@ intel_primary_plane_disable(struct drm_plane *plane) goto disable_unpin; intel_crtc_wait_for_pending_flips(plane->crtc); - intel_disable_primary_hw_plane(dev_priv, intel_plane->plane, - intel_plane->pipe); + intel_disable_primary_hw_plane(plane, plane->crtc); + disable_unpin: mutex_lock(&dev->struct_mutex); i915_gem_track_fb(intel_fb_obj(plane->fb), NULL, @@ -11508,9 +11572,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc, uint32_t src_w, uint32_t src_h) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct drm_i915_gem_object *old_obj = 
intel_fb_obj(plane->fb); struct drm_rect dest = { @@ -11597,9 +11659,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc, INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); if (intel_crtc->primary_enabled) - intel_disable_primary_hw_plane(dev_priv, - intel_plane->plane, - intel_plane->pipe); + intel_disable_primary_hw_plane(plane, crtc); if (plane->fb != fb) @@ -11616,8 +11676,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc, return ret; if (!intel_crtc->primary_enabled) - intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane, - intel_crtc->pipe); + intel_enable_primary_hw_plane(plane, crtc); return 0; } @@ -11706,8 +11765,8 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, }; const struct drm_rect clip = { /* integer pixels */ - .x2 = intel_crtc->config.pipe_src_w, - .y2 = intel_crtc->config.pipe_src_h, + .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, + .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0, }; bool visible; int ret; @@ -11726,6 +11785,10 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h); } else { intel_crtc_update_cursor(crtc, visible); + + intel_frontbuffer_flip(crtc->dev, + INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe)); + return 0; } } @@ -11802,8 +11865,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) intel_crtc->cursor_base = ~0; intel_crtc->cursor_cntl = ~0; - - init_waitqueue_head(&intel_crtc->vbl_wait); + intel_crtc->cursor_size = ~0; BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); @@ -11866,8 +11928,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder) int index_mask = 0; int entry = 0; - list_for_each_entry(source_encoder, - &dev->mode_config.encoder_list, base.head) { + for_each_intel_encoder(dev, source_encoder) { if (encoders_cloneable(encoder, source_encoder)) index_mask |= (1 << entry); @@ -12056,7 +12117,7 @@ static void intel_setup_outputs(struct drm_device *dev) intel_edp_psr_init(dev); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + for_each_intel_encoder(dev, encoder) { encoder->base.possible_crtcs = encoder->crtc_mask; encoder->base.possible_clones = intel_encoder_clones(encoder); @@ -12322,29 +12383,27 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.get_display_clock_speed = i830_get_display_clock_speed; - if (HAS_PCH_SPLIT(dev)) { - if (IS_GEN5(dev)) { - dev_priv->display.fdi_link_train = ironlake_fdi_link_train; - dev_priv->display.write_eld = ironlake_write_eld; - } else if (IS_GEN6(dev)) { - dev_priv->display.fdi_link_train = gen6_fdi_link_train; - dev_priv->display.write_eld = ironlake_write_eld; - dev_priv->display.modeset_global_resources = - snb_modeset_global_resources; - } else if (IS_IVYBRIDGE(dev)) { - /* FIXME: detect B0+ stepping and use auto training */ - dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; - dev_priv->display.write_eld = ironlake_write_eld; - dev_priv->display.modeset_global_resources = - ivb_modeset_global_resources; - } else if (IS_HASWELL(dev) || IS_GEN8(dev)) { - dev_priv->display.fdi_link_train = hsw_fdi_link_train; - dev_priv->display.write_eld = haswell_write_eld; - dev_priv->display.modeset_global_resources = - haswell_modeset_global_resources; - } - } else if (IS_G4X(dev)) { + if (IS_G4X(dev)) { dev_priv->display.write_eld = g4x_write_eld; + } else 
if (IS_GEN5(dev)) { + dev_priv->display.fdi_link_train = ironlake_fdi_link_train; + dev_priv->display.write_eld = ironlake_write_eld; + } else if (IS_GEN6(dev)) { + dev_priv->display.fdi_link_train = gen6_fdi_link_train; + dev_priv->display.write_eld = ironlake_write_eld; + dev_priv->display.modeset_global_resources = + snb_modeset_global_resources; + } else if (IS_IVYBRIDGE(dev)) { + /* FIXME: detect B0+ stepping and use auto training */ + dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; + dev_priv->display.write_eld = ironlake_write_eld; + dev_priv->display.modeset_global_resources = + ivb_modeset_global_resources; + } else if (IS_HASWELL(dev) || IS_GEN8(dev)) { + dev_priv->display.fdi_link_train = hsw_fdi_link_train; + dev_priv->display.write_eld = haswell_write_eld; + dev_priv->display.modeset_global_resources = + haswell_modeset_global_resources; } else if (IS_VALLEYVIEW(dev)) { dev_priv->display.modeset_global_resources = valleyview_modeset_global_resources; @@ -12550,8 +12609,6 @@ void intel_modeset_init_hw(struct drm_device *dev) intel_init_clock_gating(dev); - intel_reset_dpio(dev); - intel_enable_gt_powersave(dev); } @@ -12597,7 +12654,10 @@ void intel_modeset_init(struct drm_device *dev) dev->mode_config.max_height = 8192; } - if (IS_GEN2(dev)) { + if (IS_845G(dev) || IS_I865G(dev)) { + dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512; + dev->mode_config.cursor_height = 1023; + } else if (IS_GEN2(dev)) { dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; } else { @@ -12622,7 +12682,6 @@ void intel_modeset_init(struct drm_device *dev) } intel_init_dpio(dev); - intel_reset_dpio(dev); intel_shared_dpll_init(dev); @@ -12665,7 +12724,7 @@ static void intel_enable_pipe_a(struct drm_device *dev) struct intel_connector *connector; struct drm_connector *crt = NULL; struct intel_load_detect_pipe load_detect_temp; - struct drm_modeset_acquire_ctx ctx; + struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; /* We can't just switch on the pipe A, we need to set things up with a * proper mode and output configuration. As a gross hack, enable pipe A @@ -12682,10 +12741,8 @@ static void intel_enable_pipe_a(struct drm_device *dev) if (!crt) return; - if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx)) - intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx); - - + if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx)) + intel_release_load_detect_pipe(crt, &load_detect_temp); } static bool @@ -12952,8 +13009,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); } - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { pipe = 0; if (encoder->get_hw_state(encoder, &pipe)) { @@ -13017,8 +13073,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, } /* HW state is read out, now we need to sanitize this mess. */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { intel_sanitize_encoder(encoder); } @@ -13117,7 +13172,7 @@ void intel_modeset_cleanup(struct drm_device *dev) * experience fancy races otherwise. 
*/ drm_irq_uninstall(dev); - cancel_work_sync(&dev_priv->hotplug_work); + intel_hpd_cancel_work(dev_priv); dev_priv->pm._irqs_disabled = true; /* @@ -13385,3 +13440,25 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); } } + +void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file) +{ + struct intel_crtc *crtc; + + for_each_intel_crtc(dev, crtc) { + struct intel_unpin_work *work; + unsigned long irqflags; + + spin_lock_irqsave(&dev->event_lock, irqflags); + + work = crtc->unpin_work; + + if (work && work->event && + work->event->base.file_priv == file) { + kfree(work->event); + work->event = NULL; + } + + spin_unlock_irqrestore(&dev->event_lock, irqflags); + } +} diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index eb52ecfe14cf..6a2256cf1f2a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -828,20 +828,6 @@ intel_dp_set_clock(struct intel_encoder *encoder, } } -static void -intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - enum transcoder transcoder = crtc->config.cpu_transcoder; - - I915_WRITE(PIPE_DATA_M2(transcoder), - TU_SIZE(m_n->tu) | m_n->gmch_m); - I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n); - I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m); - I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n); -} - bool intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) @@ -867,6 +853,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, pipe_config->has_pch_encoder = true; pipe_config->has_dp_encoder = true; + pipe_config->has_drrs = false; pipe_config->has_audio = intel_dp->has_audio; if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { @@ -970,13 +957,14 @@ found: if (intel_connector->panel.downclock_mode != NULL && intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) { + pipe_config->has_drrs = true; intel_link_compute_m_n(bpp, lane_count, intel_connector->panel.downclock_mode->clock, pipe_config->port_clock, &pipe_config->dp_m2_n2); } - if (HAS_DDI(dev)) + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); else intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); @@ -1285,6 +1273,19 @@ static void edp_panel_vdd_work(struct work_struct *__work) drm_modeset_unlock(&dev->mode_config.connection_mutex); } +static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) +{ + unsigned long delay; + + /* + * Queue the timer to fire a long time from now (relative to the power + * down delay) to keep the panel power up across a sequence of + * operations. 
+ */ + delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); + schedule_delayed_work(&intel_dp->panel_vdd_work, delay); +} + static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) { if (!is_edp(intel_dp)) @@ -1294,17 +1295,10 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) intel_dp->want_panel_vdd = false; - if (sync) { + if (sync) edp_panel_vdd_off_sync(intel_dp); - } else { - /* - * Queue the timer to fire a long - * time from now (relative to the power down delay) - * to keep the panel power up across a sequence of operations - */ - schedule_delayed_work(&intel_dp->panel_vdd_work, - msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); - } + else + edp_panel_vdd_schedule_off(intel_dp); } void intel_edp_panel_on(struct intel_dp *intel_dp) @@ -1800,7 +1794,6 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); lockdep_assert_held(&dev_priv->psr.lock); - lockdep_assert_held(&dev->struct_mutex); WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); @@ -2288,6 +2281,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) enum pipe pipe = intel_crtc->pipe; u32 val; + intel_dp_prepare(encoder); + mutex_lock(&dev_priv->dpio_lock); /* program left/right clock distribution */ @@ -2654,8 +2649,8 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp) /* Program swing margin */ for (i = 0; i < 4; i++) { val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); - val &= ~DPIO_SWING_MARGIN_MASK; - val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT; + val &= ~DPIO_SWING_MARGIN000_MASK; + val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); } @@ -2966,7 +2961,10 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, } } else { - *DP &= ~DP_LINK_TRAIN_MASK; + if (IS_CHERRYVIEW(dev)) + *DP &= ~DP_LINK_TRAIN_MASK_CHV; + else + *DP &= ~DP_LINK_TRAIN_MASK; switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { case DP_TRAINING_PATTERN_DISABLE: @@ -2979,8 +2977,12 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, *DP |= DP_LINK_TRAIN_PAT_2; break; case DP_TRAINING_PATTERN_3: - DRM_ERROR("DP training pattern 3 not supported\n"); - *DP |= DP_LINK_TRAIN_PAT_2; + if (IS_CHERRYVIEW(dev)) { + *DP |= DP_LINK_TRAIN_PAT_3_CHV; + } else { + DRM_ERROR("DP training pattern 3 not supported\n"); + *DP |= DP_LINK_TRAIN_PAT_2; + } break; } } @@ -3267,7 +3269,10 @@ intel_dp_link_down(struct intel_dp *intel_dp) DP &= ~DP_LINK_TRAIN_MASK_CPT; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); } else { - DP &= ~DP_LINK_TRAIN_MASK; + if (IS_CHERRYVIEW(dev)) + DP &= ~DP_LINK_TRAIN_MASK_CHV; + else + DP &= ~DP_LINK_TRAIN_MASK; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); } POSTING_READ(intel_dp->output_reg); @@ -3548,6 +3553,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) if (WARN_ON(!intel_encoder->base.crtc)) return; + if (!to_intel_crtc(intel_encoder->base.crtc)->active) + return; + /* Try to read receiver status if the link appears to be up */ if (!intel_dp_get_link_status(intel_dp, link_status)) { return; @@ -3998,6 +4006,21 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) kfree(intel_dig_port); } +static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + + if (!is_edp(intel_dp)) + return; + + 
edp_panel_vdd_off_sync(intel_dp); +} + +static void intel_dp_encoder_reset(struct drm_encoder *encoder) +{ + intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder)); +} + static const struct drm_connector_funcs intel_dp_connector_funcs = { .dpms = intel_connector_dpms, .detect = intel_dp_detect, @@ -4013,6 +4036,7 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = }; static const struct drm_encoder_funcs intel_dp_enc_funcs = { + .reset = intel_dp_encoder_reset, .destroy = intel_dp_encoder_destroy, }; @@ -4026,15 +4050,22 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) { struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - int ret; + enum intel_display_power_domain power_domain; + bool ret = true; + if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; - DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port, + DRM_DEBUG_KMS("got hpd irq on port %c - %s\n", + port_name(intel_dig_port->port), long_hpd ? "long" : "short"); + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + if (long_hpd) { if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) goto mst_fail; @@ -4050,8 +4081,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) } else { if (intel_dp->is_mst) { - ret = intel_dp_check_mst_status(intel_dp); - if (ret == -EINVAL) + if (intel_dp_check_mst_status(intel_dp) == -EINVAL) goto mst_fail; } @@ -4065,7 +4095,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) drm_modeset_unlock(&dev->mode_config.connection_mutex); } } - return false; + ret = false; + goto put_power; mst_fail: /* if we were in MST mode, and device is not there get out of MST mode */ if (intel_dp->is_mst) { @@ -4073,7 +4104,10 @@ mst_fail: intel_dp->is_mst = false; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); } - return true; +put_power: + intel_display_power_put(dev_priv, power_domain); + + return ret; } /* Return which DP Port should be selected for Transcoder DP control */ @@ -4382,7 +4416,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) val = I915_READ(reg); if (index > DRRS_HIGH_RR) { val |= PIPECONF_EDP_RR_MODE_SWITCH; - intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2); + intel_dp_set_m_n(intel_crtc); } else { val &= ~PIPECONF_EDP_RR_MODE_SWITCH; } @@ -4422,7 +4456,7 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port, } if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { - DRM_INFO("VBT doesn't support DRRS\n"); + DRM_DEBUG_KMS("VBT doesn't support DRRS\n"); return NULL; } @@ -4430,7 +4464,7 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port, (dev, fixed_mode, connector); if (!downclock_mode) { - DRM_INFO("DRRS not supported\n"); + DRM_DEBUG_KMS("DRRS not supported\n"); return NULL; } @@ -4441,10 +4475,36 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port, intel_dp->drrs_state.type = dev_priv->vbt.drrs_type; intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR; - DRM_INFO("seamless DRRS supported for eDP panel.\n"); + DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n"); return downclock_mode; } +void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder) +{ + struct drm_device 
*dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp *intel_dp; + enum intel_display_power_domain power_domain; + + if (intel_encoder->type != INTEL_OUTPUT_EDP) + return; + + intel_dp = enc_to_intel_dp(&intel_encoder->base); + if (!edp_have_panel_vdd(intel_dp)) + return; + /* + * The VDD bit needs a power domain reference, so if the bit is + * already enabled when we boot or resume, grab this reference and + * schedule a vdd off, so we don't hold on to the reference + * indefinitely. + */ + DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + + edp_panel_vdd_schedule_off(intel_dp); +} + static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct intel_connector *intel_connector, struct edp_power_seq *power_seq) @@ -4465,13 +4525,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!is_edp(intel_dp)) return true; - /* The VDD bit needs a power domain reference, so if the bit is already - * enabled when we boot, grab this reference. */ - if (edp_have_panel_vdd(intel_dp)) { - enum intel_display_power_domain power_domain; - power_domain = intel_display_port_power_domain(intel_encoder); - intel_display_power_get(dev_priv, power_domain); - } + intel_edp_panel_vdd_sanitize(intel_encoder); /* Cache DPCD and EDID for edp. */ intel_edp_panel_vdd_on(intel_dp); @@ -4691,6 +4745,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) intel_encoder->disable = intel_disable_dp; intel_encoder->get_hw_state = intel_dp_get_hw_state; intel_encoder->get_config = intel_dp_get_config; + intel_encoder->suspend = intel_dp_encoder_suspend; if (IS_CHERRYVIEW(dev)) { intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; intel_encoder->pre_enable = chv_pre_enable_dp; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8a475a6909c3..d683a2090249 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -153,6 +153,12 @@ struct intel_encoder { * be set correctly before calling this function. */ void (*get_config)(struct intel_encoder *, struct intel_crtc_config *pipe_config); + /* + * Called during system suspend after all pending requests for the + * encoder are flushed (for example for DP AUX transactions) and + * device interrupts are disabled. + */ + void (*suspend)(struct intel_encoder *); int crtc_mask; enum hpd_pin hpd_pin; }; @@ -324,6 +330,7 @@ struct intel_crtc_config { /* m2_n2 for eDP downclock */ struct intel_link_m_n dp_m2_n2; + bool has_drrs; /* * Frequence the dpll for the port should run at. 
Differs from the @@ -404,6 +411,7 @@ struct intel_crtc { uint32_t cursor_addr; int16_t cursor_width, cursor_height; uint32_t cursor_cntl; + uint32_t cursor_size; uint32_t cursor_base; struct intel_plane_config plane_config; @@ -424,8 +432,6 @@ struct intel_crtc { struct intel_pipe_wm active; } wm; - wait_queue_head_t vbl_wait; - int scanline_offset; struct intel_mmio_flip mmio_flip; }; @@ -449,6 +455,7 @@ struct intel_plane { unsigned int crtc_w, crtc_h; uint32_t src_x, src_y; uint32_t src_w, src_h; + unsigned int rotation; /* Since we need to change the watermarks before/after * enabling/disabling the planes, we need to store the parameters here @@ -830,8 +837,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx); void intel_release_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old, - struct drm_modeset_acquire_ctx *ctx); + struct intel_load_detect_pipe *old); int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, struct intel_engine_cs *pipelined); @@ -877,6 +883,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv); void hsw_disable_pc8(struct drm_i915_private *dev_priv); void intel_dp_get_m_n(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config); +void intel_dp_set_m_n(struct intel_crtc *crtc); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, @@ -891,7 +898,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, struct intel_crtc_config *pipe_config); int intel_format_to_fourcc(int format); void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc); - +void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file); /* intel_dp.c */ void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); @@ -912,6 +919,7 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, void intel_edp_backlight_on(struct intel_dp *intel_dp); void intel_edp_backlight_off(struct intel_dp *intel_dp); void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); +void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder); void intel_edp_panel_on(struct intel_dp *intel_dp); void intel_edp_panel_off(struct intel_dp *intel_dp); void intel_edp_psr_enable(struct intel_dp *intel_dp); @@ -945,7 +953,7 @@ void intel_dvo_init(struct drm_device *dev); extern int intel_fbdev_init(struct drm_device *dev); extern void intel_fbdev_initial_config(struct drm_device *dev); extern void intel_fbdev_fini(struct drm_device *dev); -extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); +extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous); extern void intel_fbdev_output_poll_changed(struct drm_device *dev); extern void intel_fbdev_restore_mode(struct drm_device *dev); #else @@ -962,7 +970,7 @@ static inline void intel_fbdev_fini(struct drm_device *dev) { } -static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state) +static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) { } @@ -1085,7 +1093,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); void intel_flush_primary_plane(struct drm_i915_private *dev_priv, enum plane plane); -void intel_plane_restore(struct drm_plane *plane); 
+int intel_plane_restore(struct drm_plane *plane); void intel_plane_disable(struct drm_plane *plane); int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index bfcefbf33709..5bd9e09ad3c5 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -92,6 +92,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, if (fixed_mode) intel_fixed_panel_mode(fixed_mode, adjusted_mode); + /* DSI uses short packets for sync events, so clear mode flags for DSI */ + adjusted_mode->flags = 0; + if (intel_dsi->dev.dev_ops->mode_fixup) return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev, mode, adjusted_mode); @@ -152,6 +155,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder) if (intel_dsi->dev.dev_ops->enable) intel_dsi->dev.dev_ops->enable(&intel_dsi->dev); + wait_for_dsi_fifo_empty(intel_dsi); + /* assert ip_tg_enable signal */ temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK; temp = temp | intel_dsi->port_bits; @@ -177,6 +182,10 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) tmp |= DPLL_REFA_CLK_ENABLE_VLV; I915_WRITE(DPLL(pipe), tmp); + /* update the hw state for DPLL */ + intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV | + DPLL_REFA_CLK_ENABLE_VLV; + tmp = I915_READ(DSPCLK_GATE_D); tmp |= DPOUNIT_CLOCK_GATE_DISABLE; I915_WRITE(DSPCLK_GATE_D, tmp); @@ -192,6 +201,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) if (intel_dsi->dev.dev_ops->send_otp_cmds) intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev); + wait_for_dsi_fifo_empty(intel_dsi); + /* Enable port in pre-enable phase itself because as per hw team * recommendation, port should be enabled befor plane & pipe */ intel_dsi_enable(encoder); @@ -232,6 +243,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder) DRM_DEBUG_KMS("\n"); if (is_vid_mode(intel_dsi)) { + wait_for_dsi_fifo_empty(intel_dsi); + /* de-assert ip_tg_enable signal */ temp = I915_READ(MIPI_PORT_CTRL(pipe)); I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE); @@ -246,8 +259,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder) temp = I915_READ(MIPI_CTRL(pipe)); temp &= ~ESCAPE_CLOCK_DIVIDER_MASK; I915_WRITE(MIPI_CTRL(pipe), temp | - intel_dsi->escape_clk_div << - ESCAPE_CLOCK_DIVIDER_SHIFT); + intel_dsi->escape_clk_div << + ESCAPE_CLOCK_DIVIDER_SHIFT); I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP); @@ -261,6 +274,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder) * some next enable sequence send turn on packet error is observed */ if (intel_dsi->dev.dev_ops->disable) intel_dsi->dev.dev_ops->disable(&intel_dsi->dev); + + wait_for_dsi_fifo_empty(intel_dsi); } static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) @@ -282,7 +297,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) usleep_range(2000, 2500); if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT) - == 0x00000), 30)) + == 0x00000), 30)) DRM_ERROR("DSI LP not going Low\n"); val = I915_READ(MIPI_PORT_CTRL(pipe)); @@ -351,9 +366,21 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, static void intel_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { + u32 pclk; DRM_DEBUG_KMS("\n"); - /* XXX: read flags, set to adjusted_mode */ + /* + * DPLL_MD is not used in case of DSI, reading will get some default value + * set dpll_md 
= 0 + */ + pipe_config->dpll_hw_state.dpll_md = 0; + + pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp); + if (!pclk) + return; + + pipe_config->adjusted_mode.crtc_clock = pclk; + pipe_config->port_clock = pclk; } static enum drm_mode_status @@ -396,9 +423,11 @@ static u16 txclkesc(u32 divider, unsigned int us) } /* return pixels in terms of txbyteclkhs */ -static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count) +static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count, + u16 burst_mode_ratio) { - return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count); + return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio, + 8 * 100), lane_count); } static void set_dsi_timings(struct drm_encoder *encoder, @@ -424,10 +453,12 @@ static void set_dsi_timings(struct drm_encoder *encoder, vbp = mode->vtotal - mode->vsync_end; /* horizontal values are in terms of high speed byte clock */ - hactive = txbyteclkhs(hactive, bpp, lane_count); - hfp = txbyteclkhs(hfp, bpp, lane_count); - hsync = txbyteclkhs(hsync, bpp, lane_count); - hbp = txbyteclkhs(hbp, bpp, lane_count); + hactive = txbyteclkhs(hactive, bpp, lane_count, + intel_dsi->burst_mode_ratio); + hfp = txbyteclkhs(hfp, bpp, lane_count, intel_dsi->burst_mode_ratio); + hsync = txbyteclkhs(hsync, bpp, lane_count, + intel_dsi->burst_mode_ratio); + hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio); I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive); I915_WRITE(MIPI_HFP_COUNT(pipe), hfp); @@ -514,12 +545,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) intel_dsi->video_mode_format == VIDEO_MODE_BURST) { I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe), txbyteclkhs(adjusted_mode->htotal, bpp, - intel_dsi->lane_count) + 1); + intel_dsi->lane_count, + intel_dsi->burst_mode_ratio) + 1); } else { I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe), txbyteclkhs(adjusted_mode->vtotal * adjusted_mode->htotal, - bpp, intel_dsi->lane_count) + 1); + bpp, intel_dsi->lane_count, + intel_dsi->burst_mode_ratio) + 1); } I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout); I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val); @@ -549,7 +582,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) * XXX: write MIPI_STOP_STATE_STALL? */ I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), - intel_dsi->hs_to_lp_count); + intel_dsi->hs_to_lp_count); /* XXX: low power clock equivalence in terms of byte clock. the number * of byte clocks occupied in one low power clock. based on txbyteclkhs @@ -574,10 +607,10 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) * 64 like 1366 x 768. 
Enable RANDOM resolution support for such * panels by default */ I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe), - intel_dsi->video_frmt_cfg_bits | - intel_dsi->video_mode_format | - IP_TG_CONFIG | - RANDOM_DPI_DISPLAY_RESOLUTION); + intel_dsi->video_frmt_cfg_bits | + intel_dsi->video_mode_format | + IP_TG_CONFIG | + RANDOM_DPI_DISPLAY_RESOLUTION); } static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder) diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index 31db33d3e5cc..657eb5c1b9d8 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -116,6 +116,8 @@ struct intel_dsi { u16 clk_hs_to_lp_count; u16 init_count; + u32 pclk; + u16 burst_mode_ratio; /* all delays in ms */ u16 backlight_off_delay; @@ -132,6 +134,7 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) extern void vlv_enable_dsi_pll(struct intel_encoder *encoder); extern void vlv_disable_dsi_pll(struct intel_encoder *encoder); +extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp); extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops; diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c index 933c86305237..f4767fd2ebeb 100644 --- a/drivers/gpu/drm/i915/intel_dsi_cmd.c +++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c @@ -419,3 +419,19 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs) return 0; } + +void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi) +{ + struct drm_encoder *encoder = &intel_dsi->base.base; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + enum pipe pipe = intel_crtc->pipe; + u32 mask; + + mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | + LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; + + if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100)) + DRM_ERROR("DPI FIFOs are not empty\n"); +} diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h index 9a18cbfa5460..46aa1acc00eb 100644 --- a/drivers/gpu/drm/i915/intel_dsi_cmd.h +++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h @@ -51,6 +51,7 @@ int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel, u8 *reqdata, int reqlen, u8 *buf, int buflen); int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs); +void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi); /* XXX: questionable write helpers */ static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi, diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index 47c7584a4aa0..f6bdd44069ce 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -271,6 +271,8 @@ static bool generic_init(struct intel_dsi_device *dsi) u32 ths_prepare_ns, tclk_trail_ns; u32 tclk_prepare_clkzero, ths_prepare_hszero; u32 lp_to_hs_switch, hs_to_lp_switch; + u32 pclk, computed_ddr; + u16 burst_mode_ratio; DRM_DEBUG_KMS("\n"); @@ -284,8 +286,6 @@ static bool generic_init(struct intel_dsi_device *dsi) else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565) bits_per_pixel = 16; - bitrate = (mode->clock * bits_per_pixel) / intel_dsi->lane_count; - intel_dsi->operation_mode = mipi_config->is_cmd_mode; intel_dsi->video_mode_format = mipi_config->video_transfer_mode; intel_dsi->escape_clk_div = mipi_config->byte_clk_sel; @@ -297,6 +297,40 @@ static bool generic_init(struct intel_dsi_device *dsi) 
intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; + pclk = mode->clock; + + /* Burst Mode Ratio + * Target ddr frequency from VBT / non burst ddr freq + * multiply by 100 to preserve remainder + */ + if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) { + if (mipi_config->target_burst_mode_freq) { + computed_ddr = + (pclk * bits_per_pixel) / intel_dsi->lane_count; + + if (mipi_config->target_burst_mode_freq < + computed_ddr) { + DRM_ERROR("Burst mode freq is less than computed\n"); + return false; + } + + burst_mode_ratio = DIV_ROUND_UP( + mipi_config->target_burst_mode_freq * 100, + computed_ddr); + + pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100); + } else { + DRM_ERROR("Burst mode target is not set\n"); + return false; + } + } else + burst_mode_ratio = 100; + + intel_dsi->burst_mode_ratio = burst_mode_ratio; + intel_dsi->pclk = pclk; + + bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count; + switch (intel_dsi->escape_clk_div) { case 0: tlpx_ns = 50; diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index ba79ec19da3b..fa7a6ca34cd6 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c @@ -134,8 +134,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode, #else /* Get DSI clock from pixel clock */ -static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode, - int pixel_format, int lane_count) +static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count) { u32 dsi_clk_khz; u32 bpp; @@ -156,7 +155,7 @@ static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode, /* DSI data rate = pixel clock * bits per pixel / lane count pixel clock is converted from KHz to Hz */ - dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count); + dsi_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lane_count); return dsi_clk_khz; } @@ -191,7 +190,7 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp) for (m = 62; m <= 92; m++) { for (p = 2; p <= 6; p++) { /* Find the optimal m and p divisors - with minimal error +/- the required clock */ + with minimal error +/- the required clock */ calc_dsi_clk = (m * ref_clk) / p; if (calc_dsi_clk == target_dsi_clk) { calc_m = m; @@ -228,15 +227,13 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp) static void vlv_configure_dsi_pll(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); - const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); int ret; struct dsi_mnp dsi_mnp; u32 dsi_clk; - dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format, - intel_dsi->lane_count); + dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, + intel_dsi->lane_count); ret = dsi_calc_mnp(dsi_clk, &dsi_mnp); if (ret) { @@ -298,3 +295,84 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder) mutex_unlock(&dev_priv->dpio_lock); } + +static void assert_bpp_mismatch(int pixel_format, int pipe_bpp) +{ + int bpp; + + switch (pixel_format) { + default: + case VID_MODE_FORMAT_RGB888: + case VID_MODE_FORMAT_RGB666_LOOSE: + bpp = 24; + break; + case VID_MODE_FORMAT_RGB666: + bpp = 18; + break; + case VID_MODE_FORMAT_RGB565: + bpp = 16; + break; + } + + WARN(bpp != pipe_bpp, + "bpp match assertion failure (expected %d, current %d)\n", + bpp, pipe_bpp); +} + +u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int 
pipe_bpp) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 dsi_clock, pclk; + u32 pll_ctl, pll_div; + u32 m = 0, p = 0; + int refclk = 25000; + int i; + + DRM_DEBUG_KMS("\n"); + + mutex_lock(&dev_priv->dpio_lock); + pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); + pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER); + mutex_unlock(&dev_priv->dpio_lock); + + /* mask out other bits and extract the P1 divisor */ + pll_ctl &= DSI_PLL_P1_POST_DIV_MASK; + pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2); + + /* mask out the other bits and extract the M1 divisor */ + pll_div &= DSI_PLL_M1_DIV_MASK; + pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT; + + while (pll_ctl) { + pll_ctl = pll_ctl >> 1; + p++; + } + p--; + + if (!p) { + DRM_ERROR("wrong P1 divisor\n"); + return 0; + } + + for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) { + if (lfsr_converts[i] == pll_div) + break; + } + + if (i == ARRAY_SIZE(lfsr_converts)) { + DRM_ERROR("wrong m_seed programmed\n"); + return 0; + } + + m = i + 62; + + dsi_clock = (m * refclk) / p; + + /* pixel_format and pipe_bpp should agree */ + assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp); + + pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp); + + return pclk; +} diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index f475414671d8..cf052a39558d 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -26,6 +26,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/console.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> @@ -636,6 +637,15 @@ out: return false; } +static void intel_fbdev_suspend_worker(struct work_struct *work) +{ + intel_fbdev_set_suspend(container_of(work, + struct drm_i915_private, + fbdev_suspend_work)->dev, + FBINFO_STATE_RUNNING, + true); +} + int intel_fbdev_init(struct drm_device *dev) { struct intel_fbdev *ifbdev; @@ -662,6 +672,8 @@ int intel_fbdev_init(struct drm_device *dev) } dev_priv->fbdev = ifbdev; + INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker); + drm_fb_helper_single_add_all_connectors(&ifbdev->helper); return 0; @@ -682,12 +694,14 @@ void intel_fbdev_fini(struct drm_device *dev) if (!dev_priv->fbdev) return; + flush_work(&dev_priv->fbdev_suspend_work); + intel_fbdev_destroy(dev, dev_priv->fbdev); kfree(dev_priv->fbdev); dev_priv->fbdev = NULL; } -void intel_fbdev_set_suspend(struct drm_device *dev, int state) +void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_fbdev *ifbdev = dev_priv->fbdev; @@ -698,6 +712,33 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state) info = ifbdev->helper.fbdev; + if (synchronous) { + /* Flush any pending work to turn the console on, and then + * wait to turn it off. It must be synchronous as we are + * about to suspend or unload the driver. + * + * Note that from within the work-handler, we cannot flush + * ourselves, so only flush outstanding work upon suspend! + */ + if (state != FBINFO_STATE_RUNNING) + flush_work(&dev_priv->fbdev_suspend_work); + console_lock(); + } else { + /* + * The console lock can be pretty contented on resume due + * to all the printk activity. Try to keep it out of the hot + * path of resume if possible. 
+ */ + WARN_ON(state != FBINFO_STATE_RUNNING); + if (!console_trylock()) { + /* Don't block our own workqueue as this can + * be run in parallel with other i915.ko tasks. + */ + schedule_work(&dev_priv->fbdev_suspend_work); + return; + } + } + /* On resume from hibernation: If the object is shmemfs backed, it has * been restored from swap. If the object is stolen however, it will be * full of whatever garbage was left in there. @@ -706,6 +747,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state) memset_io(info->screen_base, 0, info->screen_size); fb_set_suspend(info, state); + console_unlock(); } void intel_fbdev_output_poll_changed(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index f9151f6641d9..9169786dbbc3 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -885,7 +885,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc) if (HAS_GMCH_DISPLAY(dev)) return false; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + for_each_intel_encoder(dev, encoder) { if (encoder->new_crtc != crtc) continue; @@ -1260,6 +1260,8 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) enum pipe pipe = intel_crtc->pipe; u32 val; + intel_hdmi_prepare(encoder); + mutex_lock(&dev_priv->dpio_lock); /* program left/right clock distribution */ @@ -1429,8 +1431,8 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) for (i = 0; i < 4; i++) { val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); - val &= ~DPIO_SWING_MARGIN_MASK; - val |= 102 << DPIO_SWING_MARGIN_SHIFT; + val &= ~DPIO_SWING_MARGIN000_MASK; + val |= 102 << DPIO_SWING_MARGIN000_SHIFT; vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c new file mode 100644 index 000000000000..c096b9b7f22a --- /dev/null +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -0,0 +1,1697 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Ben Widawsky <ben@bwidawsk.net> + * Michel Thierry <michel.thierry@intel.com> + * Thomas Daniel <thomas.daniel@intel.com> + * Oscar Mateo <oscar.mateo@intel.com> + * + */ + +/** + * DOC: Logical Rings, Logical Ring Contexts and Execlists + * + * Motivation: + * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts". 
+ * These expanded contexts enable a number of new abilities, especially + * "Execlists" (also implemented in this file). + * + * One of the main differences with the legacy HW contexts is that logical + * ring contexts incorporate many more things to the context's state, like + * PDPs or ringbuffer control registers: + * + * The reason why PDPs are included in the context is straightforward: as + * PPGTTs (per-process GTTs) are actually per-context, having the PDPs + * contained there mean you don't need to do a ppgtt->switch_mm yourself, + * instead, the GPU will do it for you on the context switch. + * + * But, what about the ringbuffer control registers (head, tail, etc..)? + * shouldn't we just need a set of those per engine command streamer? This is + * where the name "Logical Rings" starts to make sense: by virtualizing the + * rings, the engine cs shifts to a new "ring buffer" with every context + * switch. When you want to submit a workload to the GPU you: A) choose your + * context, B) find its appropriate virtualized ring, C) write commands to it + * and then, finally, D) tell the GPU to switch to that context. + * + * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch + * to a contexts is via a context execution list, ergo "Execlists". + * + * LRC implementation: + * Regarding the creation of contexts, we have: + * + * - One global default context. + * - One local default context for each opened fd. + * - One local extra context for each context create ioctl call. + * + * Now that ringbuffers belong per-context (and not per-engine, like before) + * and that contexts are uniquely tied to a given engine (and not reusable, + * like before) we need: + * + * - One ringbuffer per-engine inside each context. + * - One backing object per-engine inside each context. + * + * The global default context starts its life with these new objects fully + * allocated and populated. The local default context for each opened fd is + * more complex, because we don't know at creation time which engine is going + * to use them. To handle this, we have implemented a deferred creation of LR + * contexts: + * + * The local context starts its life as a hollow or blank holder, that only + * gets populated for a given engine once we receive an execbuffer. If later + * on we receive another execbuffer ioctl for the same context but a different + * engine, we allocate/populate a new ringbuffer and context backing object and + * so on. + * + * Finally, regarding local contexts created using the ioctl call: as they are + * only allowed with the render ring, we can allocate & populate them right + * away (no need to defer anything, at least for now). + * + * Execlists implementation: + * Execlists are the new method by which, on gen8+ hardware, workloads are + * submitted for execution (as opposed to the legacy, ringbuffer-based, method). + * This method works as follows: + * + * When a request is committed, its commands (the BB start and any leading or + * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer + * for the appropriate context. The tail pointer in the hardware context is not + * updated at this time, but instead, kept by the driver in the ringbuffer + * structure. A structure representing this request is added to a request queue + * for the appropriate engine: this structure contains a copy of the context's + * tail after the request was written to the ring buffer and a pointer to the + * context itself. 
+ * + * If the engine's request queue was empty before the request was added, the + * queue is processed immediately. Otherwise the queue will be processed during + * a context switch interrupt. In any case, elements on the queue will get sent + * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a + * globally unique 20-bits submission ID. + * + * When execution of a request completes, the GPU updates the context status + * buffer with a context complete event and generates a context switch interrupt. + * During the interrupt handling, the driver examines the events in the buffer: + * for each context complete event, if the announced ID matches that on the head + * of the request queue, then that request is retired and removed from the queue. + * + * After processing, if any requests were retired and the queue is not empty + * then a new execution list can be submitted. The two requests at the front of + * the queue are next to be submitted but since a context may not occur twice in + * an execution list, if subsequent requests have the same ID as the first then + * the two requests must be combined. This is done simply by discarding requests + * at the head of the queue until either only one requests is left (in which case + * we use a NULL second context) or the first two requests have unique IDs. + * + * By always executing the first two requests in the queue the driver ensures + * that the GPU is kept as busy as possible. In the case where a single context + * completes but a second context is still executing, the request for this second + * context will be at the head of the queue when we remove the first one. This + * request will then be resubmitted along with a new request for a different context, + * which will cause the hardware to continue executing the second request and queue + * the new request (the GPU detects the condition of a context getting preempted + * with the same context and optimizes the context switch flow by not doing + * preemption, but just sampling the new tail pointer). 
+ * + */ + +#include <drm/drmP.h> +#include <drm/i915_drm.h> +#include "i915_drv.h" + +#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE) +#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE) + +#define GEN8_LR_CONTEXT_ALIGN 4096 + +#define RING_EXECLIST_QFULL (1 << 0x2) +#define RING_EXECLIST1_VALID (1 << 0x3) +#define RING_EXECLIST0_VALID (1 << 0x4) +#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE) +#define RING_EXECLIST1_ACTIVE (1 << 0x11) +#define RING_EXECLIST0_ACTIVE (1 << 0x12) + +#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0) +#define GEN8_CTX_STATUS_PREEMPTED (1 << 1) +#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2) +#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3) +#define GEN8_CTX_STATUS_COMPLETE (1 << 4) +#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15) + +#define CTX_LRI_HEADER_0 0x01 +#define CTX_CONTEXT_CONTROL 0x02 +#define CTX_RING_HEAD 0x04 +#define CTX_RING_TAIL 0x06 +#define CTX_RING_BUFFER_START 0x08 +#define CTX_RING_BUFFER_CONTROL 0x0a +#define CTX_BB_HEAD_U 0x0c +#define CTX_BB_HEAD_L 0x0e +#define CTX_BB_STATE 0x10 +#define CTX_SECOND_BB_HEAD_U 0x12 +#define CTX_SECOND_BB_HEAD_L 0x14 +#define CTX_SECOND_BB_STATE 0x16 +#define CTX_BB_PER_CTX_PTR 0x18 +#define CTX_RCS_INDIRECT_CTX 0x1a +#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c +#define CTX_LRI_HEADER_1 0x21 +#define CTX_CTX_TIMESTAMP 0x22 +#define CTX_PDP3_UDW 0x24 +#define CTX_PDP3_LDW 0x26 +#define CTX_PDP2_UDW 0x28 +#define CTX_PDP2_LDW 0x2a +#define CTX_PDP1_UDW 0x2c +#define CTX_PDP1_LDW 0x2e +#define CTX_PDP0_UDW 0x30 +#define CTX_PDP0_LDW 0x32 +#define CTX_LRI_HEADER_2 0x41 +#define CTX_R_PWR_CLK_STATE 0x42 +#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44 + +#define GEN8_CTX_VALID (1<<0) +#define GEN8_CTX_FORCE_PD_RESTORE (1<<1) +#define GEN8_CTX_FORCE_RESTORE (1<<2) +#define GEN8_CTX_L3LLC_COHERENT (1<<5) +#define GEN8_CTX_PRIVILEGE (1<<8) +enum { + ADVANCED_CONTEXT = 0, + LEGACY_CONTEXT, + ADVANCED_AD_CONTEXT, + LEGACY_64B_CONTEXT +}; +#define GEN8_CTX_MODE_SHIFT 3 +enum { + FAULT_AND_HANG = 0, + FAULT_AND_HALT, /* Debug only */ + FAULT_AND_STREAM, + FAULT_AND_CONTINUE /* Unsupported */ +}; +#define GEN8_CTX_ID_SHIFT 32 + +/** + * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists + * @dev: DRM device. + * @enable_execlists: value of i915.enable_execlists module parameter. + * + * Only certain platforms support Execlists (the prerequisites being + * support for Logical Ring Contexts and Aliasing PPGTT or better), + * and only when enabled via module parameter. + * + * Return: 1 if Execlists is supported and has to be enabled. + */ +int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists) +{ + WARN_ON(i915.enable_ppgtt == -1); + + if (enable_execlists == 0) + return 0; + + if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) && + i915.use_mmio_flip >= 0) + return 1; + + return 0; +} + +/** + * intel_execlists_ctx_id() - get the Execlists Context ID + * @ctx_obj: Logical Ring Context backing object. + * + * Do not confuse with ctx->id! Unfortunately we have a name overload + * here: the old context ID we pass to userspace as a handler so that + * they can refer to a context, and the new context ID we pass to the + * ELSP so that the GPU can inform us of the context status via + * interrupts. + * + * Return: 20-bits globally unique context ID. 
+ */ +u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj) +{ + u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj); + + /* LRCA is required to be 4K aligned so the more significant 20 bits + * are globally unique */ + return lrca >> 12; +} + +static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj) +{ + uint64_t desc; + uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj); + + WARN_ON(lrca & 0xFFFFFFFF00000FFFULL); + + desc = GEN8_CTX_VALID; + desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT; + desc |= GEN8_CTX_L3LLC_COHERENT; + desc |= GEN8_CTX_PRIVILEGE; + desc |= lrca; + desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT; + + /* TODO: WaDisableLiteRestore when we start using semaphore + * signalling between Command Streamers */ + /* desc |= GEN8_CTX_FORCE_RESTORE; */ + + return desc; +} + +static void execlists_elsp_write(struct intel_engine_cs *ring, + struct drm_i915_gem_object *ctx_obj0, + struct drm_i915_gem_object *ctx_obj1) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + uint64_t temp = 0; + uint32_t desc[4]; + unsigned long flags; + + /* XXX: You must always write both descriptors in the order below. */ + if (ctx_obj1) + temp = execlists_ctx_descriptor(ctx_obj1); + else + temp = 0; + desc[1] = (u32)(temp >> 32); + desc[0] = (u32)temp; + + temp = execlists_ctx_descriptor(ctx_obj0); + desc[3] = (u32)(temp >> 32); + desc[2] = (u32)temp; + + /* Set Force Wakeup bit to prevent GT from entering C6 while ELSP writes + * are in progress. + * + * The other problem is that we can't just call gen6_gt_force_wake_get() + * because that function calls intel_runtime_pm_get(), which might sleep. + * Instead, we do the runtime_pm_get/put when creating/destroying requests. + */ + spin_lock_irqsave(&dev_priv->uncore.lock, flags); + if (dev_priv->uncore.forcewake_count++ == 0) + dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL); + spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); + + I915_WRITE(RING_ELSP(ring), desc[1]); + I915_WRITE(RING_ELSP(ring), desc[0]); + I915_WRITE(RING_ELSP(ring), desc[3]); + /* The context is automatically loaded after the following */ + I915_WRITE(RING_ELSP(ring), desc[2]); + + /* ELSP is a wo register, so use another nearby reg for posting instead */ + POSTING_READ(RING_EXECLIST_STATUS(ring)); + + /* Release Force Wakeup (see the big comment above). 
*/ + spin_lock_irqsave(&dev_priv->uncore.lock, flags); + if (--dev_priv->uncore.forcewake_count == 0) + dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); + spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); +} + +static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail) +{ + struct page *page; + uint32_t *reg_state; + + page = i915_gem_object_get_page(ctx_obj, 1); + reg_state = kmap_atomic(page); + + reg_state[CTX_RING_TAIL+1] = tail; + + kunmap_atomic(reg_state); + + return 0; +} + +static int execlists_submit_context(struct intel_engine_cs *ring, + struct intel_context *to0, u32 tail0, + struct intel_context *to1, u32 tail1) +{ + struct drm_i915_gem_object *ctx_obj0; + struct drm_i915_gem_object *ctx_obj1 = NULL; + + ctx_obj0 = to0->engine[ring->id].state; + BUG_ON(!ctx_obj0); + WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0)); + + execlists_ctx_write_tail(ctx_obj0, tail0); + + if (to1) { + ctx_obj1 = to1->engine[ring->id].state; + BUG_ON(!ctx_obj1); + WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1)); + + execlists_ctx_write_tail(ctx_obj1, tail1); + } + + execlists_elsp_write(ring, ctx_obj0, ctx_obj1); + + return 0; +} + +static void execlists_context_unqueue(struct intel_engine_cs *ring) +{ + struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL; + struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL; + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + assert_spin_locked(&ring->execlist_lock); + + if (list_empty(&ring->execlist_queue)) + return; + + /* Try to read in pairs */ + list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue, + execlist_link) { + if (!req0) { + req0 = cursor; + } else if (req0->ctx == cursor->ctx) { + /* Same ctx: ignore first request, as second request + * will update tail past first request's workload */ + cursor->elsp_submitted = req0->elsp_submitted; + list_del(&req0->execlist_link); + queue_work(dev_priv->wq, &req0->work); + req0 = cursor; + } else { + req1 = cursor; + break; + } + } + + WARN_ON(req1 && req1->elsp_submitted); + + WARN_ON(execlists_submit_context(ring, req0->ctx, req0->tail, + req1 ? req1->ctx : NULL, + req1 ? req1->tail : 0)); + + req0->elsp_submitted++; + if (req1) + req1->elsp_submitted++; +} + +static bool execlists_check_remove_request(struct intel_engine_cs *ring, + u32 request_id) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct intel_ctx_submit_request *head_req; + + assert_spin_locked(&ring->execlist_lock); + + head_req = list_first_entry_or_null(&ring->execlist_queue, + struct intel_ctx_submit_request, + execlist_link); + + if (head_req != NULL) { + struct drm_i915_gem_object *ctx_obj = + head_req->ctx->engine[ring->id].state; + if (intel_execlists_ctx_id(ctx_obj) == request_id) { + WARN(head_req->elsp_submitted == 0, + "Never submitted head request\n"); + + if (--head_req->elsp_submitted <= 0) { + list_del(&head_req->execlist_link); + queue_work(dev_priv->wq, &head_req->work); + return true; + } + } + } + + return false; +} + +/** + * intel_execlists_handle_ctx_events() - handle Context Switch interrupts + * @ring: Engine Command Streamer to handle. + * + * Check the unread Context Status Buffers and manage the submission of new + * contexts to the ELSP accordingly. 
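+ *
+ * A worked example (numbers chosen purely for illustration): with six CSB
+ * entries, if the driver last stopped at read pointer 5 and the hardware
+ * write pointer now reads 1, the write pointer is treated as 7 and entries
+ * (5 + 1) % 6 = 0 and (5 + 2) % 6 = 1 are examined, i.e. two new events
+ * are processed.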
+ */ +void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + u32 status_pointer; + u8 read_pointer; + u8 write_pointer; + u32 status; + u32 status_id; + u32 submit_contexts = 0; + + status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); + + read_pointer = ring->next_context_status_buffer; + write_pointer = status_pointer & 0x07; + if (read_pointer > write_pointer) + write_pointer += 6; + + spin_lock(&ring->execlist_lock); + + while (read_pointer < write_pointer) { + read_pointer++; + status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + + (read_pointer % 6) * 8); + status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + + (read_pointer % 6) * 8 + 4); + + if (status & GEN8_CTX_STATUS_PREEMPTED) { + if (status & GEN8_CTX_STATUS_LITE_RESTORE) { + if (execlists_check_remove_request(ring, status_id)) + WARN(1, "Lite Restored request removed from queue\n"); + } else + WARN(1, "Preemption without Lite Restore\n"); + } + + if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) || + (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) { + if (execlists_check_remove_request(ring, status_id)) + submit_contexts++; + } + } + + if (submit_contexts != 0) + execlists_context_unqueue(ring); + + spin_unlock(&ring->execlist_lock); + + WARN(submit_contexts > 2, "More than two context complete events?\n"); + ring->next_context_status_buffer = write_pointer % 6; + + I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), + ((u32)ring->next_context_status_buffer & 0x07) << 8); +} + +static void execlists_free_request_task(struct work_struct *work) +{ + struct intel_ctx_submit_request *req = + container_of(work, struct intel_ctx_submit_request, work); + struct drm_device *dev = req->ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + intel_runtime_pm_put(dev_priv); + + mutex_lock(&dev->struct_mutex); + i915_gem_context_unreference(req->ctx); + mutex_unlock(&dev->struct_mutex); + + kfree(req); +} + +static int execlists_context_queue(struct intel_engine_cs *ring, + struct intel_context *to, + u32 tail) +{ + struct intel_ctx_submit_request *req = NULL, *cursor; + struct drm_i915_private *dev_priv = ring->dev->dev_private; + unsigned long flags; + int num_elements = 0; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (req == NULL) + return -ENOMEM; + req->ctx = to; + i915_gem_context_reference(req->ctx); + req->ring = ring; + req->tail = tail; + INIT_WORK(&req->work, execlists_free_request_task); + + intel_runtime_pm_get(dev_priv); + + spin_lock_irqsave(&ring->execlist_lock, flags); + + list_for_each_entry(cursor, &ring->execlist_queue, execlist_link) + if (++num_elements > 2) + break; + + if (num_elements > 2) { + struct intel_ctx_submit_request *tail_req; + + tail_req = list_last_entry(&ring->execlist_queue, + struct intel_ctx_submit_request, + execlist_link); + + if (to == tail_req->ctx) { + WARN(tail_req->elsp_submitted != 0, + "More than 2 already-submitted reqs queued\n"); + list_del(&tail_req->execlist_link); + queue_work(dev_priv->wq, &tail_req->work); + } + } + + list_add_tail(&req->execlist_link, &ring->execlist_queue); + if (num_elements == 0) + execlists_context_unqueue(ring); + + spin_unlock_irqrestore(&ring->execlist_lock, flags); + + return 0; +} + +static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf) +{ + struct intel_engine_cs *ring = ringbuf->ring; + uint32_t flush_domains; + int ret; + + flush_domains = 0; + if (ring->gpu_caches_dirty) + flush_domains = I915_GEM_GPU_DOMAINS; + + ret = 
ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains); + if (ret) + return ret; + + ring->gpu_caches_dirty = false; + return 0; +} + +static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf, + struct list_head *vmas) +{ + struct intel_engine_cs *ring = ringbuf->ring; + struct i915_vma *vma; + uint32_t flush_domains = 0; + bool flush_chipset = false; + int ret; + + list_for_each_entry(vma, vmas, exec_list) { + struct drm_i915_gem_object *obj = vma->obj; + + ret = i915_gem_object_sync(obj, ring); + if (ret) + return ret; + + if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) + flush_chipset |= i915_gem_clflush_object(obj, false); + + flush_domains |= obj->base.write_domain; + } + + if (flush_domains & I915_GEM_DOMAIN_GTT) + wmb(); + + /* Unconditionally invalidate gpu caches and ensure that we do flush + * any residual writes from the previous batch. + */ + return logical_ring_invalidate_all_caches(ringbuf); +} + +/** + * execlists_submission() - submit a batchbuffer for execution, Execlists style + * @dev: DRM device. + * @file: DRM file. + * @ring: Engine Command Streamer to submit to. + * @ctx: Context to employ for this submission. + * @args: execbuffer call arguments. + * @vmas: list of vmas. + * @batch_obj: the batchbuffer to submit. + * @exec_start: batchbuffer start virtual address pointer. + * @flags: translated execbuffer call flags. + * + * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts + * away the submission details of the execbuffer ioctl call. + * + * Return: non-zero if the submission fails. + */ +int intel_execlists_submission(struct drm_device *dev, struct drm_file *file, + struct intel_engine_cs *ring, + struct intel_context *ctx, + struct drm_i915_gem_execbuffer2 *args, + struct list_head *vmas, + struct drm_i915_gem_object *batch_obj, + u64 exec_start, u32 flags) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; + int instp_mode; + u32 instp_mask; + int ret; + + instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; + instp_mask = I915_EXEC_CONSTANTS_MASK; + switch (instp_mode) { + case I915_EXEC_CONSTANTS_REL_GENERAL: + case I915_EXEC_CONSTANTS_ABSOLUTE: + case I915_EXEC_CONSTANTS_REL_SURFACE: + if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) { + DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); + return -EINVAL; + } + + if (instp_mode != dev_priv->relative_constants_mode) { + if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { + DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); + return -EINVAL; + } + + /* The HW changed the meaning on this bit on gen6 */ + instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; + } + break; + default: + DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode); + return -EINVAL; + } + + if (args->num_cliprects != 0) { + DRM_DEBUG("clip rectangles are only valid on pre-gen5\n"); + return -EINVAL; + } else { + if (args->DR4 == 0xffffffff) { + DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); + args->DR4 = 0; + } + + if (args->DR1 || args->DR4 || args->cliprects_ptr) { + DRM_DEBUG("0 cliprects but dirt in cliprects fields\n"); + return -EINVAL; + } + } + + if (args->flags & I915_EXEC_GEN7_SOL_RESET) { + DRM_DEBUG("sol reset is gen7 only\n"); + return -EINVAL; + } + + ret = execlists_move_to_gpu(ringbuf, vmas); + if (ret) + return ret; + + if (ring == &dev_priv->ring[RCS] && + instp_mode != dev_priv->relative_constants_mode) { + ret = intel_logical_ring_begin(ringbuf, 4); + if (ret) + return ret; + 
+ intel_logical_ring_emit(ringbuf, MI_NOOP); + intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1)); + intel_logical_ring_emit(ringbuf, INSTPM); + intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode); + intel_logical_ring_advance(ringbuf); + + dev_priv->relative_constants_mode = instp_mode; + } + + ret = ring->emit_bb_start(ringbuf, exec_start, flags); + if (ret) + return ret; + + i915_gem_execbuffer_move_to_active(vmas, ring); + i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); + + return 0; +} + +void intel_logical_ring_stop(struct intel_engine_cs *ring) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + int ret; + + if (!intel_ring_initialized(ring)) + return; + + ret = intel_ring_idle(ring); + if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error)) + DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", + ring->name, ret); + + /* TODO: Is this correct with Execlists enabled? */ + I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); + if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { + DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); + return; + } + I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); +} + +int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf) +{ + struct intel_engine_cs *ring = ringbuf->ring; + int ret; + + if (!ring->gpu_caches_dirty) + return 0; + + ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS); + if (ret) + return ret; + + ring->gpu_caches_dirty = false; + return 0; +} + +/** + * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload + * @ringbuf: Logical Ringbuffer to advance. + * + * The tail is updated in our logical ringbuffer struct, not in the actual context. What + * really happens during submission is that the context and current tail will be placed + * on a queue waiting for the ELSP to be ready to accept a new context submission. At that + * point, the tail *inside* the context is updated and the ELSP written to. + */ +void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf) +{ + struct intel_engine_cs *ring = ringbuf->ring; + struct intel_context *ctx = ringbuf->FIXME_lrc_ctx; + + intel_logical_ring_advance(ringbuf); + + if (intel_ring_stopped(ring)) + return; + + execlists_context_queue(ring, ctx, ringbuf->tail); +} + +static int logical_ring_alloc_seqno(struct intel_engine_cs *ring, + struct intel_context *ctx) +{ + if (ring->outstanding_lazy_seqno) + return 0; + + if (ring->preallocated_lazy_request == NULL) { + struct drm_i915_gem_request *request; + + request = kmalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + + /* Hold a reference to the context this request belongs to + * (we will need it when the time comes to emit/retire the + * request). 
+ */ + request->ctx = ctx; + i915_gem_context_reference(request->ctx); + + ring->preallocated_lazy_request = request; + } + + return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); +} + +static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf, + int bytes) +{ + struct intel_engine_cs *ring = ringbuf->ring; + struct drm_i915_gem_request *request; + u32 seqno = 0; + int ret; + + if (ringbuf->last_retired_head != -1) { + ringbuf->head = ringbuf->last_retired_head; + ringbuf->last_retired_head = -1; + + ringbuf->space = intel_ring_space(ringbuf); + if (ringbuf->space >= bytes) + return 0; + } + + list_for_each_entry(request, &ring->request_list, list) { + if (__intel_ring_space(request->tail, ringbuf->tail, + ringbuf->size) >= bytes) { + seqno = request->seqno; + break; + } + } + + if (seqno == 0) + return -ENOSPC; + + ret = i915_wait_seqno(ring, seqno); + if (ret) + return ret; + + i915_gem_retire_requests_ring(ring); + ringbuf->head = ringbuf->last_retired_head; + ringbuf->last_retired_head = -1; + + ringbuf->space = intel_ring_space(ringbuf); + return 0; +} + +static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, + int bytes) +{ + struct intel_engine_cs *ring = ringbuf->ring; + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long end; + int ret; + + ret = logical_ring_wait_request(ringbuf, bytes); + if (ret != -ENOSPC) + return ret; + + /* Force the context submission in case we have been skipping it */ + intel_logical_ring_advance_and_submit(ringbuf); + + /* With GEM the hangcheck timer should kick us out of the loop, + * leaving it early runs the risk of corrupting GEM state (due + * to running on almost untested codepaths). But on resume + * timers don't work yet, so prevent a complete hang in that + * case by choosing an insanely large timeout. */ + end = jiffies + 60 * HZ; + + do { + ringbuf->head = I915_READ_HEAD(ring); + ringbuf->space = intel_ring_space(ringbuf); + if (ringbuf->space >= bytes) { + ret = 0; + break; + } + + msleep(1); + + if (dev_priv->mm.interruptible && signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + + ret = i915_gem_check_wedge(&dev_priv->gpu_error, + dev_priv->mm.interruptible); + if (ret) + break; + + if (time_after(jiffies, end)) { + ret = -EBUSY; + break; + } + } while (1); + + return ret; +} + +static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf) +{ + uint32_t __iomem *virt; + int rem = ringbuf->size - ringbuf->tail; + + if (ringbuf->space < rem) { + int ret = logical_ring_wait_for_space(ringbuf, rem); + + if (ret) + return ret; + } + + virt = ringbuf->virtual_start + ringbuf->tail; + rem /= 4; + while (rem--) + iowrite32(MI_NOOP, virt++); + + ringbuf->tail = 0; + ringbuf->space = intel_ring_space(ringbuf); + + return 0; +} + +static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes) +{ + int ret; + + if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) { + ret = logical_ring_wrap_buffer(ringbuf); + if (unlikely(ret)) + return ret; + } + + if (unlikely(ringbuf->space < bytes)) { + ret = logical_ring_wait_for_space(ringbuf, bytes); + if (unlikely(ret)) + return ret; + } + + return 0; +} + +/** + * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands + * + * @ringbuf: Logical ringbuffer. + * @num_dwords: number of DWORDs that we plan to write to the ringbuffer. 
+ * + * The ringbuffer might not be ready to accept the commands right away (maybe it needs to + * be wrapped, or wait a bit for the tail to be updated). This function takes care of that + * and also preallocates a request (every workload submission is still mediated through + * requests, same as it did with legacy ringbuffer submission). + * + * Return: non-zero if the ringbuffer is not ready to be written to. + */ +int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords) +{ + struct intel_engine_cs *ring = ringbuf->ring; + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = i915_gem_check_wedge(&dev_priv->gpu_error, + dev_priv->mm.interruptible); + if (ret) + return ret; + + ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t)); + if (ret) + return ret; + + /* Preallocate the olr before touching the ring */ + ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx); + if (ret) + return ret; + + ringbuf->space -= num_dwords * sizeof(uint32_t); + return 0; +} + +static int gen8_init_common_ring(struct intel_engine_cs *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); + I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); + + I915_WRITE(RING_MODE_GEN7(ring), + _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | + _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); + POSTING_READ(RING_MODE_GEN7(ring)); + DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); + + memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); + + return 0; +} + +static int gen8_init_render_ring(struct intel_engine_cs *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = gen8_init_common_ring(ring); + if (ret) + return ret; + + /* We need to disable the AsyncFlip performance optimisations in order + * to use MI_WAIT_FOR_EVENT within the CS. It should already be + * programmed to '1' on all products. + * + * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv + */ + I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); + + ret = intel_init_pipe_control(ring); + if (ret) + return ret; + + I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); + + return ret; +} + +static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf, + u64 offset, unsigned flags) +{ + bool ppgtt = !(flags & I915_DISPATCH_SECURE); + int ret; + + ret = intel_logical_ring_begin(ringbuf, 4); + if (ret) + return ret; + + /* FIXME(BDW): Address space and security selectors. 
*/ + intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8)); + intel_logical_ring_emit(ringbuf, lower_32_bits(offset)); + intel_logical_ring_emit(ringbuf, upper_32_bits(offset)); + intel_logical_ring_emit(ringbuf, MI_NOOP); + intel_logical_ring_advance(ringbuf); + + return 0; +} + +static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long flags; + + if (!dev->irq_enabled) + return false; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (ring->irq_refcount++ == 0) { + I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); + POSTING_READ(RING_IMR(ring->mmio_base)); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + + return true; +} + +static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (--ring->irq_refcount == 0) { + I915_WRITE_IMR(ring, ~ring->irq_keep_mask); + POSTING_READ(RING_IMR(ring->mmio_base)); + } + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); +} + +static int gen8_emit_flush(struct intel_ringbuffer *ringbuf, + u32 invalidate_domains, + u32 unused) +{ + struct intel_engine_cs *ring = ringbuf->ring; + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t cmd; + int ret; + + ret = intel_logical_ring_begin(ringbuf, 4); + if (ret) + return ret; + + cmd = MI_FLUSH_DW + 1; + + if (ring == &dev_priv->ring[VCS]) { + if (invalidate_domains & I915_GEM_GPU_DOMAINS) + cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | + MI_FLUSH_DW_STORE_INDEX | + MI_FLUSH_DW_OP_STOREDW; + } else { + if (invalidate_domains & I915_GEM_DOMAIN_RENDER) + cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | + MI_FLUSH_DW_OP_STOREDW; + } + + intel_logical_ring_emit(ringbuf, cmd); + intel_logical_ring_emit(ringbuf, + I915_GEM_HWS_SCRATCH_ADDR | + MI_FLUSH_DW_USE_GTT); + intel_logical_ring_emit(ringbuf, 0); /* upper addr */ + intel_logical_ring_emit(ringbuf, 0); /* value */ + intel_logical_ring_advance(ringbuf); + + return 0; +} + +static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf, + u32 invalidate_domains, + u32 flush_domains) +{ + struct intel_engine_cs *ring = ringbuf->ring; + u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; + u32 flags = 0; + int ret; + + flags |= PIPE_CONTROL_CS_STALL; + + if (flush_domains) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + } + + if (invalidate_domains) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + } + + ret = intel_logical_ring_begin(ringbuf, 6); + if (ret) + return ret; + + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); + intel_logical_ring_emit(ringbuf, flags); + intel_logical_ring_emit(ringbuf, scratch_addr); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_advance(ringbuf); + + return 0; +} + +static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) +{ + 
return intel_read_status_page(ring, I915_GEM_HWS_INDEX); +} + +static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno) +{ + intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); +} + +static int gen8_emit_request(struct intel_ringbuffer *ringbuf) +{ + struct intel_engine_cs *ring = ringbuf->ring; + u32 cmd; + int ret; + + ret = intel_logical_ring_begin(ringbuf, 6); + if (ret) + return ret; + + cmd = MI_STORE_DWORD_IMM_GEN8; + cmd |= MI_GLOBAL_GTT; + + intel_logical_ring_emit(ringbuf, cmd); + intel_logical_ring_emit(ringbuf, + (ring->status_page.gfx_addr + + (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT))); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno); + intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); + intel_logical_ring_emit(ringbuf, MI_NOOP); + intel_logical_ring_advance_and_submit(ringbuf); + + return 0; +} + +/** + * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer + * + * @ring: Engine Command Streamer. + * + */ +void intel_logical_ring_cleanup(struct intel_engine_cs *ring) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + if (!intel_ring_initialized(ring)) + return; + + intel_logical_ring_stop(ring); + WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); + ring->preallocated_lazy_request = NULL; + ring->outstanding_lazy_seqno = 0; + + if (ring->cleanup) + ring->cleanup(ring); + + i915_cmd_parser_fini_ring(ring); + + if (ring->status_page.obj) { + kunmap(sg_page(ring->status_page.obj->pages->sgl)); + ring->status_page.obj = NULL; + } +} + +static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) +{ + int ret; + struct intel_context *dctx = ring->default_context; + struct drm_i915_gem_object *dctx_obj; + + /* Intentionally left blank. */ + ring->buffer = NULL; + + ring->dev = dev; + INIT_LIST_HEAD(&ring->active_list); + INIT_LIST_HEAD(&ring->request_list); + init_waitqueue_head(&ring->irq_queue); + + INIT_LIST_HEAD(&ring->execlist_queue); + spin_lock_init(&ring->execlist_lock); + ring->next_context_status_buffer = 0; + + ret = intel_lr_context_deferred_create(dctx, ring); + if (ret) + return ret; + + /* The status page is offset 0 from the context object in LRCs. 
*/ + dctx_obj = dctx->engine[ring->id].state; + ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj); + ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl)); + if (ring->status_page.page_addr == NULL) + return -ENOMEM; + ring->status_page.obj = dctx_obj; + + ret = i915_cmd_parser_init_ring(ring); + if (ret) + return ret; + + if (ring->init) { + ret = ring->init(ring); + if (ret) + return ret; + } + + return 0; +} + +static int logical_render_ring_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *ring = &dev_priv->ring[RCS]; + + ring->name = "render ring"; + ring->id = RCS; + ring->mmio_base = RENDER_RING_BASE; + ring->irq_enable_mask = + GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT; + ring->irq_keep_mask = + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT; + if (HAS_L3_DPF(dev)) + ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + + ring->init = gen8_init_render_ring; + ring->cleanup = intel_fini_pipe_control; + ring->get_seqno = gen8_get_seqno; + ring->set_seqno = gen8_set_seqno; + ring->emit_request = gen8_emit_request; + ring->emit_flush = gen8_emit_flush_render; + ring->irq_get = gen8_logical_ring_get_irq; + ring->irq_put = gen8_logical_ring_put_irq; + ring->emit_bb_start = gen8_emit_bb_start; + + return logical_ring_init(dev, ring); +} + +static int logical_bsd_ring_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *ring = &dev_priv->ring[VCS]; + + ring->name = "bsd ring"; + ring->id = VCS; + ring->mmio_base = GEN6_BSD_RING_BASE; + ring->irq_enable_mask = + GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; + ring->irq_keep_mask = + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; + + ring->init = gen8_init_common_ring; + ring->get_seqno = gen8_get_seqno; + ring->set_seqno = gen8_set_seqno; + ring->emit_request = gen8_emit_request; + ring->emit_flush = gen8_emit_flush; + ring->irq_get = gen8_logical_ring_get_irq; + ring->irq_put = gen8_logical_ring_put_irq; + ring->emit_bb_start = gen8_emit_bb_start; + + return logical_ring_init(dev, ring); +} + +static int logical_bsd2_ring_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *ring = &dev_priv->ring[VCS2]; + + ring->name = "bds2 ring"; + ring->id = VCS2; + ring->mmio_base = GEN8_BSD2_RING_BASE; + ring->irq_enable_mask = + GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; + ring->irq_keep_mask = + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; + + ring->init = gen8_init_common_ring; + ring->get_seqno = gen8_get_seqno; + ring->set_seqno = gen8_set_seqno; + ring->emit_request = gen8_emit_request; + ring->emit_flush = gen8_emit_flush; + ring->irq_get = gen8_logical_ring_get_irq; + ring->irq_put = gen8_logical_ring_put_irq; + ring->emit_bb_start = gen8_emit_bb_start; + + return logical_ring_init(dev, ring); +} + +static int logical_blt_ring_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *ring = &dev_priv->ring[BCS]; + + ring->name = "blitter ring"; + ring->id = BCS; + ring->mmio_base = BLT_RING_BASE; + ring->irq_enable_mask = + GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; + ring->irq_keep_mask = + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT; + + ring->init = gen8_init_common_ring; + ring->get_seqno = gen8_get_seqno; + ring->set_seqno = gen8_set_seqno; + ring->emit_request = gen8_emit_request; + ring->emit_flush = gen8_emit_flush; + 
ring->irq_get = gen8_logical_ring_get_irq; + ring->irq_put = gen8_logical_ring_put_irq; + ring->emit_bb_start = gen8_emit_bb_start; + + return logical_ring_init(dev, ring); +} + +static int logical_vebox_ring_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *ring = &dev_priv->ring[VECS]; + + ring->name = "video enhancement ring"; + ring->id = VECS; + ring->mmio_base = VEBOX_RING_BASE; + ring->irq_enable_mask = + GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; + ring->irq_keep_mask = + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT; + + ring->init = gen8_init_common_ring; + ring->get_seqno = gen8_get_seqno; + ring->set_seqno = gen8_set_seqno; + ring->emit_request = gen8_emit_request; + ring->emit_flush = gen8_emit_flush; + ring->irq_get = gen8_logical_ring_get_irq; + ring->irq_put = gen8_logical_ring_put_irq; + ring->emit_bb_start = gen8_emit_bb_start; + + return logical_ring_init(dev, ring); +} + +/** + * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers + * @dev: DRM device. + * + * This function inits the engines for an Execlists submission style (the equivalent in the + * legacy ringbuffer submission world would be i915_gem_init_rings). It does it only for + * those engines that are present in the hardware. + * + * Return: non-zero if the initialization failed. + */ +int intel_logical_rings_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = logical_render_ring_init(dev); + if (ret) + return ret; + + if (HAS_BSD(dev)) { + ret = logical_bsd_ring_init(dev); + if (ret) + goto cleanup_render_ring; + } + + if (HAS_BLT(dev)) { + ret = logical_blt_ring_init(dev); + if (ret) + goto cleanup_bsd_ring; + } + + if (HAS_VEBOX(dev)) { + ret = logical_vebox_ring_init(dev); + if (ret) + goto cleanup_blt_ring; + } + + if (HAS_BSD2(dev)) { + ret = logical_bsd2_ring_init(dev); + if (ret) + goto cleanup_vebox_ring; + } + + ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000)); + if (ret) + goto cleanup_bsd2_ring; + + return 0; + +cleanup_bsd2_ring: + intel_logical_ring_cleanup(&dev_priv->ring[VCS2]); +cleanup_vebox_ring: + intel_logical_ring_cleanup(&dev_priv->ring[VECS]); +cleanup_blt_ring: + intel_logical_ring_cleanup(&dev_priv->ring[BCS]); +cleanup_bsd_ring: + intel_logical_ring_cleanup(&dev_priv->ring[VCS]); +cleanup_render_ring: + intel_logical_ring_cleanup(&dev_priv->ring[RCS]); + + return ret; +} + +static int +populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj, + struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf) +{ + struct drm_i915_gem_object *ring_obj = ringbuf->obj; + struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; + struct page *page; + uint32_t *reg_state; + int ret; + + ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true); + if (ret) { + DRM_DEBUG_DRIVER("Could not set to CPU domain\n"); + return ret; + } + + ret = i915_gem_object_get_pages(ctx_obj); + if (ret) { + DRM_DEBUG_DRIVER("Could not get object pages\n"); + return ret; + } + + i915_gem_object_pin_pages(ctx_obj); + + /* The second page of the context object contains some fields which must + * be set up prior to the first execution. */ + page = i915_gem_object_get_page(ctx_obj, 1); + reg_state = kmap_atomic(page); + + /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM + * commands followed by (reg, value) pairs. 
The values we are setting here are + * only for the first context restore: on a subsequent save, the GPU will + * recreate this batchbuffer with new values (including all the missing + * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */ + if (ring->id == RCS) + reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14); + else + reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11); + reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED; + reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring); + reg_state[CTX_CONTEXT_CONTROL+1] = + _MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT); + reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base); + reg_state[CTX_RING_HEAD+1] = 0; + reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base); + reg_state[CTX_RING_TAIL+1] = 0; + reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base); + reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj); + reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base); + reg_state[CTX_RING_BUFFER_CONTROL+1] = + ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID; + reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168; + reg_state[CTX_BB_HEAD_U+1] = 0; + reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140; + reg_state[CTX_BB_HEAD_L+1] = 0; + reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110; + reg_state[CTX_BB_STATE+1] = (1<<5); + reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c; + reg_state[CTX_SECOND_BB_HEAD_U+1] = 0; + reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114; + reg_state[CTX_SECOND_BB_HEAD_L+1] = 0; + reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118; + reg_state[CTX_SECOND_BB_STATE+1] = 0; + if (ring->id == RCS) { + /* TODO: according to BSpec, the register state context + * for CHV does not have these. OTOH, these registers do + * exist in CHV. 
I'm waiting for a clarification */
+ reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
+ reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
+ reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
+ reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
+ reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
+ reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
+ }
+ reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
+ reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
+ reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
+ reg_state[CTX_CTX_TIMESTAMP+1] = 0;
+ reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
+ reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
+ reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
+ reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
+ reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
+ reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
+ reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
+ reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
+ reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
+ reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
+ reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
+ reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
+ reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
+ reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
+ reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
+ reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
+ if (ring->id == RCS) {
+ reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
+ reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
+ reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
+ }
+
+ kunmap_atomic(reg_state);
+
+ ctx_obj->dirty = 1;
+ set_page_dirty(page);
+ i915_gem_object_unpin_pages(ctx_obj);
+
+ return 0;
+}
+
+/**
+ * intel_lr_context_free() - free the LRC specific bits of a context
+ * @ctx: the LR context to free.
+ *
+ * The real context freeing is done in i915_gem_context_free: this only
+ * takes care of the bits that are LRC related: the per-engine backing
+ * objects and the logical ringbuffer.
+ */
+void intel_lr_context_free(struct intel_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
+ struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
+
+ if (ctx_obj) {
+ intel_destroy_ringbuffer_obj(ringbuf);
+ kfree(ringbuf);
+ i915_gem_object_ggtt_unpin(ctx_obj);
+ drm_gem_object_unreference(&ctx_obj->base);
+ }
+ }
+}
+
+static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
+{
+ int ret = 0;
+
+ WARN_ON(INTEL_INFO(ring->dev)->gen != 8);
+
+ switch (ring->id) {
+ case RCS:
+ ret = GEN8_LR_CONTEXT_RENDER_SIZE;
+ break;
+ case VCS:
+ case BCS:
+ case VECS:
+ case VCS2:
+ ret = GEN8_LR_CONTEXT_OTHER_SIZE;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * intel_lr_context_deferred_create() - create the LRC specific bits of a context
+ * @ctx: LR context to create.
+ * @ring: engine to be used with the context.
+ *
+ * This function can be called more than once, with different engines, if we plan
+ * to use the context with them. The context backing objects and the ringbuffers
+ * (especially the ringbuffer backing objects) suck a lot of memory up, and that's why
+ * the creation is a deferred call: it's better to make sure first that we need to use
+ * a given ring with the context.
+ *
+ * Return: non-zero on error.
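+ *
+ * As an illustration of the savings (sizes as defined elsewhere in this
+ * patch): a context that is only ever used on the blitter engine allocates
+ * just the 2-page GEN8_LR_CONTEXT_OTHER_SIZE image plus a 32-page ringbuffer
+ * for BCS, and never pays for the 20-page render context image.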
+ */ +int intel_lr_context_deferred_create(struct intel_context *ctx, + struct intel_engine_cs *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_gem_object *ctx_obj; + uint32_t context_size; + struct intel_ringbuffer *ringbuf; + int ret; + + WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); + if (ctx->engine[ring->id].state) + return 0; + + context_size = round_up(get_lr_context_size(ring), 4096); + + ctx_obj = i915_gem_alloc_context_obj(dev, context_size); + if (IS_ERR(ctx_obj)) { + ret = PTR_ERR(ctx_obj); + DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret); + return ret; + } + + ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0); + if (ret) { + DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret); + drm_gem_object_unreference(&ctx_obj->base); + return ret; + } + + ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); + if (!ringbuf) { + DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n", + ring->name); + i915_gem_object_ggtt_unpin(ctx_obj); + drm_gem_object_unreference(&ctx_obj->base); + ret = -ENOMEM; + return ret; + } + + ringbuf->ring = ring; + ringbuf->FIXME_lrc_ctx = ctx; + + ringbuf->size = 32 * PAGE_SIZE; + ringbuf->effective_size = ringbuf->size; + ringbuf->head = 0; + ringbuf->tail = 0; + ringbuf->space = ringbuf->size; + ringbuf->last_retired_head = -1; + + /* TODO: For now we put this in the mappable region so that we can reuse + * the existing ringbuffer code which ioremaps it. When we start + * creating many contexts, this will no longer work and we must switch + * to a kmapish interface. + */ + ret = intel_alloc_ringbuffer_obj(dev, ringbuf); + if (ret) { + DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n", + ring->name, ret); + goto error; + } + + ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); + if (ret) { + DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); + intel_destroy_ringbuffer_obj(ringbuf); + goto error; + } + + ctx->engine[ring->id].ringbuf = ringbuf; + ctx->engine[ring->id].state = ctx_obj; + + return 0; + +error: + kfree(ringbuf); + i915_gem_object_ggtt_unpin(ctx_obj); + drm_gem_object_unreference(&ctx_obj->base); + return ret; +} diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h new file mode 100644 index 000000000000..991d4499fb03 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -0,0 +1,112 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _INTEL_LRC_H_ +#define _INTEL_LRC_H_ + +/* Execlists regs */ +#define RING_ELSP(ring) ((ring)->mmio_base+0x230) +#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234) +#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244) +#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370) +#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0) + +/* Logical Rings */ +void intel_logical_ring_stop(struct intel_engine_cs *ring); +void intel_logical_ring_cleanup(struct intel_engine_cs *ring); +int intel_logical_rings_init(struct drm_device *dev); + +int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf); +void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf); +/** + * intel_logical_ring_advance() - advance the ringbuffer tail + * @ringbuf: Ringbuffer to advance. + * + * The tail is only updated in our logical ringbuffer struct. + */ +static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf) +{ + ringbuf->tail &= ringbuf->size - 1; +} +/** + * intel_logical_ring_emit() - write a DWORD to the ringbuffer. + * @ringbuf: Ringbuffer to write to. + * @data: DWORD to write. + */ +static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf, + u32 data) +{ + iowrite32(data, ringbuf->virtual_start + ringbuf->tail); + ringbuf->tail += 4; +} +int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords); + +/* Logical Ring Contexts */ +void intel_lr_context_free(struct intel_context *ctx); +int intel_lr_context_deferred_create(struct intel_context *ctx, + struct intel_engine_cs *ring); + +/* Execlists */ +int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); +int intel_execlists_submission(struct drm_device *dev, struct drm_file *file, + struct intel_engine_cs *ring, + struct intel_context *ctx, + struct drm_i915_gem_execbuffer2 *args, + struct list_head *vmas, + struct drm_i915_gem_object *batch_obj, + u64 exec_start, u32 flags); +u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj); + +/** + * struct intel_ctx_submit_request - queued context submission request + * @ctx: Context to submit to the ELSP. + * @ring: Engine to submit it to. + * @tail: how far in the context's ringbuffer this request goes to. + * @execlist_link: link in the submission queue. + * @work: workqueue for processing this request in a bottom half. + * @elsp_submitted: no. of times this request has been sent to the ELSP. + * + * The ELSP only accepts two elements at a time, so we queue context/tail + * pairs on a given queue (ring->execlist_queue) until the hardware is + * available. The queue serves a double purpose: we also use it to keep track + * of the up to 2 contexts currently in the hardware (usually one in execution + * and the other queued up by the GPU): We only remove elements from the head + * of the queue when the hardware informs us that an element has been + * completed. + * + * All accesses to the queue are mediated by a spinlock (ring->execlist_lock). 
+ */ +struct intel_ctx_submit_request { + struct intel_context *ctx; + struct intel_engine_cs *ring; + u32 tail; + + struct list_head execlist_link; + struct work_struct work; + + int elsp_submitted; +}; + +void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring); + +#endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 881361c0f27e..1987491723a5 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -823,8 +823,7 @@ bool intel_is_dual_link_lvds(struct drm_device *dev) struct intel_encoder *encoder; struct intel_lvds_encoder *lvds_encoder; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { + for_each_intel_encoder(dev, encoder) { if (encoder->type == INTEL_OUTPUT_LVDS) { lvds_encoder = to_lvds_encoder(&encoder->base); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 3f88f29a98c0..c8f744c418f0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -309,6 +309,9 @@ static void gen7_enable_fbc(struct drm_crtc *crtc) dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; + if (dev_priv->fbc.false_color) + dpfc_ctl |= FBC_CTL_FALSE_COLOR; + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); if (IS_IVYBRIDGE(dev)) { @@ -1268,34 +1271,27 @@ static bool g4x_compute_srwm(struct drm_device *dev, display, cursor); } -static bool vlv_compute_drain_latency(struct drm_device *dev, - int plane, - int *plane_prec_mult, - int *plane_dl, - int *cursor_prec_mult, - int *cursor_dl) +static bool vlv_compute_drain_latency(struct drm_crtc *crtc, + int pixel_size, + int *prec_mult, + int *drain_latency) { - struct drm_crtc *crtc; - int clock, pixel_size; int entries; + int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; - crtc = intel_get_crtc_for_plane(dev, plane); - if (!intel_crtc_active(crtc)) + if (WARN(clock == 0, "Pixel clock is zero!\n")) return false; - clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; - pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */ + if (WARN(pixel_size == 0, "Pixel size is zero!\n")) + return false; - entries = (clock / 1000) * pixel_size; - *plane_prec_mult = (entries > 256) ? - DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; - *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) * - pixel_size); + entries = DIV_ROUND_UP(clock, 1000) * pixel_size; + *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 : + DRAIN_LATENCY_PRECISION_32; + *drain_latency = (64 * (*prec_mult) * 4) / entries; - entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */ - *cursor_prec_mult = (entries > 256) ? - DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; - *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4); + if (*drain_latency > DRAIN_LATENCY_MASK) + *drain_latency = DRAIN_LATENCY_MASK; return true; } @@ -1308,39 +1304,48 @@ static bool vlv_compute_drain_latency(struct drm_device *dev, * latency value. 
*/ -static void vlv_update_drain_latency(struct drm_device *dev) +static void vlv_update_drain_latency(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = dev->dev_private; - int planea_prec, planea_dl, planeb_prec, planeb_dl; - int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl; - int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is - either 16 or 32 */ + struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pixel_size; + int drain_latency; + enum pipe pipe = intel_crtc->pipe; + int plane_prec, prec_mult, plane_dl; - /* For plane A, Cursor A */ - if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl, - &cursor_prec_mult, &cursora_dl)) { - cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? - DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16; - planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? - DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16; + plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 | + DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 | + (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT)); + + if (!intel_crtc_active(crtc)) { + I915_WRITE(VLV_DDL(pipe), plane_dl); + return; + } - I915_WRITE(VLV_DDL1, cursora_prec | - (cursora_dl << DDL_CURSORA_SHIFT) | - planea_prec | planea_dl); + /* Primary plane Drain Latency */ + pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */ + if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { + plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? + DDL_PLANE_PRECISION_64 : + DDL_PLANE_PRECISION_32; + plane_dl |= plane_prec | drain_latency; } - /* For plane B, Cursor B */ - if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl, - &cursor_prec_mult, &cursorb_dl)) { - cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? - DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16; - planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? - DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16; + /* Cursor Drain Latency + * BPP is always 4 for cursor + */ + pixel_size = 4; - I915_WRITE(VLV_DDL2, cursorb_prec | - (cursorb_dl << DDL_CURSORB_SHIFT) | - planeb_prec | planeb_dl); + /* Program cursor DL only if it is enabled */ + if (intel_crtc->cursor_base && + vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { + plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? 
+ DDL_CURSOR_PRECISION_64 : + DDL_CURSOR_PRECISION_32; + plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT); } + + I915_WRITE(VLV_DDL(pipe), plane_dl); } #define single_plane_enabled(mask) is_power_of_2(mask) @@ -1356,7 +1361,73 @@ static void valleyview_update_wm(struct drm_crtc *crtc) unsigned int enabled = 0; bool cxsr_enabled; - vlv_update_drain_latency(dev); + vlv_update_drain_latency(crtc); + + if (g4x_compute_wm0(dev, PIPE_A, + &valleyview_wm_info, latency_ns, + &valleyview_cursor_wm_info, latency_ns, + &planea_wm, &cursora_wm)) + enabled |= 1 << PIPE_A; + + if (g4x_compute_wm0(dev, PIPE_B, + &valleyview_wm_info, latency_ns, + &valleyview_cursor_wm_info, latency_ns, + &planeb_wm, &cursorb_wm)) + enabled |= 1 << PIPE_B; + + if (single_plane_enabled(enabled) && + g4x_compute_srwm(dev, ffs(enabled) - 1, + sr_latency_ns, + &valleyview_wm_info, + &valleyview_cursor_wm_info, + &plane_sr, &ignore_cursor_sr) && + g4x_compute_srwm(dev, ffs(enabled) - 1, + 2*sr_latency_ns, + &valleyview_wm_info, + &valleyview_cursor_wm_info, + &ignore_plane_sr, &cursor_sr)) { + cxsr_enabled = true; + } else { + cxsr_enabled = false; + intel_set_memory_cxsr(dev_priv, false); + plane_sr = cursor_sr = 0; + } + + DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, " + "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", + planea_wm, cursora_wm, + planeb_wm, cursorb_wm, + plane_sr, cursor_sr); + + I915_WRITE(DSPFW1, + (plane_sr << DSPFW_SR_SHIFT) | + (cursorb_wm << DSPFW_CURSORB_SHIFT) | + (planeb_wm << DSPFW_PLANEB_SHIFT) | + (planea_wm << DSPFW_PLANEA_SHIFT)); + I915_WRITE(DSPFW2, + (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | + (cursora_wm << DSPFW_CURSORA_SHIFT)); + I915_WRITE(DSPFW3, + (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) | + (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); + + if (cxsr_enabled) + intel_set_memory_cxsr(dev_priv, true); +} + +static void cherryview_update_wm(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + static const int sr_latency_ns = 12000; + struct drm_i915_private *dev_priv = dev->dev_private; + int planea_wm, planeb_wm, planec_wm; + int cursora_wm, cursorb_wm, cursorc_wm; + int plane_sr, cursor_sr; + int ignore_plane_sr, ignore_cursor_sr; + unsigned int enabled = 0; + bool cxsr_enabled; + + vlv_update_drain_latency(crtc); if (g4x_compute_wm0(dev, PIPE_A, &valleyview_wm_info, latency_ns, @@ -1370,6 +1441,12 @@ static void valleyview_update_wm(struct drm_crtc *crtc) &planeb_wm, &cursorb_wm)) enabled |= 1 << PIPE_B; + if (g4x_compute_wm0(dev, PIPE_C, + &valleyview_wm_info, latency_ns, + &valleyview_cursor_wm_info, latency_ns, + &planec_wm, &cursorc_wm)) + enabled |= 1 << PIPE_C; + if (single_plane_enabled(enabled) && g4x_compute_srwm(dev, ffs(enabled) - 1, sr_latency_ns, @@ -1388,27 +1465,66 @@ static void valleyview_update_wm(struct drm_crtc *crtc) plane_sr = cursor_sr = 0; } - DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", + DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, " + "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, " + "SR: plane=%d, cursor=%d\n", planea_wm, cursora_wm, planeb_wm, cursorb_wm, + planec_wm, cursorc_wm, plane_sr, cursor_sr); I915_WRITE(DSPFW1, (plane_sr << DSPFW_SR_SHIFT) | (cursorb_wm << DSPFW_CURSORB_SHIFT) | (planeb_wm << DSPFW_PLANEB_SHIFT) | - planea_wm); + (planea_wm << DSPFW_PLANEA_SHIFT)); I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | (cursora_wm << DSPFW_CURSORA_SHIFT)); I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & 
~DSPFW_CURSOR_SR_MASK) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); + I915_WRITE(DSPFW9_CHV, + (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK | + DSPFW_CURSORC_MASK)) | + (planec_wm << DSPFW_PLANEC_SHIFT) | + (cursorc_wm << DSPFW_CURSORC_SHIFT)); if (cxsr_enabled) intel_set_memory_cxsr(dev_priv, true); } +static void valleyview_update_sprite_wm(struct drm_plane *plane, + struct drm_crtc *crtc, + uint32_t sprite_width, + uint32_t sprite_height, + int pixel_size, + bool enabled, bool scaled) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = to_intel_plane(plane)->pipe; + int sprite = to_intel_plane(plane)->plane; + int drain_latency; + int plane_prec; + int sprite_dl; + int prec_mult; + + sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) | + (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite))); + + if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, + &drain_latency)) { + plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? + DDL_SPRITE_PRECISION_64(sprite) : + DDL_SPRITE_PRECISION_32(sprite); + sprite_dl |= plane_prec | + (drain_latency << DDL_SPRITE_SHIFT(sprite)); + } + + I915_WRITE(VLV_DDL(pipe), sprite_dl); +} + static void g4x_update_wm(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -1444,7 +1560,8 @@ static void g4x_update_wm(struct drm_crtc *crtc) plane_sr = cursor_sr = 0; } - DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", + DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, " + "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", planea_wm, cursora_wm, planeb_wm, cursorb_wm, plane_sr, cursor_sr); @@ -1453,7 +1570,7 @@ static void g4x_update_wm(struct drm_crtc *crtc) (plane_sr << DSPFW_SR_SHIFT) | (cursorb_wm << DSPFW_CURSORB_SHIFT) | (planeb_wm << DSPFW_PLANEB_SHIFT) | - planea_wm); + (planea_wm << DSPFW_PLANEA_SHIFT)); I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | (cursora_wm << DSPFW_CURSORA_SHIFT)); @@ -1527,8 +1644,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) /* 965 has limitations... */ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | - (8 << 16) | (8 << 8) | (8 << 0)); - I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); + (8 << DSPFW_CURSORB_SHIFT) | + (8 << DSPFW_PLANEB_SHIFT) | + (8 << DSPFW_PLANEA_SHIFT)); + I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) | + (8 << DSPFW_PLANEC_SHIFT_OLD)); /* update cursor SR watermark */ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); @@ -3034,7 +3154,7 @@ static void ironlake_enable_drps(struct drm_device *dev) I915_READ(0x112e0); dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); dev_priv->ips.last_count2 = I915_READ(0x112f4); - getrawmonotonic(&dev_priv->ips.last_time2); + dev_priv->ips.last_time2 = ktime_get_raw_ns(); spin_unlock_irq(&mchdev_lock); } @@ -3420,10 +3540,10 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode) else mode = 0; } - DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", - (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", - (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", - (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); + DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", + (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", + (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", + (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? 
"on" : "off"); } static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) @@ -3447,8 +3567,8 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) mask = INTEL_RC6_ENABLE; if ((enable_rc6 & mask) != enable_rc6) - DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n", - enable_rc6 & mask, enable_rc6, mask); + DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n", + enable_rc6 & mask, enable_rc6, mask); return enable_rc6 & mask; } @@ -3599,7 +3719,6 @@ static void gen6_enable_rps(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *ring; u32 rp_state_cap; - u32 gt_perf_status; u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; u32 gtfifodbg; int rc6_mode; @@ -3624,7 +3743,6 @@ static void gen6_enable_rps(struct drm_device *dev) gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); parse_rp_state_cap(dev_priv, rp_state_cap); @@ -4595,18 +4713,16 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) { - struct timespec now, diff1; - u64 diff; - unsigned long diffms; + u64 now, diff, diffms; u32 count; assert_spin_locked(&mchdev_lock); - getrawmonotonic(&now); - diff1 = timespec_sub(now, dev_priv->ips.last_time2); + now = ktime_get_raw_ns(); + diffms = now - dev_priv->ips.last_time2; + do_div(diffms, NSEC_PER_MSEC); /* Don't divide by 0 */ - diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; if (!diffms) return; @@ -5230,11 +5346,9 @@ static void gen6_check_mch_setup(struct drm_device *dev) uint32_t tmp; tmp = I915_READ(MCH_SSKPD); - if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) { - DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp); - DRM_INFO("This can cause pipe underruns and display issues.\n"); - DRM_INFO("Please upgrade your BIOS to fix this.\n"); - } + if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) + DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", + tmp); } static void gen6_init_clock_gating(struct drm_device *dev) @@ -6257,6 +6371,153 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, vlv_set_power_well(dev_priv, power_well, false); } +static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum dpio_phy phy; + + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC && + power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D); + + /* + * Enable the CRI clock source so we can get at the + * display and the reference clock for VGA + * hotplug / manual detection. 
+ */ + if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { + phy = DPIO_PHY0; + I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | + DPLL_REFA_CLK_ENABLE_VLV); + I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | + DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); + } else { + phy = DPIO_PHY1; + I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | + DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); + } + udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ + vlv_set_power_well(dev_priv, power_well, true); + + /* Poll for phypwrgood signal */ + if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1)) + DRM_ERROR("Display PHY %d is not power up\n", phy); + + I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) | + PHY_COM_LANE_RESET_DEASSERT(phy)); +} + +static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum dpio_phy phy; + + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC && + power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D); + + if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) { + phy = DPIO_PHY0; + assert_pll_disabled(dev_priv, PIPE_A); + assert_pll_disabled(dev_priv, PIPE_B); + } else { + phy = DPIO_PHY1; + assert_pll_disabled(dev_priv, PIPE_C); + } + + I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) & + ~PHY_COM_LANE_RESET_DEASSERT(phy)); + + vlv_set_power_well(dev_priv, power_well, false); +} + +static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum pipe pipe = power_well->data; + bool enabled; + u32 state, ctrl; + + mutex_lock(&dev_priv->rps.hw_lock); + + state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe); + /* + * We only ever set the power-on and power-gate states, anything + * else is unexpected. + */ + WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe)); + enabled = state == DP_SSS_PWR_ON(pipe); + + /* + * A transient state at this point would mean some unexpected party + * is poking at the power controls too. + */ + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe); + WARN_ON(ctrl << 16 != state); + + mutex_unlock(&dev_priv->rps.hw_lock); + + return enabled; +} + +static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, + bool enable) +{ + enum pipe pipe = power_well->data; + u32 state; + u32 ctrl; + + state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); + + mutex_lock(&dev_priv->rps.hw_lock); + +#define COND \ + ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state) + + if (COND) + goto out; + + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + ctrl &= ~DP_SSC_MASK(pipe); + ctrl |= enable ? 
DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); + vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl); + + if (wait_for(COND, 100)) + DRM_ERROR("timout setting power well state %08x (%08x)\n", + state, + vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ)); + +#undef COND + +out: + mutex_unlock(&dev_priv->rps.hw_lock); +} + +static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0); +} + +static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + WARN_ON_ONCE(power_well->data != PIPE_A && + power_well->data != PIPE_B && + power_well->data != PIPE_C); + + chv_set_pipe_power_well(dev_priv, power_well, true); +} + +static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + WARN_ON_ONCE(power_well->data != PIPE_A && + power_well->data != PIPE_B && + power_well->data != PIPE_C); + + chv_set_pipe_power_well(dev_priv, power_well, false); +} + static void check_power_well_state(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { @@ -6448,6 +6709,39 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq); BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ BIT(POWER_DOMAIN_INIT)) +#define CHV_PIPE_A_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PIPE_A) | \ + BIT(POWER_DOMAIN_INIT)) + +#define CHV_PIPE_B_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PIPE_B) | \ + BIT(POWER_DOMAIN_INIT)) + +#define CHV_PIPE_C_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PIPE_C) | \ + BIT(POWER_DOMAIN_INIT)) + +#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + +#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ + BIT(POWER_DOMAIN_INIT)) + static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { .sync_hw = i9xx_always_on_power_well_noop, .enable = i9xx_always_on_power_well_noop, @@ -6455,6 +6749,20 @@ static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { .is_enabled = i9xx_always_on_power_well_enabled, }; +static const struct i915_power_well_ops chv_pipe_power_well_ops = { + .sync_hw = chv_pipe_power_well_sync_hw, + .enable = chv_pipe_power_well_enable, + .disable = chv_pipe_power_well_disable, + .is_enabled = chv_pipe_power_well_enabled, +}; + +static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { + .sync_hw = vlv_power_well_sync_hw, + .enable = chv_dpio_cmn_power_well_enable, + .disable = chv_dpio_cmn_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + static struct i915_power_well i9xx_always_on_power_well[] = { { .name = "always-on", @@ -6577,6 +6885,107 @@ static struct i915_power_well vlv_power_wells[] = { }, }; +static struct i915_power_well chv_power_wells[] = { + { + .name = "always-on", + .always_on = 1, + .domains = VLV_ALWAYS_ON_POWER_DOMAINS, + .ops = &i9xx_always_on_power_well_ops, + }, +#if 0 + { + .name = "display", + .domains = VLV_DISPLAY_POWER_DOMAINS, + .data = PUNIT_POWER_WELL_DISP2D, + 
.ops = &vlv_display_power_well_ops, + }, + { + .name = "pipe-a", + .domains = CHV_PIPE_A_POWER_DOMAINS, + .data = PIPE_A, + .ops = &chv_pipe_power_well_ops, + }, + { + .name = "pipe-b", + .domains = CHV_PIPE_B_POWER_DOMAINS, + .data = PIPE_B, + .ops = &chv_pipe_power_well_ops, + }, + { + .name = "pipe-c", + .domains = CHV_PIPE_C_POWER_DOMAINS, + .data = PIPE_C, + .ops = &chv_pipe_power_well_ops, + }, +#endif + { + .name = "dpio-common-bc", + /* + * XXX: cmnreset for one PHY seems to disturb the other. + * As a workaround keep both powered on at the same + * time for now. + */ + .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS, + .data = PUNIT_POWER_WELL_DPIO_CMN_BC, + .ops = &chv_dpio_cmn_power_well_ops, + }, + { + .name = "dpio-common-d", + /* + * XXX: cmnreset for one PHY seems to disturb the other. + * As a workaround keep both powered on at the same + * time for now. + */ + .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS, + .data = PUNIT_POWER_WELL_DPIO_CMN_D, + .ops = &chv_dpio_cmn_power_well_ops, + }, +#if 0 + { + .name = "dpio-tx-b-01", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01, + }, + { + .name = "dpio-tx-b-23", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23, + }, + { + .name = "dpio-tx-c-01", + .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01, + }, + { + .name = "dpio-tx-c-23", + .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23, + }, + { + .name = "dpio-tx-d-01", + .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS | + CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01, + }, + { + .name = "dpio-tx-d-23", + .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS | + CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23, + }, +#endif +}; + static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv, enum punit_power_well power_well_id) { @@ -6613,6 +7022,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) } else if (IS_BROADWELL(dev_priv->dev)) { set_power_wells(power_domains, bdw_power_wells); hsw_pwr = power_domains; + } else if (IS_CHERRYVIEW(dev_priv->dev)) { + set_power_wells(power_domains, chv_power_wells); } else if (IS_VALLEYVIEW(dev_priv->dev)) { set_power_wells(power_domains, vlv_power_wells); } else { @@ -6840,11 +7251,13 @@ void intel_init_pm(struct drm_device *dev) else if (INTEL_INFO(dev)->gen == 8) dev_priv->display.init_clock_gating = gen8_init_clock_gating; } else if (IS_CHERRYVIEW(dev)) { - dev_priv->display.update_wm = valleyview_update_wm; + dev_priv->display.update_wm = cherryview_update_wm; + dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm; dev_priv->display.init_clock_gating = cherryview_init_clock_gating; } else if (IS_VALLEYVIEW(dev)) { dev_priv->display.update_wm = valleyview_update_wm; + dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm; dev_priv->display.init_clock_gating = valleyview_init_clock_gating; } else if 
(IS_PINEVIEW(dev)) { diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b3d8f766fa7f..4fb1ec95ec08 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -33,14 +33,24 @@ #include "i915_trace.h" #include "intel_drv.h" -/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill, - * but keeps the logic simple. Indeed, the whole purpose of this macro is just - * to give some inclination as to some of the magic values used in the various - * workarounds! - */ -#define CACHELINE_BYTES 64 +bool +intel_ring_initialized(struct intel_engine_cs *ring) +{ + struct drm_device *dev = ring->dev; + + if (!dev) + return false; -static inline int __ring_space(int head, int tail, int size) + if (i915.enable_execlists) { + struct intel_context *dctx = ring->default_context; + struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf; + + return ringbuf->obj; + } else + return ring->buffer && ring->buffer->obj; +} + +int __intel_ring_space(int head, int tail, int size) { int space = head - (tail + I915_RING_FREE_SPACE); if (space < 0) @@ -48,12 +58,13 @@ static inline int __ring_space(int head, int tail, int size) return space; } -static inline int ring_space(struct intel_ringbuffer *ringbuf) +int intel_ring_space(struct intel_ringbuffer *ringbuf) { - return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size); + return __intel_ring_space(ringbuf->head & HEAD_ADDR, + ringbuf->tail, ringbuf->size); } -static bool intel_ring_stopped(struct intel_engine_cs *ring) +bool intel_ring_stopped(struct intel_engine_cs *ring) { struct drm_i915_private *dev_priv = ring->dev->dev_private; return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring); @@ -380,6 +391,27 @@ gen7_render_ring_flush(struct intel_engine_cs *ring, } static int +gen8_emit_pipe_control(struct intel_engine_cs *ring, + u32 flags, u32 scratch_addr) +{ + int ret; + + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + return 0; +} + +static int gen8_render_ring_flush(struct intel_engine_cs *ring, u32 invalidate_domains, u32 flush_domains) { @@ -402,22 +434,17 @@ gen8_render_ring_flush(struct intel_engine_cs *ring, flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; flags |= PIPE_CONTROL_QW_WRITE; flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; - } - ret = intel_ring_begin(ring, 6); - if (ret) - return ret; - - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); - intel_ring_emit(ring, flags); - intel_ring_emit(ring, scratch_addr); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); - - return 0; + /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */ + ret = gen8_emit_pipe_control(ring, + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_STALL_AT_SCOREBOARD, + 0); + if (ret) + return ret; + } + return gen8_emit_pipe_control(ring, flags, scratch_addr); } static void ring_write_tail(struct intel_engine_cs *ring, @@ -460,9 +487,14 @@ static bool stop_ring(struct intel_engine_cs *ring) if (!IS_GEN2(ring->dev)) { I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); - if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { - DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); - return false; + if (wait_for((I915_READ_MODE(ring) & 
MODE_IDLE) != 0, 1000)) { + DRM_ERROR("%s : timed out trying to stop ring\n", ring->name); + /* Sometimes we observe that the idle flag is not + * set even though the ring is empty. So double + * check before giving up. + */ + if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring)) + return false; } } @@ -516,6 +548,9 @@ static int init_ring_common(struct intel_engine_cs *ring) else ring_setup_phys_status_page(ring); + /* Enforce ordering by reading HEAD register back */ + I915_READ_HEAD(ring); + /* Initialize the ring. This must happen _after_ we've cleared the ring * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring @@ -544,7 +579,7 @@ static int init_ring_common(struct intel_engine_cs *ring) else { ringbuf->head = I915_READ_HEAD(ring); ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; - ringbuf->space = ring_space(ringbuf); + ringbuf->space = intel_ring_space(ringbuf); ringbuf->last_retired_head = -1; } @@ -556,8 +591,25 @@ out: return ret; } -static int -init_pipe_control(struct intel_engine_cs *ring) +void +intel_fini_pipe_control(struct intel_engine_cs *ring) +{ + struct drm_device *dev = ring->dev; + + if (ring->scratch.obj == NULL) + return; + + if (INTEL_INFO(dev)->gen >= 5) { + kunmap(sg_page(ring->scratch.obj->pages->sgl)); + i915_gem_object_ggtt_unpin(ring->scratch.obj); + } + + drm_gem_object_unreference(&ring->scratch.obj->base); + ring->scratch.obj = NULL; +} + +int +intel_init_pipe_control(struct intel_engine_cs *ring) { int ret; @@ -632,7 +684,7 @@ static int init_render_ring(struct intel_engine_cs *ring) _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); if (INTEL_INFO(dev)->gen >= 5) { - ret = init_pipe_control(ring); + ret = intel_init_pipe_control(ring); if (ret) return ret; } @@ -667,16 +719,7 @@ static void render_ring_cleanup(struct intel_engine_cs *ring) dev_priv->semaphore_obj = NULL; } - if (ring->scratch.obj == NULL) - return; - - if (INTEL_INFO(dev)->gen >= 5) { - kunmap(sg_page(ring->scratch.obj->pages->sgl)); - i915_gem_object_ggtt_unpin(ring->scratch.obj); - } - - drm_gem_object_unreference(&ring->scratch.obj->base); - ring->scratch.obj = NULL; + intel_fini_pipe_control(ring); } static int gen8_rcs_signal(struct intel_engine_cs *signaller, @@ -1495,7 +1538,7 @@ static int init_phys_status_page(struct intel_engine_cs *ring) return 0; } -static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) +void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) { if (!ringbuf->obj) return; @@ -1506,8 +1549,8 @@ static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) ringbuf->obj = NULL; } -static int intel_alloc_ringbuffer_obj(struct drm_device *dev, - struct intel_ringbuffer *ringbuf) +int intel_alloc_ringbuffer_obj(struct drm_device *dev, + struct intel_ringbuffer *ringbuf) { struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; @@ -1569,7 +1612,9 @@ static int intel_init_ring_buffer(struct drm_device *dev, ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); + INIT_LIST_HEAD(&ring->execlist_queue); ringbuf->size = 32 * PAGE_SIZE; + ringbuf->ring = ring; memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); init_waitqueue_head(&ring->irq_queue); @@ -1652,13 +1697,14 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n) ringbuf->head = ringbuf->last_retired_head; ringbuf->last_retired_head = -1; - ringbuf->space = ring_space(ringbuf); + 
ringbuf->space = intel_ring_space(ringbuf); if (ringbuf->space >= n) return 0; } list_for_each_entry(request, &ring->request_list, list) { - if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) { + if (__intel_ring_space(request->tail, ringbuf->tail, + ringbuf->size) >= n) { seqno = request->seqno; break; } @@ -1675,7 +1721,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n) ringbuf->head = ringbuf->last_retired_head; ringbuf->last_retired_head = -1; - ringbuf->space = ring_space(ringbuf); + ringbuf->space = intel_ring_space(ringbuf); return 0; } @@ -1704,7 +1750,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n) trace_i915_ring_wait_begin(ring); do { ringbuf->head = I915_READ_HEAD(ring); - ringbuf->space = ring_space(ringbuf); + ringbuf->space = intel_ring_space(ringbuf); if (ringbuf->space >= n) { ret = 0; break; @@ -1756,7 +1802,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring) iowrite32(MI_NOOP, virt++); ringbuf->tail = 0; - ringbuf->space = ring_space(ringbuf); + ringbuf->space = intel_ring_space(ringbuf); return 0; } @@ -1961,9 +2007,7 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring, u64 offset, u32 len, unsigned flags) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; - bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL && - !(flags & I915_DISPATCH_SECURE); + bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE); int ret; ret = intel_ring_begin(ring, 4); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index ed5941078f92..9cbf7b0ebc99 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -5,6 +5,13 @@ #define I915_CMD_HASH_ORDER 9 +/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill, + * but keeps the logic simple. Indeed, the whole purpose of this macro is just + * to give some inclination as to some of the magic values used in the various + * workarounds! + */ +#define CACHELINE_BYTES 64 + /* * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use" @@ -70,6 +77,7 @@ enum intel_ring_hangcheck_action { HANGCHECK_IDLE = 0, HANGCHECK_WAIT, HANGCHECK_ACTIVE, + HANGCHECK_ACTIVE_LOOP, HANGCHECK_KICK, HANGCHECK_HUNG, }; @@ -78,6 +86,7 @@ enum intel_ring_hangcheck_action { struct intel_ring_hangcheck { u64 acthd; + u64 max_acthd; u32 seqno; int score; enum intel_ring_hangcheck_action action; @@ -88,6 +97,15 @@ struct intel_ringbuffer { struct drm_i915_gem_object *obj; void __iomem *virtual_start; + struct intel_engine_cs *ring; + + /* + * FIXME: This backpointer is an artifact of the history of how the + * execlist patches came into being. It will get removed once the basic + * code has landed. + */ + struct intel_context *FIXME_lrc_ctx; + u32 head; u32 tail; int space; @@ -212,6 +230,18 @@ struct intel_engine_cs { unsigned int num_dwords); } semaphore; + /* Execlists */ + spinlock_t execlist_lock; + struct list_head execlist_queue; + u8 next_context_status_buffer; + u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */ + int (*emit_request)(struct intel_ringbuffer *ringbuf); + int (*emit_flush)(struct intel_ringbuffer *ringbuf, + u32 invalidate_domains, + u32 flush_domains); + int (*emit_bb_start)(struct intel_ringbuffer *ringbuf, + u64 offset, unsigned flags); + /** * List of objects currently involved in rendering from the * ringbuffer. 
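For reference, a minimal user-space sketch of the free-space arithmetic performed by the helpers this patch exports as __intel_ring_space()/intel_ring_space(). The function and variable names below are illustrative, not the driver's; the reserved-slack constant is assumed to be 64 bytes, mirroring the driver's I915_RING_FREE_SPACE.

#include <stdio.h>

#define I915_RING_FREE_SPACE 64	/* reserved slack; assumed 64, as in the driver headers */

/* Free space in a circular ring buffer, keeping the reserved slack. */
static int ring_space_sketch(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);

	if (space < 0)
		space += size;	/* tail has wrapped past head */
	return space;
}

int main(void)
{
	/* 4 KiB ring, tail ahead of head: the result wraps around */
	printf("%d\n", ring_space_sketch(256, 3840, 4096));	/* 448 */
	/* head ahead of tail: plain difference minus the slack */
	printf("%d\n", ring_space_sketch(3000, 1000, 4096));	/* 1936 */
	return 0;
}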
@@ -285,11 +315,7 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); }; -static inline bool -intel_ring_initialized(struct intel_engine_cs *ring) -{ - return ring->buffer && ring->buffer->obj; -} +bool intel_ring_initialized(struct intel_engine_cs *ring); static inline unsigned intel_ring_flag(struct intel_engine_cs *ring) @@ -353,6 +379,10 @@ intel_write_status_page(struct intel_engine_cs *ring, #define I915_GEM_HWS_SCRATCH_INDEX 0x30 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf); +int intel_alloc_ringbuffer_obj(struct drm_device *dev, + struct intel_ringbuffer *ringbuf); + void intel_stop_ring_buffer(struct intel_engine_cs *ring); void intel_cleanup_ring_buffer(struct intel_engine_cs *ring); @@ -370,6 +400,9 @@ static inline void intel_ring_advance(struct intel_engine_cs *ring) struct intel_ringbuffer *ringbuf = ring->buffer; ringbuf->tail &= ringbuf->size - 1; } +int __intel_ring_space(int head, int tail, int size); +int intel_ring_space(struct intel_ringbuffer *ringbuf); +bool intel_ring_stopped(struct intel_engine_cs *ring); void __intel_ring_advance(struct intel_engine_cs *ring); int __must_check intel_ring_idle(struct intel_engine_cs *ring); @@ -377,6 +410,9 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno); int intel_ring_flush_all_caches(struct intel_engine_cs *ring); int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring); +void intel_fini_pipe_control(struct intel_engine_cs *ring); +int intel_init_pipe_control(struct intel_engine_cs *ring); + int intel_init_render_ring_buffer(struct drm_device *dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); int intel_init_bsd2_ring_buffer(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 168c6652cda1..0bdb00b7c59c 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -53,6 +53,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl enum pipe pipe = crtc->pipe; long timeout = msecs_to_jiffies_timeout(1); int scanline, min, max, vblank_start; + wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); DEFINE_WAIT(wait); WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex)); @@ -81,7 +82,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl * other CPUs can see the task state update by the time we * read the scanline. 
*/ - prepare_to_wait(&crtc->vbl_wait, &wait, TASK_UNINTERRUPTIBLE); + prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); scanline = intel_get_crtc_scanline(crtc); if (scanline < min || scanline > max) @@ -100,7 +101,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl local_irq_disable(); } - finish_wait(&crtc->vbl_wait, &wait); + finish_wait(wq, &wait); drm_vblank_put(dev, pipe); @@ -163,6 +164,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, sprctl &= ~SP_PIXFORMAT_MASK; sprctl &= ~SP_YUV_BYTE_ORDER_MASK; sprctl &= ~SP_TILED; + sprctl &= ~SP_ROTATE_180; switch (fb->pixel_format) { case DRM_FORMAT_YUYV: @@ -235,6 +237,14 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, fb->pitches[0]); linear_offset -= sprsurf_offset; + if (intel_plane->rotation == BIT(DRM_ROTATE_180)) { + sprctl |= SP_ROTATE_180; + + x += src_w; + y += src_h; + linear_offset += src_h * fb->pitches[0] + src_w * pixel_size; + } + atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count); intel_update_primary_plane(intel_crtc); @@ -364,6 +374,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, sprctl &= ~SPRITE_RGB_ORDER_RGBX; sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK; sprctl &= ~SPRITE_TILED; + sprctl &= ~SPRITE_ROTATE_180; switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: @@ -426,6 +437,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, pixel_size, fb->pitches[0]); linear_offset -= sprsurf_offset; + if (intel_plane->rotation == BIT(DRM_ROTATE_180)) { + sprctl |= SPRITE_ROTATE_180; + + /* HSW and BDW does this automagically in hardware */ + if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { + x += src_w; + y += src_h; + linear_offset += src_h * fb->pitches[0] + + src_w * pixel_size; + } + } + atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count); intel_update_primary_plane(intel_crtc); @@ -571,6 +594,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, dvscntr &= ~DVS_RGB_ORDER_XBGR; dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK; dvscntr &= ~DVS_TILED; + dvscntr &= ~DVS_ROTATE_180; switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: @@ -628,6 +652,14 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, pixel_size, fb->pitches[0]); linear_offset -= dvssurf_offset; + if (intel_plane->rotation == BIT(DRM_ROTATE_180)) { + dvscntr |= DVS_ROTATE_180; + + x += src_w; + y += src_h; + linear_offset += src_h * fb->pitches[0] + src_w * pixel_size; + } + atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count); intel_update_primary_plane(intel_crtc); @@ -895,6 +927,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, max_scale = intel_plane->max_downscale << 16; min_scale = intel_plane->can_scale ? 
1 : (1 << 16); + drm_rect_rotate(&src, fb->width << 16, fb->height << 16, + intel_plane->rotation); + hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale); BUG_ON(hscale < 0); @@ -933,6 +968,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, drm_rect_width(&dst) * hscale - drm_rect_width(&src), drm_rect_height(&dst) * vscale - drm_rect_height(&src)); + drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16, + intel_plane->rotation); + /* sanity check to make sure the src viewport wasn't enlarged */ WARN_ON(src.x1 < (int) src_x || src.y1 < (int) src_y || @@ -1180,18 +1218,42 @@ out_unlock: return ret; } -void intel_plane_restore(struct drm_plane *plane) +static int intel_plane_set_property(struct drm_plane *plane, + struct drm_property *prop, + uint64_t val) +{ + struct drm_device *dev = plane->dev; + struct intel_plane *intel_plane = to_intel_plane(plane); + uint64_t old_val; + int ret = -ENOENT; + + if (prop == dev->mode_config.rotation_property) { + /* exactly one rotation angle please */ + if (hweight32(val & 0xf) != 1) + return -EINVAL; + + old_val = intel_plane->rotation; + intel_plane->rotation = val; + ret = intel_plane_restore(plane); + if (ret) + intel_plane->rotation = old_val; + } + + return ret; +} + +int intel_plane_restore(struct drm_plane *plane) { struct intel_plane *intel_plane = to_intel_plane(plane); if (!plane->crtc || !plane->fb) - return; + return 0; - intel_update_plane(plane, plane->crtc, plane->fb, - intel_plane->crtc_x, intel_plane->crtc_y, - intel_plane->crtc_w, intel_plane->crtc_h, - intel_plane->src_x, intel_plane->src_y, - intel_plane->src_w, intel_plane->src_h); + return intel_update_plane(plane, plane->crtc, plane->fb, + intel_plane->crtc_x, intel_plane->crtc_y, + intel_plane->crtc_w, intel_plane->crtc_h, + intel_plane->src_x, intel_plane->src_y, + intel_plane->src_w, intel_plane->src_h); } void intel_plane_disable(struct drm_plane *plane) @@ -1206,6 +1268,7 @@ static const struct drm_plane_funcs intel_plane_funcs = { .update_plane = intel_update_plane, .disable_plane = intel_disable_plane, .destroy = intel_destroy_plane, + .set_property = intel_plane_set_property, }; static uint32_t ilk_plane_formats[] = { @@ -1310,13 +1373,28 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) intel_plane->pipe = pipe; intel_plane->plane = plane; + intel_plane->rotation = BIT(DRM_ROTATE_0); possible_crtcs = (1 << pipe); ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs, &intel_plane_funcs, plane_formats, num_plane_formats, false); - if (ret) + if (ret) { kfree(intel_plane); + goto out; + } + + if (!dev->mode_config.rotation_property) + dev->mode_config.rotation_property = + drm_mode_create_rotation_property(dev, + BIT(DRM_ROTATE_0) | + BIT(DRM_ROTATE_180)); + + if (dev->mode_config.rotation_property) + drm_object_attach_property(&intel_plane->base.base, + dev->mode_config.rotation_property, + intel_plane->rotation); + out: return ret; } diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index e211eef4b7e4..32186a656816 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1323,11 +1323,16 @@ intel_tv_detect(struct drm_connector *connector, bool force) struct intel_load_detect_pipe tmp; struct drm_modeset_acquire_ctx ctx; + drm_modeset_acquire_init(&ctx, 0); + if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) { type = intel_tv_detect_type(intel_tv, connector); - intel_release_load_detect_pipe(connector, &tmp, &ctx); + 
intel_release_load_detect_pipe(connector, &tmp); } else return connector_status_unknown; + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } else return connector->status; |
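The 180-degree rotation support added to intel_sprite.c above moves the scanout start to the opposite corner of the source rectangle: x += src_w, y += src_h, and linear_offset += src_h * pitch + src_w * pixel_size. A standalone sketch of that offset arithmetic follows; the struct and field names are illustrative, not the driver's, and the sample numbers are made up. Note the patch skips the software adjustment on HSW/BDW, where the hardware applies it itself.

#include <stdint.h>
#include <stdio.h>

/* Illustrative plane source state, not the driver's structures. */
struct plane_src {
	uint32_t x, y;		/* source offset in pixels */
	uint32_t w, h;		/* source size in pixels */
	uint32_t pitch;		/* framebuffer stride in bytes */
	uint32_t cpp;		/* bytes per pixel */
};

/*
 * Unrotated, the scanout start is simply y * pitch + x * cpp.
 * With 180 degree rotation the hardware walks the buffer backwards,
 * so the start offset is pushed past the bottom-right corner of the
 * source rectangle - the same adjustment the patch makes.
 */
static uint32_t linear_offset(const struct plane_src *p, int rotated_180)
{
	uint32_t off = p->y * p->pitch + p->x * p->cpp;

	if (rotated_180)
		off += p->h * p->pitch + p->w * p->cpp;
	return off;
}

int main(void)
{
	struct plane_src p = { .x = 0, .y = 0, .w = 256, .h = 64,
			       .pitch = 4096, .cpp = 4 };

	printf("normal:  %u\n", (unsigned)linear_offset(&p, 0));	/* 0 */
	printf("rotated: %u\n", (unsigned)linear_offset(&p, 1));	/* 64*4096 + 256*4 = 263168 */
	return 0;
}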