Diffstat (limited to 'drivers')
401 files changed, 16209 insertions, 9407 deletions
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index aef87fdbd187..44311296ec02 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -840,6 +840,14 @@ static bool i830_check_flags(unsigned int flags) return false; } +void intel_gtt_insert_page(dma_addr_t addr, + unsigned int pg, + unsigned int flags) +{ + intel_private.driver->write_entry(addr, pg, flags); +} +EXPORT_SYMBOL(intel_gtt_insert_page); + void intel_gtt_insert_sg_entries(struct sg_table *st, unsigned int pg_start, unsigned int flags) diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 4a424eca75ed..f353db213a81 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,2 +1,2 @@ -obj-y := dma-buf.o fence.o reservation.o seqno-fence.o +obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o obj-$(CONFIG_SYNC_FILE) += sync_file.o diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 6355ab38d630..20ce0687b111 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -824,7 +824,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) EXPORT_SYMBOL_GPL(dma_buf_vunmap); #ifdef CONFIG_DEBUG_FS -static int dma_buf_describe(struct seq_file *s) +static int dma_buf_debug_show(struct seq_file *s, void *unused) { int ret; struct dma_buf *buf_obj; @@ -879,17 +879,9 @@ static int dma_buf_describe(struct seq_file *s) return 0; } -static int dma_buf_show(struct seq_file *s, void *unused) -{ - void (*func)(struct seq_file *) = s->private; - - func(s); - return 0; -} - static int dma_buf_debug_open(struct inode *inode, struct file *file) { - return single_open(file, dma_buf_show, inode->i_private); + return single_open(file, dma_buf_debug_show, NULL); } static const struct file_operations dma_buf_debug_fops = { @@ -903,20 +895,23 @@ static struct dentry *dma_buf_debugfs_dir; static int dma_buf_init_debugfs(void) { + struct dentry *d; int err = 0; - dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL); - - if (IS_ERR(dma_buf_debugfs_dir)) { - err = PTR_ERR(dma_buf_debugfs_dir); - dma_buf_debugfs_dir = NULL; - return err; - } + d = debugfs_create_dir("dma_buf", NULL); + if (IS_ERR(d)) + return PTR_ERR(d); - err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe); + dma_buf_debugfs_dir = d; - if (err) + d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir, + NULL, &dma_buf_debug_fops); + if (IS_ERR(d)) { pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); + debugfs_remove_recursive(dma_buf_debugfs_dir); + dma_buf_debugfs_dir = NULL; + err = PTR_ERR(d); + } return err; } @@ -926,17 +921,6 @@ static void dma_buf_uninit_debugfs(void) if (dma_buf_debugfs_dir) debugfs_remove_recursive(dma_buf_debugfs_dir); } - -int dma_buf_debugfs_create_file(const char *name, - int (*write)(struct seq_file *)) -{ - struct dentry *d; - - d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir, - write, &dma_buf_debug_fops); - - return PTR_ERR_OR_ZERO(d); -} #else static inline int dma_buf_init_debugfs(void) { diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c new file mode 100644 index 000000000000..a8731c853da6 --- /dev/null +++ b/drivers/dma-buf/fence-array.c @@ -0,0 +1,144 @@ +/* + * fence-array: aggregate fences to be waited together + * + * Copyright (C) 2016 Collabora Ltd + * Copyright (C) 2016 Advanced Micro Devices, Inc. 
+ * Authors: + * Gustavo Padovan <gustavo@padovan.org> + * Christian König <christian.koenig@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/fence-array.h> + +static void fence_array_cb_func(struct fence *f, struct fence_cb *cb); + +static const char *fence_array_get_driver_name(struct fence *fence) +{ + return "fence_array"; +} + +static const char *fence_array_get_timeline_name(struct fence *fence) +{ + return "unbound"; +} + +static void fence_array_cb_func(struct fence *f, struct fence_cb *cb) +{ + struct fence_array_cb *array_cb = + container_of(cb, struct fence_array_cb, cb); + struct fence_array *array = array_cb->array; + + if (atomic_dec_and_test(&array->num_pending)) + fence_signal(&array->base); + fence_put(&array->base); +} + +static bool fence_array_enable_signaling(struct fence *fence) +{ + struct fence_array *array = to_fence_array(fence); + struct fence_array_cb *cb = (void *)(&array[1]); + unsigned i; + + for (i = 0; i < array->num_fences; ++i) { + cb[i].array = array; + /* + * As we may report that the fence is signaled before all + * callbacks are complete, we need to take an additional + * reference count on the array so that we do not free it too + * early. The core fence handling will only hold the reference + * until we signal the array as complete (but that is now + * insufficient). + */ + fence_get(&array->base); + if (fence_add_callback(array->fences[i], &cb[i].cb, + fence_array_cb_func)) { + fence_put(&array->base); + if (atomic_dec_and_test(&array->num_pending)) + return false; + } + } + + return true; +} + +static bool fence_array_signaled(struct fence *fence) +{ + struct fence_array *array = to_fence_array(fence); + + return atomic_read(&array->num_pending) <= 0; +} + +static void fence_array_release(struct fence *fence) +{ + struct fence_array *array = to_fence_array(fence); + unsigned i; + + for (i = 0; i < array->num_fences; ++i) + fence_put(array->fences[i]); + + kfree(array->fences); + fence_free(fence); +} + +const struct fence_ops fence_array_ops = { + .get_driver_name = fence_array_get_driver_name, + .get_timeline_name = fence_array_get_timeline_name, + .enable_signaling = fence_array_enable_signaling, + .signaled = fence_array_signaled, + .wait = fence_default_wait, + .release = fence_array_release, +}; + +/** + * fence_array_create - Create a custom fence array + * @num_fences: [in] number of fences to add in the array + * @fences: [in] array containing the fences + * @context: [in] fence context to use + * @seqno: [in] sequence number to use + * @signal_on_any: [in] signal on any fence in the array + * + * Allocate a fence_array object and initialize the base fence with fence_init(). + * In case of error it returns NULL. + * + * The caller should allocate the fences array with num_fences size + * and fill it with the fences it wants to add to the object. Ownership of this + * array is taken and fence_put() is used on each fence on release.
+ * + * If @signal_on_any is true the fence array signals if any fence in the array + * signals, otherwise it signals when all fences in the array signal. + */ +struct fence_array *fence_array_create(int num_fences, struct fence **fences, + u64 context, unsigned seqno, + bool signal_on_any) +{ + struct fence_array *array; + size_t size = sizeof(*array); + + /* Allocate the callback structures behind the array. */ + size += num_fences * sizeof(struct fence_array_cb); + array = kzalloc(size, GFP_KERNEL); + if (!array) + return NULL; + + spin_lock_init(&array->lock); + fence_init(&array->base, &fence_array_ops, &array->lock, + context, seqno); + + array->num_fences = num_fences; + atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences); + array->fences = fences; + + return array; +} +EXPORT_SYMBOL(fence_array_create); diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c index 7b05dbe9b296..4d51f9e83fa8 100644 --- a/drivers/dma-buf/fence.c +++ b/drivers/dma-buf/fence.c @@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit); * context or not. One device can have multiple separate contexts, * and they're used if some engine can run independently of another. */ -static atomic_t fence_context_counter = ATOMIC_INIT(0); +static atomic64_t fence_context_counter = ATOMIC64_INIT(0); /** * fence_context_alloc - allocate an array of fence contexts @@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0); * This function will return the first index of the number of fences allocated. * The fence context is used for setting fence->context to a unique number. */ -unsigned fence_context_alloc(unsigned num) +u64 fence_context_alloc(unsigned num) { BUG_ON(!num); - return atomic_add_return(num, &fence_context_counter) - num; + return atomic64_add_return(num, &fence_context_counter) - num; } EXPORT_SYMBOL(fence_context_alloc); @@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout); */ void fence_init(struct fence *fence, const struct fence_ops *ops, - spinlock_t *lock, unsigned context, unsigned seqno) + spinlock_t *lock, u64 context, unsigned seqno) { BUG_ON(!lock); BUG_ON(!ops || !ops->wait || !ops->enable_signaling || diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index f08cf2d8309e..9aaa608dfe01 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -82,7 +82,7 @@ struct sync_file *sync_file_create(struct fence *fence) sync_file->num_fences = 1; atomic_set(&sync_file->status, 1); - snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d", + snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d", fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), fence->context, fence->seqno); diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index be43afb08c69..e3dba6f44a79 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_lock.o drm_memory.o drm_drv.o drm_vm.o \ drm_scatter.o drm_pci.o \ drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ - drm_crtc.o drm_modes.o drm_edid.o \ + drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \ drm_info.o drm_debugfs.o drm_encoder_slave.o \ drm_trace_points.o drm_global.o drm_prime.o \ drm_rect.o drm_vma_manager.o drm_flip_work.o \ @@ -23,7 +23,8 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ - drm_kms_helper_common.o 
drm_dp_dual_mode_helper.o + drm_kms_helper_common.o drm_dp_dual_mode_helper.o \ + drm_simple_kms_helper.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 992f00b65be4..da3d02154fa6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -2032,7 +2032,7 @@ struct amdgpu_device { struct amdgpu_irq_src hpd_irq; /* rings */ - unsigned fence_context; + u64 fence_context; unsigned num_rings; struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; bool ib_pool_ready; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index b0832da2ef7e..a6eecf6f9065 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -240,7 +240,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, work->base = base; - r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id); + r = drm_crtc_vblank_get(crtc); if (r) { DRM_ERROR("failed to get vblank before flip\n"); goto pflip_cleanup; @@ -268,7 +268,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, return 0; vblank_cleanup: - drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(crtc); pflip_cleanup: if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 8bf84efafb04..b16366c2b4a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -427,7 +427,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, soffset, eoffset, eoffset - soffset); if (i->fence) - seq_printf(m, " protected by 0x%08x on context %d", + seq_printf(m, " protected by 0x%08x on context %llu", i->fence->seqno, i->fence->context); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 8227344d2ff6..c1b04e9aab57 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2667,19 +2667,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v10_0_crtc_load_lut(crtc); + + return 0; } static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc) @@ -2717,13 +2719,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v10_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v10_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -3372,7 +3374,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index af26ec0bc59d..c90408bc0fde 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2678,19 +2678,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v11_0_crtc_load_lut(crtc); + + return 0; } static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) @@ -2728,13 +2730,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v11_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v11_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -3433,7 +3435,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 3fb65e41a6ef..300ff4aab0fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2574,19 +2574,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v8_0_crtc_load_lut(crtc); + + return 0; } static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc) @@ -2624,13 +2626,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v8_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v8_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -3376,7 +3378,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index ec4036a09f3e..a625b9137da2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -187,12 +187,12 @@ int init_pipelines(struct device_queue_manager *dqm, unsigned int get_first_pipe(struct device_queue_manager *dqm); unsigned int get_pipes_num(struct device_queue_manager *dqm); -extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) +static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) { return (pdd->lds_base >> 16) & 0xFF; } -extern inline unsigned int +static inline unsigned int get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd) { return (pdd->lds_base >> 60) & 0x0E; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index d0d5f4baf72d..80113c335966 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -617,10 +617,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd); int kfd_init_apertures(struct kfd_process *process); /* Queue Context Management */ -inline uint32_t lower_32(uint64_t x); -inline uint32_t upper_32(uint64_t x); struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd); -inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m); int init_queue(struct queue **q, struct queue_properties properties); void uninit_queue(struct queue *q); diff --git a/drivers/gpu/drm/arc/Makefile b/drivers/gpu/drm/arc/Makefile index d48fda70f857..73de56a0139a 100644 --- a/drivers/gpu/drm/arc/Makefile +++ b/drivers/gpu/drm/arc/Makefile @@ -1,2 +1,2 @@ -arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_drv.o +arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_sim.o arcpgu_drv.o obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h index 86574b698a78..e8fcf3ab1d9a 100644 --- a/drivers/gpu/drm/arc/arcpgu.h +++ b/drivers/gpu/drm/arc/arcpgu.h @@ -22,7 +22,6 @@ struct arcpgu_drm_private { struct clk *clk; struct drm_fbdev_cma *fbdev; struct drm_framebuffer *fb; - struct list_head event_list; struct drm_crtc crtc; struct drm_plane *plane; }; @@ -43,6 +42,7 @@ static inline 
u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu, int arc_pgu_setup_crtc(struct drm_device *dev); int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np); +int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np); struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev, unsigned int preferred_bpp, unsigned int num_crtc, unsigned int max_conn_count); diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index 92f8beff8e60..ee0a61c2861b 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -145,20 +145,14 @@ static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc, static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); - unsigned long flags; - - if (crtc->state->event) { - struct drm_pending_vblank_event *event = crtc->state->event; + struct drm_pending_vblank_event *event = crtc->state->event; + if (event) { crtc->state->event = NULL; - event->pipe = drm_crtc_index(crtc); - - WARN_ON(drm_crtc_vblank_get(crtc) != 0); - spin_lock_irqsave(&crtc->dev->event_lock, flags); - list_add_tail(&event->base.link, &arcpgu->event_list); - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); } } diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c index 76e187a5bde0..ccbdadb108dc 100644 --- a/drivers/gpu/drm/arc/arcpgu_drv.c +++ b/drivers/gpu/drm/arc/arcpgu_drv.c @@ -32,17 +32,11 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev) drm_fbdev_cma_hotplug_event(arcpgu->fbdev); } -static int arcpgu_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, bool async) -{ - return drm_atomic_helper_commit(dev, state, false); -} - static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = { .fb_create = drm_fb_cma_create, .output_poll_changed = arcpgu_fb_output_poll_changed, .atomic_check = drm_atomic_helper_check, - .atomic_commit = arcpgu_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; static void arcpgu_setup_mode_config(struct drm_device *drm) @@ -81,22 +75,6 @@ static const struct file_operations arcpgu_drm_ops = { .mmap = arcpgu_gem_mmap, }; -static void arcpgu_preclose(struct drm_device *drm, struct drm_file *file) -{ - struct arcpgu_drm_private *arcpgu = drm->dev_private; - struct drm_pending_vblank_event *e, *t; - unsigned long flags; - - spin_lock_irqsave(&drm->event_lock, flags); - list_for_each_entry_safe(e, t, &arcpgu->event_list, base.link) { - if (e->base.file_priv != file) - continue; - list_del(&e->base.link); - e->base.destroy(&e->base); - } - spin_unlock_irqrestore(&drm->event_lock, flags); -} - static void arcpgu_lastclose(struct drm_device *drm) { struct arcpgu_drm_private *arcpgu = drm->dev_private; @@ -122,8 +100,6 @@ static int arcpgu_load(struct drm_device *drm) if (IS_ERR(arcpgu->clk)) return PTR_ERR(arcpgu->clk); - INIT_LIST_HEAD(&arcpgu->event_list); - arcpgu_setup_mode_config(drm); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -149,15 +125,16 @@ static int arcpgu_load(struct drm_device *drm) /* find the encoder node and initialize it */ encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0); - if (!encoder_node) { - dev_err(drm->dev, "failed to get an encoder slave node\n"); - return -ENODEV; + if (encoder_node) { + ret = 
arcpgu_drm_hdmi_init(drm, encoder_node); + if (ret < 0) + return ret; + } else { + ret = arcpgu_drm_sim_init(drm, 0); + if (ret < 0) + return ret; } - ret = arcpgu_drm_hdmi_init(drm, encoder_node); - if (ret < 0) - return ret; - drm_mode_config_reset(drm); drm_kms_helper_poll_init(drm); @@ -192,7 +169,6 @@ int arcpgu_unload(struct drm_device *drm) static struct drm_driver arcpgu_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, - .preclose = arcpgu_preclose, .lastclose = arcpgu_lastclose, .name = "drm-arcpgu", .desc = "ARC PGU Controller", @@ -207,7 +183,7 @@ static struct drm_driver arcpgu_drm_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, @@ -235,15 +211,8 @@ static int arcpgu_probe(struct platform_device *pdev) if (ret) goto err_unload; - ret = drm_connector_register_all(drm); - if (ret) - goto err_unregister; - return 0; -err_unregister: - drm_dev_unregister(drm); - err_unload: arcpgu_unload(drm); @@ -257,7 +226,6 @@ static int arcpgu_remove(struct platform_device *pdev) { struct drm_device *drm = platform_get_drvdata(pdev); - drm_connector_unregister_all(drm); drm_dev_unregister(drm); arcpgu_unload(drm); drm_dev_unref(drm); diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c index 08b6baeb320d..b7a8b2ac4055 100644 --- a/drivers/gpu/drm/arc/arcpgu_hdmi.c +++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c @@ -46,23 +46,6 @@ static int arcpgu_drm_connector_get_modes(struct drm_connector *connector) return sfuncs->get_modes(&slave->base, connector); } -struct drm_encoder * -arcpgu_drm_connector_best_encoder(struct drm_connector *connector) -{ - struct drm_encoder_slave *slave; - struct arcpgu_drm_connector *con = - container_of(connector, struct arcpgu_drm_connector, connector); - - slave = con->encoder_slave; - if (slave == NULL) { - dev_err(connector->dev->dev, - "connector_best_encoder: cannot find slave encoder for connector\n"); - return NULL; - } - - return &slave->base; -} - static enum drm_connector_status arcpgu_drm_connector_detect(struct drm_connector *connector, bool force) { @@ -97,7 +80,6 @@ static void arcpgu_drm_connector_destroy(struct drm_connector *connector) static const struct drm_connector_helper_funcs arcpgu_drm_connector_helper_funcs = { .get_modes = arcpgu_drm_connector_get_modes, - .best_encoder = arcpgu_drm_connector_best_encoder, }; static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c new file mode 100644 index 000000000000..2bf06d71556a --- /dev/null +++ b/drivers/gpu/drm/arc/arcpgu_sim.c @@ -0,0 +1,128 @@ +/* + * ARC PGU DRM driver. + * + * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <drm/drm_crtc_helper.h> +#include <drm/drm_encoder_slave.h> +#include <drm/drm_atomic_helper.h> + +#include "arcpgu.h" + +#define XRES_DEF 640 +#define YRES_DEF 480 + +#define XRES_MAX 8192 +#define YRES_MAX 8192 + + +struct arcpgu_drm_connector { + struct drm_connector connector; + struct drm_encoder_slave *encoder_slave; +}; + +static int arcpgu_drm_connector_get_modes(struct drm_connector *connector) +{ + int count; + + count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX); + drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); + return count; +} + +static enum drm_connector_status +arcpgu_drm_connector_detect(struct drm_connector *connector, bool force) +{ + return connector_status_connected; +} + +static void arcpgu_drm_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + +static const struct drm_connector_helper_funcs +arcpgu_drm_connector_helper_funcs = { + .get_modes = arcpgu_drm_connector_get_modes, +}; + +static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .reset = drm_atomic_helper_connector_reset, + .detect = arcpgu_drm_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = arcpgu_drm_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np) +{ + struct arcpgu_drm_connector *arcpgu_connector; + struct drm_encoder_slave *encoder; + struct drm_connector *connector; + int ret; + + encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL); + if (encoder == NULL) + return -ENOMEM; + + encoder->base.possible_crtcs = 1; + encoder->base.possible_clones = 0; + + ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs, + DRM_MODE_ENCODER_VIRTUAL, NULL); + if (ret) + return ret; + + arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector), + GFP_KERNEL); + if (!arcpgu_connector) { + ret = -ENOMEM; + goto error_encoder_cleanup; + } + + connector = &arcpgu_connector->connector; + drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs); + + ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL); + if (ret < 0) { + dev_err(drm->dev, "failed to initialize drm connector\n"); + goto error_encoder_cleanup; + } + + ret = drm_mode_connector_attach_encoder(connector, &encoder->base); + if (ret < 0) { + dev_err(drm->dev, "could not attach connector to encoder\n"); + drm_connector_unregister(connector); + goto error_connector_cleanup; + } + + arcpgu_connector->encoder_slave = encoder; + + return 0; + +error_connector_cleanup: + drm_connector_cleanup(connector); + +error_encoder_cleanup: + drm_encoder_cleanup(&encoder->base); + return ret; +} diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig index eaed454e043c..1b2906568a48 100644 --- a/drivers/gpu/drm/arm/Kconfig +++ b/drivers/gpu/drm/arm/Kconfig @@ -25,3 +25,19 @@ config DRM_HDLCD_SHOW_UNDERRUN Enable this option to show in red colour the pixels that the HDLCD device did not fetch from framebuffer due to underrun conditions. 
+ +config DRM_MALI_DISPLAY + tristate "ARM Mali Display Processor" + depends on DRM && OF && (ARM || ARM64) + depends on COMMON_CLK + select DRM_ARM + select DRM_KMS_HELPER + select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER + select VIDEOMODE_HELPERS + help + Choose this option if you want to compile the ARM Mali Display + Processor driver. It supports the DP500, DP550 and DP650 variants + of the hardware. + + If compiled as a module it will be called mali-dp. diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile index 89dcb7bab93a..bb8b158ff90d 100644 --- a/drivers/gpu/drm/arm/Makefile +++ b/drivers/gpu/drm/arm/Makefile @@ -1,2 +1,4 @@ hdlcd-y := hdlcd_drv.o hdlcd_crtc.o obj-$(CONFIG_DRM_HDLCD) += hdlcd.o +mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o +obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 0813c2f06931..48019ae22ddb 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -196,30 +196,11 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, } } -static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ -} - -static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { - .mode_fixup = hdlcd_crtc_mode_fixup, - .mode_set = drm_helper_crtc_mode_set, - .mode_set_base = drm_helper_crtc_mode_set_base, - .mode_set_nofb = hdlcd_crtc_mode_set_nofb, .enable = hdlcd_crtc_enable, .disable = hdlcd_crtc_disable, - .prepare = hdlcd_crtc_disable, - .commit = hdlcd_crtc_enable, .atomic_check = hdlcd_crtc_atomic_check, .atomic_begin = hdlcd_crtc_atomic_begin, - .atomic_flush = hdlcd_crtc_atomic_flush, }; static int hdlcd_plane_atomic_check(struct drm_plane *plane, diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index a6ca36f0096f..74279be20b75 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -106,17 +106,11 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm) drm_fbdev_cma_hotplug_event(hdlcd->fbdev); } -static int hdlcd_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, bool nonblock) -{ - return drm_atomic_helper_commit(dev, state, false); -} - static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = { .fb_create = drm_fb_cma_create, .output_poll_changed = hdlcd_fb_output_poll_changed, .atomic_check = drm_atomic_helper_check, - .atomic_commit = hdlcd_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; static void hdlcd_setup_mode_config(struct drm_device *drm) @@ -296,7 +290,7 @@ static struct drm_driver hdlcd_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = hdlcd_enable_vblank, .disable_vblank = hdlcd_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c new file mode 100644 index 000000000000..08e6a71f5d05 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -0,0 +1,216 @@ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. 
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP500/DP550/DP650 driver (crtc operations) + */ + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <linux/clk.h> +#include <video/videomode.h> + +#include "malidp_drv.h" +#include "malidp_hw.h" + +static bool malidp_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + + /* + * check that the hardware can drive the required clock rate, + * but skip the check if the clock is meant to be disabled (req_rate = 0) + */ + long rate, req_rate = mode->crtc_clock * 1000; + + if (req_rate) { + rate = clk_round_rate(hwdev->mclk, req_rate); + if (rate < req_rate) { + DRM_DEBUG_DRIVER("mclk clock unable to reach %d kHz\n", + mode->crtc_clock); + return false; + } + + rate = clk_round_rate(hwdev->pxlclk, req_rate); + if (rate != req_rate) { + DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n", + req_rate); + return false; + } + } + + return true; +} + +static void malidp_crtc_enable(struct drm_crtc *crtc) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + struct videomode vm; + + drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm); + + clk_prepare_enable(hwdev->pxlclk); + + /* mclk needs to be set to the same or higher rate than pxlclk */ + clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000); + clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); + + hwdev->modeset(hwdev, &vm); + hwdev->leave_config_mode(hwdev); + drm_crtc_vblank_on(crtc); +} + +static void malidp_crtc_disable(struct drm_crtc *crtc) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + + drm_crtc_vblank_off(crtc); + hwdev->enter_config_mode(hwdev); + clk_disable_unprepare(hwdev->pxlclk); +} + +static int malidp_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + struct drm_plane *plane; + const struct drm_plane_state *pstate; + u32 rot_mem_free, rot_mem_usable; + int rotated_planes = 0; + + /* + * check if there is enough rotation memory available for planes + * that need 90° and 270° rotation. Each plane has set its required + * memory size in the ->plane_check() callback; here we only make + * sure that the sums are less than the total usable memory. + * + * The rotation memory allocation algorithm (for each plane): + * a. If no more rotated planes exist, all remaining rotation + * memory in the bank is available for use by the plane. + * b. If other rotated planes exist, and plane's layer ID is + * DE_VIDEO1, it can use all the memory from first bank if + * secondary rotation memory bank is available, otherwise it can + * use up to half the bank's memory. + * c.
If other rotated planes exist, and plane's layer ID is not + * DE_VIDEO1, it can use half of the available memory + * + * Note: this algorithm assumes that the order in which the planes are + * checked always has DE_VIDEO1 plane first in the list if it is + * rotated. Because that is how we create the planes in the first + * place, under current DRM version things work, but if ever the order + * in which drm_atomic_crtc_state_for_each_plane() iterates over planes + * changes, we need to pre-sort the planes before validation. + */ + + /* first count the number of rotated planes */ + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + if (pstate->rotation & MALIDP_ROTATED_MASK) + rotated_planes++; + } + + rot_mem_free = hwdev->rotation_memory[0]; + /* + * if we have more than 1 plane using rotation memory, use the second + * block of rotation memory as well + */ + if (rotated_planes > 1) + rot_mem_free += hwdev->rotation_memory[1]; + + /* now validate the rotation memory requirements */ + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + struct malidp_plane *mp = to_malidp_plane(plane); + struct malidp_plane_state *ms = to_malidp_plane_state(pstate); + + if (pstate->rotation & MALIDP_ROTATED_MASK) { + /* process current plane */ + rotated_planes--; + + if (!rotated_planes) { + /* no more rotated planes, we can use what's left */ + rot_mem_usable = rot_mem_free; + } else { + if ((mp->layer->id != DE_VIDEO1) || + (hwdev->rotation_memory[1] == 0)) + rot_mem_usable = rot_mem_free / 2; + else + rot_mem_usable = hwdev->rotation_memory[0]; + } + + rot_mem_free -= rot_mem_usable; + + if (ms->rotmem_size > rot_mem_usable) + return -EINVAL; + } + } + + return 0; +} + +static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = { + .mode_fixup = malidp_crtc_mode_fixup, + .enable = malidp_crtc_enable, + .disable = malidp_crtc_disable, + .atomic_check = malidp_crtc_atomic_check, +}; + +static const struct drm_crtc_funcs malidp_crtc_funcs = { + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +int malidp_crtc_init(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + struct drm_plane *primary = NULL, *plane; + int ret; + + ret = malidp_de_planes_init(drm); + if (ret < 0) { + DRM_ERROR("Failed to initialise planes\n"); + return ret; + } + + drm_for_each_plane(plane, drm) { + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + primary = plane; + break; + } + } + + if (!primary) { + DRM_ERROR("no primary plane found\n"); + ret = -EINVAL; + goto crtc_cleanup_planes; + } + + ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL, + &malidp_crtc_funcs, NULL); + + if (!ret) { + drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs); + return 0; + } + +crtc_cleanup_planes: + malidp_de_planes_destroy(drm); + + return ret; +} diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c new file mode 100644 index 000000000000..e5b44e92f8cf --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -0,0 +1,512 @@ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. 
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP500/DP550/DP650 KMS/DRM driver + */ + +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/of_reserved_mem.h> + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_of.h> + +#include "malidp_drv.h" +#include "malidp_regs.h" +#include "malidp_hw.h" + +#define MALIDP_CONF_VALID_TIMEOUT 250 + +/* + * set the "config valid" bit and wait until the hardware acts on it + */ +static int malidp_set_and_wait_config_valid(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + int ret; + + hwdev->set_config_valid(hwdev); + /* don't wait for config_valid flag if we are in config mode */ + if (hwdev->in_config_mode(hwdev)) + return 0; + + ret = wait_event_interruptible_timeout(malidp->wq, + atomic_read(&malidp->config_valid) == 1, + msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT)); + + return (ret > 0) ? 0 : -ETIMEDOUT; +} + +static void malidp_output_poll_changed(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + + drm_fbdev_cma_hotplug_event(malidp->fbdev); +} + +static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state) +{ + struct drm_pending_vblank_event *event; + struct drm_device *drm = state->dev; + struct malidp_drm *malidp = drm->dev_private; + int ret = malidp_set_and_wait_config_valid(drm); + + if (ret) + DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n"); + + event = malidp->crtc.state->event; + if (event) { + malidp->crtc.state->event = NULL; + + spin_lock_irq(&drm->event_lock); + if (drm_crtc_vblank_get(&malidp->crtc) == 0) + drm_crtc_arm_vblank_event(&malidp->crtc, event); + else + drm_crtc_send_vblank_event(&malidp->crtc, event); + spin_unlock_irq(&drm->event_lock); + } + drm_atomic_helper_commit_hw_done(state); +} + +static void malidp_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *drm = state->dev; + + drm_atomic_helper_commit_modeset_disables(drm, state); + drm_atomic_helper_commit_modeset_enables(drm, state); + drm_atomic_helper_commit_planes(drm, state, true); + + malidp_atomic_commit_hw_done(state); + + drm_atomic_helper_wait_for_vblanks(drm, state); + + drm_atomic_helper_cleanup_planes(drm, state); +} + +static struct drm_mode_config_helper_funcs malidp_mode_config_helpers = { + .atomic_commit_tail = malidp_atomic_commit_tail, +}; + +static const struct drm_mode_config_funcs malidp_mode_config_funcs = { + .fb_create = drm_fb_cma_create, + .output_poll_changed = malidp_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static int malidp_enable_vblank(struct drm_device *drm, unsigned int crtc) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK, + hwdev->map.de_irq_map.vsync_irq); + return 0; +} + +static void malidp_disable_vblank(struct 
drm_device *drm, unsigned int pipe) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, + hwdev->map.de_irq_map.vsync_irq); +} + +static int malidp_init(struct drm_device *drm) +{ + int ret; + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + drm_mode_config_init(drm); + + drm->mode_config.min_width = hwdev->min_line_size; + drm->mode_config.min_height = hwdev->min_line_size; + drm->mode_config.max_width = hwdev->max_line_size; + drm->mode_config.max_height = hwdev->max_line_size; + drm->mode_config.funcs = &malidp_mode_config_funcs; + drm->mode_config.helper_private = &malidp_mode_config_helpers; + + ret = malidp_crtc_init(drm); + if (ret) { + drm_mode_config_cleanup(drm); + return ret; + } + + return 0; +} + +static int malidp_irq_init(struct platform_device *pdev) +{ + int irq_de, irq_se, ret = 0; + struct drm_device *drm = dev_get_drvdata(&pdev->dev); + + /* fetch the interrupts from DT */ + irq_de = platform_get_irq_byname(pdev, "DE"); + if (irq_de < 0) { + DRM_ERROR("no 'DE' IRQ specified!\n"); + return irq_de; + } + irq_se = platform_get_irq_byname(pdev, "SE"); + if (irq_se < 0) { + DRM_ERROR("no 'SE' IRQ specified!\n"); + return irq_se; + } + + ret = malidp_de_irq_init(drm, irq_de); + if (ret) + return ret; + + ret = malidp_se_irq_init(drm, irq_se); + if (ret) { + malidp_de_irq_fini(drm); + return ret; + } + + return 0; +} + +static void malidp_lastclose(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + + drm_fbdev_cma_restore_mode(malidp->fbdev); +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .poll = drm_poll, + .read = drm_read, + .llseek = noop_llseek, + .mmap = drm_gem_cma_mmap, +}; + +static struct drm_driver malidp_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | + DRIVER_PRIME, + .lastclose = malidp_lastclose, + .get_vblank_counter = drm_vblank_no_hw_counter, + .enable_vblank = malidp_enable_vblank, + .disable_vblank = malidp_disable_vblank, + .gem_free_object_unlocked = drm_gem_cma_free_object, + .gem_vm_ops = &drm_gem_cma_vm_ops, + .dumb_create = drm_gem_cma_dumb_create, + .dumb_map_offset = drm_gem_cma_dumb_map_offset, + .dumb_destroy = drm_gem_dumb_destroy, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, + .gem_prime_vmap = drm_gem_cma_prime_vmap, + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, + .gem_prime_mmap = drm_gem_cma_prime_mmap, + .fops = &fops, + .name = "mali-dp", + .desc = "ARM Mali Display Processor driver", + .date = "20160106", + .major = 1, + .minor = 0, +}; + +static const struct of_device_id malidp_drm_of_match[] = { + { + .compatible = "arm,mali-dp500", + .data = &malidp_device[MALIDP_500] + }, + { + .compatible = "arm,mali-dp550", + .data = &malidp_device[MALIDP_550] + }, + { + .compatible = "arm,mali-dp650", + .data = &malidp_device[MALIDP_650] + }, + {}, +}; +MODULE_DEVICE_TABLE(of, malidp_drm_of_match); + +#define MAX_OUTPUT_CHANNELS 3 + +static int malidp_bind(struct device *dev) +{ + struct resource *res; + struct 
drm_device *drm; + struct malidp_drm *malidp; + struct malidp_hw_device *hwdev; + struct platform_device *pdev = to_platform_device(dev); + /* number of lines for the R, G and B output */ + u8 output_width[MAX_OUTPUT_CHANNELS]; + int ret = 0, i; + u32 version, out_depth = 0; + + malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL); + if (!malidp) + return -ENOMEM; + + hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return -ENOMEM; + + /* + * copy the associated data from malidp_drm_of_match to avoid + * having to keep a reference to the OF node after binding + */ + memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev)); + malidp->dev = hwdev; + + INIT_LIST_HEAD(&malidp->event_list); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hwdev->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(hwdev->regs)) { + DRM_ERROR("Failed to map control registers area\n"); + return PTR_ERR(hwdev->regs); + } + + hwdev->pclk = devm_clk_get(dev, "pclk"); + if (IS_ERR(hwdev->pclk)) + return PTR_ERR(hwdev->pclk); + + hwdev->aclk = devm_clk_get(dev, "aclk"); + if (IS_ERR(hwdev->aclk)) + return PTR_ERR(hwdev->aclk); + + hwdev->mclk = devm_clk_get(dev, "mclk"); + if (IS_ERR(hwdev->mclk)) + return PTR_ERR(hwdev->mclk); + + hwdev->pxlclk = devm_clk_get(dev, "pxlclk"); + if (IS_ERR(hwdev->pxlclk)) + return PTR_ERR(hwdev->pxlclk); + + /* Get the optional framebuffer memory resource */ + ret = of_reserved_mem_device_init(dev); + if (ret && ret != -ENODEV) + return ret; + + drm = drm_dev_alloc(&malidp_driver, dev); + if (!drm) { + ret = -ENOMEM; + goto alloc_fail; + } + + /* Enable APB clock in order to get access to the registers */ + clk_prepare_enable(hwdev->pclk); + /* + * Enable AXI clock and main clock so that prefetch can start once + * the registers are set + */ + clk_prepare_enable(hwdev->aclk); + clk_prepare_enable(hwdev->mclk); + + ret = hwdev->query_hw(hwdev); + if (ret) { + DRM_ERROR("Invalid HW configuration\n"); + goto query_hw_fail; + } + + version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID); + DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16, + (version >> 12) & 0xf, (version >> 8) & 0xf); + + /* set the number of lines used for output of RGB data */ + ret = of_property_read_u8_array(dev->of_node, + "arm,malidp-output-port-lines", + output_width, MAX_OUTPUT_CHANNELS); + if (ret) + goto query_hw_fail; + + for (i = 0; i < MAX_OUTPUT_CHANNELS; i++) + out_depth = (out_depth << 8) | (output_width[i] & 0xf); + malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base); + + drm->dev_private = malidp; + dev_set_drvdata(dev, drm); + atomic_set(&malidp->config_valid, 0); + init_waitqueue_head(&malidp->wq); + + ret = malidp_init(drm); + if (ret < 0) + goto init_fail; + + ret = drm_dev_register(drm, 0); + if (ret) + goto register_fail; + + /* Set the CRTC's port so that the encoder component can find it */ + malidp->crtc.port = of_graph_get_next_endpoint(dev->of_node, NULL); + + ret = component_bind_all(dev, drm); + of_node_put(malidp->crtc.port); + + if (ret) { + DRM_ERROR("Failed to bind all components\n"); + goto bind_fail; + } + + ret = malidp_irq_init(pdev); + if (ret < 0) + goto irq_init_fail; + + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (ret < 0) { + DRM_ERROR("failed to initialise vblank\n"); + goto vblank_fail; + } + + drm_mode_config_reset(drm); + + malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc, + drm->mode_config.num_connector); + + if (IS_ERR(malidp->fbdev)) { + ret = 
PTR_ERR(malidp->fbdev); + malidp->fbdev = NULL; + goto fbdev_fail; + } + + drm_kms_helper_poll_init(drm); + return 0; + +fbdev_fail: + drm_vblank_cleanup(drm); +vblank_fail: + malidp_se_irq_fini(drm); + malidp_de_irq_fini(drm); +irq_init_fail: + component_unbind_all(dev, drm); +bind_fail: + drm_dev_unregister(drm); +register_fail: + malidp_de_planes_destroy(drm); + drm_mode_config_cleanup(drm); +init_fail: + drm->dev_private = NULL; + dev_set_drvdata(dev, NULL); +query_hw_fail: + clk_disable_unprepare(hwdev->mclk); + clk_disable_unprepare(hwdev->aclk); + clk_disable_unprepare(hwdev->pclk); + drm_dev_unref(drm); +alloc_fail: + of_reserved_mem_device_release(dev); + + return ret; +} + +static void malidp_unbind(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + if (malidp->fbdev) { + drm_fbdev_cma_fini(malidp->fbdev); + malidp->fbdev = NULL; + } + drm_kms_helper_poll_fini(drm); + malidp_se_irq_fini(drm); + malidp_de_irq_fini(drm); + drm_vblank_cleanup(drm); + component_unbind_all(dev, drm); + drm_dev_unregister(drm); + malidp_de_planes_destroy(drm); + drm_mode_config_cleanup(drm); + drm->dev_private = NULL; + dev_set_drvdata(dev, NULL); + clk_disable_unprepare(hwdev->mclk); + clk_disable_unprepare(hwdev->aclk); + clk_disable_unprepare(hwdev->pclk); + drm_dev_unref(drm); + of_reserved_mem_device_release(dev); +} + +static const struct component_master_ops malidp_master_ops = { + .bind = malidp_bind, + .unbind = malidp_unbind, +}; + +static int malidp_compare_dev(struct device *dev, void *data) +{ + struct device_node *np = data; + + return dev->of_node == np; +} + +static int malidp_platform_probe(struct platform_device *pdev) +{ + struct device_node *port, *ep; + struct component_match *match = NULL; + + if (!pdev->dev.of_node) + return -ENODEV; + + /* there is only one output port inside each device, find it */ + ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL); + if (!ep) + return -ENODEV; + + if (!of_device_is_available(ep)) { + of_node_put(ep); + return -ENODEV; + } + + /* add the remote encoder port as component */ + port = of_graph_get_remote_port_parent(ep); + of_node_put(ep); + if (!port || !of_device_is_available(port)) { + of_node_put(port); + return -EAGAIN; + } + + component_match_add(&pdev->dev, &match, malidp_compare_dev, port); + return component_master_add_with_match(&pdev->dev, &malidp_master_ops, + match); +} + +static int malidp_platform_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &malidp_master_ops); + return 0; +} + +static struct platform_driver malidp_platform_driver = { + .probe = malidp_platform_probe, + .remove = malidp_platform_remove, + .driver = { + .name = "mali-dp", + .of_match_table = malidp_drm_of_match, + }, +}; + +module_platform_driver(malidp_platform_driver); + +MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>"); +MODULE_DESCRIPTION("ARM Mali DP DRM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h new file mode 100644 index 000000000000..95558fde214b --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_drv.h @@ -0,0 +1,54 @@ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. 
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP500/DP550/DP650 KMS/DRM driver structures + */ + +#ifndef __MALIDP_DRV_H__ +#define __MALIDP_DRV_H__ + +#include <linux/mutex.h> +#include <linux/wait.h> +#include "malidp_hw.h" + +struct malidp_drm { + struct malidp_hw_device *dev; + struct drm_fbdev_cma *fbdev; + struct list_head event_list; + struct drm_crtc crtc; + wait_queue_head_t wq; + atomic_t config_valid; +}; + +#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc) + +struct malidp_plane { + struct drm_plane base; + struct malidp_hw_device *hwdev; + const struct malidp_layer *layer; +}; + +struct malidp_plane_state { + struct drm_plane_state base; + + /* size of the required rotation memory if plane is rotated */ + u32 rotmem_size; +}; + +#define to_malidp_plane(x) container_of(x, struct malidp_plane, base) +#define to_malidp_plane_state(x) container_of(x, struct malidp_plane_state, base) + +int malidp_de_planes_init(struct drm_device *drm); +void malidp_de_planes_destroy(struct drm_device *drm); +int malidp_crtc_init(struct drm_device *drm); + +/* often used combination of rotational bits */ +#define MALIDP_ROTATED_MASK (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270)) + +#endif /* __MALIDP_DRV_H__ */ diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c new file mode 100644 index 000000000000..a6132f1d58c1 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -0,0 +1,691 @@ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP500/DP550/DP650 hardware manipulation routines. 
This is where + the differences between the various versions of the hardware are dealt with, + in an attempt to present a unified view to the rest of the driver code + */ + +#include <linux/types.h> +#include <linux/io.h> +#include <drm/drmP.h> +#include <video/videomode.h> +#include <video/display_timing.h> + +#include "malidp_drv.h" +#include "malidp_hw.h" + +static const struct malidp_input_format malidp500_de_formats[] = { + /* fourcc, layers supporting the format, internal id */ + { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 0 }, + { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 1 }, + { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 2 }, + { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 3 }, + { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 4 }, + { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 5 }, + { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 6 }, + { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 7 }, + { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 8 }, + { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 9 }, + { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 10 }, + { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 11 }, + { DRM_FORMAT_UYVY, DE_VIDEO1, 12 }, + { DRM_FORMAT_YUYV, DE_VIDEO1, 13 }, + { DRM_FORMAT_NV12, DE_VIDEO1, 14 }, + { DRM_FORMAT_YUV420, DE_VIDEO1, 15 }, +}; + +#define MALIDP_ID(__group, __format) \ + ((((__group) & 0x7) << 3) | ((__format) & 0x7)) + +#define MALIDP_COMMON_FORMATS \ + /* fourcc, layers supporting the format, internal id */ \ + { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 0) }, \ + { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 1) }, \ + { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 2) }, \ + { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 3) }, \ + { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 0) }, \ + { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 1) }, \ + { DRM_FORMAT_RGBA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 2) }, \ + { DRM_FORMAT_BGRA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 3) }, \ + { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 0) }, \ + { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 1) }, \ + { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 2) }, \ + { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 3) }, \ + { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 0) }, \ + { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 1) }, \ + { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 0) }, \ + { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \ + { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \ + { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \ + { DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \ + { DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \ + { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \ + { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) } +
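For reference, here is how the MALIDP_ID() macro above packs a 3-bit format group and a 3-bit index into the 6-bit hardware format id. This is a standalone sketch, not part of the driver; the malidp_id_group()/malidp_id_index() decode helpers are hypothetical and assume only the macro as defined above.

#include <stdio.h>

#define MALIDP_ID(__group, __format) \
	((((__group) & 0x7) << 3) | ((__format) & 0x7))

/* hypothetical decode helpers, the inverse of MALIDP_ID() */
static unsigned int malidp_id_group(unsigned int id) { return (id >> 3) & 0x7; }
static unsigned int malidp_id_index(unsigned int id) { return id & 0x7; }

int main(void)
{
	/* DRM_FORMAT_NV12 in the common format table is MALIDP_ID(5, 6) */
	unsigned int id = MALIDP_ID(5, 6);

	/* prints: id=46 group=5 index=6 */
	printf("id=%u group=%u index=%u\n", id,
	       malidp_id_group(id), malidp_id_index(id));
	return 0;
}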
+static const struct malidp_input_format malidp550_de_formats[] = { + MALIDP_COMMON_FORMATS, +}; + +static const struct malidp_layer malidp500_layers[] = { + { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE }, + { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE }, + { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE }, +}; + +static const struct malidp_layer malidp550_layers[] = { + { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE }, + { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE }, + { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE }, + { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE }, +}; + +#define MALIDP_DE_DEFAULT_PREFETCH_START 5 + +static int malidp500_query_hw(struct malidp_hw_device *hwdev) +{ + u32 conf = malidp_hw_read(hwdev, MALIDP500_CONFIG_ID); + /* bit 4 of the CONFIG_ID register holds the line size multiplier */ + u8 ln_size_mult = conf & 0x10 ? 2 : 1; + + hwdev->min_line_size = 2; + hwdev->max_line_size = SZ_2K * ln_size_mult; + hwdev->rotation_memory[0] = SZ_1K * 64 * ln_size_mult; + hwdev->rotation_memory[1] = 0; /* no second rotation memory bank */ + + return 0; +} + +static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status, count = 100; + + malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL); + while (count) { + status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ) + break; + /* + * entering config mode can take as long as the rendering + * of a full frame, hence the long sleep here + */ + usleep_range(1000, 10000); + count--; + } + WARN(count == 0, "timeout while entering config mode"); +} + +static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status, count = 100; + + malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL); + while (count) { + status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP500_DC_CONFIG_REQ) == 0) + break; + usleep_range(100, 1000); + count--; + } + WARN(count == 0, "timeout while leaving config mode"); +} + +static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status; + + status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ) + return true; + + return false; +} + +static void malidp500_set_config_valid(struct malidp_hw_device *hwdev) +{ + malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID); +} + +static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *mode) +{ + u32 val = 0; + + malidp_hw_clearbits(hwdev, MALIDP500_DC_CLEAR_MASK, MALIDP500_DC_CONTROL); + if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH) + val |= MALIDP500_HSYNCPOL; + if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH) + val |= MALIDP500_VSYNCPOL; + val |= MALIDP_DE_DEFAULT_PREFETCH_START; + malidp_hw_setbits(hwdev, val, MALIDP500_DC_CONTROL); + + /* + * Mali-DP500 encodes the background color like this: + * - red @ MALIDP500_BGND_COLOR[12:0] + * - green @ MALIDP500_BGND_COLOR[27:16] + * - blue @ (MALIDP500_BGND_COLOR + 4)[12:0] + */ + val = ((MALIDP_BGND_COLOR_G & 0xfff) << 16) | + (MALIDP_BGND_COLOR_R & 0xfff); + malidp_hw_write(hwdev, val, MALIDP500_BGND_COLOR); + malidp_hw_write(hwdev, MALIDP_BGND_COLOR_B, MALIDP500_BGND_COLOR + 4); + + val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) | + 
MALIDP_DE_H_BACKPORCH(mode->hback_porch); + malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_H_TIMINGS); + + val = MALIDP500_DE_V_FRONTPORCH(mode->vfront_porch) | + MALIDP_DE_V_BACKPORCH(mode->vback_porch); + malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_V_TIMINGS); + + val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) | + MALIDP_DE_V_SYNCWIDTH(mode->vsync_len); + malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH); + + val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive); + malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE); + + if (mode->flags & DISPLAY_FLAGS_INTERLACED) + malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); + else + malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); +} + +static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt) +{ + unsigned int depth; + int bpp; + + /* RGB888 or BGR888 can't be rotated */ + if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888)) + return -EINVAL; + + /* + * Each layer needs enough rotation memory to fit 8 lines + * worth of pixel data. Required size is then: + * size = rotated_width * (bpp / 8) * 8; + */ + drm_fb_get_bpp_depth(fmt, &depth, &bpp); + + return w * bpp; +} + +static int malidp550_query_hw(struct malidp_hw_device *hwdev) +{ + u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID); + u8 ln_size = (conf >> 4) & 0x3, rsize; + + hwdev->min_line_size = 2; + + switch (ln_size) { + case 0: + hwdev->max_line_size = SZ_2K; + /* two banks of 64KB for rotation memory */ + rsize = 64; + break; + case 1: + hwdev->max_line_size = SZ_4K; + /* two banks of 128KB for rotation memory */ + rsize = 128; + break; + case 2: + hwdev->max_line_size = 1280; + /* two banks of 40KB for rotation memory */ + rsize = 40; + break; + case 3: + /* reserved value */ + hwdev->max_line_size = 0; + return -EINVAL; + } + + hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K; + return 0; +} + +static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status, count = 100; + + malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL); + while (count) { + status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ) + break; + /* + * entering config mode can take as long as the rendering + * of a full frame, hence the long sleep here + */ + usleep_range(1000, 10000); + count--; + } + WARN(count == 0, "timeout while entering config mode"); +} + +static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status, count = 100; + + malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL); + while (count) { + status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP550_DC_CONFIG_REQ) == 0) + break; + usleep_range(100, 1000); + count--; + } + WARN(count == 0, "timeout while leaving config mode"); +} + +static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev) +{ + u32 status; + + status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ) + return true; + + return false; +} + +static void malidp550_set_config_valid(struct malidp_hw_device *hwdev) +{ + malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID); +} + +static void malidp550_modeset(struct malidp_hw_device *hwdev, struct 
videomode *mode) +{ + u32 val = MALIDP_DE_DEFAULT_PREFETCH_START; + + malidp_hw_write(hwdev, val, MALIDP550_DE_CONTROL); + /* + * Mali-DP550 and Mali-DP650 encode the background color like this: + * - red @ MALIDP550_DE_BGND_COLOR[23:16] + * - green @ MALIDP550_DE_BGND_COLOR[15:8] + * - blue @ MALIDP550_DE_BGND_COLOR[7:0] + * + * We need to truncate the least significant 4 bits from the default + * MALIDP_BGND_COLOR_x values + */ + val = (((MALIDP_BGND_COLOR_R >> 4) & 0xff) << 16) | + (((MALIDP_BGND_COLOR_G >> 4) & 0xff) << 8) | + ((MALIDP_BGND_COLOR_B >> 4) & 0xff); + malidp_hw_write(hwdev, val, MALIDP550_DE_BGND_COLOR); + + val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) | + MALIDP_DE_H_BACKPORCH(mode->hback_porch); + malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_H_TIMINGS); + + val = MALIDP550_DE_V_FRONTPORCH(mode->vfront_porch) | + MALIDP_DE_V_BACKPORCH(mode->vback_porch); + malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_V_TIMINGS); + + val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) | + MALIDP_DE_V_SYNCWIDTH(mode->vsync_len); + if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH) + val |= MALIDP550_HSYNCPOL; + if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH) + val |= MALIDP550_VSYNCPOL; + malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH); + + val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive); + malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE); + + if (mode->flags & DISPLAY_FLAGS_INTERLACED) + malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); + else + malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); +} + +static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt) +{ + u32 bytes_per_col; + + /* raw RGB888 or BGR888 can't be rotated */ + if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888)) + return -EINVAL; + + switch (fmt) { + /* 8 lines at 4 bytes per pixel */ + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: + /* 16 lines at 2 bytes per pixel */ + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_YUYV: + bytes_per_col = 32; + break; + /* 16 lines at 1.5 bytes per pixel */ + case DRM_FORMAT_NV12: + case DRM_FORMAT_YUV420: + bytes_per_col = 24; + break; + default: + return -EINVAL; + } + + return w * bytes_per_col; +} + +static int malidp650_query_hw(struct malidp_hw_device *hwdev) +{ + u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID); + u8 ln_size = (conf >> 4) & 0x3, rsize; + + hwdev->min_line_size = 4; + + switch (ln_size) { + case 0: + case 2: + /* reserved values */ + hwdev->max_line_size = 0; + return -EINVAL; + case 1: + hwdev->max_line_size = SZ_4K; + /* two banks of 128KB for rotation memory */ + rsize = 128; + break; + case 3: + hwdev->max_line_size = 2560; + /* two banks of 80KB for rotation memory */ + rsize = 80; + } + + hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K; + return 0; +} + +const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { + [MALIDP_500] = { + .map = { + .se_base = MALIDP500_SE_BASE, + 
.dc_base = MALIDP500_DC_BASE, + .out_depth_base = MALIDP500_OUTPUT_DEPTH, + .features = 0, /* no CLEARIRQ register */ + .n_layers = ARRAY_SIZE(malidp500_layers), + .layers = malidp500_layers, + .de_irq_map = { + .irq_mask = MALIDP_DE_IRQ_UNDERRUN | + MALIDP500_DE_IRQ_AXI_ERR | + MALIDP500_DE_IRQ_VSYNC | + MALIDP500_DE_IRQ_GLOBAL, + .vsync_irq = MALIDP500_DE_IRQ_VSYNC, + }, + .se_irq_map = { + .irq_mask = MALIDP500_SE_IRQ_CONF_MODE, + .vsync_irq = 0, + }, + .dc_irq_map = { + .irq_mask = MALIDP500_DE_IRQ_CONF_VALID, + .vsync_irq = MALIDP500_DE_IRQ_CONF_VALID, + }, + .input_formats = malidp500_de_formats, + .n_input_formats = ARRAY_SIZE(malidp500_de_formats), + }, + .query_hw = malidp500_query_hw, + .enter_config_mode = malidp500_enter_config_mode, + .leave_config_mode = malidp500_leave_config_mode, + .in_config_mode = malidp500_in_config_mode, + .set_config_valid = malidp500_set_config_valid, + .modeset = malidp500_modeset, + .rotmem_required = malidp500_rotmem_required, + }, + [MALIDP_550] = { + .map = { + .se_base = MALIDP550_SE_BASE, + .dc_base = MALIDP550_DC_BASE, + .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, + .features = MALIDP_REGMAP_HAS_CLEARIRQ, + .n_layers = ARRAY_SIZE(malidp550_layers), + .layers = malidp550_layers, + .de_irq_map = { + .irq_mask = MALIDP_DE_IRQ_UNDERRUN | + MALIDP550_DE_IRQ_VSYNC, + .vsync_irq = MALIDP550_DE_IRQ_VSYNC, + }, + .se_irq_map = { + .irq_mask = MALIDP550_SE_IRQ_EOW | + MALIDP550_SE_IRQ_AXI_ERR, + }, + .dc_irq_map = { + .irq_mask = MALIDP550_DC_IRQ_CONF_VALID, + .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID, + }, + .input_formats = malidp550_de_formats, + .n_input_formats = ARRAY_SIZE(malidp550_de_formats), + }, + .query_hw = malidp550_query_hw, + .enter_config_mode = malidp550_enter_config_mode, + .leave_config_mode = malidp550_leave_config_mode, + .in_config_mode = malidp550_in_config_mode, + .set_config_valid = malidp550_set_config_valid, + .modeset = malidp550_modeset, + .rotmem_required = malidp550_rotmem_required, + }, + [MALIDP_650] = { + .map = { + .se_base = MALIDP550_SE_BASE, + .dc_base = MALIDP550_DC_BASE, + .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, + .features = MALIDP_REGMAP_HAS_CLEARIRQ, + .n_layers = ARRAY_SIZE(malidp550_layers), + .layers = malidp550_layers, + .de_irq_map = { + .irq_mask = MALIDP_DE_IRQ_UNDERRUN | + MALIDP650_DE_IRQ_DRIFT | + MALIDP550_DE_IRQ_VSYNC, + .vsync_irq = MALIDP550_DE_IRQ_VSYNC, + }, + .se_irq_map = { + .irq_mask = MALIDP550_SE_IRQ_EOW | + MALIDP550_SE_IRQ_AXI_ERR, + }, + .dc_irq_map = { + .irq_mask = MALIDP550_DC_IRQ_CONF_VALID, + .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID, + }, + .input_formats = malidp550_de_formats, + .n_input_formats = ARRAY_SIZE(malidp550_de_formats), + }, + .query_hw = malidp650_query_hw, + .enter_config_mode = malidp550_enter_config_mode, + .leave_config_mode = malidp550_leave_config_mode, + .in_config_mode = malidp550_in_config_mode, + .set_config_valid = malidp550_set_config_valid, + .modeset = malidp550_modeset, + .rotmem_required = malidp550_rotmem_required, + }, +}; + +u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, + u8 layer_id, u32 format) +{ + unsigned int i; + + for (i = 0; i < map->n_input_formats; i++) { + if (((map->input_formats[i].layer & layer_id) == layer_id) && + (map->input_formats[i].format == format)) + return map->input_formats[i].id; + } + + return MALIDP_INVALID_FORMAT_ID; +} + +static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq) +{ + u32 base = malidp_get_block_base(hwdev, block); + + if (hwdev->map.features & 
MALIDP_REGMAP_HAS_CLEARIRQ) + malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ); + else + malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS); +} + +static irqreturn_t malidp_de_irq(int irq, void *arg) +{ + struct drm_device *drm = arg; + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev; + const struct malidp_irq_map *de; + u32 status, mask, dc_status; + irqreturn_t ret = IRQ_NONE; + + if (!drm->dev_private) + return IRQ_HANDLED; + + hwdev = malidp->dev; + de = &hwdev->map.de_irq_map; + + /* first handle the config valid IRQ */ + dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); + if (dc_status & hwdev->map.dc_irq_map.vsync_irq) { + /* we have a page flip event */ + atomic_set(&malidp->config_valid, 1); + malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status); + ret = IRQ_WAKE_THREAD; + } + + status = malidp_hw_read(hwdev, MALIDP_REG_STATUS); + if (!(status & de->irq_mask)) + return ret; + + mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ); + status &= mask; + if (status & de->vsync_irq) + drm_crtc_handle_vblank(&malidp->crtc); + + malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status); + + return (ret == IRQ_NONE) ? IRQ_HANDLED : ret; +} + +static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg) +{ + struct drm_device *drm = arg; + struct malidp_drm *malidp = drm->dev_private; + + wake_up(&malidp->wq); + + return IRQ_HANDLED; +} + +int malidp_de_irq_init(struct drm_device *drm, int irq) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + int ret; + + /* ensure interrupts are disabled */ + malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff); + malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff); + malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff); + malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff); + + ret = devm_request_threaded_irq(drm->dev, irq, malidp_de_irq, + malidp_de_irq_thread_handler, + IRQF_SHARED, "malidp-de", drm); + if (ret < 0) { + DRM_ERROR("failed to install DE IRQ handler\n"); + return ret; + } + + /* first enable the DC block IRQs */ + malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK, + hwdev->map.dc_irq_map.irq_mask); + + /* now enable the DE block IRQs */ + malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK, + hwdev->map.de_irq_map.irq_mask); + + return 0; +} + +void malidp_de_irq_fini(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, + hwdev->map.de_irq_map.irq_mask); + malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, + hwdev->map.dc_irq_map.irq_mask); +} + +static irqreturn_t malidp_se_irq(int irq, void *arg) +{ + struct drm_device *drm = arg; + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + u32 status, mask; + + status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS); + if (!(status & hwdev->map.se_irq_map.irq_mask)) + return IRQ_NONE; + + mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ); + status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS); + status &= mask; + /* ToDo: status decoding and firing up of VSYNC and page flip events */ + + malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, status); + + return IRQ_HANDLED; +} + +static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg) +{ + return IRQ_HANDLED; +} + +int malidp_se_irq_init(struct drm_device *drm, int irq) +{ + struct malidp_drm *malidp = 
drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + int ret; + + /* ensure interrupts are disabled */ + malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff); + malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff); + + ret = devm_request_threaded_irq(drm->dev, irq, malidp_se_irq, + malidp_se_irq_thread_handler, + IRQF_SHARED, "malidp-se", drm); + if (ret < 0) { + DRM_ERROR("failed to install SE IRQ handler\n"); + return ret; + } + + malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK, + hwdev->map.se_irq_map.irq_mask); + + return 0; +} + +void malidp_se_irq_fini(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, + hwdev->map.se_irq_map.irq_mask); +} diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h new file mode 100644 index 000000000000..141743e9f3a6 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_hw.h @@ -0,0 +1,241 @@ +/* + * + * (C) COPYRIGHT 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP hardware manipulation routines. + */ + +#ifndef __MALIDP_HW_H__ +#define __MALIDP_HW_H__ + +#include <linux/bitops.h> +#include "malidp_regs.h" + +struct videomode; +struct clk; + +/* Mali DP IP blocks */ +enum { + MALIDP_DE_BLOCK = 0, + MALIDP_SE_BLOCK, + MALIDP_DC_BLOCK +}; + +/* Mali DP layer IDs */ +enum { + DE_VIDEO1 = BIT(0), + DE_GRAPHICS1 = BIT(1), + DE_GRAPHICS2 = BIT(2), /* used only in DP500 */ + DE_VIDEO2 = BIT(3), + DE_SMART = BIT(4), +}; + +struct malidp_input_format { + u32 format; /* DRM fourcc */ + u8 layer; /* bitmask of layers supporting it */ + u8 id; /* used internally */ +}; + +#define MALIDP_INVALID_FORMAT_ID 0xff + +/* + * hide the differences between register maps + * by using a common structure to hold the + * base register offsets + */ + +struct malidp_irq_map { + u32 irq_mask; /* mask of IRQs that can be enabled in the block */ + u32 vsync_irq; /* IRQ bit used for signaling during VSYNC */ +}; + +struct malidp_layer { + u16 id; /* layer ID */ + u16 base; /* address offset for the register bank */ + u16 ptr; /* address offset for the pointer register */ +}; + +/* regmap features */ +#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0) + +struct malidp_hw_regmap { + /* address offset of the DE register bank */ + /* is always 0x0000 */ + /* address offset of the SE registers bank */ + const u16 se_base; + /* address offset of the DC registers bank */ + const u16 dc_base; + + /* address offset for the output depth register */ + const u16 out_depth_base; + + /* bitmap with register map features */ + const u8 features; + + /* list of supported layers */ + const u8 n_layers; + const struct malidp_layer *layers; + + const struct malidp_irq_map de_irq_map; + const struct malidp_irq_map se_irq_map; + const struct malidp_irq_map dc_irq_map; + + /* list of supported input formats for each layer */ + const struct malidp_input_format *input_formats; + const u8 n_input_formats; +}; + +struct malidp_hw_device { + const struct malidp_hw_regmap map; + void __iomem *regs; + + /* APB clock */ + struct clk *pclk; + /* AXI clock */ + struct clk *aclk; + /* main clock for display core */ + struct clk *mclk; + /* pixel clock for display core */ + struct clk *pxlclk; + + /* 
+ * Validate the driver instance against the hardware bits + */ + int (*query_hw)(struct malidp_hw_device *hwdev); + + /* + * Set the hardware into config mode, ready to accept mode changes + */ + void (*enter_config_mode)(struct malidp_hw_device *hwdev); + + /* + * Tell hardware to exit configuration mode + */ + void (*leave_config_mode)(struct malidp_hw_device *hwdev); + + /* + * Query if hardware is in configuration mode + */ + bool (*in_config_mode)(struct malidp_hw_device *hwdev); + + /* + * Set configuration valid flag for hardware parameters that can + * be changed outside the configuration mode. Hardware will use + * the new settings when config valid is set after the end of the + * current buffer scanout + */ + void (*set_config_valid)(struct malidp_hw_device *hwdev); + + /* + * Set a new mode in hardware. Requires the hardware to be in + * configuration mode before this function is called. + */ + void (*modeset)(struct malidp_hw_device *hwdev, struct videomode *m); + + /* + * Calculate the required rotation memory given the active area + * and the buffer format. + */ + int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt); + + u8 features; + + u8 min_line_size; + u16 max_line_size; + + /* size of memory used for rotating layers, up to two banks available */ + u32 rotation_memory[2]; +}; + +/* Supported variants of the hardware */ +enum { + MALIDP_500 = 0, + MALIDP_550, + MALIDP_650, + /* keep the next entry last */ + MALIDP_MAX_DEVICES +}; + +extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES]; + +static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg) +{ + return readl(hwdev->regs + reg); +} + +static inline void malidp_hw_write(struct malidp_hw_device *hwdev, + u32 value, u32 reg) +{ + writel(value, hwdev->regs + reg); +} + +static inline void malidp_hw_setbits(struct malidp_hw_device *hwdev, + u32 mask, u32 reg) +{ + u32 data = malidp_hw_read(hwdev, reg); + + data |= mask; + malidp_hw_write(hwdev, data, reg); +} + +static inline void malidp_hw_clearbits(struct malidp_hw_device *hwdev, + u32 mask, u32 reg) +{ + u32 data = malidp_hw_read(hwdev, reg); + + data &= ~mask; + malidp_hw_write(hwdev, data, reg); +} + +static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev, + u8 block) +{ + switch (block) { + case MALIDP_SE_BLOCK: + return hwdev->map.se_base; + case MALIDP_DC_BLOCK: + return hwdev->map.dc_base; + } + + return 0; +} + +static inline void malidp_hw_disable_irq(struct malidp_hw_device *hwdev, + u8 block, u32 irq) +{ + u32 base = malidp_get_block_base(hwdev, block); + + malidp_hw_clearbits(hwdev, irq, base + MALIDP_REG_MASKIRQ); +} + +static inline void malidp_hw_enable_irq(struct malidp_hw_device *hwdev, + u8 block, u32 irq) +{ + u32 base = malidp_get_block_base(hwdev, block); + + malidp_hw_setbits(hwdev, irq, base + MALIDP_REG_MASKIRQ); +} + +int malidp_de_irq_init(struct drm_device *drm, int irq); +void malidp_de_irq_fini(struct drm_device *drm); +int malidp_se_irq_init(struct drm_device *drm, int irq); +void malidp_se_irq_fini(struct drm_device *drm); + +u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, + u8 layer_id, u32 format); + +/* + * background color components are defined as 12-bit values; + * they will be shifted right when stored on hardware that + * supports only 8 bits per channel + */ +#define MALIDP_BGND_COLOR_R 0x000 +#define MALIDP_BGND_COLOR_G 0x000 +#define MALIDP_BGND_COLOR_B 0x000 + +#endif /* __MALIDP_HW_H__ */ diff --git a/drivers/gpu/drm/arm/malidp_planes.c 
b/drivers/gpu/drm/arm/malidp_planes.c new file mode 100644 index 000000000000..725098d6179a --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -0,0 +1,298 @@ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP plane manipulation routines. + */ + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_plane_helper.h> + +#include "malidp_hw.h" +#include "malidp_drv.h" + +/* Layer specific register offsets */ +#define MALIDP_LAYER_FORMAT 0x000 +#define MALIDP_LAYER_CONTROL 0x004 +#define LAYER_ENABLE (1 << 0) +#define LAYER_ROT_OFFSET 8 +#define LAYER_H_FLIP (1 << 10) +#define LAYER_V_FLIP (1 << 11) +#define LAYER_ROT_MASK (0xf << 8) +#define MALIDP_LAYER_SIZE 0x00c +#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0) +#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16) +#define MALIDP_LAYER_COMP_SIZE 0x010 +#define MALIDP_LAYER_OFFSET 0x014 +#define MALIDP_LAYER_STRIDE 0x018 + +static void malidp_de_plane_destroy(struct drm_plane *plane) +{ + struct malidp_plane *mp = to_malidp_plane(plane); + + if (mp->base.fb) + drm_framebuffer_unreference(mp->base.fb); + + drm_plane_helper_disable(plane); + drm_plane_cleanup(plane); + devm_kfree(plane->dev->dev, mp); +} + +struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane) +{ + struct malidp_plane_state *state, *m_state; + + if (!plane->state) + return NULL; + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (state) { + m_state = to_malidp_plane_state(plane->state); + __drm_atomic_helper_plane_duplicate_state(plane, &state->base); + state->rotmem_size = m_state->rotmem_size; + } + + return &state->base; +} + +void malidp_destroy_plane_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct malidp_plane_state *m_state = to_malidp_plane_state(state); + + __drm_atomic_helper_plane_destroy_state(state); + kfree(m_state); +} + +static const struct drm_plane_funcs malidp_de_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = malidp_de_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = malidp_duplicate_plane_state, + .atomic_destroy_state = malidp_destroy_plane_state, +}; + +static int malidp_de_plane_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct malidp_plane *mp = to_malidp_plane(plane); + struct malidp_plane_state *ms = to_malidp_plane_state(state); + u8 format_id; + u32 src_w, src_h; + + if (!state->crtc || !state->fb) + return 0; + + format_id = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id, + state->fb->pixel_format); + if (format_id == MALIDP_INVALID_FORMAT_ID) + return -EINVAL; + + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + + if ((state->crtc_w > mp->hwdev->max_line_size) || + (state->crtc_h > mp->hwdev->max_line_size) || + (state->crtc_w < mp->hwdev->min_line_size) || + (state->crtc_h < mp->hwdev->min_line_size) || + (state->crtc_w != src_w) || (state->crtc_h != src_h)) + return -EINVAL; + + /* packed RGB888 / BGR888 can't be rotated or flipped */ + if (state->rotation != BIT(DRM_ROTATE_0) && + (state->fb->pixel_format == 
DRM_FORMAT_RGB888 || + state->fb->pixel_format == DRM_FORMAT_BGR888)) + return -EINVAL; + + ms->rotmem_size = 0; + if (state->rotation & MALIDP_ROTATED_MASK) { + int val; + + val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h, + state->crtc_w, + state->fb->pixel_format); + if (val < 0) + return val; + + ms->rotmem_size = val; + } + + return 0; +} + +static void malidp_de_plane_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_gem_cma_object *obj; + struct malidp_plane *mp; + const struct malidp_hw_regmap *map; + u8 format_id; + u16 ptr; + u32 format, src_w, src_h, dest_w, dest_h, val = 0; + int num_planes, i; + + mp = to_malidp_plane(plane); + + map = &mp->hwdev->map; + format = plane->state->fb->pixel_format; + format_id = malidp_hw_get_format_id(map, mp->layer->id, format); + num_planes = drm_format_num_planes(format); + + /* convert src values from Q16 fixed point to integer */ + src_w = plane->state->src_w >> 16; + src_h = plane->state->src_h >> 16; + if (plane->state->rotation & MALIDP_ROTATED_MASK) { + dest_w = plane->state->crtc_h; + dest_h = plane->state->crtc_w; + } else { + dest_w = plane->state->crtc_w; + dest_h = plane->state->crtc_h; + } + + malidp_hw_write(mp->hwdev, format_id, mp->layer->base); + + for (i = 0; i < num_planes; i++) { + /* calculate the offset for the layer's plane registers */ + ptr = mp->layer->ptr + (i << 4); + + obj = drm_fb_cma_get_gem_obj(plane->state->fb, i); + malidp_hw_write(mp->hwdev, lower_32_bits(obj->paddr), ptr); + malidp_hw_write(mp->hwdev, upper_32_bits(obj->paddr), ptr + 4); + malidp_hw_write(mp->hwdev, plane->state->fb->pitches[i], + mp->layer->base + MALIDP_LAYER_STRIDE); + } + + malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h), + mp->layer->base + MALIDP_LAYER_SIZE); + + malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h), + mp->layer->base + MALIDP_LAYER_COMP_SIZE); + + malidp_hw_write(mp->hwdev, LAYER_H_VAL(plane->state->crtc_x) | + LAYER_V_VAL(plane->state->crtc_y), + mp->layer->base + MALIDP_LAYER_OFFSET); + + /* first clear the rotation bits in the register */ + malidp_hw_clearbits(mp->hwdev, LAYER_ROT_MASK, + mp->layer->base + MALIDP_LAYER_CONTROL); + + /* setup the rotation and axis flip bits */ + if (plane->state->rotation & DRM_ROTATE_MASK) + val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET; + if (plane->state->rotation & BIT(DRM_REFLECT_X)) + val |= LAYER_V_FLIP; + if (plane->state->rotation & BIT(DRM_REFLECT_Y)) + val |= LAYER_H_FLIP; + + /* set the 'enable layer' bit */ + val |= LAYER_ENABLE; + + malidp_hw_setbits(mp->hwdev, val, + mp->layer->base + MALIDP_LAYER_CONTROL); +} + +static void malidp_de_plane_disable(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct malidp_plane *mp = to_malidp_plane(plane); + + malidp_hw_clearbits(mp->hwdev, LAYER_ENABLE, + mp->layer->base + MALIDP_LAYER_CONTROL); +} + +static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = { + .atomic_check = malidp_de_plane_check, + .atomic_update = malidp_de_plane_update, + .atomic_disable = malidp_de_plane_disable, +}; + +int malidp_de_planes_init(struct drm_device *drm) +{ + struct malidp_drm *malidp = drm->dev_private; + const struct malidp_hw_regmap *map = &malidp->dev->map; + struct malidp_plane *plane = NULL; + enum drm_plane_type plane_type; + unsigned long crtcs = 1 << drm->mode_config.num_crtc; + u32 *formats; + int ret, i, j, n; + + formats = kcalloc(map->n_input_formats, sizeof(*formats), GFP_KERNEL); + if 
(!formats) { + ret = -ENOMEM; + goto cleanup; + } + + for (i = 0; i < map->n_layers; i++) { + u8 id = map->layers[i].id; + + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) { + ret = -ENOMEM; + goto cleanup; + } + + /* build the list of DRM supported formats based on the map */ + for (n = 0, j = 0; j < map->n_input_formats; j++) { + if ((map->input_formats[j].layer & id) == id) + formats[n++] = map->input_formats[j].format; + } + + plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY : + DRM_PLANE_TYPE_OVERLAY; + ret = drm_universal_plane_init(drm, &plane->base, crtcs, + &malidp_de_plane_funcs, formats, + n, plane_type, NULL); + if (ret < 0) + goto cleanup; + + if (!drm->mode_config.rotation_property) { + unsigned long flags = BIT(DRM_ROTATE_0) | + BIT(DRM_ROTATE_90) | + BIT(DRM_ROTATE_180) | + BIT(DRM_ROTATE_270) | + BIT(DRM_REFLECT_X) | + BIT(DRM_REFLECT_Y); + drm->mode_config.rotation_property = + drm_mode_create_rotation_property(drm, flags); + } + /* SMART layer can't be rotated */ + if (drm->mode_config.rotation_property && (id != DE_SMART)) + drm_object_attach_property(&plane->base.base, + drm->mode_config.rotation_property, + BIT(DRM_ROTATE_0)); + + drm_plane_helper_add(&plane->base, + &malidp_de_plane_helper_funcs); + plane->hwdev = malidp->dev; + plane->layer = &map->layers[i]; + } + + kfree(formats); + + return 0; + +cleanup: + malidp_de_planes_destroy(drm); + kfree(formats); + + return ret; +} + +void malidp_de_planes_destroy(struct drm_device *drm) +{ + struct drm_plane *p, *pt; + + list_for_each_entry_safe(p, pt, &drm->mode_config.plane_list, head) { + drm_plane_cleanup(p); + kfree(p); + } +} diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h new file mode 100644 index 000000000000..73fecb38f955 --- /dev/null +++ b/drivers/gpu/drm/arm/malidp_regs.h @@ -0,0 +1,172 @@ +/* + * (C) COPYRIGHT 2016 ARM Limited. All rights reserved. + * Author: Liviu Dudau <Liviu.Dudau@arm.com> + * + * This program is free software and is provided to you under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation, and any use by you of this program is subject to the terms + * of such GNU licence. + * + * ARM Mali DP500/DP550/DP650 registers definition. 
+ */ + +#ifndef __MALIDP_REGS_H__ +#define __MALIDP_REGS_H__ + +/* + * abbreviations used: + * - DC - display core (general settings) + * - DE - display engine + * - SE - scaling engine + */ + +/* interrupt bit masks */ +#define MALIDP_DE_IRQ_UNDERRUN (1 << 0) + +#define MALIDP500_DE_IRQ_AXI_ERR (1 << 4) +#define MALIDP500_DE_IRQ_VSYNC (1 << 5) +#define MALIDP500_DE_IRQ_PROG_LINE (1 << 6) +#define MALIDP500_DE_IRQ_SATURATION (1 << 7) +#define MALIDP500_DE_IRQ_CONF_VALID (1 << 8) +#define MALIDP500_DE_IRQ_CONF_MODE (1 << 11) +#define MALIDP500_DE_IRQ_CONF_ACTIVE (1 << 17) +#define MALIDP500_DE_IRQ_PM_ACTIVE (1 << 18) +#define MALIDP500_DE_IRQ_TESTMODE_ACTIVE (1 << 19) +#define MALIDP500_DE_IRQ_FORCE_BLNK_ACTIVE (1 << 24) +#define MALIDP500_DE_IRQ_AXI_BUSY (1 << 28) +#define MALIDP500_DE_IRQ_GLOBAL (1 << 31) +#define MALIDP500_SE_IRQ_CONF_MODE (1 << 0) +#define MALIDP500_SE_IRQ_CONF_VALID (1 << 4) +#define MALIDP500_SE_IRQ_INIT_BUSY (1 << 5) +#define MALIDP500_SE_IRQ_AXI_ERROR (1 << 8) +#define MALIDP500_SE_IRQ_OVERRUN (1 << 9) +#define MALIDP500_SE_IRQ_PROG_LINE1 (1 << 12) +#define MALIDP500_SE_IRQ_PROG_LINE2 (1 << 13) +#define MALIDP500_SE_IRQ_CONF_ACTIVE (1 << 17) +#define MALIDP500_SE_IRQ_PM_ACTIVE (1 << 18) +#define MALIDP500_SE_IRQ_AXI_BUSY (1 << 28) +#define MALIDP500_SE_IRQ_GLOBAL (1 << 31) + +#define MALIDP550_DE_IRQ_SATURATION (1 << 8) +#define MALIDP550_DE_IRQ_VSYNC (1 << 12) +#define MALIDP550_DE_IRQ_PROG_LINE (1 << 13) +#define MALIDP550_DE_IRQ_AXI_ERR (1 << 16) +#define MALIDP550_SE_IRQ_EOW (1 << 0) +#define MALIDP550_SE_IRQ_AXI_ERR (1 << 16) +#define MALIDP550_DC_IRQ_CONF_VALID (1 << 0) +#define MALIDP550_DC_IRQ_CONF_MODE (1 << 4) +#define MALIDP550_DC_IRQ_CONF_ACTIVE (1 << 16) +#define MALIDP550_DC_IRQ_DE (1 << 20) +#define MALIDP550_DC_IRQ_SE (1 << 24) + +#define MALIDP650_DE_IRQ_DRIFT (1 << 4) + +/* bit masks that are common between products */ +#define MALIDP_CFG_VALID (1 << 0) +#define MALIDP_DISP_FUNC_ILACED (1 << 8) + +/* register offsets for IRQ management */ +#define MALIDP_REG_STATUS 0x00000 +#define MALIDP_REG_SETIRQ 0x00004 +#define MALIDP_REG_MASKIRQ 0x00008 +#define MALIDP_REG_CLEARIRQ 0x0000c + +/* register offsets */ +#define MALIDP_DE_CORE_ID 0x00018 +#define MALIDP_DE_DISPLAY_FUNC 0x00020 + +/* these offsets are relative to MALIDP5x0_TIMINGS_BASE */ +#define MALIDP_DE_H_TIMINGS 0x0 +#define MALIDP_DE_V_TIMINGS 0x4 +#define MALIDP_DE_SYNC_WIDTH 0x8 +#define MALIDP_DE_HV_ACTIVE 0xc + +/* macros to set values into registers */ +#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0) +#define MALIDP_DE_H_BACKPORCH(x) (((x) & 0x3ff) << 16) +#define MALIDP500_DE_V_FRONTPORCH(x) (((x) & 0xff) << 0) +#define MALIDP550_DE_V_FRONTPORCH(x) (((x) & 0xfff) << 0) +#define MALIDP_DE_V_BACKPORCH(x) (((x) & 0xff) << 16) +#define MALIDP_DE_H_SYNCWIDTH(x) (((x) & 0x3ff) << 0) +#define MALIDP_DE_V_SYNCWIDTH(x) (((x) & 0xff) << 16) +#define MALIDP_DE_H_ACTIVE(x) (((x) & 0x1fff) << 0) +#define MALIDP_DE_V_ACTIVE(x) (((x) & 0x1fff) << 16) + +/* register offsets and bits specific to DP500 */ +#define MALIDP500_DC_BASE 0x00000 +#define MALIDP500_DC_CONTROL 0x0000c +#define MALIDP500_DC_CONFIG_REQ (1 << 17) +#define MALIDP500_HSYNCPOL (1 << 20) +#define MALIDP500_VSYNCPOL (1 << 21) +#define MALIDP500_DC_CLEAR_MASK 0x300fff +#define MALIDP500_DE_LINE_COUNTER 0x00010 +#define MALIDP500_DE_AXI_CONTROL 0x00014 +#define MALIDP500_DE_SECURE_CTRL 0x0001c +#define MALIDP500_DE_CHROMA_KEY 0x00024 +#define MALIDP500_TIMINGS_BASE 0x00028 + +#define MALIDP500_CONFIG_3D 0x00038 +#define 
MALIDP500_BGND_COLOR 0x0003c +#define MALIDP500_OUTPUT_DEPTH 0x00044 +#define MALIDP500_YUV_RGB_COEF 0x00048 +#define MALIDP500_COLOR_ADJ_COEF 0x00078 +#define MALIDP500_COEF_TABLE_ADDR 0x000a8 +#define MALIDP500_COEF_TABLE_DATA 0x000ac +#define MALIDP500_DE_LV_BASE 0x00100 +#define MALIDP500_DE_LV_PTR_BASE 0x00124 +#define MALIDP500_DE_LG1_BASE 0x00200 +#define MALIDP500_DE_LG1_PTR_BASE 0x0021c +#define MALIDP500_DE_LG2_BASE 0x00300 +#define MALIDP500_DE_LG2_PTR_BASE 0x0031c +#define MALIDP500_SE_BASE 0x00c00 +#define MALIDP500_SE_PTR_BASE 0x00e0c +#define MALIDP500_DC_IRQ_BASE 0x00f00 +#define MALIDP500_CONFIG_VALID 0x00f00 +#define MALIDP500_CONFIG_ID 0x00fd4 + +/* register offsets and bits specific to DP550/DP650 */ +#define MALIDP550_DE_CONTROL 0x00010 +#define MALIDP550_DE_LINE_COUNTER 0x00014 +#define MALIDP550_DE_AXI_CONTROL 0x00018 +#define MALIDP550_DE_QOS 0x0001c +#define MALIDP550_TIMINGS_BASE 0x00030 +#define MALIDP550_HSYNCPOL (1 << 12) +#define MALIDP550_VSYNCPOL (1 << 28) + +#define MALIDP550_DE_DISP_SIDEBAND 0x00040 +#define MALIDP550_DE_BGND_COLOR 0x00044 +#define MALIDP550_DE_OUTPUT_DEPTH 0x0004c +#define MALIDP550_DE_COLOR_COEF 0x00050 +#define MALIDP550_DE_COEF_TABLE_ADDR 0x00080 +#define MALIDP550_DE_COEF_TABLE_DATA 0x00084 +#define MALIDP550_DE_LV1_BASE 0x00100 +#define MALIDP550_DE_LV1_PTR_BASE 0x00124 +#define MALIDP550_DE_LV2_BASE 0x00200 +#define MALIDP550_DE_LV2_PTR_BASE 0x00224 +#define MALIDP550_DE_LG_BASE 0x00300 +#define MALIDP550_DE_LG_PTR_BASE 0x0031c +#define MALIDP550_DE_LS_BASE 0x00400 +#define MALIDP550_DE_LS_PTR_BASE 0x0042c +#define MALIDP550_DE_PERF_BASE 0x00500 +#define MALIDP550_SE_BASE 0x08000 +#define MALIDP550_DC_BASE 0x0c000 +#define MALIDP550_DC_CONTROL 0x0c010 +#define MALIDP550_DC_CONFIG_REQ (1 << 16) +#define MALIDP550_CONFIG_VALID 0x0c014 +#define MALIDP550_CONFIG_ID 0x0ffd4 + +/* + * Starting with DP550 the register map blocks have been standardised to the + * following layout: + * + * Offset Block registers + * 0x00000 Display Engine + * 0x08000 Scaling Engine + * 0x0c000 Display Core + * 0x10000 Secure control + * + * The old DP500 IP mixes some DC with the DE registers, hence the need + * for a mapping structure. + */ + +#endif /* __MALIDP_REGS_H__ */ diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 3130aa8bcdd0..34405e4a5d36 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -199,7 +199,7 @@ static void armada_drm_plane_work_run(struct armada_crtc *dcrtc, /* Handle any pending frame work. */ if (work) { work->fn(dcrtc, plane, work); - drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_put(&dcrtc->crtc); } wake_up(&plane->frame_wait); @@ -210,7 +210,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc, { int ret; - ret = drm_vblank_get(dcrtc->crtc.dev, dcrtc->num); + ret = drm_crtc_vblank_get(&dcrtc->crtc); if (ret) { DRM_ERROR("failed to acquire vblank counter\n"); return ret; @@ -218,7 +218,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc, ret = cmpxchg(&plane->work, NULL, work) ? 
-EBUSY : 0; if (ret) - drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_put(&dcrtc->crtc); return ret; } @@ -234,7 +234,7 @@ struct armada_plane_work *armada_drm_plane_work_cancel( struct armada_plane_work *work = xchg(&plane->work, NULL); if (work) - drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_put(&dcrtc->crtc); return work; } @@ -260,7 +260,7 @@ static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc, if (fwork->event) { spin_lock_irqsave(&dev->event_lock, flags); - drm_send_vblank_event(dev, dcrtc->num, fwork->event); + drm_crtc_send_vblank_event(&dcrtc->crtc, fwork->event); spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -592,9 +592,9 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, if (interlaced ^ dcrtc->interlaced) { if (adj->flags & DRM_MODE_FLAG_INTERLACE) - drm_vblank_get(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_get(&dcrtc->crtc); else - drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_put(&dcrtc->crtc); dcrtc->interlaced = interlaced; } diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 439824a61aa5..f5ebdd681445 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -189,7 +189,6 @@ static struct drm_driver armada_drm_driver = { .load = armada_drm_load, .lastclose = armada_drm_lastclose, .unload = armada_drm_unload, - .set_busid = drm_platform_set_busid, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = armada_drm_enable_vblank, .disable_vblank = armada_drm_disable_vblank, @@ -197,7 +196,7 @@ static struct drm_driver armada_drm_driver = { .debugfs_init = armada_drm_debugfs_init, .debugfs_cleanup = armada_drm_debugfs_cleanup, #endif - .gem_free_object = armada_gem_free_object, + .gem_free_object_unlocked = armada_gem_free_object, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = armada_gem_prime_export, diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index 148e8a42b2c6..1ee707ef6b8d 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -121,6 +121,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, int ret; ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip, + BIT(DRM_ROTATE_0), 0, INT_MAX, true, false, &visible); if (ret) return ret; diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index fcd9c0714836..f54afd2113a9 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -209,7 +209,7 @@ static struct drm_driver driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - .gem_free_object = ast_gem_free_object, + .gem_free_object_unlocked = ast_gem_free_object, .dumb_create = ast_dumb_create, .dumb_map_offset = ast_dumb_mmap_offset, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index 5320f8c57884..c017a9330a18 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -167,12 +167,9 @@ static int astfb_create_object(struct ast_fbdev *afbdev, struct drm_gem_object **gobj_p) { struct drm_device *dev = afbdev->helper.dev; - u32 bpp, depth; u32 size; struct drm_gem_object *gobj; - int ret = 0; - drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp); size = mode_cmd->pitches[0] * mode_cmd->height; ret = ast_gem_create(dev, size, true, &gobj); 
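The armada and atmel-hlcdc hunks above (and several later ones) all make the same mechanical conversion: the old vblank calls took a (dev, pipe) pair, while the drm_crtc_* helpers take the CRTC itself. A minimal sketch of the pattern follows; the foo_* names are hypothetical, and only the drm_crtc_* helpers actually used in the hunks above are assumed.

#include <drm/drmP.h>

/* hypothetical driver CRTC wrapper, mirroring armada_crtc/atmel_hlcdc_crtc */
struct foo_crtc {
	struct drm_crtc base;
	struct drm_pending_vblank_event *event;
};

static void foo_finish_page_flip(struct foo_crtc *fcrtc)
{
	/*
	 * old API, indexed by pipe number:
	 *   drm_send_vblank_event(fcrtc->base.dev, pipe, fcrtc->event);
	 *   drm_vblank_put(fcrtc->base.dev, pipe);
	 * the per-CRTC helpers take the CRTC directly, so the driver no
	 * longer needs to carry its pipe index around:
	 */
	drm_crtc_send_vblank_event(&fcrtc->base, fcrtc->event);
	drm_crtc_vblank_put(&fcrtc->base);
	fcrtc->event = NULL;
}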
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index c337922606e3..5957c3e659fe 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -624,19 +624,21 @@ static void ast_crtc_reset(struct drm_crtc *crtc) } -static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct ast_crtc *ast_crtc = to_ast_crtc(crtc); - int end = (start + size > 256) ? 256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { ast_crtc->lut_r[i] = red[i] >> 8; ast_crtc->lut_g[i] = green[i] >> 8; ast_crtc->lut_b[i] = blue[i] >> 8; } ast_crtc_load_lut(crtc); + + return 0; } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index bd12231ab0cd..613f6c99b76a 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -374,8 +374,8 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc) spin_lock_irqsave(&dev->event_lock, flags); if (crtc->event) { - drm_send_vblank_event(dev, crtc->id, crtc->event); - drm_vblank_put(dev, crtc->id); + drm_crtc_send_vblank_event(&crtc->base, crtc->event); + drm_crtc_vblank_put(&crtc->base); crtc->event = NULL; } spin_unlock_irqrestore(&dev->event_lock, flags); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 8ded7645747e..d4a3d61b7b06 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -519,7 +519,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev, } /* Swap the state, this is the point of no return. 
*/ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); if (async) queue_work(dc->wq, &commit->work); @@ -691,13 +691,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev) destroy_workqueue(dc->wq); } -static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev) -{ - mutex_lock(&dev->mode_config.mutex); - drm_connector_unregister_all(dev); - mutex_unlock(&dev->mode_config.mutex); -} - static void atmel_hlcdc_dc_lastclose(struct drm_device *dev) { struct atmel_hlcdc_dc *dc = dev->dev_private; @@ -776,7 +769,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = atmel_hlcdc_dc_enable_vblank, .disable_vblank = atmel_hlcdc_dc_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, @@ -815,15 +808,8 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev) if (ret) goto err_unload; - ret = drm_connector_register_all(ddev); - if (ret) - goto err_unregister; - return 0; -err_unregister: - drm_dev_unregister(ddev); - err_unload: atmel_hlcdc_dc_unload(ddev); @@ -837,7 +823,6 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev) { struct drm_device *ddev = platform_get_drvdata(pdev); - atmel_hlcdc_dc_connector_unplug_all(ddev); drm_dev_unregister(ddev); atmel_hlcdc_dc_unload(ddev); drm_dev_unref(ddev); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 39802c0539b6..473a475f27b1 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -113,21 +113,9 @@ static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector, return atmel_hlcdc_dc_mode_valid(rgb->dc, mode); } - - -static struct drm_encoder * -atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector) -{ - struct atmel_hlcdc_rgb_output *rgb = - drm_connector_to_atmel_hlcdc_rgb_output(connector); - - return &rgb->encoder; -} - static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = { .get_modes = atmel_hlcdc_panel_get_modes, .mode_valid = atmel_hlcdc_rgb_mode_valid, - .best_encoder = atmel_hlcdc_rgb_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index b332b4d3b0e2..abace82de6ea 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -89,7 +89,7 @@ static struct drm_driver bochs_driver = { .date = "20130925", .major = 1, .minor = 0, - .gem_free_object = bochs_gem_free_object, + .gem_free_object_unlocked = bochs_gem_free_object, .dumb_create = bochs_dumb_create, .dumb_map_offset = bochs_dumb_mmap_offset, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 8f7423f18da5..a141921445f4 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -50,6 +50,14 @@ config DRM_PARADE_PS8622 ---help--- Parade eDP-LVDS bridge chip driver. +config DRM_SII902X + tristate "Silicon Image sii902x RGB/HDMI bridge" + depends on OF + select DRM_KMS_HELPER + select REGMAP_I2C + ---help--- + Silicon Image sii902x bridge chip driver. 
+ source "drivers/gpu/drm/bridge/analogix/Kconfig" endmenu diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 96b13b30e6ab..bfec9f8cb9d2 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -5,4 +5,5 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o +obj-$(CONFIG_DRM_SII902X) += sii902x.o obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/ diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c index d087b054c360..f9f03bcba0af 100644 --- a/drivers/gpu/drm/bridge/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c @@ -986,16 +986,8 @@ unlock: return num_modes; } -static struct drm_encoder *anx78xx_best_encoder(struct drm_connector *connector) -{ - struct anx78xx *anx78xx = connector_to_anx78xx(connector); - - return anx78xx->bridge.encoder; -} - static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = { .get_modes = anx78xx_get_modes, - .best_encoder = anx78xx_best_encoder, }; static enum drm_connector_status anx78xx_detect(struct drm_connector *connector, diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c index c9d941283d30..70b1f7d4270b 100644 --- a/drivers/gpu/drm/bridge/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/dw-hdmi.c @@ -1476,15 +1476,6 @@ dw_hdmi_connector_mode_valid(struct drm_connector *connector, return mode_status; } -static struct drm_encoder *dw_hdmi_connector_best_encoder(struct drm_connector - *connector) -{ - struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, - connector); - - return hdmi->encoder; -} - static void dw_hdmi_connector_destroy(struct drm_connector *connector) { drm_connector_unregister(connector); @@ -1525,7 +1516,7 @@ static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = { static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = { .get_modes = dw_hdmi_connector_get_modes, .mode_valid = dw_hdmi_connector_mode_valid, - .best_encoder = dw_hdmi_connector_best_encoder, + .best_encoder = drm_atomic_helper_best_encoder, }; static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = { diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index 7ecd59f70b8e..93f3dacf9e27 100644 --- a/drivers/gpu/drm/bridge/nxp-ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -235,16 +235,8 @@ out: return num_modes; } -static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector) -{ - struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector); - - return ptn_bridge->bridge.encoder; -} - static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { .get_modes = ptn3460_get_modes, - .best_encoder = ptn3460_best_encoder, }; static enum drm_connector_status ptn3460_detect(struct drm_connector *connector, diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c index be881e9fef8f..5cd8dd7e5904 100644 --- a/drivers/gpu/drm/bridge/parade-ps8622.c +++ b/drivers/gpu/drm/bridge/parade-ps8622.c @@ -474,18 +474,8 @@ static int ps8622_get_modes(struct drm_connector *connector) return drm_panel_get_modes(ps8622->panel); } -static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector) -{ - struct ps8622_bridge *ps8622; - - ps8622 = connector_to_ps8622(connector); - - return 
ps8622->bridge.encoder; -} - static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = { .get_modes = ps8622_get_modes, - .best_encoder = ps8622_best_encoder, }; static enum drm_connector_status ps8622_detect(struct drm_connector *connector, diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c new file mode 100644 index 000000000000..9126d0306ab5 --- /dev/null +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -0,0 +1,467 @@ +/* + * Copyright (C) 2016 Atmel + * Bo Shen <voice.shen@atmel.com> + * + * Authors: Bo Shen <voice.shen@atmel.com> + * Boris Brezillon <boris.brezillon@free-electrons.com> + * Wu, Songjun <Songjun.Wu@atmel.com> + * + * + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/regmap.h> + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_edid.h> + +#define SII902X_TPI_VIDEO_DATA 0x0 + +#define SII902X_TPI_PIXEL_REPETITION 0x8 +#define SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT BIT(5) +#define SII902X_TPI_AVI_PIXEL_REP_RISING_EDGE BIT(4) +#define SII902X_TPI_AVI_PIXEL_REP_4X 3 +#define SII902X_TPI_AVI_PIXEL_REP_2X 1 +#define SII902X_TPI_AVI_PIXEL_REP_NONE 0 +#define SII902X_TPI_CLK_RATIO_HALF (0 << 6) +#define SII902X_TPI_CLK_RATIO_1X (1 << 6) +#define SII902X_TPI_CLK_RATIO_2X (2 << 6) +#define SII902X_TPI_CLK_RATIO_4X (3 << 6) + +#define SII902X_TPI_AVI_IN_FORMAT 0x9 +#define SII902X_TPI_AVI_INPUT_BITMODE_12BIT BIT(7) +#define SII902X_TPI_AVI_INPUT_DITHER BIT(6) +#define SII902X_TPI_AVI_INPUT_RANGE_LIMITED (2 << 2) +#define SII902X_TPI_AVI_INPUT_RANGE_FULL (1 << 2) +#define SII902X_TPI_AVI_INPUT_RANGE_AUTO (0 << 2) +#define SII902X_TPI_AVI_INPUT_COLORSPACE_BLACK (3 << 0) +#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV422 (2 << 0) +#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV444 (1 << 0) +#define SII902X_TPI_AVI_INPUT_COLORSPACE_RGB (0 << 0) + +#define SII902X_TPI_AVI_INFOFRAME 0x0c + +#define SII902X_SYS_CTRL_DATA 0x1a +#define SII902X_SYS_CTRL_PWR_DWN BIT(4) +#define SII902X_SYS_CTRL_AV_MUTE BIT(3) +#define SII902X_SYS_CTRL_DDC_BUS_REQ BIT(2) +#define SII902X_SYS_CTRL_DDC_BUS_GRTD BIT(1) +#define SII902X_SYS_CTRL_OUTPUT_MODE BIT(0) +#define SII902X_SYS_CTRL_OUTPUT_HDMI 1 +#define SII902X_SYS_CTRL_OUTPUT_DVI 0 + +#define SII902X_REG_CHIPID(n) (0x1b + (n)) + +#define SII902X_PWR_STATE_CTRL 0x1e +#define SII902X_AVI_POWER_STATE_MSK GENMASK(1, 0) +#define SII902X_AVI_POWER_STATE_D(l) ((l) & SII902X_AVI_POWER_STATE_MSK) + +#define SII902X_INT_ENABLE 0x3c +#define SII902X_INT_STATUS 0x3d +#define SII902X_HOTPLUG_EVENT BIT(0) +#define SII902X_PLUGGED_STATUS BIT(2) + +#define SII902X_REG_TPI_RQB 0xc7 + +#define SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS 500 + +struct sii902x { + struct i2c_client *i2c; + struct regmap *regmap; + struct drm_bridge bridge; + struct drm_connector connector; + struct gpio_desc *reset_gpio; +}; + +static inline struct sii902x 
*bridge_to_sii902x(struct drm_bridge *bridge) +{ + return container_of(bridge, struct sii902x, bridge); +} + +static inline struct sii902x *connector_to_sii902x(struct drm_connector *con) +{ + return container_of(con, struct sii902x, connector); +} + +static void sii902x_reset(struct sii902x *sii902x) +{ + if (!sii902x->reset_gpio) + return; + + gpiod_set_value(sii902x->reset_gpio, 1); + + /* The datasheet says treset-min = 100us. Make it 150us to be sure. */ + usleep_range(150, 200); + + gpiod_set_value(sii902x->reset_gpio, 0); +} + +static enum drm_connector_status +sii902x_connector_detect(struct drm_connector *connector, bool force) +{ + struct sii902x *sii902x = connector_to_sii902x(connector); + unsigned int status; + + regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status); + + return (status & SII902X_PLUGGED_STATUS) ? + connector_status_connected : connector_status_disconnected; +} + +static const struct drm_connector_funcs sii902x_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .detect = sii902x_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int sii902x_get_modes(struct drm_connector *connector) +{ + struct sii902x *sii902x = connector_to_sii902x(connector); + struct regmap *regmap = sii902x->regmap; + u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + unsigned long timeout; + unsigned int status; + struct edid *edid; + int num = 0; + int ret; + + ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA, + SII902X_SYS_CTRL_DDC_BUS_REQ, + SII902X_SYS_CTRL_DDC_BUS_REQ); + if (ret) + return ret; + + timeout = jiffies + + msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS); + do { + ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status); + if (ret) + return ret; + } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) && + time_before(jiffies, timeout)); + + if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) { + dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus"); + return -ETIMEDOUT; + } + + ret = regmap_write(regmap, SII902X_SYS_CTRL_DATA, status); + if (ret) + return ret; + + edid = drm_get_edid(connector, sii902x->i2c->adapter); + drm_mode_connector_update_edid_property(connector, edid); + if (edid) { + num = drm_add_edid_modes(connector, edid); + kfree(edid); + } + + ret = drm_display_info_set_bus_formats(&connector->display_info, + &bus_format, 1); + if (ret) + return ret; + + ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status); + if (ret) + return ret; + + ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA, + SII902X_SYS_CTRL_DDC_BUS_REQ | + SII902X_SYS_CTRL_DDC_BUS_GRTD, 0); + if (ret) + return ret; + + timeout = jiffies + + msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS); + do { + ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status); + if (ret) + return ret; + } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ | + SII902X_SYS_CTRL_DDC_BUS_GRTD) && + time_before(jiffies, timeout)); + + if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ | + SII902X_SYS_CTRL_DDC_BUS_GRTD)) { + dev_err(&sii902x->i2c->dev, "failed to release the i2c bus"); + return -ETIMEDOUT; + } + + return num; +} + +static enum drm_mode_status sii902x_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + /* TODO: check mode */ + + return MODE_OK; +} + +static const struct 
drm_connector_helper_funcs sii902x_connector_helper_funcs = { + .get_modes = sii902x_get_modes, + .mode_valid = sii902x_mode_valid, +}; + +static void sii902x_bridge_disable(struct drm_bridge *bridge) +{ + struct sii902x *sii902x = bridge_to_sii902x(bridge); + + regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA, + SII902X_SYS_CTRL_PWR_DWN, + SII902X_SYS_CTRL_PWR_DWN); +} + +static void sii902x_bridge_enable(struct drm_bridge *bridge) +{ + struct sii902x *sii902x = bridge_to_sii902x(bridge); + + regmap_update_bits(sii902x->regmap, SII902X_PWR_STATE_CTRL, + SII902X_AVI_POWER_STATE_MSK, + SII902X_AVI_POWER_STATE_D(0)); + regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA, + SII902X_SYS_CTRL_PWR_DWN, 0); +} + +static void sii902x_bridge_mode_set(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adj) +{ + struct sii902x *sii902x = bridge_to_sii902x(bridge); + struct regmap *regmap = sii902x->regmap; + u8 buf[HDMI_INFOFRAME_SIZE(AVI)]; + struct hdmi_avi_infoframe frame; + int ret; + + buf[0] = adj->clock; + buf[1] = adj->clock >> 8; + buf[2] = adj->vrefresh; + buf[3] = 0x00; + buf[4] = adj->hdisplay; + buf[5] = adj->hdisplay >> 8; + buf[6] = adj->vdisplay; + buf[7] = adj->vdisplay >> 8; + buf[8] = SII902X_TPI_CLK_RATIO_1X | SII902X_TPI_AVI_PIXEL_REP_NONE | + SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT; + buf[9] = SII902X_TPI_AVI_INPUT_RANGE_AUTO | + SII902X_TPI_AVI_INPUT_COLORSPACE_RGB; + + ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10); + if (ret) + return; + + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj); + if (ret < 0) { + DRM_ERROR("couldn't fill AVI infoframe\n"); + return; + } + + ret = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf)); + if (ret < 0) { + DRM_ERROR("failed to pack AVI infoframe: %d\n", ret); + return; + } + + /* Do not send the infoframe header, but keep the CRC field. 
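+ * HDMI_INFOFRAME_HEADER_SIZE counts the three header bytes plus the
+ * checksum byte, so starting at buf + HDMI_INFOFRAME_HEADER_SIZE - 1
+ * transfers the checksum followed by the HDMI_AVI_INFOFRAME_SIZE
+ * payload bytes.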
*/ + regmap_bulk_write(regmap, SII902X_TPI_AVI_INFOFRAME, + buf + HDMI_INFOFRAME_HEADER_SIZE - 1, + HDMI_AVI_INFOFRAME_SIZE + 1); +} + +static int sii902x_bridge_attach(struct drm_bridge *bridge) +{ + struct sii902x *sii902x = bridge_to_sii902x(bridge); + struct drm_device *drm = bridge->dev; + int ret; + + drm_connector_helper_add(&sii902x->connector, + &sii902x_connector_helper_funcs); + + if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) { + dev_err(&sii902x->i2c->dev, + "sii902x driver is only compatible with DRM devices supporting atomic updates"); + return -ENOTSUPP; + } + + ret = drm_connector_init(drm, &sii902x->connector, + &sii902x_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + if (ret) + return ret; + + if (sii902x->i2c->irq > 0) + sii902x->connector.polled = DRM_CONNECTOR_POLL_HPD; + else + sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT; + + drm_mode_connector_attach_encoder(&sii902x->connector, bridge->encoder); + + return 0; +} + +static const struct drm_bridge_funcs sii902x_bridge_funcs = { + .attach = sii902x_bridge_attach, + .mode_set = sii902x_bridge_mode_set, + .disable = sii902x_bridge_disable, + .enable = sii902x_bridge_enable, +}; + +static const struct regmap_range sii902x_volatile_ranges[] = { + { .range_min = 0, .range_max = 0xff }, +}; + +static const struct regmap_access_table sii902x_volatile_table = { + .yes_ranges = sii902x_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(sii902x_volatile_ranges), +}; + +static const struct regmap_config sii902x_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .volatile_table = &sii902x_volatile_table, + .cache_type = REGCACHE_NONE, +}; + +static irqreturn_t sii902x_interrupt(int irq, void *data) +{ + struct sii902x *sii902x = data; + unsigned int status = 0; + + regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status); + regmap_write(sii902x->regmap, SII902X_INT_STATUS, status); + + if ((status & SII902X_HOTPLUG_EVENT) && sii902x->bridge.dev) + drm_helper_hpd_irq_event(sii902x->bridge.dev); + + return IRQ_HANDLED; +} + +static int sii902x_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + unsigned int status = 0; + struct sii902x *sii902x; + u8 chipid[4]; + int ret; + + sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL); + if (!sii902x) + return -ENOMEM; + + sii902x->i2c = client; + sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config); + if (IS_ERR(sii902x->regmap)) + return PTR_ERR(sii902x->regmap); + + sii902x->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(sii902x->reset_gpio)) { + dev_err(dev, "Failed to retrieve/request reset gpio: %ld\n", + PTR_ERR(sii902x->reset_gpio)); + return PTR_ERR(sii902x->reset_gpio); + } + + sii902x_reset(sii902x); + + ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0); + if (ret) + return ret; + + ret = regmap_bulk_read(sii902x->regmap, SII902X_REG_CHIPID(0), + &chipid, 4); + if (ret) { + dev_err(dev, "regmap_read failed %d\n", ret); + return ret; + } + + if (chipid[0] != 0xb0) { + dev_err(dev, "Invalid chipid: %02x (expecting 0xb0)\n", + chipid[0]); + return -EINVAL; + } + + /* Clear all pending interrupts */ + regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status); + regmap_write(sii902x->regmap, SII902X_INT_STATUS, status); + + if (client->irq > 0) { + regmap_write(sii902x->regmap, SII902X_INT_ENABLE, + SII902X_HOTPLUG_EVENT); + + ret = devm_request_threaded_irq(dev, client->irq, NULL, + sii902x_interrupt, + IRQF_ONESHOT, dev_name(dev), + sii902x); + 
if (ret) + return ret; + } + + sii902x->bridge.funcs = &sii902x_bridge_funcs; + sii902x->bridge.of_node = dev->of_node; + ret = drm_bridge_add(&sii902x->bridge); + if (ret) { + dev_err(dev, "Failed to add drm_bridge\n"); + return ret; + } + + i2c_set_clientdata(client, sii902x); + + return 0; +} + +static int sii902x_remove(struct i2c_client *client) +{ + struct sii902x *sii902x = i2c_get_clientdata(client); + + drm_bridge_remove(&sii902x->bridge); + + return 0; +} + +static const struct of_device_id sii902x_dt_ids[] = { + { .compatible = "sil,sii9022", }, + { } +}; +MODULE_DEVICE_TABLE(of, sii902x_dt_ids); + +static const struct i2c_device_id sii902x_i2c_ids[] = { + { "sii9022", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids); + +static struct i2c_driver sii902x_driver = { + .probe = sii902x_probe, + .remove = sii902x_remove, + .driver = { + .name = "sii902x", + .of_match_table = sii902x_dt_ids, + }, + .id_table = sii902x_i2c_ids, +}; +module_i2c_driver(sii902x_driver); + +MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>"); +MODULE_DESCRIPTION("SII902x RGB -> HDMI bridges"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index dc83f69da6f1..b05f7eae32ce 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -142,7 +142,7 @@ static struct drm_driver driver = { .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - .gem_free_object = cirrus_gem_free_object, + .gem_free_object_unlocked = cirrus_gem_free_object, .dumb_create = cirrus_dumb_create, .dumb_map_offset = cirrus_dumb_mmap_offset, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index d3d8d7bfcc57..17c915d9a03e 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -325,21 +325,20 @@ static void cirrus_crtc_commit(struct drm_crtc *crtc) * use this for 8-bit mode so can't perform smooth fades on deeper modes, * but it's a requirement that we provide the function */ -static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc); int i; - if (size != CIRRUS_LUT_SIZE) - return; - - for (i = 0; i < CIRRUS_LUT_SIZE; i++) { + for (i = 0; i < size; i++) { cirrus_crtc->lut_r[i] = red[i]; cirrus_crtc->lut_g[i] = green[i]; cirrus_crtc->lut_b[i] = blue[i]; } cirrus_crtc_load_lut(crtc); + + return 0; } /* Simple cleanup function */ diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index c204ef32df16..d99ab2f6663f 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -33,6 +33,20 @@ #include "drm_crtc_internal.h" +static void crtc_commit_free(struct kref *kref) +{ + struct drm_crtc_commit *commit = + container_of(kref, struct drm_crtc_commit, ref); + + kfree(commit); +} + +void drm_crtc_commit_put(struct drm_crtc_commit *commit) +{ + kref_put(&commit->ref, crtc_commit_free); +} +EXPORT_SYMBOL(drm_crtc_commit_put); + /** * drm_atomic_state_default_release - * release memory initialized by drm_atomic_state_init @@ -44,11 +58,8 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state) { kfree(state->connectors); - kfree(state->connector_states); kfree(state->crtcs); - 
kfree(state->crtc_states); kfree(state->planes); - kfree(state->plane_states); } EXPORT_SYMBOL(drm_atomic_state_default_release); @@ -72,18 +83,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) sizeof(*state->crtcs), GFP_KERNEL); if (!state->crtcs) goto fail; - state->crtc_states = kcalloc(dev->mode_config.num_crtc, - sizeof(*state->crtc_states), GFP_KERNEL); - if (!state->crtc_states) - goto fail; state->planes = kcalloc(dev->mode_config.num_total_plane, sizeof(*state->planes), GFP_KERNEL); if (!state->planes) goto fail; - state->plane_states = kcalloc(dev->mode_config.num_total_plane, - sizeof(*state->plane_states), GFP_KERNEL); - if (!state->plane_states) - goto fail; state->dev = dev; @@ -139,40 +142,48 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state); for (i = 0; i < state->num_connector; i++) { - struct drm_connector *connector = state->connectors[i]; + struct drm_connector *connector = state->connectors[i].ptr; if (!connector) continue; connector->funcs->atomic_destroy_state(connector, - state->connector_states[i]); - state->connectors[i] = NULL; - state->connector_states[i] = NULL; + state->connectors[i].state); + state->connectors[i].ptr = NULL; + state->connectors[i].state = NULL; drm_connector_unreference(connector); } for (i = 0; i < config->num_crtc; i++) { - struct drm_crtc *crtc = state->crtcs[i]; + struct drm_crtc *crtc = state->crtcs[i].ptr; if (!crtc) continue; crtc->funcs->atomic_destroy_state(crtc, - state->crtc_states[i]); - state->crtcs[i] = NULL; - state->crtc_states[i] = NULL; + state->crtcs[i].state); + + if (state->crtcs[i].commit) { + kfree(state->crtcs[i].commit->event); + state->crtcs[i].commit->event = NULL; + drm_crtc_commit_put(state->crtcs[i].commit); + } + + state->crtcs[i].commit = NULL; + state->crtcs[i].ptr = NULL; + state->crtcs[i].state = NULL; } for (i = 0; i < config->num_total_plane; i++) { - struct drm_plane *plane = state->planes[i]; + struct drm_plane *plane = state->planes[i].ptr; if (!plane) continue; plane->funcs->atomic_destroy_state(plane, - state->plane_states[i]); - state->planes[i] = NULL; - state->plane_states[i] = NULL; + state->planes[i].state); + state->planes[i].ptr = NULL; + state->planes[i].state = NULL; } } EXPORT_SYMBOL(drm_atomic_state_default_clear); @@ -270,8 +281,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, if (!crtc_state) return ERR_PTR(-ENOMEM); - state->crtc_states[index] = crtc_state; - state->crtcs[index] = crtc; + state->crtcs[index].state = crtc_state; + state->crtcs[index].ptr = crtc; crtc_state->state = state; DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n", @@ -632,8 +643,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, if (!plane_state) return ERR_PTR(-ENOMEM); - state->plane_states[index] = plane_state; - state->planes[index] = plane; + state->planes[index].state = plane_state; + state->planes[index].ptr = plane; plane_state->state = state; DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n", @@ -897,8 +908,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, index = drm_connector_index(connector); if (index >= state->num_connector) { - struct drm_connector **c; - struct drm_connector_state **cs; + struct __drm_connnectors_state *c; int alloc = max(index + 1, config->num_connector); c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL); @@ -909,26 +919,19 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, 
memset(&state->connectors[state->num_connector], 0, sizeof(*state->connectors) * (alloc - state->num_connector)); - cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL); - if (!cs) - return ERR_PTR(-ENOMEM); - - state->connector_states = cs; - memset(&state->connector_states[state->num_connector], 0, - sizeof(*state->connector_states) * (alloc - state->num_connector)); state->num_connector = alloc; } - if (state->connector_states[index]) - return state->connector_states[index]; + if (state->connectors[index].state) + return state->connectors[index].state; connector_state = connector->funcs->atomic_duplicate_state(connector); if (!connector_state) return ERR_PTR(-ENOMEM); drm_connector_reference(connector); - state->connector_states[index] = connector_state; - state->connectors[index] = connector; + state->connectors[index].state = connector_state; + state->connectors[index].ptr = connector; connector_state->state = state; DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n", @@ -1432,7 +1435,8 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit); */ static struct drm_pending_vblank_event *create_vblank_event( - struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data) + struct drm_device *dev, struct drm_file *file_priv, + struct fence *fence, uint64_t user_data) { struct drm_pending_vblank_event *e = NULL; int ret; @@ -1445,12 +1449,17 @@ static struct drm_pending_vblank_event *create_vblank_event( e->event.base.length = sizeof(e->event); e->event.user_data = user_data; - ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base); - if (ret) { - kfree(e); - return NULL; + if (file_priv) { + ret = drm_event_reserve_init(dev, file_priv, &e->base, + &e->event.base); + if (ret) { + kfree(e); + return NULL; + } } + e->base.fence = fence; + return e; } @@ -1690,7 +1699,8 @@ retry: for_each_crtc_in_state(state, crtc, crtc_state, i) { struct drm_pending_vblank_event *e; - e = create_vblank_event(dev, file_priv, arg->user_data); + e = create_vblank_event(dev, file_priv, NULL, + arg->user_data); if (!e) { ret = -ENOMEM; goto out; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index ddfa0d120e39..de7fddce3cef 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -110,8 +110,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, if (funcs->atomic_best_encoder) new_encoder = funcs->atomic_best_encoder(connector, conn_state); - else + else if (funcs->best_encoder) new_encoder = funcs->best_encoder(connector); + else + new_encoder = drm_atomic_helper_best_encoder(connector); if (new_encoder) { if (encoder_mask & (1 << drm_encoder_index(new_encoder))) { @@ -298,8 +300,10 @@ update_connector_routing(struct drm_atomic_state *state, if (funcs->atomic_best_encoder) new_encoder = funcs->atomic_best_encoder(connector, connector_state); - else + else if (funcs->best_encoder) new_encoder = funcs->best_encoder(connector); + else + new_encoder = drm_atomic_helper_best_encoder(connector); if (!new_encoder) { DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", @@ -414,6 +418,9 @@ mode_fixup(struct drm_atomic_state *state) for_each_crtc_in_state(state, crtc, crtc_state, i) { const struct drm_crtc_helper_funcs *funcs; + if (!crtc_state->enable) + continue; + if (!crtc_state->mode_changed && !crtc_state->connectors_changed) continue; @@ -458,7 +465,7 @@ mode_fixup(struct drm_atomic_state *state) * times for the same update, e.g. 
when the ->atomic_check functions depend upon * the adjusted dotclock for fifo space allocation and watermark computation. * - * RETURNS + * RETURNS: * Zero for success or -errno */ int @@ -572,7 +579,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset); * It also sets crtc_state->planes_changed to indicate that a crtc has * updated planes. * - * RETURNS + * RETURNS: * Zero for success or -errno */ int @@ -611,7 +618,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev, if (!funcs || !funcs->atomic_check) continue; - ret = funcs->atomic_check(crtc, state->crtc_states[i]); + ret = funcs->atomic_check(crtc, crtc_state); if (ret) { DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", crtc->base.id, crtc->name); @@ -640,7 +647,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes); * ->atomic_check functions depend upon an updated adjusted_mode.clock to * e.g. properly compute watermarks. * - * RETURNS + * RETURNS: * Zero for success or -errno */ int drm_atomic_helper_check(struct drm_device *dev, @@ -1113,22 +1120,17 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); /** - * drm_atomic_helper_commit - commit validated state object - * @dev: DRM device - * @state: the driver state object - * @nonblocking: whether nonblocking behavior is requested. + * drm_atomic_helper_commit_tail - commit atomic update to hardware + * @state: new modeset state to be committed * - * This function commits a with drm_atomic_helper_check() pre-validated state - * object. This can still fail when e.g. the framebuffer reservation fails. For - * now this doesn't implement nonblocking commits. + * This is the default implementation for the ->atomic_commit_tail() hook of the + * &drm_mode_config_helper_funcs vtable. * - * Note that right now this function does not support nonblocking commits, hence - * driver writers must implement their own version for now. Also note that the - * default ordering of how the various stages are called is to match the legacy - * modeset helper library closest. One peculiarity of that is that it doesn't - * mesh well with runtime PM at all. + * Note that the default ordering of how the various stages are called is to + * match the legacy modeset helper library closest. One peculiarity of that is + * that it doesn't mesh well with runtime PM at all. * - * For drivers supporting runtime PM the recommended sequence is + * For drivers supporting runtime PM the recommended sequence is instead :: * * drm_atomic_helper_commit_modeset_disables(dev, state); * @@ -1136,9 +1138,75 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); * * drm_atomic_helper_commit_planes(dev, state, true); * - * See the kerneldoc entries for these three functions for more details. + * for committing the atomic update to hardware. See the kerneldoc entries for + * these three functions for more details. 
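+ *
+ * A driver that wants the runtime PM friendly ordering above would install
+ * its own hook instead of relying on this default. A hypothetical sketch
+ * (the foo_ names are placeholders, not an existing driver)::
+ *
+ *     static void foo_atomic_commit_tail(struct drm_atomic_state *state)
+ *     {
+ *             struct drm_device *dev = state->dev;
+ *
+ *             drm_atomic_helper_commit_modeset_disables(dev, state);
+ *             drm_atomic_helper_commit_modeset_enables(dev, state);
+ *             drm_atomic_helper_commit_planes(dev, state, true);
+ *             drm_atomic_helper_commit_hw_done(state);
+ *             drm_atomic_helper_wait_for_vblanks(dev, state);
+ *             drm_atomic_helper_cleanup_planes(dev, state);
+ *     }
+ *
+ *     static struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
+ *             .atomic_commit_tail = foo_atomic_commit_tail,
+ *     };
+ *
+ * and pointed to from dev->mode_config.helper_private at init time.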
+ */ +void drm_atomic_helper_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, state); + + drm_atomic_helper_commit_planes(dev, state, false); + + drm_atomic_helper_commit_modeset_enables(dev, state); + + drm_atomic_helper_commit_hw_done(state); + + drm_atomic_helper_wait_for_vblanks(dev, state); + + drm_atomic_helper_cleanup_planes(dev, state); +} +EXPORT_SYMBOL(drm_atomic_helper_commit_tail); + +static void commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct drm_mode_config_helper_funcs *funcs; + + funcs = dev->mode_config.helper_private; + + drm_atomic_helper_wait_for_fences(dev, state); + + drm_atomic_helper_wait_for_dependencies(state); + + if (funcs && funcs->atomic_commit_tail) + funcs->atomic_commit_tail(state); + else + drm_atomic_helper_commit_tail(state); + + drm_atomic_helper_commit_cleanup_done(state); + + drm_atomic_state_free(state); +} + +static void commit_work(struct work_struct *work) +{ + struct drm_atomic_state *state = container_of(work, + struct drm_atomic_state, + commit_work); + commit_tail(state); +} + +/** + * drm_atomic_helper_commit - commit validated state object + * @dev: DRM device + * @state: the driver state object + * @nonblock: whether nonblocking behavior is requested. + * + * This function commits a state object pre-validated with + * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer + * reservation fails. This function implements nonblocking commits, using + * drm_atomic_helper_setup_commit() and related functions. + * + * Committing the actual hardware state is done through the + * ->atomic_commit_tail() callback of the &drm_mode_config_helper_funcs vtable, + * or its default implementation drm_atomic_helper_commit_tail(). * - * RETURNS + * RETURNS: * Zero for success or -errno. */ int drm_atomic_helper_commit(struct drm_device *dev, @@ -1147,8 +1215,11 @@ int drm_atomic_helper_commit(struct drm_device *dev, { int ret; - if (nonblock) - return -EBUSY; + ret = drm_atomic_helper_setup_commit(state, nonblock); + if (ret) + return ret; + + INIT_WORK(&state->commit_work, commit_work); ret = drm_atomic_helper_prepare_planes(dev, state); if (ret) @@ -1160,7 +1231,7 @@ int drm_atomic_helper_commit(struct drm_device *dev, * the software side now. */ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); /* * Everything below can be run asynchronously without the need to grab @@ -1176,21 +1247,16 @@ int drm_atomic_helper_commit(struct drm_device *dev, * update. Which is important since compositors need to figure out the * composition of the next frame right after having submitted the * current layout. + * + * NOTE: Commit work has multiple phases, first hardware commit, then + * cleanup. We want them to overlap, hence need system_unbound_wq to + * make sure work items don't artificially stall on each other. 
*/ - drm_atomic_helper_wait_for_fences(dev, state); - - drm_atomic_helper_commit_modeset_disables(dev, state); - - drm_atomic_helper_commit_planes(dev, state, false); - - drm_atomic_helper_commit_modeset_enables(dev, state); - - drm_atomic_helper_wait_for_vblanks(dev, state); - - drm_atomic_helper_cleanup_planes(dev, state); - - drm_atomic_state_free(state); + if (nonblock) + queue_work(system_unbound_wq, &state->commit_work); + else + commit_tail(state); return 0; } @@ -1199,12 +1265,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); /** * DOC: implementing nonblocking commit * - * For now the atomic helpers don't support nonblocking commit directly. If - * there is real need it could be added though, using the dma-buf fence - * infrastructure for generic synchronization with outstanding rendering. - * - * For now drivers have to implement nonblocking commit themselves, with the - * following sequence being the recommended one: + * Nonblocking atomic commits have to be implemented in the following sequence: * * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function * which commit needs to call which can fail, so we want to run it first and @@ -1216,10 +1277,14 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); * cancelled updates. Note that it is important to ensure that the framebuffer * cleanup is still done when cancelling. * - * For sufficient parallelism it is recommended to have a work item per crtc - * (for updates which don't touch global state) and a global one. Then we only - * need to synchronize with the crtc work items for changed crtcs and the global - * work item, which allows nice concurrent updates on disjoint sets of crtcs. + * Asynchronous workers need to have sufficient parallelism to be able to run + * different atomic commits on different CRTCs in parallel. The simplest way to + * achieve this is by running them on the &system_unbound_wq work queue. Note + * that drivers are not required to split up atomic commits and run an + * individual commit in parallel - userspace is supposed to do that if it cares. + * But it might be beneficial to do that for modesets, since those necessarily + * must be done as one global operation, and enabling or disabling a CRTC can + * take a long time. But even that is not required. * * 3. The software state is updated synchronously with * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset @@ -1232,8 +1297,310 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and * then cleaning up the framebuffers after the old framebuffer is no longer * being displayed. + * + * The above scheme is implemented in the atomic helper libraries in + * drm_atomic_helper_commit() using a bunch of helper functions. See + * drm_atomic_helper_setup_commit() for a starting point. */ +static int stall_checks(struct drm_crtc *crtc, bool nonblock) +{ + struct drm_crtc_commit *commit, *stall_commit = NULL; + bool completed = true; + int i; + long ret = 0; + + spin_lock(&crtc->commit_lock); + i = 0; + list_for_each_entry(commit, &crtc->commit_list, commit_entry) { + if (i == 0) { + completed = try_wait_for_completion(&commit->flip_done); + /* Userspace is not allowed to get ahead of the previous + * commit with nonblocking ones. 
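+ * In other words: at most one nonblocking commit may be pending
+ * per CRTC, a second one is rejected with -EBUSY here instead of
+ * being queued up.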
*/ + if (!completed && nonblock) { + spin_unlock(&crtc->commit_lock); + return -EBUSY; + } + } else if (i == 1) { + stall_commit = commit; + drm_crtc_commit_get(stall_commit); + break; + } + + i++; + } + spin_unlock(&crtc->commit_lock); + + if (!stall_commit) + return 0; + + /* We don't want to let commits get ahead of cleanup work too much, + * stalling on 2nd previous commit means triple-buffer won't ever stall. + */ + ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n", + crtc->base.id, crtc->name); + + drm_crtc_commit_put(stall_commit); + + return ret < 0 ? ret : 0; +} + +/** + * drm_atomic_helper_setup_commit - setup possibly nonblocking commit + * @state: new modeset state to be committed + * @nonblock: whether nonblocking behavior is requested. + * + * This function prepares @state to be used by the atomic helper's support for + * nonblocking commits. Drivers using the nonblocking commit infrastructure + * should always call this function from their ->atomic_commit hook. + * + * To be able to use this support drivers need to use a few more helper + * functions. drm_atomic_helper_wait_for_dependencies() must be called before + * actually committing the hardware state, and for nonblocking commits this call + * must be placed in the async worker. See also drm_atomic_helper_swap_state() + * and its stall parameter, for when a driver's commit hooks look at the + * ->state pointers of struct &drm_crtc, &drm_plane or &drm_connector directly. + * + * Completion of the hardware commit step must be signalled using + * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed + * to read or change any permanent software or hardware modeset state. The only + * exception is state protected by other means than &drm_modeset_lock locks. + * Only the free standing @state with pointers to the old state structures can + * be inspected, e.g. to clean up old buffers using + * drm_atomic_helper_cleanup_planes(). + * + * At the very end, before cleaning up @state, drivers must call + * drm_atomic_helper_commit_cleanup_done(). + * + * This is all implemented in drm_atomic_helper_commit(), giving drivers a + * complete and easy-to-use default implementation of the atomic_commit() hook. + * + * The tracking of asynchronously executed and still pending commits is done + * using the core structure &drm_crtc_commit. + * + * By default there's no need to clean up resources allocated by this function + * explicitly: drm_atomic_state_default_clear() will take care of that + * automatically. + * + * Returns: + * + * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast, + * -ENOMEM on allocation failures and -EINTR when a signal is pending. 
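+ *
+ * For reference, a nonblocking ->atomic_commit() built on top of this helper
+ * mirrors drm_atomic_helper_commit() (a condensed sketch, not a drop-in
+ * implementation)::
+ *
+ *     ret = drm_atomic_helper_setup_commit(state, nonblock);
+ *     if (ret)
+ *             return ret;
+ *
+ *     INIT_WORK(&state->commit_work, commit_work);
+ *
+ *     ret = drm_atomic_helper_prepare_planes(dev, state);
+ *     if (ret)
+ *             return ret;
+ *
+ *     drm_atomic_helper_swap_state(state, true);
+ *
+ *     if (nonblock)
+ *             queue_work(system_unbound_wq, &state->commit_work);
+ *     else
+ *             commit_tail(state);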
+ */ +int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, + bool nonblock) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_crtc_commit *commit; + int i, ret; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + commit = kzalloc(sizeof(*commit), GFP_KERNEL); + if (!commit) + return -ENOMEM; + + init_completion(&commit->flip_done); + init_completion(&commit->hw_done); + init_completion(&commit->cleanup_done); + INIT_LIST_HEAD(&commit->commit_entry); + kref_init(&commit->ref); + commit->crtc = crtc; + + state->crtcs[i].commit = commit; + + ret = stall_checks(crtc, nonblock); + if (ret) + return ret; + + /* Drivers only send out events when either the current or the + * new CRTC state is active. Complete right away if everything + * stays off. */ + if (!crtc->state->active && !crtc_state->active) { + complete_all(&commit->flip_done); + continue; + } + + /* Legacy cursor updates are fully unsynced. */ + if (state->legacy_cursor_update) { + complete_all(&commit->flip_done); + continue; + } + + if (!crtc_state->event) { + commit->event = kzalloc(sizeof(*commit->event), + GFP_KERNEL); + if (!commit->event) + return -ENOMEM; + + crtc_state->event = commit->event; + } + + crtc_state->event->base.completion = &commit->flip_done; + } + + return 0; +} +EXPORT_SYMBOL(drm_atomic_helper_setup_commit); + + +static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc) +{ + struct drm_crtc_commit *commit; + int i = 0; + + list_for_each_entry(commit, &crtc->commit_list, commit_entry) { + /* skip the first entry, that's the current commit */ + if (i == 1) + return commit; + i++; + } + + return NULL; +} + +/** + * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits + * @state: new modeset state to be committed + * + * This function waits for all preceding commits that touch the same CRTC as + * @state to both be committed to the hardware (as signalled by + * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled + * by calling drm_crtc_send_vblank_event() on the event member of + * &drm_crtc_state). + * + * This is part of the atomic helper support for nonblocking commits, see + * drm_atomic_helper_setup_commit() for an overview. + */ +void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_crtc_commit *commit; + int i; + long ret; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + spin_lock(&crtc->commit_lock); + commit = preceeding_commit(crtc); + if (commit) + drm_crtc_commit_get(commit); + spin_unlock(&crtc->commit_lock); + + if (!commit) + continue; + + ret = wait_for_completion_timeout(&commit->hw_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n", + crtc->base.id, crtc->name); + + /* Currently no support for overwriting flips, hence + * stall for previous one to execute completely. */ + ret = wait_for_completion_timeout(&commit->flip_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", + crtc->base.id, crtc->name); + + drm_crtc_commit_put(commit); + } +} +EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies); + +/** + * drm_atomic_helper_commit_hw_done - signal completion of hardware commit + * @state: new modeset state to be committed + * + * This function is used to signal completion of the hardware commit step. After + * this step the driver is not allowed to read or change any permanent software + * or hardware modeset state. 
The only exception is state protected by other + * means than &drm_modeset_lock locks. + * + * Drivers should try to postpone any expensive or delayed cleanup work until + * after this function has been called. + * + * This is part of the atomic helper support for nonblocking commits, see + * drm_atomic_helper_setup_commit() for an overview. + */ +void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_crtc_commit *commit; + int i; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + commit = state->crtcs[i].commit; + if (!commit) + continue; + + /* backend must have consumed any event by now */ + WARN_ON(crtc->state->event); + spin_lock(&crtc->commit_lock); + complete_all(&commit->hw_done); + spin_unlock(&crtc->commit_lock); + } +} +EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done); + +/** + * drm_atomic_helper_commit_cleanup_done - signal completion of commit + * @state: new modeset state to be committed + * + * This signals completion of the atomic update @state, including any cleanup + * work. If used, it must be called right before calling + * drm_atomic_state_free(). + * + * This is part of the atomic helper support for nonblocking commits, see + * drm_atomic_helper_setup_commit() for an overview. + */ +void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_crtc_commit *commit; + int i; + long ret; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + commit = state->crtcs[i].commit; + if (WARN_ON(!commit)) + continue; + + spin_lock(&crtc->commit_lock); + complete_all(&commit->cleanup_done); + WARN_ON(!try_wait_for_completion(&commit->hw_done)); + + /* commit_list borrows our reference, need to remove before we + * clean up our drm_atomic_state. But only after it actually + * completed, otherwise subsequent commits won't stall properly. */ + if (try_wait_for_completion(&commit->flip_done)) + goto del_commit; + + spin_unlock(&crtc->commit_lock); + + /* We must wait for the vblank event to signal our completion + * before releasing our reference, since the vblank work does + * not hold a reference of its own. 
*/ + ret = wait_for_completion_timeout(&commit->flip_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", + crtc->base.id, crtc->name); + + spin_lock(&crtc->commit_lock); +del_commit: + list_del(&commit->commit_entry); + spin_unlock(&crtc->commit_lock); + } +} +EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done); + /** * drm_atomic_helper_prepare_planes - prepare plane resources before commit * @dev: DRM device @@ -1249,16 +1616,12 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); int drm_atomic_helper_prepare_planes(struct drm_device *dev, struct drm_atomic_state *state) { - int nplanes = dev->mode_config.num_total_plane; - int ret, i; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + int ret, i, j; - for (i = 0; i < nplanes; i++) { + for_each_plane_in_state(state, plane, plane_state, i) { const struct drm_plane_helper_funcs *funcs; - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *plane_state = state->plane_states[i]; - - if (!plane) - continue; funcs = plane->helper_private; @@ -1272,12 +1635,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, return 0; fail: - for (i--; i >= 0; i--) { + for_each_plane_in_state(state, plane, plane_state, j) { const struct drm_plane_helper_funcs *funcs; - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *plane_state = state->plane_states[i]; - if (!plane) + if (j >= i) continue; funcs = plane->helper_private; @@ -1537,8 +1898,8 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); /** * drm_atomic_helper_swap_state - store atomic state into current sw state - * @dev: DRM device * @state: atomic state + * @stall: stall for preceding commits * * This function stores the atomic state into the current state pointers in all * driver objects. It should be called after all failing steps have been done @@ -1559,42 +1920,70 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); * * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3 * contains the old state. Also do any other cleanup required with that state. + * + * @stall must be set when nonblocking commits for this driver directly access + * the ->state pointer of &drm_plane, &drm_crtc or &drm_connector. With the + * current atomic helpers this is almost always the case, since the helpers + * don't pass the right state structures to the callbacks. 
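+ *
+ * With the nonblocking helper machinery the swap itself reduces to a single
+ * call once validation and plane preparation have succeeded (sketch)::
+ *
+ *     drm_atomic_helper_swap_state(state, true);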
*/ -void drm_atomic_helper_swap_state(struct drm_device *dev, - struct drm_atomic_state *state) +void drm_atomic_helper_swap_state(struct drm_atomic_state *state, + bool stall) { int i; + long ret; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + struct drm_crtc_commit *commit; + + if (stall) { + for_each_crtc_in_state(state, crtc, crtc_state, i) { + spin_lock(&crtc->commit_lock); + commit = list_first_entry_or_null(&crtc->commit_list, + struct drm_crtc_commit, commit_entry); + if (commit) + drm_crtc_commit_get(commit); + spin_unlock(&crtc->commit_lock); + + if (!commit) + continue; - for (i = 0; i < state->num_connector; i++) { - struct drm_connector *connector = state->connectors[i]; - - if (!connector) - continue; + ret = wait_for_completion_timeout(&commit->hw_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n", + crtc->base.id, crtc->name); + drm_crtc_commit_put(commit); + } + } + for_each_connector_in_state(state, connector, conn_state, i) { connector->state->state = state; - swap(state->connector_states[i], connector->state); + swap(state->connectors[i].state, connector->state); connector->state->state = NULL; } - for (i = 0; i < dev->mode_config.num_crtc; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - - if (!crtc) - continue; - + for_each_crtc_in_state(state, crtc, crtc_state, i) { crtc->state->state = state; - swap(state->crtc_states[i], crtc->state); + swap(state->crtcs[i].state, crtc->state); crtc->state->state = NULL; - } - for (i = 0; i < dev->mode_config.num_total_plane; i++) { - struct drm_plane *plane = state->planes[i]; + if (state->crtcs[i].commit) { + spin_lock(&crtc->commit_lock); + list_add(&state->crtcs[i].commit->commit_entry, + &crtc->commit_list); + spin_unlock(&crtc->commit_lock); - if (!plane) - continue; + state->crtcs[i].commit->event = NULL; + } + } + for_each_plane_in_state(state, plane, plane_state, i) { plane->state->state = state; - swap(state->plane_states[i], plane->state); + swap(state->planes[i].state, plane->state); plane->state->state = NULL; } } @@ -2409,7 +2798,7 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip); * This is the main helper function provided by the atomic helper framework for * implementing the legacy DPMS connector interface. It computes the new desired * ->active state for the corresponding CRTC (if the connector is enabled) and - * updates it. + * updates it. * * Returns: * Returns 0 on success, negative errno numbers on failure. @@ -2930,16 +3319,15 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state); * @red: red correction table * @green: green correction table * @blue: blue correction table - * @start: * @size: size of the tables * * Implements support for legacy gamma correction table for drivers * that support color management through the DEGAMMA_LUT/GAMMA_LUT * properties. 
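+ *
+ * Atomic drivers typically just wire this helper up as their legacy gamma
+ * hook (sketch; foo_ is an assumed driver prefix)::
+ *
+ *     static const struct drm_crtc_funcs foo_crtc_funcs = {
+ *             ...
+ *             .gamma_set = drm_atomic_helper_legacy_gamma_set,
+ *     };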
*/ -void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, - u16 *red, u16 *green, u16 *blue, - uint32_t start, uint32_t size) +int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, + u16 *red, u16 *green, u16 *blue, + uint32_t size) { struct drm_device *dev = crtc->dev; struct drm_mode_config *config = &dev->mode_config; @@ -2951,7 +3339,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, state = drm_atomic_state_alloc(crtc->dev); if (!state) - return; + return -ENOMEM; blob = drm_property_create_blob(dev, sizeof(struct drm_color_lut) * size, @@ -3002,7 +3390,7 @@ retry: drm_property_unreference_blob(blob); - return; + return 0; fail: if (ret == -EDEADLK) goto backoff; @@ -3010,7 +3398,7 @@ fail: drm_atomic_state_free(state); drm_property_unreference_blob(blob); - return; + return ret; backoff: drm_atomic_state_clear(state); drm_atomic_legacy_backoff(state); diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 50d0baa06db0..4153e8a193af 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -30,25 +30,36 @@ #include <drm/drmP.h> #include "drm_internal.h" +#include "drm_legacy.h" /** - * drm_getmagic - Get unique magic of a client - * @dev: DRM device to operate on - * @data: ioctl data containing the drm_auth object - * @file_priv: DRM file that performs the operation + * DOC: master and authentication * - * This looks up the unique magic of the passed client and returns it. If the - * client did not have a magic assigned, yet, a new one is registered. The magic - * is stored in the passed drm_auth object. + * struct &drm_master is used to track groups of clients with open + * primary/legacy device nodes. For every struct &drm_file which has at least + * once successfully become the device master (either through the SET_MASTER + * IOCTL, or implicitly through opening the primary device node when no one + * else is the current master at that time) there exists one &drm_master. + * This is noted in the is_master member of &drm_file. All other clients have + * just a pointer to the &drm_master they are associated with. * - * Returns: 0 on success, negative error code on failure. + * In addition only one &drm_master can be the current master for a &drm_device. + * It can be switched through the DROP_MASTER and SET_MASTER IOCTLs, or + * implicitly through closing/opening the primary device node. See also + * drm_is_current_master(). + * + * Clients can authenticate against the current master (if it matches their own) + * using the GETMAGIC and AUTHMAGIC IOCTLs. Together with exchanging masters, + * this allows controlled access to the device for an entire group of mutually + * trusted clients. */ + int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_auth *auth = data; int ret = 0; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev->master_mutex); if (!file_priv->magic) { ret = idr_alloc(&file_priv->master->magic_map, file_priv, 1, 0, GFP_KERNEL); @@ -56,23 +67,13 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) file_priv->magic = ret; } auth->magic = file_priv->magic; - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->master_mutex); DRM_DEBUG("%u\n", auth->magic); return ret < 0 ? 
ret : 0; } -/** - * drm_authmagic - Authenticate client with a magic - * @dev: DRM device to operate on - * @data: ioctl data containing the drm_auth object - * @file_priv: DRM file that performs the operation - * - * This looks up a DRM client by the passed magic and authenticates it. - * - * Returns: 0 on success, negative error code on failure. - */ int drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -81,13 +82,253 @@ int drm_authmagic(struct drm_device *dev, void *data, DRM_DEBUG("%u\n", auth->magic); - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev->master_mutex); file = idr_find(&file_priv->master->magic_map, auth->magic); if (file) { file->authenticated = 1; idr_replace(&file_priv->master->magic_map, NULL, auth->magic); } - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->master_mutex); return file ? 0 : -EINVAL; } + +static struct drm_master *drm_master_create(struct drm_device *dev) +{ + struct drm_master *master; + + master = kzalloc(sizeof(*master), GFP_KERNEL); + if (!master) + return NULL; + + kref_init(&master->refcount); + spin_lock_init(&master->lock.spinlock); + init_waitqueue_head(&master->lock.lock_queue); + idr_init(&master->magic_map); + master->dev = dev; + + return master; +} + +static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv, + bool new_master) +{ + int ret = 0; + + dev->master = drm_master_get(fpriv->master); + if (dev->driver->master_set) { + ret = dev->driver->master_set(dev, fpriv, new_master); + if (unlikely(ret != 0)) { + drm_master_put(&dev->master); + } + } + + return ret; +} + +static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) +{ + struct drm_master *old_master; + int ret; + + lockdep_assert_held_once(&dev->master_mutex); + + old_master = fpriv->master; + fpriv->master = drm_master_create(dev); + if (!fpriv->master) { + fpriv->master = old_master; + return -ENOMEM; + } + + if (dev->driver->master_create) { + ret = dev->driver->master_create(dev, fpriv->master); + if (ret) + goto out_err; + } + fpriv->is_master = 1; + fpriv->authenticated = 1; + + ret = drm_set_master(dev, fpriv, true); + if (ret) + goto out_err; + + if (old_master) + drm_master_put(&old_master); + + return 0; + +out_err: + /* drop references and restore old master on failure */ + drm_master_put(&fpriv->master); + fpriv->master = old_master; + + return ret; +} + +int drm_setmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret = 0; + + mutex_lock(&dev->master_mutex); + if (drm_is_current_master(file_priv)) + goto out_unlock; + + if (dev->master) { + ret = -EINVAL; + goto out_unlock; + } + + if (!file_priv->master) { + ret = -EINVAL; + goto out_unlock; + } + + if (!file_priv->is_master) { + ret = drm_new_set_master(dev, file_priv); + goto out_unlock; + } + + ret = drm_set_master(dev, file_priv, false); +out_unlock: + mutex_unlock(&dev->master_mutex); + return ret; +} + +static void drm_drop_master(struct drm_device *dev, + struct drm_file *fpriv) +{ + if (dev->driver->master_drop) + dev->driver->master_drop(dev, fpriv); + drm_master_put(&dev->master); +} + +int drm_dropmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret = -EINVAL; + + mutex_lock(&dev->master_mutex); + if (!drm_is_current_master(file_priv)) + goto out_unlock; + + if (!dev->master) + goto out_unlock; + + ret = 0; + drm_drop_master(dev, file_priv); +out_unlock: + mutex_unlock(&dev->master_mutex); + return ret; +} + +int drm_master_open(struct drm_file 
*file_priv) +{ + struct drm_device *dev = file_priv->minor->dev; + int ret = 0; + + /* if there is no current master make this fd the master, but do not + * create any master object for render clients */ + mutex_lock(&dev->master_mutex); + if (!dev->master) + ret = drm_new_set_master(dev, file_priv); + else + file_priv->master = drm_master_get(dev->master); + mutex_unlock(&dev->master_mutex); + + return ret; +} + +void drm_master_release(struct drm_file *file_priv) +{ + struct drm_device *dev = file_priv->minor->dev; + struct drm_master *master = file_priv->master; + + mutex_lock(&dev->master_mutex); + if (file_priv->magic) + idr_remove(&file_priv->master->magic_map, file_priv->magic); + + if (!drm_is_current_master(file_priv)) + goto out; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + /* + * Since the master is disappearing, so is the + * possibility to lock. + */ + mutex_lock(&dev->struct_mutex); + if (master->lock.hw_lock) { + if (dev->sigdata.lock == master->lock.hw_lock) + dev->sigdata.lock = NULL; + master->lock.hw_lock = NULL; + master->lock.file_priv = NULL; + wake_up_interruptible_all(&master->lock.lock_queue); + } + mutex_unlock(&dev->struct_mutex); + } + + if (dev->master == file_priv->master) + drm_drop_master(dev, file_priv); +out: + /* drop the master reference held by the file priv */ + if (file_priv->master) + drm_master_put(&file_priv->master); + mutex_unlock(&dev->master_mutex); +} + +/** + * drm_is_current_master - checks whether @fpriv is the current master + * @fpriv: DRM file private + * + * Checks whether @fpriv is the current master on its device. This decides + * whether a client is allowed to run DRM_MASTER IOCTLs. + * + * Most of the modern IOCTLs which require DRM_MASTER are for kernel modesetting + * - the current master is assumed to own the non-shareable display hardware. + */ +bool drm_is_current_master(struct drm_file *fpriv) +{ + return fpriv->is_master && fpriv->master == fpriv->minor->dev->master; +} +EXPORT_SYMBOL(drm_is_current_master); + +/** + * drm_master_get - reference a master pointer + * @master: struct &drm_master + * + * Increments the reference count of @master and returns a pointer to @master. + */ +struct drm_master *drm_master_get(struct drm_master *master) +{ + kref_get(&master->refcount); + return master; +} +EXPORT_SYMBOL(drm_master_get); + +static void drm_master_destroy(struct kref *kref) +{ + struct drm_master *master = container_of(kref, struct drm_master, refcount); + struct drm_device *dev = master->dev; + + if (dev->driver->master_destroy) + dev->driver->master_destroy(dev, master); + + drm_legacy_master_rmmaps(dev, master); + + idr_destroy(&master->magic_map); + kfree(master->unique); + kfree(master); +} + +/** + * drm_master_put - unreference and clear a master pointer + * @master: pointer to a pointer of struct &drm_master + * + * This decrements the &drm_master behind @master and sets it to NULL. + */ +void drm_master_put(struct drm_master **master) +{ + kref_put(&(*master)->refcount, drm_master_destroy); + *master = NULL; +} +EXPORT_SYMBOL(drm_master_put); diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index b3654404abd0..255543086590 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -36,7 +36,7 @@ * encoder chain. 
* * A bridge is always attached to a single &drm_encoder at a time, but can be - * either connected to it directly, or through an intermediate bridge: + * either connected to it directly, or through an intermediate bridge:: * * encoder ---> bridge B ---> bridge A * diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 9b34158c0f77..c3a12cd8bd0d 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -51,7 +51,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, */ if (!entry->map || map->type != entry->map->type || - entry->master != dev->primary->master) + entry->master != dev->master) continue; switch (map->type) { case _DRM_SHM: @@ -245,12 +245,12 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, map->offset = (unsigned long)map->handle; if (map->flags & _DRM_CONTAINS_LOCK) { /* Prevent a 2nd X Server from creating a 2nd lock */ - if (dev->primary->master->lock.hw_lock != NULL) { + if (dev->master->lock.hw_lock != NULL) { vfree(map->handle); kfree(map); return -EBUSY; } - dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ + dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */ } break; case _DRM_AGP: { @@ -356,7 +356,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, mutex_unlock(&dev->struct_mutex); if (!(map->flags & _DRM_DRIVER)) - list->master = dev->primary->master; + list->master = dev->master; *maplist = list; return 0; } diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 0e3cc66aa8b7..fd93e9c79d28 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -39,6 +39,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_modeset_lock.h> #include <drm/drm_atomic.h> +#include <drm/drm_auth.h> #include "drm_crtc_internal.h" #include "drm_internal.h" @@ -239,37 +240,6 @@ const char *drm_get_subpixel_order_name(enum subpixel_order order) } EXPORT_SYMBOL(drm_get_subpixel_order_name); -static char printable_char(int c) -{ - return isascii(c) && isprint(c) ? c : '?'; -} - -/** - * drm_get_format_name - return a string for drm fourcc format - * @format: format to compute name of - * - * Note that the buffer used by this function is globally shared and owned by - * the function itself. - * - * FIXME: This isn't really multithreading safe. - */ -const char *drm_get_format_name(uint32_t format) -{ - static char buf[32]; - - snprintf(buf, sizeof(buf), - "%c%c%c%c %s-endian (0x%08x)", - printable_char(format & 0xff), - printable_char((format >> 8) & 0xff), - printable_char((format >> 16) & 0xff), - printable_char((format >> 24) & 0x7f), - format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little", - format); - - return buf; -} -EXPORT_SYMBOL(drm_get_format_name); - /* * Internal function to assign a slot in the object idr and optionally * register the object into the idr. @@ -535,7 +505,7 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private); * * Cleanup framebuffer. This function is intended to be used from the drivers * ->destroy callback. It can also be used to clean up driver private - * framebuffers embedded into a larger structure. + * framebuffers embedded into a larger structure. 
* * Note that this function does not remove the fb from active usage - if it is * still used anywhere, hilarity can ensue since userspace could call getfb on @@ -639,6 +609,31 @@ static unsigned int drm_num_crtcs(struct drm_device *dev) return num; } +static int drm_crtc_register_all(struct drm_device *dev) +{ + struct drm_crtc *crtc; + int ret = 0; + + drm_for_each_crtc(crtc, dev) { + if (crtc->funcs->late_register) + ret = crtc->funcs->late_register(crtc); + if (ret) + return ret; + } + + return 0; +} + +static void drm_crtc_unregister_all(struct drm_device *dev) +{ + struct drm_crtc *crtc; + + drm_for_each_crtc(crtc, dev) { + if (crtc->funcs->early_unregister) + crtc->funcs->early_unregister(crtc); + } +} + /** * drm_crtc_init_with_planes - Initialise a new CRTC object with * specified primary and cursor planes. @@ -669,6 +664,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, crtc->dev = dev; crtc->funcs = funcs; + INIT_LIST_HEAD(&crtc->commit_list); + spin_lock_init(&crtc->commit_lock); + drm_modeset_lock_init(&crtc->mutex); ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); if (ret) @@ -692,7 +690,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, crtc->base.properties = &crtc->properties; list_add_tail(&crtc->head, &config->crtc_list); - config->num_crtc++; + crtc->index = config->num_crtc++; crtc->primary = primary; crtc->cursor = cursor; @@ -722,6 +720,11 @@ void drm_crtc_cleanup(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; + /* Note that the crtc_list is considered to be static; should we + * remove the drm_crtc at runtime we would have to decrement all + * the indices on the drm_crtc after us in the crtc_list. + */ + kfree(crtc->gamma_store); crtc->gamma_store = NULL; @@ -741,29 +744,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_crtc_cleanup); -/** - * drm_crtc_index - find the index of a registered CRTC - * @crtc: CRTC to find index for - * - * Given a registered CRTC, return the index of that CRTC within a DRM - * device's list of CRTCs. - */ -unsigned int drm_crtc_index(struct drm_crtc *crtc) -{ - unsigned int index = 0; - struct drm_crtc *tmp; - - drm_for_each_crtc(tmp, crtc->dev) { - if (tmp == crtc) - return index; - - index++; - } - - BUG(); -} -EXPORT_SYMBOL(drm_crtc_index); - /* * drm_mode_remove - remove and free a mode * @connector: connector list to modify @@ -984,6 +964,12 @@ void drm_connector_cleanup(struct drm_connector *connector) struct drm_device *dev = connector->dev; struct drm_display_mode *mode, *t; + /* The connector should have been removed from userspace long before + * it is finally destroyed. 
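+ * If it is still registered, WARN about the driver bug but fall back
+ * to unregistering it here so the sysfs and debugfs entries are not
+ * leaked.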
+ */ + if (WARN_ON(connector->registered)) + drm_connector_unregister(connector); + if (connector->tile_group) { drm_mode_put_tile_group(dev, connector->tile_group); connector->tile_group = NULL; @@ -1030,19 +1016,34 @@ int drm_connector_register(struct drm_connector *connector) { int ret; + if (connector->registered) + return 0; + ret = drm_sysfs_connector_add(connector); if (ret) return ret; ret = drm_debugfs_connector_add(connector); if (ret) { - drm_sysfs_connector_remove(connector); - return ret; + goto err_sysfs; + } + + if (connector->funcs->late_register) { + ret = connector->funcs->late_register(connector); + if (ret) + goto err_debugfs; } drm_mode_object_register(connector->dev, &connector->base); + connector->registered = true; return 0; + +err_debugfs: + drm_debugfs_connector_remove(connector); +err_sysfs: + drm_sysfs_connector_remove(connector); + return ret; } EXPORT_SYMBOL(drm_connector_register); @@ -1054,8 +1055,16 @@ EXPORT_SYMBOL(drm_connector_register); */ void drm_connector_unregister(struct drm_connector *connector) { + if (!connector->registered) + return; + + if (connector->funcs->early_unregister) + connector->funcs->early_unregister(connector); + drm_sysfs_connector_remove(connector); drm_debugfs_connector_remove(connector); + + connector->registered = false; } EXPORT_SYMBOL(drm_connector_unregister); @@ -1064,9 +1073,9 @@ EXPORT_SYMBOL(drm_connector_unregister); * @dev: drm device * * This function registers all connectors in sysfs and other places so that - * userspace can start to access them. Drivers can call it after calling - * drm_dev_register() to complete the device registration, if they don't call - * drm_connector_register() on each connector individually. + * userspace can start to access them. drm_connector_register_all() is called + * automatically from drm_dev_register() to complete the device registration, + * if they don't call drm_connector_register() on each connector individually. * * When a device is unplugged and should be removed from userspace access, * call drm_connector_unregister_all(), which is the inverse of this @@ -1119,6 +1128,31 @@ void drm_connector_unregister_all(struct drm_device *dev) } EXPORT_SYMBOL(drm_connector_unregister_all); +static int drm_encoder_register_all(struct drm_device *dev) +{ + struct drm_encoder *encoder; + int ret = 0; + + drm_for_each_encoder(encoder, dev) { + if (encoder->funcs->late_register) + ret = encoder->funcs->late_register(encoder); + if (ret) + return ret; + } + + return 0; +} + +static void drm_encoder_unregister_all(struct drm_device *dev) +{ + struct drm_encoder *encoder; + + drm_for_each_encoder(encoder, dev) { + if (encoder->funcs->early_unregister) + encoder->funcs->early_unregister(encoder); + } +} + /** * drm_encoder_init - Init a preallocated encoder * @dev: drm device @@ -1166,7 +1200,7 @@ int drm_encoder_init(struct drm_device *dev, } list_add_tail(&encoder->head, &dev->mode_config.encoder_list); - dev->mode_config.num_encoder++; + encoder->index = dev->mode_config.num_encoder++; out_put: if (ret) @@ -1180,29 +1214,6 @@ out_unlock: EXPORT_SYMBOL(drm_encoder_init); /** - * drm_encoder_index - find the index of a registered encoder - * @encoder: encoder to find index for - * - * Given a registered encoder, return the index of that encoder within a DRM - * device's list of encoders. 
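These late_register/early_unregister hooks give a driver a well-defined window in which the connector's sysfs and debugfs nodes exist, so driver-private userspace interfaces can hang off them. A minimal sketch of a driver using the hooks; the foo_* names, the to_foo_connector() upcast and the extra debugfs file are hypothetical::

	static int foo_connector_late_register(struct drm_connector *connector)
	{
		struct foo_connector *fc = to_foo_connector(connector);

		/* connector->debugfs_entry is valid once this hook runs */
		fc->state_file = debugfs_create_file("foo_state", S_IRUGO,
						     connector->debugfs_entry,
						     fc, &foo_state_fops);
		return 0;
	}

	static void foo_connector_early_unregister(struct drm_connector *connector)
	{
		debugfs_remove(to_foo_connector(connector)->state_file);
	}

	static const struct drm_connector_funcs foo_connector_funcs = {
		.dpms = drm_helper_connector_dpms,
		.fill_modes = drm_helper_probe_single_connector_modes,
		.destroy = drm_connector_cleanup,
		.late_register = foo_connector_late_register,
		.early_unregister = foo_connector_early_unregister,
	};
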
- */ -unsigned int drm_encoder_index(struct drm_encoder *encoder) -{ - unsigned int index = 0; - struct drm_encoder *tmp; - - drm_for_each_encoder(tmp, encoder->dev) { - if (tmp == encoder) - return index; - - index++; - } - - BUG(); -} -EXPORT_SYMBOL(drm_encoder_index); - -/** * drm_encoder_cleanup - cleans up an initialised encoder * @encoder: encoder to cleanup * @@ -1212,6 +1223,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; + /* Note that the encoder_list is considered to be static; should we + * remove the drm_encoder at runtime we would have to decrement all + * the indices on the drm_encoder after us in the encoder_list. + */ + drm_modeset_lock_all(dev); drm_mode_object_unregister(dev, &encoder->base); kfree(encoder->name); @@ -1300,7 +1316,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, plane->type = type; list_add_tail(&plane->head, &config->plane_list); - config->num_total_plane++; + plane->index = config->num_total_plane++; if (plane->type == DRM_PLANE_TYPE_OVERLAY) config->num_overlay_plane++; @@ -1325,6 +1341,31 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, } EXPORT_SYMBOL(drm_universal_plane_init); +static int drm_plane_register_all(struct drm_device *dev) +{ + struct drm_plane *plane; + int ret = 0; + + drm_for_each_plane(plane, dev) { + if (plane->funcs->late_register) + ret = plane->funcs->late_register(plane); + if (ret) + return ret; + } + + return 0; +} + +static void drm_plane_unregister_all(struct drm_device *dev) +{ + struct drm_plane *plane; + + drm_for_each_plane(plane, dev) { + if (plane->funcs->early_unregister) + plane->funcs->early_unregister(plane); + } +} + /** * drm_plane_init - Initialize a legacy plane * @dev: DRM device @@ -1374,6 +1415,11 @@ void drm_plane_cleanup(struct drm_plane *plane) BUG_ON(list_empty(&plane->head)); + /* Note that the plane_list is considered to be static; should we + * remove the drm_plane at runtime we would have to decrement all + * the indices on the drm_plane after us in the plane_list. + */ + list_del(&plane->head); dev->mode_config.num_total_plane--; if (plane->type == DRM_PLANE_TYPE_OVERLAY) @@ -1391,29 +1437,6 @@ void drm_plane_cleanup(struct drm_plane *plane) EXPORT_SYMBOL(drm_plane_cleanup); /** - * drm_plane_index - find the index of a registered plane - * @plane: plane to find index for - * - * Given a registered plane, return the index of that CRTC within a DRM - * device's list of planes. 
- */ -unsigned int drm_plane_index(struct drm_plane *plane) -{ - unsigned int index = 0; - struct drm_plane *tmp; - - drm_for_each_plane(tmp, plane->dev) { - if (tmp == plane) - return index; - - index++; - } - - BUG(); -} -EXPORT_SYMBOL(drm_plane_index); - -/** * drm_plane_from_index - find the registered plane at an index * @dev: DRM device * @idx: index of registered plane to find for @@ -1425,13 +1448,11 @@ struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx) { struct drm_plane *plane; - unsigned int i = 0; - drm_for_each_plane(plane, dev) { - if (i == idx) + drm_for_each_plane(plane, dev) + if (idx == plane->index) return plane; - i++; - } + return NULL; } EXPORT_SYMBOL(drm_plane_from_index); @@ -1467,6 +1488,46 @@ void drm_plane_force_disable(struct drm_plane *plane) } EXPORT_SYMBOL(drm_plane_force_disable); +int drm_modeset_register_all(struct drm_device *dev) +{ + int ret; + + ret = drm_plane_register_all(dev); + if (ret) + goto err_plane; + + ret = drm_crtc_register_all(dev); + if (ret) + goto err_crtc; + + ret = drm_encoder_register_all(dev); + if (ret) + goto err_encoder; + + ret = drm_connector_register_all(dev); + if (ret) + goto err_connector; + + return 0; + +err_connector: + drm_encoder_unregister_all(dev); +err_encoder: + drm_crtc_unregister_all(dev); +err_crtc: + drm_plane_unregister_all(dev); +err_plane: + return ret; +} + +void drm_modeset_unregister_all(struct drm_device *dev) +{ + drm_connector_unregister_all(dev); + drm_encoder_unregister_all(dev); + drm_crtc_unregister_all(dev); + drm_plane_unregister_all(dev); +} + static int drm_mode_create_standard_properties(struct drm_device *dev) { struct drm_property *prop; @@ -2975,6 +3036,8 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); return PTR_ERR(fb); } + fb->hot_x = req->hot_x; + fb->hot_y = req->hot_y; } else { fb = NULL; } @@ -3581,7 +3644,7 @@ int drm_mode_getfb(struct drm_device *dev, r->bpp = fb->bits_per_pixel; r->pitch = fb->pitches[0]; if (fb->funcs->create_handle) { - if (file_priv->is_master || capable(CAP_SYS_ADMIN) || + if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) || drm_is_control_client(file_priv)) { ret = fb->funcs->create_handle(fb, file_priv, &r->handle); @@ -3738,6 +3801,13 @@ void drm_fb_release(struct drm_file *priv) } } +static bool drm_property_type_valid(struct drm_property *property) +{ + if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) + return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE); + return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE); +} + /** * drm_property_create - create a new property type * @dev: drm device @@ -5138,6 +5208,9 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder); int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, int gamma_size) { + uint16_t *r_base, *g_base, *b_base; + int i; + crtc->gamma_size = gamma_size; crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3, @@ -5147,6 +5220,16 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, return -ENOMEM; } + r_base = crtc->gamma_store; + g_base = r_base + gamma_size; + b_base = g_base + gamma_size; + for (i = 0; i < gamma_size; i++) { + r_base[i] = i << 8; + g_base[i] = i << 8; + b_base[i] = i << 8; + } + + return 0; } EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); @@ -5214,7 +5297,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, goto out; } - crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); + ret = crtc->funcs->gamma_set(crtc, r_base, 
g_base, b_base, crtc->gamma_size); out: drm_modeset_unlock_all(dev); @@ -5544,264 +5627,6 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, } /** - * drm_fb_get_bpp_depth - get the bpp/depth values for format - * @format: pixel format (DRM_FORMAT_*) - * @depth: storage for the depth value - * @bpp: storage for the bpp value - * - * This only supports RGB formats here for compat with code that doesn't use - * pixel formats directly yet. - */ -void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, - int *bpp) -{ - switch (format) { - case DRM_FORMAT_C8: - case DRM_FORMAT_RGB332: - case DRM_FORMAT_BGR233: - *depth = 8; - *bpp = 8; - break; - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_XBGR1555: - case DRM_FORMAT_RGBX5551: - case DRM_FORMAT_BGRX5551: - case DRM_FORMAT_ARGB1555: - case DRM_FORMAT_ABGR1555: - case DRM_FORMAT_RGBA5551: - case DRM_FORMAT_BGRA5551: - *depth = 15; - *bpp = 16; - break; - case DRM_FORMAT_RGB565: - case DRM_FORMAT_BGR565: - *depth = 16; - *bpp = 16; - break; - case DRM_FORMAT_RGB888: - case DRM_FORMAT_BGR888: - *depth = 24; - *bpp = 24; - break; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_RGBX8888: - case DRM_FORMAT_BGRX8888: - *depth = 24; - *bpp = 32; - break; - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_RGBX1010102: - case DRM_FORMAT_BGRX1010102: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_RGBA1010102: - case DRM_FORMAT_BGRA1010102: - *depth = 30; - *bpp = 32; - break; - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_RGBA8888: - case DRM_FORMAT_BGRA8888: - *depth = 32; - *bpp = 32; - break; - default: - DRM_DEBUG_KMS("unsupported pixel format %s\n", - drm_get_format_name(format)); - *depth = 0; - *bpp = 0; - break; - } -} -EXPORT_SYMBOL(drm_fb_get_bpp_depth); - -/** - * drm_format_num_planes - get the number of planes for format - * @format: pixel format (DRM_FORMAT_*) - * - * Returns: - * The number of planes used by the specified pixel format. - */ -int drm_format_num_planes(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - case DRM_FORMAT_YUV411: - case DRM_FORMAT_YVU411: - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - case DRM_FORMAT_YUV422: - case DRM_FORMAT_YVU422: - case DRM_FORMAT_YUV444: - case DRM_FORMAT_YVU444: - return 3; - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - case DRM_FORMAT_NV16: - case DRM_FORMAT_NV61: - case DRM_FORMAT_NV24: - case DRM_FORMAT_NV42: - return 2; - default: - return 1; - } -} -EXPORT_SYMBOL(drm_format_num_planes); - -/** - * drm_format_plane_cpp - determine the bytes per pixel value - * @format: pixel format (DRM_FORMAT_*) - * @plane: plane index - * - * Returns: - * The bytes per pixel value for the specified plane. - */ -int drm_format_plane_cpp(uint32_t format, int plane) -{ - unsigned int depth; - int bpp; - - if (plane >= drm_format_num_planes(format)) - return 0; - - switch (format) { - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - return 2; - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - case DRM_FORMAT_NV16: - case DRM_FORMAT_NV61: - case DRM_FORMAT_NV24: - case DRM_FORMAT_NV42: - return plane ? 
2 : 1; - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - case DRM_FORMAT_YUV411: - case DRM_FORMAT_YVU411: - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - case DRM_FORMAT_YUV422: - case DRM_FORMAT_YVU422: - case DRM_FORMAT_YUV444: - case DRM_FORMAT_YVU444: - return 1; - default: - drm_fb_get_bpp_depth(format, &depth, &bpp); - return bpp >> 3; - } -} -EXPORT_SYMBOL(drm_format_plane_cpp); - -/** - * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor - * @format: pixel format (DRM_FORMAT_*) - * - * Returns: - * The horizontal chroma subsampling factor for the - * specified pixel format. - */ -int drm_format_horz_chroma_subsampling(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_YUV411: - case DRM_FORMAT_YVU411: - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - return 4; - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - case DRM_FORMAT_NV16: - case DRM_FORMAT_NV61: - case DRM_FORMAT_YUV422: - case DRM_FORMAT_YVU422: - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - return 2; - default: - return 1; - } -} -EXPORT_SYMBOL(drm_format_horz_chroma_subsampling); - -/** - * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor - * @format: pixel format (DRM_FORMAT_*) - * - * Returns: - * The vertical chroma subsampling factor for the - * specified pixel format. - */ -int drm_format_vert_chroma_subsampling(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - return 4; - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - return 2; - default: - return 1; - } -} -EXPORT_SYMBOL(drm_format_vert_chroma_subsampling); - -/** - * drm_format_plane_width - width of the plane given the first plane - * @width: width of the first plane - * @format: pixel format - * @plane: plane index - * - * Returns: - * The width of @plane, given that the width of the first plane is @width. - */ -int drm_format_plane_width(int width, uint32_t format, int plane) -{ - if (plane >= drm_format_num_planes(format)) - return 0; - - if (plane == 0) - return width; - - return width / drm_format_horz_chroma_subsampling(format); -} -EXPORT_SYMBOL(drm_format_plane_width); - -/** - * drm_format_plane_height - height of the plane given the first plane - * @height: height of the first plane - * @format: pixel format - * @plane: plane index - * - * Returns: - * The height of @plane, given that the height of the first plane is @height. - */ -int drm_format_plane_height(int height, uint32_t format, int plane) -{ - if (plane >= drm_format_num_planes(format)) - return 0; - - if (plane == 0) - return height; - - return height / drm_format_vert_chroma_subsampling(format); -} -EXPORT_SYMBOL(drm_format_plane_height); - -/** * drm_rotation_simplify() - Try to simplify the rotation * @rotation: Rotation to be simplified * @supported_rotations: Supported rotations @@ -6064,3 +5889,48 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, return tg; } EXPORT_SYMBOL(drm_mode_create_tile_group); + +/** + * drm_crtc_enable_color_mgmt - enable color management properties + * @crtc: DRM CRTC + * @degamma_lut_size: the size of the degamma lut (before CSC) + * @has_ctm: whether to attach ctm_property for CSC matrix + * @gamma_lut_size: the size of the gamma lut (after CSC) + * + * This function lets the driver enable the color correction + * properties on a CRTC. 
This includes 3 degamma, csc and gamma + * properties that userspace can set and 2 size properties to inform + * the userspace of the lut sizes. Each of the properties are + * optional. The gamma and degamma properties are only attached if + * their size is not 0 and ctm_property is only attached if has_ctm is + * true. + */ +void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, + uint degamma_lut_size, + bool has_ctm, + uint gamma_lut_size) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *config = &dev->mode_config; + + if (degamma_lut_size) { + drm_object_attach_property(&crtc->base, + config->degamma_lut_property, 0); + drm_object_attach_property(&crtc->base, + config->degamma_lut_size_property, + degamma_lut_size); + } + + if (has_ctm) + drm_object_attach_property(&crtc->base, + config->ctm_property, 0); + + if (gamma_lut_size) { + drm_object_attach_property(&crtc->base, + config->gamma_lut_property, 0); + drm_object_attach_property(&crtc->base, + config->gamma_lut_size_property, + gamma_lut_size); + } +} +EXPORT_SYMBOL(drm_crtc_enable_color_mgmt); diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index a6e42433ef0e..d61591274ff6 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -232,6 +232,9 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev) */ void drm_helper_disable_unused_functions(struct drm_device *dev) { + if (drm_core_check_feature(dev, DRIVER_ATOMIC)) + DRM_ERROR("Called for atomic driver, this is not what you want.\n"); + drm_modeset_lock_all(dev); __drm_helper_disable_unused_functions(dev); drm_modeset_unlock_all(dev); @@ -1121,36 +1124,3 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, return drm_plane_helper_commit(plane, plane_state, old_fb); } EXPORT_SYMBOL(drm_helper_crtc_mode_set_base); - -/** - * drm_helper_crtc_enable_color_mgmt - enable color management properties - * @crtc: DRM CRTC - * @degamma_lut_size: the size of the degamma lut (before CSC) - * @gamma_lut_size: the size of the gamma lut (after CSC) - * - * This function lets the driver enable the color correction properties on a - * CRTC. This includes 3 degamma, csc and gamma properties that userspace can - * set and 2 size properties to inform the userspace of the lut sizes. - */ -void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc, - int degamma_lut_size, - int gamma_lut_size) -{ - struct drm_device *dev = crtc->dev; - struct drm_mode_config *config = &dev->mode_config; - - drm_object_attach_property(&crtc->base, - config->degamma_lut_property, 0); - drm_object_attach_property(&crtc->base, - config->ctm_property, 0); - drm_object_attach_property(&crtc->base, - config->gamma_lut_property, 0); - - drm_object_attach_property(&crtc->base, - config->degamma_lut_size_property, - degamma_lut_size); - drm_object_attach_property(&crtc->base, - config->gamma_lut_size_property, - gamma_lut_size); -} -EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt); diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index a78c138282ea..47a500b90fd7 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -31,14 +31,100 @@ * and are not exported to drivers. 
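Two related pieces of the color pipeline meet here: the legacy ->gamma_set hook has lost its start-offset argument and now returns an error code (see the gamma ioctl hunk above), and drm_crtc_enable_color_mgmt() attaches each property only when the hardware supports it. A sketch of both halves in a driver; the LUT sizes, foo_* names and register write helper are illustrative, not taken from any real driver::

	static int foo_crtc_gamma_set(struct drm_crtc *crtc,
				      u16 *red, u16 *green, u16 *blue,
				      uint32_t size)
	{
		unsigned int i;

		for (i = 0; i < size; i++)
			/* foo_write_lut_entry() stands in for the hw access */
			foo_write_lut_entry(crtc, i, red[i] >> 8,
					    green[i] >> 8, blue[i] >> 8);
		return 0;
	}

	/* in the driver's CRTC init path: */
	drm_mode_crtc_set_gamma_size(crtc, 256);	/* legacy table, now linear by default */
	drm_crtc_enable_color_mgmt(crtc, 256, true, 256);

Hardware without a CSC block would simply pass has_ctm = false and size 0 for whichever LUT it lacks, and the corresponding properties are never attached.
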
*/ + +/* drm_crtc.c */ +void drm_connector_ida_init(void); +void drm_connector_ida_destroy(void); int drm_mode_object_get(struct drm_device *dev, struct drm_mode_object *obj, uint32_t obj_type); void drm_mode_object_unregister(struct drm_device *dev, struct drm_mode_object *object); +bool drm_property_change_valid_get(struct drm_property *property, + uint64_t value, + struct drm_mode_object **ref); +void drm_property_change_valid_put(struct drm_property *property, + struct drm_mode_object *ref); + +int drm_plane_check_pixel_format(const struct drm_plane *plane, + u32 format); +int drm_crtc_check_viewport(const struct drm_crtc *crtc, + int x, int y, + const struct drm_display_mode *mode, + const struct drm_framebuffer *fb); + +void drm_fb_release(struct drm_file *file_priv); +void drm_property_destroy_user_blobs(struct drm_device *dev, + struct drm_file *file_priv); + +/* dumb buffer support IOCTLs */ +int drm_mode_create_dumb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +/* framebuffer IOCTLs */ +extern int drm_mode_addfb(struct drm_device *dev, + void *data, struct drm_file *file_priv); +extern int drm_mode_addfb2(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_rmfb(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getfb(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_dirtyfb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +/* IOCTLs */ +int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +int drm_mode_getresources(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getplane_res(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_mode_getcrtc(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getconnector(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_setcrtc(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getplane(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_setplane(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_cursor_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_cursor2_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getproperty_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_createblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_destroyblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_connector_property_set_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getencoder(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_gamma_get_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_gamma_set_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +int 
drm_mode_page_flip_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); /* drm_atomic.c */ int drm_atomic_get_property(struct drm_mode_object *obj, - struct drm_property *property, uint64_t *val); + struct drm_property *property, uint64_t *val); int drm_mode_atomic_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int drm_modeset_register_all(struct drm_device *dev); +void drm_modeset_unregister_all(struct drm_device *dev); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 3bcf8e6a85b3..fa10cef2ba37 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -46,11 +46,8 @@ static const struct drm_info_list drm_debugfs_list[] = { {"name", drm_name_info, 0}, - {"vm", drm_vm_info, 0}, {"clients", drm_clients_info, 0}, - {"bufs", drm_bufs_info, 0}, {"gem_names", drm_gem_name_info, DRIVER_GEM}, - {"vma", drm_vma_info, 0}, }; #define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list) diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index eeaf5a7c3aa7..091053e995e5 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -708,8 +708,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, memset(&msg, 0, sizeof(msg)); - mutex_lock(&aux->hw_mutex); - for (i = 0; i < num; i++) { msg.address = msgs[i].addr; drm_dp_i2c_msg_set_request(&msg, &msgs[i]); @@ -764,8 +762,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, msg.size = 0; (void)drm_dp_i2c_do_msg(aux, &msg); - mutex_unlock(&aux->hw_mutex); - return err; } @@ -774,22 +770,64 @@ static const struct i2c_algorithm drm_dp_i2c_algo = { .master_xfer = drm_dp_i2c_xfer, }; +static struct drm_dp_aux *i2c_to_aux(struct i2c_adapter *i2c) +{ + return container_of(i2c, struct drm_dp_aux, ddc); +} + +static void lock_bus(struct i2c_adapter *i2c, unsigned int flags) +{ + mutex_lock(&i2c_to_aux(i2c)->hw_mutex); +} + +static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags) +{ + return mutex_trylock(&i2c_to_aux(i2c)->hw_mutex); +} + +static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags) +{ + mutex_unlock(&i2c_to_aux(i2c)->hw_mutex); +} + /** - * drm_dp_aux_register() - initialise and register aux channel + * drm_dp_aux_init() - minimally initialise an aux channel * @aux: DisplayPort AUX channel * - * Returns 0 on success or a negative error code on failure. + * If you need to use the drm_dp_aux's i2c adapter prior to registering it + * with the outside world, call drm_dp_aux_init() first. You must still + * call drm_dp_aux_register() once the connector has been registered to + * allow userspace access to the auxiliary DP channel. */ -int drm_dp_aux_register(struct drm_dp_aux *aux) +void drm_dp_aux_init(struct drm_dp_aux *aux) { - int ret; - mutex_init(&aux->hw_mutex); aux->ddc.algo = &drm_dp_i2c_algo; aux->ddc.algo_data = aux; aux->ddc.retries = 3; + aux->ddc.lock_bus = lock_bus; + aux->ddc.trylock_bus = trylock_bus; + aux->ddc.unlock_bus = unlock_bus; +} +EXPORT_SYMBOL(drm_dp_aux_init); + +/** + * drm_dp_aux_register() - initialise and register aux channel + * @aux: DisplayPort AUX channel + * + * Automatically calls drm_dp_aux_init() if this hasn't been done yet. + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int drm_dp_aux_register(struct drm_dp_aux *aux) +{ + int ret; + + if (!aux->ddc.algo) + drm_dp_aux_init(aux); + aux->ddc.class = I2C_CLASS_DDC; aux->ddc.owner = THIS_MODULE; aux->ddc.dev.parent = aux->dev; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index bff89226a344..aead9ffcbe29 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -34,8 +34,10 @@ #include <linux/slab.h> #include <drm/drmP.h> #include <drm/drm_core.h> +#include "drm_crtc_internal.h" #include "drm_legacy.h" #include "drm_internal.h" +#include "drm_crtc_internal.h" /* * drm_debug: Enable debug output. @@ -93,114 +95,6 @@ void drm_ut_debug_printk(const char *function_name, const char *format, ...) } EXPORT_SYMBOL(drm_ut_debug_printk); -struct drm_master *drm_master_create(struct drm_minor *minor) -{ - struct drm_master *master; - - master = kzalloc(sizeof(*master), GFP_KERNEL); - if (!master) - return NULL; - - kref_init(&master->refcount); - spin_lock_init(&master->lock.spinlock); - init_waitqueue_head(&master->lock.lock_queue); - idr_init(&master->magic_map); - master->minor = minor; - - return master; -} - -struct drm_master *drm_master_get(struct drm_master *master) -{ - kref_get(&master->refcount); - return master; -} -EXPORT_SYMBOL(drm_master_get); - -static void drm_master_destroy(struct kref *kref) -{ - struct drm_master *master = container_of(kref, struct drm_master, refcount); - struct drm_device *dev = master->minor->dev; - - if (dev->driver->master_destroy) - dev->driver->master_destroy(dev, master); - - drm_legacy_master_rmmaps(dev, master); - - idr_destroy(&master->magic_map); - kfree(master->unique); - kfree(master); -} - -void drm_master_put(struct drm_master **master) -{ - kref_put(&(*master)->refcount, drm_master_destroy); - *master = NULL; -} -EXPORT_SYMBOL(drm_master_put); - -int drm_setmaster_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - int ret = 0; - - mutex_lock(&dev->master_mutex); - if (file_priv->is_master) - goto out_unlock; - - if (file_priv->minor->master) { - ret = -EINVAL; - goto out_unlock; - } - - if (!file_priv->master) { - ret = -EINVAL; - goto out_unlock; - } - - if (!file_priv->allowed_master) { - ret = drm_new_set_master(dev, file_priv); - goto out_unlock; - } - - file_priv->minor->master = drm_master_get(file_priv->master); - file_priv->is_master = 1; - if (dev->driver->master_set) { - ret = dev->driver->master_set(dev, file_priv, false); - if (unlikely(ret != 0)) { - file_priv->is_master = 0; - drm_master_put(&file_priv->minor->master); - } - } - -out_unlock: - mutex_unlock(&dev->master_mutex); - return ret; -} - -int drm_dropmaster_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - int ret = -EINVAL; - - mutex_lock(&dev->master_mutex); - if (!file_priv->is_master) - goto out_unlock; - - if (!file_priv->minor->master) - goto out_unlock; - - ret = 0; - if (dev->driver->master_drop) - dev->driver->master_drop(dev, file_priv, false); - drm_master_put(&file_priv->minor->master); - file_priv->is_master = 0; - -out_unlock: - mutex_unlock(&dev->master_mutex); - return ret; -} - /* * DRM Minors * A DRM device can provide several char-dev interfaces on the DRM-Major. Each @@ -405,10 +299,9 @@ void drm_minor_release(struct drm_minor *minor) * callbacks implemented by the driver. 
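In practice the split means the AUX channel is usable for in-kernel DPCD and EDID traffic as soon as the transfer hook is wired up, while the userspace-visible i2c device only appears with the connector. A sketch of the intended ordering; the foo_* names are hypothetical::

	/* at probe time, before any connector exists: */
	foo->aux.name = "foo DP AUX";
	foo->aux.dev = dev->dev;
	foo->aux.transfer = foo_aux_transfer;	/* hypothetical hw hook */
	drm_dp_aux_init(&foo->aux);

	/* the channel already works for the driver itself: */
	drm_dp_dpcd_read(&foo->aux, DP_DPCD_REV, dpcd, sizeof(dpcd));

	/* later, from the connector's ->late_register hook: */
	return drm_dp_aux_register(&foo->aux);
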
The driver then needs to initialize all * the various subsystems for the drm device like memory management, vblank * handling, modesetting support and intial output configuration plus obviously - * initialize all the corresponding hardware bits. An important part of this is - * also calling drm_dev_set_unique() to set the userspace-visible unique name of - * this device instance. Finally when everything is up and running and ready for - * userspace the device instance can be published using drm_dev_register(). + * initialize all the corresponding hardware bits. Finally when everything is up + * and running and ready for userspace the device instance can be published + * using drm_dev_register(). * * There is also deprecated support for initalizing device instances using * bus-specific helpers and the ->load() callback. But due to @@ -430,6 +323,14 @@ void drm_minor_release(struct drm_minor *minor) * dev_priv field of &drm_device. */ +static int drm_dev_set_unique(struct drm_device *dev, const char *name) +{ + kfree(dev->unique); + dev->unique = kstrdup(name, GFP_KERNEL); + + return dev->unique ? 0 : -ENOMEM; +} + /** * drm_put_dev - Unregister and release a DRM device * @dev: DRM device @@ -549,11 +450,12 @@ static void drm_fs_inode_free(struct inode *inode) } /** - * drm_dev_alloc - Allocate new DRM device - * @driver: DRM driver to allocate device for + * drm_dev_init - Initialise new DRM device + * @dev: DRM device + * @driver: DRM driver * @parent: Parent device object * - * Allocate and initialize a new DRM device. No device registration is done. + * Initialize a new DRM device. No device registration is done. * Call drm_dev_register() to advertice the device to user space and register it * with other core subsystems. This should be done last in the device * initialization sequence to make sure userspace can't access an inconsistent @@ -564,19 +466,18 @@ static void drm_fs_inode_free(struct inode *inode) * * Note that for purely virtual devices @parent can be NULL. * + * Drivers that do not want to allocate their own device struct + * embedding struct &drm_device can call drm_dev_alloc() instead. + * * RETURNS: - * Pointer to new DRM device, or NULL if out of memory. + * 0 on success, or error code on failure. */ -struct drm_device *drm_dev_alloc(struct drm_driver *driver, - struct device *parent) +int drm_dev_init(struct drm_device *dev, + struct drm_driver *driver, + struct device *parent) { - struct drm_device *dev; int ret; - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return NULL; - kref_init(&dev->ref); dev->dev = parent; dev->driver = driver; @@ -605,8 +506,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL); if (ret) goto err_minors; - - WARN_ON(driver->suspend || driver->resume); } if (drm_core_check_feature(dev, DRIVER_RENDER)) { @@ -619,7 +518,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, if (ret) goto err_minors; - if (drm_ht_create(&dev->map_hash, 12)) + ret = drm_ht_create(&dev->map_hash, 12); + if (ret) goto err_minors; drm_legacy_ctxbitmap_init(dev); @@ -632,13 +532,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, } } - if (parent) { - ret = drm_dev_set_unique(dev, dev_name(parent)); - if (ret) - goto err_setunique; - } + /* Use the parent device name as DRM device unique identifier, but fall + * back to the driver name for virtual devices like vgem. */ + ret = drm_dev_set_unique(dev, parent ? 
dev_name(parent) : driver->name); + if (ret) + goto err_setunique; - return dev; + return 0; err_setunique: if (drm_core_check_feature(dev, DRIVER_GEM)) @@ -653,8 +553,49 @@ err_minors: drm_fs_inode_free(dev->anon_inode); err_free: mutex_destroy(&dev->master_mutex); - kfree(dev); - return NULL; + return ret; +} +EXPORT_SYMBOL(drm_dev_init); + +/** + * drm_dev_alloc - Allocate new DRM device + * @driver: DRM driver to allocate device for + * @parent: Parent device object + * + * Allocate and initialize a new DRM device. No device registration is done. + * Call drm_dev_register() to advertice the device to user space and register it + * with other core subsystems. This should be done last in the device + * initialization sequence to make sure userspace can't access an inconsistent + * state. + * + * The initial ref-count of the object is 1. Use drm_dev_ref() and + * drm_dev_unref() to take and drop further ref-counts. + * + * Note that for purely virtual devices @parent can be NULL. + * + * Drivers that wish to subclass or embed struct &drm_device into their + * own struct should look at using drm_dev_init() instead. + * + * RETURNS: + * Pointer to new DRM device, or NULL if out of memory. + */ +struct drm_device *drm_dev_alloc(struct drm_driver *driver, + struct device *parent) +{ + struct drm_device *dev; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return NULL; + + ret = drm_dev_init(dev, driver, parent); + if (ret) { + kfree(dev); + return NULL; + } + + return dev; } EXPORT_SYMBOL(drm_dev_alloc); @@ -718,11 +659,7 @@ EXPORT_SYMBOL(drm_dev_unref); * * Register the DRM device @dev with the system, advertise device to user-space * and start normal device operation. @dev must be allocated via drm_dev_alloc() - * previously. Right after drm_dev_register() the driver should call - * drm_connector_register_all() to register all connectors in sysfs. This is - * a separate call for backward compatibility with drivers still using - * the deprecated ->load() callback, where connectors are registered from within - * the ->load() callback. + * previously. * * Never call this twice on any device! * @@ -759,6 +696,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) goto err_minors; } + if (drm_core_check_feature(dev, DRIVER_MODESET)) + drm_modeset_register_all(dev); + ret = 0; goto out_unlock; @@ -789,6 +729,9 @@ void drm_dev_unregister(struct drm_device *dev) drm_lastclose(dev); + if (drm_core_check_feature(dev, DRIVER_MODESET)) + drm_modeset_unregister_all(dev); + if (dev->driver->unload) dev->driver->unload(dev); @@ -806,26 +749,6 @@ void drm_dev_unregister(struct drm_device *dev) } EXPORT_SYMBOL(drm_dev_unregister); -/** - * drm_dev_set_unique - Set the unique name of a DRM device - * @dev: device of which to set the unique name - * @name: unique name - * - * Sets the unique name of a DRM device using the specified string. Drivers - * can use this at driver probe time if the unique name of the devices they - * drive is static. - * - * Return: 0 on success or a negative error code on failure. - */ -int drm_dev_set_unique(struct drm_device *dev, const char *name) -{ - kfree(dev->unique); - dev->unique = kstrdup(name, GFP_KERNEL); - - return dev->unique ? 
0 : -ENOMEM; -} -EXPORT_SYMBOL(drm_dev_set_unique); - /* * DRM Core * The DRM core module initializes all global DRM objects and makes them diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 9a401aed98e0..622f788bff46 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -271,7 +271,7 @@ int drm_load_edid_firmware(struct drm_connector *connector) * by commas, search through the list looking for one that * matches the connector. * - * If there's one or more that don't't specify a connector, keep + * If there's one or more that doesn't specify a connector, keep * the last one found one as a fallback. */ fwstr = kstrdup(edid_firmware, GFP_KERNEL); diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 5075fae3c4e2..c0b0c718994a 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -23,6 +23,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> +#include <linux/dma-mapping.h> #include <linux/module.h> #define DEFAULT_FBDEFIO_DELAY_MS 50 @@ -52,7 +53,7 @@ struct drm_fbdev_cma { * will be set up automatically. dirty() is called by * drm_fb_helper_deferred_io() in process context (struct delayed_work). * - * Example fbdev deferred io code: + * Example fbdev deferred io code:: * * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb, * struct drm_file *file_priv, @@ -162,6 +163,10 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev, * drm_fb_cma_create_with_funcs() - helper function for the * &drm_mode_config_funcs ->fb_create * callback function + * @dev: DRM device + * @file_priv: drm file for the ioctl call + * @mode_cmd: metadata from the userspace fb creation request + * @funcs: vtable to be used for the new framebuffer object * * This can be used to set &drm_framebuffer_funcs for drivers that need the * dirty() callback. Use drm_fb_cma_create() if you don't need to change @@ -223,6 +228,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs); /** * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function + * @dev: DRM device + * @file_priv: drm file for the ioctl call + * @mode_cmd: metadata from the userspace fb creation request * * If your hardware has special alignment or pitch requirements these should be * checked before calling this function. Use drm_fb_cma_create_with_funcs() if @@ -246,7 +254,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create); * This function will usually be called from the CRTC callback functions. */ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, - unsigned int plane) + unsigned int plane) { struct drm_fb_cma *fb_cma = to_fb_cma(fb); @@ -258,10 +266,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj); #ifdef CONFIG_DEBUG_FS -/* - * drm_fb_cma_describe() - Helper to dump information about a single - * CMA framebuffer object - */ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) { struct drm_fb_cma *fb_cma = to_fb_cma(fb); @@ -279,7 +283,9 @@ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) /** * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects - * in debugfs. + * in debugfs. 
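drm_dev_init(), added in the drm_drv.c hunks above, is the piece that lets a driver embed struct drm_device in its own device structure instead of having drm_dev_alloc() allocate it. A minimal sketch of the embedding pattern, assuming a hypothetical foo_device on the platform bus and a foo_driver struct drm_driver; teardown is elided::

	struct foo_device {
		struct drm_device drm;	/* freed by the driver, not the core */
		void __iomem *mmio;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_device *foo;
		int ret;

		foo = kzalloc(sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;

		ret = drm_dev_init(&foo->drm, &foo_driver, &pdev->dev);
		if (ret) {
			kfree(foo);
			return ret;
		}

		return drm_dev_register(&foo->drm, 0);
	}
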
+ * @m: output file + * @arg: private data for the callback */ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg) { @@ -297,6 +303,12 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg) EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show); #endif +static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + return dma_mmap_writecombine(info->device, vma, info->screen_base, + info->fix.smem_start, info->fix.smem_len); +} + static struct fb_ops drm_fbdev_cma_ops = { .owner = THIS_MODULE, .fb_fillrect = drm_fb_helper_sys_fillrect, @@ -307,6 +319,7 @@ static struct fb_ops drm_fbdev_cma_ops = { .fb_blank = drm_fb_helper_blank, .fb_pan_display = drm_fb_helper_pan_display, .fb_setcmap = drm_fb_helper_setcmap, + .fb_mmap = drm_fb_cma_mmap, }; static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info, @@ -333,6 +346,7 @@ static int drm_fbdev_cma_defio_init(struct fb_info *fbi, fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); if (!fbdefio || !fbops) { kfree(fbdefio); + kfree(fbops); return -ENOMEM; } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 7c2eb75db60f..ce54e985d91b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -227,7 +227,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) g_base = r_base + crtc->gamma_size; b_base = g_base + crtc->gamma_size; - crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); + crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size); } /** @@ -385,7 +385,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) drm_warn_on_modeset_not_all_locked(dev); - if (fb_helper->atomic) + if (dev->mode_config.funcs->atomic_commit) return restore_fbdev_mode_atomic(fb_helper); drm_for_each_plane(plane, dev) { @@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) /* Sometimes user space wants everything disabled, so don't steal the * display if there's a master. 
*/ - if (dev->primary->master) + if (lockless_dereference(dev->master)) return false; drm_for_each_crtc(crtc, dev) { @@ -716,8 +716,6 @@ int drm_fb_helper_init(struct drm_device *dev, i++; } - fb_helper->atomic = !!drm_core_check_feature(dev, DRIVER_ATOMIC); - return 0; out_free: drm_fb_helper_crtc_free(fb_helper); @@ -1042,7 +1040,6 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, { struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; - int pindex; if (info->fix.visual == FB_VISUAL_TRUECOLOR) { u32 *palette; @@ -1074,38 +1071,10 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, !fb_helper->funcs->gamma_get)) return -EINVAL; - pindex = regno; - - if (fb->bits_per_pixel == 16) { - pindex = regno << 3; - - if (fb->depth == 16 && regno > 63) - return -EINVAL; - if (fb->depth == 15 && regno > 31) - return -EINVAL; - - if (fb->depth == 16) { - u16 r, g, b; - int i; - if (regno < 32) { - for (i = 0; i < 8; i++) - fb_helper->funcs->gamma_set(crtc, red, - green, blue, pindex + i); - } + WARN_ON(fb->bits_per_pixel != 8); - fb_helper->funcs->gamma_get(crtc, &r, - &g, &b, - pindex >> 1); + fb_helper->funcs->gamma_set(crtc, red, green, blue, regno); - for (i = 0; i < 4; i++) - fb_helper->funcs->gamma_set(crtc, r, - green, b, - (pindex >> 1) + i); - } - } - - if (fb->depth != 16) - fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex); return 0; } @@ -1373,7 +1342,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, return -EBUSY; } - if (fb_helper->atomic) { + if (dev->mode_config.funcs->atomic_commit) { ret = pan_display_atomic(var, info); goto unlock; } @@ -2000,7 +1969,18 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, my_score++; connector_funcs = connector->helper_private; - encoder = connector_funcs->best_encoder(connector); + + /* + * If the DRM device implements atomic hooks and ->best_encoder() is + * NULL we fallback to the default drm_atomic_helper_best_encoder() + * helper. + */ + if (fb_helper->dev->mode_config.funcs->atomic_commit && + !connector_funcs->best_encoder) + encoder = drm_atomic_helper_best_encoder(connector); + else + encoder = connector_funcs->best_encoder(connector); + if (!encoder) goto out; diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 7af7f8bcb355..323c238fcac7 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -40,6 +40,7 @@ #include <linux/module.h> #include "drm_legacy.h" #include "drm_internal.h" +#include "drm_crtc_internal.h" /* from BKL pushdown */ DEFINE_MUTEX(drm_global_mutex); @@ -67,7 +68,7 @@ DEFINE_MUTEX(drm_global_mutex); * specific implementations. For GEM-based drivers this is drm_gem_mmap(). * * No other file operations are supported by the DRM userspace API. Overall the - * following is an example #file_operations structure: + * following is an example #file_operations structure:: * * static const example_drm_fops = { * .owner = THIS_MODULE, @@ -168,60 +169,6 @@ static int drm_cpu_valid(void) } /* - * drm_new_set_master - Allocate a new master object and become master for the - * associated master realm. - * - * @dev: The associated device. - * @fpriv: File private identifying the client. - * - * This function must be called with dev::struct_mutex held. - * Returns negative error code on failure. Zero on success. 
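For an atomic driver with a fixed connector-to-encoder mapping, this fallback means the connector helper vtable can simply leave ->best_encoder unset; the branch above only takes the fallback when mode_config.funcs->atomic_commit is implemented. A sketch, with hypothetical foo_* callbacks::

	static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
		.get_modes = foo_connector_get_modes,
		/*
		 * .best_encoder deliberately left NULL: the fb helper (and
		 * the atomic helpers) fall back to
		 * drm_atomic_helper_best_encoder().
		 */
	};
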
- */ -int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) -{ - struct drm_master *old_master; - int ret; - - lockdep_assert_held_once(&dev->master_mutex); - - /* create a new master */ - fpriv->minor->master = drm_master_create(fpriv->minor); - if (!fpriv->minor->master) - return -ENOMEM; - - /* take another reference for the copy in the local file priv */ - old_master = fpriv->master; - fpriv->master = drm_master_get(fpriv->minor->master); - - if (dev->driver->master_create) { - ret = dev->driver->master_create(dev, fpriv->master); - if (ret) - goto out_err; - } - if (dev->driver->master_set) { - ret = dev->driver->master_set(dev, fpriv, true); - if (ret) - goto out_err; - } - - fpriv->is_master = 1; - fpriv->allowed_master = 1; - fpriv->authenticated = 1; - if (old_master) - drm_master_put(&old_master); - - return 0; - -out_err: - /* drop both references and restore old master on failure */ - drm_master_put(&fpriv->minor->master); - drm_master_put(&fpriv->master); - fpriv->master = old_master; - - return ret; -} - -/* * Called whenever a process opens /dev/drm. * * \param filp file pointer. @@ -283,19 +230,11 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) goto out_prime_destroy; } - /* if there is no current master make this fd it, but do not create - * any master object for render clients */ - mutex_lock(&dev->master_mutex); - if (drm_is_primary_client(priv) && !priv->minor->master) { - /* create a new master */ - ret = drm_new_set_master(dev, priv); + if (drm_is_primary_client(priv)) { + ret = drm_master_open(priv); if (ret) goto out_close; - } else if (drm_is_primary_client(priv)) { - /* get a reference to the master */ - priv->master = drm_master_get(priv->minor->master); } - mutex_unlock(&dev->master_mutex); mutex_lock(&dev->filelist_mutex); list_add(&priv->lhead, &dev->filelist); @@ -324,7 +263,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) return 0; out_close: - mutex_unlock(&dev->master_mutex); if (dev->driver->postclose) dev->driver->postclose(dev, priv); out_prime_destroy: @@ -338,18 +276,6 @@ out_prime_destroy: return ret; } -static void drm_master_release(struct drm_device *dev, struct file *filp) -{ - struct drm_file *file_priv = filp->private_data; - - if (drm_legacy_i_have_hw_lock(dev, file_priv)) { - DRM_DEBUG("File %p released, freeing lock for context %d\n", - filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); - drm_legacy_lock_free(&file_priv->master->lock, - _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); - } -} - static void drm_events_release(struct drm_file *file_priv) { struct drm_device *dev = file_priv->minor->dev; @@ -368,7 +294,7 @@ static void drm_events_release(struct drm_file *file_priv) /* Remove unconsumed events */ list_for_each_entry_safe(e, et, &file_priv->event_list, link) { list_del(&e->link); - e->destroy(e); + kfree(e); } spin_unlock_irqrestore(&dev->event_lock, flags); @@ -451,11 +377,6 @@ int drm_release(struct inode *inode, struct file *filp) list_del(&file_priv->lhead); mutex_unlock(&dev->filelist_mutex); - mutex_lock(&dev->struct_mutex); - if (file_priv->magic) - idr_remove(&file_priv->master->magic_map, file_priv->magic); - mutex_unlock(&dev->struct_mutex); - if (dev->driver->preclose) dev->driver->preclose(dev, file_priv); @@ -468,9 +389,8 @@ int drm_release(struct inode *inode, struct file *filp) (long)old_encode_dev(file_priv->minor->kdev->devt), dev->open_count); - /* if the master has gone away we can't do anything with the lock */ - 
if (file_priv->minor->master) - drm_master_release(dev, filp); + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + drm_legacy_lock_release(dev, filp); if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) drm_legacy_reclaim_buffers(dev, file_priv); @@ -487,43 +407,12 @@ int drm_release(struct inode *inode, struct file *filp) drm_legacy_ctxbitmap_flush(dev, file_priv); - mutex_lock(&dev->master_mutex); - - if (file_priv->is_master) { - struct drm_master *master = file_priv->master; - - /* - * Since the master is disappearing, so is the - * possibility to lock. - */ - mutex_lock(&dev->struct_mutex); - if (master->lock.hw_lock) { - if (dev->sigdata.lock == master->lock.hw_lock) - dev->sigdata.lock = NULL; - master->lock.hw_lock = NULL; - master->lock.file_priv = NULL; - wake_up_interruptible_all(&master->lock.lock_queue); - } - mutex_unlock(&dev->struct_mutex); - - if (file_priv->minor->master == file_priv->master) { - /* drop the reference held my the minor */ - if (dev->driver->master_drop) - dev->driver->master_drop(dev, file_priv, true); - drm_master_put(&file_priv->minor->master); - } - } - - /* drop the master reference held by the file priv */ - if (file_priv->master) - drm_master_put(&file_priv->master); - file_priv->is_master = 0; - mutex_unlock(&dev->master_mutex); + if (drm_is_primary_client(file_priv)) + drm_master_release(file_priv); if (dev->driver->postclose) dev->driver->postclose(dev, file_priv); - if (drm_core_check_feature(dev, DRIVER_PRIME)) drm_prime_destroy_file_private(&file_priv->prime); @@ -636,7 +525,7 @@ put_back_event: } ret += length; - e->destroy(e); + kfree(e); } } mutex_unlock(&file_priv->event_read_lock); @@ -713,9 +602,6 @@ int drm_event_reserve_init_locked(struct drm_device *dev, list_add(&p->pending_link, &file_priv->pending_event_list); p->file_priv = file_priv; - /* we *could* pass this in as arg, but everyone uses kfree: */ - p->destroy = (void (*) (struct drm_pending_event *)) kfree; - return 0; } EXPORT_SYMBOL(drm_event_reserve_init_locked); @@ -778,7 +664,7 @@ void drm_event_cancel_free(struct drm_device *dev, list_del(&p->pending_link); } spin_unlock_irqrestore(&dev->event_lock, flags); - p->destroy(p); + kfree(p); } EXPORT_SYMBOL(drm_event_cancel_free); @@ -800,8 +686,19 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) { assert_spin_locked(&dev->event_lock); + if (e->completion) { + /* ->completion might disappear as soon as it signalled. */ + complete_all(e->completion); + e->completion = NULL; + } + + if (e->fence) { + fence_signal(e->fence); + fence_put(e->fence); + } + if (!e->file_priv) { - e->destroy(e); + kfree(e); return; } diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c new file mode 100644 index 000000000000..0645c85d5f95 --- /dev/null +++ b/drivers/gpu/drm/drm_fourcc.c @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + * + * DRM core format related functions + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. 
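The completion and fence hooks newly honoured by drm_send_event_locked() (in the drm_fops.c hunk above) let in-kernel code observe event delivery directly, without a userspace round trip. A sketch of the completion side, with event allocation elided; foo_queue_page_flip() is a hypothetical hardware path whose irq handler ends in drm_send_event()::

	DECLARE_COMPLETION_ONSTACK(flip_done);

	event->completion = &flip_done;		/* struct drm_pending_event */
	foo_queue_page_flip(crtc, fb, event);
	wait_for_completion(&flip_done);

Since drm_send_event_locked() clears ->completion right after complete_all(), an on-stack completion like this is safe.
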
The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <linux/bug.h> +#include <linux/ctype.h> +#include <linux/export.h> +#include <linux/kernel.h> + +#include <drm/drmP.h> +#include <drm/drm_fourcc.h> + +static char printable_char(int c) +{ + return isascii(c) && isprint(c) ? c : '?'; +} + +/** + * drm_get_format_name - return a string for drm fourcc format + * @format: format to compute name of + * + * Note that the buffer used by this function is globally shared and owned by + * the function itself. + * + * FIXME: This isn't really multithreading safe. + */ +const char *drm_get_format_name(uint32_t format) +{ + static char buf[32]; + + snprintf(buf, sizeof(buf), + "%c%c%c%c %s-endian (0x%08x)", + printable_char(format & 0xff), + printable_char((format >> 8) & 0xff), + printable_char((format >> 16) & 0xff), + printable_char((format >> 24) & 0x7f), + format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little", + format); + + return buf; +} +EXPORT_SYMBOL(drm_get_format_name); + +/** + * drm_fb_get_bpp_depth - get the bpp/depth values for format + * @format: pixel format (DRM_FORMAT_*) + * @depth: storage for the depth value + * @bpp: storage for the bpp value + * + * This only supports RGB formats here for compat with code that doesn't use + * pixel formats directly yet. + */ +void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, + int *bpp) +{ + switch (format) { + case DRM_FORMAT_C8: + case DRM_FORMAT_RGB332: + case DRM_FORMAT_BGR233: + *depth = 8; + *bpp = 8; + break; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + *depth = 15; + *bpp = 16; + break; + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + *depth = 16; + *bpp = 16; + break; + case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: + *depth = 24; + *bpp = 24; + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + *depth = 24; + *bpp = 32; + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_RGBX1010102: + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + *depth = 30; + *bpp = 32; + break; + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + *depth = 32; + *bpp = 32; + break; + default: + DRM_DEBUG_KMS("unsupported pixel format %s\n", + drm_get_format_name(format)); + *depth = 0; + *bpp = 0; + break; + } +} +EXPORT_SYMBOL(drm_fb_get_bpp_depth); + +/** + * drm_format_num_planes - get the number of planes for format + * @format: pixel format (DRM_FORMAT_*) + * + * Returns: + * The number of planes used by the specified pixel format. 
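A typical caller feeds the bpp value into pitch math and the depth value into legacy reporting; as the comment says, only RGB formats are handled. For example (mode_cmd stands in for a userspace framebuffer request)::

	unsigned int depth;
	int bpp;

	drm_fb_get_bpp_depth(DRM_FORMAT_XRGB8888, &depth, &bpp);
	/* depth == 24, bpp == 32 */
	pitch = mode_cmd->width * (bpp >> 3);
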
+ */ +int drm_format_num_planes(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return 3; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(drm_format_num_planes); + +/** + * drm_format_plane_cpp - determine the bytes per pixel value + * @format: pixel format (DRM_FORMAT_*) + * @plane: plane index + * + * Returns: + * The bytes per pixel value for the specified plane. + */ +int drm_format_plane_cpp(uint32_t format, int plane) +{ + unsigned int depth; + int bpp; + + if (plane >= drm_format_num_planes(format)) + return 0; + + switch (format) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + return 2; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + return plane ? 2 : 1; + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return 1; + default: + drm_fb_get_bpp_depth(format, &depth, &bpp); + return bpp >> 3; + } +} +EXPORT_SYMBOL(drm_format_plane_cpp); + +/** + * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor + * @format: pixel format (DRM_FORMAT_*) + * + * Returns: + * The horizontal chroma subsampling factor for the + * specified pixel format. + */ +int drm_format_horz_chroma_subsampling(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + return 4; + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(drm_format_horz_chroma_subsampling); + +/** + * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor + * @format: pixel format (DRM_FORMAT_*) + * + * Returns: + * The vertical chroma subsampling factor for the + * specified pixel format. + */ +int drm_format_vert_chroma_subsampling(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + return 4; + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(drm_format_vert_chroma_subsampling); + +/** + * drm_format_plane_width - width of the plane given the first plane + * @width: width of the first plane + * @format: pixel format + * @plane: plane index + * + * Returns: + * The width of @plane, given that the width of the first plane is @width. 
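Combined, these helpers yield per-plane allocation sizes for multi-planar formats. For NV12 (2x2 subsampled, interleaved CbCr), assuming even width and height::

	uint32_t fmt = DRM_FORMAT_NV12;
	int hsub = drm_format_horz_chroma_subsampling(fmt);	/* 2 */
	int vsub = drm_format_vert_chroma_subsampling(fmt);	/* 2 */
	unsigned int y_size = width * height *
			      drm_format_plane_cpp(fmt, 0);	/* w * h bytes */
	unsigned int uv_size = (width / hsub) * (height / vsub) *
			       drm_format_plane_cpp(fmt, 1);	/* half of y_size */
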
+ */ +int drm_format_plane_width(int width, uint32_t format, int plane) +{ + if (plane >= drm_format_num_planes(format)) + return 0; + + if (plane == 0) + return width; + + return width / drm_format_horz_chroma_subsampling(format); +} +EXPORT_SYMBOL(drm_format_plane_width); + +/** + * drm_format_plane_height - height of the plane given the first plane + * @height: height of the first plane + * @format: pixel format + * @plane: plane index + * + * Returns: + * The height of @plane, given that the height of the first plane is @height. + */ +int drm_format_plane_height(int height, uint32_t format, int plane) +{ + if (plane >= drm_format_num_planes(format)) + return 0; + + if (plane == 0) + return height; + + return height / drm_format_vert_chroma_subsampling(format); +} +EXPORT_SYMBOL(drm_format_plane_height); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 32156060b9c9..5c19dde1cd31 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -787,7 +787,7 @@ EXPORT_SYMBOL(drm_gem_object_release); * @kref: kref of the object to free * * Called after the last reference to the object has been lost. - * Must be called holding struct_ mutex + * Must be called holding &drm_device->struct_mutex. * * Frees the object */ diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 5d469b2f26f4..9ae353f4dd06 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -50,106 +50,24 @@ int drm_name_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_minor *minor = node->minor; struct drm_device *dev = minor->dev; - struct drm_master *master = minor->master; - if (!master) - return 0; - - if (master->unique) { - seq_printf(m, "%s %s %s\n", - dev->driver->name, - dev_name(dev->dev), master->unique); - } else { - seq_printf(m, "%s %s\n", - dev->driver->name, dev_name(dev->dev)); - } - return 0; -} - -/** - * Called when "/proc/dri/.../vm" is read. - * - * Prints information about all mappings in drm_device::maplist. - */ -int drm_vm_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct drm_local_map *map; - struct drm_map_list *r_list; - - /* Hardcoded from _DRM_FRAME_BUFFER, - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; - const char *type; - int i; - - mutex_lock(&dev->struct_mutex); - seq_printf(m, "slot offset size type flags address mtrr\n\n"); - i = 0; - list_for_each_entry(r_list, &dev->maplist, head) { - map = r_list->map; - if (!map) - continue; - if (map->type < 0 || map->type > 5) - type = "??"; - else - type = types[map->type]; - - seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s 0x%02x 0x%08lx ", - i, - (unsigned long long)map->offset, - map->size, type, map->flags, - (unsigned long) r_list->user_token); - if (map->mtrr < 0) - seq_printf(m, "none\n"); - else - seq_printf(m, "%4d\n", map->mtrr); - i++; - } - mutex_unlock(&dev->struct_mutex); - return 0; -} + struct drm_master *master; -/** - * Called when "/proc/dri/.../bufs" is read. 
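Taken together, the format helpers above let a driver derive a complete per-plane layout. A worked sketch (illustrative only) for a 1920x1080 DRM_FORMAT_NV12 buffer:

    uint32_t format = DRM_FORMAT_NV12;
    int i, num = drm_format_num_planes(format);    /* 2 for NV12 */

    for (i = 0; i < num; i++) {
        int w = drm_format_plane_width(1920, format, i);   /* 1920, then 960 */
        int h = drm_format_plane_height(1080, format, i);  /* 1080, then 540 */
        int cpp = drm_format_plane_cpp(format, i);         /* 1, then 2 */

        /* a tightly packed pitch is w * cpp: 1920 bytes for the Y
         * plane and also 1920 for the interleaved CbCr plane */
        DRM_DEBUG_KMS("plane %d: %dx%d, cpp %d\n", i, w, h, cpp);
    }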
- */ -int drm_bufs_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct drm_device_dma *dma; - int i, seg_pages; - - mutex_lock(&dev->struct_mutex); - dma = dev->dma; - if (!dma) { - mutex_unlock(&dev->struct_mutex); - return 0; - } - - seq_printf(m, " o size count free segs pages kB\n\n"); - for (i = 0; i <= DRM_MAX_ORDER; i++) { - if (dma->bufs[i].buf_count) { - seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order); - seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n", - i, - dma->bufs[i].buf_size, - dma->bufs[i].buf_count, - 0, - dma->bufs[i].seg_count, - seg_pages, - seg_pages * PAGE_SIZE / 1024); - } - } - seq_printf(m, "\n"); - for (i = 0; i < dma->buf_count; i++) { - if (i && !(i % 32)) - seq_printf(m, "\n"); - seq_printf(m, " %d", dma->buflist[i]->list); - } + mutex_lock(&dev->master_mutex); + master = dev->master; + if (!master) + goto out_unlock; + + seq_printf(m, "%s", dev->driver->name); + if (dev->dev) + seq_printf(m, " dev=%s", dev_name(dev->dev)); + if (master && master->unique) + seq_printf(m, " master=%s", master->unique); + if (dev->unique) + seq_printf(m, " unique=%s", dev->unique); seq_printf(m, "\n"); - mutex_unlock(&dev->struct_mutex); +out_unlock: + mutex_unlock(&dev->master_mutex); + return 0; } @@ -184,7 +102,7 @@ int drm_clients_info(struct seq_file *m, void *data) task ? task->comm : "<unknown>", pid_vnr(priv->pid), priv->minor->index, - priv->is_master ? 'y' : 'n', + drm_is_current_master(priv) ? 'y' : 'n', priv->authenticated ? 'y' : 'n', from_kuid_munged(seq_user_ns(m), priv->uid), priv->magic); @@ -194,7 +112,6 @@ int drm_clients_info(struct seq_file *m, void *data) return 0; } - static int drm_gem_one_name_info(int id, void *ptr, void *data) { struct drm_gem_object *obj = ptr; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 902cf6a15212..b86dc9b921a5 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -29,15 +29,9 @@ extern struct mutex drm_global_mutex; void drm_lastclose(struct drm_device *dev); /* drm_pci.c */ -int drm_pci_set_unique(struct drm_device *dev, - struct drm_master *master, - struct drm_unique *u); int drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv); -/* drm_vm.c */ -int drm_vma_info(struct seq_file *m, void *data); - /* drm_prime.c */ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -51,8 +45,6 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr /* drm_info.c */ int drm_name_info(struct seq_file *m, void *data); -int drm_vm_info(struct seq_file *m, void *data); -int drm_bufs_info(struct seq_file *m, void *data); int drm_clients_info(struct seq_file *m, void* data); int drm_gem_name_info(struct seq_file *m, void *data); @@ -67,6 +59,12 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv); +int drm_setmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_dropmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_master_open(struct drm_file *file_priv); +void drm_master_release(struct drm_file *file_priv); /* drm_sysfs.c */ extern struct class *drm_class; @@ -92,13 +90,6 @@ int drm_gem_open_ioctl(struct drm_device *dev, void *data, void drm_gem_open(struct drm_device 
*dev, struct drm_file *file_private); void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); -/* drm_drv.c */ -int drm_setmaster_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_dropmaster_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -struct drm_master *drm_master_create(struct drm_minor *minor); - /* drm_debugfs.c */ #if defined(CONFIG_DEBUG_FS) int drm_debugfs_init(struct drm_minor *minor, int minor_id, diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index b7a39771c152..1f84ff5f1bf8 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -30,6 +30,7 @@ #include <drm/drmP.h> #include <drm/drm_core.h> +#include <drm/drm_auth.h> #include "drm_legacy.h" #include "drm_internal.h" #include "drm_crtc_internal.h" @@ -37,6 +38,64 @@ #include <linux/pci.h> #include <linux/export.h> +/** + * DOC: getunique and setversion story + * + * BEWARE THE DRAGONS! MIND THE TRAPDOORS! + * + * In an attempt to warn anyone else who's trying to figure out what's going + * on here, I'll try to summarize the story. First things first, let's clear up + * the names, because the kernel internals, libdrm and the ioctls are all named + * differently: + * + * - The GET_UNIQUE ioctl, implemented by drm_getunique, is wrapped up in libdrm + * through the drmGetBusid function. + * - The libdrm drmSetBusid function is backed by the SET_UNIQUE ioctl. All + * that code is nerved in the kernel with drm_invalid_op(). + * - The internal set_busid kernel functions and driver callbacks are + * exclusively used by the SET_VERSION ioctl, because only drm 1.0 (which is + * nerved) allowed userspace to set the busid through the above ioctl. + * - Other ioctls and functions involved are named consistently. + * + * For anyone wondering what's the difference between drm 1.1 and 1.4: Correctly + * handling pci domains in the busid on ppc. Doing this correctly was only + * implemented in libdrm in 2010, hence can't be nerved yet. No one knows what's + * special with drm 1.2 and 1.3. + * + * Now the actual horror story of how device lookup in drm works. At large, + * there's 2 different ways, either by busid, or by device driver name. + * + * Opening by busid is fairly simple: + * + * 1. First call SET_VERSION to make sure pci domains are handled properly. As a + * side-effect this fills out the unique name in the master structure. + * 2. Call GET_UNIQUE to read out the unique name from the master structure, + * which matches the busid thanks to step 1. If it doesn't, proceed to try + * the next device node. + * + * Opening by name is slightly different: + * + * 1. Directly call VERSION to get the version and to match against the driver + * name returned by that ioctl. Note that SET_VERSION is not called, which + * means the unique name for the master node just opened is _not_ filled + * out. This despite the fact that current drm device nodes are always bound to + * one device, and can't be runtime assigned like with drm 1.0. + * 2. Match driver name. If it mismatches, proceed to the next device node. + * 3. Call GET_UNIQUE, and check whether the unique name has length zero (by + * checking that the first byte in the string is 0). If that's not the case + * libdrm skips and proceeds to the next device node. Probably this is just + * copypasta from drm 1.0 times where a set unique name meant that the driver + * was in use already, but that's just conjecture.
+ * + * Long story short: To keep the open by name logic working, GET_UNIQUE must + * _not_ return a unique string when SET_VERSION hasn't been called yet, + * otherwise libdrm breaks. Even though that unique string can't ever change, and + * is totally irrelevant for actually opening the device because runtime + * assignable device instances were only supported in drm 1.0, which is long dead. + * But the libdrm code in drmOpenByName somehow survived, hence this can't be + * broken. + */ + static int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -75,51 +134,6 @@ drm_unset_busid(struct drm_device *dev, master->unique_len = 0; } -/* - * Set the bus id. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument, pointing to a drm_unique structure. - * \return zero on success or a negative number on failure. - * - * Copies the bus id from userspace into drm_device::unique, and verifies that - * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated - * in interface version 1.1 and will return EBUSY when setversion has requested - * version 1.1 or greater. Also note that KMS is all version 1.1 and later and - * UMS was only ever supported on pci devices. - */ -static int drm_setunique(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_unique *u = data; - struct drm_master *master = file_priv->master; - int ret; - - if (master->unique_len || master->unique) - return -EBUSY; - - if (!u->unique_len || u->unique_len > 1024) - return -EINVAL; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return 0; - - if (WARN_ON(!dev->pdev)) - return -EINVAL; - - ret = drm_pci_set_unique(dev, master, u); - if (ret) - goto err; - - return 0; - -err: - drm_unset_busid(dev, master); - return ret; -} - static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) { struct drm_master *master = file_priv->master; @@ -135,12 +149,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) return ret; } } else { - if (WARN(dev->unique == NULL, - "No drm_driver.set_busid() implementation provided by " - "%ps.
Use drm_dev_set_unique() to set the unique " - "name explicitly.", dev->driver)) - return -EINVAL; - + WARN_ON(!dev->unique); master->unique = kstrdup(dev->unique, GFP_KERNEL); if (master->unique) master->unique_len = strlen(dev->unique); @@ -473,7 +482,8 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) return -EACCES; /* MASTER is only for master or control clients */ - if (unlikely((flags & DRM_MASTER) && !file_priv->is_master && + if (unlikely((flags & DRM_MASTER) && + !drm_is_current_master(file_priv) && !drm_is_control_client(file_priv))) return -EACCES; @@ -504,7 +514,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), - DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), @@ -513,10 +523,10 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0), DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_UNLOCKED|DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH), @@ -524,8 +534,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 0fac801c18fe..8ca3d2bf2bda 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -42,10 +42,6 @@ #include <linux/vgaarb.h> #include <linux/export.h> -/* Access macro for slots in vblank timestamp ringbuffer. */ -#define vblanktimestamp(dev, pipe, count) \ - ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE]) - /* Retry timestamp calculation up to 3 times to satisfy * drm_timestamp_precision before giving up. 
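To make the open-by-busid dance from the getunique/setversion DOC comment above concrete: on the userspace side it boils down to roughly the following (a hedged sketch of what libdrm does, not its actual code; fd is the opened device node):

    char busid[128] = "";
    struct drm_set_version sv = { 1, 4, -1, -1 };  /* request drm 1.4 busids */
    struct drm_unique u = { sizeof(busid), busid };

    ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);  /* side-effect: fills the unique name */
    ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);    /* read it back */
    /* if busid is not the one we are looking for, close fd and try
     * the next device node */

With SET_UNIQUE now nerved to drm_invalid_op(), this read-only flow is the only busid interface left to userspace.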
*/ @@ -82,36 +78,18 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe, struct timeval *t_vblank, u32 last) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - u32 tslot; assert_spin_locked(&dev->vblank_time_lock); vblank->last = last; - /* All writers hold the spinlock, but readers are serialized by - * the latching of vblank->count below. - */ - tslot = vblank->count + vblank_count_inc; - vblanktimestamp(dev, pipe, tslot) = *t_vblank; - - /* - * vblank timestamp updates are protected on the write side with - * vblank_time_lock, but on the read side done locklessly using a - * sequence-lock on the vblank counter. Ensure correct ordering using - * memory barrriers. We need the barrier both before and also after the - * counter update to synchronize with the next timestamp write. - * The read-side barriers for this are in drm_vblank_count_and_time. - */ - smp_wmb(); + write_seqlock(&vblank->seqlock); + vblank->time = *t_vblank; vblank->count += vblank_count_inc; - smp_wmb(); + write_sequnlock(&vblank->seqlock); } -/** - * drm_reset_vblank_timestamp - reset the last timestamp to the last vblank - * @dev: DRM device - * @pipe: index of CRTC for which to reset the timestamp - * +/* * Reset the stored timestamp for the current vblank count to correspond * to the last vblank occurred. * @@ -155,11 +133,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe spin_unlock(&dev->vblank_time_lock); } -/** - * drm_update_vblank_count - update the master vblank counter - * @dev: DRM device - * @pipe: counter to update - * +/* * Call back into the driver to update the appropriate vblank counter * (specified by @pipe). Deal with wraparound, if it occurred, and * update the last read value so we can deal with wraparound on the next @@ -205,7 +179,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, const struct timeval *t_old; u64 diff_ns; - t_old = &vblanktimestamp(dev, pipe, vblank->count); + t_old = &vblank->time; diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old); /* @@ -239,49 +213,6 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, diff = 1; } - /* - * FIMXE: Need to replace this hack with proper seqlocks. - * - * Restrict the bump of the software vblank counter to a safe maximum - * value of +1 whenever there is the possibility that concurrent readers - * of vblank timestamps could be active at the moment, as the current - * implementation of the timestamp caching and updating is not safe - * against concurrent readers for calls to store_vblank() with a bump - * of anything but +1. A bump != 1 would very likely return corrupted - * timestamps to userspace, because the same slot in the cache could - * be concurrently written by store_vblank() and read by one of those - * readers without the read-retry logic detecting the collision. - * - * Concurrent readers can exist when we are called from the - * drm_vblank_off() or drm_vblank_on() functions and other non-vblank- - * irq callers. However, all those calls to us are happening with the - * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount - * can't increase while we are executing. Therefore a zero refcount at - * this point is safe for arbitrary counter bumps if we are called - * outside vblank irq, a non-zero count is not 100% safe. 
Unfortunately - * we must also accept a refcount of 1, as whenever we are called from - * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and - * we must let that one pass through in order to not lose vblank counts - * during vblank irq off - which would completely defeat the whole - * point of this routine. - * - * Whenever we are called from vblank irq, we have to assume concurrent - * readers exist or can show up any time during our execution, even if - * the refcount is currently zero, as vblank irqs are usually only - * enabled due to the presence of readers, and because when we are called - * from vblank irq we can't hold the vbl_lock to protect us from sudden - * bumps in vblank refcount. Therefore also restrict bumps to +1 when - * called from vblank irq. - */ - if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 || - (flags & DRM_CALLED_FROM_VBLIRQ))) { - DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u " - "refcount %u, vblirq %u\n", pipe, diff, - atomic_read(&vblank->refcount), - (flags & DRM_CALLED_FROM_VBLIRQ) != 0); - diff = 1; - } - DRM_DEBUG_VBL("updating vblank count on crtc %u:" " current=%u, diff=%u, hw=%u hw_last=%u\n", pipe, vblank->count, diff, cur_vblank, vblank->last); @@ -303,6 +234,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, store_vblank(dev, pipe, diff, &t_vblank, cur_vblank); } +/** + * drm_accurate_vblank_count - retrieve the master vblank counter + * @crtc: which counter to retrieve + * + * This function is similar to @drm_crtc_vblank_count but this + * function interpolates to handle a race with vblank irq's. + * + * This is mostly useful for hardware that can obtain the scanout + * position, but doesn't have a frame counter. + */ +u32 drm_accurate_vblank_count(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + unsigned int pipe = drm_crtc_index(crtc); + u32 vblank; + unsigned long flags; + + WARN(!dev->driver->get_vblank_timestamp, + "This function requires support for accurate vblank timestamps."); + + spin_lock_irqsave(&dev->vblank_time_lock, flags); + + drm_update_vblank_count(dev, pipe, 0); + vblank = drm_vblank_count(dev, pipe); + + spin_unlock_irqrestore(&dev->vblank_time_lock, flags); + + return vblank; +} +EXPORT_SYMBOL(drm_accurate_vblank_count); + /* * Disable vblank irq's on crtc, make sure that last vblank count * of hardware and corresponding consistent software vblank counter @@ -417,6 +379,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs) init_waitqueue_head(&vblank->queue); setup_timer(&vblank->disable_timer, vblank_disable_fn, (unsigned long)vblank); + seqlock_init(&vblank->seqlock); } DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n"); @@ -986,25 +949,19 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, struct timeval *vblanktime) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - int count = DRM_TIMESTAMP_MAXRETRIES; - u32 cur_vblank; + u32 vblank_count; + unsigned int seq; if (WARN_ON(pipe >= dev->num_crtcs)) return 0; - /* - * Vblank timestamps are read lockless. To ensure consistency the vblank - * counter is rechecked and ordering is ensured using memory barriers. - * This works like a seqlock. The write-side barriers are in store_vblank. 
- */ do { - cur_vblank = vblank->count; - smp_rmb(); - *vblanktime = vblanktimestamp(dev, pipe, cur_vblank); - smp_rmb(); - } while (cur_vblank != vblank->count && --count > 0); + seq = read_seqbegin(&vblank->seqlock); + vblank_count = vblank->count; + *vblanktime = vblank->time; + } while (read_seqretry(&vblank->seqlock, seq)); - return cur_vblank; + return vblank_count; } EXPORT_SYMBOL(drm_vblank_count_and_time); @@ -1037,39 +994,11 @@ static void send_vblank_event(struct drm_device *dev, e->event.tv_sec = now->tv_sec; e->event.tv_usec = now->tv_usec; - drm_send_event_locked(dev, &e->base); - trace_drm_vblank_event_delivered(e->base.pid, e->pipe, e->event.sequence); -} -/** - * drm_arm_vblank_event - arm vblank event after pageflip - * @dev: DRM device - * @pipe: CRTC index - * @e: the event to prepare to send - * - * A lot of drivers need to generate vblank events for the very next vblank - * interrupt. For example when the page flip interrupt happens when the page - * flip gets armed, but not when it actually executes within the next vblank - * period. This helper function implements exactly the required vblank arming - * behaviour. - * - * Caller must hold event lock. Caller must also hold a vblank reference for - * the event @e, which will be dropped when the next vblank arrives. - * - * This is the legacy version of drm_crtc_arm_vblank_event(). - */ -void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe, - struct drm_pending_vblank_event *e) -{ - assert_spin_locked(&dev->event_lock); - - e->pipe = pipe; - e->event.sequence = drm_vblank_count(dev, pipe); - list_add_tail(&e->base.link, &dev->vblank_event_list); + drm_send_event_locked(dev, &e->base); } -EXPORT_SYMBOL(drm_arm_vblank_event); /** * drm_crtc_arm_vblank_event - arm vblank event after pageflip @@ -1084,32 +1013,35 @@ EXPORT_SYMBOL(drm_arm_vblank_event); * * Caller must hold event lock. Caller must also hold a vblank reference for * the event @e, which will be dropped when the next vblank arrives. - * - * This is the native KMS version of drm_arm_vblank_event(). */ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e) { - drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e); + struct drm_device *dev = crtc->dev; + unsigned int pipe = drm_crtc_index(crtc); + + assert_spin_locked(&dev->event_lock); + + e->pipe = pipe; + e->event.sequence = drm_vblank_count(dev, pipe); + list_add_tail(&e->base.link, &dev->vblank_event_list); } EXPORT_SYMBOL(drm_crtc_arm_vblank_event); /** - * drm_send_vblank_event - helper to send vblank event after pageflip - * @dev: DRM device - * @pipe: CRTC index + * drm_crtc_send_vblank_event - helper to send vblank event after pageflip + * @crtc: the source CRTC of the vblank event * @e: the event to send * * Updates sequence # and timestamp on event, and sends it to userspace. * Caller must hold event lock. - * - * This is the legacy version of drm_crtc_send_vblank_event(). 
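The net effect of the conversion above is that the open-coded smp_wmb()/smp_rmb() pairs and the timestamp ring buffer are replaced by one seqlock protecting a single count/time pair; reduced to its core the pattern is:

    /* writer, in store_vblank(), already under vblank_time_lock */
    write_seqlock(&vblank->seqlock);
    vblank->time = *t_vblank;
    vblank->count += vblank_count_inc;
    write_sequnlock(&vblank->seqlock);

    /* lockless reader, in drm_vblank_count_and_time() */
    do {
        seq = read_seqbegin(&vblank->seqlock);
        vblank_count = vblank->count;
        *vblanktime = vblank->time;
    } while (read_seqretry(&vblank->seqlock, seq));

Because a racing reader simply retries, counter bumps larger than +1 can no longer hand out a torn count/timestamp pair, which is what lets the old clamping hack in drm_update_vblank_count() go away.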
*/ -void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, - struct drm_pending_vblank_event *e) +void drm_crtc_send_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e) { + struct drm_device *dev = crtc->dev; + unsigned int seq, pipe = drm_crtc_index(crtc); struct timeval now; - unsigned int seq; if (dev->num_crtcs > 0) { seq = drm_vblank_count_and_time(dev, pipe, &now); @@ -1121,23 +1053,6 @@ void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, e->pipe = pipe; send_vblank_event(dev, e, seq, &now); } -EXPORT_SYMBOL(drm_send_vblank_event); - -/** - * drm_crtc_send_vblank_event - helper to send vblank event after pageflip - * @crtc: the source CRTC of the vblank event - * @e: the event to send - * - * Updates sequence # and timestamp on event, and sends it to userspace. - * Caller must hold event lock. - * - * This is the native KMS version of drm_send_vblank_event(). - */ -void drm_crtc_send_vblank_event(struct drm_crtc *crtc, - struct drm_pending_vblank_event *e) -{ - drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e); -} EXPORT_SYMBOL(drm_crtc_send_vblank_event); /** @@ -1193,7 +1108,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe) * Returns: * Zero on success or a negative error code on failure. */ -int drm_vblank_get(struct drm_device *dev, unsigned int pipe) +static int drm_vblank_get(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; unsigned long irqflags; @@ -1219,7 +1134,6 @@ int drm_vblank_get(struct drm_device *dev, unsigned int pipe) return ret; } -EXPORT_SYMBOL(drm_vblank_get); /** * drm_crtc_vblank_get - get a reference count on vblank events @@ -1228,8 +1142,6 @@ EXPORT_SYMBOL(drm_vblank_get); * Acquire a reference count on vblank events to avoid having them disabled * while in use. * - * This is the native kms version of drm_vblank_get(). - * * Returns: * Zero on success or a negative error code on failure. */ @@ -1249,7 +1161,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get); * * This is the legacy version of drm_crtc_vblank_put(). */ -void drm_vblank_put(struct drm_device *dev, unsigned int pipe) +static void drm_vblank_put(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; @@ -1270,7 +1182,6 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe) jiffies + ((drm_vblank_offdelay * HZ)/1000)); } } -EXPORT_SYMBOL(drm_vblank_put); /** * drm_crtc_vblank_put - give up ownership of vblank events @@ -1278,8 +1189,6 @@ EXPORT_SYMBOL(drm_vblank_put); * * Release ownership of a given vblank counter, turning off interrupts * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. - * - * This is the native kms version of drm_vblank_put(). 
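With the legacy variants gone, a driver's page-flip path composes the surviving KMS-native entry points roughly as follows (a sketch; event and flags are assumed to exist in the caller):

    if (drm_crtc_vblank_get(crtc) == 0) {
        /* the reference taken here is consumed when the event fires */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
        drm_crtc_arm_vblank_event(crtc, event);
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
    } else {
        /* vblank interrupts unavailable: complete the event right away */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
        drm_crtc_send_vblank_event(crtc, event);
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
    }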
*/ void drm_crtc_vblank_put(struct drm_crtc *crtc) { diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h index d3b6ee357a2b..c6f422e879dd 100644 --- a/drivers/gpu/drm/drm_legacy.h +++ b/drivers/gpu/drm/drm_legacy.h @@ -88,14 +88,10 @@ struct drm_agp_mem { struct list_head head; }; -/* - * Generic Userspace Locking-API - */ - -int drm_legacy_i_have_hw_lock(struct drm_device *d, struct drm_file *f); +/* drm_lock.c */ int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f); int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_lock_free(struct drm_lock_data *lock, unsigned int ctx); +void drm_legacy_lock_release(struct drm_device *dev, struct file *filp); /* DMA support */ int drm_legacy_dma_setup(struct drm_device *dev); diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index daa2ff12101b..48ac0ebbd663 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -41,6 +41,110 @@ static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); /** + * Take the heavyweight lock. + * + * \param lock lock pointer. + * \param context locking context. + * \return one if the lock is held, or zero otherwise. + * + * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. + */ +static +int drm_lock_take(struct drm_lock_data *lock_data, + unsigned int context) +{ + unsigned int old, new, prev; + volatile unsigned int *lock = &lock_data->hw_lock->lock; + + spin_lock_bh(&lock_data->spinlock); + do { + old = *lock; + if (old & _DRM_LOCK_HELD) + new = old | _DRM_LOCK_CONT; + else { + new = context | _DRM_LOCK_HELD | + ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? + _DRM_LOCK_CONT : 0); + } + prev = cmpxchg(lock, old, new); + } while (prev != old); + spin_unlock_bh(&lock_data->spinlock); + + if (_DRM_LOCKING_CONTEXT(old) == context) { + if (old & _DRM_LOCK_HELD) { + if (context != DRM_KERNEL_CONTEXT) { + DRM_ERROR("%d holds heavyweight lock\n", + context); + } + return 0; + } + } + + if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) { + /* Have lock */ + return 1; + } + return 0; +} + +/** + * This takes a lock forcibly and hands it to context. Should ONLY be used + * inside *_unlock to give lock to kernel before calling *_dma_schedule. + * + * \param dev DRM device. + * \param lock lock pointer. + * \param context locking context. + * \return always one. + * + * Resets the lock file pointer. + * Marks the lock as held by the given context, via the \p cmpxchg instruction. 
+ */ +static int drm_lock_transfer(struct drm_lock_data *lock_data, + unsigned int context) +{ + unsigned int old, new, prev; + volatile unsigned int *lock = &lock_data->hw_lock->lock; + + lock_data->file_priv = NULL; + do { + old = *lock; + new = context | _DRM_LOCK_HELD; + prev = cmpxchg(lock, old, new); + } while (prev != old); + return 1; +} + +static int drm_legacy_lock_free(struct drm_lock_data *lock_data, + unsigned int context) +{ + unsigned int old, new, prev; + volatile unsigned int *lock = &lock_data->hw_lock->lock; + + spin_lock_bh(&lock_data->spinlock); + if (lock_data->kernel_waiters != 0) { + drm_lock_transfer(lock_data, 0); + lock_data->idle_has_lock = 1; + spin_unlock_bh(&lock_data->spinlock); + return 1; + } + spin_unlock_bh(&lock_data->spinlock); + + do { + old = *lock; + new = _DRM_LOCKING_CONTEXT(old); + prev = cmpxchg(lock, old, new); + } while (prev != old); + + if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { + DRM_ERROR("%d freed heavyweight lock held by %d\n", + context, _DRM_LOCKING_CONTEXT(old)); + return 1; + } + wake_up_interruptible(&lock_data->lock_queue); + return 0; +} + +/** * Lock ioctl. * * \param inode device inode. @@ -115,7 +219,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data, /* don't set the block all signals on the master process for now * really probably not the correct answer but lets us debug xkb * xserver for now */ - if (!file_priv->is_master) { + if (!drm_is_current_master(file_priv)) { dev->sigdata.context = lock->context; dev->sigdata.lock = master->lock.hw_lock; } @@ -165,120 +269,6 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_ } /** - * Take the heavyweight lock. - * - * \param lock lock pointer. - * \param context locking context. - * \return one if the lock is held, or zero otherwise. - * - * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. - */ -static -int drm_lock_take(struct drm_lock_data *lock_data, - unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - spin_lock_bh(&lock_data->spinlock); - do { - old = *lock; - if (old & _DRM_LOCK_HELD) - new = old | _DRM_LOCK_CONT; - else { - new = context | _DRM_LOCK_HELD | - ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? - _DRM_LOCK_CONT : 0); - } - prev = cmpxchg(lock, old, new); - } while (prev != old); - spin_unlock_bh(&lock_data->spinlock); - - if (_DRM_LOCKING_CONTEXT(old) == context) { - if (old & _DRM_LOCK_HELD) { - if (context != DRM_KERNEL_CONTEXT) { - DRM_ERROR("%d holds heavyweight lock\n", - context); - } - return 0; - } - } - - if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) { - /* Have lock */ - return 1; - } - return 0; -} - -/** - * This takes a lock forcibly and hands it to context. Should ONLY be used - * inside *_unlock to give lock to kernel before calling *_dma_schedule. - * - * \param dev DRM device. - * \param lock lock pointer. - * \param context locking context. - * \return always one. - * - * Resets the lock file pointer. - * Marks the lock as held by the given context, via the \p cmpxchg instruction. 
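For context on what both copies of these helpers manipulate: the legacy lock word packs the held/contended bits and the owning context into a single 32-bit value, which is why plain cmpxchg() loops suffice. Quoting the uapi <drm/drm.h> definitions for reference:

    #define _DRM_LOCK_HELD 0x80000000U /* hardware lock is held */
    #define _DRM_LOCK_CONT 0x40000000U /* hardware lock is contended */
    #define _DRM_LOCK_IS_HELD(lock)    ((lock) & _DRM_LOCK_HELD)
    #define _DRM_LOCK_IS_CONT(lock)    ((lock) & _DRM_LOCK_CONT)
    #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))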
- */ -static int drm_lock_transfer(struct drm_lock_data *lock_data, - unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - lock_data->file_priv = NULL; - do { - old = *lock; - new = context | _DRM_LOCK_HELD; - prev = cmpxchg(lock, old, new); - } while (prev != old); - return 1; -} - -/** - * Free lock. - * - * \param dev DRM device. - * \param lock lock. - * \param context context. - * - * Resets the lock file pointer. - * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task - * waiting on the lock queue. - */ -int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - spin_lock_bh(&lock_data->spinlock); - if (lock_data->kernel_waiters != 0) { - drm_lock_transfer(lock_data, 0); - lock_data->idle_has_lock = 1; - spin_unlock_bh(&lock_data->spinlock); - return 1; - } - spin_unlock_bh(&lock_data->spinlock); - - do { - old = *lock; - new = _DRM_LOCKING_CONTEXT(old); - prev = cmpxchg(lock, old, new); - } while (prev != old); - - if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { - DRM_ERROR("%d freed heavyweight lock held by %d\n", - context, _DRM_LOCKING_CONTEXT(old)); - return 1; - } - wake_up_interruptible(&lock_data->lock_queue); - return 0; -} - -/** * This function returns immediately and takes the hw lock * with the kernel context if it is free, otherwise it gets the highest priority when and if * it is eventually released. @@ -330,11 +320,27 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock_data) } EXPORT_SYMBOL(drm_legacy_idlelock_release); -int drm_legacy_i_have_hw_lock(struct drm_device *dev, - struct drm_file *file_priv) +static int drm_legacy_i_have_hw_lock(struct drm_device *dev, + struct drm_file *file_priv) { struct drm_master *master = file_priv->master; return (file_priv->lock_count && master->lock.hw_lock && _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && master->lock.file_priv == file_priv); } + +void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) +{ + struct drm_file *file_priv = filp->private_data; + + /* if the master has gone away we can't do anything with the lock */ + if (!dev->master) + return; + + if (drm_legacy_i_have_hw_lock(dev, file_priv)) { + DRM_DEBUG("File %p released, freeing lock for context %d\n", + filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); + drm_legacy_lock_free(&file_priv->master->lock, + _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); + } +} diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index f5d80839a90c..49311fc61d5d 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -60,6 +60,21 @@ static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv) return 0; } +static int mipi_dsi_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); + int err; + + err = of_device_uevent_modalias(dev, env); + if (err != -ENODEV) + return err; + + add_uevent_var(env, "MODALIAS=%s%s", MIPI_DSI_MODULE_PREFIX, + dsi->name); + + return 0; +} + static const struct dev_pm_ops mipi_dsi_device_pm_ops = { .runtime_suspend = pm_generic_runtime_suspend, .runtime_resume = pm_generic_runtime_resume, @@ -74,6 +89,7 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = { static struct bus_type mipi_dsi_bus_type = { .name = "mipi-dsi", .match = 
mipi_dsi_device_match, + .uevent = mipi_dsi_uevent, .pm = &mipi_dsi_device_pm_ops, }; @@ -983,6 +999,28 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on); /** + * mipi_dsi_set_tear_scanline() - turn on the display module's Tearing Effect + * output signal on the TE signal line when display module reaches line N + * defined by STS[n:0]. + * @dsi: DSI peripheral device + * @param: STS[10:0] + * Return: 0 on success or a negative error code on failure + */ +int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param) +{ + u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, param >> 8, + param & 0xff }; + ssize_t err; + + err = mipi_dsi_generic_write(dsi, payload, sizeof(payload)); + if (err < 0) + return err; + + return 0; +} +EXPORT_SYMBOL(mipi_dsi_set_tear_scanline); + +/** * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image * data used by the interface * @dsi: DSI peripheral device diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 04de6fd88f8c..cb39f45d6a16 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -179,12 +179,14 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) { struct drm_mm_node *hole; - u64 end = node->start + node->size; + u64 end; u64 hole_start; u64 hole_end; BUG_ON(node == NULL); + end = node->start + node->size; + /* Find the relevant hole to add our node to */ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { if (hole_start > node->start || hole_end < end) diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index e5e6f504d8cc..fc5040ae5f25 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -544,6 +544,7 @@ EXPORT_SYMBOL(drm_gtf_mode_complex); * * This function is to create the modeline based on the GTF algorithm. * Generalized Timing Formula is derived from: + * * GTF Spreadsheet by Andy Morrish (1/5/97) * available at http://www.vesa.org * @@ -552,7 +553,8 @@ EXPORT_SYMBOL(drm_gtf_mode_complex); * I also refer to the function of fb_get_mode in the file of * drivers/video/fbmon.c * - * Standard GTF parameters: + * Standard GTF parameters:: + * * M = 600 * C = 40 * K = 128 diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index e3a4adf03e7b..61146f5b4f56 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -30,14 +30,14 @@ * * As KMS moves toward more fine grained locking, and atomic ioctl where * userspace can indirectly control locking order, it becomes necessary - * to use ww_mutex and acquire-contexts to avoid deadlocks. But because + * to use &ww_mutex and acquire-contexts to avoid deadlocks. But because * the locking is more distributed around the driver code, we want a bit * of extra utility/tracking out of our acquire-ctx. This is provided * by drm_modeset_lock / drm_modeset_acquire_ctx. * - * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt + * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt * - * The basic usage pattern is to: + * The basic usage pattern is to:: * * drm_modeset_acquire_init(&ctx) * retry: @@ -51,6 +51,13 @@ * ... do stuff ... * drm_modeset_drop_locks(&ctx); * drm_modeset_acquire_fini(&ctx); + * + * On top of these per-object locks using &ww_mutex there's also an overall + * dev->mode_config.lock, for protecting everything else.
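Returning to the mipi_dsi hunk above: a panel driver would typically pair the new helper with the existing tear-on call, e.g. in its prepare path (a sketch, with the scanline value made up for illustration):

    /* enable TE reporting at vblank, then raise TE at scanline 512 */
    ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
    if (ret < 0)
        return ret;

    return mipi_dsi_set_tear_scanline(dsi, 512);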
Mostly this means + * probe state of connectors, and preventing hotplug add/removal of connectors. + * + * Finally there's a bunch of dedicated locks to protect drm core internal + * lists and lookup data structures. */ /** diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 29d5a548d07a..b2f8f1062d5f 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -144,50 +144,6 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) } EXPORT_SYMBOL(drm_pci_set_busid); -int drm_pci_set_unique(struct drm_device *dev, - struct drm_master *master, - struct drm_unique *u) -{ - int domain, bus, slot, func, ret; - - master->unique_len = u->unique_len; - master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL); - if (!master->unique) { - ret = -ENOMEM; - goto err; - } - - if (copy_from_user(master->unique, u->unique, master->unique_len)) { - ret = -EFAULT; - goto err; - } - - master->unique[master->unique_len] = '\0'; - - /* Return error if the busid submitted doesn't match the device's actual - * busid. - */ - ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func); - if (ret != 3) { - ret = -EINVAL; - goto err; - } - - domain = bus >> 8; - bus &= 0xff; - - if ((domain != drm_get_pci_domain(dev)) || - (bus != dev->pdev->bus->number) || - (slot != PCI_SLOT(dev->pdev->devfn)) || - (func != PCI_FUNC(dev->pdev->devfn))) { - ret = -EINVAL; - goto err; - } - return 0; -err: - return ret; -} - static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) { if ((p->busnum >> 8) != drm_get_pci_domain(dev) || @@ -444,13 +400,6 @@ int drm_irq_by_busid(struct drm_device *dev, void *data, { return -EINVAL; } - -int drm_pci_set_unique(struct drm_device *dev, - struct drm_master *master, - struct drm_unique *u) -{ - return -EINVAL; -} #endif EXPORT_SYMBOL(drm_pci_init); diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 369d2898ff9e..16c4a7bd7465 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -115,6 +115,7 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc, * @src: source coordinates in 16.16 fixed point * @dest: integer destination coordinates * @clip: integer clipping coordinates + * @rotation: plane rotation * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point * @can_position: is it legal to position the plane such that it @@ -134,16 +135,17 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc, * Zero if update appears valid, error code on failure */ int drm_plane_helper_check_update(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_rect *src, - struct drm_rect *dest, - const struct drm_rect *clip, - int min_scale, - int max_scale, - bool can_position, - bool can_update_disabled, - bool *visible) + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_rect *src, + struct drm_rect *dest, + const struct drm_rect *clip, + unsigned int rotation, + int min_scale, + int max_scale, + bool can_position, + bool can_update_disabled, + bool *visible) { int hscale, vscale; @@ -163,6 +165,8 @@ int drm_plane_helper_check_update(struct drm_plane *plane, return -EINVAL; } + drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation); + /* Check scaling */ hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale); vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale); @@ -174,6 +178,9 
@@ int drm_plane_helper_check_update(struct drm_plane *plane, } *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale); + + drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation); + if (!*visible) /* * Plane isn't visible; some drivers can handle this @@ -219,10 +226,12 @@ EXPORT_SYMBOL(drm_plane_helper_check_update); * * Note that we make some assumptions about hardware limitations that may not be * true for all hardware -- - * 1) Primary plane cannot be repositioned. - * 2) Primary plane cannot be scaled. - * 3) Primary plane must cover the entire CRTC. - * 4) Subpixel positioning is not supported. + * + * 1. Primary plane cannot be repositioned. + * 2. Primary plane cannot be scaled. + * 3. Primary plane must cover the entire CRTC. + * 4. Subpixel positioning is not supported. + * * Drivers for hardware that don't have these restrictions can provide their * own implementation rather than using this helper. * @@ -265,6 +274,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip, + BIT(DRM_ROTATE_0), DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, false, false, &visible); diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c index 644169e1a029..2c819ef90090 100644 --- a/drivers/gpu/drm/drm_platform.c +++ b/drivers/gpu/drm/drm_platform.c @@ -68,24 +68,6 @@ err_free: return ret; } -int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master) -{ - int id; - - id = dev->platformdev->id; - if (id < 0) - id = 0; - - master->unique = kasprintf(GFP_KERNEL, "platform:%s:%02d", - dev->platformdev->name, id); - if (!master->unique) - return -ENOMEM; - - master->unique_len = strlen(master->unique); - return 0; -} -EXPORT_SYMBOL(drm_platform_set_busid); - /** * drm_platform_init - Register a platform device with the DRM subsystem * @driver: DRM device driver diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index aab0f3f1f42d..780589b420a4 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -593,7 +593,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev, get_dma_buf(dma_buf); } - /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ + /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */ ret = drm_gem_handle_create_tail(file_priv, obj, handle); drm_gem_object_unreference_unlocked(obj); if (ret) @@ -601,11 +601,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev, ret = drm_prime_add_buf_handle(&file_priv->prime, dma_buf, *handle); + mutex_unlock(&file_priv->prime.lock); if (ret) goto fail; - mutex_unlock(&file_priv->prime.lock); - dma_buf_put(dma_buf); return 0; @@ -615,11 +614,14 @@ fail: * to detach.. which seems ok.. 
*/ drm_gem_handle_delete(file_priv, *handle); + dma_buf_put(dma_buf); + return ret; + out_unlock: mutex_unlock(&dev->object_name_lock); out_put: - dma_buf_put(dma_buf); mutex_unlock(&file_priv->prime.lock); + dma_buf_put(dma_buf); return ret; } EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 0329080d7f7c..a0df377d7d1c 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -82,13 +82,30 @@ drm_mode_validate_flag(const struct drm_display_mode *mode, static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) { + struct drm_cmdline_mode *cmdline_mode; struct drm_display_mode *mode; - if (!connector->cmdline_mode.specified) + cmdline_mode = &connector->cmdline_mode; + if (!cmdline_mode->specified) return 0; + /* Only add a GTF mode if we find no matching probed modes */ + list_for_each_entry(mode, &connector->probed_modes, head) { + if (mode->hdisplay != cmdline_mode->xres || + mode->vdisplay != cmdline_mode->yres) + continue; + + if (cmdline_mode->refresh_specified) { + /* The probed mode's vrefresh is set until later */ + if (drm_mode_vrefresh(mode) != cmdline_mode->refresh) + continue; + } + + return 0; + } + mode = drm_mode_create_from_cmdline_mode(connector->dev, - &connector->cmdline_mode); + cmdline_mode); if (mode == NULL) return 0; diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c new file mode 100644 index 000000000000..0db36d27e90b --- /dev/null +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -0,0 +1,206 @@ +/* + * Copyright (C) 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_simple_kms_helper.h> +#include <linux/slab.h> + +/** + * DOC: overview + * + * This helper library provides helpers for drivers for simple display + * hardware. + * + * drm_simple_display_pipe_init() initializes a simple display pipeline + * which has only one full-screen scanout buffer feeding one output. The + * pipeline is represented by struct &drm_simple_display_pipe and binds + * together &drm_plane, &drm_crtc and &drm_encoder structures into one fixed + * entity. Some flexibility for code reuse is provided through a separately + * allocated &drm_connector object and supporting optional &drm_bridge + * encoder drivers. 
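A consumer of this helper reduces to roughly the following boilerplate (a sketch with hypothetical foo_* names; the callbacks match the struct drm_simple_display_pipe_funcs hooks invoked by the code below):

    static void foo_pipe_enable(struct drm_simple_display_pipe *pipe,
                                struct drm_crtc_state *crtc_state)
    {
        /* power up and start scanout (hypothetical hardware) */
    }

    static void foo_pipe_update(struct drm_simple_display_pipe *pipe,
                                struct drm_plane_state *plane_state)
    {
        /* push the new framebuffer to the hardware */
    }

    static const struct drm_simple_display_pipe_funcs foo_funcs = {
        .enable = foo_pipe_enable,
        .update = foo_pipe_update,
    };

    static const uint32_t foo_formats[] = { DRM_FORMAT_XRGB8888 };

    /* in the driver's modeset init, once the connector is set up */
    ret = drm_simple_display_pipe_init(drm, &foo->pipe, &foo_funcs,
                                       foo_formats, ARRAY_SIZE(foo_formats),
                                       &foo->connector);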
+ */ + +static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(crtc, struct drm_simple_display_pipe, crtc); + if (!pipe->funcs || !pipe->funcs->enable) + return; + + pipe->funcs->enable(pipe, crtc->state); +} + +static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(crtc, struct drm_simple_display_pipe, crtc); + if (!pipe->funcs || !pipe->funcs->disable) + return; + + pipe->funcs->disable(pipe); +} + +static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = { + .disable = drm_simple_kms_crtc_disable, + .enable = drm_simple_kms_crtc_enable, +}; + +static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = { + .reset = drm_atomic_helper_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + struct drm_rect src = { + .x1 = plane_state->src_x, + .y1 = plane_state->src_y, + .x2 = plane_state->src_x + plane_state->src_w, + .y2 = plane_state->src_y + plane_state->src_h, + }; + struct drm_rect dest = { + .x1 = plane_state->crtc_x, + .y1 = plane_state->crtc_y, + .x2 = plane_state->crtc_x + plane_state->crtc_w, + .y2 = plane_state->crtc_y + plane_state->crtc_h, + }; + struct drm_rect clip = { 0 }; + struct drm_simple_display_pipe *pipe; + struct drm_crtc_state *crtc_state; + bool visible; + int ret; + + pipe = container_of(plane, struct drm_simple_display_pipe, plane); + crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state, + &pipe->crtc); + if (crtc_state->enable != !!plane_state->crtc) + return -EINVAL; /* plane must match crtc enable state */ + + if (!crtc_state->enable) + return 0; /* nothing to check when disabling or disabled */ + + clip.x2 = crtc_state->adjusted_mode.hdisplay; + clip.y2 = crtc_state->adjusted_mode.vdisplay; + ret = drm_plane_helper_check_update(plane, &pipe->crtc, + plane_state->fb, + &src, &dest, &clip, + plane_state->rotation, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true, &visible); + if (ret) + return ret; + + if (!visible) + return -EINVAL; + + if (!pipe->funcs || !pipe->funcs->check) + return 0; + + return pipe->funcs->check(pipe, plane_state, crtc_state); +} + +static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *pstate) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(plane, struct drm_simple_display_pipe, plane); + if (!pipe->funcs || !pipe->funcs->update) + return; + + pipe->funcs->update(pipe, pstate); +} + +static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = { + .atomic_check = drm_simple_kms_plane_atomic_check, + .atomic_update = drm_simple_kms_plane_atomic_update, +}; + +static const struct drm_plane_funcs drm_simple_kms_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = 
drm_atomic_helper_plane_destroy_state, +}; + +/** + * drm_simple_display_pipe_init - Initialize a simple display pipeline + * @dev: DRM device + * @pipe: simple display pipe object to initialize + * @funcs: callbacks for the display pipe (optional) + * @formats: array of supported formats (%DRM_FORMAT_*) + * @format_count: number of elements in @formats + * @connector: connector to attach and register + * + * Sets up a display pipeline which consists of a really simple + * plane-crtc-encoder pipe coupled with the provided connector. + * Teardown of a simple display pipe is all handled automatically by the drm + * core through calling drm_mode_config_cleanup(). Drivers afterwards need to + * release the memory for the structure themselves. + * + * Returns: + * Zero on success, negative error code on failure. + */ +int drm_simple_display_pipe_init(struct drm_device *dev, + struct drm_simple_display_pipe *pipe, + const struct drm_simple_display_pipe_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + struct drm_connector *connector) +{ + struct drm_encoder *encoder = &pipe->encoder; + struct drm_plane *plane = &pipe->plane; + struct drm_crtc *crtc = &pipe->crtc; + int ret; + + pipe->connector = connector; + pipe->funcs = funcs; + + drm_plane_helper_add(plane, &drm_simple_kms_plane_helper_funcs); + ret = drm_universal_plane_init(dev, plane, 0, + &drm_simple_kms_plane_funcs, + formats, format_count, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + return ret; + + drm_crtc_helper_add(crtc, &drm_simple_kms_crtc_helper_funcs); + ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL, + &drm_simple_kms_crtc_funcs, NULL); + if (ret) + return ret; + + encoder->possible_crtcs = 1 << drm_crtc_index(crtc); + ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs, + DRM_MODE_ENCODER_NONE, NULL); + if (ret) + return ret; + + return drm_mode_connector_attach_encoder(connector, encoder); +} +EXPORT_SYMBOL(drm_simple_display_pipe_init); + +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index fa7fadce8063..32dd821b7202 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -32,75 +32,6 @@ static struct device_type drm_sysfs_device_minor = { struct class *drm_class; -/** - * __drm_class_suspend - internal DRM class suspend routine - * @dev: Linux device to suspend - * @state: power state to enter - * - * Just figures out what the actual struct drm_device associated with - * @dev is and calls its suspend hook, if present. - */ -static int __drm_class_suspend(struct device *dev, pm_message_t state) -{ - if (dev->type == &drm_sysfs_device_minor) { - struct drm_minor *drm_minor = to_drm_minor(dev); - struct drm_device *drm_dev = drm_minor->dev; - - if (drm_minor->type == DRM_MINOR_LEGACY && - !drm_core_check_feature(drm_dev, DRIVER_MODESET) && - drm_dev->driver->suspend) - return drm_dev->driver->suspend(drm_dev, state); - } - return 0; -} - -/** - * drm_class_suspend - internal DRM class suspend hook. Simply calls - * __drm_class_suspend() with the correct pm state. - * @dev: Linux device to suspend - */ -static int drm_class_suspend(struct device *dev) -{ - return __drm_class_suspend(dev, PMSG_SUSPEND); -} - -/** - * drm_class_freeze - internal DRM class freeze hook. Simply calls - * __drm_class_suspend() with the correct pm state.
- * @dev: Linux device to freeze - */ -static int drm_class_freeze(struct device *dev) -{ - return __drm_class_suspend(dev, PMSG_FREEZE); -} - -/** - * drm_class_resume - DRM class resume hook - * @dev: Linux device to resume - * - * Just figures out what the actual struct drm_device associated with - * @dev is and calls its resume hook, if present. - */ -static int drm_class_resume(struct device *dev) -{ - if (dev->type == &drm_sysfs_device_minor) { - struct drm_minor *drm_minor = to_drm_minor(dev); - struct drm_device *drm_dev = drm_minor->dev; - - if (drm_minor->type == DRM_MINOR_LEGACY && - !drm_core_check_feature(drm_dev, DRIVER_MODESET) && - drm_dev->driver->resume) - return drm_dev->driver->resume(drm_dev); - } - return 0; -} - -static const struct dev_pm_ops drm_class_dev_pm_ops = { - .suspend = drm_class_suspend, - .resume = drm_class_resume, - .freeze = drm_class_freeze, -}; - static char *drm_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev)); @@ -131,8 +62,6 @@ int drm_sysfs_init(void) if (IS_ERR(drm_class)) return PTR_ERR(drm_class); - drm_class->pm = &drm_class_dev_pm_ops; - err = class_create_file(drm_class, &class_attr_version.attr); if (err) { class_destroy(drm_class); diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index ac9f4b3ec615..43ff44a2b8e7 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -670,57 +670,3 @@ void drm_legacy_vma_flush(struct drm_device *dev) kfree(vma); } } - -int drm_vma_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct drm_vma_entry *pt; - struct vm_area_struct *vma; - unsigned long vma_count = 0; -#if defined(__i386__) - unsigned int pgprot; -#endif - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(pt, &dev->vmalist, head) - vma_count++; - - seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n", - vma_count, high_memory, - (void *)(unsigned long)virt_to_phys(high_memory)); - - list_for_each_entry(pt, &dev->vmalist, head) { - vma = pt->vma; - if (!vma) - continue; - seq_printf(m, - "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000", - pt->pid, - (void *)vma->vm_start, (void *)vma->vm_end, - vma->vm_flags & VM_READ ? 'r' : '-', - vma->vm_flags & VM_WRITE ? 'w' : '-', - vma->vm_flags & VM_EXEC ? 'x' : '-', - vma->vm_flags & VM_MAYSHARE ? 's' : 'p', - vma->vm_flags & VM_LOCKED ? 'l' : '-', - vma->vm_flags & VM_IO ? 'i' : '-', - vma->vm_pgoff); - -#if defined(__i386__) - pgprot = pgprot_val(vma->vm_page_prot); - seq_printf(m, " %c%c%c%c%c%c%c%c%c", - pgprot & _PAGE_PRESENT ? 'p' : '-', - pgprot & _PAGE_RW ? 'w' : 'r', - pgprot & _PAGE_USER ? 'u' : 's', - pgprot & _PAGE_PWT ? 't' : 'b', - pgprot & _PAGE_PCD ? 'u' : 'c', - pgprot & _PAGE_ACCESSED ? 'a' : '-', - pgprot & _PAGE_DIRTY ? 'd' : '-', - pgprot & _PAGE_PSE ? 'm' : 'k', - pgprot & _PAGE_GLOBAL ? 'g' : 'l'); -#endif - seq_printf(m, "\n"); - } - mutex_unlock(&dev->struct_mutex); - return 0; -} diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c index 2f2ecde8285b..f306c8855978 100644 --- a/drivers/gpu/drm/drm_vma_manager.c +++ b/drivers/gpu/drm/drm_vma_manager.c @@ -127,6 +127,9 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy); * used to implement weakly referenced lookups using kref_get_unless_zero(). 
* * Example: + * + * :: + * * drm_vma_offset_lock_lookup(mgr); * node = drm_vma_offset_lookup_locked(mgr); * if (node) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 3d4f56df8359..340d390306d8 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -496,7 +496,6 @@ static struct drm_driver etnaviv_drm_driver = { DRIVER_RENDER, .open = etnaviv_open, .preclose = etnaviv_preclose, - .set_busid = drm_platform_set_busid, .gem_free_object_unlocked = etnaviv_gem_free_object, .gem_vm_ops = &vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index f5321e2f25ff..a69cdd526bf8 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -125,7 +125,7 @@ struct etnaviv_gpu { u32 completed_fence; u32 retired_fence; wait_queue_head_t fence_event; - unsigned int fence_context; + u64 fence_context; spinlock_t fence_spinlock; /* worker for handling active-list retiring: */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index 5e38e749ac17..ad6b73c7fc59 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -93,17 +93,8 @@ static int exynos_dpi_get_modes(struct drm_connector *connector) return 0; } -static struct drm_encoder * -exynos_dpi_best_encoder(struct drm_connector *connector) -{ - struct exynos_dpi *ctx = connector_to_dpi(connector); - - return &ctx->encoder; -} - static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = { .get_modes = exynos_dpi_get_modes, - .best_encoder = exynos_dpi_best_encoder, }; static int exynos_dpi_create_connector(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 2dd820e23b0c..13d28d4229e2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -267,6 +267,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, { struct exynos_drm_private *priv = dev->dev_private; struct exynos_atomic_commit *commit; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; int i, ret; commit = kzalloc(sizeof(*commit), GFP_KERNEL); @@ -288,10 +290,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, /* Wait until all affected CRTCs have completed previous commits and * mark them as pending. 
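The exynos_atomic_commit() hunk below swaps open-coded num_crtc indexing for the atomic state iterator. A sketch of the complete wait pattern it feeds, where the commit_is_pending() body is an assumption about exynos' priv->pending bookkeeping rather than a copy of the driver:

static bool commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
{
	bool pending;

	spin_lock(&priv->lock);
	pending = priv->pending & crtcs;	/* any affected CRTC still committing? */
	spin_unlock(&priv->lock);

	return pending;
}

/* collect the CRTCs touched by this commit, then wait for them to drain */
for_each_crtc_in_state(state, crtc, crtc_state, i)
	commit->crtcs |= drm_crtc_mask(crtc);

wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));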
*/ - for (i = 0; i < dev->mode_config.num_crtc; ++i) { - if (state->crtcs[i]) - commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]); - } + for_each_crtc_in_state(state, crtc, crtc_state, i) + commit->crtcs |= drm_crtc_mask(crtc); wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs)); @@ -299,7 +299,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, priv->pending |= commit->crtcs; spin_unlock(&priv->lock); - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) schedule_work(&commit->work); @@ -407,7 +407,6 @@ static struct drm_driver exynos_drm_driver = { .preclose = exynos_drm_preclose, .lastclose = exynos_drm_lastclose, .postclose = exynos_drm_postclose, - .set_busid = drm_platform_set_busid, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = exynos_drm_crtc_enable_vblank, .disable_vblank = exynos_drm_crtc_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 601ecf8006a7..e07cb1fe4860 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1566,17 +1566,8 @@ static int exynos_dsi_get_modes(struct drm_connector *connector) return 0; } -static struct drm_encoder * -exynos_dsi_best_encoder(struct drm_connector *connector) -{ - struct exynos_dsi *dsi = connector_to_dsi(connector); - - return &dsi->encoder; -} - static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { .get_modes = exynos_dsi_get_modes, - .best_encoder = exynos_dsi_best_encoder, }; static int exynos_dsi_create_connector(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 55f1d37c666a..77f12c00abf9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -242,7 +242,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config, state->v_ratio == (1 << 15)) height_ok = true; - if (width_ok & height_ok) + if (width_ok && height_ok) return 0; DRM_DEBUG_KMS("scaling mode is not supported"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 608b0afa337f..e8f6c92b2a36 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -378,16 +378,8 @@ static int vidi_get_modes(struct drm_connector *connector) return drm_add_edid_modes(connector, edid); } -static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector) -{ - struct vidi_context *ctx = ctx_from_connector(connector); - - return &ctx->encoder; -} - static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = { .get_modes = vidi_get_modes, - .best_encoder = vidi_best_encoder, }; static int vidi_create_connector(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 58de5a430508..1625d7c8a319 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -937,17 +937,9 @@ static int hdmi_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector) -{ - struct hdmi_context *hdata = connector_to_hdmi(connector); - - return &hdata->encoder; -} - static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { .get_modes = hdmi_get_modes, .mode_valid = hdmi_mode_valid, - 
.best_encoder = hdmi_best_encoder, }; static int hdmi_create_connector(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index 89c0084c2814..706de3278f1c 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -22,20 +22,21 @@ #include "fsl_dcu_drm_drv.h" #include "fsl_dcu_drm_plane.h" -static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc, +static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { -} + struct drm_pending_vblank_event *event = crtc->state->event; -static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ - return 0; -} + if (event) { + crtc->state->event = NULL; -static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) -{ + spin_lock_irq(&crtc->dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); + } } static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) @@ -117,8 +118,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) } static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { - .atomic_begin = fsl_dcu_drm_crtc_atomic_begin, - .atomic_check = fsl_dcu_drm_crtc_atomic_check, .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, .disable = fsl_dcu_drm_disable_crtc, .enable = fsl_dcu_drm_crtc_enable, diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 0ec1ad961e0d..33727d5d826a 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -198,7 +198,7 @@ static struct drm_driver fsl_dcu_drm_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = fsl_dcu_drm_enable_vblank, .disable_vblank = fsl_dcu_drm_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c index c564ec612b59..a6e4cd591960 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c @@ -37,23 +37,22 @@ int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev) ret = fsl_dcu_drm_crtc_create(fsl_dev); if (ret) - return ret; + goto err; ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc); if (ret) - goto fail_encoder; + goto err; ret = fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder); if (ret) - goto fail_connector; + goto err; drm_mode_config_reset(fsl_dev->drm); drm_kms_helper_poll_init(fsl_dev->drm); return 0; -fail_encoder: - fsl_dev->crtc.funcs->destroy(&fsl_dev->crtc); -fail_connector: - fsl_dev->encoder.funcs->destroy(&fsl_dev->encoder); + +err: + drm_mode_config_cleanup(fsl_dev->drm); return ret; } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index 98c998da91eb..0b0989e503ea 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -102,14 +102,6 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = { .reset = drm_atomic_helper_connector_reset, }; -static struct drm_encoder 
* -fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector) -{ - struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector); - - return fsl_con->encoder; -} - static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) { struct fsl_dcu_drm_connector *fsl_connector; @@ -136,7 +128,6 @@ static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, } static const struct drm_connector_helper_funcs connector_helper_funcs = { - .best_encoder = fsl_dcu_drm_connector_best_encoder, .get_modes = fsl_dcu_drm_connector_get_modes, .mode_valid = fsl_dcu_drm_connector_mode_valid, }; diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index c95406e6f44d..1a1cf7a3b5ef 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -175,20 +175,21 @@ void gma_crtc_load_lut(struct drm_crtc *crtc) } } -void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, - u32 start, u32 size) +int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, + u32 size) { struct gma_crtc *gma_crtc = to_gma_crtc(crtc); int i; - int end = (start + size > 256) ? 256 : start + size; - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { gma_crtc->lut_r[i] = red[i] >> 8; gma_crtc->lut_g[i] = green[i] >> 8; gma_crtc->lut_b[i] = blue[i] >> 8; } gma_crtc_load_lut(crtc); + + return 0; } /** @@ -281,7 +282,7 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode) REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); /* Turn off vblank interrupts */ - drm_vblank_off(dev, pipe); + drm_crtc_vblank_off(crtc); /* Wait for vblank for the disable to take effect */ gma_wait_for_vblank(dev); diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h index b2491c65f053..e72dd08b701b 100644 --- a/drivers/gpu/drm/gma500/gma_display.h +++ b/drivers/gpu/drm/gma500/gma_display.h @@ -72,8 +72,8 @@ extern int gma_crtc_cursor_set(struct drm_crtc *crtc, uint32_t width, uint32_t height); extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); extern void gma_crtc_load_lut(struct drm_crtc *crtc); -extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, u32 start, u32 size); +extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, u32 size); extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode); extern void gma_crtc_prepare(struct drm_crtc *crtc); extern void gma_crtc_commit(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 398015be87e4..7b6c84925098 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -491,7 +491,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe, struct drm_psb_private *dev_priv = dev->dev_private; struct gma_crtc *gma_crtc; int i; - uint16_t *r_base, *g_base, *b_base; /* We allocate a extra array of drm_connector pointers * for fbdev after the crtc */ @@ -519,16 +518,10 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe, gma_crtc->pipe = pipe; gma_crtc->plane = pipe; - r_base = gma_crtc->base.gamma_store; - g_base = r_base + 256; - b_base = g_base + 256; for (i = 0; i < 256; i++) { gma_crtc->lut_r[i] = i; gma_crtc->lut_g[i] = i; gma_crtc->lut_b[i] = i; - r_base[i] = i << 8; - g_base[i] = i << 8; - b_base[i] = i << 8; gma_crtc->lut_adj[i] = 0; } diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c 
b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index fba6372d060e..ed76baad525f 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -502,13 +502,6 @@ static void ade_crtc_disable(struct drm_crtc *crtc) acrtc->enable = false; } -static int ade_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ - /* do nothing */ - return 0; -} - static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct ade_crtc *acrtc = to_ade_crtc(crtc); @@ -537,6 +530,7 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc, { struct ade_crtc *acrtc = to_ade_crtc(crtc); struct ade_hw_ctx *ctx = acrtc->ctx; + struct drm_pending_vblank_event *event = crtc->state->event; void __iomem *base = ctx->base; /* only crtc is enabled regs take effect */ @@ -545,12 +539,22 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc, /* flush ade registers */ writel(ADE_ENABLE, base + ADE_EN); } + + if (event) { + crtc->state->event = NULL; + + spin_lock_irq(&crtc->dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); + } } static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = { .enable = ade_crtc_enable, .disable = ade_crtc_disable, - .atomic_check = ade_crtc_atomic_check, .mode_set_nofb = ade_crtc_mode_set_nofb, .atomic_begin = ade_crtc_atomic_begin, .atomic_flush = ade_crtc_atomic_flush, diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 3f94785fbcca..1edd9bc80294 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -171,9 +171,8 @@ static struct drm_driver kirin_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC | DRIVER_HAVE_IRQ, .fops = &kirin_drm_fops, - .set_busid = drm_platform_set_busid, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = kirin_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, @@ -221,19 +220,12 @@ static int kirin_drm_bind(struct device *dev) if (ret) goto err_kms_cleanup; - /* connectors should be registered after drm device register */ - ret = drm_connector_register_all(drm_dev); - if (ret) - goto err_drm_dev_unregister; - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name, driver->major, driver->minor, driver->patchlevel, driver->date, drm_dev->primary->index); return 0; -err_drm_dev_unregister: - drm_dev_unregister(drm_dev); err_kms_cleanup: kirin_drm_kms_cleanup(drm_dev); err_drm_dev_unref: @@ -246,7 +238,6 @@ static void kirin_drm_unbind(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - drm_connector_unregister_all(drm_dev); drm_dev_unregister(drm_dev); kirin_drm_kms_cleanup(drm_dev); drm_dev_unref(drm_dev); diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 29a32b11953b..7769e469118f 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -57,6 +57,28 @@ config DRM_I915_USERPTR If in doubt, say "Y". +config DRM_I915_GVT + bool "Enable Intel GVT-g graphics virtualization host support" + depends on DRM_I915 + default n + help + Choose this option if you want to enable Intel GVT-g graphics + virtualization technology host support with integrated graphics. 
+ With GVT-g, it's possible to have one integrated graphics + device shared by multiple VMs under different hypervisors. + + Note that at least one hypervisor like Xen or KVM is required for + this driver to work, and it only supports newer devices from + Broadwell onwards. For further information and a setup guide, you + can visit: http://01.org/igvt-g. + + For now this is just a stub to support the modifications of i915 + for the GVT device model. It requires at least one MPT module for + Xen/KVM and the other components of the GVT device model to work. + Use it at your own risk. + + If in doubt, say "N". + menu "drm/i915 Debugging" depends on DRM_I915 depends on EXPERT diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 0b88ba0f3c1f..276abf1cac2b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -59,6 +59,7 @@ i915-y += intel_audio.o \ intel_bios.o \ intel_color.o \ intel_display.o \ + intel_dpio_phy.o \ intel_dpll_mgr.o \ intel_fbc.o \ intel_fifo_underrun.o \ @@ -81,10 +82,12 @@ i915-y += dvo_ch7017.o \ dvo_tfp410.o \ intel_crt.o \ intel_ddi.o \ + intel_dp_aux_backlight.o \ intel_dp_link_training.o \ intel_dp_mst.o \ intel_dp.o \ intel_dsi.o \ + intel_dsi_dcs_backlight.o \ intel_dsi_panel_vbt.o \ intel_dsi_pll.o \ intel_dvo.o \ @@ -101,6 +104,11 @@ i915-y += i915_vgpu.o # legacy horrors i915-y += i915_dma.o +ifeq ($(CONFIG_DRM_I915_GVT),y) +i915-y += intel_gvt.o +include $(src)/gvt/Makefile +endif + obj-$(CONFIG_DRM_I915) += i915.o CFLAGS_i915_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile new file mode 100644 index 000000000000..d0f21a6ad60d --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -0,0 +1,5 @@ +GVT_DIR := gvt +GVT_SOURCE := gvt.o + +ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall +i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h new file mode 100644 index 000000000000..7ef412be665f --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/debug.h @@ -0,0 +1,34 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __GVT_DEBUG_H__ +#define __GVT_DEBUG_H__ + +#define gvt_dbg_core(fmt, args...) \ + DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args) + +/* + * Other GVT debug stuff will be introduced in the GVT device model patches.
+ */ + +#endif diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c new file mode 100644 index 000000000000..927f4579f5b6 --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -0,0 +1,145 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/types.h> +#include <xen/xen.h> + +#include "i915_drv.h" + +struct intel_gvt_host intel_gvt_host; + +static const char * const supported_hypervisors[] = { + [INTEL_GVT_HYPERVISOR_XEN] = "XEN", + [INTEL_GVT_HYPERVISOR_KVM] = "KVM", +}; + +/** + * intel_gvt_init_host - Load MPT modules and detect if we're running in host + * + * This function is called at the driver loading stage. If it fails to find a + * loadable MPT module, or detects that we're currently running inside a VM, + * GVT-g will be disabled. + * + * Returns: + * Zero on success, negative error code on failure. + * + */ +int intel_gvt_init_host(void) +{ + if (intel_gvt_host.initialized) + return 0; + + /* Xen DOM U */ + if (xen_domain() && !xen_initial_domain()) + return -ENODEV; + + /* Try to load MPT modules for hypervisors */ + if (xen_initial_domain()) { + /* In Xen dom0 */ + intel_gvt_host.mpt = try_then_request_module( + symbol_get(xengt_mpt), "xengt"); + intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN; + } else { + /* not in Xen. Try KVMGT */ + intel_gvt_host.mpt = try_then_request_module( + symbol_get(kvmgt_mpt), "kvm"); + intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM; + } + + /* Failed to load an MPT module - bail out */ + if (!intel_gvt_host.mpt) + return -EINVAL; + + /* Try to detect if we're running in the host instead of in a VM. */ + if (!intel_gvt_hypervisor_detect_host()) + return -ENODEV; + + gvt_dbg_core("Running with hypervisor %s in host mode\n", + supported_hypervisors[intel_gvt_host.hypervisor_type]); + + intel_gvt_host.initialized = true; + return 0; +} + +static void init_device_info(struct intel_gvt *gvt) +{ + if (IS_BROADWELL(gvt->dev_priv)) + gvt->device_info.max_support_vgpus = 8; + /* This function will grow large in GVT device model patches. */ +} + +/** + * intel_gvt_clean_device - clean a GVT device + * @dev_priv: drm i915 private data + * + * This function is called at the driver unloading stage, to free the + * resources owned by a GVT device.
+ * + */ +void intel_gvt_clean_device(struct drm_i915_private *dev_priv) +{ + struct intel_gvt *gvt = &dev_priv->gvt; + + if (WARN_ON(!gvt->initialized)) + return; + + /* Other de-initialization of GVT components will be introduced. */ + + gvt->initialized = false; +} + +/** + * intel_gvt_init_device - initialize a GVT device + * @dev_priv: drm i915 private data + * + * This function is called at the initialization stage, to initialize + * necessary GVT components. + * + * Returns: + * Zero on success, negative error code on failure. + * + */ +int intel_gvt_init_device(struct drm_i915_private *dev_priv) +{ + struct intel_gvt *gvt = &dev_priv->gvt; + /* + * Cannot initialize a GVT device before intel_gvt_host has been + * initialized. + */ + if (WARN_ON(!intel_gvt_host.initialized)) + return -EINVAL; + + if (WARN_ON(gvt->initialized)) + return -EEXIST; + + gvt_dbg_core("init gvt device\n"); + + init_device_info(gvt); + /* + * Other initialization of GVT components will be introduced here. + */ + gvt_dbg_core("gvt device creation is done\n"); + gvt->initialized = true; + return 0; +} diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h new file mode 100644 index 000000000000..fb619a6e519d --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -0,0 +1,69 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _GVT_H_ +#define _GVT_H_ + +#include "debug.h" +#include "hypercall.h" + +#define GVT_MAX_VGPU 8 + +enum { + INTEL_GVT_HYPERVISOR_XEN = 0, + INTEL_GVT_HYPERVISOR_KVM, +}; + +struct intel_gvt_host { + bool initialized; + int hypervisor_type; + struct intel_gvt_mpt *mpt; +}; + +extern struct intel_gvt_host intel_gvt_host; + +/* Describe per-platform limitations.
*/ +struct intel_gvt_device_info { + u32 max_support_vgpus; + /* This data structure will grow bigger in GVT device model patches */ +}; + +struct intel_vgpu { + struct intel_gvt *gvt; + int id; + unsigned long handle; /* vGPU handle used by hypervisor MPT modules */ +}; + +struct intel_gvt { + struct mutex lock; + bool initialized; + + struct drm_i915_private *dev_priv; + struct idr vgpu_idr; /* vGPU IDR pool */ + + struct intel_gvt_device_info device_info; +}; + +#include "mpt.h" + +#endif diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h new file mode 100644 index 000000000000..254df8bf1f35 --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -0,0 +1,38 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _GVT_HYPERCALL_H_ +#define _GVT_HYPERCALL_H_ + +/* + * Function collections for the hypervisor-specific GVT-g MPT modules. + * Currently GVT-g supports both Xen and KVM by providing dedicated + * hypervisor-related MPT modules. + */ +struct intel_gvt_mpt { + int (*detect_host)(void); +}; + +extern struct intel_gvt_mpt xengt_mpt; +extern struct intel_gvt_mpt kvmgt_mpt; + +#endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h new file mode 100644 index 000000000000..03601e3ffa7c --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -0,0 +1,49 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
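hypercall.h only declares the ops table that an MPT module must export; a sketch of the provider side, using a hypothetical KVM flavour (kvmgt_detect_host() and running_as_kvm_host() are illustrative; only the intel_gvt_mpt layout and the kvmgt_mpt symbol name come from this series):

static int running_as_kvm_host(void)
{
	/* hypothetical probe: non-zero when bare-metal with KVM available */
	return 1;
}

static int kvmgt_detect_host(void)
{
	/*
	 * intel_gvt_init_host() treats a zero return as "not running in
	 * the host" and bails out with -ENODEV, so report non-zero only
	 * when this really is the privileged/host domain.
	 */
	return running_as_kvm_host();
}

struct intel_gvt_mpt kvmgt_mpt = {
	.detect_host = kvmgt_detect_host,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);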
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _GVT_MPT_H_ +#define _GVT_MPT_H_ + +/** + * DOC: Hypervisor Service APIs for GVT-g Core Logic + * + * This is the glue layer between specific hypervisor MPT modules and GVT-g core + * logic. Each kind of hypervisor MPT module provides a collection of function + * callbacks and will be attached to the GVT host when the driver is loaded. + * GVT-g core logic will call these APIs to request specific services from + * the hypervisor. + */ + +/** + * intel_gvt_hypervisor_detect_host - check if GVT-g is running within + * hypervisor host/privileged domain + * + * Returns: + * Zero on success, -ENODEV if the current kernel is running inside a VM + */ +static inline int intel_gvt_hypervisor_detect_host(void) +{ + return intel_gvt_host.mpt->detect_host(); +} + +#endif /* _GVT_MPT_H_ */ diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index a337f33bec5b..b0fd6a7b0603 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = { CMD( MI_RS_CONTEXT, SMI, F, 1, S ), CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), - CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ), + CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W, + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ), CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ), CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ), CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ), @@ -736,7 +737,7 @@ static void fini_hash_table(struct intel_engine_cs *engine) /** * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer - * @ring: the ringbuffer to initialize + * @engine: the engine to initialize * * Optionally initializes fields related to batch buffer command parsing in the * struct intel_engine_cs based on whether the platform requires software @@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) int cmd_table_count; int ret; - if (!IS_GEN7(engine->dev)) + if (!IS_GEN7(engine->i915)) return 0; switch (engine->id) { case RCS: - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { cmd_tables = hsw_render_ring_cmds; cmd_table_count = ARRAY_SIZE(hsw_render_ring_cmds); @@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) cmd_table_count = ARRAY_SIZE(gen7_render_cmds); } - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { engine->reg_tables = hsw_render_reg_tables; engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables); } else { @@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; case BCS: - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { cmd_tables = hsw_blt_ring_cmds; cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); } else { cmd_tables = gen7_blt_cmds; cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); } - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { engine->reg_tables = hsw_blt_reg_tables; engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); } else { @@ -829,7 +830,7 @@ int i915_cmd_parser_init_ring(struct
intel_engine_cs *engine) /** * i915_cmd_parser_fini_ring() - clean up cmd parser related fields - * @ring: the ringbuffer to clean up + * @engine: the engine to clean up * * Releases any resources related to command parsing that may have been * initialized for the specified ring. @@ -1023,7 +1024,7 @@ unpin_src: /** * i915_needs_cmd_parser() - should a given ring use software command parsing? - * @ring: the ring in question + * @engine: the engine in question * * Only certain platforms require software batch buffer command parsing, and * only when enabled via module parameter. @@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine) if (!engine->needs_cmd_parser) return false; - if (!USES_PPGTT(engine->dev)) + if (!USES_PPGTT(engine->i915)) return false; return (i915.enable_cmd_parser == 1); @@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine, return false; } + if (desc->cmd.value == MI_LOAD_REGISTER_REG) { + DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n"); + return false; + } + if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1)) *oacontrol_set = (cmd[offset + 1] != 0); } @@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine, return false; } + if (desc->cmd.value == MI_LOAD_REGISTER_REG) { + DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n", + reg_addr); + return false; + } + if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && (offset + 2 > length || (cmd[offset + 1] & reg->mask) != reg->value)) { @@ -1164,7 +1176,7 @@ static bool check_cmd(const struct intel_engine_cs *engine, /** * i915_parse_cmds() - parse a submitted batch buffer for privilege violations - * @ring: the ring on which the batch is to execute + * @engine: the engine on which the batch is to execute * @batch_obj: the batch buffer in question * @shadow_batch_obj: copy of the batch buffer in question * @batch_start_offset: byte offset in the batch at which execution starts @@ -1269,14 +1281,28 @@ int i915_parse_cmds(struct intel_engine_cs *engine, /** * i915_cmd_parser_get_version() - get the cmd parser version number + * @dev_priv: i915 device private * * The cmd parser maintains a simple increasing integer version number suitable * for passing to userspace clients to determine what operations are permitted. * * Return: the current version number of the cmd parser */ -int i915_cmd_parser_get_version(void) +int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) { + struct intel_engine_cs *engine; + bool active = false; + + /* If the command parser is not enabled, report 0 - unsupported */ + for_each_engine(engine, dev_priv) { + if (i915_needs_cmd_parser(engine)) { + active = true; + break; + } + } + if (!active) + return 0; + /* * Command parser version history * @@ -1288,6 +1314,7 @@ int i915_cmd_parser_get_version(void) * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3. * 5. GPGPU dispatch compute indirect registers. * 6. TIMESTAMP register and Haswell CS GPR registers + * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers. 
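Since i915_cmd_parser_get_version() now reports 0 when no engine has an active parser, userspace can distinguish "parser disabled" from any real version. A sketch of the standard query through the GETPARAM ioctl (userspace C, error handling abbreviated):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int cmd_parser_version(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return -1;

	return value;	/* 7 after this patch; 0 means the parser is inactive */
}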
*/ - return 6; + return 7; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 32690332d441..5b7526697838 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data) return 0; } -static const char get_active_flag(struct drm_i915_gem_object *obj) +static char get_active_flag(struct drm_i915_gem_object *obj) { return obj->active ? '*' : ' '; } -static const char get_pin_flag(struct drm_i915_gem_object *obj) +static char get_pin_flag(struct drm_i915_gem_object *obj) { return obj->pin_display ? 'p' : ' '; } -static const char get_tiling_flag(struct drm_i915_gem_object *obj) +static char get_tiling_flag(struct drm_i915_gem_object *obj) { switch (obj->tiling_mode) { default: @@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj) } } -static inline const char get_global_flag(struct drm_i915_gem_object *obj) +static char get_global_flag(struct drm_i915_gem_object *obj) { return i915_gem_obj_to_ggtt(obj) ? 'g' : ' '; } -static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj) +static char get_pin_mapped_flag(struct drm_i915_gem_object *obj) { return obj->mapping ? 'M' : ' '; } @@ -199,13 +199,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); } -static void describe_ctx(struct seq_file *m, struct intel_context *ctx) -{ - seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i'); - seq_putc(m, ctx->remap_slice ? 'R' : 'r'); - seq_putc(m, ' '); -} - static int i915_gem_object_list_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; @@ -424,6 +417,42 @@ static void print_batch_pool_stats(struct seq_file *m, print_file_stats(m, "[k]batch pool", stats); } +static int per_file_ctx_stats(int id, void *ptr, void *data) +{ + struct i915_gem_context *ctx = ptr; + int n; + + for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) { + if (ctx->engine[n].state) + per_file_stats(0, ctx->engine[n].state, data); + if (ctx->engine[n].ringbuf) + per_file_stats(0, ctx->engine[n].ringbuf->obj, data); + } + + return 0; +} + +static void print_context_stats(struct seq_file *m, + struct drm_i915_private *dev_priv) +{ + struct file_stats stats; + struct drm_file *file; + + memset(&stats, 0, sizeof(stats)); + + mutex_lock(&dev_priv->dev->struct_mutex); + if (dev_priv->kernel_context) + per_file_ctx_stats(0, dev_priv->kernel_context, &stats); + + list_for_each_entry(file, &dev_priv->dev->filelist, lhead) { + struct drm_i915_file_private *fpriv = file->driver_priv; + idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats); + } + mutex_unlock(&dev_priv->dev->struct_mutex); + + print_file_stats(m, "[k]contexts", stats); +} + #define count_vmas(list, member) do { \ list_for_each_entry(vma, list, member) { \ size += i915_gem_obj_total_ggtt_size(vma->obj); \ @@ -528,10 +557,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data) seq_putc(m, '\n'); print_batch_pool_stats(m, dev_priv); - mutex_unlock(&dev->struct_mutex); mutex_lock(&dev->filelist_mutex); + print_context_stats(m, dev_priv); list_for_each_entry_reverse(file, &dev->filelist, lhead) { struct file_stats stats; struct task_struct *task; @@ -607,18 +636,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) for_each_intel_crtc(dev, crtc) { const char pipe = pipe_name(crtc->pipe); const char plane = plane_name(crtc->plane); - struct 
intel_unpin_work *work; + struct intel_flip_work *work; spin_lock_irq(&dev->event_lock); - work = crtc->unpin_work; + work = crtc->flip_work; if (work == NULL) { seq_printf(m, "No flip due on pipe %c (plane %c)\n", pipe, plane); } else { + u32 pending; u32 addr; - if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { - seq_printf(m, "Flip queued on pipe %c (plane %c)\n", + pending = atomic_read(&work->pending); + if (pending) { + seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n", pipe, plane); } else { seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", @@ -638,11 +669,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", work->flip_queued_vblank, work->flip_ready_vblank, - drm_crtc_vblank_count(&crtc->base)); - if (work->enable_stall_check) - seq_puts(m, "Stall check enabled, "); - else - seq_puts(m, "Stall check waiting for page flip ioctl, "); + intel_crtc_get_vblank_counter(crtc)); seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); if (INTEL_INFO(dev)->gen >= 4) @@ -1281,6 +1308,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) } seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); + seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep); seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); seq_printf(m, "Render p-state ratio: %d\n", (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8); @@ -1383,7 +1411,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) seqno[id] = engine->get_seqno(engine); } - i915_get_extra_instdone(dev, instdone); + i915_get_extra_instdone(dev_priv, instdone); intel_runtime_pm_put(dev_priv); @@ -1991,8 +2019,7 @@ static int i915_context_status(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - struct intel_context *ctx; - enum intel_engine_id id; + struct i915_gem_context *ctx; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -2000,32 +2027,36 @@ static int i915_context_status(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->context_list, link) { - if (!i915.enable_execlists && - ctx->legacy_hw_ctx.rcs_state == NULL) - continue; - - seq_puts(m, "HW context "); - describe_ctx(m, ctx); - if (ctx == dev_priv->kernel_context) - seq_printf(m, "(kernel context) "); + seq_printf(m, "HW context %u ", ctx->hw_id); + if (IS_ERR(ctx->file_priv)) { + seq_puts(m, "(deleted) "); + } else if (ctx->file_priv) { + struct pid *pid = ctx->file_priv->file->pid; + struct task_struct *task; - if (i915.enable_execlists) { - seq_putc(m, '\n'); - for_each_engine_id(engine, dev_priv, id) { - struct drm_i915_gem_object *ctx_obj = - ctx->engine[id].state; - struct intel_ringbuffer *ringbuf = - ctx->engine[id].ringbuf; - - seq_printf(m, "%s: ", engine->name); - if (ctx_obj) - describe_obj(m, ctx_obj); - if (ringbuf) - describe_ctx_ringbuf(m, ringbuf); - seq_putc(m, '\n'); + task = get_pid_task(pid, PIDTYPE_PID); + if (task) { + seq_printf(m, "(%s [%d]) ", + task->comm, task->pid); + put_task_struct(task); } } else { - describe_obj(m, ctx->legacy_hw_ctx.rcs_state); + seq_puts(m, "(kernel) "); + } + + seq_putc(m, ctx->remap_slice ? 
'R' : 'r'); + seq_putc(m, '\n'); + + for_each_engine(engine, dev_priv) { + struct intel_context *ce = &ctx->engine[engine->id]; + + seq_printf(m, "%s: ", engine->name); + seq_putc(m, ce->initialised ? 'I' : 'i'); + if (ce->state) + describe_obj(m, ce->state); + if (ce->ringbuf) + describe_ctx_ringbuf(m, ce->ringbuf); + seq_putc(m, '\n'); } seq_putc(m, '\n'); @@ -2037,24 +2068,22 @@ static int i915_context_status(struct seq_file *m, void *unused) } static void i915_dump_lrc_obj(struct seq_file *m, - struct intel_context *ctx, + struct i915_gem_context *ctx, struct intel_engine_cs *engine) { + struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; struct page *page; uint32_t *reg_state; int j; - struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; unsigned long ggtt_offset = 0; + seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id); + if (ctx_obj == NULL) { - seq_printf(m, "Context on %s with no gem object\n", - engine->name); + seq_puts(m, "\tNot allocated\n"); return; } - seq_printf(m, "CONTEXT: %s %u\n", engine->name, - intel_execlists_ctx_id(ctx, engine)); - if (!i915_gem_obj_ggtt_bound(ctx_obj)) seq_puts(m, "\tNot bound in GGTT\n"); else @@ -2087,7 +2116,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; if (!i915.enable_execlists) { @@ -2100,9 +2129,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->context_list, link) - if (ctx != dev_priv->kernel_context) - for_each_engine(engine, dev_priv) - i915_dump_lrc_obj(m, ctx, engine); + for_each_engine(engine, dev_priv) + i915_dump_lrc_obj(m, ctx, engine); mutex_unlock(&dev->struct_mutex); @@ -2173,8 +2201,8 @@ static int i915_execlists(struct seq_file *m, void *data) seq_printf(m, "\t%d requests in queue\n", count); if (head_req) { - seq_printf(m, "\tHead request id: %u\n", - intel_execlists_ctx_id(head_req->ctx, engine)); + seq_printf(m, "\tHead request context: %u\n", + head_req->ctx->hw_id); seq_printf(m, "\tHead request tail: %u\n", head_req->tail); } @@ -2268,7 +2296,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) static int per_file_ctx(int id, void *ptr, void *data) { - struct intel_context *ctx = ptr; + struct i915_gem_context *ctx = ptr; struct seq_file *m = data; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; @@ -2313,12 +2341,12 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); for_each_engine(engine, dev_priv) { seq_printf(m, "%s\n", engine->name); - if (INTEL_INFO(dev)->gen == 7) + if (IS_GEN7(dev_priv)) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(engine))); seq_printf(m, "PP_DIR_BASE: 0x%08x\n", @@ -2365,16 +2393,16 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) task = get_pid_task(file->pid, PIDTYPE_PID); if (!task) { ret = -ESRCH; - goto out_put; + goto out_unlock; } seq_printf(m, "\nproc: %s\n", task->comm); put_task_struct(task); idr_for_each(&file_priv->context_idr, per_file_ctx, (void *)(unsigned long)m); } +out_unlock: mutex_unlock(&dev->filelist_mutex); -out_put: intel_runtime_pm_put(dev_priv); mutex_unlock(&dev->struct_mutex); @@ -2509,6 +2537,7 
@@ static void i915_guc_client_info(struct seq_file *m, seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", client->wq_size, client->wq_offset, client->wq_tail); + seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space); seq_printf(m, "\tFailed to queue: %u\n", client->q_fail); seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); seq_printf(m, "\tLast submission result: %d\n", client->retcode); @@ -2545,6 +2574,10 @@ static int i915_guc_info(struct seq_file *m, void *data) mutex_unlock(&dev->struct_mutex); + seq_printf(m, "Doorbell map:\n"); + seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap); + seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline); + seq_printf(m, "GuC total action count: %llu\n", guc.action_count); seq_printf(m, "GuC action failure count: %u\n", guc.action_fail); seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd); @@ -3168,7 +3201,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) enum intel_engine_id id; int j, ret; - if (!i915_semaphore_is_enabled(dev)) { + if (!i915_semaphore_is_enabled(dev_priv)) { seq_puts(m, "Semaphores are disabled\n"); return 0; } @@ -4769,7 +4802,7 @@ i915_wedged_set(void *data, u64 val) intel_runtime_pm_get(dev_priv); - i915_handle_error(dev, val, + i915_handle_error(dev_priv, val, "Manually setting wedged to %llu", val); intel_runtime_pm_put(dev_priv); @@ -4919,7 +4952,7 @@ i915_drop_caches_set(void *data, u64 val) } if (val & (DROP_RETIRE | DROP_ACTIVE)) - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); if (val & DROP_BOUND) i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); @@ -4993,7 +5026,7 @@ i915_max_freq_set(void *data, u64 val) dev_priv->rps.max_freq_softlimit = val; - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); @@ -5060,7 +5093,7 @@ i915_min_freq_set(void *data, u64 val) dev_priv->rps.min_freq_softlimit = val; - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); @@ -5277,6 +5310,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused) INTEL_INFO(dev)->eu_total); seq_printf(m, " Available EU Per Subslice: %u\n", INTEL_INFO(dev)->eu_per_subslice); + seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev))); + if (HAS_POOLED_EU(dev)) + seq_printf(m, " Min EU in pool: %u\n", + INTEL_INFO(dev)->min_eu_in_pool); seq_printf(m, " Has Slice Power Gating: %s\n", yesno(INTEL_INFO(dev)->has_slice_pg)); seq_printf(m, " Has Subslice Power Gating: %s\n", diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index b3198fcd0536..d15a461fa84a 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -186,7 +186,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = 1; break; case I915_PARAM_HAS_SEMAPHORES: - value = i915_semaphore_is_enabled(dev); + value = i915_semaphore_is_enabled(dev_priv); break; case I915_PARAM_HAS_PRIME_VMAP_FLUSH: value = 1; @@ -204,7 +204,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = 1; break; case I915_PARAM_CMD_PARSER_VERSION: - value = i915_cmd_parser_get_version(); + value = i915_cmd_parser_get_version(dev_priv); break; case I915_PARAM_HAS_COHERENT_PHYS_GTT: value = 1; @@ -223,8 +223,7 @@ static int i915_getparam(struct drm_device *dev, void *data, return -ENODEV; break; case I915_PARAM_HAS_GPU_RESET: - value = i915.enable_hangcheck && - intel_has_gpu_reset(dev); + value = i915.enable_hangcheck && 
intel_has_gpu_reset(dev_priv); break; case I915_PARAM_HAS_RESOURCE_STREAMER: value = HAS_RESOURCE_STREAMER(dev); break; @@ -425,6 +424,43 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { .can_switch = i915_switcheroo_can_switch, }; +static void i915_gem_fini(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + /* + * Neither the BIOS, ourselves, nor any other kernel + * expects the system to be in execlists mode on startup, + * so we need to reset the GPU back to legacy mode. And the only + * known way to disable logical contexts is through a GPU reset. + * + * So in order to leave the system in a known default configuration, + * always reset the GPU upon unload. Afterwards we then clean up the + * GEM state tracking, flushing off the requests and leaving the + * system in a known idle state. + * + * Note that it is of the utmost importance that the GPU is idle and + * all stray writes are flushed *before* we dismantle the backing + * storage for the pinned objects. + * + * However, since we are uncertain that resetting the GPU on older + * machines is a good idea, we don't - just in case it leaves the + * machine in an unusable condition. + */ + if (HAS_HW_CONTEXTS(dev)) { + int reset = intel_gpu_reset(dev_priv, ALL_ENGINES); + WARN_ON(reset && reset != -ENODEV); + } + + mutex_lock(&dev->struct_mutex); + i915_gem_reset(dev); + i915_gem_cleanup_engines(dev); + i915_gem_context_fini(dev); + mutex_unlock(&dev->struct_mutex); + + WARN_ON(!list_empty(&to_i915(dev)->context_list)); +} + static int i915_load_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -454,6 +490,9 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_vga_client; + /* must happen before intel_power_domains_init_hw() on VLV/CHV */ + intel_update_rawclk(dev_priv); + intel_power_domains_init_hw(dev_priv, false); intel_csr_ucode_init(dev_priv); @@ -468,7 +507,7 @@ static int i915_load_modeset_init(struct drm_device *dev) * working irqs for e.g. gmbus and dp aux transfers. */ intel_modeset_init(dev); - intel_guc_ucode_init(dev); + intel_guc_init(dev); ret = i915_gem_init(dev); if (ret) @@ -503,12 +542,9 @@ static int i915_load_modeset_init(struct drm_device *dev) return 0; cleanup_gem: - mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_engines(dev); - i915_gem_context_fini(dev); - mutex_unlock(&dev->struct_mutex); + i915_gem_fini(dev); cleanup_irq: - intel_guc_ucode_fini(dev); + intel_guc_fini(dev); drm_irq_uninstall(dev); intel_teardown_gmbus(dev); cleanup_csr: @@ -728,6 +764,32 @@ static void gen9_sseu_info_init(struct drm_device *dev) (info->slice_total > 1)); info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1)); info->has_eu_pg = (info->eu_per_subslice > 2); + + if (IS_BROXTON(dev)) { +#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & (0x1 << ss)) + /* + * There is a HW issue in 2x6 fused down parts that requires + * Pooled EU to be enabled as a WA. The pool configuration + * changes depending upon which subslice is fused down. This + * doesn't apply if the device has all 3 subslices enabled.
+ */ + /* WaEnablePooledEuFor2x6:bxt */ + info->has_pooled_eu = ((info->subslice_per_slice == 3) || + (info->subslice_per_slice == 2 && + INTEL_REVID(dev) < BXT_REVID_C0)); + + info->min_eu_in_pool = 0; + if (info->has_pooled_eu) { + if (IS_SS_DISABLED(ss_disable, 0) || + IS_SS_DISABLED(ss_disable, 2)) + info->min_eu_in_pool = 3; + else if (IS_SS_DISABLED(ss_disable, 1)) + info->min_eu_in_pool = 6; + else + info->min_eu_in_pool = 9; + } +#undef IS_SS_DISABLED + } } static void broadwell_sseu_info_init(struct drm_device *dev) @@ -850,7 +912,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) DRM_INFO("Display disabled (module parameter)\n"); info->num_pipes = 0; } else if (info->num_pipes > 0 && - (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && + (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) && HAS_PCH_SPLIT(dev)) { u32 fuse_strap = I915_READ(FUSE_STRAP); u32 sfuse_strap = I915_READ(SFUSE_STRAP); @@ -874,7 +936,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) DRM_INFO("PipeC fused off\n"); info->num_pipes -= 1; } - } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) { + } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) { u32 dfsm = I915_READ(SKL_DFSM); u8 disabled_mask = 0; bool invalid; @@ -915,21 +977,40 @@ static void intel_device_info_runtime_init(struct drm_device *dev) else if (INTEL_INFO(dev)->gen >= 9) gen9_sseu_info_init(dev); - /* Snooping is broken on BXT A stepping. */ info->has_snoop = !info->has_llc; - info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1); + + /* Snooping is broken on BXT A stepping. */ + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + info->has_snoop = false; DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice); DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total); DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice); + DRM_DEBUG_DRIVER("Has Pooled EU: %s\n", HAS_POOLED_EU(dev) ? "y" : "n"); + if (HAS_POOLED_EU(dev)) + DRM_DEBUG_DRIVER("Min EU in pool: %u\n", info->min_eu_in_pool); DRM_DEBUG_DRIVER("has slice power gating: %s\n", info->has_slice_pg ? "y" : "n"); DRM_DEBUG_DRIVER("has subslice power gating: %s\n", info->has_subslice_pg ? "y" : "n"); DRM_DEBUG_DRIVER("has EU power gating: %s\n", info->has_eu_pg ? "y" : "n"); + + i915.enable_execlists = + intel_sanitize_enable_execlists(dev_priv, + i915.enable_execlists); + + /* + * i915.enable_ppgtt is read-only, so do an early pass to validate the + * user's requested state against the hardware/driver capabilities. We + * do this now so that we can print out any log messages once rather + * than every time we check intel_enable_ppgtt(). 
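+	 *
+	 * Reduced to its core, such a sanitize pass is a one-time clamp
+	 * (a sketch only; the real intel_sanitize_enable_ppgtt() also
+	 * weighs execlists support and per-platform quirks):
+	 *
+	 *	if (requested < 0 || requested > max_supported)
+	 *		requested = platform_default; /* log the clamp once */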
+ */ + i915.enable_ppgtt = + intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt); + DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); } static void intel_init_dpio(struct drm_i915_private *dev_priv) @@ -1020,6 +1101,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, memcpy(device_info, info, sizeof(dev_priv->info)); device_info->device_id = dev->pdev->device; + BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); + device_info->gen_mask = BIT(device_info->gen - 1); + spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); mutex_init(&dev_priv->backlight_lock); @@ -1036,6 +1120,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, if (ret < 0) return ret; + ret = intel_gvt_init(dev_priv); + if (ret < 0) + goto err_workqueues; + /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev); @@ -1061,6 +1149,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, "It may not be fully functional.\n"); return 0; + +err_workqueues: + i915_workqueues_cleanup(dev_priv); + return ret; } /** @@ -1137,7 +1229,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) if (ret < 0) goto put_bridge; - intel_uncore_init(dev); + intel_uncore_init(dev_priv); return 0; @@ -1155,7 +1247,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; - intel_uncore_fini(dev); + intel_uncore_fini(dev_priv); i915_mmio_cleanup(dev); pci_dev_put(dev_priv->bridge_dev); } @@ -1206,8 +1298,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) pci_set_master(dev->pdev); /* overlay on gen2 is broken and can't address above 1G */ - if (IS_GEN2(dev)) - dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + if (IS_GEN2(dev)) { + ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + if (ret) { + DRM_ERROR("failed to set DMA mask\n"); + + goto out_ggtt; + } + } + /* 965GM sometimes incorrectly writes to hardware status page (HWS) * using 32bit addressing, overwriting memory if HWS is located @@ -1217,8 +1316,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) * behaviour if any general state is accessed within a page above 4GB, * which also needs to be handled carefully. */ - if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) - dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); + if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) { + ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); + + if (ret) { + DRM_ERROR("failed to set DMA mask\n"); + + goto out_ggtt; + } + } aperture_size = ggtt->mappable_end; @@ -1236,9 +1342,9 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); - intel_uncore_sanitize(dev); + intel_uncore_sanitize(dev_priv); - intel_opregion_setup(dev); + intel_opregion_setup(dev_priv); i915_gem_load_init_fences(dev_priv); @@ -1300,14 +1406,14 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) * Notify a valid surface after modesetting, * when running inside a VM. 
*/ - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); i915_setup_sysfs(dev); if (INTEL_INFO(dev_priv)->num_pipes) { /* Must be done after probing outputs */ - intel_opregion_init(dev); + intel_opregion_register(dev_priv); acpi_video_register(); } @@ -1326,7 +1432,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) i915_audio_component_cleanup(dev_priv); intel_gpu_ips_teardown(); acpi_video_unregister(); - intel_opregion_fini(dev_priv->dev); + intel_opregion_unregister(dev_priv); i915_teardown_sysfs(dev_priv->dev); i915_gem_shrinker_cleanup(dev_priv); } @@ -1418,6 +1524,8 @@ int i915_driver_unload(struct drm_device *dev) intel_fbdev_fini(dev); + intel_gvt_cleanup(dev_priv); + ret = i915_gem_suspend(dev); if (ret) { DRM_ERROR("failed to idle hardware: %d\n", ret); @@ -1458,11 +1566,8 @@ int i915_driver_unload(struct drm_device *dev) /* Flush any outstanding unpin_work. */ flush_workqueue(dev_priv->wq); - intel_guc_ucode_fini(dev); - mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_engines(dev); - i915_gem_context_fini(dev); - mutex_unlock(&dev->struct_mutex); + intel_guc_fini(dev); + i915_gem_fini(dev); intel_fbc_cleanup_cfb(dev_priv); intel_power_domains_fini(dev_priv); @@ -1570,15 +1675,15 @@ const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f313b4d8344f..3eb47fbcea73 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -35,11 +35,9 @@ #include "i915_trace.h" #include "intel_drv.h" -#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/module.h> #include <linux/pm_runtime.h> -#include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include <drm/drm_crtc_helper.h> @@ -300,22 +298,26 @@ static const struct intel_device_info intel_haswell_m_info = { static 
const struct intel_device_info intel_broadwell_d_info = { BDW_FEATURES, .gen = 8, + .is_broadwell = 1, }; static const struct intel_device_info intel_broadwell_m_info = { BDW_FEATURES, .gen = 8, .is_mobile = 1, + .is_broadwell = 1, }; static const struct intel_device_info intel_broadwell_gt3d_info = { BDW_FEATURES, .gen = 8, + .is_broadwell = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; static const struct intel_device_info intel_broadwell_gt3m_info = { BDW_FEATURES, .gen = 8, .is_mobile = 1, + .is_broadwell = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; @@ -353,6 +355,7 @@ static const struct intel_device_info intel_broxton_info = { .has_ddi = 1, .has_fpga_dbg = 1, .has_fbc = 1, + .has_pooled_eu = 0, GEN_DEFAULT_PIPEOFFSETS, IVB_CURSOR_OFFSETS, BDW_COLORS, @@ -515,8 +518,10 @@ void intel_detect_pch(struct drm_device *dev) } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && - pch->subsystem_vendor == 0x1af4 && - pch->subsystem_device == 0x1100)) { + pch->subsystem_vendor == + PCI_SUBVENDOR_ID_REDHAT_QUMRANET && + pch->subsystem_device == + PCI_SUBDEVICE_ID_QEMU)) { dev_priv->pch_type = intel_virt_detect_pch(dev); } else continue; @@ -530,9 +535,9 @@ void intel_detect_pch(struct drm_device *dev) pci_dev_put(pch); } -bool i915_semaphore_is_enabled(struct drm_device *dev) +bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv) { - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) return false; if (i915.semaphores >= 0) @@ -542,13 +547,9 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) if (i915.enable_execlists) return false; - /* Until we get further testing... */ - if (IS_GEN8(dev)) - return false; - #ifdef CONFIG_INTEL_IOMMU /* Enable semaphores on SNB when IO remapping is off */ - if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) + if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) return false; #endif @@ -610,7 +611,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_guc_suspend(dev); - intel_suspend_gt_powersave(dev); + intel_suspend_gt_powersave(dev_priv); intel_display_suspend(dev); @@ -628,10 +629,10 @@ static int i915_drm_suspend(struct drm_device *dev) i915_save_state(dev); opregion_target_state = suspend_to_idle(dev_priv) ? 
PCI_D1 : PCI_D3cold; - intel_opregion_notify_adapter(dev, opregion_target_state); + intel_opregion_notify_adapter(dev_priv, opregion_target_state); - intel_uncore_forcewake_reset(dev, false); - intel_opregion_fini(dev); + intel_uncore_forcewake_reset(dev_priv, false); + intel_opregion_unregister(dev_priv); intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); @@ -749,7 +750,7 @@ static int i915_drm_resume(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); i915_restore_state(dev); - intel_opregion_setup(dev); + intel_opregion_setup(dev_priv); intel_init_pch_refclk(dev); drm_mode_config_reset(dev); @@ -777,7 +778,7 @@ static int i915_drm_resume(struct drm_device *dev) spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); intel_dp_mst_resume(dev); @@ -794,7 +795,7 @@ static int i915_drm_resume(struct drm_device *dev) /* Config may have changed between suspend and resume */ drm_helper_hpd_irq_event(dev); - intel_opregion_init(dev); + intel_opregion_register(dev_priv); intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); @@ -802,7 +803,7 @@ static int i915_drm_resume(struct drm_device *dev) dev_priv->modeset_restore = MODESET_DONE; mutex_unlock(&dev_priv->modeset_restore_lock); - intel_opregion_notify_adapter(dev, PCI_D0); + intel_opregion_notify_adapter(dev_priv, PCI_D0); drm_kms_helper_poll_enable(dev); @@ -870,9 +871,9 @@ static int i915_drm_resume_early(struct drm_device *dev) DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", ret); - intel_uncore_early_sanitize(dev, true); + intel_uncore_early_sanitize(dev_priv, true); - if (IS_BROXTON(dev)) { + if (IS_BROXTON(dev_priv)) { if (!dev_priv->suspended_to_idle) gen9_sanitize_dc_state(dev_priv); bxt_disable_dc9(dev_priv); @@ -880,7 +881,7 @@ static int i915_drm_resume_early(struct drm_device *dev) hsw_disable_pc8(dev_priv); } - intel_uncore_sanitize(dev); + intel_uncore_sanitize(dev_priv); if (IS_BROXTON(dev_priv) || !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) @@ -923,14 +924,14 @@ int i915_resume_switcheroo(struct drm_device *dev) * - re-init interrupt state * - re-init display */ -int i915_reset(struct drm_device *dev) +int i915_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = dev_priv->dev; struct i915_gpu_error *error = &dev_priv->gpu_error; unsigned reset_counter; int ret; - intel_reset_gt_powersave(dev); + intel_reset_gt_powersave(dev_priv); mutex_lock(&dev->struct_mutex); @@ -946,7 +947,7 @@ int i915_reset(struct drm_device *dev) i915_gem_reset(dev); - ret = intel_gpu_reset(dev, ALL_ENGINES); + ret = intel_gpu_reset(dev_priv, ALL_ENGINES); /* Also reset the gpu hangman. */ if (error->stop_rings != 0) { @@ -1001,7 +1002,7 @@ int i915_reset(struct drm_device *dev) * of re-init after reset. */ if (INTEL_INFO(dev)->gen > 5) - intel_enable_gt_powersave(dev); + intel_enable_gt_powersave(dev_priv); return 0; @@ -1030,13 +1031,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (PCI_FUNC(pdev->devfn)) return -ENODEV; - /* - * apple-gmux is needed on dual GPU MacBook Pro - * to probe the panel if we're the inactive GPU. 
- */ - if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) && - apple_gmux_present() && pdev != vga_default_device() && - !vga_switcheroo_handler_flags()) + if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; return drm_get_pci_dev(pdev, ent, &driver); @@ -1115,6 +1110,49 @@ static int i915_pm_resume(struct device *dev) return i915_drm_resume(drm_dev); } +/* freeze: before creating the hibernation_image */ +static int i915_pm_freeze(struct device *dev) +{ + return i915_pm_suspend(dev); +} + +static int i915_pm_freeze_late(struct device *dev) +{ + int ret; + + ret = i915_pm_suspend_late(dev); + if (ret) + return ret; + + ret = i915_gem_freeze_late(dev_to_i915(dev)); + if (ret) + return ret; + + return 0; +} + +/* thaw: called after creating the hibernation image, but before turning off. */ +static int i915_pm_thaw_early(struct device *dev) +{ + return i915_pm_resume_early(dev); +} + +static int i915_pm_thaw(struct device *dev) +{ + return i915_pm_resume(dev); +} + +/* restore: called after loading the hibernation image. */ +static int i915_pm_restore_early(struct device *dev) +{ + return i915_pm_resume_early(dev); +} + +static int i915_pm_restore(struct device *dev) +{ + return i915_pm_resume(dev); +} + /* * Save all Gunit registers that may be lost after a D3 and a subsequent * S0i[R123] transition. The list of registers needing a save/restore is @@ -1478,7 +1516,7 @@ static int intel_runtime_suspend(struct device *device) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) + if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6()))) return -ENODEV; if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) @@ -1517,7 +1555,7 @@ static int intel_runtime_suspend(struct device *device) intel_guc_suspend(dev); - intel_suspend_gt_powersave(dev); + intel_suspend_gt_powersave(dev_priv); intel_runtime_pm_disable_interrupts(dev_priv); ret = 0; @@ -1539,7 +1577,7 @@ static int intel_runtime_suspend(struct device *device) return ret; } - intel_uncore_forcewake_reset(dev, false); + intel_uncore_forcewake_reset(dev_priv, false); enable_rpm_wakeref_asserts(dev_priv); WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); @@ -1553,14 +1591,14 @@ static int intel_runtime_suspend(struct device *device) * FIXME: We really should find a document that references the arguments * used below! */ - if (IS_BROADWELL(dev)) { + if (IS_BROADWELL(dev_priv)) { /* * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop * being detected, and the call we do at intel_runtime_resume() * won't be able to restore them. Since PCI_D3hot matches the * actual specification and appears to be working, use it. */ - intel_opregion_notify_adapter(dev, PCI_D3hot); + intel_opregion_notify_adapter(dev_priv, PCI_D3hot); } else { /* * current versions of firmware which depend on this opregion @@ -1569,7 +1607,7 @@ static int intel_runtime_suspend(struct device *device) * to distinguish it from notifications that might be sent via * the suspend path. 
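 	 * (In effect, D1 is used here as a "runtime suspended" token
 	 * rather than a literal power-state request.)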
*/ - intel_opregion_notify_adapter(dev, PCI_D1); + intel_opregion_notify_adapter(dev_priv, PCI_D1); } assert_forcewakes_inactive(dev_priv); @@ -1593,7 +1631,7 @@ static int intel_runtime_resume(struct device *device) WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); disable_rpm_wakeref_asserts(dev_priv); - intel_opregion_notify_adapter(dev, PCI_D0); + intel_opregion_notify_adapter(dev_priv, PCI_D0); dev_priv->pm.suspended = false; if (intel_uncore_unclaimed_mmio(dev_priv)) DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); @@ -1620,7 +1658,7 @@ static int intel_runtime_resume(struct device *device) * we can do is to hope that things will still work (and disable RPM). */ i915_gem_init_swizzling(dev); - gen6_update_ring_freq(dev); + gen6_update_ring_freq(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); @@ -1632,7 +1670,7 @@ static int intel_runtime_resume(struct device *device) if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_init(dev_priv); - intel_enable_gt_powersave(dev); + intel_enable_gt_powersave(dev_priv); enable_rpm_wakeref_asserts(dev_priv); @@ -1669,14 +1707,14 @@ static const struct dev_pm_ops i915_pm_ops = { * @restore, @restore_early : called after rebooting and restoring the * hibernation image [PMSG_RESTORE] */ - .freeze = i915_pm_suspend, - .freeze_late = i915_pm_suspend_late, - .thaw_early = i915_pm_resume_early, - .thaw = i915_pm_resume, + .freeze = i915_pm_freeze, + .freeze_late = i915_pm_freeze_late, + .thaw_early = i915_pm_thaw_early, + .thaw = i915_pm_thaw, .poweroff = i915_pm_suspend, .poweroff_late = i915_pm_poweroff_late, - .restore_early = i915_pm_resume_early, - .restore = i915_pm_resume, + .restore_early = i915_pm_restore_early, + .restore = i915_pm_restore, /* S0ix (via runtime suspend) event handlers */ .runtime_suspend = intel_runtime_suspend, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5faacc6e548d..24a86c64d22e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -47,6 +47,7 @@ #include <drm/intel-gtt.h> #include <drm/drm_legacy.h> /* for struct drm_dma_handle */ #include <drm/drm_gem.h> +#include <drm/drm_auth.h> #include "i915_params.h" #include "i915_reg.h" @@ -61,12 +62,14 @@ #include "i915_gem_gtt.h" #include "i915_gem_render_state.h" +#include "intel_gvt.h" + /* General customization: */ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20160425" +#define DRIVER_DATE "20160620" #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -324,6 +327,12 @@ struct i915_hotplug { &dev->mode_config.plane_list, \ base.head) +#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \ + list_for_each_entry(intel_plane, &dev->mode_config.plane_list, \ + base.head) \ + for_each_if ((plane_mask) & \ + (1 << drm_plane_index(&intel_plane->base))) + #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ list_for_each_entry(intel_plane, \ &(dev)->mode_config.plane_list, \ @@ -333,6 +342,10 @@ struct i915_hotplug { #define for_each_intel_crtc(dev, intel_crtc) \ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) +#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \ + list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \ + for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base))) + #define for_each_intel_encoder(dev, intel_encoder) \ list_for_each_entry(intel_encoder, \ &(dev)->mode_config.encoder_list, \ @@ -588,6 
+601,7 @@ struct drm_i915_display_funcs { struct intel_crtc_state *newstate); void (*initial_watermarks)(struct intel_crtc_state *cstate); void (*optimize_watermarks)(struct intel_crtc_state *cstate); + int (*compute_global_watermarks)(struct drm_atomic_state *state); void (*update_wm)(struct drm_crtc *crtc); int (*modeset_calc_cdclk)(struct drm_atomic_state *state); void (*modeset_commit_cdclk)(struct drm_atomic_state *state); @@ -612,7 +626,7 @@ struct drm_i915_display_funcs { struct drm_i915_gem_object *obj, struct drm_i915_gem_request *req, uint32_t flags); - void (*hpd_irq_setup)(struct drm_device *dev); + void (*hpd_irq_setup)(struct drm_i915_private *dev_priv); /* clock updates for mode set */ /* cursor updates */ /* render clock increase/decrease */ @@ -735,6 +749,7 @@ struct intel_csr { func(is_valleyview) sep \ func(is_cherryview) sep \ func(is_haswell) sep \ + func(is_broadwell) sep \ func(is_skylake) sep \ func(is_broxton) sep \ func(is_kabylake) sep \ @@ -749,7 +764,8 @@ struct intel_csr { func(has_llc) sep \ func(has_snoop) sep \ func(has_ddi) sep \ - func(has_fpga_dbg) + func(has_fpga_dbg) sep \ + func(has_pooled_eu) #define DEFINE_FLAG(name) u8 name:1 #define SEP_SEMICOLON ; @@ -757,9 +773,10 @@ struct intel_csr { struct intel_device_info { u32 display_mmio_offset; u16 device_id; - u8 num_pipes:3; + u8 num_pipes; u8 num_sprites[I915_MAX_PIPES]; u8 gen; + u16 gen_mask; u8 ring_mask; /* Rings supported by the HW */ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); /* Register offsets for the various display pipes and transcoders */ @@ -774,6 +791,7 @@ struct intel_device_info { u8 subslice_per_slice; u8 eu_total; u8 eu_per_subslice; + u8 min_eu_in_pool; /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */ u8 subslice_7eu[3]; u8 has_slice_pg:1; @@ -821,9 +839,8 @@ struct i915_ctx_hang_stats { /* This must match up with the value previously used for execbuf2.rsvd1. */ #define DEFAULT_CONTEXT_HANDLE 0 -#define CONTEXT_NO_ZEROMAP (1<<0) /** - * struct intel_context - as the name implies, represents a context. + * struct i915_gem_context - as the name implies, represents a context. * @ref: reference count. * @user_handle: userspace tracking identity for this context. * @remap_slice: l3 row remapping information. @@ -841,33 +858,37 @@ struct i915_ctx_hang_stats { * Contexts are memory images used by the hardware to store copies of their * internal state. 
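 *
 * The per-engine state now lives in the embedded engine[] array (see
 * the struct below), so a caller addresses it as, for example:
 *
 *	struct intel_context *ce = &ctx->engine[engine->id];
 *	if (ce->pin_count)
 *		...;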
*/ -struct intel_context { +struct i915_gem_context { struct kref ref; - int user_handle; - uint8_t remap_slice; struct drm_i915_private *i915; - int flags; struct drm_i915_file_private *file_priv; - struct i915_ctx_hang_stats hang_stats; struct i915_hw_ppgtt *ppgtt; - /* Legacy ring buffer submission */ - struct { - struct drm_i915_gem_object *rcs_state; - bool initialized; - } legacy_hw_ctx; + struct i915_ctx_hang_stats hang_stats; - /* Execlists */ - struct { + /* Unique identifier for this context, used by the hw for tracking */ + unsigned long flags; + unsigned hw_id; + u32 user_handle; +#define CONTEXT_NO_ZEROMAP (1<<0) + + struct intel_context { struct drm_i915_gem_object *state; struct intel_ringbuffer *ringbuf; - int pin_count; struct i915_vma *lrc_vma; - u64 lrc_desc; uint32_t *lrc_reg_state; + u64 lrc_desc; + int pin_count; + bool initialised; } engine[I915_NUM_ENGINES]; + u32 ring_size; + u32 desc_template; + struct atomic_notifier_head status_notifier; + bool execlists_force_single_submission; struct list_head link; + + u8 remap_slice; }; enum fb_op_origin { @@ -1115,6 +1136,8 @@ struct intel_gen6_power_mgmt { bool interrupts_enabled; u32 pm_iir; + u32 pm_intr_keep; + /* Frequencies are stored in potentially platform dependent multiples. * In other words, *_freq needs to be multiplied by X to be interesting. * Soft limits are those which are used for the dynamic reclocking done @@ -1488,6 +1511,7 @@ struct intel_vbt_data { bool present; bool active_low_pwm; u8 min_brightness; /* min_brightness/255 of max */ + enum intel_backlight_type type; } backlight; /* MIPI DSI */ @@ -1580,7 +1604,7 @@ struct skl_ddb_allocation { }; struct skl_wm_values { - bool dirty[I915_MAX_PIPES]; + unsigned dirty_pipes; struct skl_ddb_allocation ddb; uint32_t wm_linetime[I915_MAX_PIPES]; uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; @@ -1697,7 +1721,7 @@ struct i915_execbuffer_params { uint64_t batch_obj_vm_offset; struct intel_engine_cs *engine; struct drm_i915_gem_object *batch_obj; - struct intel_context *ctx; + struct i915_gem_context *ctx; struct drm_i915_gem_request *request; }; @@ -1724,6 +1748,8 @@ struct drm_i915_private { struct i915_virtual_gpu vgpu; + struct intel_gvt gvt; + struct intel_guc guc; struct intel_csr csr; @@ -1747,6 +1773,7 @@ struct drm_i915_private { wait_queue_head_t gmbus_wait_queue; struct pci_dev *bridge_dev; + struct i915_gem_context *kernel_context; struct intel_engine_cs engine[I915_NUM_ENGINES]; struct drm_i915_gem_object *semaphore_obj; uint32_t last_seqno, next_seqno; @@ -1802,13 +1829,17 @@ struct drm_i915_private { int num_fence_regs; /* 8 on pre-965, 16 otherwise */ unsigned int fsb_freq, mem_freq, is_ddr3; - unsigned int skl_boot_cdclk; + unsigned int skl_preferred_vco_freq; unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; unsigned int max_dotclk_freq; unsigned int rawclk_freq; unsigned int hpll_freq; unsigned int czclk_freq; + struct { + unsigned int vco, ref; + } cdclk_pll; + /** * wq - Driver workqueue for GEM. * @@ -1838,6 +1869,13 @@ struct drm_i915_private { DECLARE_HASHTABLE(mm_structs, 7); struct mutex mm_lock; + /* The hw wants to have a stable context identifier for the lifetime + * of the context (for OA, PASID, faults, etc). This is limited + * in execlists to 21 bits. 
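+	 *
+	 * A sketch of the intended use of the IDA below (the matching
+	 * call sites live with the context code, not in this header):
+	 *
+	 *	ret = ida_simple_get(&dev_priv->context_hw_ida, 0,
+	 *			     MAX_CONTEXT_HW_ID, GFP_KERNEL);
+	 *	if (ret < 0)
+	 *		return ret;
+	 *	ctx->hw_id = ret;
+	 *	...
+	 *	ida_simple_remove(&dev_priv->context_hw_ida, ctx->hw_id);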
+ */ + struct ida context_hw_ida; +#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ + /* Kernel Modesetting */ struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; @@ -1950,9 +1988,6 @@ struct drm_i915_private { */ uint16_t skl_latency[8]; - /* Committed wm config */ - struct intel_wm_config config; - /* * The skl_wm_values structure is a bit too big for stack * allocation, so we keep the staging struct where we store @@ -1975,6 +2010,13 @@ struct drm_i915_private { * cstate->wm.need_postvbl_update. */ struct mutex wm_mutex; + + /* + * Set during HW readout of watermarks/DDB. Some platforms + * need to know when we're still using BIOS-provided values + * (which we don't fully trust). + */ + bool distrust_bios_wm; } wm; struct i915_runtime_pm pm; @@ -1989,8 +2031,6 @@ struct drm_i915_private { void (*stop_engine)(struct intel_engine_cs *engine); } gt; - struct intel_context *kernel_context; - /* perform PHY state sanity checks? */ bool chv_phy_assert[2]; @@ -2227,9 +2267,75 @@ struct drm_i915_gem_object { }; #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) -void i915_gem_track_fb(struct drm_i915_gem_object *old, - struct drm_i915_gem_object *new, - unsigned frontbuffer_bits); +/* + * Optimised SGL iterator for GEM objects + */ +static __always_inline struct sgt_iter { + struct scatterlist *sgp; + union { + unsigned long pfn; + dma_addr_t dma; + }; + unsigned int curr; + unsigned int max; +} __sgt_iter(struct scatterlist *sgl, bool dma) { + struct sgt_iter s = { .sgp = sgl }; + + if (s.sgp) { + s.max = s.curr = s.sgp->offset; + s.max += s.sgp->length; + if (dma) + s.dma = sg_dma_address(s.sgp); + else + s.pfn = page_to_pfn(sg_page(s.sgp)); + } + + return s; +} + +/** + * __sg_next - return the next scatterlist entry in a list + * @sg: The current sg entry + * + * Description: + * If the entry is the last, return NULL; otherwise, step to the next + * element in the array (@sg@+1). If that's a chain pointer, follow it; + * otherwise just return the pointer to the current element. + **/ +static inline struct scatterlist *__sg_next(struct scatterlist *sg) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); +#endif + return sg_is_last(sg) ? NULL : + likely(!sg_is_chain(++sg)) ? sg : + sg_chain_ptr(sg); +} + +/** + * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table + * @__dmap: DMA address (output) + * @__iter: 'struct sgt_iter' (iterator state, internal) + * @__sgt: sg_table to iterate over (input) + */ +#define for_each_sgt_dma(__dmap, __iter, __sgt) \ + for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ + ((__dmap) = (__iter).dma + (__iter).curr); \ + (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ + ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0)) + +/** + * for_each_sgt_page - iterate over the pages of the given sg_table + * @__pp: page pointer (output) + * @__iter: 'struct sgt_iter' (iterator state, internal) + * @__sgt: sg_table to iterate over (input) + */ +#define for_each_sgt_page(__pp, __iter, __sgt) \ + for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ + ((__pp) = (__iter).pfn == 0 ? NULL : \ + pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ + (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ + ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) /** * Request queue structure. 
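 *
 * (An aside on the sg_table iterators just above: typical usage is
 *
 *	struct sgt_iter iter;
 *	struct page *page;
 *
 *	for_each_sgt_page(page, iter, obj->pages)
 *		set_page_dirty(page);
 *
 * with obj->pages being the object's backing sg_table and
 * set_page_dirty() standing in for whatever per-page work the
 * caller does.)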
@@ -2278,6 +2384,9 @@ struct drm_i915_gem_request {
 	/** Position in the ringbuffer of the end of the whole request */
 	u32 tail;
 
+	/** Preallocate space in the ringbuffer for emitting the request */
+	u32 reserved_space;
+
 	/**
 	 * Context and ring buffer related to this request
 	 * Contexts are refcounted, so when this request is associated with a
@@ -2288,9 +2397,20 @@ struct drm_i915_gem_request {
 	 * i915_gem_request_free() will then decrement the refcount on the
 	 * context.
 	 */
-	struct intel_context *ctx;
+	struct i915_gem_context *ctx;
 	struct intel_ringbuffer *ringbuf;
 
+	/**
+	 * Context related to the previous request.
+	 * As the contexts are accessed by the hardware until the switch is
+	 * completed to a new context, the hardware may still be writing
+	 * to the context object after the breadcrumb is visible. We must
+	 * not unpin/unbind/prune that object whilst still active and so
+	 * we keep the previous context pinned until the following (this)
+	 * request is retired.
+	 */
+	struct i915_gem_context *previous_context;
+
 	/** Batch buffer related to this request if any (used for error state dump only) */
 	struct drm_i915_gem_object *batch_obj;
 
@@ -2327,11 +2447,13 @@
 	/** Execlists no. of times this request has been sent to the ELSP */
 	int elsp_submitted;
 
+	/** Execlists context hardware id. */
+	unsigned ctx_hw_id;
 };
 
 struct drm_i915_gem_request * __must_check
 i915_gem_request_alloc(struct intel_engine_cs *engine,
-		       struct intel_context *ctx);
+		       struct i915_gem_context *ctx);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
 				   struct drm_file *file);
@@ -2359,23 +2481,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
 static inline void
 i915_gem_request_unreference(struct drm_i915_gem_request *req)
 {
-	WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
 	kref_put(&req->ref, i915_gem_request_free);
 }
 
-static inline void
-i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
-{
-	struct drm_device *dev;
-
-	if (!req)
-		return;
-
-	dev = req->engine->dev;
-	if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
-		mutex_unlock(&dev->struct_mutex);
-}
-
 static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
 					   struct drm_i915_gem_request *src)
 {
@@ -2503,9 +2611,29 @@ struct drm_i915_cmd_table {
 #define INTEL_INFO(p)	(&__I915__(p)->info)
 #define INTEL_GEN(p)	(INTEL_INFO(p)->gen)
 #define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
-#define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)
 
 #define REVID_FOREVER	0xff
+#define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)
+
+#define GEN_FOREVER (0)
+/*
+ * Returns true if Gen is in inclusive range [Start, End].
+ *
+ * Use GEN_FOREVER for an unbound start and/or end.
+ */
+#define IS_GEN(p, s, e) ({ \
+	unsigned int __s = (s), __e = (e); \
+	BUILD_BUG_ON(!__builtin_constant_p(s)); \
+	BUILD_BUG_ON(!__builtin_constant_p(e)); \
+	if ((__s) != GEN_FOREVER) \
+		__s = (s) - 1; \
+	if ((__e) == GEN_FOREVER) \
+		__e = BITS_PER_LONG - 1; \
+	else \
+		__e = (e) - 1; \
+	!!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
+})
+
 /*
 * Return true if revision is in range [since,until] inclusive.
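 *
 * For instance, the Broxton A-stepping snoop override seen earlier in
 * this series is exactly such a range check:
 *
 *	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 *		info->has_snoop = false;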
* @@ -2538,7 +2666,7 @@ struct drm_i915_cmd_table { #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) -#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev)) +#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell) #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) #define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) @@ -2600,20 +2728,29 @@ struct drm_i915_cmd_table { #define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until)) +#define KBL_REVID_A0 0x0 +#define KBL_REVID_B0 0x1 +#define KBL_REVID_C0 0x2 +#define KBL_REVID_D0 0x3 +#define KBL_REVID_E0 0x4 + +#define IS_KBL_REVID(p, since, until) \ + (IS_KABYLAKE(p) && IS_REVID(p, since, until)) + /* * The genX designation typically refers to the render engine, so render * capability related checks should use IS_GEN, while display and other checks * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular * chips, etc.). */ -#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) -#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) -#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) -#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) -#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) -#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) -#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) -#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) +#define IS_GEN2(dev) (INTEL_INFO(dev)->gen_mask & BIT(1)) +#define IS_GEN3(dev) (INTEL_INFO(dev)->gen_mask & BIT(2)) +#define IS_GEN4(dev) (INTEL_INFO(dev)->gen_mask & BIT(3)) +#define IS_GEN5(dev) (INTEL_INFO(dev)->gen_mask & BIT(4)) +#define IS_GEN6(dev) (INTEL_INFO(dev)->gen_mask & BIT(5)) +#define IS_GEN7(dev) (INTEL_INFO(dev)->gen_mask & BIT(6)) +#define IS_GEN8(dev) (INTEL_INFO(dev)->gen_mask & BIT(7)) +#define IS_GEN9(dev) (INTEL_INFO(dev)->gen_mask & BIT(8)) #define RENDER_RING (1<<RCS) #define BSD_RING (1<<VCS) @@ -2686,12 +2823,18 @@ struct drm_i915_cmd_table { IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ IS_KABYLAKE(dev) || IS_BROXTON(dev)) #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) -#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) +#define HAS_RC6p(dev) (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) #define HAS_CSR(dev) (IS_GEN9(dev)) -#define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) -#define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) +/* + * For now, anything with a GuC requires uCode loading, and then supports + * command submission once loaded. But these are logically independent + * properties, so we have separate macros to test them. 
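+ *
+ * Callers should therefore test the property they actually rely on,
+ * e.g. (sketch) firmware loading guards on HAS_GUC_UCODE():
+ *
+ *	if (HAS_GUC_UCODE(dev))
+ *		intel_guc_init(dev);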
+ */ +#define HAS_GUC(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) +#define HAS_GUC_UCODE(dev) (HAS_GUC(dev)) +#define HAS_GUC_SCHED(dev) (HAS_GUC(dev)) #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ INTEL_INFO(dev)->gen >= 8) @@ -2700,6 +2843,8 @@ struct drm_i915_cmd_table { !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \ !IS_BROXTON(dev)) +#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu) + #define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 @@ -2740,6 +2885,9 @@ extern int i915_max_ioctl; extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); extern int i915_resume_switcheroo(struct drm_device *dev); +int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, + int enable_ppgtt); + /* i915_dma.c */ void __printf(3, 4) __i915_printk(struct drm_i915_private *dev_priv, const char *level, @@ -2760,9 +2908,9 @@ extern void i915_driver_postclose(struct drm_device *dev, extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #endif -extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); -extern bool intel_has_gpu_reset(struct drm_device *dev); -extern int i915_reset(struct drm_device *dev); +extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); +extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); +extern int i915_reset(struct drm_i915_private *dev_priv); extern int intel_guc_reset(struct drm_i915_private *dev_priv); extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); @@ -2772,30 +2920,33 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); /* intel_hotplug.c */ -void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); +void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, + u32 pin_mask, u32 long_mask); void intel_hpd_init(struct drm_i915_private *dev_priv); void intel_hpd_init_work(struct drm_i915_private *dev_priv); void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); /* i915_irq.c */ -void i915_queue_hangcheck(struct drm_device *dev); +void i915_queue_hangcheck(struct drm_i915_private *dev_priv); __printf(3, 4) -void i915_handle_error(struct drm_device *dev, u32 engine_mask, +void i915_handle_error(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *fmt, ...); extern void intel_irq_init(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); void intel_irq_uninstall(struct drm_i915_private *dev_priv); -extern void intel_uncore_sanitize(struct drm_device *dev); -extern void intel_uncore_early_sanitize(struct drm_device *dev, +extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv); +extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, bool restore_forcewake); -extern void intel_uncore_init(struct drm_device *dev); +extern void intel_uncore_init(struct drm_i915_private *dev_priv); extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); -extern void intel_uncore_fini(struct drm_device *dev); -extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); +extern void intel_uncore_fini(struct 
drm_i915_private *dev_priv); +extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, + bool restore); const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, enum forcewake_domains domains); @@ -2811,9 +2962,15 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); -static inline bool intel_vgpu_active(struct drm_device *dev) + +static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) { - return to_i915(dev)->vgpu.active; + return dev_priv->gvt.initialized; +} + +static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) +{ + return dev_priv->vgpu.active; } void @@ -2909,7 +3066,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_get_tiling(struct drm_device *dev, void *data, struct drm_file *file_priv); -int i915_gem_init_userptr(struct drm_device *dev); +void i915_gem_init_userptr(struct drm_i915_private *dev_priv); int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, @@ -2919,11 +3076,13 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data, void i915_gem_load_init(struct drm_device *dev); void i915_gem_load_cleanup(struct drm_device *dev); void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); +int i915_gem_freeze_late(struct drm_i915_private *dev_priv); + void *i915_gem_object_alloc(struct drm_device *dev); void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops); -struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, +struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, size_t size); struct drm_i915_gem_object *i915_gem_object_create_from_data( struct drm_device *dev, const void *data, size_t size); @@ -2978,6 +3137,23 @@ static inline int __sg_page_count(struct scatterlist *sg) struct page * i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n); +static inline dma_addr_t +i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n) +{ + if (n < obj->get_page.last) { + obj->get_page.sg = obj->pages->sgl; + obj->get_page.last = 0; + } + + while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) { + obj->get_page.last += __sg_page_count(obj->get_page.sg++); + if (unlikely(sg_is_chain(obj->get_page.sg))) + obj->get_page.sg = sg_chain_ptr(obj->get_page.sg); + } + + return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT); +} + static inline struct page * i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) { @@ -3054,6 +3230,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args); int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); + +void i915_gem_track_fb(struct drm_i915_gem_object *old, + struct drm_i915_gem_object *new, + unsigned frontbuffer_bits); + /** * Returns true if seq1 is later than seq2. 
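 *
 * The helper under this comment is wrap-safe: it compares the signed
 * delta of the two seqnos rather than their absolute values, i.e. in
 * essence
 *
 *	return (s32)(seq1 - seq2) >= 0;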
*/ @@ -3081,13 +3262,13 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, req->seqno); } -int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); +int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno); int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); struct drm_i915_gem_request * i915_gem_find_active_request(struct intel_engine_cs *engine); -bool i915_gem_retire_requests(struct drm_device *dev); +bool i915_gem_retire_requests(struct drm_i915_private *dev_priv); void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); static inline u32 i915_reset_counter(struct i915_gpu_error *error) @@ -3147,7 +3328,6 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_init(struct drm_device *dev); int i915_gem_init_engines(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); -int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_cleanup_engines(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev); @@ -3215,8 +3395,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, bool i915_gem_obj_bound(struct drm_i915_gem_object *o, struct i915_address_space *vm); -unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, - struct i915_address_space *vm); struct i915_vma * i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, struct i915_address_space *vm); @@ -3251,14 +3429,8 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); } -static inline unsigned long -i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - - return i915_gem_obj_size(obj, &ggtt->base); -} +unsigned long +i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj); static inline int __must_check i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, @@ -3272,12 +3444,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, alignment, flags | PIN_GLOBAL); } -static inline int -i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) -{ - return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); -} - void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view); static inline void @@ -3301,28 +3467,44 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); /* i915_gem_context.c */ int __must_check i915_gem_context_init(struct drm_device *dev); +void i915_gem_context_lost(struct drm_i915_private *dev_priv); void i915_gem_context_fini(struct drm_device *dev); void i915_gem_context_reset(struct drm_device *dev); int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); -int i915_gem_context_enable(struct drm_i915_gem_request *req); void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); int i915_switch_context(struct drm_i915_gem_request *req); -struct intel_context * -i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); void i915_gem_context_free(struct kref *ctx_ref); struct drm_i915_gem_object * i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); -static inline void i915_gem_context_reference(struct intel_context *ctx) +struct i915_gem_context * +i915_gem_context_create_gvt(struct drm_device *dev); + 
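+/*
+ * A sketch of the expected caller pattern for the lookup helper below;
+ * note it returns ERR_PTR(-ENOENT), not NULL, on a stale handle, and
+ * must be called under struct_mutex:
+ *
+ *	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+ *	if (IS_ERR(ctx))
+ *		return PTR_ERR(ctx);
+ */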
+static inline struct i915_gem_context * +i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) +{ + struct i915_gem_context *ctx; + + lockdep_assert_held(&file_priv->dev_priv->dev->struct_mutex); + + ctx = idr_find(&file_priv->context_idr, id); + if (!ctx) + return ERR_PTR(-ENOENT); + + return ctx; +} + +static inline void i915_gem_context_reference(struct i915_gem_context *ctx) { kref_get(&ctx->ref); } -static inline void i915_gem_context_unreference(struct intel_context *ctx) +static inline void i915_gem_context_unreference(struct i915_gem_context *ctx) { + lockdep_assert_held(&ctx->i915->dev->struct_mutex); kref_put(&ctx->ref, i915_gem_context_free); } -static inline bool i915_gem_context_is_default(const struct intel_context *c) +static inline bool i915_gem_context_is_default(const struct i915_gem_context *c) { return c->user_handle == DEFAULT_CONTEXT_HANDLE; } @@ -3335,6 +3517,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct drm_device *dev, @@ -3349,9 +3533,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target); int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); /* belongs in i915_gem_gtt.h */ -static inline void i915_gem_chipset_flush(struct drm_device *dev) +static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) { - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) intel_gtt_chipset_flush(); } @@ -3430,18 +3614,19 @@ static inline void i915_error_state_buf_release( { kfree(eb->buf); } -void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, +void i915_capture_error_state(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *error_msg); void i915_error_state_get(struct drm_device *dev, struct i915_error_state_file_priv *error_priv); void i915_error_state_put(struct i915_error_state_file_priv *error_priv); void i915_destroy_error_state(struct drm_device *dev); -void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); +void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone); const char *i915_cache_level_str(struct drm_i915_private *i915, int type); /* i915_cmd_parser.c */ -int i915_cmd_parser_get_version(void); +int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); int i915_cmd_parser_init_ring(struct intel_engine_cs *engine); void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine); bool i915_needs_cmd_parser(struct intel_engine_cs *engine); @@ -3481,6 +3666,7 @@ int intel_bios_init(struct drm_i915_private *dev_priv); bool intel_bios_is_valid_vbt(const void *buf, size_t size); bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); +bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); @@ -3489,31 +3675,33 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, /* intel_opregion.c */ #ifdef 
CONFIG_ACPI -extern int intel_opregion_setup(struct drm_device *dev); -extern void intel_opregion_init(struct drm_device *dev); -extern void intel_opregion_fini(struct drm_device *dev); -extern void intel_opregion_asle_intr(struct drm_device *dev); +extern int intel_opregion_setup(struct drm_i915_private *dev_priv); +extern void intel_opregion_register(struct drm_i915_private *dev_priv); +extern void intel_opregion_unregister(struct drm_i915_private *dev_priv); +extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv); extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable); -extern int intel_opregion_notify_adapter(struct drm_device *dev, +extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, pci_power_t state); -extern int intel_opregion_get_panel_type(struct drm_device *dev); +extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv); #else -static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } -static inline void intel_opregion_init(struct drm_device *dev) { return; } -static inline void intel_opregion_fini(struct drm_device *dev) { return; } -static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } +static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; } +static inline void intel_opregion_init(struct drm_i915_private *dev) { } +static inline void intel_opregion_fini(struct drm_i915_private *dev) { } +static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) +{ +} static inline int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) { return 0; } static inline int -intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) +intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state) { return 0; } -static inline int intel_opregion_get_panel_type(struct drm_device *dev) +static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev) { return -ENODEV; } @@ -3533,31 +3721,30 @@ extern void intel_modeset_init_hw(struct drm_device *dev); extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_gem_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); -extern void intel_connector_unregister(struct intel_connector *); +extern void intel_connector_unregister(struct drm_connector *); extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); extern void intel_display_resume(struct drm_device *dev); extern void i915_redisable_vga(struct drm_device *dev); extern void i915_redisable_vga_power_on(struct drm_device *dev); -extern bool ironlake_set_drps(struct drm_device *dev, u8 val); +extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); extern void intel_init_pch_refclk(struct drm_device *dev); -extern void intel_set_rps(struct drm_device *dev, u8 val); +extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val); extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); extern void intel_detect_pch(struct drm_device *dev); -extern int intel_enable_rc6(const struct drm_device *dev); -extern bool i915_semaphore_is_enabled(struct drm_device *dev); +extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv); int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); /* overlay */ -extern 
struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); +extern struct intel_overlay_error_state * +intel_overlay_capture_error_state(struct drm_i915_private *dev_priv); extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, struct intel_overlay_error_state *error); -extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); +extern struct intel_display_error_state * +intel_display_capture_error_state(struct drm_i915_private *dev_priv); extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, struct drm_device *dev, struct intel_display_error_state *error); @@ -3586,6 +3773,24 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); +/* intel_dpio_phy.c */ +void chv_set_phy_signal_level(struct intel_encoder *encoder, + u32 deemph_reg_value, u32 margin_reg_value, + bool uniq_trans_scale); +void chv_data_lane_soft_reset(struct intel_encoder *encoder, + bool reset); +void chv_phy_pre_pll_enable(struct intel_encoder *encoder); +void chv_phy_pre_encoder_enable(struct intel_encoder *encoder); +void chv_phy_release_cl2_override(struct intel_encoder *encoder); +void chv_phy_post_pll_disable(struct intel_encoder *encoder); + +void vlv_set_phy_signal_level(struct intel_encoder *encoder, + u32 demph_reg_value, u32 preemph_reg_value, + u32 uniqtranscale_reg_value, u32 tx3_demph); +void vlv_phy_pre_pll_enable(struct intel_encoder *encoder); +void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder); +void vlv_phy_reset_lanes(struct intel_encoder *encoder); + int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index aad26851cee3..21d0dea57312 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -54,12 +54,33 @@ static bool cpu_cache_is_coherent(struct drm_device *dev, static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) { + if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) + return false; + if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) return true; return obj->pin_display; } +static int +insert_mappable_node(struct drm_i915_private *i915, + struct drm_mm_node *node, u32 size) +{ + memset(node, 0, sizeof(*node)); + return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node, + size, 0, 0, 0, + i915->ggtt.mappable_end, + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); +} + +static void +remove_mappable_node(struct drm_mm_node *node) +{ + drm_mm_remove_node(node); +} + /* some bookkeeping */ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, size_t size) @@ -177,7 +198,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) vaddr += PAGE_SIZE; } - i915_gem_chipset_flush(obj->base.dev); + i915_gem_chipset_flush(to_i915(obj->base.dev)); st = kmalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) @@ -347,7 +368,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, } drm_clflush_virt_range(vaddr, args->size); - i915_gem_chipset_flush(dev); + i915_gem_chipset_flush(to_i915(dev)); out: intel_fb_obj_flush(obj, false, ORIGIN_CPU); @@ -381,9 +402,9 @@ i915_gem_create(struct drm_file *file, return -EINVAL; /* Allocate the new object */ - obj = 
i915_gem_alloc_object(dev, size); - if (obj == NULL) - return -ENOMEM; + obj = i915_gem_object_create(dev, size); + if (IS_ERR(obj)) + return PTR_ERR(obj); ret = drm_gem_handle_create(file, &obj->base, &handle); /* drop reference from allocate - handle holds it now */ @@ -409,6 +430,9 @@ i915_gem_dumb_create(struct drm_file *file, /** * Creates a new mm object and returns a handle to it. + * @dev: drm device pointer + * @data: ioctl data blob + * @file: drm file pointer */ int i915_gem_create_ioctl(struct drm_device *dev, void *data, @@ -585,6 +609,142 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length, return ret ? -EFAULT : 0; } +static inline unsigned long +slow_user_access(struct io_mapping *mapping, + uint64_t page_base, int page_offset, + char __user *user_data, + unsigned long length, bool pwrite) +{ + void __iomem *ioaddr; + void *vaddr; + uint64_t unwritten; + + ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE); + /* We can use the cpu mem copy function because this is X86. */ + vaddr = (void __force *)ioaddr + page_offset; + if (pwrite) + unwritten = __copy_from_user(vaddr, user_data, length); + else + unwritten = __copy_to_user(user_data, vaddr, length); + + io_mapping_unmap(ioaddr); + return unwritten; +} + +static int +i915_gem_gtt_pread(struct drm_device *dev, + struct drm_i915_gem_object *obj, uint64_t size, + uint64_t data_offset, uint64_t data_ptr) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_mm_node node; + char __user *user_data; + uint64_t remain; + uint64_t offset; + int ret; + + ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE); + if (ret) { + ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE); + if (ret) + goto out; + + ret = i915_gem_object_get_pages(obj); + if (ret) { + remove_mappable_node(&node); + goto out; + } + + i915_gem_object_pin_pages(obj); + } else { + node.start = i915_gem_obj_ggtt_offset(obj); + node.allocated = false; + ret = i915_gem_object_put_fence(obj); + if (ret) + goto out_unpin; + } + + ret = i915_gem_object_set_to_gtt_domain(obj, false); + if (ret) + goto out_unpin; + + user_data = u64_to_user_ptr(data_ptr); + remain = size; + offset = data_offset; + + mutex_unlock(&dev->struct_mutex); + if (likely(!i915.prefault_disable)) { + ret = fault_in_multipages_writeable(user_data, remain); + if (ret) { + mutex_lock(&dev->struct_mutex); + goto out_unpin; + } + } + + while (remain > 0) { + /* Operation in this page + * + * page_base = page offset within aperture + * page_offset = offset within page + * page_length = bytes to copy for this page + */ + u32 page_base = node.start; + unsigned page_offset = offset_in_page(offset); + unsigned page_length = PAGE_SIZE - page_offset; + page_length = remain < page_length ? remain : page_length; + if (node.allocated) { + wmb(); + ggtt->base.insert_page(&ggtt->base, + i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), + node.start, + I915_CACHE_NONE, 0); + wmb(); + } else { + page_base += offset & PAGE_MASK; + } + /* This is a slow read/write as it tries to read from + * and write to user memory which may result in page + * faults, and so we cannot perform this under struct_mutex.
+ */ + if (slow_user_access(ggtt->mappable, page_base, + page_offset, user_data, + page_length, false)) { + ret = -EFAULT; + break; + } + + remain -= page_length; + user_data += page_length; + offset += page_length; + } + + mutex_lock(&dev->struct_mutex); + if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) { + /* The user has modified the object whilst we tried + * reading from it, and we now have no idea what domain + * the pages should be in. As we have just been touching + * them directly, flush everything back to the GTT + * domain. + */ + ret = i915_gem_object_set_to_gtt_domain(obj, false); + } + +out_unpin: + if (node.allocated) { + wmb(); + ggtt->base.clear_range(&ggtt->base, + node.start, node.size, + true); + i915_gem_object_unpin_pages(obj); + remove_mappable_node(&node); + } else { + i915_gem_object_ggtt_unpin(obj); + } +out: + return ret; +} + static int i915_gem_shmem_pread(struct drm_device *dev, struct drm_i915_gem_object *obj, @@ -600,6 +760,9 @@ i915_gem_shmem_pread(struct drm_device *dev, int needs_clflush = 0; struct sg_page_iter sg_iter; + if (!obj->base.filp) + return -ENODEV; + user_data = u64_to_user_ptr(args->data_ptr); remain = args->size; @@ -672,6 +835,9 @@ out: /** * Reads data from the object referenced by handle. + * @dev: drm device pointer + * @data: ioctl data blob + * @file: drm file pointer * * On error, the contents of *data are undefined. */ @@ -708,18 +874,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, goto out; } - /* prime objects have no backing filp to GEM pread/pwrite - * pages from. - */ - if (!obj->base.filp) { - ret = -EINVAL; - goto out; - } - trace_i915_gem_object_pread(obj, args->offset, args->size); ret = i915_gem_shmem_pread(dev, obj, args, file); + /* pread for non shmem backed objects */ + if (ret == -EFAULT || ret == -ENODEV) + ret = i915_gem_gtt_pread(dev, obj, args->size, + args->offset, args->data_ptr); + out: drm_gem_object_unreference(&obj->base); unlock: @@ -753,60 +916,99 @@ fast_user_write(struct io_mapping *mapping, /** * This is the fast pwrite path, where we copy the data directly from the * user into the GTT, uncached. 
+ * @i915: i915 device private data + * @obj: i915 gem object + * @args: pwrite arguments structure + * @file: drm file pointer */ static int -i915_gem_gtt_pwrite_fast(struct drm_device *dev, +i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - ssize_t remain; - loff_t offset, page_base; + struct i915_ggtt *ggtt = &i915->ggtt; + struct drm_device *dev = obj->base.dev; + struct drm_mm_node node; + uint64_t remain, offset; char __user *user_data; - int page_offset, page_length, ret; + int ret; + bool hit_slow_path = false; + + if (obj->tiling_mode != I915_TILING_NONE) + return -EFAULT; ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK); - if (ret) - goto out; + if (ret) { + ret = insert_mappable_node(i915, &node, PAGE_SIZE); + if (ret) + goto out; + + ret = i915_gem_object_get_pages(obj); + if (ret) { + remove_mappable_node(&node); + goto out; + } + + i915_gem_object_pin_pages(obj); + } else { + node.start = i915_gem_obj_ggtt_offset(obj); + node.allocated = false; + ret = i915_gem_object_put_fence(obj); + if (ret) + goto out_unpin; + } ret = i915_gem_object_set_to_gtt_domain(obj, true); if (ret) goto out_unpin; - ret = i915_gem_object_put_fence(obj); - if (ret) - goto out_unpin; + intel_fb_obj_invalidate(obj, ORIGIN_GTT); + obj->dirty = true; user_data = u64_to_user_ptr(args->data_ptr); + offset = args->offset; remain = args->size; - - offset = i915_gem_obj_ggtt_offset(obj) + args->offset; - - intel_fb_obj_invalidate(obj, ORIGIN_GTT); - - while (remain > 0) { + while (remain) { /* Operation in this page * * page_base = page offset within aperture * page_offset = offset within page * page_length = bytes to copy for this page */ - page_base = offset & PAGE_MASK; - page_offset = offset_in_page(offset); - page_length = remain; - if ((page_offset + remain) > PAGE_SIZE) - page_length = PAGE_SIZE - page_offset; - + u32 page_base = node.start; + unsigned page_offset = offset_in_page(offset); + unsigned page_length = PAGE_SIZE - page_offset; + page_length = remain < page_length ? remain : page_length; + if (node.allocated) { + wmb(); /* flush the write before we modify the GGTT */ + ggtt->base.insert_page(&ggtt->base, + i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), + node.start, I915_CACHE_NONE, 0); + wmb(); /* flush modifications to the GGTT (insert_page) */ + } else { + page_base += offset & PAGE_MASK; + } /* If we get a fault while copying data, then (presumably) our * source page isn't available. Return the error and we'll * retry in the slow path. + * If the object is non-shmem backed, we retry with the + * path that handles page faults. */ if (fast_user_write(ggtt->mappable, page_base, page_offset, user_data, page_length)) { - ret = -EFAULT; - goto out_flush; + hit_slow_path = true; + mutex_unlock(&dev->struct_mutex); + if (slow_user_access(ggtt->mappable, + page_base, + page_offset, user_data, + page_length, true)) { + ret = -EFAULT; + mutex_lock(&dev->struct_mutex); + goto out_flush; + } + + mutex_lock(&dev->struct_mutex); } remain -= page_length; @@ -815,9 +1017,31 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, } out_flush: + if (hit_slow_path) { + if (ret == 0 && + (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) { + /* The user has modified the object whilst we tried + * reading from it, and we now have no idea what domain + * the pages should be in.
As we have just been touching + * them directly, flush everything back to the GTT + * domain. + */ + ret = i915_gem_object_set_to_gtt_domain(obj, false); + } + } + intel_fb_obj_flush(obj, false, ORIGIN_GTT); out_unpin: - i915_gem_object_ggtt_unpin(obj); + if (node.allocated) { + wmb(); + ggtt->base.clear_range(&ggtt->base, + node.start, node.size, + true); + i915_gem_object_unpin_pages(obj); + remove_mappable_node(&node); + } else { + i915_gem_object_ggtt_unpin(obj); + } out: return ret; } @@ -1006,7 +1230,7 @@ out: } if (needs_clflush_after) - i915_gem_chipset_flush(dev); + i915_gem_chipset_flush(to_i915(dev)); else obj->cache_dirty = true; @@ -1016,6 +1240,9 @@ out: /** * Writes data to the object referenced by handle. + * @dev: drm device + * @data: ioctl data blob + * @file: drm file * * On error, the contents of the buffer that were to be modified are undefined. */ @@ -1062,14 +1289,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, goto out; } - /* prime objects have no backing filp to GEM pread/pwrite - * pages from. - */ - if (!obj->base.filp) { - ret = -EINVAL; - goto out; - } - trace_i915_gem_object_pwrite(obj, args->offset, args->size); ret = -EFAULT; @@ -1079,20 +1298,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * pread/pwrite currently are reading and writing from the CPU * perspective, requiring manual detiling by the client. */ - if (obj->tiling_mode == I915_TILING_NONE && - obj->base.write_domain != I915_GEM_DOMAIN_CPU && - cpu_write_needs_clflush(obj)) { - ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); + if (!obj->base.filp || cpu_write_needs_clflush(obj)) { + ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file); /* Note that the gtt paths might fail with non-page-backed user * pointers (e.g. gtt mappings when moving data between * textures). Fallback to the shmem path in that case. */ } - if (ret == -EFAULT || ret == -ENOSPC) { + if (ret == -EFAULT) { if (obj->phys_handle) ret = i915_gem_phys_pwrite(obj, args, file); - else + else if (obj->base.filp) ret = i915_gem_shmem_pwrite(dev, obj, args, file); + else + ret = -ENODEV; } out: @@ -1213,6 +1432,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) * @req: duh! * @interruptible: do an interruptible wait (normally yes) * @timeout: in - how long to wait (NULL forever); out - how much time remaining + * @rps: RPS client * * Note: It is of utmost importance that the passed in seqno and reset_counter * values have been read by the caller in an smp safe manner. Where read-side @@ -1230,8 +1450,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, struct intel_rps_client *rps) { struct intel_engine_cs *engine = i915_gem_request_get_engine(req); - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = req->i915; const bool irq_test_in_progress = ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine); int state = interruptible ? 
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; @@ -1413,6 +1632,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) list_del_init(&request->list); i915_gem_request_remove_from_client(request); + if (request->previous_context) { + if (i915.enable_execlists) + intel_lr_context_unpin(request->previous_context, + request->engine); + } + + i915_gem_context_unreference(request->ctx); i915_gem_request_unreference(request); } @@ -1422,7 +1648,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req) struct intel_engine_cs *engine = req->engine; struct drm_i915_gem_request *tmp; - lockdep_assert_held(&engine->dev->struct_mutex); + lockdep_assert_held(&engine->i915->dev->struct_mutex); if (list_empty(&req->list)) return; @@ -1440,6 +1666,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req) /** * Waits for a request to be signaled, and cleans up the * request and object lists appropriately for that event. + * @req: request to wait on */ int i915_wait_request(struct drm_i915_gem_request *req) @@ -1466,6 +1693,8 @@ i915_wait_request(struct drm_i915_gem_request *req) /** * Ensures that all rendering to the object has completed and the object is * safe to unbind from the GTT or access from the CPU. + * @obj: i915 gem object + * @readonly: waiting for read access or write */ int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, @@ -1583,6 +1812,9 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file) /** * Called when user space prepares to use an object with the CPU, either * through the mmap ioctl's mapping or a GTT mapping. + * @dev: drm device + * @data: ioctl data blob + * @file: drm file */ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, @@ -1646,6 +1878,9 @@ unlock: /** * Called when user space has done writes to this buffer + * @dev: drm device + * @data: ioctl data blob + * @file: drm file */ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, @@ -1676,8 +1911,11 @@ unlock: } /** - * Maps the contents of an object, returning the address it is mapped - * into. + * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address + * it is mapped to. + * @dev: drm device + * @data: ioctl data blob + * @file: drm file * * While the mapping holds a reference on the contents of the object, it doesn't * imply a ref on the object itself. @@ -1982,7 +2220,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) return size; /* Previous chips need a power-of-two fence region when tiling */ - if (INTEL_INFO(dev)->gen == 3) + if (IS_GEN3(dev)) gtt_size = 1024*1024; else gtt_size = 512*1024; @@ -1995,7 +2233,10 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) /** * i915_gem_get_gtt_alignment - return required GTT alignment for an object - * @obj: object to check + * @dev: drm device + * @size: object size + * @tiling_mode: tiling mode + * @fenced: is fenced alignment required or not * * Return the required GTT alignment for an object, taking into account * potential fence register mapping.
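[Note on the following i915_gem.c hunks: they convert open-coded scatterlist page walks to the new sgt_iter/for_each_sgt_page() helper. As an illustrative side-by-side sketch (not taken verbatim from the patch, but using the same names the hunks below use):

	/* old style: manual sg_page_iter over the sg_table */
	struct sg_page_iter sg_iter;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		put_page(sg_page_iter_page(&sg_iter));

	/* new style: for_each_sgt_page() hands back struct page directly */
	struct sgt_iter sgt_iter;
	struct page *page;

	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);

Both loops drop the references held on the object's backing pages; only the iteration API changes.]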
@@ -2162,7 +2403,8 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj) static void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) { - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + struct page *page; int ret; BUG_ON(obj->madv == __I915_MADV_PURGED); @@ -2184,9 +2426,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) if (obj->madv == I915_MADV_DONTNEED) obj->dirty = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - struct page *page = sg_page_iter_page(&sg_iter); - + for_each_sgt_page(page, sgt_iter, obj->pages) { if (obj->dirty) set_page_dirty(page); @@ -2243,7 +2483,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) struct address_space *mapping; struct sg_table *st; struct scatterlist *sg; - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; struct page *page; unsigned long last_pfn = 0; /* suppress gcc warning */ int ret; @@ -2340,8 +2580,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) err_pages: sg_mark_end(sg); - for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) - put_page(sg_page_iter_page(&sg_iter)); + for_each_sgt_page(page, sgt_iter, st) + put_page(page); sg_free_table(st); kfree(st); @@ -2395,6 +2635,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) return 0; } +/* The 'mapping' part of i915_gem_object_pin_map() below */ +static void *i915_gem_object_map(const struct drm_i915_gem_object *obj) +{ + unsigned long n_pages = obj->base.size >> PAGE_SHIFT; + struct sg_table *sgt = obj->pages; + struct sgt_iter sgt_iter; + struct page *page; + struct page *stack_pages[32]; + struct page **pages = stack_pages; + unsigned long i = 0; + void *addr; + + /* A single page can always be kmapped */ + if (n_pages == 1) + return kmap(sg_page(sgt->sgl)); + + if (n_pages > ARRAY_SIZE(stack_pages)) { + /* Too big for stack -- allocate temporary array instead */ + pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY); + if (!pages) + return NULL; + } + + for_each_sgt_page(page, sgt_iter, sgt) + pages[i++] = page; + + /* Check that we have the expected number of pages */ + GEM_BUG_ON(i != n_pages); + + addr = vmap(pages, n_pages, 0, PAGE_KERNEL); + + if (pages != stack_pages) + drm_free_large(pages); + + return addr; +} + +/* get, pin, and map the pages of the object into kernel space */ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj) { int ret; @@ -2407,29 +2685,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj) i915_gem_object_pin_pages(obj); - if (obj->mapping == NULL) { - struct page **pages; - - pages = NULL; - if (obj->base.size == PAGE_SIZE) - obj->mapping = kmap(sg_page(obj->pages->sgl)); - else - pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT, - sizeof(*pages), - GFP_TEMPORARY); - if (pages != NULL) { - struct sg_page_iter sg_iter; - int n; - - n = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, - obj->pages->nents, 0) - pages[n++] = sg_page_iter_page(&sg_iter); - - obj->mapping = vmap(pages, n, 0, PAGE_KERNEL); - drm_free_large(pages); - } - if (obj->mapping == NULL) { + if (!obj->mapping) { + obj->mapping = i915_gem_object_map(obj); + if (!obj->mapping) { i915_gem_object_unpin_pages(obj); return ERR_PTR(-ENOMEM); } @@ -2502,9 +2760,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring) } static int -i915_gem_init_seqno(struct drm_device *dev, u32 seqno) +i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; struct 
intel_engine_cs *engine; int ret; @@ -2514,7 +2771,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno) if (ret) return ret; } - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); /* Finally reset hw state */ for_each_engine(engine, dev_priv) @@ -2534,7 +2791,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) /* HWS page needs to be set less than what we * will inject to ring */ - ret = i915_gem_init_seqno(dev, seqno - 1); + ret = i915_gem_init_seqno(dev_priv, seqno - 1); if (ret) return ret; @@ -2550,13 +2807,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) } int -i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) +i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* reserve 0 for non-seqno */ if (dev_priv->next_seqno == 0) { - int ret = i915_gem_init_seqno(dev, 0); + int ret = i915_gem_init_seqno(dev_priv, 0); if (ret) return ret; @@ -2580,6 +2835,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, struct drm_i915_private *dev_priv; struct intel_ringbuffer *ringbuf; u32 request_start; + u32 reserved_tail; int ret; if (WARN_ON(request == NULL)) @@ -2594,9 +2850,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, * should already have been reserved in the ring buffer. Let the ring * know that it is time to use that space up. */ - intel_ring_reserved_space_use(ringbuf); - request_start = intel_ring_get_tail(ringbuf); + reserved_tail = request->reserved_space; + request->reserved_space = 0; + /* * Emit any outstanding flushes - execbuf can fail to emit the flush * after having emitted the batchbuffer command. Hence we need to fix @@ -2652,19 +2909,25 @@ void __i915_add_request(struct drm_i915_gem_request *request, /* Not allowed to fail! */ WARN(ret, "emit|add_request failed: %d!\n", ret); - i915_queue_hangcheck(engine->dev); + i915_queue_hangcheck(engine->i915); queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, round_jiffies_up_relative(HZ)); - intel_mark_busy(dev_priv->dev); + intel_mark_busy(dev_priv); /* Sanity check that the reserved size was large enough. 
*/ - intel_ring_reserved_space_end(ringbuf); + ret = intel_ring_get_tail(ringbuf) - request_start; + if (ret < 0) + ret += ringbuf->size; + WARN_ONCE(ret > reserved_tail, + "Not enough space reserved (%d bytes) " + "for adding the request (%d bytes)\n", + reserved_tail, ret); } static bool i915_context_is_banned(struct drm_i915_private *dev_priv, - const struct intel_context *ctx) + const struct i915_gem_context *ctx) { unsigned long elapsed; @@ -2689,7 +2952,7 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv, } static void i915_set_reset_status(struct drm_i915_private *dev_priv, - struct intel_context *ctx, + struct i915_gem_context *ctx, const bool guilty) { struct i915_ctx_hang_stats *hs; @@ -2712,27 +2975,15 @@ void i915_gem_request_free(struct kref *req_ref) { struct drm_i915_gem_request *req = container_of(req_ref, typeof(*req), ref); - struct intel_context *ctx = req->ctx; - - if (req->file_priv) - i915_gem_request_remove_from_client(req); - - if (ctx) { - if (i915.enable_execlists && ctx != req->i915->kernel_context) - intel_lr_context_unpin(ctx, req->engine); - - i915_gem_context_unreference(ctx); - } - kmem_cache_free(req->i915->requests, req); } static inline int __i915_gem_request_alloc(struct intel_engine_cs *engine, - struct intel_context *ctx, + struct i915_gem_context *ctx, struct drm_i915_gem_request **req_out) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error); struct drm_i915_gem_request *req; int ret; @@ -2754,7 +3005,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, if (req == NULL) return -ENOMEM; - ret = i915_gem_get_seqno(engine->dev, &req->seqno); + ret = i915_gem_get_seqno(engine->i915, &req->seqno); if (ret) goto err; @@ -2765,15 +3016,6 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, req->ctx = ctx; i915_gem_context_reference(req->ctx); - if (i915.enable_execlists) - ret = intel_logical_ring_alloc_request_extras(req); - else - ret = intel_ring_alloc_request_extras(req); - if (ret) { - i915_gem_context_unreference(req->ctx); - goto err; - } - /* * Reserve space in the ring buffer for all the commands required to * eventually emit this request. This is to guarantee that the @@ -2781,24 +3023,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, * to be redone if the request is not actually submitted straight * away, e.g. because a GPU scheduler has deferred it. */ + req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; + if (i915.enable_execlists) - ret = intel_logical_ring_reserve_space(req); + ret = intel_logical_ring_alloc_request_extras(req); else - ret = intel_ring_reserve_space(req); - if (ret) { - /* - * At this point, the request is fully allocated even if not - * fully prepared. Thus it can be cleaned up using the proper - * free code. 
- */ - intel_ring_reserved_space_cancel(req->ringbuf); - i915_gem_request_unreference(req); - return ret; - } + ret = intel_ring_alloc_request_extras(req); + if (ret) + goto err_ctx; *req_out = req; return 0; +err_ctx: + i915_gem_context_unreference(ctx); err: kmem_cache_free(dev_priv->requests, req); return ret; @@ -2818,13 +3056,13 @@ err: */ struct drm_i915_gem_request * i915_gem_request_alloc(struct intel_engine_cs *engine, - struct intel_context *ctx) + struct i915_gem_context *ctx) { struct drm_i915_gem_request *req; int err; if (ctx == NULL) - ctx = to_i915(engine->dev)->kernel_context; + ctx = engine->i915->kernel_context; err = __i915_gem_request_alloc(engine, ctx, &req); return err ? ERR_PTR(err) : req; } @@ -2888,13 +3126,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv, /* Ensure irq handler finishes or is cancelled. */ tasklet_kill(&engine->irq_tasklet); - spin_lock_bh(&engine->execlist_lock); - /* list_splice_tail_init checks for empty lists */ - list_splice_tail_init(&engine->execlist_queue, - &engine->execlist_retired_req_list); - spin_unlock_bh(&engine->execlist_lock); - - intel_execlists_retire_requests(engine); + intel_execlists_cancel_requests(engine); } /* @@ -2954,6 +3186,7 @@ void i915_gem_reset(struct drm_device *dev) /** * This function clears the request list as sequence numbers are passed. + * @engine: engine to retire requests on */ void i915_gem_retire_requests_ring(struct intel_engine_cs *engine) @@ -3005,9 +3238,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine) } bool -i915_gem_retire_requests(struct drm_device *dev) +i915_gem_retire_requests(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; bool idle = true; @@ -3018,8 +3250,6 @@ i915_gem_retire_requests(struct drm_device *dev) spin_lock_bh(&engine->execlist_lock); idle &= list_empty(&engine->execlist_queue); spin_unlock_bh(&engine->execlist_lock); - - intel_execlists_retire_requests(engine); } } @@ -3042,7 +3272,7 @@ i915_gem_retire_work_handler(struct work_struct *work) /* Come back later if the device is busy... */ idle = false; if (mutex_trylock(&dev->struct_mutex)) { - idle = i915_gem_retire_requests(dev); + idle = i915_gem_retire_requests(dev_priv); mutex_unlock(&dev->struct_mutex); } if (!idle) @@ -3066,7 +3296,7 @@ i915_gem_idle_work_handler(struct work_struct *work) * Also locking seems to be fubar here, engine->request_list is protected * by dev->struct_mutex. */ - intel_mark_idle(dev); + intel_mark_idle(dev_priv); if (mutex_trylock(&dev->struct_mutex)) { for_each_engine(engine, dev_priv) @@ -3080,6 +3310,7 @@ i915_gem_idle_work_handler(struct work_struct *work) * Ensures that an object will eventually get non-busy by flushing any required * write domains, emitting any outstanding lazy request and retiring any * completed requests.
+ * @obj: object to flush */ static int i915_gem_object_flush_active(struct drm_i915_gem_object *obj) @@ -3096,14 +3327,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) if (req == NULL) continue; - if (list_empty(&req->list)) - goto retire; - - if (i915_gem_request_completed(req, true)) { - __i915_gem_request_retire__upto(req); -retire: + if (i915_gem_request_completed(req, true)) i915_gem_object_retire__read(obj, i); - } } return 0; @@ -3111,7 +3336,9 @@ retire: /** * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT - * @DRM_IOCTL_ARGS: standard ioctl arguments + * @dev: drm device pointer + * @data: ioctl data blob + * @file: drm file pointer * * Returns 0 if successful, else an error is returned with the remaining time in * the timeout parameter. @@ -3185,7 +3412,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) ret = __i915_wait_request(req[i], true, args->timeout_ns > 0 ? &args->timeout_ns : NULL, to_rps_client(file)); - i915_gem_request_unreference__unlocked(req[i]); + i915_gem_request_unreference(req[i]); } return ret; @@ -3211,7 +3438,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, if (i915_gem_request_completed(from_req, true)) return 0; - if (!i915_semaphore_is_enabled(obj->base.dev)) { + if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) { struct drm_i915_private *i915 = to_i915(obj->base.dev); ret = __i915_wait_request(from_req, i915->mm.interruptible, @@ -3345,6 +3572,17 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) old_write_domain); } +static void __i915_vma_iounmap(struct i915_vma *vma) +{ + GEM_BUG_ON(vma->pin_count); + + if (vma->iomap == NULL) + return; + + io_mapping_unmap(vma->iomap); + vma->iomap = NULL; +} + static int __i915_vma_unbind(struct i915_vma *vma, bool wait) { struct drm_i915_gem_object *obj = vma->obj; @@ -3377,6 +3615,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait) ret = i915_gem_object_put_fence(obj); if (ret) return ret; + + __i915_vma_iounmap(vma); } trace_i915_vma_unbind(vma); @@ -3488,6 +3728,11 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma, /** * Finds free space in the GTT aperture and binds the object or a view of it * there. + * @obj: object to bind + * @vm: address space to bind into + * @ggtt_view: global gtt view if applicable + * @alignment: requested alignment + * @flags: mask of PIN_* flags to use */ static struct i915_vma * i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, @@ -3731,7 +3976,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) return; if (i915_gem_clflush_object(obj, obj->pin_display)) - i915_gem_chipset_flush(obj->base.dev); + i915_gem_chipset_flush(to_i915(obj->base.dev)); old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; @@ -3745,6 +3990,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) /** * Moves a single object to the GTT read, and possibly write domain. + * @obj: object to act on + * @write: ask for write access or read only * * This function returns when the move is complete, including waiting on * flushes to occur. @@ -3816,6 +4063,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) /** * Changes the cache-level of an object across all VMA. 
+ * @obj: object to act on + * @cache_level: new cache level to set for the object * * After this function returns, the object will be in the new cache-level * across all GTT and the contents of the backing storage will be coherent, @@ -3925,11 +4174,9 @@ out: * object is now coherent at its new cache level (with respect * to the access domain). */ - if (obj->cache_dirty && - obj->base.write_domain != I915_GEM_DOMAIN_CPU && - cpu_write_needs_clflush(obj)) { + if (obj->cache_dirty && cpu_write_needs_clflush(obj)) { if (i915_gem_clflush_object(obj, true)) - i915_gem_chipset_flush(obj->base.dev); + i915_gem_chipset_flush(to_i915(obj->base.dev)); } return 0; @@ -4097,6 +4344,8 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, /** * Moves a single object to the CPU read, and possibly write domain. + * @obj: object to act on + * @write: requesting write or read-only access * * This function returns when the move is complete, including waiting on * flushes to occur. @@ -4198,7 +4447,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) if (ret == 0) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); - i915_gem_request_unreference__unlocked(target); + i915_gem_request_unreference(target); return ret; } @@ -4499,21 +4748,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = { .put_pages = i915_gem_object_put_pages_gtt, }; -struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, +struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, size_t size) { struct drm_i915_gem_object *obj; struct address_space *mapping; gfp_t mask; + int ret; obj = i915_gem_object_alloc(dev); if (obj == NULL) - return NULL; + return ERR_PTR(-ENOMEM); - if (drm_gem_object_init(dev, &obj->base, size) != 0) { - i915_gem_object_free(obj); - return NULL; - } + ret = drm_gem_object_init(dev, &obj->base, size); + if (ret) + goto fail; mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) { @@ -4550,6 +4799,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, trace_i915_gem_object_create(obj); return obj; + +fail: + i915_gem_object_free(obj); + + return ERR_PTR(ret); } static bool discard_backing_storage(struct drm_i915_gem_object *obj) @@ -4655,16 +4909,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; - BUG_ON(!view); + GEM_BUG_ON(!view); list_for_each_entry(vma, &obj->vma_list, obj_link) - if (vma->vm == &ggtt->base && - i915_ggtt_view_equal(&vma->ggtt_view, view)) + if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view)) return vma; return NULL; } @@ -4706,9 +4956,10 @@ i915_gem_suspend(struct drm_device *dev) if (ret) goto err; - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); i915_gem_stop_engines(dev); + i915_gem_context_lost(dev_priv); mutex_unlock(&dev->struct_mutex); cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); @@ -4727,37 +4978,6 @@ err: return ret; } -int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice) -{ - struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 *remap_info = 
dev_priv->l3_parity.remap_info[slice]; - int i, ret; - - if (!HAS_L3_DPF(dev) || !remap_info) - return 0; - - ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3); - if (ret) - return ret; - - /* - * Note: We do not worry about the concurrent register cacheline hang - * here because no other code should access these registers other than - * at initialization time. - */ - for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) { - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i)); - intel_ring_emit(engine, remap_info[i]); - } - - intel_ring_advance(engine); - - return ret; -} - void i915_gem_init_swizzling(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4862,7 +5082,7 @@ i915_gem_init_hw(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int ret, j; + int ret; /* Double layer security blanket, see i915_gem_init() */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); @@ -4914,58 +5134,15 @@ i915_gem_init_hw(struct drm_device *dev) intel_mocs_init_l3cc_table(dev); /* We can't enable contexts until all firmware is loaded */ - if (HAS_GUC_UCODE(dev)) { - ret = intel_guc_ucode_load(dev); - if (ret) { - DRM_ERROR("Failed to initialize GuC, error %d\n", ret); - ret = -EIO; - goto out; - } - } + ret = intel_guc_setup(dev); + if (ret) + goto out; /* * Increment the next seqno by 0x100 so we have a visible break * on re-initialisation */ ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100); - if (ret) - goto out; - - /* Now it is safe to go back round and do everything else: */ - for_each_engine(engine, dev_priv) { - struct drm_i915_gem_request *req; - - req = i915_gem_request_alloc(engine, NULL); - if (IS_ERR(req)) { - ret = PTR_ERR(req); - break; - } - - if (engine->id == RCS) { - for (j = 0; j < NUM_L3_SLICES(dev); j++) { - ret = i915_gem_l3_remap(req, j); - if (ret) - goto err_request; - } - } - - ret = i915_ppgtt_init_ring(req); - if (ret) - goto err_request; - - ret = i915_gem_context_enable(req); - if (ret) - goto err_request; - -err_request: - i915_add_request_no_flush(req); - if (ret) { - DRM_ERROR("Failed to enable %s, error=%d\n", - engine->name, ret); - i915_gem_cleanup_engines(dev); - break; - } - } out: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); @@ -4977,9 +5154,6 @@ int i915_gem_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - i915.enable_execlists = intel_sanitize_enable_execlists(dev, - i915.enable_execlists); - mutex_lock(&dev->struct_mutex); if (!i915.enable_execlists) { @@ -5002,10 +5176,7 @@ int i915_gem_init(struct drm_device *dev) */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - ret = i915_gem_init_userptr(dev); - if (ret) - goto out_unlock; - + i915_gem_init_userptr(dev_priv); i915_gem_init_ggtt(dev); ret = i915_gem_context_init(dev); @@ -5042,14 +5213,6 @@ i915_gem_cleanup_engines(struct drm_device *dev) for_each_engine(engine, dev_priv) dev_priv->gt.cleanup_engine(engine); - - if (i915.enable_execlists) - /* - * Neither the BIOS, ourselves or any other kernel - * expects the system to be in execlists mode on startup, - * so we need to reset the GPU back to legacy mode. 
- */ - intel_gpu_reset(dev, ALL_ENGINES); } static void @@ -5073,7 +5236,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) else dev_priv->num_fence_regs = 8; - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) dev_priv->num_fence_regs = I915_READ(vgtif_reg(avail_rs.fence_num)); @@ -5148,6 +5311,34 @@ void i915_gem_load_cleanup(struct drm_device *dev) kmem_cache_destroy(dev_priv->objects); } +int i915_gem_freeze_late(struct drm_i915_private *dev_priv) +{ + struct drm_i915_gem_object *obj; + + /* Called just before we write the hibernation image. + * + * We need to update the domain tracking to reflect that the CPU + * will be accessing all the pages to create and restore from the + * hibernation, and so upon restoration those pages will be in the + * CPU domain. + * + * To make sure the hibernation image contains the latest state, + * we update that state just before writing out the image. + */ + + list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { + obj->base.read_domains = I915_GEM_DOMAIN_CPU; + obj->base.write_domain = I915_GEM_DOMAIN_CPU; + } + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + obj->base.read_domains = I915_GEM_DOMAIN_CPU; + obj->base.write_domain = I915_GEM_DOMAIN_CPU; + } + + return 0; +} + void i915_gem_release(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; @@ -5254,13 +5445,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, const struct i915_ggtt_view *view) { - struct drm_i915_private *dev_priv = to_i915(o->base.dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; list_for_each_entry(vma, &o->vma_list, obj_link) - if (vma->vm == &ggtt->base && - i915_ggtt_view_equal(&vma->ggtt_view, view)) + if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view)) return vma->node.start; WARN(1, "global vma for this object not found. 
(view=%u)\n", view->type); @@ -5286,12 +5474,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o, bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, const struct i915_ggtt_view *view) { - struct drm_i915_private *dev_priv = to_i915(o->base.dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; list_for_each_entry(vma, &o->vma_list, obj_link) - if (vma->vm == &ggtt->base && + if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view) && drm_mm_node_allocated(&vma->node)) return true; @@ -5310,23 +5496,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) return false; } -unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, - struct i915_address_space *vm) +unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o) { - struct drm_i915_private *dev_priv = o->base.dev->dev_private; struct i915_vma *vma; - WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); - - BUG_ON(list_empty(&o->vma_list)); + GEM_BUG_ON(list_empty(&o->vma_list)); list_for_each_entry(vma, &o->vma_list, obj_link) { if (vma->is_ggtt && - vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) - continue; - if (vma->vm == vm) + vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) return vma->node.size; } + return 0; } @@ -5365,8 +5546,8 @@ i915_gem_object_create_from_data(struct drm_device *dev, size_t bytes; int ret; - obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE)); - if (IS_ERR_OR_NULL(obj)) + obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE)); + if (IS_ERR(obj)) return obj; ret = i915_gem_object_set_to_cpu_domain(obj, true); diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c index 7bf2f3f2968e..3752d5daa4b2 100644 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c @@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, if (obj == NULL) { int ret; - obj = i915_gem_alloc_object(pool->dev, size); - if (obj == NULL) - return ERR_PTR(-ENOMEM); + obj = i915_gem_object_create(pool->dev, size); + if (IS_ERR(obj)) + return obj; ret = i915_gem_object_get_pages(obj); if (ret) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index e5acc3916f75..30d9b4fd30f3 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -90,6 +90,8 @@ #include "i915_drv.h" #include "i915_trace.h" +#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1) + /* This is a HW constraint. The value below is the largest known requirement * I've seen in a spec to date, and that was a workaround for a non-shipping * part. It should be safe to decrease this, but it's more future proof as is.
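[Note: ALL_L3_SLICES() above packs one bit per L3 slice into a mask, and __create_hw_context() further down seeds ctx->remap_slice with it so every slice has its L3 parity remap reprogrammed on first use. A minimal sketch of how such a mask is typically drained, one slice per set bit, using the remap_l3() helper added later in this patch (the actual call site is outside this excerpt, so treat this as illustrative only):

	/* Illustrative only: consume the pending-remap mask bit by bit. */
	while (to->remap_slice) {
		int slice = ffs(to->remap_slice) - 1; /* lowest pending slice */
		int ret = remap_l3(req, slice);

		if (ret)
			return ret;

		to->remap_slice &= ~(1 << slice);
	}

Each successful remap_l3() emits the MI_LOAD_REGISTER_IMM writes of the saved GEN7_L3LOG values for that slice, as shown in the remap_l3() hunk further down.]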
@@ -97,28 +99,27 @@ #define GEN6_CONTEXT_ALIGN (64<<10) #define GEN7_CONTEXT_ALIGN 4096 -static size_t get_context_alignment(struct drm_device *dev) +static size_t get_context_alignment(struct drm_i915_private *dev_priv) { - if (IS_GEN6(dev)) + if (IS_GEN6(dev_priv)) return GEN6_CONTEXT_ALIGN; return GEN7_CONTEXT_ALIGN; } -static int get_context_size(struct drm_device *dev) +static int get_context_size(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret; u32 reg; - switch (INTEL_INFO(dev)->gen) { + switch (INTEL_GEN(dev_priv)) { case 6: reg = I915_READ(CXT_SIZE); ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; break; case 7: reg = I915_READ(GEN7_CXT_SIZE); - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) ret = HSW_CXT_TOTAL_SIZE; else ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; @@ -133,7 +134,7 @@ static int get_context_size(struct drm_device *dev) return ret; } -static void i915_gem_context_clean(struct intel_context *ctx) +static void i915_gem_context_clean(struct i915_gem_context *ctx) { struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; struct i915_vma *vma, *next; @@ -150,13 +151,12 @@ static void i915_gem_context_clean(struct intel_context *ctx) void i915_gem_context_free(struct kref *ctx_ref) { - struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); + struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); + int i; + lockdep_assert_held(&ctx->i915->dev->struct_mutex); trace_i915_context_free(ctx); - if (i915.enable_execlists) - intel_lr_context_free(ctx); - /* * This context is going away and we need to remove all VMAs still * around. This is to handle imported shared objects for which @@ -166,9 +166,22 @@ void i915_gem_context_free(struct kref *ctx_ref) i915_ppgtt_put(ctx->ppgtt); - if (ctx->legacy_hw_ctx.rcs_state) - drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base); + for (i = 0; i < I915_NUM_ENGINES; i++) { + struct intel_context *ce = &ctx->engine[i]; + + if (!ce->state) + continue; + + WARN_ON(ce->pin_count); + if (ce->ringbuf) + intel_ringbuffer_free(ce->ringbuf); + + drm_gem_object_unreference(&ce->state->base); + } + list_del(&ctx->link); + + ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id); kfree(ctx); } @@ -178,9 +191,11 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) struct drm_i915_gem_object *obj; int ret; - obj = i915_gem_alloc_object(dev, size); - if (obj == NULL) - return ERR_PTR(-ENOMEM); + lockdep_assert_held(&dev->struct_mutex); + + obj = i915_gem_object_create(dev, size); + if (IS_ERR(obj)) + return obj; /* * Try to make the context utilize L3 as well as LLC. @@ -209,18 +224,46 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) return obj; } -static struct intel_context * +static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) +{ + int ret; + + ret = ida_simple_get(&dev_priv->context_hw_ida, + 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); + if (ret < 0) { + /* Contexts are only released when no longer active. + * Flush any pending retires to hopefully release some + * stale contexts and try again. 
+ */ + i915_gem_retire_requests(dev_priv); + ret = ida_simple_get(&dev_priv->context_hw_ida, + 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); + if (ret < 0) + return ret; + } + + *out = ret; + return 0; +} + +static struct i915_gem_context * __create_hw_context(struct drm_device *dev, struct drm_i915_file_private *file_priv) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (ctx == NULL) return ERR_PTR(-ENOMEM); + ret = assign_hw_id(dev_priv, &ctx->hw_id); + if (ret) { + kfree(ctx); + return ERR_PTR(ret); + } + kref_init(&ctx->ref); list_add_tail(&ctx->link, &dev_priv->context_list); ctx->i915 = dev_priv; @@ -232,7 +275,7 @@ __create_hw_context(struct drm_device *dev, ret = PTR_ERR(obj); goto err_out; } - ctx->legacy_hw_ctx.rcs_state = obj; + ctx->engine[RCS].state = obj; } /* Default context will never have a file_priv */ @@ -249,9 +292,13 @@ __create_hw_context(struct drm_device *dev, /* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists. If there * is no remap info, it will be a NOP. */ - ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1; + ctx->remap_slice = ALL_L3_SLICES(dev_priv); ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD; + ctx->ring_size = 4 * PAGE_SIZE; + ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << + GEN8_CTX_ADDRESSING_MODE_SHIFT; + ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier); return ctx; @@ -265,44 +312,27 @@ err_out: * context state of the GPU for applications that don't utilize HW contexts, as * well as an idle case. */ -static struct intel_context * +static struct i915_gem_context * i915_gem_create_context(struct drm_device *dev, struct drm_i915_file_private *file_priv) { - const bool is_global_default_ctx = file_priv == NULL; - struct intel_context *ctx; - int ret = 0; + struct i915_gem_context *ctx; - BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + lockdep_assert_held(&dev->struct_mutex); ctx = __create_hw_context(dev, file_priv); if (IS_ERR(ctx)) return ctx; - if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) { - /* We may need to do things with the shrinker which - * require us to immediately switch back to the default - * context. This can cause a problem as pinning the - * default context also requires GTT space which may not - * be available. To avoid this we always pin the default - * context. 
- */ - ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state, - get_context_alignment(dev), 0); - if (ret) { - DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); - goto err_destroy; - } - } - if (USES_FULL_PPGTT(dev)) { struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv); - if (IS_ERR_OR_NULL(ppgtt)) { + if (IS_ERR(ppgtt)) { DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); - ret = PTR_ERR(ppgtt); - goto err_unpin; + idr_remove(&file_priv->context_idr, ctx->user_handle); + i915_gem_context_unreference(ctx); + return ERR_CAST(ppgtt); } ctx->ppgtt = ppgtt; @@ -311,24 +341,53 @@ i915_gem_create_context(struct drm_device *dev, trace_i915_context_create(ctx); return ctx; +} -err_unpin: - if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) - i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state); -err_destroy: - idr_remove(&file_priv->context_idr, ctx->user_handle); - i915_gem_context_unreference(ctx); - return ERR_PTR(ret); +/** + * i915_gem_context_create_gvt - create a GVT GEM context + * @dev: drm device pointer + * + * This function is used to create a GVT specific GEM context. + * + * Returns: + * pointer to i915_gem_context on success, error pointer if failed + * + */ +struct i915_gem_context * +i915_gem_context_create_gvt(struct drm_device *dev) +{ + struct i915_gem_context *ctx; + int ret; + + if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) + return ERR_PTR(-ENODEV); + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ERR_PTR(ret); + + ctx = i915_gem_create_context(dev, NULL); + if (IS_ERR(ctx)) + goto out; + + ctx->execlists_force_single_submission = true; + ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */ +out: + mutex_unlock(&dev->struct_mutex); + return ctx; } -static void i915_gem_context_unpin(struct intel_context *ctx, +static void i915_gem_context_unpin(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { if (i915.enable_execlists) { intel_lr_context_unpin(ctx, engine); } else { - if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state) - i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state); + struct intel_context *ce = &ctx->engine[engine->id]; + + if (ce->state) + i915_gem_object_ggtt_unpin(ce->state); + i915_gem_context_unreference(ctx); } } @@ -336,51 +395,48 @@ static void i915_gem_context_unpin(struct intel_context *ctx, void i915_gem_context_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int i; + + lockdep_assert_held(&dev->struct_mutex); if (i915.enable_execlists) { - struct intel_context *ctx; + struct i915_gem_context *ctx; list_for_each_entry(ctx, &dev_priv->context_list, link) intel_lr_context_reset(dev_priv, ctx); } - for (i = 0; i < I915_NUM_ENGINES; i++) { - struct intel_engine_cs *engine = &dev_priv->engine[i]; - - if (engine->last_context) { - i915_gem_context_unpin(engine->last_context, engine); - engine->last_context = NULL; - } - } - - /* Force the GPU state to be reinitialised on enabling */ - dev_priv->kernel_context->legacy_hw_ctx.initialized = false; + i915_gem_context_lost(dev_priv); } int i915_gem_context_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_context *ctx; + struct i915_gem_context *ctx; /* Init should only be called once per module load. Eventually the * restriction on the context_disabled check can be loosened.
*/ if (WARN_ON(dev_priv->kernel_context)) return 0; - if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) { + if (intel_vgpu_active(dev_priv) && + HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { if (!i915.enable_execlists) { DRM_INFO("Only EXECLIST mode is supported in vgpu.\n"); return -EINVAL; } } + /* Using the simple ida interface, the max is limited by sizeof(int) */ + BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); + ida_init(&dev_priv->context_hw_ida); + if (i915.enable_execlists) { /* NB: intentionally left blank. We will allocate our own * backing objects as we need them, thank you very much */ dev_priv->hw_context_size = 0; - } else if (HAS_HW_CONTEXTS(dev)) { - dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); + } else if (HAS_HW_CONTEXTS(dev_priv)) { + dev_priv->hw_context_size = + round_up(get_context_size(dev_priv), 4096); if (dev_priv->hw_context_size > (1<<20)) { DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n", dev_priv->hw_context_size); @@ -395,6 +451,26 @@ int i915_gem_context_init(struct drm_device *dev) return PTR_ERR(ctx); } + if (!i915.enable_execlists && ctx->engine[RCS].state) { + int ret; + + /* We may need to do things with the shrinker which + * require us to immediately switch back to the default + * context. This can cause a problem as pinning the + * default context also requires GTT space which may not + * be available. To avoid this we always pin the default + * context. + */ + ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state, + get_context_alignment(dev_priv), 0); + if (ret) { + DRM_ERROR("Failed to pin default global context (error %d)\n", + ret); + i915_gem_context_unreference(ctx); + return ret; + } + } + dev_priv->kernel_context = ctx; DRM_DEBUG_DRIVER("%s context support initialized\n", @@ -403,67 +479,48 @@ int i915_gem_context_init(struct drm_device *dev) return 0; } -void i915_gem_context_fini(struct drm_device *dev) +void i915_gem_context_lost(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_context *dctx = dev_priv->kernel_context; - int i; - - if (dctx->legacy_hw_ctx.rcs_state) { - /* The only known way to stop the gpu from accessing the hw context is - * to reset it. Do this as the very last operation to avoid confusing - * other code, leading to spurious errors. */ - intel_gpu_reset(dev, ALL_ENGINES); - - /* When default context is created and switched to, base object refcount - * will be 2 (+1 from object creation and +1 from do_switch()). - * i915_gem_context_fini() will be called after gpu_idle() has switched - * to default context. So we need to unreference the base object once - * to offset the do_switch part, so that i915_gem_context_unreference() - * can then free the base object correctly.
*/ - WARN_ON(!dev_priv->engine[RCS].last_context); - - i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); - } + struct intel_engine_cs *engine; - for (i = I915_NUM_ENGINES; --i >= 0;) { - struct intel_engine_cs *engine = &dev_priv->engine[i]; + lockdep_assert_held(&dev_priv->dev->struct_mutex); + for_each_engine(engine, dev_priv) { if (engine->last_context) { i915_gem_context_unpin(engine->last_context, engine); engine->last_context = NULL; } + + /* Force the GPU state to be reinitialised on enabling */ + dev_priv->kernel_context->engine[engine->id].initialised = + engine->init_context == NULL; } - i915_gem_context_unreference(dctx); - dev_priv->kernel_context = NULL; + /* Force the GPU state to be reinitialised on enabling */ + dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv); } -int i915_gem_context_enable(struct drm_i915_gem_request *req) +void i915_gem_context_fini(struct drm_device *dev) { - struct intel_engine_cs *engine = req->engine; - int ret; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_gem_context *dctx = dev_priv->kernel_context; - if (i915.enable_execlists) { - if (engine->init_context == NULL) - return 0; + lockdep_assert_held(&dev->struct_mutex); - ret = engine->init_context(req); - } else - ret = i915_switch_context(req); + if (!i915.enable_execlists && dctx->engine[RCS].state) + i915_gem_object_ggtt_unpin(dctx->engine[RCS].state); - if (ret) { - DRM_ERROR("ring init context: %d\n", ret); - return ret; - } + i915_gem_context_unreference(dctx); + dev_priv->kernel_context = NULL; - return 0; + ida_destroy(&dev_priv->context_hw_ida); } static int context_idr_cleanup(int id, void *p, void *data) { - struct intel_context *ctx = p; + struct i915_gem_context *ctx = p; + ctx->file_priv = ERR_PTR(-EBADF); i915_gem_context_unreference(ctx); return 0; } @@ -471,7 +528,7 @@ static int context_idr_cleanup(int id, void *p, void *data) int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; - struct intel_context *ctx; + struct i915_gem_context *ctx; idr_init(&file_priv->context_idr); @@ -491,31 +548,22 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; + lockdep_assert_held(&dev->struct_mutex); + idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); idr_destroy(&file_priv->context_idr); } -struct intel_context * -i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) -{ - struct intel_context *ctx; - - ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id); - if (!ctx) - return ERR_PTR(-ENOENT); - - return ctx; -} - static inline int mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) { + struct drm_i915_private *dev_priv = req->i915; struct intel_engine_cs *engine = req->engine; u32 flags = hw_flags | MI_MM_SPACE_GTT; const int num_rings = /* Use an extended w/a on ivb+ if signalling from other rings */ - i915_semaphore_is_enabled(engine->dev) ? - hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 : + i915_semaphore_is_enabled(dev_priv) ? + hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 : 0; int len, ret; @@ -524,21 +572,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) * explicitly, so we rely on the value at ring init, stored in * itlb_before_ctx_switch. 
*/ - if (IS_GEN6(engine->dev)) { + if (IS_GEN6(dev_priv)) { ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0); if (ret) return ret; } /* These flags are for resource streamer on HSW+ */ - if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8) + if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8) flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN); - else if (INTEL_INFO(engine->dev)->gen < 8) + else if (INTEL_GEN(dev_priv) < 8) flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); len = 4; - if (INTEL_INFO(engine->dev)->gen >= 7) + if (INTEL_GEN(dev_priv) >= 7) len += 2 + (num_rings ? 4*num_rings + 6 : 0); ret = intel_ring_begin(req, len); @@ -546,14 +594,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) return ret; /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ - if (INTEL_INFO(engine->dev)->gen >= 7) { + if (INTEL_GEN(dev_priv) >= 7) { intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE); if (num_rings) { struct intel_engine_cs *signaller; intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_engine(signaller, to_i915(engine->dev)) { + for_each_engine(signaller, dev_priv) { if (signaller == engine) continue; @@ -568,7 +616,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) intel_ring_emit(engine, MI_NOOP); intel_ring_emit(engine, MI_SET_CONTEXT); intel_ring_emit(engine, - i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) | + i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) | flags); /* * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP @@ -576,14 +624,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) */ intel_ring_emit(engine, MI_NOOP); - if (INTEL_INFO(engine->dev)->gen >= 7) { + if (INTEL_GEN(dev_priv) >= 7) { if (num_rings) { struct intel_engine_cs *signaller; i915_reg_t last_reg = {}; /* keep gcc quiet */ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_engine(signaller, to_i915(engine->dev)) { + for_each_engine(signaller, dev_priv) { if (signaller == engine) continue; @@ -609,45 +657,83 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) return ret; } -static inline bool skip_rcs_switch(struct intel_engine_cs *engine, - struct intel_context *to) +static int remap_l3(struct drm_i915_gem_request *req, int slice) +{ + u32 *remap_info = req->i915->l3_parity.remap_info[slice]; + struct intel_engine_cs *engine = req->engine; + int i, ret; + + if (!remap_info) + return 0; + + ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2); + if (ret) + return ret; + + /* + * Note: We do not worry about the concurrent register cacheline hang + * here because no other code should access these registers other than + * at initialization time. 
+ */ + intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4)); + for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) { + intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i)); + intel_ring_emit(engine, remap_info[i]); + } + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); + + return 0; +} + +static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt, + struct intel_engine_cs *engine, + struct i915_gem_context *to) { if (to->remap_slice) return false; - if (!to->legacy_hw_ctx.initialized) + if (!to->engine[RCS].initialised) return false; - if (to->ppgtt && - !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) + if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) return false; return to == engine->last_context; } static bool -needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to) +needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, + struct intel_engine_cs *engine, + struct i915_gem_context *to) { - if (!to->ppgtt) + if (!ppgtt) return false; + /* Always load the ppgtt on first use */ + if (!engine->last_context) + return true; + + /* Same context without new entries, skip */ if (engine->last_context == to && - !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) + !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) return false; if (engine->id != RCS) return true; - if (INTEL_INFO(engine->dev)->gen < 8) + if (INTEL_GEN(engine->i915) < 8) return true; return false; } static bool -needs_pd_load_post(struct intel_context *to, u32 hw_flags) +needs_pd_load_post(struct i915_hw_ppgtt *ppgtt, + struct i915_gem_context *to, + u32 hw_flags) { - if (!to->ppgtt) + if (!ppgtt) return false; if (!IS_GEN8(to->i915)) @@ -661,18 +747,19 @@ needs_pd_load_post(struct intel_context *to, u32 hw_flags) static int do_rcs_switch(struct drm_i915_gem_request *req) { - struct intel_context *to = req->ctx; + struct i915_gem_context *to = req->ctx; struct intel_engine_cs *engine = req->engine; - struct intel_context *from; + struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt; + struct i915_gem_context *from; u32 hw_flags; int ret, i; - if (skip_rcs_switch(engine, to)) + if (skip_rcs_switch(ppgtt, engine, to)) return 0; /* Trying to pin first makes error handling easier. */ - ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, - get_context_alignment(engine->dev), + ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state, + get_context_alignment(engine->i915), 0); if (ret) return ret; @@ -694,37 +781,32 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) * * XXX: We need a real interface to do this instead of trickery. */ - ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false); + ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false); if (ret) goto unpin_out; - if (needs_pd_load_pre(engine, to)) { + if (needs_pd_load_pre(ppgtt, engine, to)) { /* Older GENs and non render rings still want the load first, * "PP_DCLV followed by PP_DIR_BASE register through Load * Register Immediate commands in Ring Buffer before submitting * a context."*/ trace_switch_mm(engine, to); - ret = to->ppgtt->switch_mm(to->ppgtt, req); + ret = ppgtt->switch_mm(ppgtt, req); if (ret) goto unpin_out; } - if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) + if (!to->engine[RCS].initialised || i915_gem_context_is_default(to)) /* NB: If we inhibit the restore, the context is not allowed to * die because future work may end up depending on valid address * space. 
This means we must enforce that a page table load * occur when this occurs. */ hw_flags = MI_RESTORE_INHIBIT; - else if (to->ppgtt && - intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings) + else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings) hw_flags = MI_FORCE_RESTORE; else hw_flags = 0; - /* We should never emit switch_mm more than once */ - WARN_ON(needs_pd_load_pre(engine, to) && - needs_pd_load_post(to, hw_flags)); - if (to != from || (hw_flags & MI_FORCE_RESTORE)) { ret = mi_set_context(req, hw_flags); if (ret) @@ -738,8 +820,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) * MI_SET_CONTEXT instead of when the next seqno has completed. */ if (from != NULL) { - from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; - i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req); + from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; + i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req); /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the * whole damn pipeline, we don't need to explicitly mark the * object dirty. The only exception is that the context must be @@ -747,10 +829,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) * able to defer doing this until we know the object would be * swapped, but there is no way to do that yet. */ - from->legacy_hw_ctx.rcs_state->dirty = 1; + from->engine[RCS].state->dirty = 1; /* obj is kept alive until the next request by its active ref */ - i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); + i915_gem_object_ggtt_unpin(from->engine[RCS].state); i915_gem_context_unreference(from); } i915_gem_context_reference(to); @@ -759,9 +841,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) /* GEN8 does *not* require an explicit reload if the PDPs have been * setup, and we do not wish to move them. */ - if (needs_pd_load_post(to, hw_flags)) { + if (needs_pd_load_post(ppgtt, to, hw_flags)) { trace_switch_mm(engine, to); - ret = to->ppgtt->switch_mm(to->ppgtt, req); + ret = ppgtt->switch_mm(ppgtt, req); /* The hardware context switch is emitted, but we haven't * actually changed the state - so it's probably safe to bail * here. 
Still, let the user know something dangerous has @@ -771,33 +853,33 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) return ret; } - if (to->ppgtt) - to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); + if (ppgtt) + ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); for (i = 0; i < MAX_L3_SLICES; i++) { if (!(to->remap_slice & (1<<i))) continue; - ret = i915_gem_l3_remap(req, i); + ret = remap_l3(req, i); if (ret) return ret; to->remap_slice &= ~(1<<i); } - if (!to->legacy_hw_ctx.initialized) { + if (!to->engine[RCS].initialised) { if (engine->init_context) { ret = engine->init_context(req); if (ret) return ret; } - to->legacy_hw_ctx.initialized = true; + to->engine[RCS].initialised = true; } return 0; unpin_out: - i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state); + i915_gem_object_ggtt_unpin(to->engine[RCS].state); return ret; } @@ -817,25 +899,24 @@ unpin_out: int i915_switch_context(struct drm_i915_gem_request *req) { struct intel_engine_cs *engine = req->engine; - struct drm_i915_private *dev_priv = req->i915; WARN_ON(i915.enable_execlists); - WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); + lockdep_assert_held(&req->i915->dev->struct_mutex); - if (engine->id != RCS || - req->ctx->legacy_hw_ctx.rcs_state == NULL) { - struct intel_context *to = req->ctx; + if (!req->ctx->engine[engine->id].state) { + struct i915_gem_context *to = req->ctx; + struct i915_hw_ppgtt *ppgtt = + to->ppgtt ?: req->i915->mm.aliasing_ppgtt; - if (needs_pd_load_pre(engine, to)) { + if (needs_pd_load_pre(ppgtt, engine, to)) { int ret; trace_switch_mm(engine, to); - ret = to->ppgtt->switch_mm(to->ppgtt, req); + ret = ppgtt->switch_mm(ppgtt, req); if (ret) return ret; - /* Doing a PD load always reloads the page dirs */ - to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); + ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); } if (to != engine->last_context) { @@ -861,7 +942,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_context_create *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; if (!contexts_enabled(dev)) @@ -890,7 +971,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_context_destroy *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; if (args->pad != 0) @@ -903,13 +984,13 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - ctx = i915_gem_context_get(file_priv, args->ctx_id); + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); return PTR_ERR(ctx); } - idr_remove(&ctx->file_priv->context_idr, ctx->user_handle); + idr_remove(&file_priv->context_idr, ctx->user_handle); i915_gem_context_unreference(ctx); mutex_unlock(&dev->struct_mutex); @@ -922,14 +1003,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - ctx = i915_gem_context_get(file_priv, args->ctx_id); + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); return PTR_ERR(ctx); @@ -965,14 +1046,14 @@ int 
i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - ctx = i915_gem_context_get(file_priv, args->ctx_id); + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); return PTR_ERR(ctx); @@ -1004,3 +1085,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, return ret; } + +int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, + void *data, struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_reset_stats *args = data; + struct i915_ctx_hang_stats *hs; + struct i915_gem_context *ctx; + int ret; + + if (args->flags || args->pad) + return -EINVAL; + + if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id); + if (IS_ERR(ctx)) { + mutex_unlock(&dev->struct_mutex); + return PTR_ERR(ctx); + } + hs = &ctx->hang_stats; + + if (capable(CAP_SYS_ADMIN)) + args->reset_count = i915_reset_count(&dev_priv->gpu_error); + else + args->reset_count = 0; + + args->batch_active = hs->batch_active; + args->batch_pending = hs->batch_pending; + + mutex_unlock(&dev->struct_mutex); + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/i915_gem_dmabuf.h new file mode 100644 index 000000000000..91315557e421 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.h @@ -0,0 +1,45 @@ +/* + * Copyright 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
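A minimal userspace sketch of exercising the reset-stats ioctl added above (a hypothetical helper; exact include paths vary by setup, but struct drm_i915_reset_stats and DRM_IOCTL_I915_GET_RESET_STATS come from i915_drm.h and drmIoctl() from libdrm):

	#include <errno.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	/* Sum the hangs attributed to @ctx_id, or return a negative errno. */
	static int query_reset_stats(int fd, unsigned int ctx_id)
	{
		struct drm_i915_reset_stats stats;

		memset(&stats, 0, sizeof(stats));
		stats.ctx_id = ctx_id; /* the default context handle needs CAP_SYS_ADMIN */

		if (drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
			return -errno;

		/*
		 * batch_active counts hangs while this context's batch was
		 * executing; batch_pending counts hangs while its batches were
		 * queued. reset_count is global and reported to root only.
		 */
		return (int)(stats.batch_active + stats.batch_pending);
	}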
+ * + */ + +#ifndef _I915_GEM_DMABUF_H_ +#define _I915_GEM_DMABUF_H_ + +#include <linux/dma-buf.h> + +static inline struct reservation_object * +i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj) +{ + struct dma_buf *dma_buf; + + if (obj->base.dma_buf) + dma_buf = obj->base.dma_buf; + else if (obj->base.import_attach) + dma_buf = obj->base.import_attach->dmabuf; + else + return NULL; + + return dma_buf->resv; +} + +#endif diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index ea1f8d1bd228..b144c3f5c650 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -154,7 +154,7 @@ none: if (ret) return ret; - i915_gem_retire_requests(dev); + i915_gem_retire_requests(to_i915(dev)); goto search_again; } @@ -265,7 +265,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) if (ret) return ret; - i915_gem_retire_requests(vm->dev); + i915_gem_retire_requests(to_i915(vm->dev)); WARN_ON(!list_empty(&vm->active_list)); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 33df74d98269..7941f1fe9cd2 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -714,7 +714,7 @@ eb_vma_misplaced(struct i915_vma *vma) static int i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, struct list_head *vmas, - struct intel_context *ctx, + struct i915_gem_context *ctx, bool *need_relocs) { struct drm_i915_gem_object *obj; @@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, struct i915_address_space *vm; struct list_head ordered_vmas; struct list_head pinned_vmas; - bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4; + bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4; int retry; i915_gem_retire_requests_ring(engine); @@ -826,7 +826,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct intel_engine_cs *engine, struct eb_vmas *eb, struct drm_i915_gem_exec_object2 *exec, - struct intel_context *ctx) + struct i915_gem_context *ctx) { struct drm_i915_gem_relocation_entry *reloc; struct i915_address_space *vm; @@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, } if (flush_chipset) - i915_gem_chipset_flush(req->engine->dev); + i915_gem_chipset_flush(req->engine->i915); if (flush_domains & I915_GEM_DOMAIN_GTT) wmb(); @@ -1063,17 +1063,17 @@ validate_exec_list(struct drm_device *dev, return 0; } -static struct intel_context * +static struct i915_gem_context * i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, struct intel_engine_cs *engine, const u32 ctx_id) { - struct intel_context *ctx = NULL; + struct i915_gem_context *ctx = NULL; struct i915_ctx_hang_stats *hs; if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE) return ERR_PTR(-EINVAL); - ctx = i915_gem_context_get(file->driver_priv, ctx_id); + ctx = i915_gem_context_lookup(file->driver_priv, ctx_id); if (IS_ERR(ctx)) return ctx; @@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, return ERR_PTR(-EIO); } - if (i915.enable_execlists && !ctx->engine[engine->id].state) { - int ret = intel_lr_context_deferred_alloc(ctx, engine); - if (ret) { - DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret); - return ERR_PTR(ret); - } - } - return ctx; } @@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { 
i915_gem_request_assign(&obj->last_fenced_req, req); if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, &dev_priv->mm.fence_list); } @@ -1436,7 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_object *batch_obj; struct drm_i915_gem_exec_object2 shadow_exec_entry; struct intel_engine_cs *engine; - struct intel_context *ctx; + struct i915_gem_context *ctx; struct i915_address_space *vm; struct i915_execbuffer_params params_master; /* XXX: will be removed later */ struct i915_execbuffer_params *params = &params_master; @@ -1454,7 +1446,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, dispatch_flags = 0; if (args->flags & I915_EXEC_SECURE) { - if (!file->is_master || !capable(CAP_SYS_ADMIN)) + if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) return -EPERM; dispatch_flags |= I915_DISPATCH_SECURE; @@ -1561,7 +1553,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, batch_obj, args->batch_start_offset, args->batch_len, - file->is_master); + drm_is_current_master(file)); if (IS_ERR(parsed_batch_obj)) { ret = PTR_ERR(parsed_batch_obj); goto err; diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c index a2b938ec01a7..2b6bdc267fb5 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence.c @@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page) void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + struct page *page; int i; if (obj->bit_17 == NULL) return; i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - struct page *page = sg_page_iter_page(&sg_iter); + for_each_sgt_page(page, sgt_iter, obj->pages) { char new_bit_17 = page_to_phys(page) >> 17; if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) { @@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + struct page *page; int page_count = obj->base.size >> PAGE_SHIFT; int i; @@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) } i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17)) + + for_each_sgt_page(page, sgt_iter, obj->pages) { + if (page_to_phys(page) & (1 << 17)) __set_bit(i, obj->bit_17); else __clear_bit(i, obj->bit_17); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 92acdff9dad3..5890017b9832 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -93,6 +93,13 @@ * */ +static inline struct i915_ggtt * +i915_vm_to_ggtt(struct i915_address_space *vm) +{ + GEM_BUG_ON(!i915_is_ggtt(vm)); + return container_of(vm, struct i915_ggtt, base); +} + static int i915_get_ggtt_vma_pages(struct i915_vma *vma); @@ -103,25 +110,29 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = { .type = I915_GGTT_VIEW_ROTATED, }; -static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) +int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, + int enable_ppgtt) { bool has_aliasing_ppgtt; bool has_full_ppgtt; bool has_full_48bit_ppgtt; - 
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; - has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; - has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9; + has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6; + has_full_ppgtt = INTEL_GEN(dev_priv) >= 7; + has_full_48bit_ppgtt = + IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9; - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) has_full_ppgtt = false; /* emulation is too hard */ + if (!has_aliasing_ppgtt) + return 0; + /* * We don't allow disabling PPGTT for gen9+ as it's a requirement for * execlists, the sole mechanism available to submit work. */ - if (INTEL_INFO(dev)->gen < 9 && - (enable_ppgtt == 0 || !has_aliasing_ppgtt)) + if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9) return 0; if (enable_ppgtt == 1) @@ -135,19 +146,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) #ifdef CONFIG_INTEL_IOMMU /* Disable ppgtt on SNB if VT-d is on. */ - if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { + if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) { DRM_INFO("Disabling PPGTT because VT-d is on\n"); return 0; } #endif /* Early VLV doesn't have this */ - if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { + if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) { DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); return 0; } - if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) + if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) return has_full_48bit_ppgtt ? 3 : 2; else return has_aliasing_ppgtt ? 1 : 0; @@ -866,6 +877,7 @@ static void gen8_free_page_tables(struct drm_device *dev, static int gen8_init_scratch(struct i915_address_space *vm) { struct drm_device *dev = vm->dev; + int ret; vm->scratch_page = alloc_scratch_page(dev); if (IS_ERR(vm->scratch_page)) @@ -873,24 +885,21 @@ static int gen8_init_scratch(struct i915_address_space *vm) vm->scratch_pt = alloc_pt(dev); if (IS_ERR(vm->scratch_pt)) { - free_scratch_page(dev, vm->scratch_page); - return PTR_ERR(vm->scratch_pt); + ret = PTR_ERR(vm->scratch_pt); + goto free_scratch_page; } vm->scratch_pd = alloc_pd(dev); if (IS_ERR(vm->scratch_pd)) { - free_pt(dev, vm->scratch_pt); - free_scratch_page(dev, vm->scratch_page); - return PTR_ERR(vm->scratch_pd); + ret = PTR_ERR(vm->scratch_pd); + goto free_pt; } if (USES_FULL_48BIT_PPGTT(dev)) { vm->scratch_pdp = alloc_pdp(dev); if (IS_ERR(vm->scratch_pdp)) { - free_pd(dev, vm->scratch_pd); - free_pt(dev, vm->scratch_pt); - free_scratch_page(dev, vm->scratch_page); - return PTR_ERR(vm->scratch_pdp); + ret = PTR_ERR(vm->scratch_pdp); + goto free_pd; } } @@ -900,6 +909,15 @@ static int gen8_init_scratch(struct i915_address_space *vm) gen8_initialize_pdp(vm, vm->scratch_pdp); return 0; + +free_pd: + free_pd(dev, vm->scratch_pd); +free_pt: + free_pt(dev, vm->scratch_pt); +free_scratch_page: + free_scratch_page(dev, vm->scratch_page); + + return ret; } static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) @@ -978,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - if (intel_vgpu_active(vm->dev)) + if (intel_vgpu_active(to_i915(vm->dev))) gen8_ppgtt_notify_vgt(ppgtt, false); if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) @@ -1529,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 0, 0, GEN8_PML4E_SHIFT); - if (intel_vgpu_active(ppgtt->base.dev)) { + if (intel_vgpu_active(to_i915(ppgtt->base.dev))) { ret = gen8_preallocate_top_level_pdps(ppgtt); if 
(ret) goto free_scratch; } } - if (intel_vgpu_active(ppgtt->base.dev)) + if (intel_vgpu_active(to_i915(ppgtt->base.dev))) gen8_ppgtt_notify_vgt(ppgtt, true); return 0; @@ -1821,20 +1839,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, enum i915_cache_level cache_level, u32 flags) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - gen6_pte_t *pt_vaddr; unsigned first_entry = start >> PAGE_SHIFT; unsigned act_pt = first_entry / GEN6_PTES; unsigned act_pte = first_entry % GEN6_PTES; - struct sg_page_iter sg_iter; + gen6_pte_t *pt_vaddr = NULL; + struct sgt_iter sgt_iter; + dma_addr_t addr; - pt_vaddr = NULL; - for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { + for_each_sgt_dma(addr, sgt_iter, pages) { if (pt_vaddr == NULL) pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]); pt_vaddr[act_pte] = - vm->pte_encode(sg_page_iter_dma_address(&sg_iter), - cache_level, true, flags); + vm->pte_encode(addr, cache_level, true, flags); if (++act_pte == GEN6_PTES) { kunmap_px(ppgtt, pt_vaddr); @@ -1843,6 +1860,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, act_pte = 0; } } + if (pt_vaddr) kunmap_px(ppgtt, pt_vaddr); } @@ -2064,7 +2082,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) } else BUG(); - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) ppgtt->switch_mm = vgpu_mm_switch; ret = gen6_ppgtt_alloc(ppgtt); @@ -2140,7 +2158,7 @@ static void gtt_write_workarounds(struct drm_device *dev) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); } -int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) +static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) { struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; @@ -2179,20 +2197,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev) return 0; } -int i915_ppgtt_init_ring(struct drm_i915_gem_request *req) -{ - struct drm_i915_private *dev_priv = req->i915; - struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - - if (i915.enable_execlists) - return 0; - - if (!ppgtt) - return 0; - - return ppgtt->switch_mm(ppgtt, req); -} - struct i915_hw_ppgtt * i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) { @@ -2275,12 +2279,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) dev_priv->mm.interruptible = interruptible; } -void i915_check_and_clear_faults(struct drm_device *dev) +void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_INFO(dev_priv)->gen < 6) return; for_each_engine(engine, dev_priv) { @@ -2324,7 +2327,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 6) return; - i915_check_and_clear_faults(dev); + i915_check_and_clear_faults(dev_priv); ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, true); @@ -2352,29 +2355,49 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) #endif } +static void gen8_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + uint64_t offset, + enum i915_cache_level level, + u32 unused) +{ + struct drm_i915_private *dev_priv = to_i915(vm->dev); + gen8_pte_t __iomem *pte = + (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + + (offset >> PAGE_SHIFT); + int rpm_atomic_seq; + + rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); + + gen8_set_pte(pte, gen8_pte_encode(addr, level, true)); + 
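+	/*
+	 * As in the insert_entries paths below, the chipset flush that
+	 * follows makes the single PTE update visible to the GPU.
+	 */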
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); + + assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); +} + static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, uint64_t start, enum i915_cache_level level, u32 unused) { struct drm_i915_private *dev_priv = to_i915(vm->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - unsigned first_entry = start >> PAGE_SHIFT; - gen8_pte_t __iomem *gtt_entries = - (gen8_pte_t __iomem *)ggtt->gsm + first_entry; - int i = 0; - struct sg_page_iter sg_iter; - dma_addr_t addr = 0; /* shut up gcc */ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + struct sgt_iter sgt_iter; + gen8_pte_t __iomem *gtt_entries; + gen8_pte_t gtt_entry; + dma_addr_t addr; int rpm_atomic_seq; + int i = 0; rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); - for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { - addr = sg_dma_address(sg_iter.sg) + - (sg_iter.sg_pgoffset << PAGE_SHIFT); - gen8_set_pte(&gtt_entries[i], - gen8_pte_encode(addr, level, true)); - i++; + gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT); + + for_each_sgt_dma(addr, sgt_iter, st) { + gtt_entry = gen8_pte_encode(addr, level, true); + gen8_set_pte(&gtt_entries[i++], gtt_entry); } /* @@ -2385,8 +2408,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, * hardware should work, we must keep this posting read for paranoia. */ if (i != 0) - WARN_ON(readq(&gtt_entries[i-1]) - != gen8_pte_encode(addr, level, true)); + WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry); /* This next bit makes the above posting read even more important. We * want to flush the TLBs only after we're certain all the PTE updates @@ -2424,6 +2446,28 @@ static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm, stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL); } +static void gen6_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + uint64_t offset, + enum i915_cache_level level, + u32 flags) +{ + struct drm_i915_private *dev_priv = to_i915(vm->dev); + gen6_pte_t __iomem *pte = + (gen6_pte_t __iomem *)dev_priv->ggtt.gsm + + (offset >> PAGE_SHIFT); + int rpm_atomic_seq; + + rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); + + iowrite32(vm->pte_encode(addr, level, true, flags), pte); + + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); + + assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); +} + /* * Binds an object into the global gtt with the specified cache level. 
The object * will be accessible to the GPU via commands whose operands reference offsets @@ -2436,21 +2480,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, enum i915_cache_level level, u32 flags) { struct drm_i915_private *dev_priv = to_i915(vm->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - unsigned first_entry = start >> PAGE_SHIFT; - gen6_pte_t __iomem *gtt_entries = - (gen6_pte_t __iomem *)ggtt->gsm + first_entry; - int i = 0; - struct sg_page_iter sg_iter; - dma_addr_t addr = 0; + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + struct sgt_iter sgt_iter; + gen6_pte_t __iomem *gtt_entries; + gen6_pte_t gtt_entry; + dma_addr_t addr; int rpm_atomic_seq; + int i = 0; rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); - for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { - addr = sg_page_iter_dma_address(&sg_iter); - iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]); - i++; + gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT); + + for_each_sgt_dma(addr, sgt_iter, st) { + gtt_entry = vm->pte_encode(addr, level, true, flags); + iowrite32(gtt_entry, &gtt_entries[i++]); } /* XXX: This serves as a posting read to make sure that the PTE has @@ -2459,10 +2503,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, * of NUMA access patterns. Therefore, even with the way we assume * hardware should work, we must keep this posting read for paranoia. */ - if (i != 0) { - unsigned long gtt = readl(&gtt_entries[i-1]); - WARN_ON(gtt != vm->pte_encode(addr, level, true, flags)); - } + if (i != 0) + WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry); /* This next bit makes the above posting read even more important. We * want to flush the TLBs only after we're certain all the PTE updates @@ -2474,13 +2516,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); } +static void nop_clear_range(struct i915_address_space *vm, + uint64_t start, + uint64_t length, + bool use_scratch) +{ +} + static void gen8_ggtt_clear_range(struct i915_address_space *vm, uint64_t start, uint64_t length, bool use_scratch) { struct drm_i915_private *dev_priv = to_i915(vm->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; gen8_pte_t scratch_pte, __iomem *gtt_base = @@ -2512,7 +2561,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, bool use_scratch) { struct drm_i915_private *dev_priv = to_i915(vm->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; gen6_pte_t scratch_pte, __iomem *gtt_base = @@ -2538,6 +2587,24 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); } +static void i915_ggtt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + uint64_t offset, + enum i915_cache_level cache_level, + u32 unused) +{ + struct drm_i915_private *dev_priv = to_i915(vm->dev); + unsigned int flags = (cache_level == I915_CACHE_NONE) ? 
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; + int rpm_atomic_seq; + + rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); + + intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); + + assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); +} + static void i915_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *pages, uint64_t start, @@ -2727,11 +2794,9 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, i915_address_space_init(&ggtt->base, dev_priv); ggtt->base.total += PAGE_SIZE; - if (intel_vgpu_active(dev)) { - ret = intel_vgt_balloon(dev); - if (ret) - return ret; - } + ret = intel_vgt_balloon(dev_priv); + if (ret) + return ret; if (!HAS_LLC(dev)) ggtt->base.mm.color_adjust = i915_gtt_color_adjust; @@ -2831,8 +2896,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev) i915_gem_cleanup_stolen(dev); if (drm_mm_initialized(&ggtt->base.mm)) { - if (intel_vgpu_active(dev)) - intel_vgt_deballoon(); + intel_vgt_deballoon(dev_priv); drm_mm_takedown(&ggtt->base.mm); list_del(&ggtt->base.global_link); @@ -3069,13 +3133,16 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ret = ggtt_probe_common(dev, ggtt->size); - ggtt->base.clear_range = gen8_ggtt_clear_range; - if (IS_CHERRYVIEW(dev_priv)) - ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL; - else - ggtt->base.insert_entries = gen8_ggtt_insert_entries; ggtt->base.bind_vma = ggtt_bind_vma; ggtt->base.unbind_vma = ggtt_unbind_vma; + ggtt->base.insert_page = gen8_ggtt_insert_page; + ggtt->base.clear_range = nop_clear_range; + if (!USES_FULL_PPGTT(dev_priv)) + ggtt->base.clear_range = gen8_ggtt_clear_range; + + ggtt->base.insert_entries = gen8_ggtt_insert_entries; + if (IS_CHERRYVIEW(dev_priv)) + ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL; return ret; } @@ -3108,6 +3175,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) ret = ggtt_probe_common(dev, ggtt->size); ggtt->base.clear_range = gen6_ggtt_clear_range; + ggtt->base.insert_page = gen6_ggtt_insert_page; ggtt->base.insert_entries = gen6_ggtt_insert_entries; ggtt->base.bind_vma = ggtt_bind_vma; ggtt->base.unbind_vma = ggtt_unbind_vma; @@ -3139,6 +3207,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) &ggtt->mappable_base, &ggtt->mappable_end); ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev); + ggtt->base.insert_page = i915_ggtt_insert_page; ggtt->base.insert_entries = i915_ggtt_insert_entries; ggtt->base.clear_range = i915_ggtt_clear_range; ggtt->base.bind_vma = ggtt_bind_vma; @@ -3219,14 +3288,6 @@ int i915_ggtt_init_hw(struct drm_device *dev) if (intel_iommu_gfx_mapped) DRM_INFO("VT-d active for gfx access\n"); #endif - /* - * i915.enable_ppgtt is read-only, so do an early pass to validate the - * user's requested state against the hardware/driver capabilities. We - * do this now so that we can print out any log messages once rather - * than every time we check intel_enable_ppgtt(). 
- */ - i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt); - DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); return 0; @@ -3250,9 +3311,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_object *obj; struct i915_vma *vma; - bool flush; - i915_check_and_clear_faults(dev); + i915_check_and_clear_faults(dev_priv); /* First fill our portion of the GTT with scratch pages */ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, @@ -3260,19 +3320,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) /* Cache flush objects bound into GGTT and rebind them. */ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - flush = false; list_for_each_entry(vma, &obj->vma_list, obj_link) { if (vma->vm != &ggtt->base) continue; WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE)); - - flush = true; } - if (flush) - i915_gem_clflush_object(obj, obj->pin_display); + if (obj->pin_display) + WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); } if (INTEL_INFO(dev)->gen >= 8) { @@ -3398,9 +3455,11 @@ static struct sg_table * intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, struct drm_i915_gem_object *obj) { + const size_t n_pages = obj->base.size / PAGE_SIZE; unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height; unsigned int size_pages_uv; - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + dma_addr_t dma_addr; unsigned long i; dma_addr_t *page_addr_list; struct sg_table *st; @@ -3409,7 +3468,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, int ret = -ENOMEM; /* Allocate a temporary list of source pages for random access. */ - page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE, + page_addr_list = drm_malloc_gfp(n_pages, sizeof(dma_addr_t), GFP_TEMPORARY); if (!page_addr_list) @@ -3432,11 +3491,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, /* Populate source page list from the object. 
*/ i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - page_addr_list[i] = sg_page_iter_dma_address(&sg_iter); - i++; - } + for_each_sgt_dma(dma_addr, sgt_iter, obj->pages) + page_addr_list[i++] = dma_addr; + GEM_BUG_ON(i != n_pages); st->nents = 0; sg = st->sgl; @@ -3634,3 +3692,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj, return obj->base.size; } } + +void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) +{ + void __iomem *ptr; + + lockdep_assert_held(&vma->vm->dev->struct_mutex); + if (WARN_ON(!vma->obj->map_and_fenceable)) + return ERR_PTR(-ENODEV); + + GEM_BUG_ON(!vma->is_ggtt); + GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0); + + ptr = vma->iomap; + if (ptr == NULL) { + ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable, + vma->node.start, + vma->node.size); + if (ptr == NULL) + return ERR_PTR(-ENOMEM); + + vma->iomap = ptr; + } + + vma->pin_count++; + return ptr; +} diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 0008543d55f6..163b564fb87d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -34,6 +34,8 @@ #ifndef __I915_GEM_GTT_H__ #define __I915_GEM_GTT_H__ +#include <linux/io-mapping.h> + struct drm_i915_file_private; typedef uint32_t gen6_pte_t; @@ -175,6 +177,7 @@ struct i915_vma { struct drm_mm_node node; struct drm_i915_gem_object *obj; struct i915_address_space *vm; + void __iomem *iomap; /** Flags and address space this VMA is bound to */ #define GLOBAL_BIND (1<<0) @@ -316,6 +319,11 @@ struct i915_address_space { uint64_t start, uint64_t length, bool use_scratch); + void (*insert_page)(struct i915_address_space *vm, + dma_addr_t addr, + uint64_t offset, + enum i915_cache_level cache_level, + u32 flags); void (*insert_entries)(struct i915_address_space *vm, struct sg_table *st, uint64_t start, @@ -518,9 +526,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev); void i915_gem_init_ggtt(struct drm_device *dev); void i915_ggtt_cleanup_hw(struct drm_device *dev); -int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); int i915_ppgtt_init_hw(struct drm_device *dev); -int i915_ppgtt_init_ring(struct drm_i915_gem_request *req); void i915_ppgtt_release(struct kref *kref); struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv); @@ -535,7 +541,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) kref_put(&ppgtt->ref, i915_ppgtt_release); } -void i915_check_and_clear_faults(struct drm_device *dev); +void i915_check_and_clear_faults(struct drm_i915_private *dev_priv); void i915_gem_suspend_gtt_mappings(struct drm_device *dev); void i915_gem_restore_gtt_mappings(struct drm_device *dev); @@ -560,4 +566,36 @@ size_t i915_ggtt_view_size(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view); +/** + * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture + * @vma: VMA to iomap + * + * The passed in VMA has to be pinned in the global GTT mappable region. + * An extra pinning of the VMA is acquired for the return iomapping, + * the caller must call i915_vma_unpin_iomap to relinquish the pinning + * after the iomapping is no longer required. + * + * Callers must hold the struct_mutex. + * + * Returns a valid iomapped pointer or ERR_PTR. 
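+ *
+ * A minimal usage sketch (hypothetical caller; error handling elided):
+ *
+ *	void __iomem *ptr;
+ *
+ *	ptr = i915_vma_pin_iomap(vma);
+ *	if (IS_ERR(ptr))
+ *		return PTR_ERR(ptr);
+ *
+ *	memcpy_toio(ptr + offset, data, len);
+ *	i915_vma_unpin_iomap(vma);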
+ */ +void __iomem *i915_vma_pin_iomap(struct i915_vma *vma); + +/** + * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap + * @vma: VMA to unpin + * + * Unpins the previously iomapped VMA from i915_vma_pin_iomap(). + * + * Callers must hold the struct_mutex. This function is only valid to be + * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap(). + */ +static inline void i915_vma_unpin_iomap(struct i915_vma *vma) +{ + lockdep_assert_held(&vma->vm->dev->struct_mutex); + GEM_BUG_ON(vma->pin_count == 0); + GEM_BUG_ON(vma->iomap == NULL); + vma->pin_count--; +} + #endif diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 71611bf21fca..b7c1b5fb61ea 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -29,7 +29,7 @@ #include "intel_renderstate.h" static const struct intel_renderstate_rodata * -render_state_get_rodata(struct drm_device *dev, const int gen) +render_state_get_rodata(const int gen) { switch (gen) { case 6: @@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen) return NULL; } -static int render_state_init(struct render_state *so, struct drm_device *dev) +static int render_state_init(struct render_state *so, + struct drm_i915_private *dev_priv) { int ret; - so->gen = INTEL_INFO(dev)->gen; - so->rodata = render_state_get_rodata(dev, so->gen); + so->gen = INTEL_GEN(dev_priv); + so->rodata = render_state_get_rodata(so->gen); if (so->rodata == NULL) return 0; if (so->rodata->batch_items * 4 > 4096) return -EINVAL; - so->obj = i915_gem_alloc_object(dev, 4096); - if (so->obj == NULL) - return -ENOMEM; + so->obj = i915_gem_object_create(dev_priv->dev, 4096); + if (IS_ERR(so->obj)) + return PTR_ERR(so->obj); ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); if (ret) @@ -93,6 +94,7 @@ free_gem: static int render_state_setup(struct render_state *so) { + struct drm_device *dev = so->obj->base.dev; const struct intel_renderstate_rodata *rodata = so->rodata; unsigned int i = 0, reloc_index = 0; struct page *page; @@ -134,6 +136,33 @@ static int render_state_setup(struct render_state *so) so->aux_batch_offset = i * sizeof(u32); + if (HAS_POOLED_EU(dev)) { + /* + * We always program the 3x6 pool config, but depending upon which + * subslice is disabled the HW drops down to the appropriate config + * shown below. 
+ * + * In the below table 2x6 config always refers to + * fused-down version, native 2x6 is not available and can + * be ignored + * + * SNo subslices config eu pool configuration + * ----------------------------------------------------------- + * 1 3 subslices enabled (3x6) - 0x00777000 (9+9) + * 2 ss0 disabled (2x6) - 0x00777000 (3+9) + * 3 ss1 disabled (2x6) - 0x00770000 (6+6) + * 4 ss2 disabled (2x6) - 0x00007000 (9+3) + */ + u32 eu_pool_config = 0x00777000; + + OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE); + OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE); + OUT_BATCH(d, i, eu_pool_config); + OUT_BATCH(d, i, 0); + OUT_BATCH(d, i, 0); + OUT_BATCH(d, i, 0); + } + OUT_BATCH(d, i, MI_BATCH_BUFFER_END); so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset; @@ -177,7 +206,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine, if (WARN_ON(engine->id != RCS)) return -ENOENT; - ret = render_state_init(so, engine->dev); + ret = render_state_init(so, engine->i915); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 425e721aac58..538c30499848 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, unsigned long count = 0; trace_i915_gem_shrink(dev_priv, target, flags); - i915_gem_retire_requests(dev_priv->dev); + i915_gem_retire_requests(dev_priv); + + /* + * Unbinding of objects will require HW access; Let us not wake the + * device just to recover a little memory. If absolutely necessary, + * we will force the wake during oom-notifier. + */ + if ((flags & I915_SHRINK_BOUND) && + !intel_runtime_pm_get_if_in_use(dev_priv)) + flags &= ~I915_SHRINK_BOUND; /* * As we may completely rewrite the (un)bound list whilst unbinding @@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, list_splice(&still_in_list, phase->list); } - i915_gem_retire_requests(dev_priv->dev); + if (flags & I915_SHRINK_BOUND) + intel_runtime_pm_put(dev_priv); + + i915_gem_retire_requests(dev_priv); return count; } @@ -345,7 +357,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) return NOTIFY_DONE; + intel_runtime_pm_get(dev_priv); freed_pages = i915_gem_shrink_all(dev_priv); + intel_runtime_pm_put(dev_priv); /* Because we may be allocating inside our own driver, we cannot * assert that there are no objects with pinned pages that are not @@ -386,17 +400,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr struct drm_i915_private *dev_priv = container_of(nb, struct drm_i915_private, mm.vmap_notifier); struct shrinker_lock_uninterruptible slu; - unsigned long freed_pages; + struct i915_vma *vma, *next; + unsigned long freed_pages = 0; + int ret; if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) return NOTIFY_DONE; - freed_pages = i915_gem_shrink(dev_priv, -1UL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_ACTIVE | - I915_SHRINK_VMAPS); + /* Force everything onto the inactive lists */ + ret = i915_gpu_idle(dev_priv->dev); + if (ret) + goto out; + + intel_runtime_pm_get(dev_priv); + freed_pages += i915_gem_shrink(dev_priv, -1UL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_ACTIVE | + I915_SHRINK_VMAPS); + intel_runtime_pm_put(dev_priv); + + /* We also want to clear any cached iomaps as they wrap vmap */ + list_for_each_entry_safe(vma, 
next, + &dev_priv->ggtt.base.inactive_list, vm_link) { + unsigned long count = vma->node.size >> PAGE_SHIFT; + if (vma->iomap && i915_vma_unbind(vma) == 0) + freed_pages += count; + } +out: i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); *(unsigned long *)ptr += freed_pages; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index b7ce963fb8f8..e9cd82290408 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -55,8 +55,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, return -ENODEV; /* See the comment at the drm_mm_init() call for more about this check. - * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */ - if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096) + * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete) + */ + if (start < 4096 && (IS_GEN8(dev_priv) || + IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0))) start = 4096; mutex_lock(&dev_priv->mm.stolen_lock); @@ -109,9 +111,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 3) { u32 bsm; - pci_read_config_dword(dev->pdev, BSM, &bsm); + pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm); - base = bsm & BSM_MASK; + base = bsm & INTEL_BSM_MASK; } else if (IS_I865G(dev)) { u16 toud = 0; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index b9bdb34032cd..a6eb5c47a49c 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) if (INTEL_INFO(obj->base.dev)->gen >= 4) return true; - if (INTEL_INFO(obj->base.dev)->gen == 3) { + if (IS_GEN3(obj->base.dev)) { if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) return false; } else { @@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, */ if (obj->map_and_fenceable && !i915_gem_object_fence_ok(obj, args->tiling_mode)) - ret = i915_gem_object_ggtt_unbind(obj); + ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); if (ret == 0) { if (obj->pages && diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 32d9726e38b1..2314c88323e3 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) static void i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) { - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + struct page *page; BUG_ON(obj->userptr.work != NULL); __i915_gem_userptr_set_active(obj, false); @@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) i915_gem_gtt_finish_object(obj); - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - struct page *page = sg_page_iter_page(&sg_iter); - + for_each_sgt_page(page, sgt_iter, obj->pages) { if (obj->dirty) set_page_dirty(page); @@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file return 0; } -int -i915_gem_init_userptr(struct drm_device *dev) +void i915_gem_init_userptr(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); mutex_init(&dev_priv->mm_lock); hash_init(dev_priv->mm_structs); - return 0; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 89725c9efc25..34ff2459ceea 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c 
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); } - if (INTEL_INFO(dev)->gen == 7) + if (IS_GEN7(dev)) err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); for (i = 0; i < ARRAY_SIZE(error->ring); i++) @@ -824,19 +824,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, return error_code; } -static void i915_gem_record_fences(struct drm_device *dev, +static void i915_gem_record_fences(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { - struct drm_i915_private *dev_priv = dev->dev_private; int i; - if (IS_GEN3(dev) || IS_GEN2(dev)) { + if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) { for (i = 0; i < dev_priv->num_fence_regs; i++) error->fence[i] = I915_READ(FENCE_REG(i)); - } else if (IS_GEN5(dev) || IS_GEN4(dev)) { + } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) { for (i = 0; i < dev_priv->num_fence_regs; i++) error->fence[i] = I915_READ64(FENCE_REG_965_LO(i)); - } else if (INTEL_INFO(dev)->gen >= 6) { + } else if (INTEL_GEN(dev_priv) >= 6) { for (i = 0; i < dev_priv->num_fence_regs; i++) error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i)); } @@ -851,7 +850,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv, struct intel_engine_cs *to; enum intel_engine_id id; - if (!i915_semaphore_is_enabled(dev_priv->dev)) + if (!i915_semaphore_is_enabled(dev_priv)) return; if (!error->semaphore_obj) @@ -893,31 +892,29 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv, } } -static void i915_record_ring_state(struct drm_device *dev, +static void i915_record_ring_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, struct intel_engine_cs *engine, struct drm_i915_error_ring *ering) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_GEN(dev_priv) >= 6) { ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); ering->fault_reg = I915_READ(RING_FAULT_REG(engine)); - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) gen8_record_semaphore_state(dev_priv, error, engine, ering); else gen6_record_semaphore_state(dev_priv, engine, ering); } - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base)); ering->instps = I915_READ(RING_INSTPS(engine->mmio_base)); ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; } @@ -939,10 +936,10 @@ static void i915_record_ring_state(struct drm_device *dev, ering->tail = I915_READ_TAIL(engine); ering->ctl = I915_READ_CTL(engine); - if (I915_NEED_GFX_HWS(dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { i915_reg_t mmio; - if (IS_GEN7(dev)) { + if (IS_GEN7(dev_priv)) { switch (engine->id) { default: case RCS: @@ -958,7 +955,7 @@ static void i915_record_ring_state(struct drm_device *dev, mmio = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN6(engine->dev)) { + } else if (IS_GEN6(engine->i915)) { mmio = RING_HWS_PGA_GEN6(engine->mmio_base); } else { /* XXX: gen8 returns to sanity */ @@ 
-971,18 +968,18 @@ static void i915_record_ring_state(struct drm_device *dev, ering->hangcheck_score = engine->hangcheck.score; ering->hangcheck_action = engine->hangcheck.action; - if (USES_PPGTT(dev)) { + if (USES_PPGTT(dev_priv)) { int i; ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); - if (IS_GEN6(dev)) + if (IS_GEN6(dev_priv)) ering->vm_info.pp_dir_base = I915_READ(RING_PP_DIR_BASE_READ(engine)); - else if (IS_GEN7(dev)) + else if (IS_GEN7(dev_priv)) ering->vm_info.pp_dir_base = I915_READ(RING_PP_DIR_BASE(engine)); - else if (INTEL_INFO(dev)->gen >= 8) + else if (INTEL_GEN(dev_priv) >= 8) for (i = 0; i < 4; i++) { ering->vm_info.pdp[i] = I915_READ(GEN8_RING_PDP_UDW(engine, i)); @@ -998,7 +995,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine, struct drm_i915_error_state *error, struct drm_i915_error_ring *ering) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_gem_object *obj; /* Currently render ring is the only HW context user */ @@ -1016,10 +1013,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine, } } -static void i915_gem_record_rings(struct drm_device *dev, +static void i915_gem_record_rings(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_request *request; int i, count; @@ -1030,12 +1026,12 @@ static void i915_gem_record_rings(struct drm_device *dev, error->ring[i].pid = -1; - if (engine->dev == NULL) + if (!intel_engine_initialized(engine)) continue; error->ring[i].valid = true; - i915_record_ring_state(dev, error, engine, &error->ring[i]); + i915_record_ring_state(dev_priv, error, engine, &error->ring[i]); request = i915_gem_find_active_request(engine); if (request) { @@ -1301,15 +1297,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); - i915_get_extra_instdone(dev, error->extra_instdone); + i915_get_extra_instdone(dev_priv, error->extra_instdone); } -static void i915_error_capture_msg(struct drm_device *dev, +static void i915_error_capture_msg(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, u32 engine_mask, const char *error_msg) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 ecode; int ring_id = -1, len; @@ -1317,7 +1312,7 @@ static void i915_error_capture_msg(struct drm_device *dev, len = scnprintf(error->error_msg, sizeof(error->error_msg), "GPU HANG: ecode %d:%d:0x%08x", - INTEL_INFO(dev)->gen, ring_id, ecode); + INTEL_GEN(dev_priv), ring_id, ecode); if (ring_id != -1 && error->ring[ring_id].pid != -1) len += scnprintf(error->error_msg + len, @@ -1352,11 +1347,11 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv, * out a structure which becomes available in debugfs for user level tools * to pick up. 
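 *
 * The resulting dump is exposed as /sys/class/drm/card<N>/error (see the
 * DRM_INFO message below) and can be saved for analysis with, e.g.:
 *
 *	cat /sys/class/drm/card0/error > gpu-hang.txt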
*/ -void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, +void i915_capture_error_state(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *error_msg) { static bool warned; - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_error_state *error; unsigned long flags; @@ -1372,15 +1367,15 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, i915_capture_gen_state(dev_priv, error); i915_capture_reg_state(dev_priv, error); i915_gem_capture_buffers(dev_priv, error); - i915_gem_record_fences(dev, error); - i915_gem_record_rings(dev, error); + i915_gem_record_fences(dev_priv, error); + i915_gem_record_rings(dev_priv, error); do_gettimeofday(&error->time); - error->overlay = intel_overlay_capture_error_state(dev); - error->display = intel_display_capture_error_state(dev); + error->overlay = intel_overlay_capture_error_state(dev_priv); + error->display = intel_display_capture_error_state(dev_priv); - i915_error_capture_msg(dev, error, engine_mask, error_msg); + i915_error_capture_msg(dev_priv, error, engine_mask, error_msg); DRM_INFO("%s\n", error->error_msg); spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); @@ -1400,7 +1395,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); - DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index); + DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index); warned = true; } } @@ -1450,17 +1445,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) } /* NB: please notice the memset */ -void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) +void i915_get_extra_instdone(struct drm_i915_private *dev_priv, + uint32_t *instdone) { - struct drm_i915_private *dev_priv = dev->dev_private; memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); - if (IS_GEN2(dev) || IS_GEN3(dev)) + if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv)) instdone[0] = I915_READ(GEN2_INSTDONE); - else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { + else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) { instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); instdone[1] = I915_READ(GEN4_INSTDONE1); - } else if (INTEL_INFO(dev)->gen >= 7) { + } else if (INTEL_GEN(dev_priv) >= 7) { instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); instdone[1] = I915_READ(GEN7_SC_INSTDONE); instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h index 80786d9f9ad3..cf5a65be4fe0 100644 --- a/drivers/gpu/drm/i915/i915_guc_reg.h +++ b/drivers/gpu/drm/i915/i915_guc_reg.h @@ -67,11 +67,11 @@ #define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ #define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4) +/* Defines WOPCM space available to GuC firmware */ #define GUC_WOPCM_SIZE _MMIO(0xc050) -#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */ - /* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ -#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) +#define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */ +#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */ #define GEN8_GT_PM_CONFIG _MMIO(0x138140) #define GEN9LP_GT_PM_CONFIG 
_MMIO(0x138140) diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index d40c13fb6643..22a55ac4e51c 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -158,8 +158,7 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; /* WaRsDisableCoarsePowerGating:skl,bxt */ - if (!intel_enable_rc6(dev) || - NEEDS_WaRsDisableCoarsePowerGating(dev)) + if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev)) data[1] = 0; else /* bit 0 and 1 are for Render and Media domain separately */ @@ -175,94 +174,88 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, * client object which contains the page being used for the doorbell */ -static void guc_init_doorbell(struct intel_guc *guc, - struct i915_guc_client *client) +static int guc_update_doorbell_id(struct intel_guc *guc, + struct i915_guc_client *client, + u16 new_id) { + struct sg_table *sg = guc->ctx_pool_obj->pages; + void *doorbell_bitmap = guc->doorbell_bitmap; struct guc_doorbell_info *doorbell; + struct guc_context_desc desc; + size_t len; doorbell = client->client_base + client->doorbell_offset; - doorbell->db_status = GUC_DOORBELL_ENABLED; - doorbell->cookie = 0; -} - -static int guc_ring_doorbell(struct i915_guc_client *gc) -{ - struct guc_process_desc *desc; - union guc_doorbell_qw db_cmp, db_exc, db_ret; - union guc_doorbell_qw *db; - int attempt = 2, ret = -EAGAIN; - - desc = gc->client_base + gc->proc_desc_offset; - - /* Update the tail so it is visible to GuC */ - desc->tail = gc->wq_tail; - - /* current cookie */ - db_cmp.db_status = GUC_DOORBELL_ENABLED; - db_cmp.cookie = gc->cookie; - - /* cookie to be updated */ - db_exc.db_status = GUC_DOORBELL_ENABLED; - db_exc.cookie = gc->cookie + 1; - if (db_exc.cookie == 0) - db_exc.cookie = 1; - - /* pointer of current doorbell cacheline */ - db = gc->client_base + gc->doorbell_offset; - - while (attempt--) { - /* lets ring the doorbell */ - db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db, - db_cmp.value_qw, db_exc.value_qw); - - /* if the exchange was successfully executed */ - if (db_ret.value_qw == db_cmp.value_qw) { - /* db was successfully rung */ - gc->cookie = db_exc.cookie; - ret = 0; - break; - } + if (client->doorbell_id != GUC_INVALID_DOORBELL_ID && + test_bit(client->doorbell_id, doorbell_bitmap)) { + /* Deactivate the old doorbell */ + doorbell->db_status = GUC_DOORBELL_DISABLED; + (void)host2guc_release_doorbell(guc, client); + __clear_bit(client->doorbell_id, doorbell_bitmap); + } - /* XXX: doorbell was lost and need to acquire it again */ - if (db_ret.db_status == GUC_DOORBELL_DISABLED) - break; + /* Update the GuC's idea of the doorbell ID */ + len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), + sizeof(desc) * client->ctx_index); + if (len != sizeof(desc)) + return -EFAULT; + desc.db_id = new_id; + len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), + sizeof(desc) * client->ctx_index); + if (len != sizeof(desc)) + return -EFAULT; - DRM_ERROR("Cookie mismatch. 
Expected %d, returned %d\n", - db_cmp.cookie, db_ret.cookie); + client->doorbell_id = new_id; + if (new_id == GUC_INVALID_DOORBELL_ID) + return 0; - /* update the cookie to newly read cookie from GuC */ - db_cmp.cookie = db_ret.cookie; - db_exc.cookie = db_ret.cookie + 1; - if (db_exc.cookie == 0) - db_exc.cookie = 1; - } + /* Activate the new doorbell */ + __set_bit(new_id, doorbell_bitmap); + doorbell->cookie = 0; + doorbell->db_status = GUC_DOORBELL_ENABLED; + return host2guc_allocate_doorbell(guc, client); +} - return ret; +static int guc_init_doorbell(struct intel_guc *guc, + struct i915_guc_client *client, + uint16_t db_id) +{ + return guc_update_doorbell_id(guc, client, db_id); } static void guc_disable_doorbell(struct intel_guc *guc, struct i915_guc_client *client) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct guc_doorbell_info *doorbell; - i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id); - int value; - - doorbell = client->client_base + client->doorbell_offset; + (void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID); - doorbell->db_status = GUC_DOORBELL_DISABLED; + /* XXX: wait for any interrupts */ + /* XXX: wait for workqueue to drain */ +} - I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID); +static uint16_t +select_doorbell_register(struct intel_guc *guc, uint32_t priority) +{ + /* + * The bitmap tracks which doorbell registers are currently in use. + * It is split into two halves; the first half is used for normal + * priority contexts, the second half for high-priority ones. + * Note that logically higher priorities are numerically less than + * normal ones, so the test below means "is it high-priority?" + */ + const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH); + const uint16_t half = GUC_MAX_DOORBELLS / 2; + const uint16_t start = hi_pri ? half : 0; + const uint16_t end = start + half; + uint16_t id; - value = I915_READ(drbreg); - WARN_ON((value & GEN8_DRB_VALID) != 0); + id = find_next_zero_bit(guc->doorbell_bitmap, end, start); + if (id == end) + id = GUC_INVALID_DOORBELL_ID; - I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0); - I915_WRITE(drbreg, 0); + DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n", + hi_pri ? "high" : "normal", id); - /* XXX: wait for any interrupts */ - /* XXX: wait for workqueue to drain */ + return id; } /* @@ -289,37 +282,6 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc) return offset; } -static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority) -{ - /* - * The bitmap is split into two halves; the first half is used for - * normal priority contexts, the second half for high-priority ones. - * Note that logically higher priorities are numerically less than - * normal ones, so the test below means "is it high-priority?" - */ - const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH); - const uint16_t half = GUC_MAX_DOORBELLS / 2; - const uint16_t start = hi_pri ? half : 0; - const uint16_t end = start + half; - uint16_t id; - - id = find_next_zero_bit(guc->doorbell_bitmap, end, start); - if (id == end) - id = GUC_INVALID_DOORBELL_ID; - else - bitmap_set(guc->doorbell_bitmap, id, 1); - - DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n", - hi_pri ? "high" : "normal", id); - - return id; -} - -static void release_doorbell(struct intel_guc *guc, uint16_t id) -{ - bitmap_clear(guc->doorbell_bitmap, id, 1); -} - /* * Initialise the process descriptor shared with the GuC firmware. 
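/*
 * Editorial sketch (not part of the patch): a standalone model of the
 * halved-bitmap scan in select_doorbell_register() above. Normal-priority
 * clients draw IDs from the first half of the space, high-priority ones
 * from the second half, and unlike the removed assign_doorbell() the bit
 * is no longer set here; activation marks it busy later. All names and
 * sizes are illustrative. Builds with: cc -std=c11 select_db.c
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_DOORBELLS	256
#define INVALID_DB	0xffff
#define PRIO_HIGH	1	/* numerically lower than normal */

static uint8_t bitmap[MAX_DOORBELLS / 8];

static int test_bit(unsigned int b)
{
	return (bitmap[b / 8] >> (b % 8)) & 1;
}

static uint16_t select_db(unsigned int prio)
{
	uint16_t half = MAX_DOORBELLS / 2;
	uint16_t start = (prio <= PRIO_HIGH) ? half : 0;

	for (uint16_t id = start; id < start + half; id++)
		if (!test_bit(id))
			return id;	/* caller sets the bit on activation */
	return INVALID_DB;
}

int main(void)
{
	printf("normal -> %u, high -> %u\n",
	       (unsigned int)select_db(2), (unsigned int)select_db(PRIO_HIGH));
	return 0;
}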
*/ @@ -361,10 +323,9 @@ static void guc_init_ctx_desc(struct intel_guc *guc, struct drm_i915_gem_object *client_obj = client->client_obj; struct drm_i915_private *dev_priv = guc_to_i915(guc); struct intel_engine_cs *engine; - struct intel_context *ctx = client->owner; + struct i915_gem_context *ctx = client->owner; struct guc_context_desc desc; struct sg_table *sg; - enum intel_engine_id id; u32 gfx_addr; memset(&desc, 0, sizeof(desc)); @@ -374,10 +335,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc, desc.priority = client->priority; desc.db_id = client->doorbell_id; - for_each_engine_id(engine, dev_priv, id) { + for_each_engine(engine, dev_priv) { + struct intel_context *ce = &ctx->engine[engine->id]; struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id]; struct drm_i915_gem_object *obj; - uint64_t ctx_desc; /* TODO: We have a design issue to be solved here. Only when we * receive the first batch, we know which engine is used by the @@ -386,20 +347,18 @@ static void guc_init_ctx_desc(struct intel_guc *guc, * for now who owns a GuC client. But for future owner of GuC * client, need to make sure lrc is pinned prior to enter here. */ - obj = ctx->engine[id].state; - if (!obj) + if (!ce->state) break; /* XXX: continue? */ - ctx_desc = intel_lr_context_descriptor(ctx, engine); - lrc->context_desc = (u32)ctx_desc; + lrc->context_desc = lower_32_bits(ce->lrc_desc); /* The state page is after PPHWSP */ - gfx_addr = i915_gem_obj_ggtt_offset(obj); + gfx_addr = i915_gem_obj_ggtt_offset(ce->state); lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE; lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | (engine->guc_id << GUC_ELC_ENGINE_OFFSET); - obj = ctx->engine[id].ringbuf->obj; + obj = ce->ringbuf->obj; gfx_addr = i915_gem_obj_ggtt_offset(obj); lrc->ring_begin = gfx_addr; @@ -427,7 +386,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc, desc.wq_size = client->wq_size; /* - * XXX: Take LRCs from an existing intel_context if this is not an + * XXX: Take LRCs from an existing context if this is not an * IsKMDCreatedContext client */ desc.desc_private = (uintptr_t)client; @@ -451,47 +410,64 @@ static void guc_fini_ctx_desc(struct intel_guc *guc, sizeof(desc) * client->ctx_index); } -int i915_guc_wq_check_space(struct i915_guc_client *gc) +/** + * i915_guc_wq_check_space() - check that the GuC can accept a request + * @request: request associated with the commands + * + * Return: 0 if space is available + * -EAGAIN if space is not currently available + * + * This function must be called (and must return 0) before a request + * is submitted to the GuC via i915_guc_submit() below. Once a result + * of 0 has been returned, it remains valid until (but only until) + * the next call to submit(). + * + * This precheck allows the caller to determine in advance that space + * will be available for the next submission before committing resources + * to it, and helps avoid late failures with complicated recovery paths. 
+ */ +int i915_guc_wq_check_space(struct drm_i915_gem_request *request) { + const size_t wqi_size = sizeof(struct guc_wq_item); + struct i915_guc_client *gc = request->i915->guc.execbuf_client; struct guc_process_desc *desc; - u32 size = sizeof(struct guc_wq_item); - int ret = -ETIMEDOUT, timeout_counter = 200; + u32 freespace; - if (!gc) - return 0; + GEM_BUG_ON(gc == NULL); desc = gc->client_base + gc->proc_desc_offset; - while (timeout_counter-- > 0) { - if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) { - ret = 0; - break; - } + freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size); + if (likely(freespace >= wqi_size)) + return 0; - if (timeout_counter) - usleep_range(1000, 2000); - }; + gc->no_wq_space += 1; - return ret; + return -EAGAIN; } -static int guc_add_workqueue_item(struct i915_guc_client *gc, - struct drm_i915_gem_request *rq) +static void guc_add_workqueue_item(struct i915_guc_client *gc, + struct drm_i915_gem_request *rq) { + /* wqi_len is in DWords, and does not include the one-word header */ + const size_t wqi_size = sizeof(struct guc_wq_item); + const u32 wqi_len = wqi_size/sizeof(u32) - 1; struct guc_process_desc *desc; struct guc_wq_item *wqi; void *base; - u32 tail, wq_len, wq_off, space; + u32 freespace, tail, wq_off, wq_page; desc = gc->client_base + gc->proc_desc_offset; - space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size); - if (WARN_ON(space < sizeof(struct guc_wq_item))) - return -ENOSPC; /* shouldn't happen */ - /* postincrement WQ tail for next time */ - wq_off = gc->wq_tail; - gc->wq_tail += sizeof(struct guc_wq_item); - gc->wq_tail &= gc->wq_size - 1; + /* Free space is guaranteed, see i915_guc_wq_check_space() above */ + freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size); + GEM_BUG_ON(freespace < wqi_size); + + /* The GuC firmware wants the tail index in QWords, not bytes */ + tail = rq->tail; + GEM_BUG_ON(tail & 7); + tail >>= 3; + GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we * should not have the case where structure wqi is across page, neither @@ -500,19 +476,23 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc, * XXX: if not the case, we need save data to a temp wqi and copy it to * workqueue buffer dw by dw. 
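/*
 * Editorial sketch (not part of the patch): a standalone model of the
 * reserve-then-commit protocol documented above. check_space() must
 * return 0 before an item may be enqueued, so the enqueue itself can no
 * longer fail. CIRC_CNT() and CIRC_SPACE() are copied from the kernel's
 * <linux/circ_buf.h> (power-of-two sizes only); the queue geometry is
 * invented for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size)	CIRC_CNT((tail), ((head) + 1), (size))

#define WQ_SIZE		64	/* bytes, power of two */
#define WQI_SIZE	16	/* one work-queue item */

static uint8_t wq[WQ_SIZE];
static uint32_t wq_tail;	/* producer index, ours */
static uint32_t wq_head;	/* consumer index, the "firmware"'s */

static int check_space(void)
{
	if (CIRC_SPACE(wq_tail, wq_head, WQ_SIZE) >= WQI_SIZE)
		return 0;
	return -1;		/* -EAGAIN in the kernel */
}

static void add_item(const void *item)
{
	/* free space was guaranteed by a preceding check_space() == 0 */
	memcpy(&wq[wq_tail], item, WQI_SIZE);
	wq_tail = (wq_tail + WQI_SIZE) & (WQ_SIZE - 1);
}

int main(void)
{
	uint8_t item[WQI_SIZE] = { 0 };
	int queued = 0;

	while (check_space() == 0) {	/* stops one item short of full */
		add_item(item);
		queued++;
	}
	printf("queued %d items\n", queued);
	return 0;
}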
*/ - WARN_ON(sizeof(struct guc_wq_item) != 16); - WARN_ON(wq_off & 3); + BUILD_BUG_ON(wqi_size != 16); + + /* postincrement WQ tail for next time */ + wq_off = gc->wq_tail; + gc->wq_tail += wqi_size; + gc->wq_tail &= gc->wq_size - 1; + GEM_BUG_ON(wq_off & (wqi_size - 1)); - /* wq starts from the page after doorbell / process_desc */ - base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, - (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT)); + /* WQ starts from the page after doorbell / process_desc */ + wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT; wq_off &= PAGE_SIZE - 1; + base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page)); wqi = (struct guc_wq_item *)((char *)base + wq_off); - /* len does not include the header */ - wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1; + /* Now fill in the 4-word work queue item */ wqi->header = WQ_TYPE_INORDER | - (wq_len << WQ_LEN_SHIFT) | + (wqi_len << WQ_LEN_SHIFT) | (rq->engine->guc_id << WQ_TARGET_SHIFT) | WQ_NO_WCFLUSH_WAIT; @@ -520,48 +500,105 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc, wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->engine); - /* The GuC firmware wants the tail index in QWords, not bytes */ - tail = rq->ringbuf->tail >> 3; wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT; - wqi->fence_id = 0; /*XXX: what fence to be here */ + wqi->fence_id = rq->seqno; kunmap_atomic(base); +} - return 0; +static int guc_ring_doorbell(struct i915_guc_client *gc) +{ + struct guc_process_desc *desc; + union guc_doorbell_qw db_cmp, db_exc, db_ret; + union guc_doorbell_qw *db; + int attempt = 2, ret = -EAGAIN; + + desc = gc->client_base + gc->proc_desc_offset; + + /* Update the tail so it is visible to GuC */ + desc->tail = gc->wq_tail; + + /* current cookie */ + db_cmp.db_status = GUC_DOORBELL_ENABLED; + db_cmp.cookie = gc->cookie; + + /* cookie to be updated */ + db_exc.db_status = GUC_DOORBELL_ENABLED; + db_exc.cookie = gc->cookie + 1; + if (db_exc.cookie == 0) + db_exc.cookie = 1; + + /* pointer of current doorbell cacheline */ + db = gc->client_base + gc->doorbell_offset; + + while (attempt--) { + /* lets ring the doorbell */ + db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db, + db_cmp.value_qw, db_exc.value_qw); + + /* if the exchange was successfully executed */ + if (db_ret.value_qw == db_cmp.value_qw) { + /* db was successfully rung */ + gc->cookie = db_exc.cookie; + ret = 0; + break; + } + + /* XXX: doorbell was lost and need to acquire it again */ + if (db_ret.db_status == GUC_DOORBELL_DISABLED) + break; + + DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n", + db_cmp.cookie, db_ret.cookie); + + /* update the cookie to newly read cookie from GuC */ + db_cmp.cookie = db_ret.cookie; + db_exc.cookie = db_ret.cookie + 1; + if (db_exc.cookie == 0) + db_exc.cookie = 1; + } + + return ret; } /** * i915_guc_submit() - Submit commands through GuC - * @client: the guc client where commands will go through * @rq: request associated with the commands * - * Return: 0 if succeed + * Return: 0 on success, otherwise an errno. + * (Note: nonzero really shouldn't happen!) + * + * The caller must have already called i915_guc_wq_check_space() above + * with a result of 0 (success) since the last request submission. This + * guarantees that there is space in the work queue for the new request, + * so enqueuing the item cannot fail. + * + * Bad Things Will Happen if the caller violates this protocol e.g. 
calls
+ * submit() when check() says there's no space, or calls submit() multiple
+ * times with no intervening check().
+ *
+ * The only error here arises if the doorbell hardware isn't functioning
+ * as expected, which really shouldn't happen.
  */
-int i915_guc_submit(struct i915_guc_client *client,
-		    struct drm_i915_gem_request *rq)
+int i915_guc_submit(struct drm_i915_gem_request *rq)
 {
-	struct intel_guc *guc = client->guc;
 	unsigned int engine_id = rq->engine->guc_id;
-	int q_ret, b_ret;
+	struct intel_guc *guc = &rq->i915->guc;
+	struct i915_guc_client *client = guc->execbuf_client;
+	int b_ret;
 
-	q_ret = guc_add_workqueue_item(client, rq);
-	if (q_ret == 0)
-		b_ret = guc_ring_doorbell(client);
+	guc_add_workqueue_item(client, rq);
+	b_ret = guc_ring_doorbell(client);
 
 	client->submissions[engine_id] += 1;
-	if (q_ret) {
-		client->q_fail += 1;
-		client->retcode = q_ret;
-	} else if (b_ret) {
+	client->retcode = b_ret;
+	if (b_ret)
 		client->b_fail += 1;
-		client->retcode = q_ret = b_ret;
-	} else {
-		client->retcode = 0;
-	}
+
 	guc->submissions[engine_id] += 1;
 	guc->last_seqno[engine_id] = rq->seqno;
 
-	return q_ret;
+	return b_ret;
 }
 
 /*
@@ -572,7 +609,7 @@ int i915_guc_submit(struct i915_guc_client *client,
 
 /**
  * gem_allocate_guc_obj() - Allocate gem object for GuC usage
- * @dev:	drm device
+ * @dev_priv:	driver private data structure
  * @size:	size of object
 *
 * This is a wrapper to create a gem obj. In order to use it inside GuC, the
@@ -581,14 +618,13 @@ int i915_guc_submit(struct i915_guc_client *client,
 *
 * Return:	A drm_i915_gem_object if successful, otherwise NULL.
 */
-static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
-							 u32 size)
+static struct drm_i915_gem_object *
+gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
-	obj = i915_gem_alloc_object(dev, size);
-	if (!obj)
+	obj = i915_gem_object_create(dev_priv->dev, size);
+	if (IS_ERR(obj))
 		return NULL;
 
 	if (i915_gem_object_get_pages(obj)) {
@@ -623,10 +659,10 @@ static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
 	drm_gem_object_unreference(&obj->base);
 }
 
-static void guc_client_free(struct drm_device *dev,
-			    struct i915_guc_client *client)
+static void
+guc_client_free(struct drm_i915_private *dev_priv,
+		struct i915_guc_client *client)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
 
 	if (!client)
@@ -639,17 +675,10 @@ static void guc_client_free(struct drm_device *dev,
 
 	if (client->client_base) {
 		/*
-		 * If we got as far as setting up a doorbell, make sure
-		 * we shut it down before unmapping & deallocating the
-		 * memory. So first disable the doorbell, then tell the
-		 * GuC that we've finished with it, finally deallocate
-		 * it in our bitmap
+		 * If we got as far as setting up a doorbell, make sure we
+		 * shut it down before unmapping & deallocating the memory.
		 */
-		if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
-			guc_disable_doorbell(guc, client);
-			host2guc_release_doorbell(guc, client);
-			release_doorbell(guc, client->doorbell_id);
-		}
+		guc_disable_doorbell(guc, client);
 
 		kunmap(kmap_to_page(client->client_base));
 	}
@@ -664,9 +693,51 @@ static void guc_client_free(struct drm_device *dev,
 	kfree(client);
 }
 
+/*
+ * Borrow the first client to set up & tear down every doorbell
+ * in turn, to ensure that all doorbell h/w is (re)initialised.
+ */ +static void guc_init_doorbell_hw(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct i915_guc_client *client = guc->execbuf_client; + uint16_t db_id, i; + int err; + + db_id = client->doorbell_id; + + for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { + i915_reg_t drbreg = GEN8_DRBREGL(i); + u32 value = I915_READ(drbreg); + + err = guc_update_doorbell_id(guc, client, i); + + /* Report update failure or unexpectedly active doorbell */ + if (err || (i != db_id && (value & GUC_DOORBELL_ENABLED))) + DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) was 0x%x, err %d\n", + i, drbreg.reg, value, err); + } + + /* Restore to original value */ + err = guc_update_doorbell_id(guc, client, db_id); + if (err) + DRM_ERROR("Failed to restore doorbell to %d, err %d\n", + db_id, err); + + for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { + i915_reg_t drbreg = GEN8_DRBREGL(i); + u32 value = I915_READ(drbreg); + + if (i != db_id && (value & GUC_DOORBELL_ENABLED)) + DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) finally 0x%x\n", + i, drbreg.reg, value); + + } +} + /** * guc_client_alloc() - Allocate an i915_guc_client - * @dev: drm device + * @dev_priv: driver private data structure * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW * The kernel client to replace ExecList submission is created with * NORMAL priority. Priority of a client for scheduler can be HIGH, @@ -676,14 +747,15 @@ static void guc_client_free(struct drm_device *dev, * * Return: An i915_guc_client object if success, else NULL. */ -static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, - uint32_t priority, - struct intel_context *ctx) +static struct i915_guc_client * +guc_client_alloc(struct drm_i915_private *dev_priv, + uint32_t priority, + struct i915_gem_context *ctx) { struct i915_guc_client *client; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; struct drm_i915_gem_object *obj; + uint16_t db_id; client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) @@ -702,7 +774,7 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, } /* The first page is doorbell/proc_desc. Two followed pages are wq. */ - obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE); + obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE); if (!obj) goto err; @@ -712,6 +784,11 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, client->wq_offset = GUC_DB_SIZE; client->wq_size = GUC_WQ_SIZE; + db_id = select_doorbell_register(guc, client->priority); + if (db_id == GUC_INVALID_DOORBELL_ID) + /* XXX: evict a doorbell instead? */ + goto err; + client->doorbell_offset = select_doorbell_cacheline(guc); /* @@ -724,29 +801,22 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, else client->proc_desc_offset = (GUC_DB_SIZE / 2); - client->doorbell_id = assign_doorbell(guc, client->priority); - if (client->doorbell_id == GUC_INVALID_DOORBELL_ID) - /* XXX: evict a doorbell instead */ - goto err; - guc_init_proc_desc(guc, client); guc_init_ctx_desc(guc, client); - guc_init_doorbell(guc, client); - - /* XXX: Any cache flushes needed? General domain mgmt calls? 
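/*
 * Editorial sketch (not part of the patch): a userspace model of the
 * cookie handshake guc_ring_doorbell() above performs with
 * atomic64_cmpxchg(). The doorbell qword holds {status, cookie};
 * ringing means advancing the cookie while the status stays ENABLED,
 * and a mismatch means the firmware moved the cookie first, so we
 * resync and retry once. C11 atomics stand in for the kernel helpers;
 * the union layout and all names are illustrative assumptions.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DB_DISABLED	0
#define DB_ENABLED	1

union doorbell_qw {
	struct {
		uint32_t db_status;
		uint32_t cookie;
	};
	uint64_t value_qw;
};

static _Atomic uint64_t doorbell;	/* shared with the "firmware" */

static int ring_doorbell(uint32_t *cookie)
{
	union doorbell_qw cmp, exc;
	int attempt = 2;

	cmp.db_status = DB_ENABLED;	/* what we expect to find */
	cmp.cookie = *cookie;

	exc.db_status = DB_ENABLED;	/* what we want to store */
	exc.cookie = *cookie + 1;
	if (exc.cookie == 0)		/* cookie 0 is never used */
		exc.cookie = 1;

	while (attempt--) {
		uint64_t seen = cmp.value_qw;

		if (atomic_compare_exchange_strong(&doorbell, &seen,
						   exc.value_qw)) {
			*cookie = exc.cookie;	/* doorbell rung */
			return 0;
		}

		cmp.value_qw = seen;
		if (cmp.db_status == DB_DISABLED)
			break;		/* doorbell lost, give up */

		/* resync to the cookie the firmware left behind */
		exc.cookie = cmp.cookie + 1;
		if (exc.cookie == 0)
			exc.cookie = 1;
	}
	return -1;
}

int main(void)
{
	union doorbell_qw init;
	uint32_t cookie = 0;

	init.db_status = DB_ENABLED;
	init.cookie = 0;
	atomic_store(&doorbell, init.value_qw);

	printf("ring: %d, cookie now %u\n", ring_doorbell(&cookie), cookie);
	return 0;
}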
*/ - - if (host2guc_allocate_doorbell(guc, client)) + if (guc_init_doorbell(guc, client, db_id)) goto err; - DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n", - priority, client, client->ctx_index, client->doorbell_id); + DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u\n", + priority, client, client->ctx_index); + DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n", + client->doorbell_id, client->doorbell_offset); return client; err: DRM_ERROR("FAILED to create priority %u GuC client!\n", priority); - guc_client_free(dev, client); + guc_client_free(dev_priv, client); return NULL; } @@ -771,7 +841,7 @@ static void guc_create_log(struct intel_guc *guc) obj = guc->log_obj; if (!obj) { - obj = gem_allocate_guc_obj(dev_priv->dev, size); + obj = gem_allocate_guc_obj(dev_priv, size); if (!obj) { /* logging will be off */ i915.guc_log_level = -1; @@ -831,7 +901,7 @@ static void guc_create_ads(struct intel_guc *guc) obj = guc->ads_obj; if (!obj) { - obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size)); + obj = gem_allocate_guc_obj(dev_priv, PAGE_ALIGN(size)); if (!obj) return; @@ -885,66 +955,65 @@ static void guc_create_ads(struct intel_guc *guc) * Set up the memory resources to be shared with the GuC. At this point, * we require just one object that can be mapped through the GGTT. */ -int i915_guc_submission_init(struct drm_device *dev) +int i915_guc_submission_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; const size_t ctxsize = sizeof(struct guc_context_desc); const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize; const size_t gemsize = round_up(poolsize, PAGE_SIZE); struct intel_guc *guc = &dev_priv->guc; + /* Wipe bitmap & delete client in case of reinitialisation */ + bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS); + i915_guc_submission_disable(dev_priv); + if (!i915.enable_guc_submission) return 0; /* not enabled */ if (guc->ctx_pool_obj) return 0; /* already allocated */ - guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize); + guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize); if (!guc->ctx_pool_obj) return -ENOMEM; ida_init(&guc->ctx_ids); - guc_create_log(guc); - guc_create_ads(guc); return 0; } -int i915_guc_submission_enable(struct drm_device *dev) +int i915_guc_submission_enable(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; - struct intel_context *ctx = dev_priv->kernel_context; struct i915_guc_client *client; /* client for execbuf submission */ - client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx); + client = guc_client_alloc(dev_priv, + GUC_CTX_PRIORITY_KMD_NORMAL, + dev_priv->kernel_context); if (!client) { DRM_ERROR("Failed to create execbuf guc_client\n"); return -ENOMEM; } guc->execbuf_client = client; - host2guc_sample_forcewake(guc, client); + guc_init_doorbell_hw(guc); return 0; } -void i915_guc_submission_disable(struct drm_device *dev) +void i915_guc_submission_disable(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; - guc_client_free(dev, guc->execbuf_client); + guc_client_free(dev_priv, guc->execbuf_client); guc->execbuf_client = NULL; } -void i915_guc_submission_fini(struct drm_device *dev) +void i915_guc_submission_fini(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; 
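/*
 * Editorial sketch (not part of the patch): the reinitialisation-safe
 * pattern adopted by i915_guc_submission_init() above, which wipes the
 * doorbell bitmap and tears down any leftover client before setting up
 * again, so a repeated call is harmless. The structure and helpers here
 * are invented for the example.
 */
#include <stdlib.h>
#include <string.h>

struct guc_model {
	unsigned char doorbell_bitmap[32];
	void *execbuf_client;
	void *ctx_pool;
};

static void submission_disable(struct guc_model *g)
{
	free(g->execbuf_client);	/* no-op when already NULL */
	g->execbuf_client = NULL;
}

static int submission_init(struct guc_model *g)
{
	/* wipe bitmap & delete client in case of reinitialisation */
	memset(g->doorbell_bitmap, 0, sizeof(g->doorbell_bitmap));
	submission_disable(g);

	if (g->ctx_pool)		/* already allocated */
		return 0;
	g->ctx_pool = calloc(1, 4096);
	return g->ctx_pool ? 0 : -12;	/* -ENOMEM in the kernel */
}

int main(void)
{
	struct guc_model g = { 0 };

	return submission_init(&g) || submission_init(&g); /* idempotent */
}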
gem_release_guc_obj(dev_priv->guc.ads_obj); @@ -967,10 +1036,10 @@ int intel_guc_suspend(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; - struct intel_context *ctx; + struct i915_gem_context *ctx; u32 data[3]; - if (!i915.enable_guc_submission) + if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS) return 0; ctx = dev_priv->kernel_context; @@ -993,10 +1062,10 @@ int intel_guc_resume(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; - struct intel_context *ctx; + struct i915_gem_context *ctx; u32 data[3]; - if (!i915.enable_guc_submission) + if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS) return 0; ctx = dev_priv->kernel_context; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2f6fd33c07ba..4378a659d962 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -336,9 +336,8 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) __gen6_disable_pm_irq(dev_priv, mask); } -void gen6_reset_rps_interrupts(struct drm_device *dev) +void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; i915_reg_t reg = gen6_pm_iir(dev_priv); spin_lock_irq(&dev_priv->irq_lock); @@ -349,10 +348,8 @@ void gen6_reset_rps_interrupts(struct drm_device *dev) spin_unlock_irq(&dev_priv->irq_lock); } -void gen6_enable_rps_interrupts(struct drm_device *dev) +void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - spin_lock_irq(&dev_priv->irq_lock); WARN_ON(dev_priv->rps.pm_iir); @@ -367,25 +364,11 @@ void gen6_enable_rps_interrupts(struct drm_device *dev) u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) { - /* - * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer - * if GEN6_PM_UP_EI_EXPIRED is masked. - * - * TODO: verify if this can be reproduced on VLV,CHV. 
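/*
 * Editorial sketch (not part of the patch): the gen6_sanitize_rps_pm_mask()
 * rework here replaces per-call generation checks with a
 * dev_priv->rps.pm_intr_keep mask computed once at init, so sanitizing
 * becomes a single AND. The bit values below are invented for the
 * example.
 */
#include <stdint.h>
#include <stdio.h>

#define PM_UP_EI_EXPIRED	(1u << 2)	/* illustrative */
#define PM_REDIRECT_NON_DISP	(1u << 7)	/* illustrative */

int main(void)
{
	/* worked out once from the platform, not on every call */
	uint32_t pm_intr_keep = PM_UP_EI_EXPIRED | PM_REDIRECT_NON_DISP;
	uint32_t requested = 0xffu;

	printf("sanitized mask: 0x%02x\n", requested & ~pm_intr_keep);
	return 0;
}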
- */ - if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) - mask &= ~GEN6_PM_RP_UP_EI_EXPIRED; - - if (INTEL_INFO(dev_priv)->gen >= 8) - mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; - - return mask; + return (mask & ~dev_priv->rps.pm_intr_keep); } -void gen6_disable_rps_interrupts(struct drm_device *dev) +void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - spin_lock_irq(&dev_priv->irq_lock); dev_priv->rps.interrupts_enabled = false; spin_unlock_irq(&dev_priv->irq_lock); @@ -402,7 +385,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev) spin_unlock_irq(&dev_priv->irq_lock); - synchronize_irq(dev->irq); + synchronize_irq(dev_priv->dev->irq); } /** @@ -605,19 +588,17 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, /** * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion - * @dev: drm device + * @dev_priv: i915 device private */ -static void i915_enable_asle_pipestat(struct drm_device *dev) +static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) + if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) return; spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) i915_enable_pipestat(dev_priv, PIPE_A, PIPE_LEGACY_BLC_EVENT_STATUS); @@ -750,7 +731,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) if (mode->flags & DRM_MODE_FLAG_INTERLACE) vtotal /= 2; - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; else position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; @@ -767,7 +748,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) * problem. We may need to extend this to include other platforms, * but so far testing only shows the problem on HSW. */ - if (HAS_DDI(dev) && !position) { + if (HAS_DDI(dev_priv) && !position) { int i, temp; for (i = 0; i < 100; i++) { @@ -835,7 +816,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, if (stime) *stime = ktime_get(); - if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { /* No obvious pixelcount register. Only query vertical * scanout position from Display scan line register. 
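/*
 * Editorial sketch (not part of the patch): the scanout-position hunks
 * above distinguish platforms that expose only a scan line register
 * from older ones with a raw pixel counter, which must be split into
 * line and column using the horizontal total. The numbers are invented.
 */
#include <stdio.h>

int main(void)
{
	int htotal = 2200;		/* pixels per line, illustrative */
	int pixel_count = 123456;	/* raw counter, illustrative */

	int vpos = pixel_count / htotal;	/* scan line */
	int hpos = pixel_count % htotal;	/* position within line */

	printf("vpos=%d hpos=%d\n", vpos, hpos);
	return 0;
}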
*/ @@ -897,7 +878,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, else position += vtotal - vbl_end; - if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { *vpos = position; *hpos = 0; } else { @@ -955,9 +936,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, &crtc->hwmode); } -static void ironlake_rps_change_irq_handler(struct drm_device *dev) +static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 busy_up, busy_down, max_avg, min_avg; u8 new_delay; @@ -986,7 +966,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev) new_delay = dev_priv->ips.min_delay; } - if (ironlake_set_drps(dev, new_delay)) + if (ironlake_set_drps(dev_priv, new_delay)) dev_priv->ips.cur_delay = new_delay; spin_unlock(&mchdev_lock); @@ -1175,7 +1155,7 @@ static void gen6_pm_rps_work(struct work_struct *work) new_delay += adj; new_delay = clamp_t(int, new_delay, min, max); - intel_set_rps(dev_priv->dev, new_delay); + intel_set_rps(dev_priv, new_delay); mutex_unlock(&dev_priv->rps.hw_lock); out: @@ -1506,27 +1486,23 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, } -static void gmbus_irq_handler(struct drm_device *dev) +static void gmbus_irq_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - wake_up_all(&dev_priv->gmbus_wait_queue); } -static void dp_aux_irq_handler(struct drm_device *dev) +static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - wake_up_all(&dev_priv->gmbus_wait_queue); } #if defined(CONFIG_DEBUG_FS) -static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, +static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe, uint32_t crc0, uint32_t crc1, uint32_t crc2, uint32_t crc3, uint32_t crc4) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; struct intel_pipe_crc_entry *entry; int head, tail; @@ -1550,7 +1526,8 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, entry = &pipe_crc->entries[head]; - entry->frame = dev->driver->get_vblank_counter(dev, pipe); + entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev, + pipe); entry->crc[0] = crc0; entry->crc[1] = crc1; entry->crc[2] = crc2; @@ -1566,27 +1543,26 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, } #else static inline void -display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, +display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe, uint32_t crc0, uint32_t crc1, uint32_t crc2, uint32_t crc3, uint32_t crc4) {} #endif -static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) +static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; - - display_pipe_crc_irq_handler(dev, pipe, + display_pipe_crc_irq_handler(dev_priv, pipe, I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 0, 0, 0, 0); } -static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) +static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; - - 
display_pipe_crc_irq_handler(dev, pipe, + display_pipe_crc_irq_handler(dev_priv, pipe, I915_READ(PIPE_CRC_RES_1_IVB(pipe)), I915_READ(PIPE_CRC_RES_2_IVB(pipe)), I915_READ(PIPE_CRC_RES_3_IVB(pipe)), @@ -1594,22 +1570,22 @@ static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) I915_READ(PIPE_CRC_RES_5_IVB(pipe))); } -static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) +static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; uint32_t res1, res2; - if (INTEL_INFO(dev)->gen >= 3) + if (INTEL_GEN(dev_priv) >= 3) res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); else res1 = 0; - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); else res2 = 0; - display_pipe_crc_irq_handler(dev, pipe, + display_pipe_crc_irq_handler(dev_priv, pipe, I915_READ(PIPE_CRC_RES_RED(pipe)), I915_READ(PIPE_CRC_RES_GREEN(pipe)), I915_READ(PIPE_CRC_RES_BLUE(pipe)), @@ -1643,18 +1619,21 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) } } -static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) +static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv, + enum pipe pipe) { - if (!drm_handle_vblank(dev, pipe)) - return false; + bool ret; - return true; + ret = drm_handle_vblank(dev_priv->dev, pipe); + if (ret) + intel_finish_page_flip_mmio(dev_priv, pipe); + + return ret; } -static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir, - u32 pipe_stats[I915_MAX_PIPES]) +static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv, + u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { - struct drm_i915_private *dev_priv = dev->dev_private; int pipe; spin_lock(&dev_priv->irq_lock); @@ -1710,31 +1689,28 @@ static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir, spin_unlock(&dev_priv->irq_lock); } -static void valleyview_pipestat_irq_handler(struct drm_device *dev, +static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, u32 pipe_stats[I915_MAX_PIPES]) { - struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + intel_pipe_handle_vblank(dev_priv, pipe)) + intel_check_page_flip(dev_priv, pipe); - if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip(dev, pipe); - } + if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) + intel_finish_page_flip_cs(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); } static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) @@ -1747,12 +1723,13 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) return hotplug_status; } -static void i9xx_hpd_irq_handler(struct drm_device *dev, +static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status) { u32 pin_mask = 0, long_mask = 0; - if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + if 
(IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv)) { u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; if (hotplug_trigger) { @@ -1760,11 +1737,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev, hotplug_trigger, hpd_status_g4x, i9xx_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); } else { u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; @@ -1772,7 +1749,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev, intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, hotplug_trigger, hpd_status_i915, i9xx_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } } } @@ -1831,7 +1808,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) /* Call regardless, as some status bits might not be * signalled in iir */ - valleyview_pipestat_irq_ack(dev, iir, pipe_stats); + valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); /* * VLV_IIR is single buffered, and reflects the level @@ -1850,9 +1827,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) gen6_rps_irq_handler(dev_priv, pm_iir); if (hotplug_status) - i9xx_hpd_irq_handler(dev, hotplug_status); + i9xx_hpd_irq_handler(dev_priv, hotplug_status); - valleyview_pipestat_irq_handler(dev, pipe_stats); + valleyview_pipestat_irq_handler(dev_priv, pipe_stats); } while (0); enable_rpm_wakeref_asserts(dev_priv); @@ -1911,7 +1888,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) /* Call regardless, as some status bits might not be * signalled in iir */ - valleyview_pipestat_irq_ack(dev, iir, pipe_stats); + valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); /* * VLV_IIR is single buffered, and reflects the level @@ -1927,9 +1904,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) gen8_gt_irq_handler(dev_priv, gt_iir); if (hotplug_status) - i9xx_hpd_irq_handler(dev, hotplug_status); + i9xx_hpd_irq_handler(dev_priv, hotplug_status); - valleyview_pipestat_irq_handler(dev, pipe_stats); + valleyview_pipestat_irq_handler(dev_priv, pipe_stats); } while (0); enable_rpm_wakeref_asserts(dev_priv); @@ -1937,10 +1914,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) return ret; } -static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, +static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, + u32 hotplug_trigger, const u32 hpd[HPD_NUM_PINS]) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; /* @@ -1966,16 +1943,15 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, dig_hotplug_reg, hpd, pch_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } -static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) +static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; - ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); + ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); if (pch_iir & SDE_AUDIO_POWER_MASK) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> @@ -1985,10 +1961,10 @@ static void 
ibx_irq_handler(struct drm_device *dev, u32 pch_iir) } if (pch_iir & SDE_AUX_MASK) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); if (pch_iir & SDE_GMBUS) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); if (pch_iir & SDE_AUDIO_HDCP_MASK) DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); @@ -2018,9 +1994,8 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); } -static void ivb_err_int_handler(struct drm_device *dev) +static void ivb_err_int_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 err_int = I915_READ(GEN7_ERR_INT); enum pipe pipe; @@ -2032,19 +2007,18 @@ static void ivb_err_int_handler(struct drm_device *dev) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { - if (IS_IVYBRIDGE(dev)) - ivb_pipe_crc_irq_handler(dev, pipe); + if (IS_IVYBRIDGE(dev_priv)) + ivb_pipe_crc_irq_handler(dev_priv, pipe); else - hsw_pipe_crc_irq_handler(dev, pipe); + hsw_pipe_crc_irq_handler(dev_priv, pipe); } } I915_WRITE(GEN7_ERR_INT, err_int); } -static void cpt_serr_int_handler(struct drm_device *dev) +static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 serr_int = I915_READ(SERR_INT); if (serr_int & SERR_INT_POISON) @@ -2062,13 +2036,12 @@ static void cpt_serr_int_handler(struct drm_device *dev) I915_WRITE(SERR_INT, serr_int); } -static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) +static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; - ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); + ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> @@ -2078,10 +2051,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) } if (pch_iir & SDE_AUX_MASK_CPT) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); if (pch_iir & SDE_GMBUS_CPT) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); if (pch_iir & SDE_AUDIO_CP_REQ_CPT) DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); @@ -2096,12 +2069,11 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) I915_READ(FDI_RX_IIR(pipe))); if (pch_iir & SDE_ERROR_CPT) - cpt_serr_int_handler(dev); + cpt_serr_int_handler(dev_priv); } -static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) +static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & ~SDE_PORTE_HOTPLUG_SPT; u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; @@ -2130,16 +2102,16 @@ static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) } if (pin_mask) - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); if (pch_iir & SDE_GMBUS_CPT) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); } -static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, +static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, + u32 hotplug_trigger, const u32 hpd[HPD_NUM_PINS]) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); @@ 
-2149,97 +2121,93 @@ static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, dig_hotplug_reg, hpd, ilk_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } -static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) +static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, + u32 de_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; if (hotplug_trigger) - ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk); + ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); if (de_iir & DE_AUX_CHANNEL_A) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); if (de_iir & DE_GSE) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); if (de_iir & DE_POISON) DRM_ERROR("Poison interrupt\n"); for_each_pipe(dev_priv, pipe) { if (de_iir & DE_PIPE_VBLANK(pipe) && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + intel_pipe_handle_vblank(dev_priv, pipe)) + intel_check_page_flip(dev_priv, pipe); if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); if (de_iir & DE_PIPE_CRC_DONE(pipe)) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); /* plane/pipes map 1:1 on ilk+ */ - if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + if (de_iir & DE_PLANE_FLIP_DONE(pipe)) + intel_finish_page_flip_cs(dev_priv, pipe); } /* check event from PCH */ if (de_iir & DE_PCH_EVENT) { u32 pch_iir = I915_READ(SDEIIR); - if (HAS_PCH_CPT(dev)) - cpt_irq_handler(dev, pch_iir); + if (HAS_PCH_CPT(dev_priv)) + cpt_irq_handler(dev_priv, pch_iir); else - ibx_irq_handler(dev, pch_iir); + ibx_irq_handler(dev_priv, pch_iir); /* should clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); } - if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) - ironlake_rps_change_irq_handler(dev); + if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) + ironlake_rps_change_irq_handler(dev_priv); } -static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) +static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, + u32 de_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; if (hotplug_trigger) - ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb); + ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); if (de_iir & DE_ERR_INT_IVB) - ivb_err_int_handler(dev); + ivb_err_int_handler(dev_priv); if (de_iir & DE_AUX_CHANNEL_A_IVB) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); if (de_iir & DE_GSE_IVB) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); for_each_pipe(dev_priv, pipe) { if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + intel_pipe_handle_vblank(dev_priv, pipe)) + intel_check_page_flip(dev_priv, pipe); /* plane/pipes map 1:1 on ilk+ */ - if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) + intel_finish_page_flip_cs(dev_priv, pipe); } /* check event from PCH */ - if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { + if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { u32 pch_iir = I915_READ(SDEIIR); - 
cpt_irq_handler(dev, pch_iir); + cpt_irq_handler(dev_priv, pch_iir); /* clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); @@ -2277,7 +2245,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) * able to process them after we restore SDEIER (as soon as we restore * it, we'll get an interrupt if SDEIIR still has something to process * due to its back queue). */ - if (!HAS_PCH_NOP(dev)) { + if (!HAS_PCH_NOP(dev_priv)) { sde_ier = I915_READ(SDEIER); I915_WRITE(SDEIER, 0); POSTING_READ(SDEIER); @@ -2289,7 +2257,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (gt_iir) { I915_WRITE(GTIIR, gt_iir); ret = IRQ_HANDLED; - if (INTEL_INFO(dev)->gen >= 6) + if (INTEL_GEN(dev_priv) >= 6) snb_gt_irq_handler(dev_priv, gt_iir); else ilk_gt_irq_handler(dev_priv, gt_iir); @@ -2299,13 +2267,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (de_iir) { I915_WRITE(DEIIR, de_iir); ret = IRQ_HANDLED; - if (INTEL_INFO(dev)->gen >= 7) - ivb_display_irq_handler(dev, de_iir); + if (INTEL_GEN(dev_priv) >= 7) + ivb_display_irq_handler(dev_priv, de_iir); else - ilk_display_irq_handler(dev, de_iir); + ilk_display_irq_handler(dev_priv, de_iir); } - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_GEN(dev_priv) >= 6) { u32 pm_iir = I915_READ(GEN6_PMIIR); if (pm_iir) { I915_WRITE(GEN6_PMIIR, pm_iir); @@ -2316,7 +2284,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) I915_WRITE(DEIER, de_ier); POSTING_READ(DEIER); - if (!HAS_PCH_NOP(dev)) { + if (!HAS_PCH_NOP(dev_priv)) { I915_WRITE(SDEIER, sde_ier); POSTING_READ(SDEIER); } @@ -2327,10 +2295,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) return ret; } -static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, +static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, + u32 hotplug_trigger, const u32 hpd[HPD_NUM_PINS]) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); @@ -2340,13 +2308,12 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, dig_hotplug_reg, hpd, bxt_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } static irqreturn_t gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) { - struct drm_device *dev = dev_priv->dev; irqreturn_t ret = IRQ_NONE; u32 iir; enum pipe pipe; @@ -2357,7 +2324,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(GEN8_DE_MISC_IIR, iir); ret = IRQ_HANDLED; if (iir & GEN8_DE_MISC_GSE) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); else DRM_ERROR("Unexpected DE Misc interrupt\n"); } @@ -2381,26 +2348,28 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) GEN9_AUX_CHANNEL_D; if (iir & tmp_mask) { - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); found = true; } if (IS_BROXTON(dev_priv)) { tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; if (tmp_mask) { - bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt); + bxt_hpd_irq_handler(dev_priv, tmp_mask, + hpd_bxt); found = true; } } else if (IS_BROADWELL(dev_priv)) { tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; if (tmp_mask) { - ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw); + ilk_hpd_irq_handler(dev_priv, + tmp_mask, hpd_bdw); found = true; } } - if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) { - gmbus_irq_handler(dev); + if (IS_BROXTON(dev_priv) && (iir & 
BXT_DE_PORT_GMBUS)) { + gmbus_irq_handler(dev_priv); found = true; } @@ -2427,8 +2396,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); if (iir & GEN8_PIPE_VBLANK && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + intel_pipe_handle_vblank(dev_priv, pipe)) + intel_check_page_flip(dev_priv, pipe); flip_done = iir; if (INTEL_INFO(dev_priv)->gen >= 9) @@ -2436,13 +2405,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) else flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; - if (flip_done) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + if (flip_done) + intel_finish_page_flip_cs(dev_priv, pipe); if (iir & GEN8_PIPE_CDCLK_CRC_DONE) - hsw_pipe_crc_irq_handler(dev, pipe); + hsw_pipe_crc_irq_handler(dev_priv, pipe); if (iir & GEN8_PIPE_FIFO_UNDERRUN) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); @@ -2459,7 +2426,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) fault_errors); } - if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) && + if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && master_ctl & GEN8_DE_PCH_IRQ) { /* * FIXME(BDW): Assume for now that the new interrupt handling @@ -2472,9 +2439,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) ret = IRQ_HANDLED; if (HAS_PCH_SPT(dev_priv)) - spt_irq_handler(dev, iir); + spt_irq_handler(dev_priv, iir); else - cpt_irq_handler(dev, iir); + cpt_irq_handler(dev_priv, iir); } else { /* * Like on previous PCH there seems to be something @@ -2550,20 +2517,20 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, /** * i915_reset_and_wakeup - do process context error handling work - * @dev: drm device + * @dev_priv: i915 device private * * Fire an error uevent so userspace can see that a hang or error * was detected. */ -static void i915_reset_and_wakeup(struct drm_device *dev) +static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; int ret; - kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); + kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); /* * Note that there's only one work item which does gpu resets, so we @@ -2577,8 +2544,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) */ if (i915_reset_in_progress(&dev_priv->gpu_error)) { DRM_DEBUG_DRIVER("resetting chip\n"); - kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, - reset_event); + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); /* * In most cases it's guaranteed that we get here with an RPM @@ -2589,7 +2555,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) */ intel_runtime_pm_get(dev_priv); - intel_prepare_reset(dev); + intel_prepare_reset(dev_priv); /* * All state reset _must_ be completed before we update the @@ -2597,14 +2563,14 @@ static void i915_reset_and_wakeup(struct drm_device *dev) * pending state and not properly drop locks, resulting in * deadlocks with the reset work. 
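/*
 * Editorial sketch (not part of the patch): a userspace model of the
 * uevent ordering in i915_reset_and_wakeup() here. An error event
 * always fires, and reset/reset-done events bracket the actual reset
 * only while one is marked in progress. notify() stands in for
 * kobject_uevent_env(); everything else is invented for the example.
 */
#include <stdbool.h>
#include <stdio.h>

static void notify(const char *ev)
{
	printf("uevent: %s\n", ev);
}

static int do_reset(void)
{
	return 0;	/* pretend the reset succeeded */
}

int main(void)
{
	bool reset_in_progress = true;

	notify("ERROR=1");
	if (reset_in_progress) {
		notify("RESET=1");
		if (do_reset() == 0)
			notify("ERROR=0");	/* the reset-done event */
	}
	return 0;
}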
*/ - ret = i915_reset(dev); + ret = i915_reset(dev_priv); - intel_finish_reset(dev); + intel_finish_reset(dev_priv); intel_runtime_pm_put(dev_priv); if (ret == 0) - kobject_uevent_env(&dev->primary->kdev->kobj, + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); /* @@ -2615,9 +2581,8 @@ static void i915_reset_and_wakeup(struct drm_device *dev) } } -static void i915_report_and_clear_eir(struct drm_device *dev) +static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; uint32_t instdone[I915_NUM_INSTDONE_REG]; u32 eir = I915_READ(EIR); int pipe, i; @@ -2627,9 +2592,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev) pr_err("render error detected, EIR: 0x%08x\n", eir); - i915_get_extra_instdone(dev, instdone); + i915_get_extra_instdone(dev_priv, instdone); - if (IS_G4X(dev)) { + if (IS_G4X(dev_priv)) { if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { u32 ipeir = I915_READ(IPEIR_I965); @@ -2651,7 +2616,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) } } - if (!IS_GEN2(dev)) { + if (!IS_GEN2(dev_priv)) { if (eir & I915_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); pr_err("page table error\n"); @@ -2673,7 +2638,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); for (i = 0; i < ARRAY_SIZE(instdone); i++) pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_GEN(dev_priv) < 4) { u32 ipeir = I915_READ(IPEIR); pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); @@ -2709,18 +2674,19 @@ static void i915_report_and_clear_eir(struct drm_device *dev) /** * i915_handle_error - handle a gpu error - * @dev: drm device + * @dev_priv: i915 device private * @engine_mask: mask representing engines that are hung * Do some basic checking of register state at error time and * dump it to the syslog. Also call i915_capture_error_state() to make * sure we get a record and make it available in debugfs. Fire a uevent * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). + * @fmt: Error message format string */ -void i915_handle_error(struct drm_device *dev, u32 engine_mask, +void i915_handle_error(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *fmt, ...) 
{ - struct drm_i915_private *dev_priv = dev->dev_private; va_list args; char error_msg[80]; @@ -2728,8 +2694,8 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask, vscnprintf(error_msg, sizeof(error_msg), fmt, args); va_end(args); - i915_capture_error_state(dev, engine_mask, error_msg); - i915_report_and_clear_eir(dev); + i915_capture_error_state(dev_priv, engine_mask, error_msg); + i915_report_and_clear_eir(dev_priv); if (engine_mask) { atomic_or(I915_RESET_IN_PROGRESS_FLAG, @@ -2751,7 +2717,7 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask, i915_error_wake_up(dev_priv, false); } - i915_reset_and_wakeup(dev); + i915_reset_and_wakeup(dev_priv); } /* Called from drm generic code, passed 'crtc' which @@ -2869,9 +2835,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno) } static bool -ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) +ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr) { - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { return (ipehr >> 23) == 0x1c; } else { ipehr &= ~MI_SEMAPHORE_SYNC_MASK; @@ -2884,10 +2850,10 @@ static struct intel_engine_cs * semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, u64 offset) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct intel_engine_cs *signaller; - if (INTEL_INFO(dev_priv)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { for_each_engine(signaller, dev_priv) { if (engine == signaller) continue; @@ -2916,7 +2882,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, static struct intel_engine_cs * semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 cmd, ipehr, head; u64 offset = 0; int i, backwards; @@ -2942,7 +2908,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) return NULL; ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); - if (!ipehr_is_semaphore_wait(engine->dev, ipehr)) + if (!ipehr_is_semaphore_wait(engine->i915, ipehr)) return NULL; /* @@ -2954,7 +2920,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) * ringbuffer itself. */ head = I915_READ_HEAD(engine) & HEAD_ADDR; - backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4; + backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4; for (i = backwards; i; --i) { /* @@ -2976,7 +2942,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) return NULL; *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1; - if (INTEL_INFO(engine->dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { offset = ioread32(engine->buffer->virtual_start + head + 12); offset <<= 32; offset = ioread32(engine->buffer->virtual_start + head + 8); @@ -2986,7 +2952,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) static int semaphore_passed(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct intel_engine_cs *signaller; u32 seqno; @@ -3028,7 +2994,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine) if (engine->id != RCS) return true; - i915_get_extra_instdone(engine->dev, instdone); + i915_get_extra_instdone(engine->i915, instdone); /* There might be unstable subunit states even when * actual head is not moving. 
Filter out the unstable ones by @@ -3069,8 +3035,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd) static enum intel_ring_hangcheck_action ring_stuck(struct intel_engine_cs *engine, u64 acthd) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; enum intel_ring_hangcheck_action ha; u32 tmp; @@ -3078,7 +3043,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) if (ha != HANGCHECK_HUNG) return ha; - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return HANGCHECK_HUNG; /* Is the chip hanging on a WAIT_FOR_EVENT? @@ -3088,19 +3053,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) */ tmp = I915_READ_CTL(engine); if (tmp & RING_WAIT) { - i915_handle_error(dev, 0, + i915_handle_error(dev_priv, 0, "Kicking stuck wait on %s", engine->name); I915_WRITE_CTL(engine, tmp); return HANGCHECK_KICK; } - if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { + if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) { switch (semaphore_passed(engine)) { default: return HANGCHECK_HUNG; case 1: - i915_handle_error(dev, 0, + i915_handle_error(dev_priv, 0, "Kicking stuck semaphore on %s", engine->name); I915_WRITE_CTL(engine, tmp); @@ -3115,7 +3080,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) static unsigned kick_waiters(struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = to_i915(engine->dev); + struct drm_i915_private *i915 = engine->i915; unsigned user_interrupts = READ_ONCE(engine->user_interrupts); if (engine->hangcheck.user_interrupts == user_interrupts && @@ -3144,7 +3109,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work) struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), gpu_error.hangcheck_work.work); - struct drm_device *dev = dev_priv->dev; struct intel_engine_cs *engine; enum intel_engine_id id; int busy_count = 0, rings_hung = 0; @@ -3272,22 +3236,22 @@ static void i915_hangcheck_elapsed(struct work_struct *work) } if (rings_hung) { - i915_handle_error(dev, rings_hung, "Engine(s) hung"); + i915_handle_error(dev_priv, rings_hung, "Engine(s) hung"); goto out; } if (busy_count) /* Reset timer case chip hangs without another request * being added */ - i915_queue_hangcheck(dev); + i915_queue_hangcheck(dev_priv); out: ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); } -void i915_queue_hangcheck(struct drm_device *dev) +void i915_queue_hangcheck(struct drm_i915_private *dev_priv) { - struct i915_gpu_error *e = &to_i915(dev)->gpu_error; + struct i915_gpu_error *e = &dev_priv->gpu_error; if (!i915.enable_hangcheck) return; @@ -3500,31 +3464,29 @@ static void cherryview_irq_preinstall(struct drm_device *dev) spin_unlock_irq(&dev_priv->irq_lock); } -static u32 intel_hpd_enabled_irqs(struct drm_device *dev, +static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, const u32 hpd[HPD_NUM_PINS]) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; u32 enabled_irqs = 0; - for_each_intel_encoder(dev, encoder) + for_each_intel_encoder(dev_priv->dev, encoder) if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) enabled_irqs |= hpd[encoder->hpd_pin]; return enabled_irqs; } -static void ibx_hpd_irq_setup(struct drm_device *dev) +static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_irqs, hotplug, enabled_irqs; - if (HAS_PCH_IBX(dev)) { + if (HAS_PCH_IBX(dev_priv)) { hotplug_irqs = SDE_HOTPLUG_MASK; - 
enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); } else { hotplug_irqs = SDE_HOTPLUG_MASK_CPT; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); } ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -3543,18 +3505,17 @@ static void ibx_hpd_irq_setup(struct drm_device *dev) * When CPU and PCH are on the same package, port A * HPD must be enabled in both north and south. */ - if (HAS_PCH_LPT_LP(dev)) + if (HAS_PCH_LPT_LP(dev_priv)) hotplug |= PORTA_HOTPLUG_ENABLE; I915_WRITE(PCH_PORT_HOTPLUG, hotplug); } -static void spt_hpd_irq_setup(struct drm_device *dev) +static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_irqs, hotplug, enabled_irqs; hotplug_irqs = SDE_HOTPLUG_MASK_SPT; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -3569,24 +3530,23 @@ static void spt_hpd_irq_setup(struct drm_device *dev) I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); } -static void ilk_hpd_irq_setup(struct drm_device *dev) +static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_irqs, hotplug, enabled_irqs; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); - } else if (INTEL_INFO(dev)->gen >= 7) { + } else if (INTEL_GEN(dev_priv) >= 7) { hotplug_irqs = DE_DP_A_HOTPLUG_IVB; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); } else { hotplug_irqs = DE_DP_A_HOTPLUG; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); } @@ -3601,15 +3561,14 @@ static void ilk_hpd_irq_setup(struct drm_device *dev) hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); - ibx_hpd_irq_setup(dev); + ibx_hpd_irq_setup(dev_priv); } -static void bxt_hpd_irq_setup(struct drm_device *dev) +static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_irqs, hotplug, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); @@ -3827,6 +3786,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) uint32_t de_pipe_enables; u32 de_port_masked = GEN8_AUX_CHANNEL_A; u32 de_port_enables; + u32 de_misc_masked = GEN8_DE_MISC_GSE; enum pipe pipe; if (INTEL_INFO(dev_priv)->gen >= 9) { @@ -3862,6 +3822,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) de_pipe_enables); GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); + GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); } static int gen8_irq_postinstall(struct drm_device *dev) @@ -4006,13 +3967,12 @@ static int 
i8xx_irq_postinstall(struct drm_device *dev) /* * Returns true when a page flip has completed. */ -static bool i8xx_handle_vblank(struct drm_device *dev, +static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv, int plane, int pipe, u32 iir) { - struct drm_i915_private *dev_priv = dev->dev_private; u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); - if (!intel_pipe_handle_vblank(dev, pipe)) + if (!intel_pipe_handle_vblank(dev_priv, pipe)) return false; if ((iir & flip_pending) == 0) @@ -4027,12 +3987,11 @@ static bool i8xx_handle_vblank(struct drm_device *dev, if (I915_READ16(ISR) & flip_pending) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - intel_finish_page_flip(dev, pipe); + intel_finish_page_flip_cs(dev_priv, pipe); return true; check_page_flip: - intel_check_page_flip(dev, pipe); + intel_check_page_flip(dev_priv, pipe); return false; } @@ -4089,15 +4048,15 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) for_each_pipe(dev_priv, pipe) { int plane = pipe; - if (HAS_FBC(dev)) + if (HAS_FBC(dev_priv)) plane = !plane; if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && - i8xx_handle_vblank(dev, plane, pipe, iir)) + i8xx_handle_vblank(dev_priv, plane, pipe, iir)) flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, @@ -4182,7 +4141,7 @@ static int i915_irq_postinstall(struct drm_device *dev) I915_WRITE(IER, enable_mask); POSTING_READ(IER); - i915_enable_asle_pipestat(dev); + i915_enable_asle_pipestat(dev_priv); /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ @@ -4197,13 +4156,12 @@ static int i915_irq_postinstall(struct drm_device *dev) /* * Returns true when a page flip has completed. */ -static bool i915_handle_vblank(struct drm_device *dev, +static bool i915_handle_vblank(struct drm_i915_private *dev_priv, int plane, int pipe, u32 iir) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); - if (!intel_pipe_handle_vblank(dev, pipe)) + if (!intel_pipe_handle_vblank(dev_priv, pipe)) return false; if ((iir & flip_pending) == 0) @@ -4218,12 +4176,11 @@ static bool i915_handle_vblank(struct drm_device *dev, if (I915_READ(ISR) & flip_pending) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - intel_finish_page_flip(dev, pipe); + intel_finish_page_flip_cs(dev_priv, pipe); return true; check_page_flip: - intel_check_page_flip(dev, pipe); + intel_check_page_flip(dev_priv, pipe); return false; } @@ -4273,11 +4230,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) break; /* Consume port. 
Then clear IIR or we'll miss events */ - if (I915_HAS_HOTPLUG(dev) && + if (I915_HAS_HOTPLUG(dev_priv) && iir & I915_DISPLAY_PORT_INTERRUPT) { u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); if (hotplug_status) - i9xx_hpd_irq_handler(dev, hotplug_status); + i9xx_hpd_irq_handler(dev_priv, hotplug_status); } I915_WRITE(IIR, iir & ~flip_mask); @@ -4288,18 +4245,18 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) for_each_pipe(dev_priv, pipe) { int plane = pipe; - if (HAS_FBC(dev)) + if (HAS_FBC(dev_priv)) plane = !plane; if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && - i915_handle_vblank(dev, plane, pipe, iir)) + i915_handle_vblank(dev_priv, plane, pipe, iir)) flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, @@ -4307,7 +4264,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) } if (blc_event || (iir & I915_ASLE_INTERRUPT)) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); /* With MSI, interrupts are only generated when iir * transitions from zero to nonzero. If another bit got @@ -4391,7 +4348,7 @@ static int i965_irq_postinstall(struct drm_device *dev) I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); enable_mask |= I915_USER_INTERRUPT; - if (IS_G4X(dev)) + if (IS_G4X(dev_priv)) enable_mask |= I915_BSD_USER_INTERRUPT; /* Interrupt setup is already guaranteed to be single-threaded, this is @@ -4406,7 +4363,7 @@ static int i965_irq_postinstall(struct drm_device *dev) * Enable some error detection, note the instruction error mask * bit is reserved, so we leave it masked. */ - if (IS_G4X(dev)) { + if (IS_G4X(dev_priv)) { error_mask = ~(GM45_ERROR_PAGE_TABLE | GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV | @@ -4424,26 +4381,25 @@ static int i965_irq_postinstall(struct drm_device *dev) i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); POSTING_READ(PORT_HOTPLUG_EN); - i915_enable_asle_pipestat(dev); + i915_enable_asle_pipestat(dev_priv); return 0; } -static void i915_hpd_irq_setup(struct drm_device *dev) +static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_en; assert_spin_locked(&dev_priv->irq_lock); /* Note HDMI and DP share hotplug bits */ /* enable bits are the same for all generations */ - hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915); + hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); /* Programming the CRT detection parameters tends to generate a spurious hotplug event about three seconds later. So just do it once. 
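(i.e. CRT_HOTPLUG_ACTIVATION_PERIOD_64 is set once here in the hpd setup path, rather than on every detect cycle.)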
*/ - if (IS_G4X(dev)) + if (IS_G4X(dev_priv)) hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; @@ -4510,7 +4466,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) if (iir & I915_DISPLAY_PORT_INTERRUPT) { u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); if (hotplug_status) - i9xx_hpd_irq_handler(dev, hotplug_status); + i9xx_hpd_irq_handler(dev_priv, hotplug_status); } I915_WRITE(IIR, iir & ~flip_mask); @@ -4523,24 +4479,24 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && - i915_handle_vblank(dev, pipe, pipe, iir)) + i915_handle_vblank(dev_priv, pipe, pipe, iir)) flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } if (blc_event || (iir & I915_ASLE_INTERRUPT)) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); /* With MSI, interrupts are only generated when iir * transitions from zero to nonzero. If another bit got @@ -4611,6 +4567,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv) else dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; + dev_priv->rps.pm_intr_keep = 0; + + /* + * SNB and IVB can hard hang, and VLV and CHV may hard hang, on a + * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked. + * + * TODO: verify if this can be reproduced on VLV,CHV. + */ + if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) + dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED; + + if (INTEL_INFO(dev_priv)->gen >= 8) + dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP; + INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, i915_hangcheck_elapsed); @@ -4674,12 +4644,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->disable_vblank = ironlake_disable_vblank; dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; } else { - if (INTEL_INFO(dev_priv)->gen == 2) { + if (IS_GEN2(dev_priv)) { dev->driver->irq_preinstall = i8xx_irq_preinstall; dev->driver->irq_postinstall = i8xx_irq_postinstall; dev->driver->irq_handler = i8xx_irq_handler; dev->driver->irq_uninstall = i8xx_irq_uninstall; - } else if (INTEL_INFO(dev_priv)->gen == 3) { + } else if (IS_GEN3(dev_priv)) { dev->driver->irq_preinstall = i915_irq_preinstall; dev->driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_uninstall = i915_irq_uninstall; diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 1779f02e6df8..7effe68d552c 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -54,10 +54,13 @@ struct i915_params i915 __read_mostly = { .verbose_state_checks = 1, .nuclear_pageflip = 0, .edp_vswing = 0, - .enable_guc_submission = false, + .enable_guc_loading = -1, + .enable_guc_submission = -1, .guc_log_level = -1, .enable_dp_mst = true, .inject_load_failure = 0, + .enable_dpcd_backlight = false, + .enable_gvt = false, }; module_param_named(modeset, i915.modeset, int, 0400); @@ -197,8 +200,15 @@ MODULE_PARM_DESC(edp_vswing, "(0=use value from vbt [default], 1=low power swing(200mV)," "2=default swing(400mV))");
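The rps.pm_intr_keep bits collected in intel_irq_init() above only help if every update of the GEN6_PMINTRMSK register filters them back out. A minimal sketch of such a helper, assuming this form purely for illustration (the actual consumer lives in intel_pm.c, outside this diff):

static u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv,
				     u32 mask)
{
	/* Bits in pm_intr_keep must stay unmasked in GEN6_PMINTRMSK. */
	return mask & ~dev_priv->rps.pm_intr_keep;
}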
-module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400); -MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)"); +module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400); +MODULE_PARM_DESC(enable_guc_loading, + "Enable GuC firmware loading " + "(-1=auto [default], 0=never, 1=if available, 2=required)"); + +module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400); +MODULE_PARM_DESC(enable_guc_submission, + "Enable GuC submission " + "(-1=auto [default], 0=never, 1=if available, 2=required)"); module_param_named(guc_log_level, i915.guc_log_level, int, 0400); MODULE_PARM_DESC(guc_log_level, @@ -210,3 +220,10 @@ MODULE_PARM_DESC(enable_dp_mst, module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); MODULE_PARM_DESC(inject_load_failure, "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); +module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600); +MODULE_PARM_DESC(enable_dpcd_backlight, + "Enable support for DPCD backlight control (default:false)"); + +module_param_named(enable_gvt, i915.enable_gvt, bool, 0600); +MODULE_PARM_DESC(enable_gvt, + "Enable Intel GVT-g graphics virtualization host support (default:false)"); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 02bc27804291..0ad020b4a925 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -45,6 +45,8 @@ struct i915_params { int enable_ips; int invert_brightness; int enable_cmd_parser; + int enable_guc_loading; + int enable_guc_submission; int guc_log_level; int use_mmio_flip; int mmio_debug; @@ -57,10 +59,11 @@ struct i915_params { bool load_detect_test; bool reset; bool disable_display; - bool enable_guc_submission; bool verbose_state_checks; bool nuclear_pageflip; bool enable_dp_mst; + bool enable_dpcd_backlight; + bool enable_gvt; }; extern struct i915_params i915 __read_mostly; diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h new file mode 100644 index 000000000000..c0cb2974caac --- /dev/null +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -0,0 +1,113 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + +#ifndef _I915_PVINFO_H_ +#define _I915_PVINFO_H_ + +/* The MMIO offset of the shared info between guest and host emulator */ +#define VGT_PVINFO_PAGE 0x78000 +#define VGT_PVINFO_SIZE 0x1000 + +/* + * The following structure pages are defined in GEN MMIO space + * for virtualization. (One page for now) + */ +#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */ +#define VGT_VERSION_MAJOR 1 +#define VGT_VERSION_MINOR 0 + +#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor)) +#define INTEL_VGT_IF_VERSION \ + INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) + +/* + * notifications from guest to vgpu device model + */ +enum vgt_g2v_type { + VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2, + VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY, + VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE, + VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY, + VGT_G2V_EXECLIST_CONTEXT_CREATE, + VGT_G2V_EXECLIST_CONTEXT_DESTROY, + VGT_G2V_MAX, +}; + +struct vgt_if { + u64 magic; /* VGT_MAGIC */ + uint16_t version_major; + uint16_t version_minor; + u32 vgt_id; /* ID of vGT instance */ + u32 rsv1[12]; /* pad to offset 0x40 */ + /* + * Data structure to describe the ballooning info of resources. + * Each VM can only have one contiguous area for now. + * (May support scattered resources in the future) + * (starting from offset 0x40) + */ + struct { + /* Aperture register ballooning */ + struct { + u32 base; + u32 size; + } mappable_gmadr; /* aperture */ + /* GMADR register ballooning */ + struct { + u32 base; + u32 size; + } nonmappable_gmadr; /* non aperture */ + /* allowed fence registers */ + u32 fence_num; + u32 rsv2[3]; + } avail_rs; /* available/assigned resource */ + u32 rsv3[0x200 - 24]; /* pad to half page */ + /* + * The bottom half page is for response from Gfx driver to hypervisor. + */ + u32 rsv4; + u32 display_ready; /* ready for display owner switch */ + + u32 rsv5[4]; + + u32 g2v_notify; + u32 rsv6[7]; + + struct { + u32 lo; + u32 hi; + } pdp[4]; + + u32 execlist_context_descriptor_lo; + u32 execlist_context_descriptor_hi; + + u32 rsv7[0x200 - 24]; /* pad to one page */ +} __packed; + +#define vgtif_reg(x) \ + _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))) + +/* vGPU display status to be used by the host side */ +#define VGT_DRV_DISPLAY_NOT_READY 0 +#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */ + +#endif /* _I915_PVINFO_H_ */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b407411e31ba..c6bfbf8d7cca 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -220,6 +220,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ECOCHK_PPGTT_WT_HSW (0x2<<3) #define ECOCHK_PPGTT_WB_HSW (0x3<<3) +#define GEN8_CONFIG0 _MMIO(0xD00) +#define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1) + #define GAC_ECO_BITS _MMIO(0x14090) #define ECOBITS_SNB_BIT (1<<13) #define ECOBITS_PPGTT_CACHE64B (3<<8) @@ -442,6 +445,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) */ #define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) +#define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4) +#define GEN9_MEDIA_POOL_ENABLE (1 << 31) #define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) #define SC_UPDATE_SCISSOR (0x1<<1) @@ -713,6 +718,9 @@ enum skl_disp_power_wells { /* Not actual bit groups.
Used as IDs for lookup_power_well() */ SKL_DISP_PW_ALWAYS_ON, SKL_DISP_PW_DC_OFF, + + BXT_DPIO_CMN_A, + BXT_DPIO_CMN_BC, }; #define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2)) @@ -886,7 +894,7 @@ enum skl_disp_power_wells { * PLLs can be routed to any transcoder A/B/C. * * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is - * digital port D (CHV) or port A (BXT). + * digital port D (CHV) or port A (BXT). :: * * * Dual channel PHY (VLV/CHV/BXT) @@ -1273,6 +1281,15 @@ enum skl_disp_power_wells { #define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090) #define GT_DISPLAY_POWER_ON(phy) (1 << (phy)) +#define _BXT_PHY_CTL_DDI_A 0x64C00 +#define _BXT_PHY_CTL_DDI_B 0x64C10 +#define _BXT_PHY_CTL_DDI_C 0x64C20 +#define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10) +#define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9) +#define BXT_PHY_LANE_ENABLED (1 << 8) +#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \ + _BXT_PHY_CTL_DDI_B) + #define _PHY_CTL_FAMILY_EDP 0x64C80 #define _PHY_CTL_FAMILY_DDI 0x64C90 #define COMMON_RESET_DIS (1 << 31) @@ -1669,6 +1686,9 @@ enum skl_disp_power_wells { #define GEN7_TLB_RD_ADDR _MMIO(0x4700) +#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) +#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28) + #if 0 #define PRB0_TAIL _MMIO(0x2030) #define PRB0_HEAD _MMIO(0x2034) @@ -1804,6 +1824,10 @@ enum skl_disp_power_wells { #define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2)) #define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2)) +/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */ +#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4) +#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2) + /* WaClearTdlStateAckDirtyBits */ #define GEN8_STATE_ACK _MMIO(0x20F0) #define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8) @@ -2161,6 +2185,9 @@ enum skl_disp_power_wells { #define FBC_LL_SIZE (1536) +#define FBC_LLC_READ_CTRL _MMIO(0x9044) +#define FBC_LLC_FULLY_OPEN (1<<30) + /* Framebuffer compression for GM45+ */ #define DPFC_CB_BASE _MMIO(0x3200) #define DPFC_CONTROL _MMIO(0x3208) @@ -2200,6 +2227,8 @@ enum skl_disp_power_wells { #define ILK_DPFC_STATUS _MMIO(0x43210) #define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) #define ILK_DPFC_CHICKEN _MMIO(0x43224) +#define ILK_DPFC_DISABLE_DUMMY0 (1<<8) +#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23) #define ILK_FBC_RT_BASE _MMIO(0x2128) #define ILK_FBC_RT_VALID (1<<0) #define SNB_FBC_FRONT_BUFFER (1<<1) @@ -2449,6 +2478,8 @@ enum skl_disp_power_wells { #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 +#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024) + #define _FPA0 0x6040 #define _FPA1 0x6044 #define _FPB0 0x6048 @@ -3020,6 +3051,18 @@ enum skl_disp_power_wells { /* Same as Haswell, but 72064 bytes now. 
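(with 4 KiB pages, GEN8_CXT_TOTAL_SIZE below rounds this up to 18 whole pages, i.e. 73728 bytes.)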
*/ #define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) +enum { + INTEL_ADVANCED_CONTEXT = 0, + INTEL_LEGACY_32B_CONTEXT, + INTEL_ADVANCED_AD_CONTEXT, + INTEL_LEGACY_64B_CONTEXT +}; + +#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3 +#define GEN8_CTX_ADDRESSING_MODE(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\ + INTEL_LEGACY_64B_CONTEXT : \ + INTEL_LEGACY_32B_CONTEXT) + #define CHV_CLK_CTL1 _MMIO(0x101100) #define VLV_CLK_CTL2 _MMIO(0x101104) #define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 @@ -6031,6 +6074,10 @@ enum skl_disp_power_wells { #define CHICKEN_PAR1_1 _MMIO(0x42080) #define DPA_MASK_VBLANK_SRD (1 << 15) #define FORCE_ARB_IDLE_PLANES (1 << 14) +#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3) + +#define CHICKEN_PAR2_1 _MMIO(0x42090) +#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14) #define _CHICKEN_PIPESL_1_A 0x420b0 #define _CHICKEN_PIPESL_1_B 0x420b4 @@ -6039,6 +6086,7 @@ enum skl_disp_power_wells { #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) #define DISP_ARB_CTL _MMIO(0x45000) +#define DISP_FBC_MEMORY_WAKE (1<<31) #define DISP_TILE_SURFACE_SWIZZLING (1<<13) #define DISP_FBC_WM_DIS (1<<15) #define DISP_ARB_CTL2 _MMIO(0x45004) @@ -6052,6 +6100,9 @@ enum skl_disp_power_wells { #define HSW_NDE_RSTWRN_OPT _MMIO(0x46408) #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) +#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) +#define MASK_WAKEMEM (1<<13) + #define SKL_DFSM _MMIO(0x51000) #define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) #define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23) @@ -6067,8 +6118,10 @@ enum skl_disp_power_wells { #define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4) #define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) +#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1<<10) #define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) +#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) #define GEN8_CS_CHICKEN1 _MMIO(0x2580) /* GEN7 chicken */ @@ -6076,6 +6129,7 @@ enum skl_disp_power_wells { # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) # define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) #define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) +# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8) # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) #define HIZ_CHICKEN _MMIO(0x7018) @@ -6089,7 +6143,14 @@ enum skl_disp_power_wells { #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 #define GEN8_L3SQCREG1 _MMIO(0xB100) -#define BDW_WA_L3SQCREG1_DEFAULT 0x784000 +/* + * Note that on CHV the following has an off-by-one error wrt. to BSpec. + * Using the formula in BSpec leads to a hang, while the formula here works + * fine and matches the formulas for all other platforms. A BSpec change + * request has been filed to clarify this. 
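+ * + * As a quick check of the encoding: 0x784000 (the BDW_WA_L3SQCREG1_DEFAULT value removed above) == L3_GENERAL_PRIO_CREDITS(30) | L3_HIGH_PRIO_CREDITS(2), since (30 >> 1) << 19 == 0x780000 and (2 >> 1) << 14 == 0x4000.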
+ */ +#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) +#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) #define GEN7_L3CNTLREG1 _MMIO(0xB01C) #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C @@ -6921,6 +6982,7 @@ enum skl_disp_power_wells { #define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3) #define GEN6_UCGCTL1 _MMIO(0x9400) +# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22) # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16) # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) # define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) @@ -6937,6 +6999,7 @@ enum skl_disp_power_wells { #define GEN7_UCGCTL4 _MMIO(0x940c) #define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) +#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14) #define GEN6_RCGCTL1 _MMIO(0x9410) #define GEN6_RCGCTL2 _MMIO(0x9414) @@ -7021,7 +7084,7 @@ enum skl_disp_power_wells { #define VLV_RCEDATA _MMIO(0xA0BC) #define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0) #define GEN6_PMINTRMSK _MMIO(0xA168) -#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) +#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) #define VLV_PWRDWNUPCTL _MMIO(0xA294) #define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4) #define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8) @@ -7557,14 +7620,15 @@ enum skl_disp_power_wells { #define CDCLK_FREQ_540 (1<<26) #define CDCLK_FREQ_337_308 (2<<26) #define CDCLK_FREQ_675_617 (3<<26) -#define CDCLK_FREQ_DECIMAL_MASK (0x7ff) - #define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22) #define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22) #define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22) #define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) #define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) +#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20) +#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) +#define CDCLK_FREQ_DECIMAL_MASK (0x7ff) /* LCPLL_CTL */ #define LCPLL1_CTL _MMIO(0x46010) @@ -8140,6 +8204,8 @@ enum skl_disp_power_wells { #define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) #define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) #define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) +#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9) +#define BXT_DPHY_DEFEATURE_EN (1 << 8) #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 2d576b7ff299..02507bfc8def 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -43,7 +43,7 @@ static u32 calc_residency(struct drm_device *dev, u64 units = 128ULL, div = 100000ULL; u32 ret; - if (!intel_enable_rc6(dev)) + if (!intel_enable_rc6()) return 0; intel_runtime_pm_get(dev_priv); @@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev, static ssize_t show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) { - struct drm_minor *dminor = dev_to_drm_minor(kdev); - return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev)); + return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6()); } static ssize_t @@ -204,7 +203,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj, struct drm_minor *dminor = dev_to_drm_minor(dev); struct drm_device *drm_dev = dminor->dev; struct drm_i915_private *dev_priv = drm_dev->dev_private; - struct intel_context *ctx; + struct i915_gem_context *ctx; u32 *temp = NULL; /* Just here to make handling failures easy */ int slice = (int)(uintptr_t)attr->private; int ret; @@ -397,7 
+396,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, /* We still need *_set_rps to process the new max_delay and * update the interrupt limits and PMINTRMSK even though * frequency request may be unchanged. */ - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); @@ -461,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, /* We still need *_set_rps to process the new min_delay and * update the interrupt limits and PMINTRMSK even though * frequency request may be unchanged. */ - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index dc0def210097..6768db032f84 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to, ), TP_fast_assign( - __entry->dev = from->dev->primary->index; + __entry->dev = from->i915->dev->primary->index; __entry->sync_from = from->id; __entry->sync_to = to_req->engine->id; __entry->seqno = i915_gem_request_get_seqno(req); @@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch, ), TP_fast_assign( - struct intel_engine_cs *engine = - i915_gem_request_get_engine(req); - __entry->dev = engine->dev->primary->index; - __entry->ring = engine->id; - __entry->seqno = i915_gem_request_get_seqno(req); + __entry->dev = req->i915->dev->primary->index; + __entry->ring = req->engine->id; + __entry->seqno = req->seqno; __entry->flags = flags; - i915_trace_irq_get(engine, req); + i915_trace_irq_get(req->engine, req); ), TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", @@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush, ), TP_fast_assign( - __entry->dev = req->engine->dev->primary->index; + __entry->dev = req->i915->dev->primary->index; __entry->ring = req->engine->id; __entry->invalidate = invalidate; __entry->flush = flush; @@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request, ), TP_fast_assign( - struct intel_engine_cs *engine = - i915_gem_request_get_engine(req); - __entry->dev = engine->dev->primary->index; - __entry->ring = engine->id; - __entry->seqno = i915_gem_request_get_seqno(req); + __entry->dev = req->i915->dev->primary->index; + __entry->ring = req->engine->id; + __entry->seqno = req->seqno; ), TP_printk("dev=%u, ring=%u, seqno=%u", @@ -560,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify, ), TP_fast_assign( - __entry->dev = engine->dev->primary->index; + __entry->dev = engine->i915->dev->primary->index; __entry->ring = engine->id; __entry->seqno = engine->get_seqno(engine); ), @@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin, * less desirable. */ TP_fast_assign( - struct intel_engine_cs *engine = - i915_gem_request_get_engine(req); - __entry->dev = engine->dev->primary->index; - __entry->ring = engine->id; - __entry->seqno = i915_gem_request_get_seqno(req); + __entry->dev = req->i915->dev->primary->index; + __entry->ring = req->engine->id; + __entry->seqno = req->seqno; __entry->blocking = - mutex_is_locked(&engine->dev->struct_mutex); + mutex_is_locked(&req->i915->dev->struct_mutex); ), TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", @@ -740,12 +734,12 @@ DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release, * the context. 
*/ DECLARE_EVENT_CLASS(i915_context, - TP_PROTO(struct intel_context *ctx), + TP_PROTO(struct i915_gem_context *ctx), TP_ARGS(ctx), TP_STRUCT__entry( __field(u32, dev) - __field(struct intel_context *, ctx) + __field(struct i915_gem_context *, ctx) __field(struct i915_address_space *, vm) ), @@ -760,12 +754,12 @@ DECLARE_EVENT_CLASS(i915_context, ) DEFINE_EVENT(i915_context, i915_context_create, - TP_PROTO(struct intel_context *ctx), + TP_PROTO(struct i915_gem_context *ctx), TP_ARGS(ctx) ); DEFINE_EVENT(i915_context, i915_context_free, - TP_PROTO(struct intel_context *ctx), + TP_PROTO(struct i915_gem_context *ctx), TP_ARGS(ctx) ); @@ -777,13 +771,13 @@ DEFINE_EVENT(i915_context, i915_context_free, * called only if full ppgtt is enabled. */ TRACE_EVENT(switch_mm, - TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to), + TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to), TP_ARGS(engine, to), TP_STRUCT__entry( __field(u32, ring) - __field(struct intel_context *, to) + __field(struct i915_gem_context *, to) __field(struct i915_address_space *, vm) __field(u32, dev) ), @@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm, __entry->ring = engine->id; __entry->to = to; __entry->vm = to->ppgtt? &to->ppgtt->base : NULL; - __entry->dev = engine->dev->primary->index; + __entry->dev = engine->i915->dev->primary->index; ), TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index d02efb8cad4d..f6acb5a0e701 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -53,20 +53,19 @@ /** * i915_check_vgpu - detect virtual GPU - * @dev: drm device * + * @dev_priv: i915 device private * * This function is called at the initialization stage, to detect whether * running on a vGPU. */ -void i915_check_vgpu(struct drm_device *dev) +void i915_check_vgpu(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); uint64_t magic; uint32_t version; BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); - if (!IS_HASWELL(dev)) + if (!IS_HASWELL(dev_priv)) return; magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); @@ -102,10 +101,13 @@ static struct _balloon_info_ bl_info; * This function is called to deallocate the ballooned-out graphic memory, when * driver is unloaded or when ballooning fails. */ -void intel_vgt_deballoon(void) +void intel_vgt_deballoon(struct drm_i915_private *dev_priv) { int i; + if (!intel_vgpu_active(dev_priv)) + return; + DRM_DEBUG("VGT deballoon.\n"); for (i = 0; i < 4; i++) { @@ -151,36 +153,35 @@ static int vgt_balloon_space(struct drm_mm *mm, * of its graphic space being zero. Yet there are some portions ballooned out( * the shadow part, which are marked as reserved by drm allocator). From the * host point of view, the graphic address space is partitioned by multiple - * vGPUs in different VMs. + * vGPUs in different VMs. 
:: * * vGPU1 view Host view * 0 ------> +-----------+ +-----------+ - * ^ |///////////| | vGPU3 | - * | |///////////| +-----------+ - * | |///////////| | vGPU2 | + * ^ |###########| | vGPU3 | + * | |###########| +-----------+ + * | |###########| | vGPU2 | * | +-----------+ +-----------+ * mappable GM | available | ==> | vGPU1 | * | +-----------+ +-----------+ - * | |///////////| | | - * v |///////////| | Host | + * | |###########| | | + * v |###########| | Host | * +=======+===========+ +===========+ - * ^ |///////////| | vGPU3 | - * | |///////////| +-----------+ - * | |///////////| | vGPU2 | + * ^ |###########| | vGPU3 | + * | |###########| +-----------+ + * | |###########| | vGPU2 | * | +-----------+ +-----------+ * unmappable GM | available | ==> | vGPU1 | * | +-----------+ +-----------+ - * | |///////////| | | - * | |///////////| | Host | - * v |///////////| | | + * | |###########| | | + * | |###########| | Host | + * v |###########| | | * total GM size ------> +-----------+ +-----------+ * * Returns: * zero on success, non-zero if configuration invalid or ballooning failed */ -int intel_vgt_balloon(struct drm_device *dev) +int intel_vgt_balloon(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned long ggtt_end = ggtt->base.start + ggtt->base.total; @@ -188,6 +189,9 @@ int intel_vgt_balloon(struct drm_device *dev) unsigned long unmappable_base, unmappable_size, unmappable_end; int ret; + if (!intel_vgpu_active(dev_priv)) + return 0; + mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base)); mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size)); unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base)); @@ -259,6 +263,6 @@ int intel_vgt_balloon(struct drm_device *dev) err: DRM_ERROR("VGT balloon fail\n"); - intel_vgt_deballoon(); + intel_vgt_deballoon(dev_priv); return ret; } diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index 3c83b47b5f69..3c3b2d24e830 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -24,94 +24,10 @@ #ifndef _I915_VGPU_H_ #define _I915_VGPU_H_ -/* The MMIO offset of the shared info between guest and host emulator */ -#define VGT_PVINFO_PAGE 0x78000 -#define VGT_PVINFO_SIZE 0x1000 +#include "i915_pvinfo.h" -/* - * The following structure pages are defined in GEN MMIO space - * for virtualization. (One page for now) - */ -#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */ -#define VGT_VERSION_MAJOR 1 -#define VGT_VERSION_MINOR 0 - -#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor)) -#define INTEL_VGT_IF_VERSION \ - INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) - -/* - * notifications from guest to vgpu device model - */ -enum vgt_g2v_type { - VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2, - VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY, - VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE, - VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY, - VGT_G2V_EXECLIST_CONTEXT_CREATE, - VGT_G2V_EXECLIST_CONTEXT_DESTROY, - VGT_G2V_MAX, -}; - -struct vgt_if { - uint64_t magic; /* VGT_MAGIC */ - uint16_t version_major; - uint16_t version_minor; - uint32_t vgt_id; /* ID of vGT instance */ - uint32_t rsv1[12]; /* pad to offset 0x40 */ - /* - * Data structure to describe the balooning info of resources. - * Each VM can only have one portion of continuous area for now. 
- * (May support scattered resource in future) - * (starting from offset 0x40) - */ - struct { - /* Aperture register balooning */ - struct { - uint32_t base; - uint32_t size; - } mappable_gmadr; /* aperture */ - /* GMADR register balooning */ - struct { - uint32_t base; - uint32_t size; - } nonmappable_gmadr; /* non aperture */ - /* allowed fence registers */ - uint32_t fence_num; - uint32_t rsv2[3]; - } avail_rs; /* available/assigned resource */ - uint32_t rsv3[0x200 - 24]; /* pad to half page */ - /* - * The bottom half page is for response from Gfx driver to hypervisor. - */ - uint32_t rsv4; - uint32_t display_ready; /* ready for display owner switch */ - - uint32_t rsv5[4]; - - uint32_t g2v_notify; - uint32_t rsv6[7]; - - struct { - uint32_t lo; - uint32_t hi; - } pdp[4]; - - uint32_t execlist_context_descriptor_lo; - uint32_t execlist_context_descriptor_hi; - - uint32_t rsv7[0x200 - 24]; /* pad to one page */ -} __packed; - -#define vgtif_reg(x) \ - _MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x)) - -/* vGPU display status to be used by the host side */ -#define VGT_DRV_DISPLAY_NOT_READY 0 -#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */ - -extern void i915_check_vgpu(struct drm_device *dev); -extern int intel_vgt_balloon(struct drm_device *dev); -extern void intel_vgt_deballoon(void); +void i915_check_vgpu(struct drm_i915_private *dev_priv); +int intel_vgt_balloon(struct drm_i915_private *dev_priv); +void intel_vgt_deballoon(struct drm_i915_private *dev_priv); #endif /* _I915_VGPU_H_ */ diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 50ff90aea721..c5a166752eda 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -191,7 +191,7 @@ int intel_atomic_setup_scalers(struct drm_device *dev, /* plane scaler case: assign as a plane scaler */ /* find the plane that set the bit as scaler_user */ - plane = drm_state->planes[i]; + plane = drm_state->planes[i].ptr; /* * to enable/disable hq mode, add planes that are using scaler @@ -223,7 +223,8 @@ int intel_atomic_setup_scalers(struct drm_device *dev, continue; } - plane_state = to_intel_plane_state(drm_state->plane_states[i]); + plane_state = intel_atomic_get_existing_plane_state(drm_state, + intel_plane); scaler_id = &plane_state->scaler_id; } diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 02a7527ce7bb..b9329c2a670a 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev, static int i915_audio_component_get_cdclk_freq(struct device *dev) { struct drm_i915_private *dev_priv = dev_to_i915(dev); - int ret; if (WARN_ON_ONCE(!HAS_DDI(dev_priv))) return -ENODEV; - intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); - ret = dev_priv->display.get_display_clock_speed(dev_priv->dev); - - intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO); - - return ret; + return dev_priv->cdclk_freq; } static int i915_audio_component_sync_audio_rate(struct device *dev, diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b235b6e88ead..da5ed4a850b9 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, else panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; + panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) | + 
dvo_timing->himage_lo; + panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) | + dvo_timing->vimage_lo; + /* Some VBTs have bogus h/vtotal values */ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; @@ -213,7 +218,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, dev_priv->vbt.lvds_dither = lvds_options->pixel_dither; - ret = intel_opregion_get_panel_type(dev_priv->dev); + ret = intel_opregion_get_panel_type(dev_priv); if (ret >= 0) { WARN_ON(ret > 0xf); panel_type = ret; @@ -318,6 +323,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, return; } + dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; + if (bdb->version >= 191 && + get_blocksize(backlight_data) >= sizeof(*backlight_data)) { + const struct bdb_lfp_backlight_control_method *method; + + method = &backlight_data->backlight_control[panel_type]; + dev_priv->vbt.backlight.type = method->type; + } + dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; dev_priv->vbt.backlight.min_brightness = entry->min_brightness; @@ -763,6 +777,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv, return; } + /* + * These fields were introduced in VBT version 197, so make sure + * they are zeroed out for earlier versions. + */ + if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) { + dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0; + dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0; + } + /* We have mandatory mipi config blocks. Initialize as generic panel */ dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; } @@ -1187,7 +1211,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, } if (bdb->version < 106) { expected_size = 22; - } else if (bdb->version < 109) { + } else if (bdb->version < 111) { expected_size = 27; } else if (bdb->version < 195) { BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); @@ -1546,6 +1570,45 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin) } /** + * intel_bios_is_port_present - is the specified digital port present + * @dev_priv: i915 device instance + * @port: port to check + * + * Return true if the device in %port is present. + */ +bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port) +{ + static const struct { + u16 dp, hdmi; + } port_mapping[] = { + [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, + [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, + [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, + [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, + }; + int i; + + /* FIXME maybe deal with port A as well?
*/ + if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) + return false; + + if (!dev_priv->vbt.child_dev_num) + return false; + + for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { + const union child_device_config *p_child = + &dev_priv->vbt.child_dev[i]; + if ((p_child->common.dvo_port == port_mapping[port].dp || + p_child->common.dvo_port == port_mapping[port].hdmi) && + (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING | + DEVICE_TYPE_DISPLAYPORT_OUTPUT))) + return true; + } + + return false; +} + +/** * intel_bios_is_port_edp - is the device in given port eDP * @dev_priv: i915 device instance * @port: port to check diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index ab0ea315eddb..8405b5a367d7 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -30,6 +30,14 @@ #ifndef _INTEL_BIOS_H_ #define _INTEL_BIOS_H_ +enum intel_backlight_type { + INTEL_BACKLIGHT_PMIC, + INTEL_BACKLIGHT_LPSS, + INTEL_BACKLIGHT_DISPLAY_DDI, + INTEL_BACKLIGHT_DSI_DCS, + INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE, +}; + struct edp_power_seq { u16 t1_t3; u16 t8; @@ -113,7 +121,13 @@ struct mipi_config { u16 dual_link:2; u16 lane_cnt:2; u16 pixel_overlap:3; - u16 rsvd3:9; + u16 rgb_flip:1; +#define DL_DCS_PORT_A 0x00 +#define DL_DCS_PORT_C 0x01 +#define DL_DCS_PORT_A_AND_C 0x02 + u16 dl_dcs_cabc_ports:2; + u16 dl_dcs_backlight_ports:2; + u16 rsvd3:4; u16 rsvd4; diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 1b3f97449395..522f5a2de015 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -547,7 +547,8 @@ void intel_color_init(struct drm_crtc *crtc) /* Enable color management support when we have degamma & gamma LUTs. 
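(drm_crtc_enable_color_mgmt() also takes a has_ctm flag, which is passed as true in the call below.)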
*/ if (INTEL_INFO(dev)->color.degamma_lut_size != 0 && INTEL_INFO(dev)->color.gamma_lut_size != 0) - drm_helper_crtc_enable_color_mgmt(crtc, + drm_crtc_enable_color_mgmt(crtc, INTEL_INFO(dev)->color.degamma_lut_size, + true, INTEL_INFO(dev)->color.gamma_lut_size); } diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 3fbb6fc66451..e115bcc6766f 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -743,6 +743,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = intel_crt_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .early_unregister = intel_connector_unregister, .destroy = intel_crt_destroy, .set_property = intel_crt_set_property, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, @@ -753,7 +754,6 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { .mode_valid = intel_crt_mode_valid, .get_modes = intel_crt_get_modes, - .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_crt_enc_funcs = { @@ -839,7 +839,7 @@ void intel_crt_init(struct drm_device *dev) &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, - DRM_MODE_ENCODER_DAC, NULL); + DRM_MODE_ENCODER_DAC, "CRT"); intel_connector_attach_encoder(intel_connector, &crt->base); @@ -876,7 +876,6 @@ void intel_crt_init(struct drm_device *dev) crt->base.get_hw_state = intel_crt_get_hw_state; } intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector->unregister = intel_connector_unregister; drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index a34c23eceba0..2b3b428d9cd2 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -41,16 +41,22 @@ * be moved to FW_FAILED. */ +#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin" +MODULE_FIRMWARE(I915_CSR_KBL); +#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) + #define I915_CSR_SKL "i915/skl_dmc_ver1.bin" +MODULE_FIRMWARE(I915_CSR_SKL); +#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) + #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin" +MODULE_FIRMWARE(I915_CSR_BXT); +#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) #define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares" -MODULE_FIRMWARE(I915_CSR_SKL); -MODULE_FIRMWARE(I915_CSR_BXT); -#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) -#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) + #define CSR_MAX_FW_SIZE 0x2FFF #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF @@ -169,12 +175,10 @@ struct stepping_info { char substepping; }; -/* - * Kabylake derivated from Skylake H0, so SKL H0 - * is the right firmware for KBL A0 (revid 0). 
- */ static const struct stepping_info kbl_stepping_info[] = { - {'H', '0'}, {'I', '0'} + {'A', '0'}, {'B', '0'}, {'C', '0'}, + {'D', '0'}, {'E', '0'}, {'F', '0'}, + {'G', '0'}, {'H', '0'}, {'I', '0'}, }; static const struct stepping_info skl_stepping_info[] = { @@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, csr->version = css_header->version; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_KABYLAKE(dev_priv)) { + required_min_version = KBL_CSR_VERSION_REQUIRED; + } else if (IS_SKYLAKE(dev_priv)) { required_min_version = SKL_CSR_VERSION_REQUIRED; } else if (IS_BROXTON(dev_priv)) { required_min_version = BXT_CSR_VERSION_REQUIRED; @@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) if (!HAS_CSR(dev_priv)) return; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_KABYLAKE(dev_priv)) + csr->fw_path = I915_CSR_KBL; + else if (IS_SKYLAKE(dev_priv)) csr->fw_path = I915_CSR_SKL; else if (IS_BROXTON(dev_priv)) csr->fw_path = I915_CSR_BXT; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 01e523df363b..ad3b0ee5e55b 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv, { struct intel_shared_dpll *pll; struct intel_dpll_hw_state *state; - intel_clock_t clock; + struct dpll clock; /* For DDI ports we always use a shared PLL. */ if (WARN_ON(dpll == DPLL_ID_PRIVATE)) @@ -1342,6 +1342,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); out: + if (ret && IS_BROXTON(dev_priv)) { + tmp = I915_READ(BXT_PHY_CTL(port)); + if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK | + BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) + DRM_ERROR("Port %c enabled but PHY powered down? " + "(PHY_CTL %08x)\n", port_name(port), tmp); + } + intel_display_power_put(dev_priv, power_domain); return ret; @@ -1742,9 +1750,11 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) } } -static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, + enum dpio_phy phy) { + enum port port; + if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy))) return false; @@ -1770,38 +1780,48 @@ static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv, return false; } + for_each_port_masked(port, + phy == DPIO_PHY0 ? 
BIT(PORT_B) | BIT(PORT_C) : + BIT(PORT_A)) { + u32 tmp = I915_READ(BXT_PHY_CTL(port)); + + if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) { + DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane " + "for port %c powered down " + "(PHY_CTL %08x)\n", + phy, port_name(port), tmp); + + return false; + } + } + return true; } -static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) +static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) { u32 val = I915_READ(BXT_PORT_REF_DW6(phy)); return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT; } -static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, + enum dpio_phy phy) { if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10)) DRM_ERROR("timeout waiting for PHY%d GRC\n", phy); } -static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, - enum dpio_phy phy); - -static void broxton_phy_init(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) { - enum port port; - u32 ports, val; + u32 val; - if (broxton_phy_is_enabled(dev_priv, phy)) { + if (bxt_ddi_phy_is_enabled(dev_priv, phy)) { /* Still read out the GRC value for state verification */ if (phy == DPIO_PHY0) - dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy); + dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy); - if (broxton_phy_verify_state(dev_priv, phy)) { + if (bxt_ddi_phy_verify_state(dev_priv, phy)) { DRM_DEBUG_DRIVER("DDI PHY %d already enabled, " "won't reprogram it\n", phy); @@ -1810,8 +1830,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, " "force reprogramming it\n", phy); - } else { - DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy); } val = I915_READ(BXT_P_CR_GT_DISP_PWRON); @@ -1831,28 +1849,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, DRM_ERROR("timeout during PHY%d power on\n", phy); } - if (phy == DPIO_PHY0) - ports = BIT(PORT_B) | BIT(PORT_C); - else - ports = BIT(PORT_A); - - for_each_port_masked(port, ports) { - int lane; - - for (lane = 0; lane < 4; lane++) { - val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane)); - /* - * Note that on CHV this flag is called UPAR, but has - * the same function. - */ - val &= ~LATENCY_OPTIM; - if (lane != 1) - val |= LATENCY_OPTIM; - - I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val); - } - } - /* Program PLL Rcomp code offset */ val = I915_READ(BXT_PORT_CL1CM_DW9(phy)); val &= ~IREF0RC_OFFSET_MASK; @@ -1899,10 +1895,7 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, * the corresponding calibrated value from PHY1, and disable * the automatic calibration on PHY0. */ - broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1); - - val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, - DPIO_PHY1); + val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1); grc_code = val << GRC_CODE_FAST_SHIFT | val << GRC_CODE_SLOW_SHIFT | val; @@ -1912,31 +1905,16 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, val |= GRC_DIS | GRC_RDY_OVRD; I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val); } - /* - * During PHY1 init delay waiting for GRC calibration to finish, since - * it can happen in parallel with the subsequent PHY0 init. 
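Note: in bxt_ddi_phy_init() above, PHY0 inherits PHY1's calibrated GRC value, replicated into the fast, slow and nominal fields of PHY0's GRC code register. A sketch of that packing, reusing the shift names from the hunk (the field-layout comments are an assumption):

static u32 grc_code_from(u32 val)
{
	return val << GRC_CODE_FAST_SHIFT |	/* fast lane field */
	       val << GRC_CODE_SLOW_SHIFT |	/* slow lane field */
	       val;				/* nominal field */
}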
- */ val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); val |= COMMON_RESET_DIS; I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); -} -void broxton_ddi_phy_init(struct drm_i915_private *dev_priv) -{ - /* Enable PHY1 first since it provides Rcomp for PHY0 */ - broxton_phy_init(dev_priv, DPIO_PHY1); - broxton_phy_init(dev_priv, DPIO_PHY0); - - /* - * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the - * PHY1 GRC calibration to finish, so wait for it here. - */ - broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1); + if (phy == DPIO_PHY1) + bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1); } -static void broxton_phy_uninit(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) { uint32_t val; @@ -1949,12 +1927,6 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv, I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); } -void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv) -{ - broxton_phy_uninit(dev_priv, DPIO_PHY1); - broxton_phy_uninit(dev_priv, DPIO_PHY0); -} - static bool __printf(6, 7) __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, i915_reg_t reg, u32 mask, u32 expected, @@ -1982,11 +1954,9 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, return false; } -static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, - enum dpio_phy phy) +bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, + enum dpio_phy phy) { - enum port port; - u32 ports; uint32_t mask; bool ok; @@ -1994,27 +1964,11 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ ## __VA_ARGS__) - /* We expect the PHY to be always enabled */ - if (!broxton_phy_is_enabled(dev_priv, phy)) + if (!bxt_ddi_phy_is_enabled(dev_priv, phy)) return false; ok = true; - if (phy == DPIO_PHY0) - ports = BIT(PORT_B) | BIT(PORT_C); - else - ports = BIT(PORT_A); - - for_each_port_masked(port, ports) { - int lane; - - for (lane = 0; lane < 4; lane++) - ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane), - LATENCY_OPTIM, - lane != 1 ? LATENCY_OPTIM : 0, - "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane); - } - /* PLL Rcomp code offset */ ok &= _CHK(BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT, @@ -2058,11 +2012,65 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, #undef _CHK } -void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv) +static uint8_t +bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) { - if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) || - !broxton_phy_verify_state(dev_priv, DPIO_PHY1)) - i915_report_error(dev_priv, "DDI PHY state mismatch\n"); + switch (pipe_config->lane_count) { + case 1: + return 0; + case 2: + return BIT(2) | BIT(0); + case 4: + return BIT(3) | BIT(2) | BIT(0); + default: + MISSING_CASE(pipe_config->lane_count); + + return 0; + } +} + +static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); + enum port port = dport->port; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + int lane; + + for (lane = 0; lane < 4; lane++) { + u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane)); + + /* + * Note that on CHV this flag is called UPAR, but has + * the same function. 
+ */ + val &= ~LATENCY_OPTIM; + if (intel_crtc->config->lane_lat_optim_mask & BIT(lane)) + val |= LATENCY_OPTIM; + + I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val); + } +} + +static uint8_t +bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); + enum port port = dport->port; + int lane; + uint8_t mask; + + mask = 0; + for (lane = 0; lane < 4; lane++) { + u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane)); + + if (val & LATENCY_OPTIM) + mask |= BIT(lane); + } + + return mask; } void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) @@ -2236,13 +2244,19 @@ void intel_ddi_get_config(struct intel_encoder *encoder, } intel_ddi_clock_get(encoder, pipe_config); + + if (IS_BROXTON(dev_priv)) + pipe_config->lane_lat_optim_mask = + bxt_ddi_phy_get_lane_lat_optim_mask(encoder); } static bool intel_ddi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; int type = encoder->type; int port = intel_ddi_get_encoder_port(encoder); + int ret; WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n"); @@ -2250,9 +2264,17 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, pipe_config->cpu_transcoder = TRANSCODER_EDP; if (type == INTEL_OUTPUT_HDMI) - return intel_hdmi_compute_config(encoder, pipe_config); + ret = intel_hdmi_compute_config(encoder, pipe_config); else - return intel_dp_compute_config(encoder, pipe_config); + ret = intel_dp_compute_config(encoder, pipe_config); + + if (IS_BROXTON(dev_priv) && ret) + pipe_config->lane_lat_optim_mask = + bxt_ddi_phy_calc_lane_lat_optim_mask(encoder, + pipe_config); + + return ret; + } static const struct drm_encoder_funcs intel_ddi_funcs = { @@ -2347,10 +2369,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port) encoder = &intel_encoder->base; drm_encoder_init(dev, encoder, &intel_ddi_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port)); intel_encoder->compute_config = intel_ddi_compute_config; intel_encoder->enable = intel_enable_ddi; + if (IS_BROXTON(dev_priv)) + intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable; intel_encoder->pre_enable = intel_ddi_pre_enable; intel_encoder->disable = intel_disable_ddi; intel_encoder->post_disable = intel_ddi_post_disable; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2113f401f0ba..0b2cd669ac05 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -36,6 +36,7 @@ #include "intel_drv.h" #include <drm/i915_drm.h> #include "i915_drv.h" +#include "i915_gem_dmabuf.h" #include "intel_dsi.h" #include "i915_trace.h" #include <drm/drm_atomic.h> @@ -46,7 +47,11 @@ #include <drm/drm_rect.h> #include <linux/dma_remapping.h> #include <linux/reservation.h> -#include <linux/dma-buf.h> + +static bool is_mmio_work(struct intel_flip_work *work) +{ + return work->mmio_work.func; +} /* Primary plane formats for gen <= 3 */ static const uint32_t i8xx_primary_formats[] = { @@ -117,20 +122,18 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); static void ironlake_pfit_enable(struct intel_crtc *crtc); static void intel_modeset_setup_hw_state(struct drm_device *dev); static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); +static int ilk_max_pixel_rate(struct 
drm_atomic_state *state); +static int bxt_calc_cdclk(int max_pixclk); -typedef struct { - int min, max; -} intel_range_t; - -typedef struct { - int dot_limit; - int p2_slow, p2_fast; -} intel_p2_t; - -typedef struct intel_limit intel_limit_t; struct intel_limit { - intel_range_t dot, vco, n, m, m1, m2, p, p1; - intel_p2_t p2; + struct { + int min, max; + } dot, vco, n, m, m1, m2, p, p1; + + struct { + int dot_limit; + int p2_slow, p2_fast; + } p2; }; /* returns HPLL frequency in kHz */ @@ -185,6 +188,7 @@ intel_pch_rawclk(struct drm_i915_private *dev_priv) static int intel_vlv_hrawclk(struct drm_i915_private *dev_priv) { + /* RAWCLK_FREQ_VLV register updated from power well code */ return vlv_get_cck_clock_hpll(dev_priv, "hrawclk", CCK_DISPLAY_REF_CLOCK_CONTROL); } @@ -218,7 +222,7 @@ intel_g4x_hrawclk(struct drm_i915_private *dev_priv) } } -static void intel_update_rawclk(struct drm_i915_private *dev_priv) +void intel_update_rawclk(struct drm_i915_private *dev_priv) { if (HAS_PCH_SPLIT(dev_priv)) dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv); @@ -255,7 +259,7 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv, return 270000; } -static const intel_limit_t intel_limits_i8xx_dac = { +static const struct intel_limit intel_limits_i8xx_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 908000, .max = 1512000 }, .n = { .min = 2, .max = 16 }, @@ -268,7 +272,7 @@ static const intel_limit_t intel_limits_i8xx_dac = { .p2_slow = 4, .p2_fast = 2 }, }; -static const intel_limit_t intel_limits_i8xx_dvo = { +static const struct intel_limit intel_limits_i8xx_dvo = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 908000, .max = 1512000 }, .n = { .min = 2, .max = 16 }, @@ -281,7 +285,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = { .p2_slow = 4, .p2_fast = 4 }, }; -static const intel_limit_t intel_limits_i8xx_lvds = { +static const struct intel_limit intel_limits_i8xx_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 908000, .max = 1512000 }, .n = { .min = 2, .max = 16 }, @@ -294,7 +298,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = { .p2_slow = 14, .p2_fast = 7 }, }; -static const intel_limit_t intel_limits_i9xx_sdvo = { +static const struct intel_limit intel_limits_i9xx_sdvo = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, @@ -307,7 +311,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_i9xx_lvds = { +static const struct intel_limit intel_limits_i9xx_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, @@ -321,7 +325,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = { }; -static const intel_limit_t intel_limits_g4x_sdvo = { +static const struct intel_limit intel_limits_g4x_sdvo = { .dot = { .min = 25000, .max = 270000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, @@ -336,7 +340,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = { }, }; -static const intel_limit_t intel_limits_g4x_hdmi = { +static const struct intel_limit intel_limits_g4x_hdmi = { .dot = { .min = 22000, .max = 400000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, @@ -349,7 +353,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_g4x_single_channel_lvds = { +static const struct intel_limit 
intel_limits_g4x_single_channel_lvds = { .dot = { .min = 20000, .max = 115000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, @@ -363,7 +367,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = { }, }; -static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { +static const struct intel_limit intel_limits_g4x_dual_channel_lvds = { .dot = { .min = 80000, .max = 224000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, @@ -377,7 +381,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { }, }; -static const intel_limit_t intel_limits_pineview_sdvo = { +static const struct intel_limit intel_limits_pineview_sdvo = { .dot = { .min = 20000, .max = 400000}, .vco = { .min = 1700000, .max = 3500000 }, /* Pineview's Ncounter is a ring counter */ @@ -392,7 +396,7 @@ static const intel_limit_t intel_limits_pineview_sdvo = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_pineview_lvds = { +static const struct intel_limit intel_limits_pineview_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1700000, .max = 3500000 }, .n = { .min = 3, .max = 6 }, @@ -410,7 +414,7 @@ static const intel_limit_t intel_limits_pineview_lvds = { * We calculate clock using (register_value + 2) for N/M1/M2, so here * the range value for them is (actual_value - 2). */ -static const intel_limit_t intel_limits_ironlake_dac = { +static const struct intel_limit intel_limits_ironlake_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 5 }, @@ -423,7 +427,7 @@ static const intel_limit_t intel_limits_ironlake_dac = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_ironlake_single_lvds = { +static const struct intel_limit intel_limits_ironlake_single_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -436,7 +440,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = { .p2_slow = 14, .p2_fast = 14 }, }; -static const intel_limit_t intel_limits_ironlake_dual_lvds = { +static const struct intel_limit intel_limits_ironlake_dual_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -450,7 +454,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = { }; /* LVDS 100mhz refclk limits. */ -static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { +static const struct intel_limit intel_limits_ironlake_single_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 2 }, @@ -463,7 +467,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { .p2_slow = 14, .p2_fast = 14 }, }; -static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { +static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -476,7 +480,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { .p2_slow = 7, .p2_fast = 7 }, }; -static const intel_limit_t intel_limits_vlv = { +static const struct intel_limit intel_limits_vlv = { /* * These are the data rate limits (measured in fast clocks) * since those are the strictest limits we have. 
The fast @@ -492,7 +496,7 @@ static const intel_limit_t intel_limits_vlv = { .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ }; -static const intel_limit_t intel_limits_chv = { +static const struct intel_limit intel_limits_chv = { /* * These are the data rate limits (measured in fast clocks) * since those are the strictest limits we have. The fast @@ -508,7 +512,7 @@ static const intel_limit_t intel_limits_chv = { .p2 = { .p2_slow = 1, .p2_fast = 14 }, }; -static const intel_limit_t intel_limits_bxt = { +static const struct intel_limit intel_limits_bxt = { /* FIXME: find real dot limits */ .dot = { .min = 0, .max = INT_MAX }, .vco = { .min = 4800000, .max = 6700000 }, @@ -581,7 +585,7 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state, * divided-down version of it. */ /* m1 is reserved as 0 in Pineview, n is a ring counter */ -static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock) +static int pnv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m2 + 2; clock->p = clock->p1 * clock->p2; @@ -598,7 +602,7 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); } -static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) +static int i9xx_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = i9xx_dpll_compute_m(clock); clock->p = clock->p1 * clock->p2; @@ -610,7 +614,7 @@ static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) return clock->dot; } -static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) +static int vlv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m1 * clock->m2; clock->p = clock->p1 * clock->p2; @@ -622,7 +626,7 @@ static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) return clock->dot / 5; } -int chv_calc_dpll_params(int refclk, intel_clock_t *clock) +int chv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m1 * clock->m2; clock->p = clock->p1 * clock->p2; @@ -642,8 +646,8 @@ int chv_calc_dpll_params(int refclk, intel_clock_t *clock) */ static bool intel_PLL_is_valid(struct drm_device *dev, - const intel_limit_t *limit, - const intel_clock_t *clock) + const struct intel_limit *limit, + const struct dpll *clock) { if (clock->n < limit->n.min || limit->n.max < clock->n) INTELPllInvalid("n out of range\n"); @@ -678,7 +682,7 @@ static bool intel_PLL_is_valid(struct drm_device *dev, } static int -i9xx_select_p2_div(const intel_limit_t *limit, +i9xx_select_p2_div(const struct intel_limit *limit, const struct intel_crtc_state *crtc_state, int target) { @@ -713,13 +717,13 @@ i9xx_select_p2_div(const intel_limit_t *limit, * divider from @match_clock used for LVDS downclocking. */ static bool -i9xx_find_best_dpll(const intel_limit_t *limit, +i9xx_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct drm_device *dev = crtc_state->base.crtc->dev; - intel_clock_t clock; + struct dpll clock; int err = target; memset(best_clock, 0, sizeof(*best_clock)); @@ -770,13 +774,13 @@ i9xx_find_best_dpll(const intel_limit_t *limit, * divider from @match_clock used for LVDS downclocking. 
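Note: i9xx_select_p2_div() above picks between the slow and fast p2 divider; for non-LVDS outputs the decision point is the limit table's dot_limit. A sketch of that rule with numbers from intel_limits_i9xx_sdvo (the LVDS branch, which keys off dual-link detection instead, is omitted here):

static int select_p2_nonlvds(const struct intel_limit *limit, int target)
{
	/* e.g. i9xx sdvo: 270000 kHz >= 200000 -> p2 = p2_fast = 5,
	 *                 100000 kHz <  200000 -> p2 = p2_slow = 10 */
	return target < limit->p2.dot_limit ? limit->p2.p2_slow
					    : limit->p2.p2_fast;
}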
*/ static bool -pnv_find_best_dpll(const intel_limit_t *limit, +pnv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct drm_device *dev = crtc_state->base.crtc->dev; - intel_clock_t clock; + struct dpll clock; int err = target; memset(best_clock, 0, sizeof(*best_clock)); @@ -825,13 +829,13 @@ pnv_find_best_dpll(const intel_limit_t *limit, * divider from @match_clock used for LVDS downclocking. */ static bool -g4x_find_best_dpll(const intel_limit_t *limit, +g4x_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct drm_device *dev = crtc_state->base.crtc->dev; - intel_clock_t clock; + struct dpll clock; int max_n; bool found = false; /* approximately equals target * 0.00585 */ @@ -877,8 +881,8 @@ g4x_find_best_dpll(const intel_limit_t *limit, * best configuration and error found so far. Return the calculated error. */ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, - const intel_clock_t *calculated_clock, - const intel_clock_t *best_clock, + const struct dpll *calculated_clock, + const struct dpll *best_clock, unsigned int best_error_ppm, unsigned int *error_ppm) { @@ -918,14 +922,14 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ static bool -vlv_find_best_dpll(const intel_limit_t *limit, +vlv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; - intel_clock_t clock; + struct dpll clock; unsigned int bestppm = 1000000; /* min update 19.2 MHz */ int max_n = min(limit->n.max, refclk / 19200); @@ -977,15 +981,15 @@ vlv_find_best_dpll(const intel_limit_t *limit, * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 
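Note: the divisor formula in the comment above is worth one worked pass with in-range numbers (values chosen to sit inside intel_limits_i9xx_sdvo; DIV_ROUND_CLOSEST rounding ignored for clarity):

/*
 *   refclk = 96000 kHz, n = 2, m1 = 14, m2 = 8, p1 = 2, p2 = 5
 *   m   = 5 * (m1 + 2) + (m2 + 2) = 5 * 16 + 10  = 90
 *   vco = refclk * m / (n + 2)    = 96000 * 90/4 = 2160000 kHz
 *   dot = vco / (p1 * p2)         = 2160000 / 10 = 216000 kHz
 */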
*/ static bool -chv_find_best_dpll(const intel_limit_t *limit, +chv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; unsigned int best_error_ppm; - intel_clock_t clock; + struct dpll clock; uint64_t m2; int found = false; @@ -1035,10 +1039,10 @@ chv_find_best_dpll(const intel_limit_t *limit, } bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, - intel_clock_t *best_clock) + struct dpll *best_clock) { int refclk = 100000; - const intel_limit_t *limit = &intel_limits_bxt; + const struct intel_limit *limit = &intel_limits_bxt; return chv_find_best_dpll(limit, crtc_state, target_clock, refclk, NULL, best_clock); @@ -1203,7 +1207,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, u32 val; /* ILK FDI PLL is always enabled */ - if (INTEL_INFO(dev_priv)->gen == 5) + if (IS_GEN5(dev_priv)) return; /* On Haswell, DDI ports are responsible for the FDI PLL setup */ @@ -2309,7 +2313,7 @@ err_pm: return ret; } -static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) +void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; @@ -3110,17 +3114,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, return -ENODEV; } -static void intel_complete_page_flips(struct drm_device *dev) +static void intel_complete_page_flips(struct drm_i915_private *dev_priv) { - struct drm_crtc *crtc; - - for_each_crtc(dev, crtc) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum plane plane = intel_crtc->plane; + struct intel_crtc *crtc; - intel_prepare_page_flip(dev, plane); - intel_finish_page_flip_plane(dev, plane); - } + for_each_intel_crtc(dev_priv->dev, crtc) + intel_finish_page_flip_cs(dev_priv, crtc->pipe); } static void intel_update_primary_planes(struct drm_device *dev) @@ -3143,41 +3142,39 @@ static void intel_update_primary_planes(struct drm_device *dev) } } -void intel_prepare_reset(struct drm_device *dev) +void intel_prepare_reset(struct drm_i915_private *dev_priv) { /* no reset support for gen2 */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return; /* reset doesn't touch the display */ - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) return; - drm_modeset_lock_all(dev); + drm_modeset_lock_all(dev_priv->dev); /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. */ - intel_display_suspend(dev); + intel_display_suspend(dev_priv->dev); } -void intel_finish_reset(struct drm_device *dev) +void intel_finish_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - /* * Flips in the rings will be nuked by the reset, * so complete all pending flips so that user space * will get its events and not get stuck. 
*/ - intel_complete_page_flips(dev); + intel_complete_page_flips(dev_priv); /* no reset support for gen2 */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return; /* reset doesn't touch the display */ - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) { + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { /* * Flips in the rings have been nuked by the reset, * so update the base address of all primary @@ -3187,7 +3184,7 @@ void intel_finish_reset(struct drm_device *dev) * FIXME: Atomic will make this obsolete since we won't schedule * CS-based flips (which might get lost in gpu resets) any more. */ - intel_update_primary_planes(dev); + intel_update_primary_planes(dev_priv->dev); return; } @@ -3198,18 +3195,18 @@ void intel_finish_reset(struct drm_device *dev) intel_runtime_pm_disable_interrupts(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); - intel_modeset_init_hw(dev); + intel_modeset_init_hw(dev_priv->dev); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - intel_display_resume(dev); + intel_display_resume(dev_priv->dev); intel_hpd_init(dev_priv); - drm_modeset_unlock_all(dev); + drm_modeset_unlock_all(dev_priv->dev); } static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) @@ -3224,7 +3221,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) return false; spin_lock_irq(&dev->event_lock); - pending = to_intel_crtc(crtc)->unpin_work != NULL; + pending = to_intel_crtc(crtc)->flip_work != NULL; spin_unlock_irq(&dev->event_lock); return pending; @@ -3803,7 +3800,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev) if (atomic_read(&crtc->unpin_work_count) == 0) continue; - if (crtc->unpin_work) + if (crtc->flip_work) intel_wait_for_vblank(dev, crtc->pipe); return true; @@ -3815,11 +3812,9 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev) static void page_flip_completed(struct intel_crtc *intel_crtc) { struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - struct intel_unpin_work *work = intel_crtc->unpin_work; + struct intel_flip_work *work = intel_crtc->flip_work; - /* ensure that the unpin work is consistent wrt ->pending. 
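Note: the smp_rmb() removed just below paired with the old ->pending tracking of unpin_work; after the rework the flip_work pointer is only read and cleared under dev->event_lock, which makes the explicit barrier unnecessary. A sketch of the locking rule this relies on (inferred from the surrounding hunks, not a quoted invariant):

	/* flip_work accesses nest under the event lock: */
	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->flip_work;
	if (work)
		intel_crtc->flip_work = NULL;	/* claim completion */
	spin_unlock_irq(&dev->event_lock);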
*/ - smp_rmb(); - intel_crtc->unpin_work = NULL; + intel_crtc->flip_work = NULL; if (work->event) drm_crtc_send_vblank_event(&intel_crtc->base, work->event); @@ -3827,7 +3822,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) drm_crtc_vblank_put(&intel_crtc->base); wake_up_all(&dev_priv->pending_flip_queue); - queue_work(dev_priv->wq, &work->work); + queue_work(dev_priv->wq, &work->unpin_work); trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); @@ -3851,9 +3846,11 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) if (ret == 0) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_flip_work *work; spin_lock_irq(&dev->event_lock); - if (intel_crtc->unpin_work) { + work = intel_crtc->flip_work; + if (work && !is_mmio_work(work)) { WARN_ONCE(1, "Removing stuck page flip\n"); page_flip_completed(intel_crtc); } @@ -4281,8 +4278,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state) struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; - DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n", - intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX); + DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n", + intel_crtc->base.base.id, intel_crtc->base.name, + intel_crtc->pipe, SKL_CRTC_INDEX); return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0), @@ -4312,9 +4310,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, bool force_detach = !fb || !plane_state->visible; - DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n", - intel_plane->base.base.id, intel_crtc->pipe, - drm_plane_index(&intel_plane->base)); + DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n", + intel_plane->base.base.id, intel_plane->base.name, + intel_crtc->pipe, drm_plane_index(&intel_plane->base)); ret = skl_update_scaler(crtc_state, force_detach, drm_plane_index(&intel_plane->base), @@ -4330,8 +4328,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, /* check colorkey */ if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) { - DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed", - intel_plane->base.base.id); + DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed", + intel_plane->base.base.id, + intel_plane->base.name); return -EINVAL; } @@ -4350,8 +4349,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_VYUY: break; default: - DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n", - intel_plane->base.base.id, fb->base.id, fb->pixel_format); + DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", + intel_plane->base.base.id, intel_plane->base.name, + fb->base.id, fb->pixel_format); return -EINVAL; } @@ -4641,14 +4641,14 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) struct intel_plane_state *old_primary_state = to_intel_plane_state(old_pri_state); - intel_fbc_pre_update(crtc); + intel_fbc_pre_update(crtc, pipe_config, primary_state); if (old_primary_state->visible && (modeset || !primary_state->visible)) intel_pre_disable_primary(&crtc->base); } - if (pipe_config->disable_cxsr) { + if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) { crtc->wm.cxsr_allowed = false; /* @@ -4841,6 +4841,10 @@ static void haswell_crtc_enable(struct drm_crtc 
*crtc) intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, false); + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->pre_pll_enable) + encoder->pre_pll_enable(encoder); + if (intel_crtc->config->shared_dpll) intel_enable_shared_dpll(intel_crtc); @@ -5269,21 +5273,34 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) return max_cdclk_freq*90/100; } +static int skl_calc_cdclk(int max_pixclk, int vco); + static void intel_update_max_cdclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; + int max_cdclk, vco; + vco = dev_priv->skl_preferred_vco_freq; + WARN_ON(vco != 8100000 && vco != 8640000); + + /* + * Use the lower (vco 8640) cdclk values as a + * first guess. skl_calc_cdclk() will correct it + * if the preferred vco is 8100 instead. + */ if (limit == SKL_DFSM_CDCLK_LIMIT_675) - dev_priv->max_cdclk_freq = 675000; + max_cdclk = 617143; else if (limit == SKL_DFSM_CDCLK_LIMIT_540) - dev_priv->max_cdclk_freq = 540000; + max_cdclk = 540000; else if (limit == SKL_DFSM_CDCLK_LIMIT_450) - dev_priv->max_cdclk_freq = 450000; + max_cdclk = 432000; else - dev_priv->max_cdclk_freq = 337500; + max_cdclk = 308571; + + dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); } else if (IS_BROXTON(dev)) { dev_priv->max_cdclk_freq = 624000; } else if (IS_BROADWELL(dev)) { @@ -5324,264 +5341,313 @@ static void intel_update_cdclk(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev); - DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", - dev_priv->cdclk_freq); + + if (INTEL_GEN(dev_priv) >= 9) + DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n", + dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco, + dev_priv->cdclk_pll.ref); + else + DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", + dev_priv->cdclk_freq); /* - * Program the gmbus_freq based on the cdclk frequency. - * BSpec erroneously claims we should aim for 4MHz, but - * in fact 1MHz is the correct frequency. + * 9:0 CMBUS [sic] CDCLK frequency (cdfreq): + * Programmng [sic] note: bit[9:2] should be programmed to the number + * of cdclk that generates 4MHz reference clock freq which is used to + * generate GMBus clock. This will vary with the cdclk freq. */ - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { - /* - * Program the gmbus_freq based on the cdclk frequency. - * BSpec erroneously claims we should aim for 4MHz, but - * in fact 1MHz is the correct frequency. 
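Note: the "lower guess, then correct" trick in intel_update_max_cdclk() above deserves a worked example: the DFSM limit values are first mapped to the vco-8640 cdclk steps, then skl_calc_cdclk() bumps them to the vco-8100 equivalents when that is the preferred VCO:

/*
 *   limit = SKL_DFSM_CDCLK_LIMIT_675 -> guess 617143 kHz
 *     vco 8640000: skl_calc_cdclk(617143, 8640000) = 617143 (kept)
 *     vco 8100000: skl_calc_cdclk(617143, 8100000) = 675000 (corrected)
 *   limit = SKL_DFSM_CDCLK_LIMIT_450 -> guess 432000 kHz
 *     vco 8100000: skl_calc_cdclk(432000, 8100000) = 450000
 */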
- */ + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000)); - } +} - if (dev_priv->max_cdclk_freq == 0) - intel_update_max_cdclk(dev); +/* convert from kHz to .1 fixpoint MHz with -1MHz offset */ +static int skl_cdclk_decimal(int cdclk) +{ + return DIV_ROUND_CLOSEST(cdclk - 1000, 500); } -static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency) +static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) { - uint32_t divider; - uint32_t ratio; - uint32_t current_freq; - int ret; + int ratio; + + if (cdclk == dev_priv->cdclk_pll.ref) + return 0; - /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */ - switch (frequency) { + switch (cdclk) { + default: + MISSING_CASE(cdclk); case 144000: + case 288000: + case 384000: + case 576000: + ratio = 60; + break; + case 624000: + ratio = 65; + break; + } + + return dev_priv->cdclk_pll.ref * ratio; +} + +static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(BXT_DE_PLL_ENABLE, 0); + + /* Timeout 200us */ + if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1)) + DRM_ERROR("timeout waiting for DE PLL unlock\n"); + + dev_priv->cdclk_pll.vco = 0; +} + +static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) +{ + int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref); + u32 val; + + val = I915_READ(BXT_DE_PLL_CTL); + val &= ~BXT_DE_PLL_RATIO_MASK; + val |= BXT_DE_PLL_RATIO(ratio); + I915_WRITE(BXT_DE_PLL_CTL, val); + + I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); + + /* Timeout 200us */ + if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1)) + DRM_ERROR("timeout waiting for DE PLL lock\n"); + + dev_priv->cdclk_pll.vco = vco; +} + +static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) +{ + u32 val, divider; + int vco, ret; + + vco = bxt_de_pll_vco(dev_priv, cdclk); + + DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); + + /* cdclk = vco / 2 / div{1,1.5,2,4} */ + switch (DIV_ROUND_CLOSEST(vco, cdclk)) { + case 8: divider = BXT_CDCLK_CD2X_DIV_SEL_4; - ratio = BXT_DE_PLL_RATIO(60); break; - case 288000: + case 4: divider = BXT_CDCLK_CD2X_DIV_SEL_2; - ratio = BXT_DE_PLL_RATIO(60); break; - case 384000: + case 3: divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; - ratio = BXT_DE_PLL_RATIO(60); break; - case 576000: - divider = BXT_CDCLK_CD2X_DIV_SEL_1; - ratio = BXT_DE_PLL_RATIO(60); - break; - case 624000: + case 2: divider = BXT_CDCLK_CD2X_DIV_SEL_1; - ratio = BXT_DE_PLL_RATIO(65); - break; - case 19200: - /* - * Bypass frequency with DE PLL disabled. Init ratio, divider - * to suppress GCC warning. 
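Note: skl_cdclk_decimal() above encodes the CDCLK_CTL decimal field: cdclk in kHz becomes (MHz - 1) in Q.1 binary fixpoint, i.e. half-MHz units. Two worked values:

/*
 *   337500 kHz: (337500 - 1000) / 500 = 673 = 2 * 336.5 MHz
 *   450000 kHz: (450000 - 1000) / 500 = 898 = 2 * 449.0 MHz
 */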
- */ - ratio = 0; - divider = 0; break; default: - DRM_ERROR("unsupported CDCLK freq %d", frequency); + WARN_ON(cdclk != dev_priv->cdclk_pll.ref); + WARN_ON(vco != 0); - return; + divider = BXT_CDCLK_CD2X_DIV_SEL_1; + break; } - mutex_lock(&dev_priv->rps.hw_lock); /* Inform power controller of upcoming frequency change */ + mutex_lock(&dev_priv->rps.hw_lock); ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 0x80000000); mutex_unlock(&dev_priv->rps.hw_lock); if (ret) { DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", - ret, frequency); + ret, cdclk); return; } - current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK; - /* convert from .1 fixpoint MHz with -1MHz offset to kHz */ - current_freq = current_freq * 500 + 1000; + if (dev_priv->cdclk_pll.vco != 0 && + dev_priv->cdclk_pll.vco != vco) + bxt_de_pll_disable(dev_priv); - /* - * DE PLL has to be disabled when - * - setting to 19.2MHz (bypass, PLL isn't used) - * - before setting to 624MHz (PLL needs toggling) - * - before setting to any frequency from 624MHz (PLL needs toggling) - */ - if (frequency == 19200 || frequency == 624000 || - current_freq == 624000) { - I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE); - /* Timeout 200us */ - if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK), - 1)) - DRM_ERROR("timout waiting for DE PLL unlock\n"); - } - - if (frequency != 19200) { - uint32_t val; - - val = I915_READ(BXT_DE_PLL_CTL); - val &= ~BXT_DE_PLL_RATIO_MASK; - val |= ratio; - I915_WRITE(BXT_DE_PLL_CTL, val); - - I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); - /* Timeout 200us */ - if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1)) - DRM_ERROR("timeout waiting for DE PLL lock\n"); - - val = I915_READ(CDCLK_CTL); - val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK; - val |= divider; - /* - * Disable SSA Precharge when CD clock frequency < 500 MHz, - * enable otherwise. - */ - val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE; - if (frequency >= 500000) - val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; + if (dev_priv->cdclk_pll.vco != vco) + bxt_de_pll_enable(dev_priv, vco); - val &= ~CDCLK_FREQ_DECIMAL_MASK; - /* convert from kHz to .1 fixpoint MHz with -1MHz offset */ - val |= (frequency - 1000) / 500; - I915_WRITE(CDCLK_CTL, val); - } + val = divider | skl_cdclk_decimal(cdclk); + /* + * FIXME if only the cd2x divider needs changing, it could be done + * without shutting off the pipe (if only one pipe is active). + */ + val |= BXT_CDCLK_CD2X_PIPE_NONE; + /* + * Disable SSA Precharge when CD clock frequency < 500 MHz, + * enable otherwise. 
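Note: bxt_set_cdclk() above now derives the CD2X divider from the vco/cdclk ratio instead of open-coding one case per frequency. Worked values using the ratios from bxt_de_pll_vco():

/*
 *   cdclk = 624000: vco = 19200 * 65 = 1248000, ratio 2 -> CD2X_DIV_SEL_1   (/1)
 *   cdclk = 576000: vco = 19200 * 60 = 1152000, ratio 2 -> CD2X_DIV_SEL_1   (/1)
 *   cdclk = 384000: vco = 1152000,              ratio 3 -> CD2X_DIV_SEL_1_5 (/1.5)
 *   cdclk = 288000: vco = 1152000,              ratio 4 -> CD2X_DIV_SEL_2   (/2)
 *   cdclk = 144000: vco = 1152000,              ratio 8 -> CD2X_DIV_SEL_4   (/4)
 */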
+ */ + if (cdclk >= 500000) + val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; + I915_WRITE(CDCLK_CTL, val); mutex_lock(&dev_priv->rps.hw_lock); ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, - DIV_ROUND_UP(frequency, 25000)); + DIV_ROUND_UP(cdclk, 25000)); mutex_unlock(&dev_priv->rps.hw_lock); if (ret) { DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", - ret, frequency); + ret, cdclk); return; } intel_update_cdclk(dev_priv->dev); } -static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv) +static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) { - if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE)) - return false; + u32 cdctl, expected; - /* TODO: Check for a valid CDCLK rate */ + intel_update_cdclk(dev_priv->dev); - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) { - DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n"); + if (dev_priv->cdclk_pll.vco == 0 || + dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref) + goto sanitize; - return false; - } + /* DPLL okay; verify the cdclock + * + * Some BIOS versions leave an incorrect decimal frequency value and + * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4, + * so sanitize this register. + */ + cdctl = I915_READ(CDCLK_CTL); + /* + * Let's ignore the pipe field, since BIOS could have configured the + * dividers both synching to an active pipe, or asynchronously + * (PIPE_NONE). + */ + cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE; - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) { - DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n"); + expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) | + skl_cdclk_decimal(dev_priv->cdclk_freq); + /* + * Disable SSA Precharge when CD clock frequency < 500 MHz, + * enable otherwise. + */ + if (dev_priv->cdclk_freq >= 500000) + expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; - return false; - } + if (cdctl == expected) + /* All well; nothing to sanitize */ + return; - return true; -} +sanitize: + DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); -bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv) -{ - return broxton_cdclk_is_enabled(dev_priv); + /* force cdclk programming */ + dev_priv->cdclk_freq = 0; + + /* force full PLL disable + enable */ + dev_priv->cdclk_pll.vco = -1; } -void broxton_init_cdclk(struct drm_i915_private *dev_priv) +void bxt_init_cdclk(struct drm_i915_private *dev_priv) { - /* check if cd clock is enabled */ - if (broxton_cdclk_is_enabled(dev_priv)) { - DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n"); - return; - } + bxt_sanitize_cdclk(dev_priv); - DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n"); + if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) + return; /* * FIXME: * - The initial CDCLK needs to be read from VBT. * Need to make this change after VBT has changes for BXT. 
- * - check if setting the max (or any) cdclk freq is really necessary - * here, it belongs to modeset time */ - broxton_set_cdclk(dev_priv, 624000); - - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); + bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0)); +} - udelay(10); +void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) +{ + bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref); +} - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) - DRM_ERROR("DBuf power enable timeout!\n"); +static int skl_calc_cdclk(int max_pixclk, int vco) +{ + if (vco == 8640000) { + if (max_pixclk > 540000) + return 617143; + else if (max_pixclk > 432000) + return 540000; + else if (max_pixclk > 308571) + return 432000; + else + return 308571; + } else { + if (max_pixclk > 540000) + return 675000; + else if (max_pixclk > 450000) + return 540000; + else if (max_pixclk > 337500) + return 450000; + else + return 337500; + } } -void broxton_uninit_cdclk(struct drm_i915_private *dev_priv) +static void +skl_dpll0_update(struct drm_i915_private *dev_priv) { - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); + u32 val; - udelay(10); + dev_priv->cdclk_pll.ref = 24000; + dev_priv->cdclk_pll.vco = 0; - if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) - DRM_ERROR("DBuf power disable timeout!\n"); + val = I915_READ(LCPLL1_CTL); + if ((val & LCPLL_PLL_ENABLE) == 0) + return; - /* Set minimum (bypass) frequency, in effect turning off the DE PLL */ - broxton_set_cdclk(dev_priv, 19200); -} + if (WARN_ON((val & LCPLL_PLL_LOCK) == 0)) + return; -static const struct skl_cdclk_entry { - unsigned int freq; - unsigned int vco; -} skl_cdclk_frequencies[] = { - { .freq = 308570, .vco = 8640 }, - { .freq = 337500, .vco = 8100 }, - { .freq = 432000, .vco = 8640 }, - { .freq = 450000, .vco = 8100 }, - { .freq = 540000, .vco = 8100 }, - { .freq = 617140, .vco = 8640 }, - { .freq = 675000, .vco = 8100 }, -}; + val = I915_READ(DPLL_CTRL1); -static unsigned int skl_cdclk_decimal(unsigned int freq) -{ - return (freq - 1000) / 500; + if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | + DPLL_CTRL1_SSC(SKL_DPLL0) | + DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) != + DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) + return; + + switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) { + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0): + dev_priv->cdclk_pll.vco = 8100000; + break; + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0): + dev_priv->cdclk_pll.vco = 8640000; + break; + default: + MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); + break; + } } -static unsigned int skl_cdclk_get_vco(unsigned int freq) +void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco) { - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) { - const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i]; + bool changed = dev_priv->skl_preferred_vco_freq != vco; - if (e->freq == freq) - return e->vco; - } + dev_priv->skl_preferred_vco_freq = vco; - return 8100; + if (changed) + intel_update_max_cdclk(dev_priv->dev); } static void -skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) +skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) { - unsigned int 
min_freq; + int min_cdclk = skl_calc_cdclk(0, vco); u32 val; - /* select the minimum CDCLK before enabling DPLL 0 */ - val = I915_READ(CDCLK_CTL); - val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK; - val |= CDCLK_FREQ_337_308; - - if (required_vco == 8640) - min_freq = 308570; - else - min_freq = 337500; - - val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq); + WARN_ON(vco != 8100000 && vco != 8640000); + /* select the minimum CDCLK before enabling DPLL 0 */ + val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk); I915_WRITE(CDCLK_CTL, val); POSTING_READ(CDCLK_CTL); @@ -5592,14 +5658,14 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640. * The modeset code is responsible for the selection of the exact link * rate later on, with the constraint of choosing a frequency that - * works with required_vco. + * works with vco. */ val = I915_READ(DPLL_CTRL1); val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0); - if (required_vco == 8640) + if (vco == 8640000) val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0); else @@ -5613,6 +5679,21 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5)) DRM_ERROR("DPLL0 not locked\n"); + + dev_priv->cdclk_pll.vco = vco; + + /* We'll want to keep using the current vco from now on. */ + skl_set_preferred_cdclk_vco(dev_priv, vco); +} + +static void +skl_dpll0_disable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); + if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) + DRM_ERROR("Couldn't disable DPLL0\n"); + + dev_priv->cdclk_pll.vco = 0; } static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) @@ -5642,12 +5723,14 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) return false; } -static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) +static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) { struct drm_device *dev = dev_priv->dev; u32 freq_select, pcu_ack; - DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq); + WARN_ON((cdclk == 24000) != (vco == 0)); + + DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) { DRM_ERROR("failed to inform PCU about cdclk change\n"); @@ -5655,7 +5738,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) } /* set CDCLK_CTL */ - switch(freq) { + switch (cdclk) { case 450000: case 432000: freq_select = CDCLK_FREQ_450_432; @@ -5665,20 +5748,27 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) freq_select = CDCLK_FREQ_540; pcu_ack = 2; break; - case 308570: + case 308571: case 337500: default: freq_select = CDCLK_FREQ_337_308; pcu_ack = 0; break; - case 617140: + case 617143: case 675000: freq_select = CDCLK_FREQ_675_617; pcu_ack = 3; break; } - I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq)); + if (dev_priv->cdclk_pll.vco != 0 && + dev_priv->cdclk_pll.vco != vco) + skl_dpll0_disable(dev_priv); + + if (dev_priv->cdclk_pll.vco != vco) + skl_dpll0_enable(dev_priv, vco); + + I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk)); POSTING_READ(CDCLK_CTL); /* inform PCU of the change */ @@ -5689,52 +5779,41 @@ static void skl_set_cdclk(struct 
drm_i915_private *dev_priv, unsigned int freq) intel_update_cdclk(dev); } +static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv); + void skl_uninit_cdclk(struct drm_i915_private *dev_priv) { - /* disable DBUF power */ - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); - - udelay(10); - - if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) - DRM_ERROR("DBuf power disable timeout\n"); - - /* disable DPLL0 */ - I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); - if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) - DRM_ERROR("Couldn't disable DPLL0\n"); + skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0); } void skl_init_cdclk(struct drm_i915_private *dev_priv) { - unsigned int required_vco; + int cdclk, vco; - /* DPLL0 not enabled (happens on early BIOS versions) */ - if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) { - /* enable DPLL0 */ - required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk); - skl_dpll0_enable(dev_priv, required_vco); - } - - /* set CDCLK to the frequency the BIOS chose */ - skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk); + skl_sanitize_cdclk(dev_priv); - /* enable DBUF power */ - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); + if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) { + /* + * Use the current vco as our initial + * guess as to what the preferred vco is. + */ + if (dev_priv->skl_preferred_vco_freq == 0) + skl_set_preferred_cdclk_vco(dev_priv, + dev_priv->cdclk_pll.vco); + return; + } - udelay(10); + vco = dev_priv->skl_preferred_vco_freq; + if (vco == 0) + vco = 8100000; + cdclk = skl_calc_cdclk(0, vco); - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) - DRM_ERROR("DBuf power enable timeout\n"); + skl_set_cdclk(dev_priv, cdclk, vco); } -int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) +static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) { - uint32_t lcpll1 = I915_READ(LCPLL1_CTL); - uint32_t cdctl = I915_READ(CDCLK_CTL); - int freq = dev_priv->skl_boot_cdclk; + uint32_t cdctl, expected; /* * check if the pre-os intialized the display @@ -5744,8 +5823,10 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0) goto sanitize; + intel_update_cdclk(dev_priv->dev); /* Is PLL enabled and locked ? */ - if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK))) + if (dev_priv->cdclk_pll.vco == 0 || + dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref) goto sanitize; /* DPLL okay; verify the cdclock @@ -5754,19 +5835,20 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) * decimal part is programmed wrong from BIOS where pre-os does not * enable display. Verify the same as well. 
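Note: skl_init_cdclk() above falls back to skl_calc_cdclk(0, vco), i.e. the bottom rung, when nothing usable can be inherited from the pre-os state. For reference, the full selection ladder that function implements, per VCO:

/*
 *   vco 8100000: 337500 -> 450000 -> 540000 -> 675000 kHz
 *   vco 8640000: 308571 -> 432000 -> 540000 -> 617143 kHz
 * e.g. max_pixclk = 460000 with vco 8100000 selects 540000 kHz,
 * and skl_calc_cdclk(0, vco) yields 337500 resp. 308571.
 */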
*/ - if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq))) + cdctl = I915_READ(CDCLK_CTL); + expected = (cdctl & CDCLK_FREQ_SEL_MASK) | + skl_cdclk_decimal(dev_priv->cdclk_freq); + if (cdctl == expected) /* All well; nothing to sanitize */ - return false; + return; + sanitize: - /* - * As of now initialize with max cdclk till - * we get dynamic cdclk support - * */ - dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq; - skl_init_cdclk(dev_priv); + DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); - /* we did have to sanitize */ - return true; + /* force cdclk programming */ + dev_priv->cdclk_freq = 0; + /* force full PLL disable + enable */ + dev_priv->cdclk_pll.vco = -1; } /* Adjust CDclk dividers to allow high res or save power if possible */ @@ -5906,21 +5988,15 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, return 200000; } -static int broxton_calc_cdclk(struct drm_i915_private *dev_priv, - int max_pixclk) +static int bxt_calc_cdclk(int max_pixclk) { - /* - * FIXME: - * - remove the guardband, it's not needed on BXT - * - set 19.2MHz bypass frequency if there are no active pipes - */ - if (max_pixclk > 576000*9/10) + if (max_pixclk > 576000) return 624000; - else if (max_pixclk > 384000*9/10) + else if (max_pixclk > 384000) return 576000; - else if (max_pixclk > 288000*9/10) + else if (max_pixclk > 288000) return 384000; - else if (max_pixclk > 144000*9/10) + else if (max_pixclk > 144000) return 288000; else return 144000; @@ -5963,9 +6039,6 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - if (max_pixclk < 0) - return max_pixclk; - intel_state->cdclk = intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); @@ -5975,22 +6048,17 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) return 0; } -static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state) +static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state) { - struct drm_device *dev = state->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int max_pixclk = intel_mode_max_pixclk(dev, state); + int max_pixclk = ilk_max_pixel_rate(state); struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - if (max_pixclk < 0) - return max_pixclk; - intel_state->cdclk = intel_state->dev_cdclk = - broxton_calc_cdclk(dev_priv, max_pixclk); + bxt_calc_cdclk(max_pixclk); if (!intel_state->active_crtcs) - intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0); + intel_state->dev_cdclk = bxt_calc_cdclk(0); return 0; } @@ -6252,7 +6320,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) return; if (to_intel_plane_state(crtc->primary->state)->visible) { - WARN_ON(intel_crtc->unpin_work); + WARN_ON(intel_crtc->flip_work); intel_pre_disable_primary_noatomic(crtc); @@ -6262,8 +6330,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) dev_priv->display.crtc_disable(crtc); - DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n", - crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", + crtc->base.id, crtc->name); WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); crtc->state->active = false; @@ -6563,10 +6631,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; const struct drm_display_mode *adjusted_mode = 
&pipe_config->base.adjusted_mode; + int clock_limit = dev_priv->max_dotclk_freq; - /* FIXME should check pixel clock limits on all platforms */ if (INTEL_INFO(dev)->gen < 4) { - int clock_limit = dev_priv->max_cdclk_freq * 9 / 10; + clock_limit = dev_priv->max_cdclk_freq * 9 / 10; /* * Enable double wide mode when the dot clock @@ -6574,16 +6642,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, */ if (intel_crtc_supports_double_wide(crtc) && adjusted_mode->crtc_clock > clock_limit) { - clock_limit *= 2; + clock_limit = dev_priv->max_dotclk_freq; pipe_config->double_wide = true; } + } - if (adjusted_mode->crtc_clock > clock_limit) { - DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", - adjusted_mode->crtc_clock, clock_limit, - yesno(pipe_config->double_wide)); - return -EINVAL; - } + if (adjusted_mode->crtc_clock > clock_limit) { + DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", + adjusted_mode->crtc_clock, clock_limit, + yesno(pipe_config->double_wide)); + return -EINVAL; } /* @@ -6615,76 +6683,98 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, static int skylake_get_display_clock_speed(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t lcpll1 = I915_READ(LCPLL1_CTL); - uint32_t cdctl = I915_READ(CDCLK_CTL); - uint32_t linkrate; + uint32_t cdctl; - if (!(lcpll1 & LCPLL_PLL_ENABLE)) - return 24000; /* 24MHz is the cd freq with NSSC ref */ + skl_dpll0_update(dev_priv); - if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540) - return 540000; + if (dev_priv->cdclk_pll.vco == 0) + return dev_priv->cdclk_pll.ref; - linkrate = (I915_READ(DPLL_CTRL1) & - DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1; + cdctl = I915_READ(CDCLK_CTL); - if (linkrate == DPLL_CTRL1_LINK_RATE_2160 || - linkrate == DPLL_CTRL1_LINK_RATE_1080) { - /* vco 8640 */ + if (dev_priv->cdclk_pll.vco == 8640000) { switch (cdctl & CDCLK_FREQ_SEL_MASK) { case CDCLK_FREQ_450_432: return 432000; case CDCLK_FREQ_337_308: - return 308570; + return 308571; + case CDCLK_FREQ_540: + return 540000; case CDCLK_FREQ_675_617: - return 617140; + return 617143; default: - WARN(1, "Unknown cd freq selection\n"); + MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); } } else { - /* vco 8100 */ switch (cdctl & CDCLK_FREQ_SEL_MASK) { case CDCLK_FREQ_450_432: return 450000; case CDCLK_FREQ_337_308: return 337500; + case CDCLK_FREQ_540: + return 540000; case CDCLK_FREQ_675_617: return 675000; default: - WARN(1, "Unknown cd freq selection\n"); + MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); } } - /* error case, do as if DPLL0 isn't enabled */ - return 24000; + return dev_priv->cdclk_pll.ref; +} + +static void bxt_de_pll_update(struct drm_i915_private *dev_priv) +{ + u32 val; + + dev_priv->cdclk_pll.ref = 19200; + dev_priv->cdclk_pll.vco = 0; + + val = I915_READ(BXT_DE_PLL_ENABLE); + if ((val & BXT_DE_PLL_PLL_ENABLE) == 0) + return; + + if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0)) + return; + + val = I915_READ(BXT_DE_PLL_CTL); + dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) * + dev_priv->cdclk_pll.ref; } static int broxton_get_display_clock_speed(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t cdctl = I915_READ(CDCLK_CTL); - uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; - uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE); - int cdclk; + u32 divider; + int div, vco; - if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE)) - return 19200; + 
bxt_de_pll_update(dev_priv); + + vco = dev_priv->cdclk_pll.vco; + if (vco == 0) + return dev_priv->cdclk_pll.ref; - cdclk = 19200 * pll_ratio / 2; + divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; - switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) { + switch (divider) { case BXT_CDCLK_CD2X_DIV_SEL_1: - return cdclk; /* 576MHz or 624MHz */ + div = 2; + break; case BXT_CDCLK_CD2X_DIV_SEL_1_5: - return cdclk * 2 / 3; /* 384MHz */ + div = 3; + break; case BXT_CDCLK_CD2X_DIV_SEL_2: - return cdclk / 2; /* 288MHz */ + div = 4; + break; case BXT_CDCLK_CD2X_DIV_SEL_4: - return cdclk / 4; /* 144MHz */ + div = 8; + break; + default: + MISSING_CASE(divider); + return dev_priv->cdclk_pll.ref; } - /* error case, do as if DE PLL isn't enabled */ - return 19200; + return DIV_ROUND_CLOSEST(vco, div); } static int broadwell_get_display_clock_speed(struct drm_device *dev) @@ -7063,7 +7153,7 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) static void i9xx_update_pll_dividers(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; u32 fp, fp2 = 0; @@ -7487,7 +7577,7 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe) static void i9xx_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -7563,7 +7653,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, static void i8xx_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -7817,7 +7907,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 48000; memset(&crtc_state->dpll_hw_state, 0, @@ -7853,7 +7943,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, @@ -7896,7 +7986,7 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, @@ -7930,7 +8020,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, @@ -7963,7 +8053,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { int refclk = 100000; - const intel_limit_t *limit = &intel_limits_chv; + const struct intel_limit *limit = &intel_limits_chv; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -7984,7 +8074,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { int refclk = 100000; - const intel_limit_t *limit = 
&intel_limits_vlv; + const struct intel_limit *limit = &intel_limits_vlv; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -8034,7 +8124,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int pipe = pipe_config->cpu_transcoder; - intel_clock_t clock; + struct dpll clock; u32 mdiv; int refclk = 100000; @@ -8131,7 +8221,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; int pipe = pipe_config->cpu_transcoder; enum dpio_channel port = vlv_pipe_to_channel(pipe); - intel_clock_t clock; + struct dpll clock; u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; int refclk = 100000; @@ -8275,12 +8365,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; + int i; u32 val, final; bool has_lvds = false; bool has_cpu_edp = false; bool has_panel = false; bool has_ck505 = false; bool can_ssc = false; + bool using_ssc_source = false; /* We need to take the global config into account */ for_each_intel_encoder(dev, encoder) { @@ -8307,8 +8399,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) can_ssc = true; } - DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", - has_panel, has_lvds, has_ck505); + /* Check if any DPLLs are using the SSC source */ + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + u32 temp = I915_READ(PCH_DPLL(i)); + + if (!(temp & DPLL_VCO_ENABLE)) + continue; + + if ((temp & PLL_REF_INPUT_MASK) == + PLLB_REF_INPUT_SPREADSPECTRUMIN) { + using_ssc_source = true; + break; + } + } + + DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", + has_panel, has_lvds, has_ck505, using_ssc_source); /* Ironlake: try to setup display ref clock before DPLL * enabling. 
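The using_ssc_source scan added above is self-contained; as a reading aid, here is the same check factored into a helper (a sketch with a hypothetical name, not something this diff adds; all registers and masks are the ones used in the hunk above):

static bool pch_any_dpll_uses_ssc(struct drm_i915_private *dev_priv)
{
        int i;

        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                u32 dpll = I915_READ(PCH_DPLL(i));

                /* only an enabled DPLL can be consuming the SSC reference */
                if (!(dpll & DPLL_VCO_ENABLE))
                        continue;

                if ((dpll & PLL_REF_INPUT_MASK) ==
                    PLLB_REF_INPUT_SPREADSPECTRUMIN)
                        return true;
        }

        return false;
}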
This is only under driver's control after @@ -8345,9 +8451,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; } else final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - } else { - final |= DREF_SSC_SOURCE_DISABLE; - final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + } else if (using_ssc_source) { + final |= DREF_SSC_SOURCE_ENABLE; + final |= DREF_SSC1_ENABLE; } if (final == val) @@ -8393,7 +8499,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) POSTING_READ(PCH_DREF_CONTROL); udelay(200); } else { - DRM_DEBUG_KMS("Disabling SSC entirely\n"); + DRM_DEBUG_KMS("Disabling CPU source output\n"); val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; @@ -8404,16 +8510,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) POSTING_READ(PCH_DREF_CONTROL); udelay(200); - /* Turn off the SSC source */ - val &= ~DREF_SSC_SOURCE_MASK; - val |= DREF_SSC_SOURCE_DISABLE; + if (!using_ssc_source) { + DRM_DEBUG_KMS("Disabling SSC source\n"); - /* Turn off SSC1 */ - val &= ~DREF_SSC1_ENABLE; + /* Turn off the SSC source */ + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_DISABLE; - I915_WRITE(PCH_DREF_CONTROL, val); - POSTING_READ(PCH_DREF_CONTROL); - udelay(200); + /* Turn off SSC1 */ + val &= ~DREF_SSC1_ENABLE; + + I915_WRITE(PCH_DREF_CONTROL, val); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); + } } BUG_ON(val != final); @@ -8794,7 +8904,7 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; @@ -8902,10 +9012,10 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - intel_clock_t reduced_clock; + struct dpll reduced_clock; bool has_reduced_clock = false; struct intel_shared_dpll *pll; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 120000; memset(&crtc_state->dpll_hw_state, 0, @@ -9300,6 +9410,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, ironlake_get_fdi_m_n_config(crtc, pipe_config); if (HAS_PCH_IBX(dev_priv)) { + /* + * The pipe->pch transcoder and pch transcoder->pll + * mapping is fixed. 
+ */ pll_id = (enum intel_dpll_id) crtc->pipe; } else { tmp = I915_READ(PCH_DPLL_SEL); @@ -9560,14 +9674,14 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv) } } -static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state) +static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; struct intel_atomic_state *old_intel_state = to_intel_atomic_state(old_state); unsigned int req_cdclk = old_intel_state->dev_cdclk; - broxton_set_cdclk(to_i915(dev), req_cdclk); + bxt_set_cdclk(to_i915(dev), req_cdclk); } /* compute the max rate for new configuration */ @@ -9687,6 +9801,18 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) cdclk, dev_priv->cdclk_freq); } +static int broadwell_calc_cdclk(int max_pixclk) +{ + if (max_pixclk > 540000) + return 675000; + else if (max_pixclk > 450000) + return 540000; + else if (max_pixclk > 337500) + return 450000; + else + return 337500; +} + static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->dev); @@ -9698,14 +9824,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) * FIXME should also account for plane ratio * once 64bpp pixel formats are supported. */ - if (max_pixclk > 540000) - cdclk = 675000; - else if (max_pixclk > 450000) - cdclk = 540000; - else if (max_pixclk > 337500) - cdclk = 450000; - else - cdclk = 337500; + cdclk = broadwell_calc_cdclk(max_pixclk); if (cdclk > dev_priv->max_cdclk_freq) { DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", @@ -9715,7 +9834,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) intel_state->cdclk = intel_state->dev_cdclk = cdclk; if (!intel_state->active_crtcs) - intel_state->dev_cdclk = 337500; + intel_state->dev_cdclk = broadwell_calc_cdclk(0); return 0; } @@ -9730,6 +9849,47 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state) broadwell_set_cdclk(dev, req_cdclk); } +static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) +{ + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = to_i915(state->dev); + const int max_pixclk = ilk_max_pixel_rate(state); + int vco = intel_state->cdclk_pll_vco; + int cdclk; + + /* + * FIXME should also account for plane ratio + * once 64bpp pixel formats are supported. + */ + cdclk = skl_calc_cdclk(max_pixclk, vco); + + /* + * FIXME move the cdclk calculation to + * compute_config() so we can fail gracefully.
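skl_calc_cdclk() is called here but its body is not part of this excerpt; a plausible sketch, with the per-VCO frequency steps inferred from the skylake_get_display_clock_speed() decode earlier in this diff (treat the exact thresholds as an assumption):

/* sketch only: steps inferred from the CDCLK_CTL decode table above */
static int skl_calc_cdclk(int max_pixclk, int vco)
{
        if (vco == 8640000) {
                if (max_pixclk > 540000)
                        return 617143;
                else if (max_pixclk > 432000)
                        return 540000;
                else if (max_pixclk > 308571)
                        return 432000;
                else
                        return 308571;
        } else {
                if (max_pixclk > 540000)
                        return 675000;
                else if (max_pixclk > 450000)
                        return 540000;
                else if (max_pixclk > 337500)
                        return 450000;
                else
                        return 337500;
        }
}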
+ */ + if (cdclk > dev_priv->max_cdclk_freq) { + DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n", + cdclk, dev_priv->max_cdclk_freq); + cdclk = dev_priv->max_cdclk_freq; + } + + intel_state->cdclk = intel_state->dev_cdclk = cdclk; + if (!intel_state->active_crtcs) + intel_state->dev_cdclk = skl_calc_cdclk(0, vco); + + return 0; +} + +static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state) +{ + struct drm_i915_private *dev_priv = to_i915(old_state->dev); + struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state); + unsigned int req_cdclk = intel_state->dev_cdclk; + unsigned int req_vco = intel_state->cdclk_pll_vco; + + skl_set_cdclk(dev_priv, req_cdclk, req_vco); +} + static int haswell_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { @@ -9850,6 +10010,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, enum intel_display_power_domain power_domain; u32 tmp; + /* + * The pipe->transcoder mapping is fixed with the exception of the eDP + * transcoder handled below. + */ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; /* @@ -10317,10 +10481,10 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, struct drm_i915_gem_object *obj; struct drm_mode_fb_cmd2 mode_cmd = { 0 }; - obj = i915_gem_alloc_object(dev, + obj = i915_gem_object_create(dev, intel_framebuffer_size_for_mode(mode, bpp)); - if (obj == NULL) - return ERR_PTR(-ENOMEM); + if (IS_ERR(obj)) + return ERR_CAST(obj); mode_cmd.width = mode->hdisplay; mode_cmd.height = mode->vdisplay; @@ -10632,7 +10796,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, int pipe = pipe_config->cpu_transcoder; u32 dpll = pipe_config->dpll_hw_state.dpll; u32 fp; - intel_clock_t clock; + struct dpll clock; int port_clock; int refclk = i9xx_pll_refclk(dev, pipe_config); @@ -10806,31 +10970,27 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, return mode; } -void intel_mark_busy(struct drm_device *dev) +void intel_mark_busy(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->mm.busy) return; intel_runtime_pm_get(dev_priv); i915_update_gfx_val(dev_priv); - if (INTEL_INFO(dev)->gen >= 6) + if (INTEL_GEN(dev_priv) >= 6) gen6_rps_busy(dev_priv); dev_priv->mm.busy = true; } -void intel_mark_idle(struct drm_device *dev) +void intel_mark_idle(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (!dev_priv->mm.busy) return; dev_priv->mm.busy = false; - if (INTEL_INFO(dev)->gen >= 6) - gen6_rps_idle(dev->dev_private); + if (INTEL_GEN(dev_priv) >= 6) + gen6_rps_idle(dev_priv); intel_runtime_pm_put(dev_priv); } @@ -10839,15 +10999,16 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = crtc->dev; - struct intel_unpin_work *work; + struct intel_flip_work *work; spin_lock_irq(&dev->event_lock); - work = intel_crtc->unpin_work; - intel_crtc->unpin_work = NULL; + work = intel_crtc->flip_work; + intel_crtc->flip_work = NULL; spin_unlock_irq(&dev->event_lock); if (work) { - cancel_work_sync(&work->work); + cancel_work_sync(&work->mmio_work); + cancel_work_sync(&work->unpin_work); kfree(work); } @@ -10858,12 +11019,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) static void intel_unpin_work_fn(struct work_struct *__work) { - struct intel_unpin_work *work = - container_of(__work, struct intel_unpin_work, work); + struct intel_flip_work *work = 
+ container_of(__work, struct intel_flip_work, unpin_work); struct intel_crtc *crtc = to_intel_crtc(work->crtc); struct drm_device *dev = crtc->base.dev; struct drm_plane *primary = crtc->base.primary; + if (is_mmio_work(work)) + flush_work(&work->mmio_work); + mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(work->old_fb, primary->state->rotation); drm_gem_object_unreference(&work->pending_flip_obj->base); @@ -10882,60 +11046,14 @@ static void intel_unpin_work_fn(struct work_struct *__work) kfree(work); } -static void do_intel_finish_page_flip(struct drm_device *dev, - struct drm_crtc *crtc) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work; - unsigned long flags; - - /* Ignore early vblank irqs */ - if (intel_crtc == NULL) - return; - - /* - * This is called both by irq handlers and the reset code (to complete - * lost pageflips) so needs the full irqsave spinlocks. - */ - spin_lock_irqsave(&dev->event_lock, flags); - work = intel_crtc->unpin_work; - - /* Ensure we don't miss a work->pending update ... */ - smp_rmb(); - - if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { - spin_unlock_irqrestore(&dev->event_lock, flags); - return; - } - - page_flip_completed(intel_crtc); - - spin_unlock_irqrestore(&dev->event_lock, flags); -} - -void intel_finish_page_flip(struct drm_device *dev, int pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - - do_intel_finish_page_flip(dev, crtc); -} - -void intel_finish_page_flip_plane(struct drm_device *dev, int plane) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; - - do_intel_finish_page_flip(dev, crtc); -} - /* Is 'a' after or equal to 'b'? */ static bool g4x_flip_count_after_eq(u32 a, u32 b) { return !((a - b) & 0x80000000); } -static bool page_flip_finished(struct intel_crtc *crtc) +static bool __pageflip_finished_cs(struct intel_crtc *crtc, + struct intel_flip_work *work) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -10977,40 +11095,103 @@ static bool page_flip_finished(struct intel_crtc *crtc) * anyway, we don't really care. */ return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == - crtc->unpin_work->gtt_offset && + crtc->flip_work->gtt_offset && g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)), - crtc->unpin_work->flip_count); + crtc->flip_work->flip_count); } -void intel_prepare_page_flip(struct drm_device *dev, int plane) +static bool +__pageflip_finished_mmio(struct intel_crtc *crtc, + struct intel_flip_work *work) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); + /* + * MMIO work completes when vblank is different from + * flip_queued_vblank. + * + * Reset counter value doesn't matter, this is handled by + * i915_wait_request finishing early, so no need to handle + * reset here. 
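The g4x_flip_count_after_eq() helper retained above relies on modular u32 arithmetic, so it stays correct across counter wraparound as long as the two counts are within 2^31 of each other; a quick worked example (values chosen for illustration):

/* illustration of g4x_flip_count_after_eq() across a u32 wraparound */
u32 a = 0x00000001;     /* count just after wrapping */
u32 b = 0xffffffff;     /* count just before wrapping */

/* a - b == 2 (mod 2^32), top bit clear: "a is after b" */
WARN_ON(!g4x_flip_count_after_eq(a, b));

/* b - a == 0xfffffffe, top bit set: "b is not after a" */
WARN_ON(g4x_flip_count_after_eq(b, a));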
+ */ + return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank; +} + + +static bool pageflip_finished(struct intel_crtc *crtc, + struct intel_flip_work *work) +{ + if (!atomic_read(&work->pending)) + return false; + + smp_rmb(); + + if (is_mmio_work(work)) + return __pageflip_finished_mmio(crtc, work); + else + return __pageflip_finished_cs(crtc, work); +} + +void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe) +{ + struct drm_device *dev = dev_priv->dev; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_flip_work *work; unsigned long flags; + /* Ignore early vblank irqs */ + if (!crtc) + return; /* * This is called both by irq handlers and the reset code (to complete * lost pageflips) so needs the full irqsave spinlocks. - * - * NB: An MMIO update of the plane base pointer will also - * generate a page-flip completion irq, i.e. every modeset - * is also accompanied by a spurious intel_prepare_page_flip(). */ spin_lock_irqsave(&dev->event_lock, flags); - if (intel_crtc->unpin_work && page_flip_finished(intel_crtc)) - atomic_inc_not_zero(&intel_crtc->unpin_work->pending); + work = intel_crtc->flip_work; + + if (work != NULL && + !is_mmio_work(work) && + pageflip_finished(intel_crtc, work)) + page_flip_completed(intel_crtc); + spin_unlock_irqrestore(&dev->event_lock, flags); } -static inline void intel_mark_page_flip_active(struct intel_unpin_work *work) +void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe) { + struct drm_device *dev = dev_priv->dev; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_flip_work *work; + unsigned long flags; + + /* Ignore early vblank irqs */ + if (!crtc) + return; + + /* + * This is called both by irq handlers and the reset code (to complete + * lost pageflips) so needs the full irqsave spinlocks. + */ + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->flip_work; + + if (work != NULL && + is_mmio_work(work) && + pageflip_finished(intel_crtc, work)) + page_flip_completed(intel_crtc); + + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +static inline void intel_mark_page_flip_active(struct intel_crtc *crtc, + struct intel_flip_work *work) +{ + work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc); + /* Ensure that the work item is consistent when activating it ... */ - smp_wmb(); - atomic_set(&work->pending, INTEL_FLIP_PENDING); - /* and that it is marked active as soon as the irq could fire. 
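The pending flag is a small publish/subscribe protocol: the writer, intel_mark_page_flip_active() (completed just past this point), publishes the work fields before setting the flag, and readers such as pageflip_finished() above inspect those fields only after seeing the flag. A condensed sketch of the pairing, using only names from this diff:

/* writer (intel_mark_page_flip_active): publish fields, then the flag */
work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
smp_mb__before_atomic();        /* order the field writes before the flag */
atomic_set(&work->pending, 1);

/* reader (pageflip_finished): check the flag, then read the fields */
if (!atomic_read(&work->pending))
        return false;
smp_rmb();                      /* pairs with the writer's barrier */
/* ... work->flip_queued_vblank etc. are now safe to read ... */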
*/ - smp_wmb(); + smp_mb__before_atomic(); + atomic_set(&work->pending, 1); } static int intel_gen2_queue_flip(struct drm_device *dev, @@ -11041,10 +11222,9 @@ static int intel_gen2_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); intel_ring_emit(engine, 0); /* aux display base address, unused */ - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11073,10 +11253,9 @@ static int intel_gen3_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); intel_ring_emit(engine, MI_NOOP); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11104,7 +11283,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset | + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset | obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far @@ -11115,7 +11294,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev, pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; intel_ring_emit(engine, pf | pipesrc); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11139,7 +11317,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); /* Contrary to the suggestions in the documentation, * "Enable Panel Fitter" does not seem to be required when page @@ -11151,7 +11329,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev, pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; intel_ring_emit(engine, pf | pipesrc); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11243,16 +11420,17 @@ static int intel_gen7_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit); intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode)); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); intel_ring_emit(engine, (MI_NOOP)); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } static bool use_mmio_flip(struct intel_engine_cs *engine, struct drm_i915_gem_object *obj) { + struct reservation_object *resv; + /* * This is not being used for older platforms, because * non-availability of flip done interrupt forces us to use @@ -11264,7 +11442,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine, if (engine == NULL) return true; - if (INTEL_INFO(engine->dev)->gen < 5) + if (INTEL_GEN(engine->i915) < 5) return false; if (i915.use_mmio_flip < 0) @@ -11273,17 +11451,17 @@ static bool use_mmio_flip(struct intel_engine_cs *engine, return true; else if (i915.enable_execlists) return true; - else if (obj->base.dma_buf && - 
!reservation_object_test_signaled_rcu(obj->base.dma_buf->resv, - false)) + + resv = i915_gem_object_get_dmabuf_resv(obj); + if (resv && !reservation_object_test_signaled_rcu(resv, false)) return true; - else - return engine != i915_gem_request_get_engine(obj->last_write_req); + + return engine != i915_gem_request_get_engine(obj->last_write_req); } static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, unsigned int rotation, - struct intel_unpin_work *work) + struct intel_flip_work *work) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -11335,7 +11513,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, } static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, - struct intel_unpin_work *work) + struct intel_flip_work *work) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -11358,78 +11536,37 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, POSTING_READ(DSPSURF(intel_crtc->plane)); } -/* - * XXX: This is the temporary way to update the plane registers until we get - * around to using the usual plane update functions for MMIO flips - */ -static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip) -{ - struct intel_crtc *crtc = mmio_flip->crtc; - struct intel_unpin_work *work; - - spin_lock_irq(&crtc->base.dev->event_lock); - work = crtc->unpin_work; - spin_unlock_irq(&crtc->base.dev->event_lock); - if (work == NULL) - return; - - intel_mark_page_flip_active(work); - - intel_pipe_update_start(crtc); - - if (INTEL_INFO(mmio_flip->i915)->gen >= 9) - skl_do_mmio_flip(crtc, mmio_flip->rotation, work); - else - /* use_mmio_flip() retricts MMIO flips to ilk+ */ - ilk_do_mmio_flip(crtc, work); - - intel_pipe_update_end(crtc); -} - -static void intel_mmio_flip_work_func(struct work_struct *work) +static void intel_mmio_flip_work_func(struct work_struct *w) { - struct intel_mmio_flip *mmio_flip = - container_of(work, struct intel_mmio_flip, work); + struct intel_flip_work *work = + container_of(w, struct intel_flip_work, mmio_work); + struct intel_crtc *crtc = to_intel_crtc(work->crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_framebuffer *intel_fb = - to_intel_framebuffer(mmio_flip->crtc->base.primary->fb); + to_intel_framebuffer(crtc->base.primary->fb); struct drm_i915_gem_object *obj = intel_fb->obj; + struct reservation_object *resv; - if (mmio_flip->req) { - WARN_ON(__i915_wait_request(mmio_flip->req, + if (work->flip_queued_req) + WARN_ON(__i915_wait_request(work->flip_queued_req, false, NULL, - &mmio_flip->i915->rps.mmioflips)); - i915_gem_request_unreference__unlocked(mmio_flip->req); - } + &dev_priv->rps.mmioflips)); /* For framebuffer backed by dmabuf, wait for fence */ - if (obj->base.dma_buf) - WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv, - false, false, + resv = i915_gem_object_get_dmabuf_resv(obj); + if (resv) + WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false, MAX_SCHEDULE_TIMEOUT) < 0); - intel_do_mmio_flip(mmio_flip); - kfree(mmio_flip); -} - -static int intel_queue_mmio_flip(struct drm_device *dev, - struct drm_crtc *crtc, - struct drm_i915_gem_object *obj) -{ - struct intel_mmio_flip *mmio_flip; - - mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL); - if (mmio_flip == NULL) - return -ENOMEM; - - mmio_flip->i915 = to_i915(dev); - mmio_flip->req = i915_gem_request_reference(obj->last_write_req); - mmio_flip->crtc = to_intel_crtc(crtc); - mmio_flip->rotation = 
crtc->primary->state->rotation; + intel_pipe_update_start(crtc); - INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); - schedule_work(&mmio_flip->work); + if (INTEL_GEN(dev_priv) >= 9) + skl_do_mmio_flip(crtc, work->rotation, work); + else + /* use_mmio_flip() restricts MMIO flips to ilk+ */ + ilk_do_mmio_flip(crtc, work); - return 0; + intel_pipe_update_end(crtc, work); } static int intel_default_queue_flip(struct drm_device *dev, @@ -11442,37 +11579,32 @@ static int intel_default_queue_flip(struct drm_device *dev, return -ENODEV; } -static bool __intel_pageflip_stall_check(struct drm_device *dev, - struct drm_crtc *crtc) +static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv, + struct intel_crtc *intel_crtc, + struct intel_flip_work *work) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work = intel_crtc->unpin_work; - u32 addr; - - if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) - return true; + u32 addr, vblank; - if (atomic_read(&work->pending) < INTEL_FLIP_PENDING) + if (!atomic_read(&work->pending)) return false; - if (!work->enable_stall_check) - return false; + smp_rmb(); + vblank = intel_crtc_get_vblank_counter(intel_crtc); if (work->flip_ready_vblank == 0) { if (work->flip_queued_req && !i915_gem_request_completed(work->flip_queued_req, true)) return false; - work->flip_ready_vblank = drm_crtc_vblank_count(crtc); + work->flip_ready_vblank = vblank; } - if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3) + if (vblank - work->flip_ready_vblank < 3) return false; /* Potential stall - if we see that the flip has happened, * assume a missed interrupt. */ - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); else addr = I915_READ(DSPADDR(intel_crtc->plane)); @@ -11484,12 +11616,12 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev, return addr == work->gtt_offset; } -void intel_check_page_flip(struct drm_device *dev, int pipe) +void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = dev_priv->dev; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work; + struct intel_flip_work *work; WARN_ON(!in_interrupt()); @@ -11497,19 +11629,24 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) return; spin_lock(&dev->event_lock); - work = intel_crtc->unpin_work; - if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) { - WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", - work->flip_queued_vblank, drm_vblank_count(dev, pipe)); + work = intel_crtc->flip_work; + + if (work != NULL && !is_mmio_work(work) && + __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) { + WARN_ONCE(1, + "Kicking stuck page flip: queued at %d, now %d\n", + work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc)); page_flip_completed(intel_crtc); work = NULL; } - if (work != NULL && - drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1) - intel_queue_rps_boost_for_request(dev, work->flip_queued_req); + + if (work != NULL && !is_mmio_work(work) && + intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1) + intel_queue_rps_boost_for_request(work->flip_queued_req); spin_unlock(&dev->event_lock); } +__maybe_unused static int
intel_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, @@ -11522,7 +11659,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_plane *primary = crtc->primary; enum pipe pipe = intel_crtc->pipe; - struct intel_unpin_work *work; + struct intel_flip_work *work; struct intel_engine_cs *engine; bool mmio_flip; struct drm_i915_gem_request *request = NULL; @@ -11559,19 +11696,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->event = event; work->crtc = crtc; work->old_fb = old_fb; - INIT_WORK(&work->work, intel_unpin_work_fn); + INIT_WORK(&work->unpin_work, intel_unpin_work_fn); ret = drm_crtc_vblank_get(crtc); if (ret) goto free_work; - /* We borrow the event spin lock for protecting unpin_work */ + /* We borrow the event spin lock for protecting flip_work */ spin_lock_irq(&dev->event_lock); - if (intel_crtc->unpin_work) { + if (intel_crtc->flip_work) { /* Before declaring the flip queue wedged, check if * the hardware completed the operation behind our backs. */ - if (__intel_pageflip_stall_check(dev, crtc)) { + if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) { DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n"); page_flip_completed(intel_crtc); } else { @@ -11583,7 +11720,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return -EBUSY; } } - intel_crtc->unpin_work = work; + intel_crtc->flip_work = work; spin_unlock_irq(&dev->event_lock); if (atomic_read(&intel_crtc->unpin_work_count) >= 2) @@ -11595,7 +11732,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, crtc->primary->fb = fb; update_state_fb(crtc->primary); - intel_fbc_pre_update(intel_crtc); + + intel_fbc_pre_update(intel_crtc, intel_crtc->config, + to_intel_plane_state(primary->state)); work->pending_flip_obj = obj; @@ -11638,6 +11777,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, */ if (!mmio_flip) { ret = i915_gem_object_sync(obj, engine, &request); + if (!ret && !request) { + request = i915_gem_request_alloc(engine, NULL); + ret = PTR_ERR_OR_ZERO(request); + } + if (ret) goto cleanup_pending; } @@ -11649,38 +11793,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj, 0); work->gtt_offset += intel_crtc->dspaddr_offset; + work->rotation = crtc->primary->state->rotation; if (mmio_flip) { - ret = intel_queue_mmio_flip(dev, crtc, obj); - if (ret) - goto cleanup_unpin; + INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func); i915_gem_request_assign(&work->flip_queued_req, obj->last_write_req); - } else { - if (!request) { - request = i915_gem_request_alloc(engine, NULL); - if (IS_ERR(request)) { - ret = PTR_ERR(request); - goto cleanup_unpin; - } - } + schedule_work(&work->mmio_work); + } else { + i915_gem_request_assign(&work->flip_queued_req, request); ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, page_flip_flags); if (ret) goto cleanup_unpin; - i915_gem_request_assign(&work->flip_queued_req, request); - } + intel_mark_page_flip_active(intel_crtc, work); - if (request) i915_add_request_no_flush(request); + } - work->flip_queued_vblank = drm_crtc_vblank_count(crtc); - work->enable_stall_check = true; - - i915_gem_track_fb(intel_fb_obj(work->old_fb), obj, + i915_gem_track_fb(intel_fb_obj(old_fb), obj, to_intel_plane(primary)->frontbuffer_bit); mutex_unlock(&dev->struct_mutex); @@ -11706,7 +11840,7 @@ cleanup: 
drm_framebuffer_unreference(work->old_fb); spin_lock_irq(&dev->event_lock); - intel_crtc->unpin_work = NULL; + intel_crtc->flip_work = NULL; spin_unlock_irq(&dev->event_lock); drm_crtc_vblank_put(crtc); @@ -11808,12 +11942,12 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane_state *old_plane_state = to_intel_plane_state(plane->state); - int idx = intel_crtc->base.base.id, ret; bool mode_changed = needs_modeset(crtc_state); bool was_crtc_enabled = crtc->state->active; bool is_crtc_enabled = crtc_state->active; bool turn_off, turn_on, visible, was_visible; struct drm_framebuffer *fb = plane_state->fb; + int ret; if (crtc_state && INTEL_INFO(dev)->gen >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) { @@ -11834,6 +11968,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, * Visibility is calculated as if the crtc was on, but * after scaler setup everything depends on it being off * when the crtc isn't active. + * + * FIXME this is wrong for watermarks. Watermarks should also + * be computed as if the pipe would be active. Perhaps move + * per-plane wm computation to the .check_plane() hook, and + * only combine the results from all planes in the current place? */ if (!is_crtc_enabled) to_intel_plane_state(plane_state)->visible = visible = false; @@ -11847,11 +11986,15 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, turn_off = was_visible && (!visible || mode_changed); turn_on = visible && (!was_visible || mode_changed); - DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx, - plane->base.id, fb ? fb->base.id : -1); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n", + intel_crtc->base.base.id, + intel_crtc->base.name, + plane->base.id, plane->name, + fb ? fb->base.id : -1); - DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n", - plane->base.id, was_visible, visible, + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", + plane->base.id, plane->name, + was_visible, visible, turn_off, turn_on, mode_changed); if (turn_on) { @@ -12007,7 +12150,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, } } else if (dev_priv->display.compute_intermediate_wm) { if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) - pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk; + pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal; } if (INTEL_INFO(dev)->gen >= 9) { @@ -12142,7 +12285,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, struct intel_plane_state *state; struct drm_framebuffer *fb; - DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id, + DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n", + crtc->base.base.id, crtc->base.name, context, pipe_config, pipe_name(crtc->pipe)); DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder)); @@ -12243,29 +12387,24 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, state = to_intel_plane_state(plane->state); fb = state->base.fb; if (!fb) { - DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d " - "disabled, scaler_id = %d\n", - plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD", - plane->base.id, intel_plane->pipe, - (crtc->base.primary == plane) ? 
0 : intel_plane->plane + 1, - drm_plane_index(plane), state->scaler_id); + DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n", + plane->base.id, plane->name, state->scaler_id); continue; } - DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled", - plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD", - plane->base.id, intel_plane->pipe, - crtc->base.primary == plane ? 0 : intel_plane->plane + 1, - drm_plane_index(plane)); - DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x", - fb->base.id, fb->width, fb->height, fb->pixel_format); - DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n", - state->scaler_id, - state->src.x1 >> 16, state->src.y1 >> 16, - drm_rect_width(&state->src) >> 16, - drm_rect_height(&state->src) >> 16, - state->dst.x1, state->dst.y1, - drm_rect_width(&state->dst), drm_rect_height(&state->dst)); + DRM_DEBUG_KMS("[PLANE:%d:%s] enabled", + plane->base.id, plane->name); + DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s", + fb->base.id, fb->width, fb->height, + drm_get_format_name(fb->pixel_format)); + DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", + state->scaler_id, + state->src.x1 >> 16, state->src.y1 >> 16, + drm_rect_width(&state->src) >> 16, + drm_rect_height(&state->src) >> 16, + state->dst.x1, state->dst.y1, + drm_rect_width(&state->dst), + drm_rect_height(&state->dst)); } } @@ -12684,6 +12823,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(has_dp_encoder); PIPE_CONF_CHECK_I(lane_count); + PIPE_CONF_CHECK_X(lane_lat_optim_mask); if (INTEL_INFO(dev)->gen < 8) { PIPE_CONF_CHECK_M_N(dp_m_n); @@ -12932,7 +13072,7 @@ verify_crtc_state(struct drm_crtc *crtc, pipe_config->base.crtc = crtc; pipe_config->base.state = old_state; - DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); @@ -13280,6 +13420,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state) intel_state->active_crtcs |= 1 << i; else intel_state->active_crtcs &= ~(1 << i); + + if (crtc_state->active != crtc->state->active) + intel_state->active_pipe_changes |= drm_crtc_mask(crtc); } /* @@ -13290,9 +13433,17 @@ static int intel_modeset_checks(struct drm_atomic_state *state) * adjusted_mode bits in the crtc directly. */ if (dev_priv->display.modeset_calc_cdclk) { + if (!intel_state->cdclk_pll_vco) + intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco; + if (!intel_state->cdclk_pll_vco) + intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq; + ret = dev_priv->display.modeset_calc_cdclk(state); + if (ret < 0) + return ret; - if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq) + if (intel_state->dev_cdclk != dev_priv->cdclk_freq || + intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) ret = intel_modeset_all_pipes(state); if (ret < 0) @@ -13316,38 +13467,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state) * phase. The code here should be run after the per-crtc and per-plane 'check' * handlers to ensure that all derived state has been updated. 
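The two-part test for cdclk reprogramming (frequency or PLL VCO changed) now appears both in intel_modeset_checks() above and in the commit path further down; a hypothetical helper capturing the condition, purely for clarity and not part of this diff:

static bool intel_cdclk_state_changed(struct drm_i915_private *dev_priv,
                                      struct intel_atomic_state *state)
{
        /* a change in either value forces cdclk reprogramming */
        return state->dev_cdclk != dev_priv->cdclk_freq ||
               state->cdclk_pll_vco != dev_priv->cdclk_pll.vco;
}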
*/ -static void calc_watermark_data(struct drm_atomic_state *state) +static int calc_watermark_data(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_crtc *crtc; - struct drm_crtc_state *cstate; - struct drm_plane *plane; - struct drm_plane_state *pstate; - - /* - * Calculate watermark configuration details now that derived - * plane/crtc state is all properly updated. - */ - drm_for_each_crtc(crtc, dev) { - cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?: - crtc->state; - - if (cstate->active) - intel_state->wm_config.num_pipes_active++; - } - drm_for_each_legacy_plane(plane, dev) { - pstate = drm_atomic_get_existing_plane_state(state, plane) ?: - plane->state; + struct drm_i915_private *dev_priv = to_i915(dev); - if (!to_intel_plane_state(pstate)->visible) - continue; + /* Is there platform-specific watermark information to calculate? */ + if (dev_priv->display.compute_global_watermarks) + return dev_priv->display.compute_global_watermarks(state); - intel_state->wm_config.sprites_enabled = true; - if (pstate->crtc_w != pstate->src_w >> 16 || - pstate->crtc_h != pstate->src_h >> 16) - intel_state->wm_config.sprites_scaled = true; - } + return 0; } /** @@ -13377,14 +13506,13 @@ static int intel_atomic_check(struct drm_device *dev, if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) crtc_state->mode_changed = true; - if (!crtc_state->enable) { - if (needs_modeset(crtc_state)) - any_ms = true; + if (!needs_modeset(crtc_state)) continue; - } - if (!needs_modeset(crtc_state)) + if (!crtc_state->enable) { + any_ms = true; continue; + } /* FIXME: For only active_changed we shouldn't need to do any * state recomputation at all. */ @@ -13394,8 +13522,11 @@ static int intel_atomic_check(struct drm_device *dev, return ret; ret = intel_modeset_pipe_config(crtc, pipe_config); - if (ret) + if (ret) { + intel_dump_pipe_config(to_intel_crtc(crtc), + pipe_config, "[failed]"); return ret; + } if (i915.fastboot && intel_pipe_config_compare(dev, @@ -13405,13 +13536,12 @@ static int intel_atomic_check(struct drm_device *dev, to_intel_crtc_state(crtc_state)->update_pipe = true; } - if (needs_modeset(crtc_state)) { + if (needs_modeset(crtc_state)) any_ms = true; - ret = drm_atomic_add_affected_planes(state, crtc); - if (ret) - return ret; - } + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret) + return ret; intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, needs_modeset(crtc_state) ? 
@@ -13431,9 +13561,7 @@ static int intel_atomic_check(struct drm_device *dev, return ret; intel_fbc_choose_crtc(dev_priv, state); - calc_watermark_data(state); - - return 0; + return calc_watermark_data(state); } static int intel_atomic_prepare_commit(struct drm_device *dev, @@ -13447,11 +13575,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, struct drm_crtc *crtc; int i, ret; - if (nonblock) { - DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n"); - return -EINVAL; - } - for_each_crtc_in_state(state, crtc, crtc_state, i) { if (state->legacy_cursor_update) continue; @@ -13495,6 +13618,16 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, return ret; } +u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + + if (!dev->max_vblank_count) + return drm_accurate_vblank_count(&crtc->base); + + return dev->driver->get_vblank_counter(dev, crtc->pipe); +} + static void intel_atomic_wait_for_vblanks(struct drm_device *dev, struct drm_i915_private *dev_priv, unsigned crtc_mask) @@ -13560,45 +13693,36 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state) return false; } -/** - * intel_atomic_commit - commit validated state object - * @dev: DRM device - * @state: the top-level driver state object - * @nonblock: nonblocking commit - * - * This function commits a top-level state object that has been validated - * with drm_atomic_helper_check(). - * - * FIXME: Atomic modeset support for i915 is not yet complete. At the moment - * we can only handle plane-related operations and do not yet support - * nonblocking commit. - * - * RETURNS - * Zero for success or -errno. - */ -static int intel_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, - bool nonblock) +static void intel_atomic_commit_tail(struct drm_atomic_state *state) { + struct drm_device *dev = state->dev; struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc_state *old_crtc_state; struct drm_crtc *crtc; struct intel_crtc_state *intel_cstate; - int ret = 0, i; + struct drm_plane *plane; + struct drm_plane_state *plane_state; bool hw_check = intel_state->modeset; unsigned long put_domains[I915_MAX_PIPES] = {}; unsigned crtc_vblank_mask = 0; + int i, ret; - ret = intel_atomic_prepare_commit(dev, state, nonblock); - if (ret) { - DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); - return ret; + for_each_plane_in_state(state, plane, plane_state, i) { + struct intel_plane_state *intel_plane_state = + to_intel_plane_state(plane_state); + + if (!intel_plane_state->wait_req) + continue; + + ret = __i915_wait_request(intel_plane_state->wait_req, + true, NULL, NULL); + /* EIO should be eaten, and we can't get interrupted in the + * worker, and blocking commits have waited already. 
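intel_crtc_get_vblank_counter() above hides the difference between pipes with a hardware frame counter and those without (dev->max_vblank_count == 0, where the software count is used instead); the flip completion checks in this diff compare its values rather than raw register reads. A sketch of the consuming pattern, with a hypothetical helper name:

/* sketch: how the MMIO flip completion check consumes the helper above */
static bool flip_vblank_passed(struct intel_crtc *crtc,
                               const struct intel_flip_work *work)
{
        /* any change means at least one vblank since the flip was armed */
        return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
}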
*/ + WARN_ON(ret); } - drm_atomic_helper_swap_state(dev, state); - dev_priv->wm.config = intel_state->wm_config; - intel_shared_dpll_commit(state); + drm_atomic_helper_wait_for_dependencies(state); if (intel_state->modeset) { memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, @@ -13653,7 +13777,8 @@ static int intel_atomic_commit(struct drm_device *dev, drm_atomic_helper_update_legacy_modeset_state(state->dev, state); if (dev_priv->display.modeset_commit_cdclk && - intel_state->dev_cdclk != dev_priv->cdclk_freq) + (intel_state->dev_cdclk != dev_priv->cdclk_freq || + intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)) dev_priv->display.modeset_commit_cdclk(state); intel_modeset_verify_disabled(dev); @@ -13665,30 +13790,44 @@ bool modeset = needs_modeset(crtc->state); struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state); - bool update_pipe = !modeset && pipe_config->update_pipe; if (modeset && crtc->state->active) { update_scanline_offset(to_intel_crtc(crtc)); dev_priv->display.crtc_enable(crtc); } + /* Complete events for now disable pipes here. */ + if (modeset && !crtc->state->active && crtc->state->event) { + spin_lock_irq(&dev->event_lock); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + spin_unlock_irq(&dev->event_lock); + + crtc->state->event = NULL; + } + if (!modeset) intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); if (crtc->state->active && drm_atomic_get_existing_plane_state(state, crtc->primary)) - intel_fbc_enable(intel_crtc); + intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state)); - if (crtc->state->active && - (crtc->state->planes_changed || update_pipe)) + if (crtc->state->active) drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); if (pipe_config->base.active && needs_vblank_wait(pipe_config)) crtc_vblank_mask |= 1 << i; } - /* FIXME: add subpixel order */ - + /* FIXME: We should call drm_atomic_helper_commit_hw_done() here * already, but still need the state for the delayed optimization. To + * fix this: + * - wrap the optimization/post_plane_update stuff into a per-crtc work. + * - schedule that vblank worker _before_ calling hw_done + * - at the start of commit_tail, cancel it _synchronously + * - switch over to the vblank wait helper in the core after that since + * we don't need our special handling any more. + */ if (!state->legacy_cursor_update) intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask); @@ -13715,6 +13854,8 @@ intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); } + drm_atomic_helper_commit_hw_done(state); + if (intel_state->modeset) intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); @@ -13722,6 +13863,8 @@ drm_atomic_helper_cleanup_planes(dev, state); mutex_unlock(&dev->struct_mutex); + drm_atomic_helper_commit_cleanup_done(state); + drm_atomic_state_free(state); /* As one of the primary mmio accessors, KMS has a high likelihood @@ -13736,6 +13879,86 @@ * can happen also when the device is completely off.
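For orientation, the commit helpers used in intel_atomic_commit_tail() above impose a fixed ordering on every commit; a stripped-down sketch built only from calls visible in this diff (illustrative function name, not part of the patch):

static void example_commit_tail(struct drm_atomic_state *state)
{
        /* block until the commits this one depends on have finished */
        drm_atomic_helper_wait_for_dependencies(state);

        /* ... program CRTCs and planes, send events, wait for vblanks ... */

        /* unblock commits that depend on this one */
        drm_atomic_helper_commit_hw_done(state);

        /* release buffers only after the hardware is done with them */
        drm_atomic_helper_cleanup_planes(state->dev, state);
        drm_atomic_helper_commit_cleanup_done(state);

        drm_atomic_state_free(state);
}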
*/ intel_uncore_arm_unclaimed_mmio_detection(dev_priv); +} + +static void intel_atomic_commit_work(struct work_struct *work) +{ + struct drm_atomic_state *state = container_of(work, + struct drm_atomic_state, + commit_work); + intel_atomic_commit_tail(state); +} + +static void intel_atomic_track_fbs(struct drm_atomic_state *state) +{ + struct drm_plane_state *old_plane_state; + struct drm_plane *plane; + struct drm_i915_gem_object *obj, *old_obj; + struct intel_plane *intel_plane; + int i; + + mutex_lock(&state->dev->struct_mutex); + for_each_plane_in_state(state, plane, old_plane_state, i) { + obj = intel_fb_obj(plane->state->fb); + old_obj = intel_fb_obj(old_plane_state->fb); + intel_plane = to_intel_plane(plane); + + i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); + } + mutex_unlock(&state->dev->struct_mutex); +} + +/** + * intel_atomic_commit - commit validated state object + * @dev: DRM device + * @state: the top-level driver state object + * @nonblock: nonblocking commit + * + * This function commits a top-level state object that has been validated + * with drm_atomic_helper_check(). + * + * FIXME: Atomic modeset support for i915 is not yet complete. At the moment + * nonblocking commits are only safe for pure plane updates. Everything else + * should work though. + * + * RETURNS + * Zero for success or -errno. + */ +static int intel_atomic_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool nonblock) +{ + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = dev->dev_private; + int ret = 0; + + if (intel_state->modeset && nonblock) { + DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n"); + return -EINVAL; + } + + ret = drm_atomic_helper_setup_commit(state, nonblock); + if (ret) + return ret; + + INIT_WORK(&state->commit_work, intel_atomic_commit_work); + + ret = intel_atomic_prepare_commit(dev, state, nonblock); + if (ret) { + DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); + return ret; + } + + drm_atomic_helper_swap_state(state, true); + dev_priv->wm.distrust_bios_wm = false; + dev_priv->wm.skl_results = intel_state->wm_results; + intel_shared_dpll_commit(state); + intel_atomic_track_fbs(state); + + if (nonblock) + queue_work(system_unbound_wq, &state->commit_work); + else + intel_atomic_commit_tail(state); return 0; } @@ -13749,8 +13972,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc) state = drm_atomic_state_alloc(dev); if (!state) { - DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory", - crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory", + crtc->base.id, crtc->name); return; } @@ -13785,7 +14008,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .set_property = drm_atomic_helper_crtc_set_property, .destroy = intel_crtc_destroy, - .page_flip = intel_crtc_page_flip, + .page_flip = drm_atomic_helper_page_flip, .atomic_duplicate_state = intel_crtc_duplicate_state, .atomic_destroy_state = intel_crtc_destroy_state, }; @@ -13810,9 +14033,9 @@ intel_prepare_plane_fb(struct drm_plane *plane, { struct drm_device *dev = plane->dev; struct drm_framebuffer *fb = new_state->fb; - struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); + struct reservation_object *resv; int ret = 0; if (!obj && !old_obj) @@ -13842,12 +14065,15 @@ 
intel_prepare_plane_fb(struct drm_plane *plane, } } + if (!obj) + return 0; + /* For framebuffer backed by dmabuf, wait for fence */ - if (obj && obj->base.dma_buf) { + resv = i915_gem_object_get_dmabuf_resv(obj); + if (resv) { long lret; - lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv, - false, true, + lret = reservation_object_wait_timeout_rcu(resv, false, true, MAX_SCHEDULE_TIMEOUT); if (lret == -ERESTARTSYS) return lret; @@ -13855,9 +14081,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, WARN(lret < 0, "waiting returns %li\n", lret); } - if (!obj) { - ret = 0; - } else if (plane->type == DRM_PLANE_TYPE_CURSOR && + if (plane->type == DRM_PLANE_TYPE_CURSOR && INTEL_INFO(dev)->cursor_needs_physical) { int align = IS_I830(dev) ? 16 * 1024 : 256; ret = i915_gem_object_attach_phys(obj, align); @@ -13868,15 +14092,11 @@ intel_prepare_plane_fb(struct drm_plane *plane, } if (ret == 0) { - if (obj) { - struct intel_plane_state *plane_state = - to_intel_plane_state(new_state); - - i915_gem_request_assign(&plane_state->wait_req, - obj->last_write_req); - } + struct intel_plane_state *plane_state = + to_intel_plane_state(new_state); - i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); + i915_gem_request_assign(&plane_state->wait_req, + obj->last_write_req); } return ret; @@ -13896,7 +14116,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane, const struct drm_plane_state *old_state) { struct drm_device *dev = plane->dev; - struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_plane_state *old_intel_state; struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb); struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb); @@ -13910,11 +14129,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane, !INTEL_INFO(dev)->cursor_needs_physical)) intel_unpin_fb_obj(old_state->fb, old_state->rotation); - /* prepare_fb aborted? 
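Both intel_prepare_plane_fb() above and the MMIO flip worker earlier wait on a dma-buf's reservation object via the new i915_gem_object_get_dmabuf_resv() accessor; a hypothetical wrapper showing the shape of that call (wait_all=false means only the exclusive write fence is waited for, per the usage in this diff):

static long wait_for_dmabuf_fences(struct drm_i915_gem_object *obj,
                                   bool interruptible)
{
        struct reservation_object *resv = i915_gem_object_get_dmabuf_resv(obj);

        if (!resv)
                return 0;       /* not dma-buf backed, nothing to wait on */

        /* wait_all=false: only the exclusive (write) fence is waited on */
        return reservation_object_wait_timeout_rcu(resv, false, interruptible,
                                                   MAX_SCHEDULE_TIMEOUT);
}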
*/ - if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) || - (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit))) - i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); - i915_gem_request_assign(&old_intel_state->wait_req, NULL); } @@ -13970,6 +14184,7 @@ intel_check_primary_plane(struct drm_plane *plane, return drm_plane_helper_check_update(plane, crtc, fb, &state->src, &state->dst, &state->clip, + state->base.rotation, min_scale, max_scale, can_position, true, &state->visible); @@ -14006,7 +14221,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc, { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - intel_pipe_update_end(intel_crtc); + intel_pipe_update_end(intel_crtc, NULL); } /** @@ -14018,9 +14233,11 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc, */ void intel_plane_destroy(struct drm_plane *plane) { - struct intel_plane *intel_plane = to_intel_plane(plane); + if (!plane) + return; + drm_plane_cleanup(plane); - kfree(intel_plane); + kfree(to_intel_plane(plane)); } const struct drm_plane_funcs intel_plane_funcs = { @@ -14092,10 +14309,24 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, primary->disable_plane = i9xx_disable_primary_plane; } - ret = drm_universal_plane_init(dev, &primary->base, 0, - &intel_plane_funcs, - intel_primary_formats, num_formats, - DRM_PLANE_TYPE_PRIMARY, NULL); + if (INTEL_INFO(dev)->gen >= 9) + ret = drm_universal_plane_init(dev, &primary->base, 0, + &intel_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY, + "plane 1%c", pipe_name(pipe)); + else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + ret = drm_universal_plane_init(dev, &primary->base, 0, + &intel_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY, + "primary %c", pipe_name(pipe)); + else + ret = drm_universal_plane_init(dev, &primary->base, 0, + &intel_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY, + "plane %c", plane_name(primary->plane)); if (ret) goto fail; @@ -14145,6 +14376,7 @@ intel_check_cursor_plane(struct drm_plane *plane, ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src, &state->dst, &state->clip, + state->base.rotation, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, true, true, &state->visible); @@ -14253,7 +14485,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, &intel_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), - DRM_PLANE_TYPE_CURSOR, NULL); + DRM_PLANE_TYPE_CURSOR, + "cursor %c", pipe_name(pipe)); if (ret) goto fail; @@ -14338,7 +14571,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) goto fail; ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, - cursor, &intel_crtc_funcs, NULL); + cursor, &intel_crtc_funcs, + "pipe %c", pipe_name(pipe)); if (ret) goto fail; @@ -14372,10 +14606,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) return; fail: - if (primary) - drm_plane_cleanup(primary); - if (cursor) - drm_plane_cleanup(cursor); + intel_plane_destroy(primary); + intel_plane_destroy(cursor); kfree(crtc_state); kfree(intel_crtc); } @@ -14554,6 +14786,8 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_DP_D) & DP_DETECTED) intel_dp_init(dev, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + bool has_edp, has_port; + /* * The DP_DETECTED bit is the latched state of the DDC * SDA pin at boot. 
However since eDP doesn't require DDC @@ -14562,27 +14796,37 @@ static void intel_setup_outputs(struct drm_device *dev) * Thus we can't rely on the DP_DETECTED bit alone to detect * eDP ports. Consult the VBT as well as DP_DETECTED to * detect eDP ports. + * + * Sadly the straps seem to be missing sometimes even for HDMI + * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap + * and VBT for the presence of the port. Additionally we can't + * trust the port type the VBT declares as we've seen at least + * HDMI ports that the VBT claims are DP or eDP. */ - if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && - !intel_dp_is_edp(dev, PORT_B)) + has_edp = intel_dp_is_edp(dev, PORT_B); + has_port = intel_bios_is_port_present(dev_priv, PORT_B); + if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) + has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); + if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev, VLV_HDMIB, PORT_B); - if (I915_READ(VLV_DP_B) & DP_DETECTED || - intel_dp_is_edp(dev, PORT_B)) - intel_dp_init(dev, VLV_DP_B, PORT_B); - if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && - !intel_dp_is_edp(dev, PORT_C)) + has_edp = intel_dp_is_edp(dev, PORT_C); + has_port = intel_bios_is_port_present(dev_priv, PORT_C); + if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) + has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); + if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev, VLV_HDMIC, PORT_C); - if (I915_READ(VLV_DP_C) & DP_DETECTED || - intel_dp_is_edp(dev, PORT_C)) - intel_dp_init(dev, VLV_DP_C, PORT_C); if (IS_CHERRYVIEW(dev)) { - /* eDP not supported on port D, so don't check VBT */ - if (I915_READ(CHV_HDMID) & SDVO_DETECTED) - intel_hdmi_init(dev, CHV_HDMID, PORT_D); - if (I915_READ(CHV_DP_D) & DP_DETECTED) + /* + * eDP not supported on port D, + * so no need to worry about it + */ + has_port = intel_bios_is_port_present(dev_priv, PORT_D); + if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) intel_dp_init(dev, CHV_DP_D, PORT_D); + if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) + intel_hdmi_init(dev, CHV_HDMID, PORT_D); } intel_dsi_init(dev); @@ -15050,12 +15294,13 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { dev_priv->display.fdi_link_train = hsw_fdi_link_train; - if (IS_BROADWELL(dev_priv)) { - dev_priv->display.modeset_commit_cdclk = - broadwell_modeset_commit_cdclk; - dev_priv->display.modeset_calc_cdclk = - broadwell_modeset_calc_cdclk; - } + } + + if (IS_BROADWELL(dev_priv)) { + dev_priv->display.modeset_commit_cdclk = + broadwell_modeset_commit_cdclk; + dev_priv->display.modeset_calc_cdclk = + broadwell_modeset_calc_cdclk; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { dev_priv->display.modeset_commit_cdclk = valleyview_modeset_commit_cdclk; @@ -15063,9 +15308,14 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) valleyview_modeset_calc_cdclk; } else if (IS_BROXTON(dev_priv)) { dev_priv->display.modeset_commit_cdclk = - broxton_modeset_commit_cdclk; + bxt_modeset_commit_cdclk; + dev_priv->display.modeset_calc_cdclk = + bxt_modeset_calc_cdclk; + } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + dev_priv->display.modeset_commit_cdclk = + skl_modeset_commit_cdclk; dev_priv->display.modeset_calc_cdclk = - broxton_modeset_calc_cdclk; + skl_modeset_calc_cdclk; } switch (INTEL_INFO(dev_priv)->gen) { @@ -15293,7 +15543,7 @@ void 
intel_modeset_init_hw(struct drm_device *dev) dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; intel_init_clock_gating(dev); - intel_enable_gt_powersave(dev); + intel_enable_gt_powersave(dev_priv); } /* @@ -15363,7 +15613,6 @@ retry: } /* Write calculated watermark values back */ - to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config; for_each_crtc_in_state(state, crtc, cstate, i) { struct intel_crtc_state *cs = to_intel_crtc_state(cstate); @@ -15461,11 +15710,13 @@ void intel_modeset_init(struct drm_device *dev) } intel_update_czclk(dev_priv); - intel_update_rawclk(dev_priv); intel_update_cdclk(dev); intel_shared_dpll_init(dev); + if (dev_priv->max_cdclk_freq == 0) + intel_update_max_cdclk(dev); + /* Just disable it once at startup */ i915_disable_vga(dev); intel_setup_outputs(dev); @@ -15606,8 +15857,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { bool plane; - DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", - crtc->base.base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n", + crtc->base.base.id, crtc->base.name); /* Pipe has the wrong plane attached and the plane is active. * Temporarily change the plane mapping and disable everything @@ -15775,26 +16026,24 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (crtc_state->base.active) { dev_priv->active_crtcs |= 1 << crtc->pipe; - if (IS_BROADWELL(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) pixclk = ilk_pipe_pixel_rate(crtc_state); - - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (crtc_state->ips_enabled) - pixclk = DIV_ROUND_UP(pixclk * 100, 95); - } else if (IS_VALLEYVIEW(dev_priv) || - IS_CHERRYVIEW(dev_priv) || - IS_BROXTON(dev_priv)) + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) pixclk = crtc_state->base.adjusted_mode.crtc_clock; else WARN_ON(dev_priv->display.modeset_calc_cdclk); + + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ + if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) + pixclk = DIV_ROUND_UP(pixclk * 100, 95); } dev_priv->min_pixclk[crtc->pipe] = pixclk; readout_plane_state(crtc); - DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", - crtc->base.base.id, + DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", + crtc->base.base.id, crtc->base.name, crtc->active ? 
"enabled" : "disabled"); } @@ -16025,15 +16274,16 @@ retry: void intel_modeset_gem_init(struct drm_device *dev) { + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *c; struct drm_i915_gem_object *obj; int ret; - intel_init_gt_powersave(dev); + intel_init_gt_powersave(dev_priv); intel_modeset_init_hw(dev); - intel_setup_overlay(dev); + intel_setup_overlay(dev_priv); /* * Make sure any fbs we allocated at startup are properly @@ -16063,22 +16313,19 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_backlight_register(dev); } -void intel_connector_unregister(struct intel_connector *intel_connector) +void intel_connector_unregister(struct drm_connector *connector) { - struct drm_connector *connector = &intel_connector->base; + struct intel_connector *intel_connector = to_intel_connector(connector); + intel_backlight_device_unregister(intel_connector); intel_panel_destroy_backlight(connector); - drm_connector_unregister(connector); } void intel_modeset_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_connector *connector; - - intel_disable_gt_powersave(dev); - intel_backlight_unregister(dev); + intel_disable_gt_powersave(dev_priv); /* * Interrupts and polling as the first thing to avoid creating havoc. @@ -16100,27 +16347,17 @@ void intel_modeset_cleanup(struct drm_device *dev) /* flush any delayed tasks or pending work */ flush_scheduled_work(); - /* destroy the backlight and sysfs files before encoders/connectors */ - for_each_intel_connector(dev, connector) - connector->unregister(connector); + drm_connector_unregister_all(dev); drm_mode_config_cleanup(dev); - intel_cleanup_overlay(dev); + intel_cleanup_overlay(dev_priv); - intel_cleanup_gt_powersave(dev); + intel_cleanup_gt_powersave(dev_priv); intel_teardown_gmbus(dev); } -/* - * Return which encoder is currently attached for connector. 
- */ -struct drm_encoder *intel_best_encoder(struct drm_connector *connector) -{ - return &intel_attached_encoder(connector)->base; -} - void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder) { @@ -16204,9 +16441,8 @@ struct intel_display_error_state { }; struct intel_display_error_state * -intel_display_capture_error_state(struct drm_device *dev) +intel_display_capture_error_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_display_error_state *error; int transcoders[] = { TRANSCODER_A, @@ -16216,14 +16452,14 @@ intel_display_capture_error_state(struct drm_device *dev) }; int i; - if (INTEL_INFO(dev)->num_pipes == 0) + if (INTEL_INFO(dev_priv)->num_pipes == 0) return NULL; error = kzalloc(sizeof(*error), GFP_ATOMIC); if (error == NULL) return NULL; - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); for_each_pipe(dev_priv, i) { @@ -16239,25 +16475,25 @@ intel_display_capture_error_state(struct drm_device *dev) error->plane[i].control = I915_READ(DSPCNTR(i)); error->plane[i].stride = I915_READ(DSPSTRIDE(i)); - if (INTEL_INFO(dev)->gen <= 3) { + if (INTEL_GEN(dev_priv) <= 3) { error->plane[i].size = I915_READ(DSPSIZE(i)); error->plane[i].pos = I915_READ(DSPPOS(i)); } - if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) + if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) error->plane[i].addr = I915_READ(DSPADDR(i)); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { error->plane[i].surface = I915_READ(DSPSURF(i)); error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); } error->pipe[i].source = I915_READ(PIPESRC(i)); - if (HAS_GMCH_DISPLAY(dev)) + if (HAS_GMCH_DISPLAY(dev_priv)) error->pipe[i].stat = I915_READ(PIPESTAT(i)); } /* Note: this does not include DSI transcoders. */ - error->num_transcoders = INTEL_INFO(dev)->num_pipes; + error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes; if (HAS_DDI(dev_priv)) error->num_transcoders++; /* Account for eDP. 
*/ diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f192f58708c2..0b84f8e5df50 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -131,11 +131,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev, enum pipe pipe); static void intel_dp_unset_edid(struct intel_dp *intel_dp); -static unsigned int intel_dp_unused_lane_mask(int lane_count) -{ - return ~((1 << lane_count) - 1) & 0xf; -} - static int intel_dp_max_link_bw(struct intel_dp *intel_dp) { @@ -775,6 +770,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, DP_AUX_CH_CTL_TIME_OUT_1600us | DP_AUX_CH_CTL_RECEIVE_ERROR | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); } @@ -1181,7 +1177,6 @@ static void intel_aux_reg_init(struct intel_dp *intel_dp) static void intel_dp_aux_fini(struct intel_dp *intel_dp) { - drm_dp_aux_unregister(&intel_dp->aux); kfree(intel_dp->aux.name); } @@ -1216,15 +1211,6 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) return 0; } -static void -intel_dp_connector_unregister(struct intel_connector *intel_connector) -{ - struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); - - intel_dp_aux_fini(intel_dp); - intel_connector_unregister(intel_connector); -} - static int intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates) { @@ -1582,6 +1568,27 @@ found: &pipe_config->dp_m2_n2); } + /* + * DPLL0 VCO may need to be adjusted to get the correct + * clock for eDP. This will affect cdclk as well. + */ + if (is_edp(intel_dp) && + (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) { + int vco; + + switch (pipe_config->port_clock / 2) { + case 108000: + case 216000: + vco = 8640000; + break; + default: + vco = 8100000; + break; + } + + to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco; + } + if (!HAS_DDI(dev)) intel_dp_set_clock(encoder, pipe_config); @@ -2460,50 +2467,6 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder) intel_dp_link_down(intel_dp); } -static void chv_data_lane_soft_reset(struct intel_encoder *encoder, - bool reset) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); - enum pipe pipe = crtc->pipe; - uint32_t val; - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); - if (reset) - val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - else - val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); - - if (crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); - if (reset) - val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - else - val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); - } - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); - val |= CHV_PCS_REQ_SOFTRESET_EN; - if (reset) - val &= ~DPIO_PCS_CLK_SOFT_RESET; - else - val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); - - if (crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); - val |= CHV_PCS_REQ_SOFTRESET_EN; - if (reset) - val &= ~DPIO_PCS_CLK_SOFT_RESET; - else - val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); - } 
-} - static void chv_post_disable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); @@ -2811,266 +2774,38 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp) static void vlv_pre_enable_dp(struct intel_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - u32 val; - - mutex_lock(&dev_priv->sb_lock); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); - val = 0; - if (pipe) - val |= (1<<21); - else - val &= ~(1<<21); - val |= 0x001000c4; - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); - - mutex_unlock(&dev_priv->sb_lock); + vlv_phy_pre_encoder_enable(encoder); intel_enable_dp(encoder); } static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - intel_dp_prepare(encoder); - /* Program Tx lane resets to default */ - mutex_lock(&dev_priv->sb_lock); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), - DPIO_PCS_TX_LANE2_RESET | - DPIO_PCS_TX_LANE1_RESET); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), - DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | - DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | - (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | - DPIO_PCS_CLK_SOFT_RESET); - - /* Fix up inter-pair skew failure */ - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); - mutex_unlock(&dev_priv->sb_lock); + vlv_phy_pre_pll_enable(encoder); } static void chv_pre_enable_dp(struct intel_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel ch = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - int data, i, stagger; - u32 val; - - mutex_lock(&dev_priv->sb_lock); - - /* allow hardware to manage TX FIFO reset source */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); - val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); - val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); - } - - /* Program Tx lane latency optimal setting*/ - for (i = 0; i < intel_crtc->config->lane_count; i++) { - /* Set the upar bit */ - if (intel_crtc->config->lane_count == 1) - data = 0x0; - else - data = (i == 1) ? 
0x0 : 0x1; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), - data << DPIO_UPAR_SHIFT); - } - - /* Data lane stagger programming */ - if (intel_crtc->config->port_clock > 270000) - stagger = 0x18; - else if (intel_crtc->config->port_clock > 135000) - stagger = 0xd; - else if (intel_crtc->config->port_clock > 67500) - stagger = 0x7; - else if (intel_crtc->config->port_clock > 33750) - stagger = 0x4; - else - stagger = 0x2; - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); - val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); - val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); - } - - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), - DPIO_LANESTAGGER_STRAP(stagger) | - DPIO_LANESTAGGER_STRAP_OVRD | - DPIO_TX1_STAGGER_MASK(0x1f) | - DPIO_TX1_STAGGER_MULT(6) | - DPIO_TX2_STAGGER_MULT(0)); - - if (intel_crtc->config->lane_count > 2) { - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), - DPIO_LANESTAGGER_STRAP(stagger) | - DPIO_LANESTAGGER_STRAP_OVRD | - DPIO_TX1_STAGGER_MASK(0x1f) | - DPIO_TX1_STAGGER_MULT(7) | - DPIO_TX2_STAGGER_MULT(5)); - } - - /* Deassert data lane reset */ - chv_data_lane_soft_reset(encoder, false); - - mutex_unlock(&dev_priv->sb_lock); + chv_phy_pre_encoder_enable(encoder); intel_enable_dp(encoder); /* Second common lane will stay alive on its own now */ - if (dport->release_cl2_override) { - chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false); - dport->release_cl2_override = false; - } + chv_phy_release_cl2_override(encoder); } static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel ch = vlv_dport_to_channel(dport); - enum pipe pipe = intel_crtc->pipe; - unsigned int lane_mask = - intel_dp_unused_lane_mask(intel_crtc->config->lane_count); - u32 val; - intel_dp_prepare(encoder); - /* - * Must trick the second common lane into life. - * Otherwise we can't even access the PLL. 
- */ - if (ch == DPIO_CH0 && pipe == PIPE_B) - dport->release_cl2_override = - !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true); - - chv_phy_powergate_lanes(encoder, true, lane_mask); - - mutex_lock(&dev_priv->sb_lock); - - /* Assert data lane reset */ - chv_data_lane_soft_reset(encoder, true); - - /* program left/right clock distribution */ - if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); - val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - if (ch == DPIO_CH0) - val |= CHV_BUFLEFTENA1_FORCE; - if (ch == DPIO_CH1) - val |= CHV_BUFRIGHTENA1_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); - } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); - val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - if (ch == DPIO_CH0) - val |= CHV_BUFLEFTENA2_FORCE; - if (ch == DPIO_CH1) - val |= CHV_BUFRIGHTENA2_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); - } - - /* program clock channel usage */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; - else - val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; - else - val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); - } - - /* - * This a a bit weird since generally CL - * matches the pipe, but here we need to - * pick the CL based on the port. - */ - val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); - if (pipe != PIPE_B) - val &= ~CHV_CMN_USEDCLKCHANNEL; - else - val |= CHV_CMN_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); - - mutex_unlock(&dev_priv->sb_lock); + chv_phy_pre_pll_enable(encoder); } static void chv_dp_post_pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; - u32 val; - - mutex_lock(&dev_priv->sb_lock); - - /* disable left/right clock distribution */ - if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); - val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); - } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); - val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); - } - - mutex_unlock(&dev_priv->sb_lock); - - /* - * Leave the power down bit cleared for at least one - * lane so that chv_powergate_phy_ch() will power - * on something when the channel is otherwise unused. - * When the port is off and the override is removed - * the lanes power down anyway, so otherwise it doesn't - * really matter what the state of power down bits is - * after this. 
- */ - chv_phy_powergate_lanes(encoder, false, 0x0); + chv_phy_post_pll_disable(encoder); } /* @@ -3178,16 +2913,10 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct intel_crtc *intel_crtc = - to_intel_crtc(dport->base.base.crtc); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; unsigned long demph_reg_value, preemph_reg_value, uniqtranscale_reg_value; uint8_t train_set = intel_dp->train_set[0]; - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: @@ -3262,37 +2991,18 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) return 0; } - mutex_lock(&dev_priv->sb_lock); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), - uniqtranscale_reg_value); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000); - mutex_unlock(&dev_priv->sb_lock); + vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, + uniqtranscale_reg_value, 0); return 0; } -static bool chv_need_uniq_trans_scale(uint8_t train_set) -{ - return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 && - (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3; -} - static uint32_t chv_signal_levels(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc); - u32 deemph_reg_value, margin_reg_value, val; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + u32 deemph_reg_value, margin_reg_value; + bool uniq_trans_scale = false; uint8_t train_set = intel_dp->train_set[0]; - enum dpio_channel ch = vlv_dport_to_channel(dport); - enum pipe pipe = intel_crtc->pipe; - int i; switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: @@ -3312,7 +3022,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp) case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: deemph_reg_value = 128; margin_reg_value = 154; - /* FIXME extra to set for 1200 */ + uniq_trans_scale = true; break; default: return 0; @@ -3364,88 +3074,8 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp) return 0; } - mutex_lock(&dev_priv->sb_lock); - - /* Clear calc init */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); - val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); - val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); - val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); - val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); - val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); - val |= 
DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); - } - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); - val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); - val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); - val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); - val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); - } - - /* Program swing deemph */ - for (i = 0; i < intel_crtc->config->lane_count; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); - val &= ~DPIO_SWING_DEEMPH9P5_MASK; - val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); - } - - /* Program swing margin */ - for (i = 0; i < intel_crtc->config->lane_count; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); - - val &= ~DPIO_SWING_MARGIN000_MASK; - val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; - - /* - * Supposedly this value shouldn't matter when unique transition - * scale is disabled, but in fact it does matter. Let's just - * always program the same value and hope it's OK. - */ - val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); - val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; - - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); - } - - /* - * The document said it needs to set bit 27 for ch0 and bit 26 - * for ch1. Might be a typo in the doc. - * For now, for this unique transition scale selection, set bit - * 27 for ch0 and ch1. - */ - for (i = 0; i < intel_crtc->config->lane_count; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); - if (chv_need_uniq_trans_scale(train_set)) - val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; - else - val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); - } - - /* Start swing calculation */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); - val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); - val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); - } - - mutex_unlock(&dev_priv->sb_lock); + chv_set_phy_signal_level(encoder, deemph_reg_value, + margin_reg_value, uniq_trans_scale); return 0; } @@ -3714,7 +3344,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint8_t rev; if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, sizeof(intel_dp->dpcd)) < 0) @@ -3771,6 +3400,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.psr2_support ? 
"supported" : "not supported"); } + + /* Read the eDP Display control capabilities registers */ + memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd)); + if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && + (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, + intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == + sizeof(intel_dp->edp_dpcd))) + DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd), + intel_dp->edp_dpcd); } DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", @@ -3778,10 +3416,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) yesno(drm_dp_tps3_supported(intel_dp->dpcd))); /* Intermediate frequency support */ - if (is_edp(intel_dp) && - (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && - (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) && - (rev >= 0x03)) { /* eDp v1.4 or higher */ + if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDp v1.4 or higher */ __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; int i; @@ -4812,6 +4447,13 @@ done: } static void +intel_dp_connector_unregister(struct drm_connector *connector) +{ + drm_dp_aux_unregister(&intel_attached_dp(connector)->aux); + intel_connector_unregister(connector); +} + +static void intel_dp_connector_destroy(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); @@ -4851,6 +4493,9 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) intel_dp->edp_notifier.notifier_call = NULL; } } + + intel_dp_aux_fini(intel_dp); + drm_encoder_cleanup(encoder); kfree(intel_dig_port); } @@ -4927,6 +4572,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_dp_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .early_unregister = intel_dp_connector_unregister, .destroy = intel_dp_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -4935,7 +4581,6 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { .get_modes = intel_dp_get_modes, .mode_valid = intel_dp_mode_valid, - .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_dp_enc_funcs = { @@ -5590,14 +5235,14 @@ void intel_edp_drrs_flush(struct drm_device *dev, * * DRRS saves power by switching to low RR based on usage scenarios. * - * eDP DRRS:- - * The implementation is based on frontbuffer tracking implementation. - * When there is a disturbance on the screen triggered by user activity or a - * periodic system activity, DRRS is disabled (RR is changed to high RR). - * When there is no movement on screen, after a timeout of 1 second, a switch - * to low RR is made. - * For integration with frontbuffer tracking code, - * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called. + * The implementation is based on frontbuffer tracking implementation. When + * there is a disturbance on the screen triggered by user activity or a periodic + * system activity, DRRS is disabled (RR is changed to high RR). When there is + * no movement on screen, after a timeout of 1 second, a switch to low RR is + * made. + * + * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() + * and intel_edp_drrs_flush() are called. 
* * DRRS can be further extended to support other internal panels and also * the scenario of video playback wherein RR is set based on the rate @@ -5725,8 +5370,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); - if (fixed_mode) + if (fixed_mode) { fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = fixed_mode->width_mm; + connector->display_info.height_mm = fixed_mode->height_mm; + } } mutex_unlock(&dev->mode_config.mutex); @@ -5840,7 +5488,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; else intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector->unregister = intel_dp_connector_unregister; /* Set up the hotplug pin. */ switch (port) { @@ -5923,9 +5570,9 @@ fail: return false; } -void -intel_dp_init(struct drm_device *dev, - i915_reg_t output_reg, enum port port) +bool intel_dp_init(struct drm_device *dev, + i915_reg_t output_reg, + enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_digital_port *intel_dig_port; @@ -5935,7 +5582,7 @@ intel_dp_init(struct drm_device *dev, intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); if (!intel_dig_port) - return; + return false; intel_connector = intel_connector_alloc(); if (!intel_connector) @@ -5945,7 +5592,7 @@ intel_dp_init(struct drm_device *dev, encoder = &intel_encoder->base; if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, - DRM_MODE_ENCODER_TMDS, NULL)) + DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port))) goto err_encoder_init; intel_encoder->compute_config = intel_dp_compute_config; @@ -5992,7 +5639,7 @@ intel_dp_init(struct drm_device *dev, if (!intel_dp_init_connector(intel_dig_port, intel_connector)) goto err_init_connector; - return; + return true; err_init_connector: drm_encoder_cleanup(encoder); @@ -6000,8 +5647,7 @@ err_encoder_init: kfree(intel_connector); err_connector_alloc: kfree(intel_dig_port); - - return; + return false; } void intel_dp_mst_suspend(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c new file mode 100644 index 000000000000..6532e226db29 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c @@ -0,0 +1,172 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "intel_drv.h" + +static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) +{ + uint8_t reg_val = 0; + + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + &reg_val) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_DISPLAY_CONTROL_REGISTER); + return; + } + if (enable) + reg_val |= DP_EDP_BACKLIGHT_ENABLE; + else + reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE); + + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + reg_val) != 1) { + DRM_DEBUG_KMS("Failed to %s aux backlight\n", + enable ? "enable" : "disable"); + } +} + +/* + * Read the current backlight value from DPCD register(s) based + * on whether 8-bit (MSB only) or 16-bit (MSB and LSB) values are supported + */ +static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + uint8_t read_val[2] = { 0x0 }; + uint16_t level = 0; + + if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + &read_val, sizeof(read_val)) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_BRIGHTNESS_MSB); + return 0; + } + level = read_val[0]; + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + level = (read_val[0] << 8 | read_val[1]); + + return level; +} + +/* + * Sends the current backlight level over the aux channel, checking whether + * it is using an 8-bit or 16-bit value (MSB and LSB) + */ +static void +intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + uint8_t vals[2] = { 0x0 }; + + vals[0] = level; + + /* Write the MSB and/or LSB */ + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) { + vals[0] = (level & 0xFF00) >> 8; + vals[1] = (level & 0xFF); + } + if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + vals, sizeof(vals)) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight level\n"); + return; + } +} + +static void intel_dp_aux_enable_backlight(struct intel_connector *connector) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + uint8_t dpcd_buf = 0; + + set_aux_backlight_enable(intel_dp, true); + + if ((drm_dp_dpcd_readb(&intel_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) && + ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) == + DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET)) + drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + (dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD)); +} + +static void intel_dp_aux_disable_backlight(struct intel_connector *connector) +{ + set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false); +} + +static int intel_dp_aux_setup_backlight(struct intel_connector *connector, + enum pipe pipe) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_panel *panel = &connector->panel; + + intel_dp_aux_enable_backlight(connector); + + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + panel->backlight.max = 0xFFFF; + else + panel->backlight.max = 0xFF; + + panel->backlight.min = 0; + panel->backlight.level = intel_dp_aux_get_backlight(connector); + + 
panel->backlight.enabled = panel->backlight.level != 0; + + return 0; +} + +static bool +intel_dp_aux_display_control_capable(struct intel_connector *connector) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + + /* Check the eDP Display control capabilities registers to determine if + * the panel can support backlight control over the aux channel + */ + if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && + (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) && + !((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) || + (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) { + DRM_DEBUG_KMS("AUX Backlight Control Supported!\n"); + return true; + } + return false; +} + +int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) +{ + struct intel_panel *panel = &intel_connector->panel; + + if (!i915.enable_dpcd_backlight) + return -ENODEV; + + if (!intel_dp_aux_display_control_capable(intel_connector)) + return -ENODEV; + + panel->backlight.setup = intel_dp_aux_setup_backlight; + panel->backlight.enable = intel_dp_aux_enable_backlight; + panel->backlight.disable = intel_dp_aux_disable_backlight; + panel->backlight.set = intel_dp_aux_set_backlight; + panel->backlight.get = intel_dp_aux_get_backlight; + + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 7a34090cef34..9646816604be 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -336,6 +336,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_dp_mst_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .early_unregister = intel_connector_unregister, .destroy = intel_dp_mst_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -455,7 +456,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); - intel_connector->unregister = intel_connector_unregister; intel_connector->get_hw_state = intel_dp_mst_get_hw_state; intel_connector->mst_port = intel_dp; intel_connector->port = port; @@ -489,7 +489,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_device *dev = connector->dev; - intel_connector->unregister(intel_connector); + drm_connector_unregister(connector); /* need to nuke the connector */ drm_modeset_lock_all(dev); @@ -534,7 +534,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum intel_mst->primary = intel_dig_port; drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, - DRM_MODE_ENCODER_DPMST, NULL); + DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe)); intel_encoder->type = INTEL_OUTPUT_DP_MST; intel_encoder->crtc_mask = 0x7; diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c new file mode 100644 index 000000000000..288da35572b4 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c @@ -0,0 +1,470 @@ +/* + * Copyright © 2014-2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person 
obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "intel_drv.h" + +void chv_set_phy_signal_level(struct intel_encoder *encoder, + u32 deemph_reg_value, u32 margin_reg_value, + bool uniq_trans_scale) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc); + enum dpio_channel ch = vlv_dport_to_channel(dport); + enum pipe pipe = intel_crtc->pipe; + u32 val; + int i; + + mutex_lock(&dev_priv->sb_lock); + + /* Clear calc init */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); + val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); + val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); + val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); + val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); + val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); + val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); + } + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); + val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); + val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); + val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); + val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); + } + + /* Program swing deemph */ + for (i = 0; i < intel_crtc->config->lane_count; i++) { + val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); + val &= ~DPIO_SWING_DEEMPH9P5_MASK; + val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; + vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); + } + + /* Program swing margin */ + for (i = 0; i < intel_crtc->config->lane_count; i++) { + val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); + + val &= ~DPIO_SWING_MARGIN000_MASK; + val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; + + /* + * Supposedly this value shouldn't matter when unique transition + * scale is disabled, but in fact it does matter. Let's just + * always program the same value and hope it's OK. 
+ */ + val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); + val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; + + vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); + } + + /* + * The document said it needs to set bit 27 for ch0 and bit 26 + * for ch1. Might be a typo in the doc. + * For now, for this unique transition scale selection, set bit + * 27 for ch0 and ch1. + */ + for (i = 0; i < intel_crtc->config->lane_count; i++) { + val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); + if (uniq_trans_scale) + val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; + else + val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; + vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); + } + + /* Start swing calculation */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); + val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); + val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); + } + + mutex_unlock(&dev_priv->sb_lock); + +} + +void chv_data_lane_soft_reset(struct intel_encoder *encoder, + bool reset) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + enum pipe pipe = crtc->pipe; + uint32_t val; + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); + if (reset) + val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); + else + val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); + + if (crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); + if (reset) + val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); + else + val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); + } + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); + val |= CHV_PCS_REQ_SOFTRESET_EN; + if (reset) + val &= ~DPIO_PCS_CLK_SOFT_RESET; + else + val |= DPIO_PCS_CLK_SOFT_RESET; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); + + if (crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); + val |= CHV_PCS_REQ_SOFTRESET_EN; + if (reset) + val &= ~DPIO_PCS_CLK_SOFT_RESET; + else + val |= DPIO_PCS_CLK_SOFT_RESET; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); + } +} + +void chv_phy_pre_pll_enable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel ch = vlv_dport_to_channel(dport); + enum pipe pipe = intel_crtc->pipe; + unsigned int lane_mask = + intel_dp_unused_lane_mask(intel_crtc->config->lane_count); + u32 val; + + /* + * Must trick the second common lane into life. + * Otherwise we can't even access the PLL. 
+ */ + if (ch == DPIO_CH0 && pipe == PIPE_B) + dport->release_cl2_override = + !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true); + + chv_phy_powergate_lanes(encoder, true, lane_mask); + + mutex_lock(&dev_priv->sb_lock); + + /* Assert data lane reset */ + chv_data_lane_soft_reset(encoder, true); + + /* program left/right clock distribution */ + if (pipe != PIPE_B) { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA1_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA1_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + } else { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA2_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA2_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + } + + /* program clock channel usage */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); + } + + /* + * This is a bit weird since generally CL + * matches the pipe, but here we need to + * pick the CL based on the port. + */ + val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); + if (pipe != PIPE_B) + val &= ~CHV_CMN_USEDCLKCHANNEL; + else + val |= CHV_CMN_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); + + mutex_unlock(&dev_priv->sb_lock); +} + +void chv_phy_pre_encoder_enable(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel ch = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + int data, i, stagger; + u32 val; + + mutex_lock(&dev_priv->sb_lock); + + /* allow hardware to manage TX FIFO reset source */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); + val &= ~DPIO_LANEDESKEW_STRAP_OVRD; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); + val &= ~DPIO_LANEDESKEW_STRAP_OVRD; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); + } + + /* Program Tx lane latency optimal setting */ + for (i = 0; i < intel_crtc->config->lane_count; i++) { + /* Set the upar bit */ + if (intel_crtc->config->lane_count == 1) + data = 0x0; + else + data = (i == 1) ? 
0x0 : 0x1; + vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), + data << DPIO_UPAR_SHIFT); + } + + /* Data lane stagger programming */ + if (intel_crtc->config->port_clock > 270000) + stagger = 0x18; + else if (intel_crtc->config->port_clock > 135000) + stagger = 0xd; + else if (intel_crtc->config->port_clock > 67500) + stagger = 0x7; + else if (intel_crtc->config->port_clock > 33750) + stagger = 0x4; + else + stagger = 0x2; + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); + val |= DPIO_TX2_STAGGER_MASK(0x1f); + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); + val |= DPIO_TX2_STAGGER_MASK(0x1f); + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); + } + + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), + DPIO_LANESTAGGER_STRAP(stagger) | + DPIO_LANESTAGGER_STRAP_OVRD | + DPIO_TX1_STAGGER_MASK(0x1f) | + DPIO_TX1_STAGGER_MULT(6) | + DPIO_TX2_STAGGER_MULT(0)); + + if (intel_crtc->config->lane_count > 2) { + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), + DPIO_LANESTAGGER_STRAP(stagger) | + DPIO_LANESTAGGER_STRAP_OVRD | + DPIO_TX1_STAGGER_MASK(0x1f) | + DPIO_TX1_STAGGER_MULT(7) | + DPIO_TX2_STAGGER_MULT(5)); + } + + /* Deassert data lane reset */ + chv_data_lane_soft_reset(encoder, false); + + mutex_unlock(&dev_priv->sb_lock); +} + +void chv_phy_release_cl2_override(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (dport->release_cl2_override) { + chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false); + dport->release_cl2_override = false; + } +} + +void chv_phy_post_pll_disable(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; + u32 val; + + mutex_lock(&dev_priv->sb_lock); + + /* disable left/right clock distribution */ + if (pipe != PIPE_B) { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + } else { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + } + + mutex_unlock(&dev_priv->sb_lock); + + /* + * Leave the power down bit cleared for at least one + * lane so that chv_powergate_phy_ch() will power + * on something when the channel is otherwise unused. + * When the port is off and the override is removed + * the lanes power down anyway, so otherwise it doesn't + * really matter what the state of power down bits is + * after this. 
+ */ + chv_phy_powergate_lanes(encoder, false, 0x0); +} + +void vlv_set_phy_signal_level(struct intel_encoder *encoder, + u32 demph_reg_value, u32 preemph_reg_value, + u32 uniqtranscale_reg_value, u32 tx3_demph) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + enum dpio_channel port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + + mutex_lock(&dev_priv->sb_lock); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), + uniqtranscale_reg_value); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040); + + if (tx3_demph) + vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph); + + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); + mutex_unlock(&dev_priv->sb_lock); +} + +void vlv_phy_pre_pll_enable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + + /* Program Tx lane resets to default */ + mutex_lock(&dev_priv->sb_lock); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), + DPIO_PCS_TX_LANE2_RESET | + DPIO_PCS_TX_LANE1_RESET); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), + DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | + DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | + (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | + DPIO_PCS_CLK_SOFT_RESET); + + /* Fix up inter-pair skew failure */ + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); + mutex_unlock(&dev_priv->sb_lock); +} + +void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + enum dpio_channel port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + u32 val; + + mutex_lock(&dev_priv->sb_lock); + + /* Enable clock channels for this port */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); + val = 0; + if (pipe) + val |= (1<<21); + else + val &= ~(1<<21); + val |= 0x001000c4; + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); + + /* Program lane clock */ + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); + + mutex_unlock(&dev_priv->sb_lock); +} + +void vlv_phy_reset_lanes(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + + mutex_lock(&dev_priv->sb_lock); + 
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060); + mutex_unlock(&dev_priv->sb_lock); +} diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 3ac705936b04..c0eff1571731 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -208,8 +208,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc, if (memcmp(&crtc_state->dpll_hw_state, &shared_dpll[i].hw_state, sizeof(crtc_state->dpll_hw_state)) == 0) { - DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n", - crtc->base.base.id, pll->name, + DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n", + crtc->base.base.id, crtc->base.name, pll->name, shared_dpll[i].crtc_mask, pll->active_mask); return pll; @@ -220,8 +220,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc, for (i = range_min; i <= range_max; i++) { pll = &dev_priv->shared_dplls[i]; if (shared_dpll[i].crtc_mask == 0) { - DRM_DEBUG_KMS("CRTC:%d allocated %s\n", - crtc->base.base.id, pll->name); + DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n", + crtc->base.base.id, crtc->base.name, pll->name); return pll; } } @@ -358,14 +358,17 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, i = (enum intel_dpll_id) crtc->pipe; pll = &dev_priv->shared_dplls[i]; - DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", - crtc->base.base.id, pll->name); + DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", + crtc->base.base.id, crtc->base.name, pll->name); } else { pll = intel_find_shared_dpll(crtc, crtc_state, DPLL_ID_PCH_PLL_A, DPLL_ID_PCH_PLL_B); } + if (!pll) + return NULL; + /* reference the pll */ intel_reference_shared_dpll(pll, crtc_state); @@ -1236,9 +1239,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, case 162000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0); break; - /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which - results in CDCLK change. 
Need to handle the change of CDCLK by - disabling pipes and re-enabling them */ case 108000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0); break; @@ -1508,7 +1508,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, int clock = crtc_state->port_clock; if (encoder->type == INTEL_OUTPUT_HDMI) { - intel_clock_t best_clock; + struct dpll best_clock; /* Calculate HDMI div */ /* @@ -1613,8 +1613,8 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, i = (enum intel_dpll_id) intel_dig_port->port; pll = intel_get_shared_dpll_by_id(dev_priv, i); - DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", - crtc->base.base.id, pll->name); + DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", + crtc->base.base.id, crtc->base.name, pll->name); intel_reference_shared_dpll(pll, crtc_state); @@ -1633,18 +1633,10 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = { static void intel_ddi_pll_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t val = I915_READ(LCPLL_CTL); - - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { - int cdclk_freq; - - cdclk_freq = dev_priv->display.get_display_clock_speed(dev); - dev_priv->skl_boot_cdclk = cdclk_freq; - if (skl_sanitize_cdclk(dev_priv)) - DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n"); - if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) - DRM_ERROR("LCPLL1 is disabled\n"); - } else if (!IS_BROXTON(dev_priv)) { + + if (INTEL_GEN(dev_priv) < 9) { + uint32_t val = I915_READ(LCPLL_CTL); + /* * The LCPLL register should be turned on by the BIOS. For now * let's just check its state and print errors in case diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a28b4aac1e02..7d0e071fe355 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -242,14 +242,6 @@ struct intel_connector { * and active (i.e. dpms ON state). */ bool (*get_hw_state)(struct intel_connector *); - /* - * Removes all interfaces through which the connector is accessible - * - like sysfs, debugfs entries -, so that no new operations can be - * started on the connector. Also makes sure all currently pending - * operations finish before returing. - */ - void (*unregister)(struct intel_connector *); - /* Panel info for eDP and LVDS */ struct intel_panel panel; @@ -266,7 +258,7 @@ struct intel_connector { struct intel_dp *mst_port; }; -typedef struct dpll { +struct dpll { /* given values */ int n; int m1, m2; @@ -276,7 +268,7 @@ typedef struct dpll { int vco; int m; int p; -} intel_clock_t; +}; struct intel_atomic_state { struct drm_atomic_state base; @@ -291,17 +283,32 @@ struct intel_atomic_state { bool dpll_set, modeset; + /* + * Does this transaction change the pipes that are active? This mask + * tracks which CRTC's have changed their active state at the end of + * the transaction (not counting the temporary disable during modesets). + * This mask should only be non-zero when intel_state->modeset is true, + * but the converse is not necessarily true; simply changing a mode may + * not flip the final active status of any CRTC's + */ + unsigned int active_pipe_changes; + unsigned int active_crtcs; unsigned int min_pixclk[I915_MAX_PIPES]; + /* SKL/KBL Only */ + unsigned int cdclk_pll_vco; + struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; - struct intel_wm_config wm_config; /* * Current watermarks can't be trusted during hardware readout, so * don't bother calculating intermediate watermarks. 
*/ bool skip_intermediate_wm; + + /* Gen9+ only */ + struct skl_wm_values wm_results; }; struct intel_plane_state { @@ -405,6 +412,48 @@ struct skl_pipe_wm { uint32_t linetime; }; +struct intel_crtc_wm_state { + union { + struct { + /* + * Intermediate watermarks; these can be + * programmed immediately since they satisfy + * both the current configuration we're + * switching away from and the new + * configuration we're switching to. + */ + struct intel_pipe_wm intermediate; + + /* + * Optimal watermarks, programmed post-vblank + * when this state is committed. + */ + struct intel_pipe_wm optimal; + } ilk; + + struct { + /* gen9+ only needs 1-step wm programming */ + struct skl_pipe_wm optimal; + + /* cached plane data rate */ + unsigned plane_data_rate[I915_MAX_PLANES]; + unsigned plane_y_data_rate[I915_MAX_PLANES]; + + /* minimum block allocation */ + uint16_t minimum_blocks[I915_MAX_PLANES]; + uint16_t minimum_y_blocks[I915_MAX_PLANES]; + } skl; + }; + + /* + * Platforms with two-step watermark programming will need to + * update watermark programming post-vblank to switch from the + * safe intermediate watermarks to the optimal final + * watermarks. + */ + bool need_postvbl_update; +}; + struct intel_crtc_state { struct drm_crtc_state base; @@ -522,6 +571,12 @@ struct intel_crtc_state { uint8_t lane_count; + /* + * Used by platforms having DP/HDMI PHY with programmable lane + * latency optimization. + */ + uint8_t lane_lat_optim_mask; + /* Panel fitter controls for gen2-gen4 + VLV */ struct { u32 control; @@ -558,32 +613,7 @@ struct intel_crtc_state { /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */ bool disable_lp_wm; - struct { - /* - * Optimal watermarks, programmed post-vblank when this state - * is committed. - */ - union { - struct intel_pipe_wm ilk; - struct skl_pipe_wm skl; - } optimal; - - /* - * Intermediate watermarks; these can be programmed immediately - * since they satisfy both the current configuration we're - * switching away from and the new configuration we're switching - * to. - */ - struct intel_pipe_wm intermediate; - - /* - * Platforms with two-step watermark programming will need to - * update watermark programming post-vblank to switch from the - * safe intermediate watermarks to the optimal final - * watermarks. 
- */ - bool need_postvbl_update; - } wm; + struct intel_crtc_wm_state wm; /* Gamma mode programmed on the pipe */ uint32_t gamma_mode; @@ -598,14 +628,6 @@ struct vlv_wm_state { bool cxsr; }; -struct intel_mmio_flip { - struct work_struct work; - struct drm_i915_private *i915; - struct drm_i915_gem_request *req; - struct intel_crtc *crtc; - unsigned int rotation; -}; - struct intel_crtc { struct drm_crtc base; enum pipe pipe; @@ -620,7 +642,7 @@ struct intel_crtc { unsigned long enabled_power_domains; bool lowfreq_avail; struct intel_overlay *overlay; - struct intel_unpin_work *unpin_work; + struct intel_flip_work *flip_work; atomic_t unpin_work_count; @@ -815,6 +837,7 @@ struct intel_dp { uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ uint8_t num_sink_rates; int sink_rates[DP_MAX_SUPPORTED_RATES]; @@ -947,22 +970,21 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane) return dev_priv->plane_to_crtc_mapping[plane]; } -struct intel_unpin_work { - struct work_struct work; +struct intel_flip_work { + struct work_struct unpin_work; + struct work_struct mmio_work; + struct drm_crtc *crtc; struct drm_framebuffer *old_fb; struct drm_i915_gem_object *pending_flip_obj; struct drm_pending_vblank_event *event; atomic_t pending; -#define INTEL_FLIP_INACTIVE 0 -#define INTEL_FLIP_PENDING 1 -#define INTEL_FLIP_COMPLETE 2 u32 flip_count; u32 gtt_offset; struct drm_i915_gem_request *flip_queued_req; u32 flip_queued_vblank; u32 flip_ready_vblank; - bool enable_stall_check; + unsigned int rotation; }; struct intel_load_detect_pipe { @@ -1031,9 +1053,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void gen6_reset_rps_interrupts(struct drm_device *dev); -void gen6_enable_rps_interrupts(struct drm_device *dev); -void gen6_disable_rps_interrupts(struct drm_device *dev); +void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv); +void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv); +void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv); u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask); void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); @@ -1112,14 +1134,16 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv); void i915_audio_component_cleanup(struct drm_i915_private *dev_priv); /* intel_display.c */ +void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco); +void intel_update_rawclk(struct drm_i915_private *dev_priv); int vlv_get_cck_clock(struct drm_i915_private *dev_priv, const char *name, u32 reg, int ref_freq); extern const struct drm_plane_funcs intel_plane_funcs; void intel_init_display_hooks(struct drm_i915_private *dev_priv); unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); bool intel_has_pending_fb_unpin(struct drm_device *dev); -void intel_mark_busy(struct drm_device *dev); -void intel_mark_idle(struct drm_device *dev); +void intel_mark_busy(struct drm_i915_private *dev_priv); +void intel_mark_idle(struct drm_i915_private 
*dev_priv); void intel_crtc_restore_mode(struct drm_crtc *crtc); int intel_display_suspend(struct drm_device *dev); void intel_encoder_destroy(struct drm_encoder *encoder); @@ -1128,7 +1152,6 @@ struct intel_connector *intel_connector_alloc(void); bool intel_connector_get_hw_state(struct intel_connector *connector); void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder); -struct drm_encoder *intel_best_encoder(struct drm_connector *connector); struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); @@ -1151,6 +1174,9 @@ intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe) if (crtc->active) intel_wait_for_vblank(dev, pipe); } + +u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc); + int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dport, @@ -1164,14 +1190,14 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx); int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); +void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); struct drm_framebuffer * __intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj); -void intel_prepare_page_flip(struct drm_device *dev, int plane); -void intel_finish_page_flip(struct drm_device *dev, int pipe); -void intel_finish_page_flip_plane(struct drm_device *dev, int plane); -void intel_check_page_flip(struct drm_device *dev, int pipe); +void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe); +void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe); +void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe); int intel_prepare_plane_fb(struct drm_plane *plane, const struct drm_plane_state *new_state); void intel_cleanup_plane_fb(struct drm_plane *plane, @@ -1228,23 +1254,25 @@ u32 intel_compute_tile_offset(int *x, int *y, const struct drm_framebuffer *fb, int plane, unsigned int pitch, unsigned int rotation); -void intel_prepare_reset(struct drm_device *dev); -void intel_finish_reset(struct drm_device *dev); +void intel_prepare_reset(struct drm_i915_private *dev_priv); +void intel_finish_reset(struct drm_i915_private *dev_priv); void hsw_enable_pc8(struct drm_i915_private *dev_priv); void hsw_disable_pc8(struct drm_i915_private *dev_priv); -void broxton_init_cdclk(struct drm_i915_private *dev_priv); -void broxton_uninit_cdclk(struct drm_i915_private *dev_priv); -bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv); -void broxton_ddi_phy_init(struct drm_i915_private *dev_priv); -void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv); -void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv); +void bxt_init_cdclk(struct drm_i915_private *dev_priv); +void bxt_uninit_cdclk(struct drm_i915_private *dev_priv); +void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy); +void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy); +bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, + enum dpio_phy phy); +bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv, + enum dpio_phy phy); void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv); void bxt_enable_dc9(struct 
drm_i915_private *dev_priv); void bxt_disable_dc9(struct drm_i915_private *dev_priv); void gen9_enable_dc5(struct drm_i915_private *dev_priv); void skl_init_cdclk(struct drm_i915_private *dev_priv); -int skl_sanitize_cdclk(struct drm_i915_private *dev_priv); void skl_uninit_cdclk(struct drm_i915_private *dev_priv); +unsigned int skl_cdclk_get_vco(unsigned int freq); void skl_enable_dc6(struct drm_i915_private *dev_priv); void skl_disable_dc6(struct drm_i915_private *dev_priv); void intel_dp_get_m_n(struct intel_crtc *crtc, @@ -1252,8 +1280,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, - intel_clock_t *best_clock); -int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock); + struct dpll *best_clock); +int chv_calc_dpll_params(int refclk, struct dpll *pll_clock); bool intel_crtc_active(struct drm_crtc *crtc); void hsw_enable_ips(struct intel_crtc *crtc); @@ -1284,7 +1312,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *); void intel_csr_ucode_resume(struct drm_i915_private *); /* intel_dp.c */ -void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); +bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); void intel_dp_set_link_params(struct intel_dp *intel_dp, @@ -1339,12 +1367,22 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); bool intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); +static inline unsigned int intel_dp_unused_lane_mask(int lane_count) +{ + return ~((1 << lane_count) - 1) & 0xf; +} + +/* intel_dp_aux_backlight.c */ +int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); + /* intel_dp_mst.c */ int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); /* intel_dsi.c */ void intel_dsi_init(struct drm_device *dev); +/* intel_dsi_dcs_backlight.c */ +int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); /* intel_dvo.c */ void intel_dvo_init(struct drm_device *dev); @@ -1385,11 +1423,15 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev) void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, struct drm_atomic_state *state); bool intel_fbc_is_active(struct drm_i915_private *dev_priv); -void intel_fbc_pre_update(struct intel_crtc *crtc); +void intel_fbc_pre_update(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state); void intel_fbc_post_update(struct intel_crtc *crtc); void intel_fbc_init(struct drm_i915_private *dev_priv); void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv); -void intel_fbc_enable(struct intel_crtc *crtc); +void intel_fbc_enable(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state); void intel_fbc_disable(struct intel_crtc *crtc); void intel_fbc_global_disable(struct drm_i915_private *dev_priv); void intel_fbc_invalidate(struct drm_i915_private *dev_priv, @@ -1424,13 +1466,13 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector); /* intel_overlay.c */ -void 
intel_setup_overlay(struct drm_device *dev); -void intel_cleanup_overlay(struct drm_device *dev); +void intel_setup_overlay(struct drm_i915_private *dev_priv); +void intel_cleanup_overlay(struct drm_i915_private *dev_priv); int intel_overlay_switch_off(struct intel_overlay *overlay); -int intel_overlay_put_image(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int intel_overlay_attrs(struct drm_device *dev, void *data, - struct drm_file *file_priv); +int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); void intel_overlay_reset(struct drm_i915_private *dev_priv); @@ -1459,7 +1501,14 @@ extern struct drm_display_mode *intel_find_panel_downclock( struct drm_display_mode *fixed_mode, struct drm_connector *connector); void intel_backlight_register(struct drm_device *dev); -void intel_backlight_unregister(struct drm_device *dev); + +#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) +void intel_backlight_device_unregister(struct intel_connector *connector); +#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ +static inline void intel_backlight_device_unregister(struct intel_connector *connector) +{ +} +#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ /* intel_psr.c */ @@ -1601,21 +1650,20 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); void intel_pm_setup(struct drm_device *dev); void intel_gpu_ips_init(struct drm_i915_private *dev_priv); void intel_gpu_ips_teardown(void); -void intel_init_gt_powersave(struct drm_device *dev); -void intel_cleanup_gt_powersave(struct drm_device *dev); -void intel_enable_gt_powersave(struct drm_device *dev); -void intel_disable_gt_powersave(struct drm_device *dev); -void intel_suspend_gt_powersave(struct drm_device *dev); -void intel_reset_gt_powersave(struct drm_device *dev); -void gen6_update_ring_freq(struct drm_device *dev); +void intel_init_gt_powersave(struct drm_i915_private *dev_priv); +void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv); +void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); +void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv); +void intel_reset_gt_powersave(struct drm_i915_private *dev_priv); +void gen6_update_ring_freq(struct drm_i915_private *dev_priv); void gen6_rps_busy(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); void gen6_rps_boost(struct drm_i915_private *dev_priv, struct intel_rps_client *rps, unsigned long submitted); -void intel_queue_rps_boost_for_request(struct drm_device *dev, - struct drm_i915_gem_request *req); +void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req); void vlv_wm_get_hw_state(struct drm_device *dev); void ilk_wm_get_hw_state(struct drm_device *dev); void skl_wm_get_hw_state(struct drm_device *dev); @@ -1623,7 +1671,11 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb /* out */); uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); bool ilk_disable_lp_wm(struct drm_device *dev); -int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6); +int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); +static inline int intel_enable_rc6(void) +{ + return i915.enable_rc6; +} /* intel_sdvo.c */ bool intel_sdvo_init(struct 
drm_device *dev, @@ -1635,7 +1687,7 @@ int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); void intel_pipe_update_start(struct intel_crtc *crtc); -void intel_pipe_update_end(struct intel_crtc *crtc); +void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work); /* intel_tv.c */ void intel_tv_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 366ad6c67ce4..b444d0e35a98 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -532,7 +532,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); enum port port; - u32 tmp; DRM_DEBUG_KMS("\n"); @@ -551,11 +550,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) msleep(intel_dsi->panel_on_delay); - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + u32 val; + /* Disable DPOunit clock gating, can stall pipe */ - tmp = I915_READ(DSPCLK_GATE_D); - tmp |= DPOUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(DSPCLK_GATE_D, tmp); + val = I915_READ(DSPCLK_GATE_D); + val |= DPOUNIT_CLOCK_GATE_DISABLE; + I915_WRITE(DSPCLK_GATE_D, val); } /* put device in ready state */ @@ -693,7 +694,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder) intel_dsi_clear_device_ready(encoder); - if (!IS_BROXTON(dev_priv)) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { u32 val; val = I915_READ(DSPCLK_GATE_D); @@ -1171,6 +1172,12 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) if (intel_dsi->clock_stop) tmp |= CLOCKSTOP; + if (IS_BROXTON(dev_priv)) { + tmp |= BXT_DPHY_DEFEATURE_EN; + if (!is_cmd_mode(intel_dsi)) + tmp |= BXT_DEFEATURE_DPI_FIFO_CTR; + } + for_each_dsi_port(port, intel_dsi->ports) { I915_WRITE(MIPI_DSI_FUNC_PRG(port), val); @@ -1378,12 +1385,12 @@ static const struct drm_encoder_funcs intel_dsi_funcs = { static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = { .get_modes = intel_dsi_get_modes, .mode_valid = intel_dsi_mode_valid, - .best_encoder = intel_best_encoder, }; static const struct drm_connector_funcs intel_dsi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = intel_dsi_detect, + .early_unregister = intel_connector_unregister, .destroy = intel_dsi_connector_destroy, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_dsi_set_property, @@ -1449,7 +1456,7 @@ void intel_dsi_init(struct drm_device *dev) connector = &intel_connector->base; drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI, - NULL); + "DSI %c", port_name(port)); intel_encoder->compute_config = intel_dsi_compute_config; intel_encoder->pre_enable = intel_dsi_pre_enable; @@ -1460,7 +1467,6 @@ void intel_dsi_init(struct drm_device *dev) intel_encoder->get_config = intel_dsi_get_config; intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector->unregister = intel_connector_unregister; /* * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI @@ -1473,10 +1479,42 @@ void intel_dsi_init(struct drm_device *dev) else intel_encoder->crtc_mask = BIT(PIPE_B); - if (dev_priv->vbt.dsi.config->dual_link) + if (dev_priv->vbt.dsi.config->dual_link) { intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); - 
else + + switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) { + case DL_DCS_PORT_A: + intel_dsi->dcs_backlight_ports = BIT(PORT_A); + break; + case DL_DCS_PORT_C: + intel_dsi->dcs_backlight_ports = BIT(PORT_C); + break; + default: + case DL_DCS_PORT_A_AND_C: + intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C); + break; + } + + switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) { + case DL_DCS_PORT_A: + intel_dsi->dcs_cabc_ports = BIT(PORT_A); + break; + case DL_DCS_PORT_C: + intel_dsi->dcs_cabc_ports = BIT(PORT_C); + break; + default: + case DL_DCS_PORT_A_AND_C: + intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C); + break; + } + } else { intel_dsi->ports = BIT(port); + intel_dsi->dcs_backlight_ports = BIT(port); + intel_dsi->dcs_cabc_ports = BIT(port); + } + + if (!dev_priv->vbt.dsi.config->cabc_supported) + intel_dsi->dcs_cabc_ports = 0; /* Create a DSI host (and a device) for each port. */ for_each_dsi_port(port, intel_dsi->ports) { @@ -1545,6 +1583,9 @@ void intel_dsi_init(struct drm_device *dev) goto err; } + connector->display_info.width_mm = fixed_mode->width_mm; + connector->display_info.height_mm = fixed_mode->height_mm; + intel_panel_init(&intel_connector->panel, fixed_mode, NULL); intel_dsi_add_properties(intel_connector); diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index 61a6957fc6c2..5967ea6d6045 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -78,6 +78,10 @@ struct intel_dsi { u8 escape_clk_div; u8 dual_link; + + u16 dcs_backlight_ports; + u16 dcs_cabc_ports; + u8 pixel_overlap; u32 port_bits; u32 bw_timer; diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c new file mode 100644 index 000000000000..f0dc427743f8 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c @@ -0,0 +1,179 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Author: Deepak M <m.deepak at intel.com> + */ + +#include "intel_drv.h" +#include "intel_dsi.h" +#include "i915_drv.h" +#include <video/mipi_display.h> +#include <drm/drm_mipi_dsi.h> + +#define CONTROL_DISPLAY_BCTRL (1 << 5) +#define CONTROL_DISPLAY_DD (1 << 3) +#define CONTROL_DISPLAY_BL (1 << 2) + +#define POWER_SAVE_OFF (0 << 0) +#define POWER_SAVE_LOW (1 << 0) +#define POWER_SAVE_MEDIUM (2 << 0) +#define POWER_SAVE_HIGH (3 << 0) +#define POWER_SAVE_OUTDOOR_MODE (4 << 0) + +#define PANEL_PWM_MAX_VALUE 0xFF + +static u32 dcs_get_backlight(struct intel_connector *connector) +{ + struct intel_encoder *encoder = connector->encoder; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct mipi_dsi_device *dsi_device; + u8 data; + enum port port; + + /* FIXME: Need to take care of 16 bit brightness level */ + for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + dsi_device = intel_dsi->dsi_hosts[port]->device; + mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, + &data, sizeof(data)); + break; + } + + return data; +} + +static void dcs_set_backlight(struct intel_connector *connector, u32 level) +{ + struct intel_encoder *encoder = connector->encoder; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct mipi_dsi_device *dsi_device; + u8 data = level; + enum port port; + + /* FIXME: Need to take care of 16 bit brightness level */ + for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + dsi_device = intel_dsi->dsi_hosts[port]->device; + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, + &data, sizeof(data)); + } +} + +static void dcs_disable_backlight(struct intel_connector *connector) +{ + struct intel_encoder *encoder = connector->encoder; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct mipi_dsi_device *dsi_device; + enum port port; + + dcs_set_backlight(connector, 0); + + for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) { + u8 cabc = POWER_SAVE_OFF; + + dsi_device = intel_dsi->dsi_hosts[port]->device; + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE, + &cabc, sizeof(cabc)); + } + + for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + u8 ctrl = 0; + + dsi_device = intel_dsi->dsi_hosts[port]->device; + + mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY, + &ctrl, sizeof(ctrl)); + + ctrl &= ~CONTROL_DISPLAY_BL; + ctrl &= ~CONTROL_DISPLAY_DD; + ctrl &= ~CONTROL_DISPLAY_BCTRL; + + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY, + &ctrl, sizeof(ctrl)); + } +} + +static void dcs_enable_backlight(struct intel_connector *connector) +{ + struct intel_encoder *encoder = connector->encoder; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_panel *panel = &connector->panel; + struct mipi_dsi_device *dsi_device; + enum port port; + + for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + u8 ctrl = 0; + + dsi_device = intel_dsi->dsi_hosts[port]->device; + + mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY, + &ctrl, sizeof(ctrl)); + + ctrl |= CONTROL_DISPLAY_BL; + ctrl |= CONTROL_DISPLAY_DD; + ctrl |= CONTROL_DISPLAY_BCTRL; + + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY, + &ctrl, sizeof(ctrl)); + } + + for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) { + u8 cabc = POWER_SAVE_MEDIUM; + + dsi_device = intel_dsi->dsi_hosts[port]->device; + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE, + &cabc, sizeof(cabc)); + } + + dcs_set_backlight(connector, panel->backlight.level); +} + 
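The backlight hooks in this new intel_dsi_dcs_backlight.c drive the panel's own brightness logic over standard MIPI DCS commands rather than a PWM pin: 0x53 (write control display) gates the backlight, dimming, and brightness-control blocks, 0x55 (write power save) selects the CABC level, and 0x51 (set display brightness) programs the level itself, with every write replicated across each port in dcs_backlight_ports/dcs_cabc_ports for dual-link panels. Below is a minimal single-device sketch of the enable sequence, assuming only the generic mipi_dsi_dcs_write() helper; the function name and error handling are illustrative, not from the patch.

/* Illustrative sketch only -- not part of this patch. */
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>

#define CONTROL_DISPLAY_BCTRL	(1 << 5)	/* brightness control block on */
#define CONTROL_DISPLAY_DD	(1 << 3)	/* display dimming on */
#define CONTROL_DISPLAY_BL	(1 << 2)	/* backlight on */

static int example_dcs_backlight_on(struct mipi_dsi_device *dsi, u8 level)
{
	u8 ctrl = CONTROL_DISPLAY_BCTRL | CONTROL_DISPLAY_DD | CONTROL_DISPLAY_BL;
	ssize_t ret;

	/* Ungate the panel's brightness/dimming/backlight blocks (DCS 0x53). */
	ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
				 &ctrl, sizeof(ctrl));
	if (ret < 0)
		return ret;

	/* Program an 8-bit level (DCS 0x51); as the FIXMEs in the patch
	 * note, panels with 16-bit brightness need a wider write here. */
	ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
				 &level, sizeof(level));
	return ret < 0 ? ret : 0;
}

dcs_disable_backlight() above runs the mirror image of this sequence: brightness to zero, CABC to POWER_SAVE_OFF, then the same three control bits cleared.
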
+static int dcs_setup_backlight(struct intel_connector *connector, + enum pipe unused) +{ + struct intel_panel *panel = &connector->panel; + + panel->backlight.max = PANEL_PWM_MAX_VALUE; + panel->backlight.level = PANEL_PWM_MAX_VALUE; + + return 0; +} + +int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector) +{ + struct drm_device *dev = intel_connector->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *encoder = intel_connector->encoder; + struct intel_panel *panel = &intel_connector->panel; + + if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS) + return -ENODEV; + + if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI)) + return -EINVAL; + + panel->backlight.setup = dcs_setup_backlight; + panel->backlight.enable = dcs_enable_backlight; + panel->backlight.disable = dcs_disable_backlight; + panel->backlight.set = dcs_set_backlight; + panel->backlight.get = dcs_get_backlight; + + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index e498f1c3221e..f122484bedfc 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -95,6 +95,24 @@ static struct gpio_map vlv_gpio_table[] = { { VLV_GPIO_NC_11_PANEL1_BKLTCTL }, }; +#define CHV_GPIO_IDX_START_N 0 +#define CHV_GPIO_IDX_START_E 73 +#define CHV_GPIO_IDX_START_SW 100 +#define CHV_GPIO_IDX_START_SE 198 + +#define CHV_VBT_MAX_PINS_PER_FMLY 15 + +#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8) +#define CHV_GPIO_GPIOEN (1 << 15) +#define CHV_GPIO_GPIOCFG_GPIO (0 << 8) +#define CHV_GPIO_GPIOCFG_GPO (1 << 8) +#define CHV_GPIO_GPIOCFG_GPI (2 << 8) +#define CHV_GPIO_GPIOCFG_HIZ (3 << 8) +#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1) + +#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4) +#define CHV_GPIO_CFGLOCK (1 << 31) + static inline enum port intel_dsi_seq_port_to_port(u8 port) { return port ? PORT_C : PORT_A; @@ -203,13 +221,14 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv, map = &vlv_gpio_table[gpio_index]; if (dev_priv->vbt.dsi.seq_version >= 3) { - DRM_DEBUG_KMS("GPIO element v3 not supported\n"); - return; + /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */ + port = IOSF_PORT_GPIO_NC; } else { if (gpio_source == 0) { port = IOSF_PORT_GPIO_NC; } else if (gpio_source == 1) { - port = IOSF_PORT_GPIO_SC; + DRM_DEBUG_KMS("SC gpio not supported\n"); + return; } else { DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source); return; @@ -231,6 +250,56 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv, mutex_unlock(&dev_priv->sb_lock); } +static void chv_exec_gpio(struct drm_i915_private *dev_priv, + u8 gpio_source, u8 gpio_index, bool value) +{ + u16 cfg0, cfg1; + u16 family_num; + u8 port; + + if (dev_priv->vbt.dsi.seq_version >= 3) { + if (gpio_index >= CHV_GPIO_IDX_START_SE) { + /* XXX: it's unclear whether 255->57 is part of SE. 
*/ + gpio_index -= CHV_GPIO_IDX_START_SE; + port = CHV_IOSF_PORT_GPIO_SE; + } else if (gpio_index >= CHV_GPIO_IDX_START_SW) { + gpio_index -= CHV_GPIO_IDX_START_SW; + port = CHV_IOSF_PORT_GPIO_SW; + } else if (gpio_index >= CHV_GPIO_IDX_START_E) { + gpio_index -= CHV_GPIO_IDX_START_E; + port = CHV_IOSF_PORT_GPIO_E; + } else { + port = CHV_IOSF_PORT_GPIO_N; + } + } else { + /* XXX: The spec is unclear about CHV GPIO on seq v2 */ + if (gpio_source != 0) { + DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source); + return; + } + + if (gpio_index >= CHV_GPIO_IDX_START_E) { + DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n", + gpio_index); + return; + } + + port = CHV_IOSF_PORT_GPIO_N; + } + + family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY; + gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY; + + cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index); + cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index); + + mutex_lock(&dev_priv->sb_lock); + vlv_iosf_sb_write(dev_priv, port, cfg1, 0); + vlv_iosf_sb_write(dev_priv, port, cfg0, + CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value)); + mutex_unlock(&dev_priv->sb_lock); +} + static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) { struct drm_device *dev = intel_dsi->base.base.dev; @@ -254,6 +323,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) if (IS_VALLEYVIEW(dev_priv)) vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value); + else if (IS_CHERRYVIEW(dev_priv)) + chv_exec_gpio(dev_priv, gpio_source, gpio_index, value); else DRM_DEBUG_KMS("GPIO element not supported on this platform\n"); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 286baec979c8..60e4ddf2ec6d 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -341,6 +341,7 @@ static void intel_dvo_destroy(struct drm_connector *connector) static const struct drm_connector_funcs intel_dvo_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = intel_dvo_detect, + .early_unregister = intel_connector_unregister, .destroy = intel_dvo_destroy, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_connector_atomic_get_property, @@ -351,7 +352,6 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = { static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { .mode_valid = intel_dvo_mode_valid, .get_modes = intel_dvo_get_modes, - .best_encoder = intel_best_encoder, }; static void intel_dvo_enc_destroy(struct drm_encoder *encoder) @@ -406,6 +406,18 @@ intel_dvo_get_current_mode(struct drm_connector *connector) return mode; } +static char intel_dvo_port_name(i915_reg_t dvo_reg) +{ + if (i915_mmio_reg_equal(dvo_reg, DVOA)) + return 'A'; + else if (i915_mmio_reg_equal(dvo_reg, DVOB)) + return 'B'; + else if (i915_mmio_reg_equal(dvo_reg, DVOC)) + return 'C'; + else + return '?'; +} + void intel_dvo_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -428,8 +440,6 @@ void intel_dvo_init(struct drm_device *dev) intel_dvo->attached_connector = intel_connector; intel_encoder = &intel_dvo->base; - drm_encoder_init(dev, &intel_encoder->base, - &intel_dvo_enc_funcs, encoder_type, NULL); intel_encoder->disable = intel_disable_dvo; intel_encoder->enable = intel_enable_dvo; @@ -438,7 +448,6 @@ void intel_dvo_init(struct drm_device *dev) intel_encoder->compute_config = intel_dvo_compute_config; intel_encoder->pre_enable = intel_dvo_pre_enable; 
intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; - intel_connector->unregister = intel_connector_unregister; /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { @@ -496,6 +505,10 @@ void intel_dvo_init(struct drm_device *dev) if (!dvoinit) continue; + drm_encoder_init(dev, &intel_encoder->base, + &intel_dvo_enc_funcs, encoder_type, + "DVO %c", intel_dvo_port_name(dvo->dvo_reg)); + intel_encoder->type = INTEL_OUTPUT_DVO; intel_encoder->crtc_mask = (1 << 0) | (1 << 1); switch (dvo->type) { diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index d5a7cfec589b..ecabd59ffbaf 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -374,8 +374,9 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) * @dev_priv: i915 device instance * * This function is used to verify the current state of FBC. + * * FIXME: This should be tracked in the plane config eventually - * instead of queried at runtime for most callers. + * instead of queried at runtime for most callers. */ bool intel_fbc_is_active(struct drm_i915_private *dev_priv) { @@ -480,10 +481,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv) intel_fbc_hw_deactivate(dev_priv); } -static bool multiple_pipes_ok(struct intel_crtc *crtc) +static bool multiple_pipes_ok(struct intel_crtc *crtc, + struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_plane *primary = crtc->base.primary; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; enum pipe pipe = crtc->pipe; @@ -491,9 +492,7 @@ static bool multiple_pipes_ok(struct intel_crtc *crtc) if (!no_fbc_on_multiple_pipes(dev_priv)) return true; - WARN_ON(!drm_modeset_is_locked(&primary->mutex)); - - if (to_intel_plane_state(primary->state)->visible) + if (plane_state->visible) fbc->visible_pipes_mask |= (1 << pipe); else fbc->visible_pipes_mask &= ~(1 << pipe); @@ -708,21 +707,16 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) return effective_w <= max_w && effective_h <= max_h; } -static void intel_fbc_update_state_cache(struct intel_crtc *crtc) +static void intel_fbc_update_state_cache(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct intel_plane_state *plane_state = - to_intel_plane_state(crtc->base.primary->state); struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj; - WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex)); - WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex)); - cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) cache->crtc.hsw_bdw_pixel_rate = @@ -740,7 +734,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc) /* FIXME: We lack the proper locking here, so only run this on the * platforms that need. 
*/ - if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7) + if (IS_GEN(dev_priv, 5, 6)) cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj); cache->fb.pixel_format = fb->pixel_format; cache->fb.stride = fb->pitches[0]; @@ -827,7 +821,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc) bool enable_by_default = IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv); - if (intel_vgpu_active(dev_priv->dev)) { + if (intel_vgpu_active(dev_priv)) { fbc->no_fbc_reason = "VGPU is active"; return false; } @@ -887,7 +881,9 @@ static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, return memcmp(params1, params2, sizeof(*params1)) == 0; } -void intel_fbc_pre_update(struct intel_crtc *crtc) +void intel_fbc_pre_update(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct intel_fbc *fbc = &dev_priv->fbc; @@ -897,7 +893,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc) mutex_lock(&fbc->lock); - if (!multiple_pipes_ok(crtc)) { + if (!multiple_pipes_ok(crtc, plane_state)) { fbc->no_fbc_reason = "more than one pipe active"; goto deactivate; } @@ -905,7 +901,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc) if (!fbc->enabled || fbc->crtc != crtc) goto unlock; - intel_fbc_update_state_cache(crtc); + intel_fbc_update_state_cache(crtc, crtc_state, plane_state); deactivate: intel_fbc_deactivate(dev_priv); @@ -1089,7 +1085,9 @@ out: * intel_fbc_enable multiple times for the same pipe without an * intel_fbc_disable in the middle, as long as it is deactivated. */ -void intel_fbc_enable(struct intel_crtc *crtc) +void intel_fbc_enable(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct intel_fbc *fbc = &dev_priv->fbc; @@ -1102,19 +1100,19 @@ void intel_fbc_enable(struct intel_crtc *crtc) if (fbc->enabled) { WARN_ON(fbc->crtc == NULL); if (fbc->crtc == crtc) { - WARN_ON(!crtc->config->enable_fbc); + WARN_ON(!crtc_state->enable_fbc); WARN_ON(fbc->active); } goto out; } - if (!crtc->config->enable_fbc) + if (!crtc_state->enable_fbc) goto out; WARN_ON(fbc->active); WARN_ON(fbc->crtc != NULL); - intel_fbc_update_state_cache(crtc); + intel_fbc_update_state_cache(crtc, crtc_state, plane_state); if (intel_fbc_alloc_cfb(crtc)) { fbc->no_fbc_reason = "not enough stolen memory"; goto out; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index ab8d09a81f14..4babefc51eb2 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -150,10 +150,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper, if (size * 2 < ggtt->stolen_usable_size) obj = i915_gem_object_create_stolen(dev, size); if (obj == NULL) - obj = i915_gem_alloc_object(dev, size); - if (!obj) { + obj = i915_gem_object_create(dev, size); + if (IS_ERR(obj)) { DRM_ERROR("failed to allocate framebuffer\n"); - ret = -ENOMEM; + ret = PTR_ERR(obj); goto out; } @@ -186,9 +186,11 @@ static int intelfb_create(struct drm_fb_helper *helper, struct i915_ggtt *ggtt = &dev_priv->ggtt; struct fb_info *info; struct drm_framebuffer *fb; + struct i915_vma *vma; struct drm_i915_gem_object *obj; - int size, ret; bool prealloc = false; + void *vaddr; + int ret; if (intel_fb && (sizes->fb_width > intel_fb->base.width || @@ -214,7 +216,6 @@ static int intelfb_create(struct drm_fb_helper *helper, } obj = 
intel_fb->obj; - size = obj->base.size; mutex_lock(&dev->struct_mutex); @@ -244,22 +245,23 @@ static int intelfb_create(struct drm_fb_helper *helper, info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &intelfb_ops; + vma = i915_gem_obj_to_ggtt(obj); + /* setup aperture base/size for vesafb takeover */ info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].size = ggtt->mappable_end; - info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); - info->fix.smem_len = size; + info->fix.smem_start = dev->mode_config.fb_base + vma->node.start; + info->fix.smem_len = vma->node.size; - info->screen_base = - ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj), - size); - if (!info->screen_base) { + vaddr = i915_vma_pin_iomap(vma); + if (IS_ERR(vaddr)) { DRM_ERROR("Failed to remap framebuffer into virtual memory\n"); - ret = -ENOSPC; + ret = PTR_ERR(vaddr); goto out_destroy_fbi; } - info->screen_size = size; + info->screen_base = vaddr; + info->screen_size = vma->node.size; /* This driver doesn't need a VT switch to restore the mode on resume */ info->skip_vt_switch = true; @@ -287,7 +289,7 @@ static int intelfb_create(struct drm_fb_helper *helper, out_destroy_fbi: drm_fb_helper_release_fbi(helper); out_unpin: - i915_gem_object_ggtt_unpin(obj); + intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); out_unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -488,10 +490,10 @@ retry: } crtcs[i] = new_crtc; - DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n", + DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n", connector->name, - pipe_name(to_intel_crtc(connector->state->crtc)->pipe), connector->state->crtc->base.id, + connector->state->crtc->name, modes[i]->hdisplay, modes[i]->vdisplay, modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); @@ -550,7 +552,10 @@ static void intel_fbdev_destroy(struct drm_device *dev, drm_fb_helper_fini(&ifbdev->helper); if (ifbdev->fb) { - drm_framebuffer_unregister_private(&ifbdev->fb->base); + mutex_lock(&dev->struct_mutex); + intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); + mutex_unlock(&dev->struct_mutex); + drm_framebuffer_remove(&ifbdev->fb->base); } } @@ -717,8 +722,6 @@ int intel_fbdev_init(struct drm_device *dev) return ret; } - ifbdev->helper.atomic = true; - dev_priv->fbdev = ifbdev; INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker); diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 9d79c4c3e256..4df80cc9a291 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -48,14 +48,23 @@ struct drm_i915_gem_request; * queue (a circular array of work items), again described in the process * descriptor. Work queue pages are mapped momentarily as required. * - * Finally, we also keep a few statistics here, including the number of - * submissions to each engine, and a record of the last submission failure - * (if any). + * We also keep a few statistics on failures. Ideally, these should all + * be zero! + * no_wq_space: times that the submission pre-check found no space was + * available in the work queue (note, the queue is shared, + * not per-engine). It is OK for this to be nonzero, but + * it should not be huge! + * q_fail: failed to enqueue a work item. This should never happen, + * because we check for space beforehand. + * b_fail: failed to ring the doorbell. 
This should never happen, unless + * somehow the hardware misbehaves, or maybe if the GuC firmware + * crashes? We probably need to reset the GPU to recover. + * retcode: errno from last guc_submit() */ struct i915_guc_client { struct drm_i915_gem_object *client_obj; void *client_base; /* first page (only) of above */ - struct intel_context *owner; + struct i915_gem_context *owner; struct intel_guc *guc; uint32_t priority; uint32_t ctx_index; @@ -71,12 +80,13 @@ struct i915_guc_client { uint32_t wq_tail; uint32_t unused; /* Was 'wq_head' */ - /* GuC submission statistics & status */ - uint64_t submissions[GUC_MAX_ENGINES_NUM]; - uint32_t q_fail; + uint32_t no_wq_space; + uint32_t q_fail; /* No longer used */ uint32_t b_fail; int retcode; - int spare; /* pad to 32 DWords */ + + /* Per-engine counts of GuC submissions */ + uint64_t submissions[GUC_MAX_ENGINES_NUM]; }; enum intel_guc_fw_status { @@ -138,20 +148,19 @@ struct intel_guc { }; /* intel_guc_loader.c */ -extern void intel_guc_ucode_init(struct drm_device *dev); -extern int intel_guc_ucode_load(struct drm_device *dev); -extern void intel_guc_ucode_fini(struct drm_device *dev); +extern void intel_guc_init(struct drm_device *dev); +extern int intel_guc_setup(struct drm_device *dev); +extern void intel_guc_fini(struct drm_device *dev); extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status); extern int intel_guc_suspend(struct drm_device *dev); extern int intel_guc_resume(struct drm_device *dev); /* i915_guc_submission.c */ -int i915_guc_submission_init(struct drm_device *dev); -int i915_guc_submission_enable(struct drm_device *dev); -int i915_guc_submit(struct i915_guc_client *client, - struct drm_i915_gem_request *rq); -void i915_guc_submission_disable(struct drm_device *dev); -void i915_guc_submission_fini(struct drm_device *dev); -int i915_guc_wq_check_space(struct i915_guc_client *client); +int i915_guc_submission_init(struct drm_i915_private *dev_priv); +int i915_guc_submission_enable(struct drm_i915_private *dev_priv); +int i915_guc_wq_check_space(struct drm_i915_gem_request *rq); +int i915_guc_submit(struct drm_i915_gem_request *rq); +void i915_guc_submission_disable(struct drm_i915_private *dev_priv); +void i915_guc_submission_fini(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 2de57ffe5e18..944786d7075b 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -71,7 +71,8 @@ #define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT) #define WQ_RING_TAIL_SHIFT 20 -#define WQ_RING_TAIL_MASK (0x7FF << WQ_RING_TAIL_SHIFT) +#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ +#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT) #define GUC_DOORBELL_ENABLED 1 #define GUC_DOORBELL_DISABLED 0 diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 876e5da44c4e..8fe96a2d989e 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -59,9 +59,12 @@ * */ -#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin" +#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin" MODULE_FIRMWARE(I915_SKL_GUC_UCODE); +#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin" +MODULE_FIRMWARE(I915_BXT_GUC_UCODE); + /* User-friendly representation of an enum */ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status) { @@ -100,6 +103,7 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv) { 
struct intel_engine_cs *engine; int irqs; + u32 tmp; /* tell all command streamers to forward interrupts and vblank to GuC */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS); @@ -114,6 +118,16 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv) I915_WRITE(GUC_BCS_RCS_IER, ~irqs); I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs); I915_WRITE(GUC_WD_VECS_IER, ~irqs); + + /* + * If GuC has routed PM interrupts to itself, don't keep the redirect + * bit, but do keep the other interrupts that GuC leaves unmasked. + */ + tmp = I915_READ(GEN6_PMINTRMSK); + if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) { + dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP); + dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; + } } static u32 get_gttype(struct drm_i915_private *dev_priv) @@ -281,6 +295,17 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) return ret; } +static u32 guc_wopcm_size(struct drm_i915_private *dev_priv) +{ + u32 wopcm_size = GUC_WOPCM_TOP; + + /* On BXT, the top of WOPCM is reserved for RC6 context */ + if (IS_BROXTON(dev_priv)) + wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED; + + return wopcm_size; +} + /* * Load the GuC firmware blob into the MinuteIA. */ @@ -308,7 +333,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); /* init WOPCM */ - I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE); + I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv)); I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE); /* Enable MIA caching. GuC clock gating is disabled. */ @@ -372,66 +397,63 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv) } /** - * intel_guc_ucode_load() - load GuC uCode into the device + * intel_guc_setup() - finish preparing the GuC for activity * @dev: drm device * * Called from gem_init_hw() during driver loading and also after a GPU reset. * + * The main action required here is to load the GuC uCode into the device. * The firmware image should have already been fetched into memory by the - * earlier call to intel_guc_ucode_init(), so here we need only check that - * is succeeded, and then transfer the image to the h/w. + * earlier call to intel_guc_init(), so here we need only check that worked, + * and then transfer the image to the h/w. * * Return: non-zero code on error */ -int intel_guc_ucode_load(struct drm_device *dev) +int intel_guc_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; - int retries, err = 0; - - if (!i915.enable_guc_submission) - return 0; + const char *fw_path = guc_fw->guc_fw_path; + int retries, ret, err; - DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", + DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n", + fw_path, intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); - direct_interrupts_to_host(dev_priv); - - if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE) - return 0; - - if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS && - guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) - return -ENOEXEC; - - guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING; - - DRM_DEBUG_DRIVER("GuC fw fetch status %s\n", - intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status)); - - switch (guc_fw->guc_fw_fetch_status) { - case GUC_FIRMWARE_FAIL: - /* something went wrong :( */ - err = -EIO; + /* Loading forbidden, or no firmware to load? 
*/ + if (!i915.enable_guc_loading) { + err = 0; goto fail; - - case GUC_FIRMWARE_NONE: - case GUC_FIRMWARE_PENDING: - default: - /* "can't happen" */ - WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n", - guc_fw->guc_fw_path, - intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), - guc_fw->guc_fw_fetch_status); + } else if (fw_path == NULL) { + /* Device is known to have no uCode (e.g. no GuC) */ err = -ENXIO; goto fail; + } else if (*fw_path == '\0') { + /* Device has a GuC but we don't know what f/w to load? */ + DRM_INFO("No GuC firmware known for this platform\n"); + err = -ENODEV; + goto fail; + } - case GUC_FIRMWARE_SUCCESS: - break; + /* Fetch failed, or already fetched but failed to load? */ + if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) { + err = -EIO; + goto fail; + } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) { + err = -ENOEXEC; + goto fail; } - err = i915_guc_submission_init(dev); + direct_interrupts_to_host(dev_priv); + + guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING; + + DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", + intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), + intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); + + err = i915_guc_submission_init(dev_priv); if (err) goto fail; @@ -448,7 +470,7 @@ int intel_guc_ucode_load(struct drm_device *dev) */ err = i915_reset_guc(dev_priv); if (err) { - DRM_ERROR("GuC reset failed, err %d\n", err); + DRM_ERROR("GuC reset failed: %d\n", err); goto fail; } @@ -459,8 +481,8 @@ int intel_guc_ucode_load(struct drm_device *dev) if (--retries == 0) goto fail; - DRM_INFO("GuC fw load failed, err %d; will reset and " - "retry %d more time(s)\n", err, retries); + DRM_INFO("GuC fw load failed: %d; will reset and " + "retry %d more time(s)\n", err, retries); } guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS; @@ -470,10 +492,7 @@ int intel_guc_ucode_load(struct drm_device *dev) intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); if (i915.enable_guc_submission) { - /* The execbuf_client will be recreated. Release it first. */ - i915_guc_submission_disable(dev); - - err = i915_guc_submission_enable(dev); + err = i915_guc_submission_enable(dev_priv); if (err) goto fail; direct_interrupts_to_guc(dev_priv); @@ -482,15 +501,50 @@ int intel_guc_ucode_load(struct drm_device *dev) return 0; fail: - DRM_ERROR("GuC firmware load failed, err %d\n", err); if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING) guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL; direct_interrupts_to_host(dev_priv); - i915_guc_submission_disable(dev); - i915_guc_submission_fini(dev); + i915_guc_submission_disable(dev_priv); + i915_guc_submission_fini(dev_priv); - return err; + /* + * We've failed to load the firmware :( + * + * Decide whether to disable GuC submission and fall back to + * execlist mode, and whether to hide the error by returning + * zero or to return -EIO, which the caller will treat as a + * nonfatal error (i.e. it doesn't prevent driver load, but + * marks the GPU as wedged until reset). + */ + if (i915.enable_guc_loading > 1) { + ret = -EIO; + } else if (i915.enable_guc_submission > 1) { + ret = -EIO; + } else { + ret = 0; + } + + if (err == 0 && !HAS_GUC_UCODE(dev)) + ; /* Don't mention the GuC! 
*/ + else if (err == 0) + DRM_INFO("GuC firmware load skipped\n"); + else if (ret != -EIO) + DRM_INFO("GuC firmware load failed: %d\n", err); + else + DRM_ERROR("GuC firmware load failed: %d\n", err); + + if (i915.enable_guc_submission) { + if (fw_path == NULL) + DRM_INFO("GuC submission without firmware not supported\n"); + if (ret == 0) + DRM_INFO("Falling back from GuC submission to execlist mode\n"); + else + DRM_ERROR("GuC init failed: %d\n", ret); + } + i915.enable_guc_submission = 0; + + return ret; } static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) @@ -552,9 +606,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) /* Header and uCode will be loaded to WOPCM. Size of the two. */ size = guc_fw->header_size + guc_fw->ucode_size; - - /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */ - if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) { + if (size > guc_wopcm_size(dev->dev_private)) { DRM_ERROR("Firmware is too large to fit in WOPCM\n"); goto fail; } @@ -617,22 +669,25 @@ fail: } /** - * intel_guc_ucode_init() - define parameters and fetch firmware + * intel_guc_init() - define parameters and fetch firmware * @dev: drm device * * Called early during driver load, but after GEM is initialised. * * The firmware will be transferred to the GuC's memory later, - * when intel_guc_ucode_load() is called. + * when intel_guc_setup() is called. */ -void intel_guc_ucode_init(struct drm_device *dev) +void intel_guc_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; const char *fw_path; - if (!HAS_GUC_SCHED(dev)) - i915.enable_guc_submission = false; + /* A negative value means "use platform default" */ + if (i915.enable_guc_loading < 0) + i915.enable_guc_loading = HAS_GUC_UCODE(dev); + if (i915.enable_guc_submission < 0) + i915.enable_guc_submission = HAS_GUC_SCHED(dev); if (!HAS_GUC_UCODE(dev)) { fw_path = NULL; @@ -640,27 +695,26 @@ void intel_guc_ucode_init(struct drm_device *dev) fw_path = I915_SKL_GUC_UCODE; guc_fw->guc_fw_major_wanted = 6; guc_fw->guc_fw_minor_wanted = 1; + } else if (IS_BROXTON(dev)) { + fw_path = I915_BXT_GUC_UCODE; + guc_fw->guc_fw_major_wanted = 8; + guc_fw->guc_fw_minor_wanted = 7; } else { - i915.enable_guc_submission = false; fw_path = ""; /* unknown device */ } - if (!i915.enable_guc_submission) - return; - guc_fw->guc_dev = dev; guc_fw->guc_fw_path = fw_path; guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE; + /* Early (and silent) return if GuC loading is disabled */ + if (!i915.enable_guc_loading) + return; if (fw_path == NULL) return; - - if (*fw_path == '\0') { - DRM_ERROR("No GuC firmware known for this platform\n"); - guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL; + if (*fw_path == '\0') return; - } guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING; DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path); @@ -669,18 +723,18 @@ void intel_guc_ucode_init(struct drm_device *dev) } /** - * intel_guc_ucode_fini() - clean up all allocated resources + * intel_guc_fini() - clean up all allocated resources * @dev: drm device */ -void intel_guc_ucode_fini(struct drm_device *dev) +void intel_guc_fini(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; mutex_lock(&dev->struct_mutex); direct_interrupts_to_host(dev_priv); - i915_guc_submission_disable(dev); - i915_guc_submission_fini(dev); + 
i915_guc_submission_disable(dev_priv); + i915_guc_submission_fini(dev_priv); if (guc_fw->guc_fw_obj) drm_gem_object_unreference(&guc_fw->guc_fw_obj->base); diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c new file mode 100644 index 000000000000..9fa458ce40a6 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -0,0 +1,100 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "i915_drv.h" +#include "intel_gvt.h" + +/** + * DOC: Intel GVT-g host support + * + * Intel GVT-g is a graphics virtualization technology which shares the + * GPU among multiple virtual machines on a time-sharing basis. Each + * virtual machine is presented a virtual GPU (vGPU), which has the same + * features as the underlying physical GPU (pGPU), so the i915 driver can + * run seamlessly in a virtual machine. This file provides the + * enlightenments of GVT and the necessary components used by GVT in the + * i915 driver. + */ + +static bool is_supported_device(struct drm_i915_private *dev_priv) +{ + if (IS_BROADWELL(dev_priv)) + return true; + return false; +} + +/** + * intel_gvt_init - initialize GVT components + * @dev_priv: drm i915 private data + * + * This function is called at the initialization stage to create a GVT device. + * + * Returns: + * Zero on success, negative error code if failed. + * + */ +int intel_gvt_init(struct drm_i915_private *dev_priv) +{ + int ret; + + if (!i915.enable_gvt) { + DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n"); + return 0; + } + + if (!is_supported_device(dev_priv)) { + DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n"); + return 0; + } + + /* + * If we're not in a host, or fail to find an MPT module, disable GVT-g. + */ + ret = intel_gvt_init_host(); + if (ret) { + DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n"); + return 0; + } + + ret = intel_gvt_init_device(dev_priv); + if (ret) { + DRM_DEBUG_DRIVER("Failed to init GVT device\n"); + return 0; + } + + return 0; +} + +/** + * intel_gvt_cleanup - cleanup GVT components when i915 driver is unloading + * @dev_priv: drm i915 private data + * + * This function is called at the i915 driver unloading stage, to shut down + * GVT components and release the related resources.
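+ *
+ * A sketch of the intended pairing (the call sites are assumed, not
+ * shown in this diff):
+ *
+ *	ret = intel_gvt_init(dev_priv);		(at driver load)
+ *	...
+ *	intel_gvt_cleanup(dev_priv);		(at driver unload)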
+ */ +void intel_gvt_cleanup(struct drm_i915_private *dev_priv) +{ + if (!intel_gvt_active(dev_priv)) + return; + + intel_gvt_clean_device(dev_priv); +} diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h new file mode 100644 index 000000000000..960211df74db --- /dev/null +++ b/drivers/gpu/drm/i915/intel_gvt.h @@ -0,0 +1,45 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _INTEL_GVT_H_ +#define _INTEL_GVT_H_ + +#include "gvt/gvt.h" + +#ifdef CONFIG_DRM_I915_GVT +int intel_gvt_init(struct drm_i915_private *dev_priv); +void intel_gvt_cleanup(struct drm_i915_private *dev_priv); +int intel_gvt_init_device(struct drm_i915_private *dev_priv); +void intel_gvt_clean_device(struct drm_i915_private *dev_priv); +int intel_gvt_init_host(void); +#else +static inline int intel_gvt_init(struct drm_i915_private *dev_priv) +{ + return 0; +} +static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv) +{ +} +#endif + +#endif /* _INTEL_GVT_H_ */ diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 2c3bd9c2573e..fb21626ada64 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1678,35 +1678,12 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - u32 val; - /* Enable clock channels for this port */ - mutex_lock(&dev_priv->sb_lock); - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); - val = 0; - if (pipe) - val |= (1<<21); - else - val &= ~(1<<21); - val |= 0x001000c4; - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); + vlv_phy_pre_encoder_enable(encoder); /* HDMI 1.0V-2dB */ - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040); - vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); - 
- /* Program lane clock */ - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); - mutex_unlock(&dev_priv->sb_lock); + vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a, + 0x2b247878); intel_hdmi->set_infoframes(&encoder->base, intel_crtc->config->has_hdmi_sink, @@ -1719,207 +1696,27 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - intel_hdmi_prepare(encoder); - /* Program Tx lane resets to default */ - mutex_lock(&dev_priv->sb_lock); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), - DPIO_PCS_TX_LANE2_RESET | - DPIO_PCS_TX_LANE1_RESET); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), - DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | - DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | - (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | - DPIO_PCS_CLK_SOFT_RESET); - - /* Fix up inter-pair skew failure */ - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); - - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); - mutex_unlock(&dev_priv->sb_lock); -} - -static void chv_data_lane_soft_reset(struct intel_encoder *encoder, - bool reset) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); - enum pipe pipe = crtc->pipe; - uint32_t val; - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); - if (reset) - val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - else - val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); - - if (crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); - if (reset) - val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - else - val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); - } - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); - val |= CHV_PCS_REQ_SOFTRESET_EN; - if (reset) - val &= ~DPIO_PCS_CLK_SOFT_RESET; - else - val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); - - if (crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); - val |= CHV_PCS_REQ_SOFTRESET_EN; - if (reset) - val &= ~DPIO_PCS_CLK_SOFT_RESET; - else - val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); - } + vlv_phy_pre_pll_enable(encoder); } static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel ch = vlv_dport_to_channel(dport); - enum pipe pipe = intel_crtc->pipe; - u32 val; - 
intel_hdmi_prepare(encoder); - /* - * Must trick the second common lane into life. - * Otherwise we can't even access the PLL. - */ - if (ch == DPIO_CH0 && pipe == PIPE_B) - dport->release_cl2_override = - !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true); - - chv_phy_powergate_lanes(encoder, true, 0x0); - - mutex_lock(&dev_priv->sb_lock); - - /* Assert data lane reset */ - chv_data_lane_soft_reset(encoder, true); - - /* program left/right clock distribution */ - if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); - val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - if (ch == DPIO_CH0) - val |= CHV_BUFLEFTENA1_FORCE; - if (ch == DPIO_CH1) - val |= CHV_BUFRIGHTENA1_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); - } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); - val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - if (ch == DPIO_CH0) - val |= CHV_BUFLEFTENA2_FORCE; - if (ch == DPIO_CH1) - val |= CHV_BUFRIGHTENA2_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); - } - - /* program clock channel usage */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; - else - val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; - else - val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); - - /* - * This a a bit weird since generally CL - * matches the pipe, but here we need to - * pick the CL based on the port. - */ - val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); - if (pipe != PIPE_B) - val &= ~CHV_CMN_USEDCLKCHANNEL; - else - val |= CHV_CMN_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); - - mutex_unlock(&dev_priv->sb_lock); + chv_phy_pre_pll_enable(encoder); } static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; - u32 val; - - mutex_lock(&dev_priv->sb_lock); - - /* disable left/right clock distribution */ - if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); - val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); - } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); - val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); - } - - mutex_unlock(&dev_priv->sb_lock); - - /* - * Leave the power down bit cleared for at least one - * lane so that chv_powergate_phy_ch() will power - * on something when the channel is otherwise unused. - * When the port is off and the override is removed - * the lanes power down anyway, so otherwise it doesn't - * really matter what the state of power down bits is - * after this. 
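 * (this open-coded powergating dance is presumably preserved verbatim
 * in the new chv_phy_post_pll_disable() helper that replaces it below;
 * the helper itself is assumed to live in intel_dpio_phy.c elsewhere
 * in this series)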
- */ - chv_phy_powergate_lanes(encoder, false, 0x0); + chv_phy_post_pll_disable(encoder); } static void vlv_hdmi_post_disable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - /* Reset lanes to avoid HDMI flicker (VLV w/a) */ - mutex_lock(&dev_priv->sb_lock); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060); - mutex_unlock(&dev_priv->sb_lock); + vlv_phy_reset_lanes(encoder); } static void chv_hdmi_post_disable(struct intel_encoder *encoder) @@ -1944,138 +1741,12 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; - enum dpio_channel ch = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - int data, i, stagger; - u32 val; - mutex_lock(&dev_priv->sb_lock); - - /* allow hardware to manage TX FIFO reset source */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); - val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); - val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); - - /* Program Tx latency optimal setting */ - for (i = 0; i < 4; i++) { - /* Set the upar bit */ - data = (i == 1) ? 0x0 : 0x1; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), - data << DPIO_UPAR_SHIFT); - } - - /* Data lane stagger programming */ - if (intel_crtc->config->port_clock > 270000) - stagger = 0x18; - else if (intel_crtc->config->port_clock > 135000) - stagger = 0xd; - else if (intel_crtc->config->port_clock > 67500) - stagger = 0x7; - else if (intel_crtc->config->port_clock > 33750) - stagger = 0x4; - else - stagger = 0x2; - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); - val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); - val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); - - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), - DPIO_LANESTAGGER_STRAP(stagger) | - DPIO_LANESTAGGER_STRAP_OVRD | - DPIO_TX1_STAGGER_MASK(0x1f) | - DPIO_TX1_STAGGER_MULT(6) | - DPIO_TX2_STAGGER_MULT(0)); - - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), - DPIO_LANESTAGGER_STRAP(stagger) | - DPIO_LANESTAGGER_STRAP_OVRD | - DPIO_TX1_STAGGER_MASK(0x1f) | - DPIO_TX1_STAGGER_MULT(7) | - DPIO_TX2_STAGGER_MULT(5)); - - /* Deassert data lane reset */ - chv_data_lane_soft_reset(encoder, false); - - /* Clear calc init */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); - val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); - val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); - val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); - val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); - val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); - val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - vlv_dpio_write(dev_priv, 
pipe, VLV_PCS23_DW10(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); - val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); - val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); - val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); - val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); + chv_phy_pre_encoder_enable(encoder); /* FIXME: Program the support xxx V-dB */ /* Use 800mV-0dB */ - for (i = 0; i < 4; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); - val &= ~DPIO_SWING_DEEMPH9P5_MASK; - val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); - } - - for (i = 0; i < 4; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); - - val &= ~DPIO_SWING_MARGIN000_MASK; - val |= 102 << DPIO_SWING_MARGIN000_SHIFT; - - /* - * Supposedly this value shouldn't matter when unique transition - * scale is disabled, but in fact it does matter. Let's just - * always program the same value and hope it's OK. - */ - val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); - val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; - - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); - } - - /* - * The document said it needs to set bit 27 for ch0 and bit 26 - * for ch1. Might be a typo in the doc. - * For now, for this unique transition scale selection, set bit - * 27 for ch0 and ch1. - */ - for (i = 0; i < 4; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); - val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); - } - - /* Start swing calculation */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); - val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); - val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); - - mutex_unlock(&dev_priv->sb_lock); + chv_set_phy_signal_level(encoder, 128, 102, false); intel_hdmi->set_infoframes(&encoder->base, intel_crtc->config->has_hdmi_sink, @@ -2086,10 +1757,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) vlv_wait_port_ready(dev_priv, dport, 0x0); /* Second common lane will stay alive on its own now */ - if (dport->release_cl2_override) { - chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false); - dport->release_cl2_override = false; - } + chv_phy_release_cl2_override(encoder); } static void intel_hdmi_destroy(struct drm_connector *connector) @@ -2106,6 +1774,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_hdmi_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .early_unregister = intel_connector_unregister, .destroy = intel_hdmi_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -2114,7 +1783,6 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { .get_modes = intel_hdmi_get_modes, .mode_valid = intel_hdmi_mode_valid, - .best_encoder = intel_best_encoder, }; static const struct 
drm_encoder_funcs intel_hdmi_enc_funcs = { @@ -2142,6 +1810,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, enum port port = intel_dig_port->port; uint8_t alternate_ddc_pin; + DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", + port_name(port)); + if (WARN(intel_dig_port->max_lanes < 4, "Not enough lanes (%d) for HDMI on port %c\n", intel_dig_port->max_lanes, port_name(port))) @@ -2239,7 +1910,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; else intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector->unregister = intel_connector_unregister; intel_hdmi_add_properties(intel_hdmi, connector); @@ -2277,7 +1947,7 @@ void intel_hdmi_init(struct drm_device *dev, intel_encoder = &intel_dig_port->base; drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port)); intel_encoder->compute_config = intel_hdmi_compute_config; if (HAS_PCH_SPLIT(dev)) { diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index bee673005d48..38eeca7a6e72 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) } } if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); intel_runtime_pm_put(dev_priv); @@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work) /** * intel_hpd_irq_handler - main hotplug irq handler - * @dev: drm device + * @dev_priv: drm_i915_private * @pin_mask: a mask of hpd pins that have triggered the irq * @long_mask: a mask of hpd pins that may be long hpd pulses * @@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work) * Here, we do hotplug irq storm detection and mitigation, and pass further * processing to appropriate bottom halves. */ -void intel_hpd_irq_handler(struct drm_device *dev, +void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; int i; enum port port; bool storm_detected = false; @@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev, * hotplug bits itself. So only WARN about unexpected * interrupts on saner platforms. 
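 * (Note the recurring dev -> dev_priv conversion in this file: every
 * call site now passes dev_priv, so the hook itself must change type
 * elsewhere in this series, presumably to
 *	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
 * from the old drm_device-based signature.)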
*/ - WARN_ONCE(!HAS_GMCH_DISPLAY(dev), + WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv), "Received HPD interrupt on pin %d although disabled\n", i); continue; } @@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev, } if (storm_detected) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock(&dev_priv->irq_lock); /* @@ -485,7 +484,7 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) */ spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 42eac37de047..debed011a958 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -208,31 +208,27 @@ } while (0) enum { - ADVANCED_CONTEXT = 0, - LEGACY_32B_CONTEXT, - ADVANCED_AD_CONTEXT, - LEGACY_64B_CONTEXT -}; -#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3 -#define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\ - LEGACY_64B_CONTEXT :\ - LEGACY_32B_CONTEXT) -enum { FAULT_AND_HANG = 0, FAULT_AND_HALT, /* Debug only */ FAULT_AND_STREAM, FAULT_AND_CONTINUE /* Unsupported */ }; #define GEN8_CTX_ID_SHIFT 32 +#define GEN8_CTX_ID_WIDTH 21 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 -static int intel_lr_context_pin(struct intel_context *ctx, +/* Typical size of the average request (2 pipecontrols and a MI_BB) */ +#define EXECLISTS_REQUEST_SIZE 64 /* bytes */ + +static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, + struct intel_engine_cs *engine); +static int intel_lr_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine); /** * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists - * @dev: DRM device. + * @dev_priv: i915 device private * @enable_execlists: value of i915.enable_execlists module parameter. * * Only certain platforms support Execlists (the prerequisites being @@ -240,23 +236,22 @@ static int intel_lr_context_pin(struct intel_context *ctx, * * Return: 1 if Execlists is supported and has to be enabled. */ -int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists) +int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists) { - WARN_ON(i915.enable_ppgtt == -1); - /* On platforms with execlist available, vGPU will only * support execlist mode, no ring buffer mode. 
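 * (Decision ladder as rewritten below: vGPU and gen9+ force execlists
 * on; an explicit enable_execlists=0 turns them off; otherwise they
 * are enabled only when logical ring contexts, PPGTT and a
 * non-negative i915.use_mmio_flip are all present.)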
*/ - if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev)) + if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv)) return 1; - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) return 1; if (enable_execlists == 0) return 0; - if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) && + if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && + USES_PPGTT(dev_priv) && i915.use_mmio_flip >= 0) return 1; @@ -266,19 +261,17 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists static void logical_ring_init_platform_invariants(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; + struct drm_i915_private *dev_priv = engine->i915; - if (IS_GEN8(dev) || IS_GEN9(dev)) + if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) engine->idle_lite_restore_wa = ~0; - engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && + engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) && (engine->id == VCS || engine->id == VCS2); engine->ctx_desc_template = GEN8_CTX_VALID; - engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << - GEN8_CTX_ADDRESSING_MODE_SHIFT; - if (IS_GEN8(dev)) + if (IS_GEN8(dev_priv)) engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; @@ -297,7 +290,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine) * descriptor for a pinned context * * @ctx: Context to work on - * @ring: Engine the descriptor will be used with + * @engine: Engine the descriptor will be used with * * The context descriptor encodes various attributes of a context, * including its GTT address and some flags. Because it's fairly @@ -305,62 +298,42 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine) * which remains valid until the context is unpinned. * * This is what a descriptor looks like, from LSB to MSB: - * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) + * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) * bits 12-31: LRCA, GTT address of (the HWSP of) this context - * bits 32-51: ctx ID, a globally unique tag (the LRCA again!) 
- * bits 52-63: reserved, may encode the engine ID (for GuC) + * bits 32-52: ctx ID, a globally unique tag + * bits 53-54: mbz, reserved for use by hardware + * bits 55-63: group ID, currently unused and set to 0 */ static void -intel_lr_context_descriptor_update(struct intel_context *ctx, +intel_lr_context_descriptor_update(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { - uint64_t lrca, desc; + struct intel_context *ce = &ctx->engine[engine->id]; + u64 desc; - lrca = ctx->engine[engine->id].lrc_vma->node.start + - LRC_PPHWSP_PN * PAGE_SIZE; + BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH)); - desc = engine->ctx_desc_template; /* bits 0-11 */ - desc |= lrca; /* bits 12-31 */ - desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */ + desc = ctx->desc_template; /* bits 3-4 */ + desc |= engine->ctx_desc_template; /* bits 0-11 */ + desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE; + /* bits 12-31 */ + desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ - ctx->engine[engine->id].lrc_desc = desc; + ce->lrc_desc = desc; } -uint64_t intel_lr_context_descriptor(struct intel_context *ctx, +uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { return ctx->engine[engine->id].lrc_desc; } -/** - * intel_execlists_ctx_id() - get the Execlists Context ID - * @ctx: Context to get the ID for - * @ring: Engine to get the ID for - * - * Do not confuse with ctx->id! Unfortunately we have a name overload - * here: the old context ID we pass to userspace as a handler so that - * they can refer to a context, and the new context ID we pass to the - * ELSP so that the GPU can inform us of the context status via - * interrupts. - * - * The context ID is a portion of the context descriptor, so we can - * just extract the required part from the cached descriptor. - * - * Return: 20-bits globally unique context ID. - */ -u32 intel_execlists_ctx_id(struct intel_context *ctx, - struct intel_engine_cs *engine) -{ - return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT; -} - static void execlists_elsp_write(struct drm_i915_gem_request *rq0, struct drm_i915_gem_request *rq1) { struct intel_engine_cs *engine = rq0->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = rq0->i915; uint64_t desc[2]; if (rq1) { @@ -431,6 +404,20 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0, spin_unlock_irq(&dev_priv->uncore.lock); } +static inline void execlists_context_status_change( + struct drm_i915_gem_request *rq, + unsigned long status) +{ + /* + * Only used when GVT-g is enabled now. When GVT-g is disabled, + * The compiler should eliminate this function as dead-code. + */ + if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) + return; + + atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq); +} + static void execlists_context_unqueue(struct intel_engine_cs *engine) { struct drm_i915_gem_request *req0 = NULL, *req1 = NULL; @@ -442,7 +429,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) * If irqs are not active generate a warning as batches that finish * without the irqs may get lost and a GPU Hang may occur. 
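 * (The ELSP holds at most two ports: the loop below coalesces
 * consecutive same-context requests into req0, keeping only the
 * newest tail, and picks the first different context as req1;
 * under GVT-g a context that requires single submission is never
 * paired with another.)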
*/ - WARN_ON(!intel_irqs_enabled(engine->dev->dev_private)); + WARN_ON(!intel_irqs_enabled(engine->i915)); /* Try to read in pairs */ list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue, @@ -453,10 +440,24 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) /* Same ctx: ignore first request, as second request * will update tail past first request's workload */ cursor->elsp_submitted = req0->elsp_submitted; - list_move_tail(&req0->execlist_link, - &engine->execlist_retired_req_list); + list_del(&req0->execlist_link); + i915_gem_request_unreference(req0); req0 = cursor; } else { + if (IS_ENABLED(CONFIG_DRM_I915_GVT)) { + /* + * req0 (after merged) ctx requires single + * submission, stop picking + */ + if (req0->ctx->execlists_force_single_submission) + break; + /* + * req0 ctx doesn't require single submission, + * but next req ctx requires, stop picking + */ + if (cursor->ctx->execlists_force_single_submission) + break; + } req1 = cursor; WARN_ON(req1->elsp_submitted); break; @@ -466,6 +467,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) if (unlikely(!req0)) return; + execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN); + + if (req1) + execlists_context_status_change(req1, + INTEL_CONTEXT_SCHEDULE_IN); + if (req0->elsp_submitted & engine->idle_lite_restore_wa) { /* * WaIdleLiteRestore: make sure we never cause a lite restore @@ -486,7 +493,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) } static unsigned int -execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id) +execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id) { struct drm_i915_gem_request *head_req; @@ -496,19 +503,18 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id) struct drm_i915_gem_request, execlist_link); - if (!head_req) - return 0; - - if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id)) - return 0; + if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id))) + return 0; WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); if (--head_req->elsp_submitted > 0) return 0; - list_move_tail(&head_req->execlist_link, - &engine->execlist_retired_req_list); + execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT); + + list_del(&head_req->execlist_link); + i915_gem_request_unreference(head_req); return 1; } @@ -517,7 +523,7 @@ static u32 get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, u32 *context_id) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 status; read_pointer %= GEN8_CSB_ENTRIES; @@ -535,7 +541,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, /** * intel_lrc_irq_handler() - handle Context Switch interrupts - * @engine: Engine Command Streamer to handle. + * @data: tasklet handler passed in unsigned long * * Check the unread Context Status Buffers and manage the submission of new * contexts to the ELSP accordingly. 
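 *
 * A sketch of the expected hookup (the registration itself is not in
 * this hunk; the cast matches the handler's unsigned long parameter):
 *
 *	tasklet_init(&engine->irq_tasklet,
 *		     intel_lrc_irq_handler, (unsigned long)engine);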
@@ -543,7 +549,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, static void intel_lrc_irq_handler(unsigned long data) { struct intel_engine_cs *engine = (struct intel_engine_cs *)data; - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 status_pointer; unsigned int read_pointer, write_pointer; u32 csb[GEN8_CSB_ENTRIES][2]; @@ -612,11 +618,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request) struct drm_i915_gem_request *cursor; int num_elements = 0; - if (request->ctx != request->i915->kernel_context) - intel_lr_context_pin(request->ctx, engine); - - i915_gem_request_reference(request); - spin_lock_bh(&engine->execlist_lock); list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) @@ -633,12 +634,14 @@ static void execlists_context_queue(struct drm_i915_gem_request *request) if (request->ctx == tail_req->ctx) { WARN(tail_req->elsp_submitted != 0, "More than 2 already-submitted reqs queued\n"); - list_move_tail(&tail_req->execlist_link, - &engine->execlist_retired_req_list); + list_del(&tail_req->execlist_link); + i915_gem_request_unreference(tail_req); } } + i915_gem_request_reference(request); list_add_tail(&request->execlist_link, &engine->execlist_queue); + request->ctx_hw_id = request->ctx->hw_id; if (num_elements == 0) execlists_context_unqueue(engine); @@ -698,9 +701,23 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req, int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) { - int ret = 0; + struct intel_engine_cs *engine = request->engine; + struct intel_context *ce = &request->ctx->engine[engine->id]; + int ret; + + /* Flush enough space to reduce the likelihood of waiting after + * we start building the request - in which case we will just + * have to repeat work. + */ + request->reserved_space += EXECLISTS_REQUEST_SIZE; + + if (!ce->state) { + ret = execlists_context_deferred_alloc(request->ctx, engine); + if (ret) + return ret; + } - request->ringbuf = request->ctx->engine[request->engine->id].ringbuf; + request->ringbuf = ce->ringbuf; if (i915.enable_guc_submission) { /* @@ -708,16 +725,39 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request * going any further, as the i915_add_request() call * later on mustn't fail ... */ - struct intel_guc *guc = &request->i915->guc; - - ret = i915_guc_wq_check_space(guc->execbuf_client); + ret = i915_guc_wq_check_space(request); if (ret) return ret; } - if (request->ctx != request->i915->kernel_context) - ret = intel_lr_context_pin(request->ctx, request->engine); + ret = intel_lr_context_pin(request->ctx, engine); + if (ret) + return ret; + + ret = intel_ring_begin(request, 0); + if (ret) + goto err_unpin; + + if (!ce->initialised) { + ret = engine->init_context(request); + if (ret) + goto err_unpin; + + ce->initialised = true; + } + + /* Note that after this point, we have committed to using + * this request as it is being used to both track the + * state of engine initialisation and liveness of the + * golden renderstate above. Think twice before you try + * to cancel/unwind this request now. 
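+ * (Space accounting: EXECLISTS_REQUEST_SIZE was added to
+ * request->reserved_space up front, so intel_ring_begin() has already
+ * proven that the closing breadcrumb will fit; it is subtracted again
+ * here once the request is committed, while any earlier failure
+ * unwinds through err_unpin instead.)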
+ */ + request->reserved_space -= EXECLISTS_REQUEST_SIZE; + return 0; +err_unpin: + intel_lr_context_unpin(request->ctx, engine); return ret; } @@ -734,7 +774,6 @@ static int intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) { struct intel_ringbuffer *ringbuf = request->ringbuf; - struct drm_i915_private *dev_priv = request->i915; struct intel_engine_cs *engine = request->engine; intel_logical_ring_advance(ringbuf); @@ -753,51 +792,28 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) if (intel_engine_stopped(engine)) return 0; - if (engine->last_context != request->ctx) { - if (engine->last_context) - intel_lr_context_unpin(engine->last_context, engine); - if (request->ctx != request->i915->kernel_context) { - intel_lr_context_pin(request->ctx, engine); - engine->last_context = request->ctx; - } else { - engine->last_context = NULL; - } - } + /* We keep the previous context alive until we retire the following + * request. This ensures that the context object is still pinned + * for any residual writes the HW makes into it on the context switch + * into the next object following the breadcrumb. Otherwise, we may + * retire the context too early. + */ + request->previous_context = engine->last_context; + engine->last_context = request->ctx; - if (dev_priv->guc.execbuf_client) - i915_guc_submit(dev_priv->guc.execbuf_client, request); + if (i915.enable_guc_submission) + i915_guc_submit(request); else execlists_context_queue(request); return 0; } -int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) -{ - /* - * The first call merely notes the reserve request and is common for - * all back ends. The subsequent localised _begin() call actually - * ensures that the reservation is available. Without the begin, if - * the request creator immediately submitted the request without - * adding any commands to it then there might not actually be - * sufficient room for the submission commands. - */ - intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); - - return intel_ring_begin(request, 0); -} - /** * execlists_submission() - submit a batchbuffer for execution, Execlists style - * @dev: DRM device. - * @file: DRM file. - * @ring: Engine Command Streamer to submit to. - * @ctx: Context to employ for this submission. + * @params: execbuffer call parameters. * @args: execbuffer call arguments. * @vmas: list of vmas. - * @batch_obj: the batchbuffer to submit. - * @exec_start: batchbuffer start virtual address pointer. - * @dispatch_flags: translated execbuffer call flags. * * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts * away the submission details of the execbuffer ioctl call.
@@ -881,28 +897,18 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, return 0; } -void intel_execlists_retire_requests(struct intel_engine_cs *engine) +void intel_execlists_cancel_requests(struct intel_engine_cs *engine) { struct drm_i915_gem_request *req, *tmp; - struct list_head retired_list; + LIST_HEAD(cancel_list); - WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); - if (list_empty(&engine->execlist_retired_req_list)) - return; + WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex)); - INIT_LIST_HEAD(&retired_list); spin_lock_bh(&engine->execlist_lock); - list_replace_init(&engine->execlist_retired_req_list, &retired_list); + list_replace_init(&engine->execlist_queue, &cancel_list); spin_unlock_bh(&engine->execlist_lock); - list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) { - struct intel_context *ctx = req->ctx; - struct drm_i915_gem_object *ctx_obj = - ctx->engine[engine->id].state; - - if (ctx_obj && (ctx != req->i915->kernel_context)) - intel_lr_context_unpin(ctx, engine); - + list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) { list_del(&req->execlist_link); i915_gem_request_unreference(req); } @@ -910,7 +916,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine) void intel_logical_ring_stop(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret; if (!intel_engine_initialized(engine)) @@ -946,25 +952,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) return 0; } -static int intel_lr_context_do_pin(struct intel_context *ctx, - struct intel_engine_cs *engine) +static int intel_lr_context_pin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; - struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf; + struct drm_i915_private *dev_priv = ctx->i915; + struct intel_context *ce = &ctx->engine[engine->id]; void *vaddr; u32 *lrc_reg_state; int ret; - WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); + lockdep_assert_held(&ctx->i915->dev->struct_mutex); + + if (ce->pin_count++) + return 0; - ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, - PIN_OFFSET_BIAS | GUC_WOPCM_TOP); + ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN, + PIN_OFFSET_BIAS | GUC_WOPCM_TOP); if (ret) - return ret; + goto err; - vaddr = i915_gem_object_pin_map(ctx_obj); + vaddr = i915_gem_object_pin_map(ce->state); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); goto unpin_ctx_obj; @@ -972,65 +979,54 @@ static int intel_lr_context_do_pin(struct intel_context *ctx, lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; - ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf); + ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf); if (ret) goto unpin_map; - ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); + i915_gem_context_reference(ctx); + ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state); intel_lr_context_descriptor_update(ctx, engine); - lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; - ctx->engine[engine->id].lrc_reg_state = lrc_reg_state; - ctx_obj->dirty = true; + + lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start; + ce->lrc_reg_state = lrc_reg_state; + ce->state->dirty = true; /* Invalidate GuC TLB. 
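 * The GEN8_GTCR write that follows asks the GuC to drop stale GGTT
 * translations for the context image and ring that were just pinned;
 * this rationale is inferred here, not stated in the patch.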
*/ if (i915.enable_guc_submission) I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); - return ret; + return 0; unpin_map: - i915_gem_object_unpin_map(ctx_obj); + i915_gem_object_unpin_map(ce->state); unpin_ctx_obj: - i915_gem_object_ggtt_unpin(ctx_obj); - + i915_gem_object_ggtt_unpin(ce->state); +err: + ce->pin_count = 0; return ret; } -static int intel_lr_context_pin(struct intel_context *ctx, - struct intel_engine_cs *engine) +void intel_lr_context_unpin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) { - int ret = 0; + struct intel_context *ce = &ctx->engine[engine->id]; - if (ctx->engine[engine->id].pin_count++ == 0) { - ret = intel_lr_context_do_pin(ctx, engine); - if (ret) - goto reset_pin_count; + lockdep_assert_held(&ctx->i915->dev->struct_mutex); + GEM_BUG_ON(ce->pin_count == 0); - i915_gem_context_reference(ctx); - } - return ret; + if (--ce->pin_count) + return; -reset_pin_count: - ctx->engine[engine->id].pin_count = 0; - return ret; -} + intel_unpin_ringbuffer_obj(ce->ringbuf); -void intel_lr_context_unpin(struct intel_context *ctx, - struct intel_engine_cs *engine) -{ - struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; + i915_gem_object_unpin_map(ce->state); + i915_gem_object_ggtt_unpin(ce->state); - WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); - if (--ctx->engine[engine->id].pin_count == 0) { - i915_gem_object_unpin_map(ctx_obj); - intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf); - i915_gem_object_ggtt_unpin(ctx_obj); - ctx->engine[engine->id].lrc_vma = NULL; - ctx->engine[engine->id].lrc_desc = 0; - ctx->engine[engine->id].lrc_reg_state = NULL; + ce->lrc_vma = NULL; + ce->lrc_desc = 0; + ce->lrc_reg_state = NULL; - i915_gem_context_unreference(ctx); - } + i915_gem_context_unreference(ctx); } static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) @@ -1038,9 +1034,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) int ret, i; struct intel_engine_cs *engine = req->engine; struct intel_ringbuffer *ringbuf = req->ringbuf; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_workarounds *w = &dev_priv->workarounds; + struct i915_workarounds *w = &req->i915->workarounds; if (w->count == 0) return 0; @@ -1106,12 +1100,13 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); /* - * WaDisableLSQCROPERFforOCL:skl + * WaDisableLSQCROPERFforOCL:skl,kbl * This WA is implemented in skl_init_clock_gating() but since * this batch updates GEN8_L3SQCREG4 with default value we need to * set this bit here to retain the WA during flush. */ - if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0)) + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0) || + IS_KBL_REVID(engine->i915, 0, KBL_REVID_E0)) l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | @@ -1163,7 +1158,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx, /** * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA * - * @ring: only applicable for RCS + * @engine: only applicable for RCS * @wa_ctx: structure representing wa_ctx * offset: specifies start of the batch, should be cache-aligned. This is updated * with the offset value received as input. 
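 * (wa_ctx_emit() is assumed to post-increment "index" as it writes
 * each dword into the batch page; both the indirect_ctx and per_ctx
 * batches are packed into the same page of wa_ctx.obj, which is why
 * each helper returns its updated offset to the caller.)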
@@ -1200,7 +1195,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ - if (IS_BROADWELL(engine->dev)) { + if (IS_BROADWELL(engine->i915)) { int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index); if (rc < 0) return rc; @@ -1237,7 +1232,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, /** * gen8_init_perctx_bb() - initialize per ctx batch with WA * - * @ring: only applicable for RCS + * @engine: only applicable for RCS * @wa_ctx: structure representing wa_ctx * offset: specifies start of the batch, should be cache-aligned. * size: size of the batch in DWORDS but HW expects in terms of cachelines @@ -1272,12 +1267,11 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, uint32_t *offset) { int ret; - struct drm_device *dev = engine->dev; uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); /* WaDisableCtxRestoreArbitration:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) || + IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ @@ -1286,6 +1280,22 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, return ret; index = ret; + /* WaClearSlmSpaceAtContextSwitch:kbl */ + /* Actual scratch location is at 128 bytes offset */ + if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) { + uint32_t scratch_addr + = engine->scratch.gtt_offset + 2*CACHELINE_BYTES; + + wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); + wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | + PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE)); + wa_ctx_emit(batch, index, scratch_addr); + wa_ctx_emit(batch, index, 0); + wa_ctx_emit(batch, index, 0); + wa_ctx_emit(batch, index, 0); + } /* Pad to end of cacheline */ while (index % CACHELINE_DWORDS) wa_ctx_emit(batch, index, MI_NOOP); @@ -1298,12 +1308,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, uint32_t *const batch, uint32_t *offset) { - struct drm_device *dev = engine->dev; uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) || + IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) { wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); wa_ctx_emit(batch, index, @@ -1312,7 +1321,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, } /* WaClearTdlStateAckDirtyBits:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { + if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) { wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4)); wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK); @@ -1331,8 +1340,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, } /* WaDisableCtxRestoreArbitration:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) || + IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); @@ -1344,11 +1353,13 @@ static int lrc_setup_wa_ctx_obj(struct 
intel_engine_cs *engine, u32 size) { int ret; - engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev, + engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev, PAGE_ALIGN(size)); - if (!engine->wa_ctx.obj) { + if (IS_ERR(engine->wa_ctx.obj)) { DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); - return -ENOMEM; + ret = PTR_ERR(engine->wa_ctx.obj); + engine->wa_ctx.obj = NULL; + return ret; } ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0); @@ -1382,9 +1393,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) WARN_ON(engine->id != RCS); /* update this when WA for higher Gen are added */ - if (INTEL_INFO(engine->dev)->gen > 9) { + if (INTEL_GEN(engine->i915) > 9) { DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", - INTEL_INFO(engine->dev)->gen); + INTEL_GEN(engine->i915)); return 0; } @@ -1404,7 +1415,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) batch = kmap_atomic(page); offset = 0; - if (INTEL_INFO(engine->dev)->gen == 8) { + if (IS_GEN8(engine->i915)) { ret = gen8_init_indirectctx_bb(engine, &wa_ctx->indirect_ctx, batch, @@ -1418,7 +1429,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) &offset); if (ret) goto out; - } else if (INTEL_INFO(engine->dev)->gen == 9) { + } else if (IS_GEN9(engine->i915)) { ret = gen9_init_indirectctx_bb(engine, &wa_ctx->indirect_ctx, batch, @@ -1444,7 +1455,7 @@ out: static void lrc_init_hws(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; I915_WRITE(RING_HWS_PGA(engine->mmio_base), (u32)engine->status_page.gfx_addr); @@ -1453,8 +1464,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine) static int gen8_init_common_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned int next_context_status_buffer_hw; lrc_init_hws(engine); @@ -1501,8 +1511,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) static int gen8_init_render_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret; ret = gen8_init_common_ring(engine); @@ -1579,7 +1588,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, if (req->ctx->ppgtt && (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) { if (!USES_FULL_48BIT_PPGTT(req->i915) && - !intel_vgpu_active(req->i915->dev)) { + !intel_vgpu_active(req->i915)) { ret = intel_logical_ring_emit_pdps(req); if (ret) return ret; @@ -1607,8 +1616,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1627,8 +1635,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1645,8 +1652,7 @@ static int 
gen8_emit_flush(struct drm_i915_gem_request *request, { struct intel_ringbuffer *ringbuf = request->ringbuf; struct intel_engine_cs *engine = ringbuf->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = request->i915; uint32_t cmd; int ret; @@ -1687,9 +1693,10 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, struct intel_ringbuffer *ringbuf = request->ringbuf; struct intel_engine_cs *engine = ringbuf->engine; u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; - bool vf_flush_wa = false; + bool vf_flush_wa = false, dc_flush_wa = false; u32 flags = 0; int ret; + int len; flags |= PIPE_CONTROL_CS_STALL; @@ -1714,11 +1721,23 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL * pipe control. */ - if (IS_GEN9(engine->dev)) + if (IS_GEN9(request->i915)) vf_flush_wa = true; + + /* WaForGAMHang:kbl */ + if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0)) + dc_flush_wa = true; } - ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6); + len = 6; + + if (vf_flush_wa) + len += 6; + + if (dc_flush_wa) + len += 12; + + ret = intel_ring_begin(request, len); if (ret) return ret; @@ -1731,12 +1750,31 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, intel_logical_ring_emit(ringbuf, 0); } + if (dc_flush_wa) { + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); + intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + } + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); intel_logical_ring_emit(ringbuf, flags); intel_logical_ring_emit(ringbuf, scratch_addr); intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, 0); + + if (dc_flush_wa) { + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); + intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + intel_logical_ring_emit(ringbuf, 0); + } + intel_logical_ring_advance(ringbuf); return 0; @@ -1782,11 +1820,6 @@ static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno) */ #define WA_TAIL_DWORDS 2 -static inline u32 hws_seqno_address(struct intel_engine_cs *engine) -{ - return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR; -} - static int gen8_emit_request(struct drm_i915_gem_request *request) { struct intel_ringbuffer *ringbuf = request->ringbuf; @@ -1802,7 +1835,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request) intel_logical_ring_emit(ringbuf, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); intel_logical_ring_emit(ringbuf, - hws_seqno_address(request->engine) | + intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT); intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); @@ -1832,7 +1865,8 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request) (PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE)); - intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine)); + intel_logical_ring_emit(ringbuf, + intel_hws_seqno_address(request->engine)); intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, 
i915_gem_request_get_seqno(request)); /* We're thrashing one dword of HWS. */ @@ -1894,7 +1928,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req) /** * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer * - * @ring: Engine Command Streamer. + * @engine: Engine Command Streamer. * */ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) @@ -1911,7 +1945,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) tasklet_kill(&engine->irq_tasklet); - dev_priv = engine->dev->dev_private; + dev_priv = engine->i915; if (engine->buffer) { intel_logical_ring_stop(engine); @@ -1928,18 +1962,18 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) i915_gem_object_unpin_map(engine->status_page.obj); engine->status_page.obj = NULL; } + intel_lr_context_unpin(dev_priv->kernel_context, engine); engine->idle_lite_restore_wa = 0; engine->disable_lite_restore_wa = false; engine->ctx_desc_template = 0; lrc_destroy_wa_ctx_obj(engine); - engine->dev = NULL; + engine->i915 = NULL; } static void -logical_ring_default_vfuncs(struct drm_device *dev, - struct intel_engine_cs *engine) +logical_ring_default_vfuncs(struct intel_engine_cs *engine) { /* Default vfuncs which can be overriden by each engine. */ engine->init_hw = gen8_init_common_ring; @@ -1950,7 +1984,7 @@ logical_ring_default_vfuncs(struct drm_device *dev, engine->emit_bb_start = gen8_emit_bb_start; engine->get_seqno = gen8_get_seqno; engine->set_seqno = gen8_set_seqno; - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) { engine->irq_seqno_barrier = bxt_a_seqno_barrier; engine->set_seqno = bxt_a_set_seqno; } @@ -1961,6 +1995,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; + init_waitqueue_head(&engine->irq_queue); } static int @@ -1981,32 +2016,68 @@ lrc_setup_hws(struct intel_engine_cs *engine, return 0; } -static int -logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine) +static const struct logical_ring_info { + const char *name; + unsigned exec_id; + unsigned guc_id; + u32 mmio_base; + unsigned irq_shift; +} logical_rings[] = { + [RCS] = { + .name = "render ring", + .exec_id = I915_EXEC_RENDER, + .guc_id = GUC_RENDER_ENGINE, + .mmio_base = RENDER_RING_BASE, + .irq_shift = GEN8_RCS_IRQ_SHIFT, + }, + [BCS] = { + .name = "blitter ring", + .exec_id = I915_EXEC_BLT, + .guc_id = GUC_BLITTER_ENGINE, + .mmio_base = BLT_RING_BASE, + .irq_shift = GEN8_BCS_IRQ_SHIFT, + }, + [VCS] = { + .name = "bsd ring", + .exec_id = I915_EXEC_BSD, + .guc_id = GUC_VIDEO_ENGINE, + .mmio_base = GEN6_BSD_RING_BASE, + .irq_shift = GEN8_VCS1_IRQ_SHIFT, + }, + [VCS2] = { + .name = "bsd2 ring", + .exec_id = I915_EXEC_BSD, + .guc_id = GUC_VIDEO_ENGINE2, + .mmio_base = GEN8_BSD2_RING_BASE, + .irq_shift = GEN8_VCS2_IRQ_SHIFT, + }, + [VECS] = { + .name = "video enhancement ring", + .exec_id = I915_EXEC_VEBOX, + .guc_id = GUC_VIDEOENHANCE_ENGINE, + .mmio_base = VEBOX_RING_BASE, + .irq_shift = GEN8_VECS_IRQ_SHIFT, + }, +}; + +static struct intel_engine_cs * +logical_ring_setup(struct drm_device *dev, enum intel_engine_id id) { + const struct logical_ring_info *info = &logical_rings[id]; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_context *dctx = dev_priv->kernel_context; + struct intel_engine_cs *engine = 
&dev_priv->engine[id]; enum forcewake_domains fw_domains; - int ret; - - /* Intentionally left blank. */ - engine->buffer = NULL; - - engine->dev = dev; - INIT_LIST_HEAD(&engine->active_list); - INIT_LIST_HEAD(&engine->request_list); - i915_gem_batch_pool_init(dev, &engine->batch_pool); - init_waitqueue_head(&engine->irq_queue); - INIT_LIST_HEAD(&engine->buffers); - INIT_LIST_HEAD(&engine->execlist_queue); - INIT_LIST_HEAD(&engine->execlist_retired_req_list); - spin_lock_init(&engine->execlist_lock); + engine->id = id; + engine->name = info->name; + engine->exec_id = info->exec_id; + engine->guc_id = info->guc_id; + engine->mmio_base = info->mmio_base; - tasklet_init(&engine->irq_tasklet, - intel_lrc_irq_handler, (unsigned long)engine); + engine->i915 = dev_priv; - logical_ring_init_platform_invariants(engine); + /* Intentionally left blank. */ + engine->buffer = NULL; fw_domains = intel_uncore_forcewake_for_reg(dev_priv, RING_ELSP(engine), @@ -2022,20 +2093,44 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine) engine->fw_domains = fw_domains; + INIT_LIST_HEAD(&engine->active_list); + INIT_LIST_HEAD(&engine->request_list); + INIT_LIST_HEAD(&engine->buffers); + INIT_LIST_HEAD(&engine->execlist_queue); + spin_lock_init(&engine->execlist_lock); + + tasklet_init(&engine->irq_tasklet, + intel_lrc_irq_handler, (unsigned long)engine); + + logical_ring_init_platform_invariants(engine); + logical_ring_default_vfuncs(engine); + logical_ring_default_irqs(engine, info->irq_shift); + + intel_engine_init_hangcheck(engine); + i915_gem_batch_pool_init(dev, &engine->batch_pool); + + return engine; +} + +static int +logical_ring_init(struct intel_engine_cs *engine) +{ + struct i915_gem_context *dctx = engine->i915->kernel_context; + int ret; + ret = i915_cmd_parser_init_ring(engine); if (ret) goto error; - ret = intel_lr_context_deferred_alloc(dctx, engine); + ret = execlists_context_deferred_alloc(dctx, engine); if (ret) goto error; /* As this is the default context, always pin it */ - ret = intel_lr_context_do_pin(dctx, engine); + ret = intel_lr_context_pin(dctx, engine); if (ret) { - DRM_ERROR( - "Failed to pin and map ringbuffer %s: %d\n", - engine->name, ret); + DRM_ERROR("Failed to pin context for %s: %d\n", + engine->name, ret); goto error; } @@ -2055,22 +2150,12 @@ error: static int logical_render_ring_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[RCS]; + struct intel_engine_cs *engine = logical_ring_setup(dev, RCS); int ret; - engine->name = "render ring"; - engine->id = RCS; - engine->exec_id = I915_EXEC_RENDER; - engine->guc_id = GUC_RENDER_ENGINE; - engine->mmio_base = RENDER_RING_BASE; - - logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT); if (HAS_L3_DPF(dev)) engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - logical_ring_default_vfuncs(dev, engine); - /* Override some for render ring. 
*/ if (INTEL_INFO(dev)->gen >= 9) engine->init_hw = gen9_init_render_ring; @@ -2081,8 +2166,6 @@ static int logical_render_ring_init(struct drm_device *dev) engine->emit_flush = gen8_emit_flush_render; engine->emit_request = gen8_emit_request_render; - engine->dev = dev; - ret = intel_init_pipe_control(engine); if (ret) return ret; @@ -2098,7 +2181,7 @@ static int logical_render_ring_init(struct drm_device *dev) ret); } - ret = logical_ring_init(dev, engine); + ret = logical_ring_init(engine); if (ret) { lrc_destroy_wa_ctx_obj(engine); } @@ -2108,70 +2191,30 @@ static int logical_render_ring_init(struct drm_device *dev) static int logical_bsd_ring_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[VCS]; + struct intel_engine_cs *engine = logical_ring_setup(dev, VCS); - engine->name = "bsd ring"; - engine->id = VCS; - engine->exec_id = I915_EXEC_BSD; - engine->guc_id = GUC_VIDEO_ENGINE; - engine->mmio_base = GEN6_BSD_RING_BASE; - - logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, engine); - - return logical_ring_init(dev, engine); + return logical_ring_init(engine); } static int logical_bsd2_ring_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; + struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2); - engine->name = "bsd2 ring"; - engine->id = VCS2; - engine->exec_id = I915_EXEC_BSD; - engine->guc_id = GUC_VIDEO_ENGINE2; - engine->mmio_base = GEN8_BSD2_RING_BASE; - - logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, engine); - - return logical_ring_init(dev, engine); + return logical_ring_init(engine); } static int logical_blt_ring_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[BCS]; + struct intel_engine_cs *engine = logical_ring_setup(dev, BCS); - engine->name = "blitter ring"; - engine->id = BCS; - engine->exec_id = I915_EXEC_BLT; - engine->guc_id = GUC_BLITTER_ENGINE; - engine->mmio_base = BLT_RING_BASE; - - logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, engine); - - return logical_ring_init(dev, engine); + return logical_ring_init(engine); } static int logical_vebox_ring_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[VECS]; - - engine->name = "video enhancement ring"; - engine->id = VECS; - engine->exec_id = I915_EXEC_VEBOX; - engine->guc_id = GUC_VIDEOENHANCE_ENGINE; - engine->mmio_base = VEBOX_RING_BASE; + struct intel_engine_cs *engine = logical_ring_setup(dev, VECS); - logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, engine); - - return logical_ring_init(dev, engine); + return logical_ring_init(engine); } /** @@ -2232,7 +2275,7 @@ cleanup_render_ring: } static u32 -make_rpcs(struct drm_device *dev) +make_rpcs(struct drm_i915_private *dev_priv) { u32 rpcs = 0; @@ -2240,7 +2283,7 @@ make_rpcs(struct drm_device *dev) * No explicit RPCS request is needed to ensure full * slice/subslice/EU enablement prior to Gen9. */ - if (INTEL_INFO(dev)->gen < 9) + if (INTEL_GEN(dev_priv) < 9) return 0; /* @@ -2249,24 +2292,24 @@ make_rpcs(struct drm_device *dev) * must make an explicit request through RPCS for full * enablement. 
*/ - if (INTEL_INFO(dev)->has_slice_pg) { + if (INTEL_INFO(dev_priv)->has_slice_pg) { rpcs |= GEN8_RPCS_S_CNT_ENABLE; - rpcs |= INTEL_INFO(dev)->slice_total << + rpcs |= INTEL_INFO(dev_priv)->slice_total << GEN8_RPCS_S_CNT_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } - if (INTEL_INFO(dev)->has_subslice_pg) { + if (INTEL_INFO(dev_priv)->has_subslice_pg) { rpcs |= GEN8_RPCS_SS_CNT_ENABLE; - rpcs |= INTEL_INFO(dev)->subslice_per_slice << + rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice << GEN8_RPCS_SS_CNT_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } - if (INTEL_INFO(dev)->has_eu_pg) { - rpcs |= INTEL_INFO(dev)->eu_per_subslice << + if (INTEL_INFO(dev_priv)->has_eu_pg) { + rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice << GEN8_RPCS_EU_MIN_SHIFT; - rpcs |= INTEL_INFO(dev)->eu_per_subslice << + rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice << GEN8_RPCS_EU_MAX_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } @@ -2278,9 +2321,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) { u32 indirect_ctx_offset; - switch (INTEL_INFO(engine->dev)->gen) { + switch (INTEL_GEN(engine->i915)) { default: - MISSING_CASE(INTEL_INFO(engine->dev)->gen); + MISSING_CASE(INTEL_GEN(engine->i915)); /* fall through */ case 9: indirect_ctx_offset = @@ -2296,13 +2339,12 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) } static int -populate_lr_context(struct intel_context *ctx, +populate_lr_context(struct i915_gem_context *ctx, struct drm_i915_gem_object *ctx_obj, struct intel_engine_cs *engine, struct intel_ringbuffer *ringbuf) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = ctx->i915; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; void *vaddr; u32 *reg_state; @@ -2340,7 +2382,7 @@ populate_lr_context(struct intel_context *ctx, RING_CONTEXT_CONTROL(engine), _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - (HAS_RESOURCE_STREAMER(dev) ? + (HAS_RESOURCE_STREAMER(dev_priv) ? CTX_CTRL_RS_CTX_ENABLE : 0))); ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base), 0); @@ -2429,7 +2471,7 @@ populate_lr_context(struct intel_context *ctx, if (engine->id == RCS) { reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, - make_rpcs(dev)); + make_rpcs(dev_priv)); } i915_gem_object_unpin_map(ctx_obj); @@ -2438,39 +2480,8 @@ populate_lr_context(struct intel_context *ctx, } /** - * intel_lr_context_free() - free the LRC specific bits of a context - * @ctx: the LR context to free. - * - * The real context freeing is done in i915_gem_context_free: this only - * takes care of the bits that are LRC related: the per-engine backing - * objects and the logical ringbuffer. 
- */ -void intel_lr_context_free(struct intel_context *ctx) -{ - int i; - - for (i = I915_NUM_ENGINES; --i >= 0; ) { - struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf; - struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state; - - if (!ctx_obj) - continue; - - if (ctx == ctx->i915->kernel_context) { - intel_unpin_ringbuffer_obj(ringbuf); - i915_gem_object_ggtt_unpin(ctx_obj); - i915_gem_object_unpin_map(ctx_obj); - } - - WARN_ON(ctx->engine[i].pin_count); - intel_ringbuffer_free(ringbuf); - drm_gem_object_unreference(&ctx_obj->base); - } -} - -/** * intel_lr_context_size() - return the size of the context for an engine - * @ring: which engine to find the context size for + * @engine: which engine to find the context size for * * Each engine may require a different amount of space for a context image, * so when allocating (or copying) an image, this function can be used to @@ -2486,11 +2497,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) { int ret = 0; - WARN_ON(INTEL_INFO(engine->dev)->gen < 8); + WARN_ON(INTEL_GEN(engine->i915) < 8); switch (engine->id) { case RCS: - if (INTEL_INFO(engine->dev)->gen >= 9) + if (INTEL_GEN(engine->i915) >= 9) ret = GEN9_LR_CONTEXT_RENDER_SIZE; else ret = GEN8_LR_CONTEXT_RENDER_SIZE; @@ -2507,9 +2518,9 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) } /** - * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context + * execlists_context_deferred_alloc() - create the LRC specific bits of a context * @ctx: LR context to create. - * @ring: engine to be used with the context. + * @engine: engine to be used with the context. * * This function can be called more than once, with different engines, if we plan * to use the context with them. The context backing objects and the ringbuffers @@ -2519,31 +2530,29 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) * * Return: non-zero on error. 
*/ - -int intel_lr_context_deferred_alloc(struct intel_context *ctx, - struct intel_engine_cs *engine) +static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; struct drm_i915_gem_object *ctx_obj; + struct intel_context *ce = &ctx->engine[engine->id]; uint32_t context_size; struct intel_ringbuffer *ringbuf; int ret; - WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); - WARN_ON(ctx->engine[engine->id].state); + WARN_ON(ce->state); context_size = round_up(intel_lr_context_size(engine), 4096); /* One extra page as the sharing data between driver and GuC */ context_size += PAGE_SIZE * LRC_PPHWSP_PN; - ctx_obj = i915_gem_alloc_object(dev, context_size); - if (!ctx_obj) { + ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size); + if (IS_ERR(ctx_obj)) { DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); - return -ENOMEM; + return PTR_ERR(ctx_obj); } - ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE); + ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size); if (IS_ERR(ringbuf)) { ret = PTR_ERR(ringbuf); goto error_deref_obj; @@ -2555,48 +2564,29 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx, goto error_ringbuf; } - ctx->engine[engine->id].ringbuf = ringbuf; - ctx->engine[engine->id].state = ctx_obj; - - if (ctx != ctx->i915->kernel_context && engine->init_context) { - struct drm_i915_gem_request *req; - - req = i915_gem_request_alloc(engine, ctx); - if (IS_ERR(req)) { - ret = PTR_ERR(req); - DRM_ERROR("ring create req: %d\n", ret); - goto error_ringbuf; - } + ce->ringbuf = ringbuf; + ce->state = ctx_obj; + ce->initialised = engine->init_context == NULL; - ret = engine->init_context(req); - i915_add_request_no_flush(req); - if (ret) { - DRM_ERROR("ring init context: %d\n", - ret); - goto error_ringbuf; - } - } return 0; error_ringbuf: intel_ringbuffer_free(ringbuf); error_deref_obj: drm_gem_object_unreference(&ctx_obj->base); - ctx->engine[engine->id].ringbuf = NULL; - ctx->engine[engine->id].state = NULL; + ce->ringbuf = NULL; + ce->state = NULL; return ret; } void intel_lr_context_reset(struct drm_i915_private *dev_priv, - struct intel_context *ctx) + struct i915_gem_context *ctx) { struct intel_engine_cs *engine; for_each_engine(engine, dev_priv) { - struct drm_i915_gem_object *ctx_obj = - ctx->engine[engine->id].state; - struct intel_ringbuffer *ringbuf = - ctx->engine[engine->id].ringbuf; + struct intel_context *ce = &ctx->engine[engine->id]; + struct drm_i915_gem_object *ctx_obj = ce->state; void *vaddr; uint32_t *reg_state; @@ -2615,7 +2605,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv, i915_gem_object_unpin_map(ctx_obj); - ringbuf->head = 0; - ringbuf->tail = 0; + ce->ringbuf->head = 0; + ce->ringbuf->tail = 0; } } diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 60a7385bc531..2b8255c19dcc 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -57,6 +57,11 @@ #define GEN8_CSB_READ_PTR(csb_status) \ (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8) +enum { + INTEL_CONTEXT_SCHEDULE_IN = 0, + INTEL_CONTEXT_SCHEDULE_OUT, +}; + /* Logical Rings */ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); @@ -99,30 +104,27 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf, #define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) #define 
LRC_STATE_PN (LRC_PPHWSP_PN + 1) -void intel_lr_context_free(struct intel_context *ctx); +struct i915_gem_context; + uint32_t intel_lr_context_size(struct intel_engine_cs *engine); -int intel_lr_context_deferred_alloc(struct intel_context *ctx, - struct intel_engine_cs *engine); -void intel_lr_context_unpin(struct intel_context *ctx, +void intel_lr_context_unpin(struct i915_gem_context *ctx, struct intel_engine_cs *engine); struct drm_i915_private; void intel_lr_context_reset(struct drm_i915_private *dev_priv, - struct intel_context *ctx); -uint64_t intel_lr_context_descriptor(struct intel_context *ctx, + struct i915_gem_context *ctx); +uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, struct intel_engine_cs *engine); -u32 intel_execlists_ctx_id(struct intel_context *ctx, - struct intel_engine_cs *engine); - /* Execlists */ -int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); +int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, + int enable_execlists); struct i915_execbuffer_params; int intel_execlists_submission(struct i915_execbuffer_params *params, struct drm_i915_gem_execbuffer2 *args, struct list_head *vmas); -void intel_execlists_retire_requests(struct intel_engine_cs *engine); +void intel_execlists_cancel_requests(struct intel_engine_cs *engine); #endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index bc53c0dd34d0..e9082185a375 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -190,7 +190,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) /* Set the dithering flag on LVDS as needed, note that there is no * special lvds dither control bit on pch-split platforms, dithering is * only controlled through the PIPECONF reg. */ - if (INTEL_INFO(dev)->gen == 4) { + if (IS_GEN4(dev_priv)) { /* Bspec wording suggests that LVDS port dithering only exists * for 18bpp panels. 
*/ if (crtc->config->dither && crtc->config->pipe_bpp == 18) @@ -547,7 +547,6 @@ static int intel_lvds_set_property(struct drm_connector *connector, static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { .get_modes = intel_lvds_get_modes, .mode_valid = intel_lvds_mode_valid, - .best_encoder = intel_best_encoder, }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { @@ -556,6 +555,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_lvds_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .early_unregister = intel_connector_unregister, .destroy = intel_lvds_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -978,7 +978,7 @@ void intel_lvds_init(struct drm_device *dev) DRM_MODE_CONNECTOR_LVDS); drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS, NULL); + DRM_MODE_ENCODER_LVDS, "LVDS"); intel_encoder->enable = intel_enable_lvds; intel_encoder->pre_enable = intel_pre_enable_lvds; @@ -992,7 +992,6 @@ void intel_lvds_init(struct drm_device *dev) intel_encoder->get_hw_state = intel_lvds_get_hw_state; intel_encoder->get_config = intel_lvds_get_config; intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector->unregister = intel_connector_unregister; intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_LVDS; @@ -1082,6 +1081,8 @@ void intel_lvds_init(struct drm_device *dev) fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); if (fixed_mode) { fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = fixed_mode->width_mm; + connector->display_info.height_mm = fixed_mode->height_mm; goto out; } } diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 6ba4bf7f2a89..3c1482b8f2f4 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -156,6 +156,16 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv, "Platform that should have a MOCS table does not.\n"); } + /* WaDisableSkipCaching:skl,bxt,kbl */ + if (IS_GEN9(dev_priv)) { + int i; + + for (i = 0; i < table->size; i++) + if (WARN_ON(table->table[i].l3cc_value & + (L3_ESC(1) | L3_SCC(0x7)))) + return false; + } + return result; } @@ -189,7 +199,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index) */ int intel_mocs_init_engine(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_mocs_table table; unsigned int index; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 99e26034ae8d..f6d8a21d2c49 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -240,10 +240,11 @@ struct opregion_asle_ext { #define MAX_DSLP 1500 -static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) +static int swsci(struct drm_i915_private *dev_priv, + u32 function, u32 parm, u32 *parm_out) { - struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_swsci *swsci = dev_priv->opregion.swsci; + struct pci_dev *pdev = dev_priv->dev->pdev; u32 main_function, sub_function, scic; u16 swsci_val; u32 dslp; @@ -293,16 +294,16 @@ static int 
swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) swsci->scic = scic; /* Ensure SCI event is selected and event trigger is cleared. */ - pci_read_config_word(dev->pdev, SWSCI, &swsci_val); + pci_read_config_word(pdev, SWSCI, &swsci_val); if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) { swsci_val |= SWSCI_SCISEL; swsci_val &= ~SWSCI_GSSCIE; - pci_write_config_word(dev->pdev, SWSCI, swsci_val); + pci_write_config_word(pdev, SWSCI, swsci_val); } /* Use event trigger to tell bios to check the mail. */ swsci_val |= SWSCI_GSSCIE; - pci_write_config_word(dev->pdev, SWSCI, swsci_val); + pci_write_config_word(pdev, SWSCI, swsci_val); /* Poll for the result. */ #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0) @@ -336,13 +337,13 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) { - struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); u32 parm = 0; u32 type = 0; u32 port; /* don't care about old stuff for now */ - if (!HAS_DDI(dev)) + if (!HAS_DDI(dev_priv)) return 0; if (intel_encoder->type == INTEL_OUTPUT_DSI) @@ -382,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, parm |= type << (16 + port * 3); - return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL); + return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL); } static const struct { @@ -396,27 +397,28 @@ static const struct { { PCI_D3cold, 0x04 }, }; -int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) +int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, + pci_power_t state) { int i; - if (!HAS_DDI(dev)) + if (!HAS_DDI(dev_priv)) return 0; for (i = 0; i < ARRAY_SIZE(power_state_map); i++) { if (state == power_state_map[i].pci_power_state) - return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE, + return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE, power_state_map[i].parm, NULL); } return -EINVAL; } -static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) +static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_connector *connector; struct opregion_asle *asle = dev_priv->opregion.asle; + struct drm_device *dev = dev_priv->dev; DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); @@ -449,7 +451,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) return 0; } -static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) +static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi) { /* alsi is the current ALS reading in lux. 0 indicates below sensor range, 0xffff indicates above sensor range. 
1-0xfffe are valid */ @@ -457,13 +459,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) return ASLC_ALS_ILLUM_FAILED; } -static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) +static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb) { DRM_DEBUG_DRIVER("PWM freq is not supported\n"); return ASLC_PWM_FREQ_FAILED; } -static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) +static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit) { /* Panel fitting is currently controlled by the X code, so this is a noop until modesetting support works fully */ @@ -471,13 +473,13 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) return ASLC_PFIT_FAILED; } -static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot) +static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot) { DRM_DEBUG_DRIVER("SROT is not supported\n"); return ASLC_ROTATION_ANGLES_FAILED; } -static u32 asle_set_button_array(struct drm_device *dev, u32 iuer) +static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer) { if (!iuer) DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n"); @@ -495,7 +497,7 @@ static u32 asle_set_button_array(struct drm_device *dev, u32 iuer) return ASLC_BUTTON_ARRAY_FAILED; } -static u32 asle_set_convertible(struct drm_device *dev, u32 iuer) +static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer) { if (iuer & ASLE_IUER_CONVERTIBLE) DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n"); @@ -505,7 +507,7 @@ static u32 asle_set_convertible(struct drm_device *dev, u32 iuer) return ASLC_CONVERTIBLE_FAILED; } -static u32 asle_set_docking(struct drm_device *dev, u32 iuer) +static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer) { if (iuer & ASLE_IUER_DOCKING) DRM_DEBUG_DRIVER("Docking is not supported (docked)\n"); @@ -515,7 +517,7 @@ static u32 asle_set_docking(struct drm_device *dev, u32 iuer) return ASLC_DOCKING_FAILED; } -static u32 asle_isct_state(struct drm_device *dev) +static u32 asle_isct_state(struct drm_i915_private *dev_priv) { DRM_DEBUG_DRIVER("ISCT is not supported\n"); return ASLC_ISCT_STATE_FAILED; @@ -527,7 +529,6 @@ static void asle_work(struct work_struct *work) container_of(work, struct intel_opregion, asle_work); struct drm_i915_private *dev_priv = container_of(opregion, struct drm_i915_private, opregion); - struct drm_device *dev = dev_priv->dev; struct opregion_asle *asle = dev_priv->opregion.asle; u32 aslc_stat = 0; u32 aslc_req; @@ -544,40 +545,38 @@ static void asle_work(struct work_struct *work) } if (aslc_req & ASLC_SET_ALS_ILLUM) - aslc_stat |= asle_set_als_illum(dev, asle->alsi); + aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi); if (aslc_req & ASLC_SET_BACKLIGHT) - aslc_stat |= asle_set_backlight(dev, asle->bclp); + aslc_stat |= asle_set_backlight(dev_priv, asle->bclp); if (aslc_req & ASLC_SET_PFIT) - aslc_stat |= asle_set_pfit(dev, asle->pfit); + aslc_stat |= asle_set_pfit(dev_priv, asle->pfit); if (aslc_req & ASLC_SET_PWM_FREQ) - aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb); + aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb); if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES) - aslc_stat |= asle_set_supported_rotation_angles(dev, + aslc_stat |= asle_set_supported_rotation_angles(dev_priv, asle->srot); if (aslc_req & ASLC_BUTTON_ARRAY) - aslc_stat |= asle_set_button_array(dev, asle->iuer); + aslc_stat |= asle_set_button_array(dev_priv, asle->iuer); if (aslc_req & 
ASLC_CONVERTIBLE_INDICATOR) - aslc_stat |= asle_set_convertible(dev, asle->iuer); + aslc_stat |= asle_set_convertible(dev_priv, asle->iuer); if (aslc_req & ASLC_DOCKING_INDICATOR) - aslc_stat |= asle_set_docking(dev, asle->iuer); + aslc_stat |= asle_set_docking(dev_priv, asle->iuer); if (aslc_req & ASLC_ISCT_STATE_CHANGE) - aslc_stat |= asle_isct_state(dev); + aslc_stat |= asle_isct_state(dev_priv); asle->aslc = aslc_stat; } -void intel_opregion_asle_intr(struct drm_device *dev) +void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->opregion.asle) schedule_work(&dev_priv->opregion.asle_work); } @@ -658,10 +657,10 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val) } } -static void intel_didl_outputs(struct drm_device *dev) +static void intel_didl_outputs(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; + struct pci_dev *pdev = dev_priv->dev->pdev; struct drm_connector *connector; acpi_handle handle; struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; @@ -670,7 +669,7 @@ static void intel_didl_outputs(struct drm_device *dev) u32 temp, max_outputs; int i = 0; - handle = ACPI_HANDLE(&dev->pdev->dev); + handle = ACPI_HANDLE(&pdev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) return; @@ -725,7 +724,7 @@ end: blind_set: i = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + list_for_each_entry(connector, &dev_priv->dev->mode_config.connector_list, head) { int output_type = ACPI_OTHER_OUTPUT; if (i >= max_outputs) { DRM_DEBUG_KMS("More than %u outputs in connector list\n", @@ -761,9 +760,8 @@ blind_set: goto end; } -static void intel_setup_cadls(struct drm_device *dev) +static void intel_setup_cadls(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; int i = 0; u32 disp_id; @@ -780,17 +778,16 @@ static void intel_setup_cadls(struct drm_device *dev) } while (++i < 8 && disp_id != 0); } -void intel_opregion_init(struct drm_device *dev) +void intel_opregion_register(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; if (!opregion->header) return; if (opregion->acpi) { - intel_didl_outputs(dev); - intel_setup_cadls(dev); + intel_didl_outputs(dev_priv); + intel_setup_cadls(dev_priv); /* Notify BIOS we are ready to handle ACPI video ext notifs. * Right now, all the events are handled by the ACPI video module. @@ -808,9 +805,8 @@ void intel_opregion_init(struct drm_device *dev) } } -void intel_opregion_fini(struct drm_device *dev) +void intel_opregion_unregister(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; if (!opregion->header) @@ -842,9 +838,8 @@ void intel_opregion_fini(struct drm_device *dev) opregion->lid_state = NULL; } -static void swsci_setup(struct drm_device *dev) +static void swsci_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; bool requested_callbacks = false; u32 tmp; @@ -854,7 +849,7 @@ static void swsci_setup(struct drm_device *dev) opregion->swsci_sbcb_sub_functions = 1; /* We use GBDA to ask for supported GBDA calls. 
*/ - if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) { + if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) { /* make the bits match the sub-function codes */ tmp <<= 1; opregion->swsci_gbda_sub_functions |= tmp; @@ -865,7 +860,7 @@ static void swsci_setup(struct drm_device *dev) * must not call interfaces that are not specifically requested by the * bios. */ - if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) { + if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) { /* here, the bits already match sub-function codes */ opregion->swsci_sbcb_sub_functions |= tmp; requested_callbacks = true; @@ -876,7 +871,7 @@ static void swsci_setup(struct drm_device *dev) * the callback is _requested_. But we still can't call interfaces that * are not requested. */ - if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) { + if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) { /* make the bits match the sub-function codes */ u32 low = tmp & 0x7ff; u32 high = tmp & ~0xfff; /* bit 11 is reserved */ @@ -918,10 +913,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = { { } }; -int intel_opregion_setup(struct drm_device *dev) +int intel_opregion_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; + struct pci_dev *pdev = dev_priv->dev->pdev; u32 asls, mboxes; char buf[sizeof(OPREGION_SIGNATURE)]; int err = 0; @@ -933,7 +928,7 @@ int intel_opregion_setup(struct drm_device *dev) BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400); - pci_read_config_dword(dev->pdev, ASLS, &asls); + pci_read_config_dword(pdev, ASLS, &asls); DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); if (asls == 0) { DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); @@ -965,7 +960,7 @@ int intel_opregion_setup(struct drm_device *dev) if (mboxes & MBOX_SWSCI) { DRM_DEBUG_DRIVER("SWSCI supported\n"); opregion->swsci = base + OPREGION_SWSCI_OFFSET; - swsci_setup(dev); + swsci_setup(dev_priv); } if (mboxes & MBOX_ASLE) { @@ -1014,12 +1009,12 @@ err_out: } int -intel_opregion_get_panel_type(struct drm_device *dev) +intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) { u32 panel_details; int ret; - ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); + ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); if (ret) { DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n", ret); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index bd38e49f7334..eb93f90bb74d 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -168,7 +168,7 @@ struct overlay_registers { }; struct intel_overlay { - struct drm_device *dev; + struct drm_i915_private *i915; struct intel_crtc *crtc; struct drm_i915_gem_object *vid_bo; struct drm_i915_gem_object *old_vid_bo; @@ -190,15 +190,15 @@ struct intel_overlay { static struct overlay_registers __iomem * intel_overlay_map_regs(struct intel_overlay *overlay) { - struct drm_i915_private *dev_priv = to_i915(overlay->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *dev_priv = overlay->i915; struct overlay_registers __iomem *regs; - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; else - regs = 
io_mapping_map_wc(ggtt->mappable, - i915_gem_obj_ggtt_offset(overlay->reg_bo)); + regs = io_mapping_map_wc(dev_priv->ggtt.mappable, + overlay->flip_addr, + PAGE_SIZE); return regs; } @@ -206,7 +206,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) static void intel_overlay_unmap_regs(struct intel_overlay *overlay, struct overlay_registers __iomem *regs) { - if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915)) io_mapping_unmap(regs); } @@ -232,14 +232,13 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, /* overlay needs to be disable in OCMD reg */ static int intel_overlay_on(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; int ret; WARN_ON(overlay->active); - WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); + WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); req = i915_gem_request_alloc(engine, NULL); if (IS_ERR(req)) @@ -266,8 +265,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) static int intel_overlay_continue(struct intel_overlay *overlay, bool load_polyphase_filter) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; u32 flip_addr = overlay->flip_addr; @@ -335,8 +333,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) /* overlay needs to be disabled in OCMD reg */ static int intel_overlay_off(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; u32 flip_addr = overlay->flip_addr; @@ -365,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) intel_ring_emit(engine, flip_addr); intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); /* turn overlay off */ - if (IS_I830(dev)) { + if (IS_I830(dev_priv)) { /* Workaround: Don't disable the overlay fully, since otherwise * it dies on the next OVERLAY_ON cmd. */ intel_ring_emit(engine, MI_NOOP); @@ -408,12 +405,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) */ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; int ret; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + lockdep_assert_held(&dev_priv->dev->struct_mutex); /* Only wait if there is actually an old frame to release to * guarantee forward progress. 
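(Illustrative aside, not part of the patch: the intel_overlay hunks above funnel every register access through one decision. Where OVERLAY_NEEDS_PHYSICAL() holds, the overlay registers live in a physically addressed buffer reached through reg_bo->phys_handle->vaddr; otherwise they sit behind a write-combining mapping of the GTT aperture, which this series re-keys from i915_gem_obj_ggtt_offset(overlay->reg_bo) to the cached overlay->flip_addr. The sketch below models that two-way dispatch in plain userspace C; every type and field in it is a stand-in, not the real i915 structure.)

/*
 * Minimal model of the overlay register-mapping choice. The stub types
 * below are illustrative stand-ins for drm_i915_private/intel_overlay.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_overlay {
	bool needs_physical;	/* models OVERLAY_NEEDS_PHYSICAL(i915) */
	void *phys_vaddr;	/* models reg_bo->phys_handle->vaddr */
	uint32_t flip_addr;	/* cached GTT offset of the register page */
	uint8_t *gtt_base;	/* models dev_priv->ggtt.mappable */
};

static void *map_overlay_regs(const struct fake_overlay *o)
{
	if (o->needs_physical)
		return o->phys_vaddr;		/* already CPU-addressable */
	return o->gtt_base + o->flip_addr;	/* models io_mapping_map_wc() */
}

int main(void)
{
	static uint8_t aperture[2 * 4096];	/* pretend GTT aperture */
	struct fake_overlay o = {
		.needs_physical = false,
		.flip_addr = 4096,
		.gtt_base = aperture,
	};

	printf("regs mapped at aperture+%u -> %p\n",
	       o.flip_addr, map_overlay_regs(&o));
	return 0;
}

(Because both callers, intel_overlay_map_regs() and its _atomic variant, route through the same predicate, swapping the GTT offset lookup for the cached flip_addr only had to happen inside these two helpers.)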
@@ -537,10 +533,10 @@ static int uv_vsubsampling(u32 format) } } -static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) +static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width) { u32 mask, shift, ret; - if (IS_GEN2(dev)) { + if (IS_GEN2(dev_priv)) { mask = 0x1f; shift = 5; } else { @@ -548,7 +544,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) shift = 6; } ret = ((offset + width + mask) >> shift) - (offset >> shift); - if (!IS_GEN2(dev)) + if (!IS_GEN2(dev_priv)) ret <<= 1; ret -= 1; return ret << 2; @@ -741,12 +737,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, int ret, tmp_width; struct overlay_registers __iomem *regs; bool scale_changed = false; - struct drm_device *dev = overlay->dev; + struct drm_i915_private *dev_priv = overlay->i915; u32 swidth, swidthsw, sheight, ostride; enum pipe pipe = overlay->crtc->pipe; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + lockdep_assert_held(&dev_priv->dev->struct_mutex); + WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex)); ret = intel_overlay_release_old_vid(overlay); if (ret != 0) @@ -769,7 +765,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, goto out_unpin; } oconfig = OCONF_CC_OUT_8BIT; - if (IS_GEN4(overlay->dev)) + if (IS_GEN4(dev_priv)) oconfig |= OCONF_CSC_MODE_BT709; oconfig |= pipe == 0 ? OCONF_PIPE_A : OCONF_PIPE_B; @@ -796,7 +792,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, tmp_width = params->src_w; swidth = params->src_w; - swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); + swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width); sheight = params->src_h; iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, ®s->OBUF_0Y); ostride = params->stride_Y; @@ -806,9 +802,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, int uv_vscale = uv_vsubsampling(params->format); u32 tmp_U, tmp_V; swidth |= (params->src_w/uv_hscale) << 16; - tmp_U = calc_swidthsw(overlay->dev, params->offset_U, + tmp_U = calc_swidthsw(dev_priv, params->offset_U, params->src_w/uv_hscale); - tmp_V = calc_swidthsw(overlay->dev, params->offset_V, + tmp_V = calc_swidthsw(dev_priv, params->offset_V, params->src_w/uv_hscale); swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; sheight |= (params->src_h/uv_vscale) << 16; @@ -840,8 +836,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, overlay->old_vid_bo = overlay->vid_bo; overlay->vid_bo = new_bo; - intel_frontbuffer_flip(dev, - INTEL_FRONTBUFFER_OVERLAY(pipe)); + intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe)); return 0; @@ -852,12 +847,12 @@ out_unpin: int intel_overlay_switch_off(struct intel_overlay *overlay) { + struct drm_i915_private *dev_priv = overlay->i915; struct overlay_registers __iomem *regs; - struct drm_device *dev = overlay->dev; int ret; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + lockdep_assert_held(&dev_priv->dev->struct_mutex); + WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex)); ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) @@ -897,15 +892,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, static void update_pfit_vscale_ratio(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - 
struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; u32 pfit_control = I915_READ(PFIT_CONTROL); u32 ratio; /* XXX: This is not the same logic as in the xorg driver, but more in * line with the intel documentation for the i965 */ - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { /* on i965 use the PGM reg to read out the autoscaler values */ ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; } else { @@ -948,7 +942,7 @@ static int check_overlay_scaling(struct put_image_params *rec) return 0; } -static int check_overlay_src(struct drm_device *dev, +static int check_overlay_src(struct drm_i915_private *dev_priv, struct drm_intel_overlay_put_image *rec, struct drm_i915_gem_object *new_bo) { @@ -959,7 +953,7 @@ static int check_overlay_src(struct drm_device *dev, u32 tmp; /* check src dimensions */ - if (IS_845G(dev) || IS_I830(dev)) { + if (IS_845G(dev_priv) || IS_I830(dev_priv)) { if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) return -EINVAL; @@ -1011,14 +1005,14 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; /* stride checking */ - if (IS_I830(dev) || IS_845G(dev)) + if (IS_I830(dev_priv) || IS_845G(dev_priv)) stride_mask = 255; else stride_mask = 63; if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) return -EINVAL; - if (IS_GEN4(dev) && rec->stride_Y < 512) + if (IS_GEN4(dev_priv) && rec->stride_Y < 512) return -EINVAL; tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? @@ -1063,13 +1057,13 @@ static int check_overlay_src(struct drm_device *dev, * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ -static int intel_panel_fitter_pipe(struct drm_device *dev) +static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 pfit_control; /* i830 doesn't have a panel fitter */ - if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) + if (INTEL_GEN(dev_priv) <= 3 && + (IS_I830(dev_priv) || !IS_MOBILE(dev_priv))) return -1; pfit_control = I915_READ(PFIT_CONTROL); @@ -1079,15 +1073,15 @@ static int intel_panel_fitter_pipe(struct drm_device *dev) return -1; /* 965 can place panel fitter on either pipe */ - if (IS_GEN4(dev)) + if (IS_GEN4(dev_priv)) return (pfit_control >> 29) & 0x3; /* older chips can only use pipe 1 */ return 1; } -int intel_overlay_put_image(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_intel_overlay_put_image *put_image_rec = data; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1162,7 +1156,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, /* line too wide, i.e. 
one-line-mode */ if (mode->hdisplay > 1024 && - intel_panel_fitter_pipe(dev) == crtc->pipe) { + intel_panel_fitter_pipe(dev_priv) == crtc->pipe) { overlay->pfit_active = true; update_pfit_vscale_ratio(overlay); } else @@ -1196,7 +1190,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, goto out_unlock; } - ret = check_overlay_src(dev, put_image_rec, new_bo); + ret = check_overlay_src(dev_priv, put_image_rec, new_bo); if (ret != 0) goto out_unlock; params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK; @@ -1284,8 +1278,8 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs) return 0; } -int intel_overlay_attrs(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_intel_overlay_attrs *attrs = data; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1309,7 +1303,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, attrs->contrast = overlay->contrast; attrs->saturation = overlay->saturation; - if (!IS_GEN2(dev)) { + if (!IS_GEN2(dev_priv)) { attrs->gamma0 = I915_READ(OGAMC0); attrs->gamma1 = I915_READ(OGAMC1); attrs->gamma2 = I915_READ(OGAMC2); @@ -1341,7 +1335,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, intel_overlay_unmap_regs(overlay, regs); if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) goto out_unlock; if (overlay->active) { @@ -1371,37 +1365,36 @@ out_unlock: return ret; } -void intel_setup_overlay(struct drm_device *dev) +void intel_setup_overlay(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay *overlay; struct drm_i915_gem_object *reg_bo; struct overlay_registers __iomem *regs; int ret; - if (!HAS_OVERLAY(dev)) + if (!HAS_OVERLAY(dev_priv)) return; overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); if (!overlay) return; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->dev->struct_mutex); if (WARN_ON(dev_priv->overlay)) goto out_free; - overlay->dev = dev; + overlay->i915 = dev_priv; reg_bo = NULL; - if (!OVERLAY_NEEDS_PHYSICAL(dev)) - reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); - if (reg_bo == NULL) - reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); + if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) + reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE); if (reg_bo == NULL) + reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE); + if (IS_ERR(reg_bo)) goto out_free; overlay->reg_bo = reg_bo; - if (OVERLAY_NEEDS_PHYSICAL(dev)) { + if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) { ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); if (ret) { DRM_ERROR("failed to attach phys overlay regs\n"); @@ -1441,25 +1434,23 @@ void intel_setup_overlay(struct drm_device *dev) intel_overlay_unmap_regs(overlay, regs); dev_priv->overlay = overlay; - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->dev->struct_mutex); DRM_INFO("initialized overlay support\n"); return; out_unpin_bo: - if (!OVERLAY_NEEDS_PHYSICAL(dev)) + if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) i915_gem_object_ggtt_unpin(reg_bo); out_free_bo: drm_gem_object_unreference(®_bo->base); out_free: - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->dev->struct_mutex); kfree(overlay); return; } -void intel_cleanup_overlay(struct drm_device *dev) +void intel_cleanup_overlay(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (!dev_priv->overlay) return; @@ 
-1482,18 +1473,17 @@ struct intel_overlay_error_state { static struct overlay_registers __iomem * intel_overlay_map_regs_atomic(struct intel_overlay *overlay) { - struct drm_i915_private *dev_priv = to_i915(overlay->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *dev_priv = overlay->i915; struct overlay_registers __iomem *regs; - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) /* Cast to make sparse happy, but it's wc memory anyway, so * equivalent to the wc io mapping on X86. */ regs = (struct overlay_registers __iomem *) overlay->reg_bo->phys_handle->vaddr; else - regs = io_mapping_map_atomic_wc(ggtt->mappable, - i915_gem_obj_ggtt_offset(overlay->reg_bo)); + regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable, + overlay->flip_addr); return regs; } @@ -1501,15 +1491,13 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, struct overlay_registers __iomem *regs) { - if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915)) io_mapping_unmap_atomic(regs); } - struct intel_overlay_error_state * -intel_overlay_capture_error_state(struct drm_device *dev) +intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay *overlay = dev_priv->overlay; struct intel_overlay_error_state *error; struct overlay_registers __iomem *regs; @@ -1523,10 +1511,7 @@ intel_overlay_capture_error_state(struct drm_device *dev) error->dovsta = I915_READ(DOVSTA); error->isr = I915_READ(ISR); - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - error->base = (__force long)overlay->reg_bo->phys_handle->vaddr; - else - error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); + error->base = overlay->flip_addr; regs = intel_overlay_map_regs_atomic(overlay); if (!regs) diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 8357d571553a..bf721781c259 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1216,7 +1216,7 @@ static int intel_backlight_device_register(struct intel_connector *connector) return 0; } -static void intel_backlight_device_unregister(struct intel_connector *connector) +void intel_backlight_device_unregister(struct intel_connector *connector) { struct intel_panel *panel = &connector->panel; @@ -1230,9 +1230,6 @@ static int intel_backlight_device_register(struct intel_connector *connector) { return 0; } -static void intel_backlight_device_unregister(struct intel_connector *connector) -{ -} #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ /* @@ -1724,6 +1721,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) container_of(panel, struct intel_connector, panel); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP && + intel_dp_aux_init_backlight_funcs(connector) == 0) + return; + + if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI && + intel_dsi_dcs_init_backlight_funcs(connector) == 0) + return; + if (IS_BROXTON(dev_priv)) { panel->backlight.setup = bxt_setup_backlight; panel->backlight.enable = bxt_enable_backlight; @@ -1812,11 +1817,3 @@ void intel_backlight_register(struct drm_device *dev) for_each_intel_connector(dev, connector) intel_backlight_device_register(connector); } - -void intel_backlight_unregister(struct drm_device *dev) -{ - struct intel_connector *connector; - - 
for_each_intel_connector(dev, connector) - intel_backlight_device_unregister(connector); -} diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a7ef45da0a9e..658a75659657 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -26,6 +26,7 @@ */ #include <linux/cpufreq.h> +#include <drm/drm_plane_helper.h> #include "i915_drv.h" #include "intel_drv.h" #include "../../../platform/x86/intel_ips.h" @@ -54,10 +55,38 @@ #define INTEL_RC6p_ENABLE (1<<1) #define INTEL_RC6pp_ENABLE (1<<2) +static void gen9_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */ + I915_WRITE(CHICKEN_PAR1_1, + I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); + + I915_WRITE(GEN8_CONFIG0, + I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES); + + /* WaEnableChickenDCPR:skl,bxt,kbl */ + I915_WRITE(GEN8_CHICKEN_DCPR_1, + I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); + + /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */ + /* WaFbcWakeMemOn:skl,bxt,kbl */ + I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + DISP_FBC_WM_DIS | + DISP_FBC_MEMORY_WAKE); + + /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */ + I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + ILK_DPFC_DISABLE_DUMMY0); +} + static void bxt_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + gen9_init_clock_gating(dev); + /* WaDisableSDEUnitClockGating:bxt */ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); @@ -2012,10 +2041,10 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, } static uint32_t -hsw_compute_linetime_wm(struct drm_device *dev, - struct intel_crtc_state *cstate) +hsw_compute_linetime_wm(const struct intel_crtc_state *cstate) { - struct drm_i915_private *dev_priv = dev->dev_private; + const struct intel_atomic_state *intel_state = + to_intel_atomic_state(cstate->base.state); const struct drm_display_mode *adjusted_mode = &cstate->base.adjusted_mode; u32 linetime, ips_linetime; @@ -2024,7 +2053,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, return 0; if (WARN_ON(adjusted_mode->crtc_clock == 0)) return 0; - if (WARN_ON(dev_priv->cdclk_freq == 0)) + if (WARN_ON(intel_state->cdclk == 0)) return 0; /* The WM are computed with base on how long it takes to fill a single @@ -2033,7 +2062,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, adjusted_mode->crtc_clock); ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, - dev_priv->cdclk_freq); + intel_state->cdclk); return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | PIPE_WM_LINETIME_TIME(linetime); @@ -2146,14 +2175,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8]) static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5]) { /* ILK sprite LP0 latency is 1300 ns */ - if (INTEL_INFO(dev)->gen == 5) + if (IS_GEN5(dev)) wm[0] = 13; } static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) { /* ILK cursor LP0 latency is 1300 ns */ - if (INTEL_INFO(dev)->gen == 5) + if (IS_GEN5(dev)) wm[0] = 13; /* WaDoubleCursorLP3Latency:ivb */ @@ -2309,7 +2338,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) int level, max_level = ilk_wm_max_level(dev), usable_level; struct ilk_wm_maximums max; - pipe_wm = &cstate->wm.optimal.ilk; + pipe_wm = 
&cstate->wm.ilk.optimal; for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { struct intel_plane_state *ps; @@ -2352,7 +2381,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) pipe_wm->wm[0] = pipe_wm->raw_wm[0]; if (IS_HASWELL(dev) || IS_BROADWELL(dev)) - pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate); + pipe_wm->linetime = hsw_compute_linetime_wm(cstate); if (!ilk_validate_pipe_wm(dev, pipe_wm)) return -EINVAL; @@ -2391,7 +2420,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, struct intel_crtc *intel_crtc, struct intel_crtc_state *newstate) { - struct intel_pipe_wm *a = &newstate->wm.intermediate; + struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate; struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk; int level, max_level = ilk_wm_max_level(dev); @@ -2400,7 +2429,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, * currently active watermarks to get values that are safe both before * and after the vblank. */ - *a = newstate->wm.optimal.ilk; + *a = newstate->wm.ilk.optimal; a->pipe_enabled |= b->pipe_enabled; a->sprites_enabled |= b->sprites_enabled; a->sprites_scaled |= b->sprites_scaled; @@ -2429,7 +2458,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, * If our intermediate WM are identical to the final WM, then we can * omit the post-vblank programming; only update if it's different. */ - if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0) + if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0) newstate->wm.need_postvbl_update = false; return 0; @@ -2849,20 +2878,29 @@ skl_wm_plane_id(const struct intel_plane *plane) static void skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, const struct intel_crtc_state *cstate, - const struct intel_wm_config *config, - struct skl_ddb_entry *alloc /* out */) + struct skl_ddb_entry *alloc, /* out */ + int *num_active /* out */) { + struct drm_atomic_state *state = cstate->base.state; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *for_crtc = cstate->base.crtc; - struct drm_crtc *crtc; unsigned int pipe_size, ddb_size; int nth_active_pipe; + int pipe = to_intel_crtc(for_crtc)->pipe; - if (!cstate->base.active) { + if (WARN_ON(!state) || !cstate->base.active) { alloc->start = 0; alloc->end = 0; + *num_active = hweight32(dev_priv->active_crtcs); return; } + if (intel_state->active_pipe_changes) + *num_active = hweight32(intel_state->active_crtcs); + else + *num_active = hweight32(dev_priv->active_crtcs); + if (IS_BROXTON(dev)) ddb_size = BXT_DDB_SIZE; else @@ -2870,25 +2908,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, ddb_size -= 4; /* 4 blocks for bypass path allocation */ - nth_active_pipe = 0; - for_each_crtc(dev, crtc) { - if (!to_intel_crtc(crtc)->active) - continue; - - if (crtc == for_crtc) - break; - - nth_active_pipe++; + /* + * If the state doesn't change the active CRTC's, then there's + * no need to recalculate; the existing pipe allocation limits + * should remain unchanged. Note that we're safe from racing + * commits since any racing commit that changes the active CRTC + * list would need to grab _all_ crtc locks, including the one + * we currently hold. 
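/*
 * Illustrative user-space sketch (not from the patch) of the DDB split
 * scheme used by the reworked skl_ddb_get_pipe_allocation_limits() above:
 * every active pipe gets an equal slice, and a pipe's slice index is the
 * number of lower-order bits set in the active-CRTC mask (the
 * hweight32(active_crtcs & (drm_crtc_mask(for_crtc) - 1)) trick). The
 * single-mask simplification and the 896-block DDB size are assumptions
 * for the example only.
 */
#include <stdio.h>
#include <stdint.h>

struct ddb_entry { uint16_t start, end; };

static void pipe_ddb_limits(uint32_t active_mask, unsigned int pipe,
			    uint16_t ddb_size, struct ddb_entry *alloc)
{
	unsigned int num_active = __builtin_popcount(active_mask);
	/* active pipes below us in the mask give our slice index */
	unsigned int nth = __builtin_popcount(active_mask & ((1u << pipe) - 1));
	uint16_t pipe_size = ddb_size / num_active;

	alloc->start = nth * ddb_size / num_active;
	alloc->end = alloc->start + pipe_size;
}

int main(void)
{
	struct ddb_entry e;

	/* pipes A and C active (bits 0 and 2), SKL-like 896-block DDB */
	pipe_ddb_limits(0x5, 2, 896, &e);
	printf("pipe C DDB blocks: [%u, %u)\n", e.start, e.end);
	return 0;
}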
+ */ + if (!intel_state->active_pipe_changes) { + *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe]; + return; } - pipe_size = ddb_size / config->num_pipes_active; - alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active; + nth_active_pipe = hweight32(intel_state->active_crtcs & + (drm_crtc_mask(for_crtc) - 1)); + pipe_size = ddb_size / hweight32(intel_state->active_crtcs); + alloc->start = nth_active_pipe * ddb_size / *num_active; alloc->end = alloc->start + pipe_size; } -static unsigned int skl_cursor_allocation(const struct intel_wm_config *config) +static unsigned int skl_cursor_allocation(int num_active) { - if (config->num_pipes_active == 1) + if (num_active == 1) return 32; return 8; @@ -2932,6 +2974,46 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, } } +/* + * Determines the downscale amount of a plane for the purposes of watermark calculations. + * The bspec defines downscale amount as: + * + * """ + * Horizontal down scale amount = maximum[1, Horizontal source size / + * Horizontal destination size] + * Vertical down scale amount = maximum[1, Vertical source size / + * Vertical destination size] + * Total down scale amount = Horizontal down scale amount * + * Vertical down scale amount + * """ + * + * Return value is provided in 16.16 fixed point form to retain fractional part. + * Caller should take care of dividing & rounding off the value. + */ +static uint32_t +skl_plane_downscale_amount(const struct intel_plane_state *pstate) +{ + uint32_t downscale_h, downscale_w; + uint32_t src_w, src_h, dst_w, dst_h; + + if (WARN_ON(!pstate->visible)) + return DRM_PLANE_HELPER_NO_SCALING; + + /* n.b., src is 16.16 fixed point, dst is whole integer */ + src_w = drm_rect_width(&pstate->src); + src_h = drm_rect_height(&pstate->src); + dst_w = drm_rect_width(&pstate->dst); + dst_h = drm_rect_height(&pstate->dst); + if (intel_rotation_90_or_270(pstate->base.rotation)) + swap(dst_w, dst_h); + + downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); + downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); + + /* Provide result in 16.16 fixed point */ + return (uint64_t)downscale_w * downscale_h >> 16; +} + static unsigned int skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, const struct drm_plane_state *pstate, @@ -2939,7 +3021,16 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, { struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); struct drm_framebuffer *fb = pstate->fb; + uint32_t down_scale_amount, data_rate; uint32_t width = 0, height = 0; + unsigned format = fb ? 
fb->pixel_format : DRM_FORMAT_XRGB8888; + + if (!intel_pstate->visible) + return 0; + if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR) + return 0; + if (y && format != DRM_FORMAT_NV12) + return 0; width = drm_rect_width(&intel_pstate->src) >> 16; height = drm_rect_height(&intel_pstate->src) >> 16; @@ -2948,17 +3039,21 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, swap(width, height); /* for planar format */ - if (fb->pixel_format == DRM_FORMAT_NV12) { + if (format == DRM_FORMAT_NV12) { if (y) /* y-plane data rate */ - return width * height * - drm_format_plane_cpp(fb->pixel_format, 0); + data_rate = width * height * + drm_format_plane_cpp(format, 0); else /* uv-plane data rate */ - return (width / 2) * (height / 2) * - drm_format_plane_cpp(fb->pixel_format, 1); + data_rate = (width / 2) * (height / 2) * + drm_format_plane_cpp(format, 1); + } else { + /* for packed formats */ + data_rate = width * height * drm_format_plane_cpp(format, 0); } - /* for packed formats */ - return width * height * drm_format_plane_cpp(fb->pixel_format, 0); + down_scale_amount = skl_plane_downscale_amount(intel_pstate); + + return (uint64_t)data_rate * down_scale_amount >> 16; } /* @@ -2967,86 +3062,188 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, * 3 * 4096 * 8192 * 4 < 2^32 */ static unsigned int -skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate) +skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate) { - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); - struct drm_device *dev = intel_crtc->base.dev; + struct drm_crtc_state *cstate = &intel_cstate->base; + struct drm_atomic_state *state = cstate->state; + struct drm_crtc *crtc = cstate->crtc; + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + const struct drm_plane *plane; const struct intel_plane *intel_plane; - unsigned int total_data_rate = 0; + struct drm_plane_state *pstate; + unsigned int rate, total_data_rate = 0; + int id; + int i; - for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { - const struct drm_plane_state *pstate = intel_plane->base.state; + if (WARN_ON(!state)) + return 0; - if (pstate->fb == NULL) - continue; + /* Calculate and cache data rate for each plane */ + for_each_plane_in_state(state, plane, pstate, i) { + id = skl_wm_plane_id(to_intel_plane(plane)); + intel_plane = to_intel_plane(plane); - if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) + if (intel_plane->pipe != intel_crtc->pipe) continue; /* packed/uv */ - total_data_rate += skl_plane_relative_data_rate(cstate, - pstate, - 0); + rate = skl_plane_relative_data_rate(intel_cstate, + pstate, 0); + intel_cstate->wm.skl.plane_data_rate[id] = rate; + + /* y-plane */ + rate = skl_plane_relative_data_rate(intel_cstate, + pstate, 1); + intel_cstate->wm.skl.plane_y_data_rate[id] = rate; + } - if (pstate->fb->pixel_format == DRM_FORMAT_NV12) - /* y-plane */ - total_data_rate += skl_plane_relative_data_rate(cstate, - pstate, - 1); + /* Calculate CRTC's total data rate from cached values */ + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { + int id = skl_wm_plane_id(intel_plane); + + /* packed/uv */ + total_data_rate += intel_cstate->wm.skl.plane_data_rate[id]; + total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id]; } + WARN_ON(cstate->plane_mask && total_data_rate == 0); + return total_data_rate; } -static void +static uint16_t +skl_ddb_min_alloc(const struct drm_plane_state *pstate, + const int y) +{ + 
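/*
 * Sketch (not from the patch) of the 16.16 fixed-point arithmetic used by
 * skl_plane_downscale_amount() above: plane source sizes already carry 16
 * fractional bits, destination sizes are whole pixels, each per-axis ratio
 * is clamped to at least 1.0, and the product is shifted back into 16.16.
 * The example geometry is made up.
 */
#include <stdio.h>
#include <stdint.h>

#define NO_SCALING (1u << 16)	/* 1.0 in 16.16, like DRM_PLANE_HELPER_NO_SCALING */

static uint32_t downscale_amount(uint32_t src_w_1616, uint32_t src_h_1616,
				 uint32_t dst_w, uint32_t dst_h)
{
	uint32_t w = src_w_1616 / dst_w;	/* 16.16 / int = 16.16 */
	uint32_t h = src_h_1616 / dst_h;

	if (w < NO_SCALING)
		w = NO_SCALING;		/* upscaling never costs extra */
	if (h < NO_SCALING)
		h = NO_SCALING;

	/* 16.16 * 16.16 = 32.32; shift once to get back to 16.16 */
	return (uint32_t)(((uint64_t)w * h) >> 16);
}

int main(void)
{
	/* 4096x2160 source shown on a 1920x1080 plane */
	uint32_t d = downscale_amount(4096u << 16, 2160u << 16, 1920, 1080);

	printf("total downscale = %u + %u/65536\n", d >> 16, d & 0xffff);
	return 0;
}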
struct drm_framebuffer *fb = pstate->fb; + struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); + uint32_t src_w, src_h; + uint32_t min_scanlines = 8; + uint8_t plane_bpp; + + if (WARN_ON(!fb)) + return 0; + + /* For packed formats, no y-plane, return 0 */ + if (y && fb->pixel_format != DRM_FORMAT_NV12) + return 0; + + /* For Non Y-tile return 8-blocks */ + if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED && + fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED) + return 8; + + src_w = drm_rect_width(&intel_pstate->src) >> 16; + src_h = drm_rect_height(&intel_pstate->src) >> 16; + + if (intel_rotation_90_or_270(pstate->rotation)) + swap(src_w, src_h); + + /* Halve UV plane width and height for NV12 */ + if (fb->pixel_format == DRM_FORMAT_NV12 && !y) { + src_w /= 2; + src_h /= 2; + } + + if (fb->pixel_format == DRM_FORMAT_NV12 && !y) + plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1); + else + plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0); + + if (intel_rotation_90_or_270(pstate->rotation)) { + switch (plane_bpp) { + case 1: + min_scanlines = 32; + break; + case 2: + min_scanlines = 16; + break; + case 4: + min_scanlines = 8; + break; + case 8: + min_scanlines = 4; + break; + default: + WARN(1, "Unsupported pixel depth %u for rotation", + plane_bpp); + min_scanlines = 32; + } + } + + return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3; +} + +static int skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, struct skl_ddb_allocation *ddb /* out */) { + struct drm_atomic_state *state = cstate->base.state; struct drm_crtc *crtc = cstate->base.crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_wm_config *config = &dev_priv->wm.config; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_plane *intel_plane; + struct drm_plane *plane; + struct drm_plane_state *pstate; enum pipe pipe = intel_crtc->pipe; struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; uint16_t alloc_size, start, cursor_blocks; - uint16_t minimum[I915_MAX_PLANES]; - uint16_t y_minimum[I915_MAX_PLANES]; + uint16_t *minimum = cstate->wm.skl.minimum_blocks; + uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks; unsigned int total_data_rate; + int num_active; + int id, i; + + if (WARN_ON(!state)) + return 0; - skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc); + if (!cstate->base.active) { + ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0; + memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); + memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe])); + return 0; + } + + skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active); alloc_size = skl_ddb_entry_size(alloc); if (alloc_size == 0) { memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); - memset(&ddb->plane[pipe][PLANE_CURSOR], 0, - sizeof(ddb->plane[pipe][PLANE_CURSOR])); - return; + return 0; } - cursor_blocks = skl_cursor_allocation(config); + cursor_blocks = skl_cursor_allocation(num_active); ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks; ddb->plane[pipe][PLANE_CURSOR].end = alloc->end; alloc_size -= cursor_blocks; - alloc->end -= cursor_blocks; /* 1. 
Allocate the minimum required blocks for each active plane */ - for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { - struct drm_plane *plane = &intel_plane->base; - struct drm_framebuffer *fb = plane->state->fb; - int id = skl_wm_plane_id(intel_plane); + for_each_plane_in_state(state, plane, pstate, i) { + intel_plane = to_intel_plane(plane); + id = skl_wm_plane_id(intel_plane); - if (!to_intel_plane_state(plane->state)->visible) + if (intel_plane->pipe != pipe) continue; - if (plane->type == DRM_PLANE_TYPE_CURSOR) + if (!to_intel_plane_state(pstate)->visible) { + minimum[id] = 0; + y_minimum[id] = 0; + continue; + } + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + minimum[id] = 0; + y_minimum[id] = 0; continue; + } - minimum[id] = 8; - alloc_size -= minimum[id]; - y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0; - alloc_size -= y_minimum[id]; + minimum[id] = skl_ddb_min_alloc(pstate, 0); + y_minimum[id] = skl_ddb_min_alloc(pstate, 1); + } + + for (i = 0; i < PLANE_CURSOR; i++) { + alloc_size -= minimum[i]; + alloc_size -= y_minimum[i]; } /* @@ -3056,21 +3253,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, * FIXME: we may not allocate every single block here. */ total_data_rate = skl_get_total_relative_data_rate(cstate); + if (total_data_rate == 0) + return 0; start = alloc->start; for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { - struct drm_plane *plane = &intel_plane->base; - struct drm_plane_state *pstate = intel_plane->base.state; unsigned int data_rate, y_data_rate; uint16_t plane_blocks, y_plane_blocks = 0; int id = skl_wm_plane_id(intel_plane); - if (!to_intel_plane_state(pstate)->visible) - continue; - if (plane->type == DRM_PLANE_TYPE_CURSOR) - continue; - - data_rate = skl_plane_relative_data_rate(cstate, pstate, 0); + data_rate = cstate->wm.skl.plane_data_rate[id]; /* * allocation for (packed formats) or (uv-plane part of planar format): @@ -3081,30 +3273,32 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, plane_blocks += div_u64((uint64_t)alloc_size * data_rate, total_data_rate); - ddb->plane[pipe][id].start = start; - ddb->plane[pipe][id].end = start + plane_blocks; + /* Leave disabled planes at (0,0) */ + if (data_rate) { + ddb->plane[pipe][id].start = start; + ddb->plane[pipe][id].end = start + plane_blocks; + } start += plane_blocks; /* * allocation for y_plane part of planar format: */ - if (pstate->fb->pixel_format == DRM_FORMAT_NV12) { - y_data_rate = skl_plane_relative_data_rate(cstate, - pstate, - 1); - y_plane_blocks = y_minimum[id]; - y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, - total_data_rate); + y_data_rate = cstate->wm.skl.plane_y_data_rate[id]; + + y_plane_blocks = y_minimum[id]; + y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, + total_data_rate); + if (y_data_rate) { ddb->y_plane[pipe][id].start = start; ddb->y_plane[pipe][id].end = start + y_plane_blocks; - - start += y_plane_blocks; } + start += y_plane_blocks; } + return 0; } static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config) @@ -3161,35 +3355,41 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, return ret; } -static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb, - const struct intel_crtc *intel_crtc) +static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, + struct intel_plane_state *pstate) { - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const struct 
skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; + uint64_t adjusted_pixel_rate; + uint64_t downscale_amount; + uint64_t pixel_rate; + + /* Shouldn't reach here on disabled planes... */ + if (WARN_ON(!pstate->visible)) + return 0; /* - * If ddb allocation of pipes changed, it may require recalculation of - * watermarks + * Adjusted plane pixel rate is just the pipe's adjusted pixel rate + * with additional adjustments for plane-specific scaling. */ - if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe))) - return true; + adjusted_pixel_rate = skl_pipe_pixel_rate(cstate); + downscale_amount = skl_plane_downscale_amount(pstate); + + pixel_rate = adjusted_pixel_rate * downscale_amount >> 16; + WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0)); - return false; + return pixel_rate; } -static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, - struct intel_crtc_state *cstate, - struct intel_plane *intel_plane, - uint16_t ddb_allocation, - int level, - uint16_t *out_blocks, /* out */ - uint8_t *out_lines /* out */) +static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, + struct intel_crtc_state *cstate, + struct intel_plane_state *intel_pstate, + uint16_t ddb_allocation, + int level, + uint16_t *out_blocks, /* out */ + uint8_t *out_lines, /* out */ + bool *enabled /* out */) { - struct drm_plane *plane = &intel_plane->base; - struct drm_framebuffer *fb = plane->state->fb; - struct intel_plane_state *intel_pstate = - to_intel_plane_state(plane->state); + struct drm_plane_state *pstate = &intel_pstate->base; + struct drm_framebuffer *fb = pstate->fb; uint32_t latency = dev_priv->wm.skl_latency[level]; uint32_t method1, method2; uint32_t plane_bytes_per_line, plane_blocks_per_line; @@ -3197,20 +3397,24 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, uint32_t selected_result; uint8_t cpp; uint32_t width = 0, height = 0; + uint32_t plane_pixel_rate; - if (latency == 0 || !cstate->base.active || !intel_pstate->visible) - return false; + if (latency == 0 || !cstate->base.active || !intel_pstate->visible) { + *enabled = false; + return 0; + } width = drm_rect_width(&intel_pstate->src) >> 16; height = drm_rect_height(&intel_pstate->src) >> 16; - if (intel_rotation_90_or_270(plane->state->rotation)) + if (intel_rotation_90_or_270(pstate->rotation)) swap(width, height); cpp = drm_format_plane_cpp(fb->pixel_format, 0); - method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), - cpp, latency); - method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), + plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); + + method1 = skl_wm_method1(plane_pixel_rate, cpp, latency); + method2 = skl_wm_method2(plane_pixel_rate, cstate->base.adjusted_mode.crtc_htotal, width, cpp, @@ -3224,7 +3428,7 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { uint32_t min_scanlines = 4; uint32_t y_tile_minimum; - if (intel_rotation_90_or_270(plane->state->rotation)) { + if (intel_rotation_90_or_270(pstate->rotation)) { int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ? 
drm_format_plane_cpp(fb->pixel_format, 1) : drm_format_plane_cpp(fb->pixel_format, 0); @@ -3260,40 +3464,99 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, res_blocks++; } - if (res_blocks >= ddb_allocation || res_lines > 31) - return false; + if (res_blocks >= ddb_allocation || res_lines > 31) { + *enabled = false; + + /* + * If there are no valid level 0 watermarks, then we can't + * support this display configuration. + */ + if (level) { + return 0; + } else { + DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n"); + DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n", + to_intel_crtc(cstate->base.crtc)->pipe, + skl_wm_plane_id(to_intel_plane(pstate->plane)), + res_blocks, ddb_allocation, res_lines); + + return -EINVAL; + } + } *out_blocks = res_blocks; *out_lines = res_lines; + *enabled = true; - return true; + return 0; } -static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, - struct skl_ddb_allocation *ddb, - struct intel_crtc_state *cstate, - int level, - struct skl_wm_level *result) +static int +skl_compute_wm_level(const struct drm_i915_private *dev_priv, + struct skl_ddb_allocation *ddb, + struct intel_crtc_state *cstate, + int level, + struct skl_wm_level *result) { struct drm_device *dev = dev_priv->dev; + struct drm_atomic_state *state = cstate->base.state; struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct drm_plane *plane; struct intel_plane *intel_plane; + struct intel_plane_state *intel_pstate; uint16_t ddb_blocks; enum pipe pipe = intel_crtc->pipe; + int ret; - for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { + /* + * We'll only calculate watermarks for planes that are actually + * enabled, so make sure all other planes are set as disabled. + */ + memset(result, 0, sizeof(*result)); + + for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) { int i = skl_wm_plane_id(intel_plane); + plane = &intel_plane->base; + intel_pstate = NULL; + if (state) + intel_pstate = + intel_atomic_get_existing_plane_state(state, + intel_plane); + + /* + * Note: If we start supporting multiple pending atomic commits + * against the same planes/CRTC's in the future, plane->state + * will no longer be the correct pre-state to use for the + * calculations here and we'll need to change where we get the + * 'unchanged' plane data from. + * + * For now this is fine because we only allow one queued commit + * against a CRTC. Even if the plane isn't modified by this + * transaction and we don't have a plane lock, we still have + * the CRTC's lock, so we know that no other transactions are + * racing with us to update it. 
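/*
 * Sketch (not from the patch) of the level-0 policy that the reworked
 * skl_compute_plane_wm() implements: a watermark level that does not fit
 * in the plane's DDB allocation is merely disabled, except level 0, whose
 * failure now rejects the whole configuration with -EINVAL. Stand-in
 * types; the 31-line limit matches the check above.
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>

static int check_wm_level(int level, uint16_t res_blocks, uint8_t res_lines,
			  uint16_t ddb_allocation, bool *enabled)
{
	if (res_blocks >= ddb_allocation || res_lines > 31) {
		*enabled = false;
		/* without a valid level 0 WM the display config is unusable */
		return level ? 0 : -EINVAL;
	}
	*enabled = true;
	return 0;
}

int main(void)
{
	bool en;

	printf("level 3 overflow -> %d\n",
	       check_wm_level(3, 200, 10, 160, &en));	/* 0: just disabled */
	printf("level 0 overflow -> %d\n",
	       check_wm_level(0, 200, 10, 160, &en));	/* -22: rejected */
	return 0;
}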
+ */ + if (!intel_pstate) + intel_pstate = to_intel_plane_state(plane->state); + + WARN_ON(!intel_pstate->base.fb); + ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); - result->plane_en[i] = skl_compute_plane_wm(dev_priv, - cstate, - intel_plane, - ddb_blocks, - level, - &result->plane_res_b[i], - &result->plane_res_l[i]); + ret = skl_compute_plane_wm(dev_priv, + cstate, + intel_pstate, + ddb_blocks, + level, + &result->plane_res_b[i], + &result->plane_res_l[i], + &result->plane_en[i]); + if (ret) + return ret; } + + return 0; } static uint32_t @@ -3327,21 +3590,26 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate, } } -static void skl_compute_pipe_wm(struct intel_crtc_state *cstate, - struct skl_ddb_allocation *ddb, - struct skl_pipe_wm *pipe_wm) +static int skl_build_pipe_wm(struct intel_crtc_state *cstate, + struct skl_ddb_allocation *ddb, + struct skl_pipe_wm *pipe_wm) { struct drm_device *dev = cstate->base.crtc->dev; const struct drm_i915_private *dev_priv = dev->dev_private; int level, max_level = ilk_wm_max_level(dev); + int ret; for (level = 0; level <= max_level; level++) { - skl_compute_wm_level(dev_priv, ddb, cstate, - level, &pipe_wm->wm[level]); + ret = skl_compute_wm_level(dev_priv, ddb, cstate, + level, &pipe_wm->wm[level]); + if (ret) + return ret; } pipe_wm->linetime = skl_compute_linetime_wm(cstate); skl_compute_transition_wm(cstate, &pipe_wm->trans_wm); + + return 0; } static void skl_compute_wm_results(struct drm_device *dev, @@ -3421,7 +3689,9 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv, int i, level, max_level = ilk_wm_max_level(dev); enum pipe pipe = crtc->pipe; - if (!new->dirty[pipe]) + if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0) + continue; + if (!crtc->active) continue; I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]); @@ -3588,87 +3858,144 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv, } } -static bool skl_update_pipe_wm(struct drm_crtc *crtc, - struct skl_ddb_allocation *ddb, /* out */ - struct skl_pipe_wm *pipe_wm /* out */) +static int skl_update_pipe_wm(struct drm_crtc_state *cstate, + struct skl_ddb_allocation *ddb, /* out */ + struct skl_pipe_wm *pipe_wm, /* out */ + bool *changed /* out */) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); + struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc); + struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate); + int ret; - skl_allocate_pipe_ddb(cstate, ddb); - skl_compute_pipe_wm(cstate, ddb, pipe_wm); + ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); + if (ret) + return ret; if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm))) - return false; - - intel_crtc->wm.active.skl = *pipe_wm; + *changed = false; + else + *changed = true; - return true; + return 0; } -static void skl_update_other_pipe_wm(struct drm_device *dev, - struct drm_crtc *crtc, - struct skl_wm_values *r) +static int +skl_compute_ddb(struct drm_atomic_state *state) { + struct drm_device *dev = state->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct intel_crtc *intel_crtc; - struct intel_crtc *this_crtc = to_intel_crtc(crtc); + struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb; + unsigned realloc_pipes = dev_priv->active_crtcs; + int ret; /* - * If the WM update hasn't changed the allocation for this_crtc (the - * crtc we are currently 
computing the new WM values for), other - * enabled crtcs will keep the same allocation and we don't need to - * recompute anything for them. + * If this is our first atomic update following hardware readout, + * we can't trust the DDB that the BIOS programmed for us. Let's + * pretend that all pipes switched active status so that we'll + * ensure a full DDB recompute. */ - if (!skl_ddb_allocation_changed(&r->ddb, this_crtc)) - return; + if (dev_priv->wm.distrust_bios_wm) + intel_state->active_pipe_changes = ~0; /* - * Otherwise, because of this_crtc being freshly enabled/disabled, the - * other active pipes need new DDB allocation and WM values. + * If the modeset changes which CRTC's are active, we need to + * recompute the DDB allocation for *all* active pipes, even + * those that weren't otherwise being modified in any way by this + * atomic commit. Due to the shrinking of the per-pipe allocations + * when new active CRTC's are added, it's possible for a pipe that + * we were already using and aren't changing at all here to suddenly + * become invalid if its DDB needs exceeds its new allocation. + * + * Note that if we wind up doing a full DDB recompute, we can't let + * any other display updates race with this transaction, so we need + * to grab the lock on *all* CRTC's. */ - for_each_intel_crtc(dev, intel_crtc) { - struct skl_pipe_wm pipe_wm = {}; - bool wm_changed; - - if (this_crtc->pipe == intel_crtc->pipe) - continue; - - if (!intel_crtc->active) - continue; + if (intel_state->active_pipe_changes) { + realloc_pipes = ~0; + intel_state->wm_results.dirty_pipes = ~0; + } - wm_changed = skl_update_pipe_wm(&intel_crtc->base, - &r->ddb, &pipe_wm); + for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { + struct intel_crtc_state *cstate; - /* - * If we end up re-computing the other pipe WM values, it's - * because it was really needed, so we expect the WM values to - * be different. - */ - WARN_ON(!wm_changed); + cstate = intel_atomic_get_crtc_state(state, intel_crtc); + if (IS_ERR(cstate)) + return PTR_ERR(cstate); - skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc); - r->dirty[intel_crtc->pipe] = true; + ret = skl_allocate_pipe_ddb(cstate, ddb); + if (ret) + return ret; } + + return 0; } -static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe) +static int +skl_compute_wm(struct drm_atomic_state *state) { - watermarks->wm_linetime[pipe] = 0; - memset(watermarks->plane[pipe], 0, - sizeof(uint32_t) * 8 * I915_MAX_PLANES); - memset(watermarks->plane_trans[pipe], - 0, sizeof(uint32_t) * I915_MAX_PLANES); - watermarks->plane_trans[pipe][PLANE_CURSOR] = 0; + struct drm_crtc *crtc; + struct drm_crtc_state *cstate; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct skl_wm_values *results = &intel_state->wm_results; + struct skl_pipe_wm *pipe_wm; + bool changed = false; + int ret, i; - /* Clear ddb entries for pipe */ - memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry)); - memset(&watermarks->ddb.plane[pipe], 0, - sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); - memset(&watermarks->ddb.y_plane[pipe], 0, - sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); - memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0, - sizeof(struct skl_ddb_entry)); + /* + * If this transaction isn't actually touching any CRTC's, don't + * bother with watermark calculation. 
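/*
 * Sketch (not from the patch) of the recompute-mask choice made by the new
 * skl_compute_ddb() here: normally the currently active pipes are
 * reallocated, but a change in the set of active CRTCs, or a DDB inherited
 * from the BIOS that has not been recomputed yet, forces every pipe to be
 * redone. Plain bitmasks stand in for the driver state; values are
 * examples.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t ddb_realloc_pipes(uint32_t active_pipes,
				  bool distrust_bios_wm,
				  uint32_t active_pipe_changes)
{
	/* first commit after hardware readout: pretend everything changed */
	if (distrust_bios_wm)
		active_pipe_changes = ~0u;

	/* active set changed: every pipe's slice may move or shrink */
	if (active_pipe_changes)
		return ~0u;

	return active_pipes;
}

int main(void)
{
	printf("steady state:  0x%x\n", ddb_realloc_pipes(0x3, false, 0));
	printf("pipe C added:  0x%x\n", ddb_realloc_pipes(0x7, false, 0x4));
	printf("after readout: 0x%x\n", ddb_realloc_pipes(0x3, true, 0));
	return 0;
}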
Note that if we pass this + * test, we're guaranteed to hold at least one CRTC state mutex, + * which means we can safely use values like dev_priv->active_crtcs + * since any racing commits that want to update them would need to + * hold _all_ CRTC state mutexes. + */ + for_each_crtc_in_state(state, crtc, cstate, i) + changed = true; + if (!changed) + return 0; + + /* Clear all dirty flags */ + results->dirty_pipes = 0; + + ret = skl_compute_ddb(state); + if (ret) + return ret; + + /* + * Calculate WM's for all pipes that are part of this transaction. + * Note that the DDB allocation above may have added more CRTC's that + * weren't otherwise being modified (and set bits in dirty_pipes) if + * pipe allocations had to change. + * + * FIXME: Now that we're doing this in the atomic check phase, we + * should allow skl_update_pipe_wm() to return failure in cases where + * no suitable watermark values can be found. + */ + for_each_crtc_in_state(state, crtc, cstate, i) { + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc_state *intel_cstate = + to_intel_crtc_state(cstate); + + pipe_wm = &intel_cstate->wm.skl.optimal; + ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm, + &changed); + if (ret) + return ret; + if (changed) + results->dirty_pipes |= drm_crtc_mask(crtc); + + if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) + /* This pipe's WM's did not change */ + continue; + + intel_cstate->update_wm_pre = true; + skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc); + } + + return 0; } static void skl_update_wm(struct drm_crtc *crtc) @@ -3678,26 +4005,22 @@ static void skl_update_wm(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct skl_wm_values *results = &dev_priv->wm.skl_results; struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl; - - - /* Clear all dirty flags */ - memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES); - - skl_clear_wm(results, intel_crtc->pipe); + struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; - if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm)) + if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) return; - skl_compute_wm_results(dev, pipe_wm, results, intel_crtc); - results->dirty[intel_crtc->pipe] = true; + intel_crtc->wm.active.skl = *pipe_wm; + + mutex_lock(&dev_priv->wm.wm_mutex); - skl_update_other_pipe_wm(dev, crtc, results); skl_write_wm_values(dev_priv, results); skl_flush_wm_values(dev_priv, results); /* store the new configuration */ dev_priv->wm.skl_hw = *results; + + mutex_unlock(&dev_priv->wm.wm_mutex); } static void ilk_compute_wm_config(struct drm_device *dev, @@ -3757,7 +4080,7 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate) struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); mutex_lock(&dev_priv->wm.wm_mutex); - intel_crtc->wm.active.ilk = cstate->wm.intermediate; + intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate; ilk_program_watermarks(dev_priv); mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -3769,7 +4092,7 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate) mutex_lock(&dev_priv->wm.wm_mutex); if (cstate->wm.need_postvbl_update) { - intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk; + intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal; ilk_program_watermarks(dev_priv); } mutex_unlock(&dev_priv->wm.wm_mutex); @@ -3826,7 +4149,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) struct skl_wm_values *hw = 
&dev_priv->wm.skl_hw; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - struct skl_pipe_wm *active = &cstate->wm.optimal.skl; + struct skl_pipe_wm *active = &cstate->wm.skl.optimal; enum pipe pipe = intel_crtc->pipe; int level, i, max_level; uint32_t temp; @@ -3849,7 +4172,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) if (!intel_crtc->active) return; - hw->dirty[pipe] = true; + hw->dirty_pipes |= drm_crtc_mask(crtc); active->linetime = hw->wm_linetime[pipe]; @@ -3883,6 +4206,14 @@ void skl_wm_get_hw_state(struct drm_device *dev) skl_ddb_get_hw_state(dev_priv, ddb); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) skl_pipe_wm_get_hw_state(crtc); + + if (dev_priv->active_crtcs) { + /* Fully recompute DDB on first atomic commit */ + dev_priv->wm.distrust_bios_wm = true; + } else { + /* Easy/common case; just sanitize DDB now if everything off */ + memset(ddb, 0, sizeof(*ddb)); + } } static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) @@ -3892,7 +4223,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) struct ilk_wm_values *hw = &dev_priv->wm.hw; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - struct intel_pipe_wm *active = &cstate->wm.optimal.ilk; + struct intel_pipe_wm *active = &cstate->wm.ilk.optimal; enum pipe pipe = intel_crtc->pipe; static const i915_reg_t wm0_pipe_reg[] = { [PIPE_A] = WM0_PIPEA_ILK, @@ -4169,9 +4500,8 @@ DEFINE_SPINLOCK(mchdev_lock); * mchdev_lock. */ static struct drm_i915_private *i915_mch_dev; -bool ironlake_set_drps(struct drm_device *dev, u8 val) +bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val) { - struct drm_i915_private *dev_priv = dev->dev_private; u16 rgvswctl; assert_spin_locked(&mchdev_lock); @@ -4193,9 +4523,8 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val) return true; } -static void ironlake_enable_drps(struct drm_device *dev) +static void ironlake_enable_drps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 rgvmodectl; u8 fmax, fmin, fstart, vstart; @@ -4252,7 +4581,7 @@ static void ironlake_enable_drps(struct drm_device *dev) DRM_ERROR("stuck trying to change perf mode\n"); mdelay(1); - ironlake_set_drps(dev, fstart); + ironlake_set_drps(dev_priv, fstart); dev_priv->ips.last_count1 = I915_READ(DMIEC) + I915_READ(DDREC) + I915_READ(CSIEC); @@ -4263,9 +4592,8 @@ static void ironlake_enable_drps(struct drm_device *dev) spin_unlock_irq(&mchdev_lock); } -static void ironlake_disable_drps(struct drm_device *dev) +static void ironlake_disable_drps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u16 rgvswctl; spin_lock_irq(&mchdev_lock); @@ -4280,7 +4608,7 @@ static void ironlake_disable_drps(struct drm_device *dev) I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); /* Go back to the starting frequency */ - ironlake_set_drps(dev, dev_priv->ips.fstart); + ironlake_set_drps(dev_priv, dev_priv->ips.fstart); mdelay(1); rgvswctl |= MEMCTL_CMD_STS; I915_WRITE(MEMSWCTL, rgvswctl); @@ -4424,12 +4752,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) /* gen6_set_rps is called to update the frequency request, but should also be * called when the range (min_delay and max_delay) is modified so that we can * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. 
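/*
 * Sketch (not from the patch) of the bookkeeping change this series makes
 * around here: the per-pipe bool array dirty[pipe] becomes a single
 * dirty_pipes bitmask, set and tested with the CRTC's bit, so "anything
 * to write?" is one non-zero test. drm_crtc_mask() is modelled as a plain
 * shift for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define CRTC_MASK(pipe) (1u << (pipe))

int main(void)
{
	uint32_t dirty_pipes = 0;

	/* skl_compute_wm() marks a pipe whose watermarks changed */
	dirty_pipes |= CRTC_MASK(1);

	/* skl_write_wm_values() then skips everything unmarked */
	for (unsigned int pipe = 0; pipe < 3; pipe++) {
		if (!(dirty_pipes & CRTC_MASK(pipe)))
			continue;
		printf("rewriting WM registers for pipe %u\n", pipe);
	}
	return 0;
}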
*/ -static void gen6_set_rps(struct drm_device *dev, u8 val) +static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) return; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -4442,10 +4768,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) if (val != dev_priv->rps.cur_freq) { gen6_set_rps_thresholds(dev_priv, val); - if (IS_GEN9(dev)) + if (IS_GEN9(dev_priv)) I915_WRITE(GEN6_RPNSWREQ, GEN9_FREQUENCY(val)); - else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(val)); else @@ -4467,15 +4793,13 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); } -static void valleyview_set_rps(struct drm_device *dev, u8 val) +static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val) { - struct drm_i915_private *dev_priv = dev->dev_private; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); WARN_ON(val > dev_priv->rps.max_freq); WARN_ON(val < dev_priv->rps.min_freq); - if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1), + if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1), "Odd GPU freq value\n")) val &= ~1; @@ -4508,7 +4832,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) /* Wake up the media well, as that takes a lot less * power than the Render well. */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); - valleyview_set_rps(dev_priv->dev, val); + valleyview_set_rps(dev_priv, val); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); } @@ -4526,14 +4850,12 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv) void gen6_rps_idle(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; - mutex_lock(&dev_priv->rps.hw_lock); if (dev_priv->rps.enabled) { - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_set_rps_idle(dev_priv); else - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); dev_priv->rps.last_adj = 0; I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); } @@ -4581,49 +4903,39 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv, spin_unlock(&dev_priv->rps.client_lock); } -void intel_set_rps(struct drm_device *dev, u8 val) +void intel_set_rps(struct drm_i915_private *dev_priv, u8 val) { - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) - valleyview_set_rps(dev, val); + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + valleyview_set_rps(dev_priv, val); else - gen6_set_rps(dev, val); + gen6_set_rps(dev_priv, val); } -static void gen9_disable_rc6(struct drm_device *dev) +static void gen9_disable_rc6(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN9_PG_ENABLE, 0); } -static void gen9_disable_rps(struct drm_device *dev) +static void gen9_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RP_CONTROL, 0); } -static void gen6_disable_rps(struct drm_device *dev) +static void gen6_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN6_RPNSWREQ, 1 << 31); I915_WRITE(GEN6_RP_CONTROL, 
0); } -static void cherryview_disable_rps(struct drm_device *dev) +static void cherryview_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); } -static void valleyview_disable_rps(struct drm_device *dev) +static void valleyview_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* we're doing forcewake before disabling RC6, * this is what the BIOS expects when going into suspend */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); @@ -4633,15 +4945,15 @@ static void valleyview_disable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_CONTROL, 0); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void intel_print_rc6_info(struct drm_device *dev, u32 mode) +static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode) { - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) mode = GEN6_RC_CTL_RC6_ENABLE; else mode = 0; } - if (HAS_RC6p(dev)) + if (HAS_RC6p(dev_priv)) DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", onoff(mode & GEN6_RC_CTL_RC6_ENABLE), onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), @@ -4652,9 +4964,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode) onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE)); } -static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) +static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; bool enable_rc6 = true; unsigned long rc6_ctx_base; @@ -4695,16 +5006,16 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) return enable_rc6; } -int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) +int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6) { /* No RC6 before Ironlake and code is gone for ilk. 
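/*
 * Sketch (not from the patch) of the clamping that the surrounding
 * sanitize_rc6_option() hunk performs: an explicit user request is masked
 * against the modes the hardware supports, so the deeper RC6p/RC6pp bits
 * survive only where HAS_RC6p() holds. Bit values follow the
 * INTEL_RC6*_ENABLE defines from the top of intel_pm.c; treating plain
 * RC6 as bit 0 is an assumption here.
 */
#include <stdio.h>
#include <stdbool.h>

#define RC6_ENABLE   (1 << 0)	/* assumed INTEL_RC6_ENABLE */
#define RC6p_ENABLE  (1 << 1)	/* INTEL_RC6p_ENABLE */
#define RC6pp_ENABLE (1 << 2)	/* INTEL_RC6pp_ENABLE */

static int sanitize_rc6(int requested, bool has_rc6p)
{
	int mask = has_rc6p ? RC6_ENABLE | RC6p_ENABLE | RC6pp_ENABLE
			    : RC6_ENABLE;

	if (requested & ~mask)
		printf("adjusting RC6 mask %d -> %d\n",
		       requested, requested & mask);
	return requested & mask;
}

int main(void)
{
	/* RC6+RC6p requested on hardware without RC6p: falls back to RC6 */
	printf("result: %d\n", sanitize_rc6(RC6_ENABLE | RC6p_ENABLE, false));
	return 0;
}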
*/ - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_INFO(dev_priv)->gen < 6) return 0; if (!enable_rc6) return 0; - if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) { + if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) { DRM_INFO("RC6 disabled by BIOS\n"); return 0; } @@ -4713,7 +5024,7 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) if (enable_rc6 >= 0) { int mask; - if (HAS_RC6p(dev)) + if (HAS_RC6p(dev_priv)) mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | INTEL_RC6pp_ENABLE; else @@ -4726,20 +5037,14 @@ return enable_rc6 & mask; } - if (IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev_priv)) return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); return INTEL_RC6_ENABLE; } -int intel_enable_rc6(const struct drm_device *dev) +static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) { - return i915.enable_rc6; -} - -static void gen6_init_rps_frequencies(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; uint32_t rp_state_cap; u32 ddcc_status = 0; int ret; @@ -4747,7 +5052,7 @@ /* All of these values are in units of 50MHz */ dev_priv->rps.cur_freq = 0; /* static values from HW: RP0 > RP1 > RPn (min_freq) */ - if (IS_BROXTON(dev)) { + if (IS_BROXTON(dev_priv)) { rp_state_cap = I915_READ(BXT_RP_STATE_CAP); dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; @@ -4763,8 +5068,8 @@ dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; - if (IS_HASWELL(dev) || IS_BROADWELL(dev) || - IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || + IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { ret = sandybridge_pcode_read(dev_priv, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, &ddcc_status); @@ -4776,7 +5081,7 @@ dev_priv->rps.max_freq); } - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { /* Store the frequency values in 16.66 MHZ units, which is the natural hardware unit for SKL */ dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; @@ -4793,7 +5098,7 @@ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; if (dev_priv->rps.min_freq_softlimit == 0) { - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) dev_priv->rps.min_freq_softlimit = max_t(int, dev_priv->rps.efficient_freq, intel_freq_opcode(dev_priv, 450)); @@ -4804,16 +5109,14 @@ } /* See the Gen9_GT_PM_Programming_Guide doc for the below */ -static void gen9_enable_rps(struct drm_device *dev) +static void gen9_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - gen6_init_rps_frequencies(dev); + gen6_init_rps_frequencies(dev_priv); /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { /* * BIOS could leave the Hw Turbo enabled, so we need to explicitly * clear out the Control register just to avoid inconsistency @@ -4823,7 +5126,7 @@ static void gen9_enable_rps(struct 
drm_device *dev) * if the Turbo is left enabled in the Control register, as the * Up/Down interrupts would remain masked. */ - gen9_disable_rps(dev); + gen9_disable_rps(dev_priv); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return; } @@ -4842,14 +5145,13 @@ static void gen9_enable_rps(struct drm_device *dev) * Up/Down EI & threshold registers, as well as the RP_CONTROL, * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void gen9_enable_rc6(struct drm_device *dev) +static void gen9_enable_rc6(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; uint32_t rc6_mask = 0; @@ -4866,7 +5168,7 @@ static void gen9_enable_rc6(struct drm_device *dev) /* 2b: Program RC6 thresholds.*/ /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev_priv)) I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); else I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); @@ -4875,7 +5177,7 @@ static void gen9_enable_rc6(struct drm_device *dev) for_each_engine(engine, dev_priv) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - if (HAS_GUC_UCODE(dev)) + if (HAS_GUC(dev_priv)) I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); I915_WRITE(GEN6_RC_SLEEP, 0); @@ -4885,12 +5187,12 @@ static void gen9_enable_rc6(struct drm_device *dev) I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); /* 3a: Enable RC6 */ - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) + if (intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); /* WaRsUseTimeoutMode */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | GEN7_RC_CTL_TO_MODE | @@ -4906,19 +5208,17 @@ static void gen9_enable_rc6(struct drm_device *dev) * 3b: Enable Coarse Power Gating only when RC6 is enabled. * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. */ - if (NEEDS_WaRsDisableCoarsePowerGating(dev)) + if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) I915_WRITE(GEN9_PG_ENABLE, 0); else I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 
(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - } -static void gen8_enable_rps(struct drm_device *dev) +static void gen8_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; uint32_t rc6_mask = 0; @@ -4933,7 +5233,7 @@ static void gen8_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_CONTROL, 0); /* Initialize rps frequencies */ - gen6_init_rps_frequencies(dev); + gen6_init_rps_frequencies(dev_priv); /* 2b: Program RC6 thresholds.*/ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); @@ -4942,16 +5242,16 @@ static void gen8_enable_rps(struct drm_device *dev) for_each_engine(engine, dev_priv) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); - if (IS_BROADWELL(dev)) + if (IS_BROADWELL(dev_priv)) I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ else I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ /* 3: Enable RC6 */ - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) + if (intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; - intel_print_rc6_info(dev, rc6_mask); - if (IS_BROADWELL(dev)) + intel_print_rc6_info(dev_priv, rc6_mask); + if (IS_BROADWELL(dev_priv)) I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | GEN7_RC_CTL_TO_MODE | rc6_mask); @@ -4992,14 +5292,13 @@ static void gen8_enable_rps(struct drm_device *dev) /* 6: Ring frequency + overclocking (our driver does this later) */ dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void gen6_enable_rps(struct drm_device *dev) +static void gen6_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; u32 gtfifodbg; @@ -5026,7 +5325,7 @@ static void gen6_enable_rps(struct drm_device *dev) intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); /* Initialize rps frequencies */ - gen6_init_rps_frequencies(dev); + gen6_init_rps_frequencies(dev_priv); /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -5042,7 +5341,7 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - if (IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev_priv)) I915_WRITE(GEN6_RC6_THRESHOLD, 125000); else I915_WRITE(GEN6_RC6_THRESHOLD, 50000); @@ -5050,12 +5349,12 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ /* Check if we are enabling RC6 */ - rc6_mode = intel_enable_rc6(dev_priv->dev); + rc6_mode = intel_enable_rc6(); if (rc6_mode & INTEL_RC6_ENABLE) rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; /* We don't use those on Haswell */ - if (!IS_HASWELL(dev)) { + if (!IS_HASWELL(dev_priv)) { if (rc6_mode & INTEL_RC6p_ENABLE) rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; @@ -5063,7 +5362,7 @@ static void gen6_enable_rps(struct drm_device *dev) rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; } - intel_print_rc6_info(dev, rc6_mask); + intel_print_rc6_info(dev_priv, rc6_mask); I915_WRITE(GEN6_RC_CONTROL, rc6_mask | @@ -5087,13 +5386,13 @@ static void gen6_enable_rps(struct drm_device *dev) } dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, 
dev_priv->rps.idle_freq); rc6vids = 0; ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); - if (IS_GEN6(dev) && ret) { + if (IS_GEN6(dev_priv) && ret) { DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); - } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { + } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); rc6vids &= 0xffff00; @@ -5106,9 +5405,8 @@ static void gen6_enable_rps(struct drm_device *dev) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void __gen6_update_ring_freq(struct drm_device *dev) +static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; int min_freq = 15; unsigned int gpu_freq; unsigned int max_ia_freq, min_ring_freq; @@ -5137,7 +5435,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev) /* convert DDR frequency from units of 266.6MHz to bandwidth */ min_ring_freq = mult_frac(min_ring_freq, 8, 3); - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { /* Convert GT frequency to 50 HZ units */ min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; @@ -5155,16 +5453,16 @@ static void __gen6_update_ring_freq(struct drm_device *dev) int diff = max_gpu_freq - gpu_freq; unsigned int ia_freq = 0, ring_freq = 0; - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { /* * ring_freq = 2 * GT. ring_freq is in 100MHz units * No floor required for ring frequency on SKL. */ ring_freq = gpu_freq; - } else if (INTEL_INFO(dev)->gen >= 8) { + } else if (INTEL_INFO(dev_priv)->gen >= 8) { /* max(2 * GT, DDR). 
NB: GT is 50MHz units */ ring_freq = max(min_ring_freq, gpu_freq); - } else if (IS_HASWELL(dev)) { + } else if (IS_HASWELL(dev_priv)) { ring_freq = mult_frac(gpu_freq, 5, 4); ring_freq = max(min_ring_freq, ring_freq); /* leave ia_freq as the default, chosen by cpufreq */ @@ -5191,26 +5489,23 @@ static void __gen6_update_ring_freq(struct drm_device *dev) } } -void gen6_update_ring_freq(struct drm_device *dev) +void gen6_update_ring_freq(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!HAS_CORE_RING_FREQ(dev)) + if (!HAS_CORE_RING_FREQ(dev_priv)) return; mutex_lock(&dev_priv->rps.hw_lock); - __gen6_update_ring_freq(dev); + __gen6_update_ring_freq(dev_priv); mutex_unlock(&dev_priv->rps.hw_lock); } static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; u32 val, rp0; val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); - switch (INTEL_INFO(dev)->eu_total) { + switch (INTEL_INFO(dev_priv)->eu_total) { case 8: /* (2 * 4) config */ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); @@ -5321,9 +5616,8 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv) WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); } -static void cherryview_setup_pctx(struct drm_device *dev) +static void cherryview_setup_pctx(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned long pctx_paddr, paddr; u32 pcbr; @@ -5342,15 +5636,14 @@ static void cherryview_setup_pctx(struct drm_device *dev) DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); } -static void valleyview_setup_pctx(struct drm_device *dev) +static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *pctx; unsigned long pctx_paddr; u32 pcbr; int pctx_size = 24*1024; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->dev->struct_mutex); pcbr = I915_READ(VLV_PCBR); if (pcbr) { @@ -5375,7 +5668,7 @@ static void valleyview_setup_pctx(struct drm_device *dev) * overlap with other ranges, such as the frame buffer, protected * memory, or any other relevant ranges. 
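/*
 * Sketch (not from the patch) of the per-platform ring-frequency rules in
 * __gen6_update_ring_freq() above, one row per GT step: SKL/KBL use the GT
 * value directly, gen8 floors it at the DDR-derived minimum (the "2 * GT"
 * in the comment reflects 100MHz ring units vs 50MHz GT units), and
 * Haswell scales GT by 5/4 before applying the floor. mult_frac() mirrors
 * the kernel's rounding; the frequencies are made-up numbers.
 */
#include <stdio.h>

#define mult_frac(x, n, d) ((x) / (d) * (n) + ((x) % (d)) * (n) / (d))

enum platform { HASWELL, GEN8, SKYLAKE };

static unsigned int ring_freq_for(enum platform p, unsigned int gpu_freq,
				  unsigned int min_ring_freq)
{
	unsigned int ring_freq;

	switch (p) {
	case SKYLAKE:		/* no floor required on SKL */
		return gpu_freq;
	case GEN8:		/* max(GT, DDR floor) */
		return gpu_freq > min_ring_freq ? gpu_freq : min_ring_freq;
	default:		/* Haswell: ring = 1.25 * GT, floored at DDR */
		ring_freq = mult_frac(gpu_freq, 5, 4);
		return ring_freq > min_ring_freq ? ring_freq : min_ring_freq;
	}
}

int main(void)
{
	for (unsigned int gpu = 10; gpu <= 22; gpu += 4)
		printf("GT %2u -> hsw %2u, gen8 %2u, skl %2u\n", gpu,
		       ring_freq_for(HASWELL, gpu, 14),
		       ring_freq_for(GEN8, gpu, 14),
		       ring_freq_for(SKYLAKE, gpu, 14));
	return 0;
}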
*/ - pctx = i915_gem_object_create_stolen(dev, pctx_size); + pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size); if (!pctx) { DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); goto out; @@ -5387,13 +5680,11 @@ static void valleyview_setup_pctx(struct drm_device *dev) out: DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); dev_priv->vlv_pctx = pctx; - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->dev->struct_mutex); } -static void valleyview_cleanup_pctx(struct drm_device *dev) +static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (WARN_ON(!dev_priv->vlv_pctx)) return; @@ -5412,12 +5703,11 @@ static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) dev_priv->rps.gpll_ref_freq); } -static void valleyview_init_gt_powersave(struct drm_device *dev) +static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 val; - valleyview_setup_pctx(dev); + valleyview_setup_pctx(dev_priv); vlv_init_gpll_ref_freq(dev_priv); @@ -5471,12 +5761,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev) mutex_unlock(&dev_priv->rps.hw_lock); } -static void cherryview_init_gt_powersave(struct drm_device *dev) +static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 val; - cherryview_setup_pctx(dev); + cherryview_setup_pctx(dev_priv); vlv_init_gpll_ref_freq(dev_priv); @@ -5536,14 +5825,13 @@ static void cherryview_init_gt_powersave(struct drm_device *dev) mutex_unlock(&dev_priv->rps.hw_lock); } -static void valleyview_cleanup_gt_powersave(struct drm_device *dev) +static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv) { - valleyview_cleanup_pctx(dev); + valleyview_cleanup_pctx(dev_priv); } -static void cherryview_enable_rps(struct drm_device *dev) +static void cherryview_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; u32 gtfifodbg, val, rc6_mode = 0, pcbr; @@ -5588,8 +5876,8 @@ static void cherryview_enable_rps(struct drm_device *dev) pcbr = I915_READ(VLV_PCBR); /* 3: Enable RC6 */ - if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && - (pcbr >> VLV_PCBR_ADDR_SHIFT)) + if ((intel_enable_rc6() & INTEL_RC6_ENABLE) && + (pcbr >> VLV_PCBR_ADDR_SHIFT)) rc6_mode = GEN7_RC_CTL_TO_MODE; I915_WRITE(GEN6_RC_CONTROL, rc6_mode); @@ -5634,14 +5922,13 @@ static void cherryview_enable_rps(struct drm_device *dev) intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), dev_priv->rps.idle_freq); - valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void valleyview_enable_rps(struct drm_device *dev) +static void valleyview_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; u32 gtfifodbg, val, rc6_mode = 0; @@ -5694,10 +5981,10 @@ static void valleyview_enable_rps(struct drm_device *dev) VLV_MEDIA_RC6_COUNT_EN | VLV_RENDER_RC6_COUNT_EN)); - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) + if (intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; - intel_print_rc6_info(dev, rc6_mode); + intel_print_rc6_info(dev_priv, rc6_mode); I915_WRITE(GEN6_RC_CONTROL, rc6_mode); @@ -5724,7 
+6011,7 @@ static void valleyview_enable_rps(struct drm_device *dev) intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), dev_priv->rps.idle_freq); - valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } @@ -5814,10 +6101,9 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; unsigned long val; - if (INTEL_INFO(dev)->gen != 5) + if (INTEL_INFO(dev_priv)->gen != 5) return 0; spin_lock_irq(&mchdev_lock); @@ -5857,11 +6143,10 @@ static int _pxvid_to_vd(u8 pxvid) static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) { - struct drm_device *dev = dev_priv->dev; const int vd = _pxvid_to_vd(pxvid); const int vm = vd - 1125; - if (INTEL_INFO(dev)->is_mobile) + if (INTEL_INFO(dev_priv)->is_mobile) return vm > 0 ? vm : 0; return vd; @@ -5902,9 +6187,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) void i915_update_gfx_val(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; - - if (INTEL_INFO(dev)->gen != 5) + if (INTEL_INFO(dev_priv)->gen != 5) return; spin_lock_irq(&mchdev_lock); @@ -5953,10 +6236,9 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; unsigned long val; - if (INTEL_INFO(dev)->gen != 5) + if (INTEL_INFO(dev_priv)->gen != 5) return 0; spin_lock_irq(&mchdev_lock); @@ -6097,7 +6379,7 @@ bool i915_gpu_turbo_disable(void) dev_priv->ips.max_delay = dev_priv->ips.fstart; - if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) + if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart)) ret = false; out_unlock: @@ -6145,9 +6427,8 @@ void intel_gpu_ips_teardown(void) spin_unlock_irq(&mchdev_lock); } -static void intel_init_emon(struct drm_device *dev) +static void intel_init_emon(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 lcfuse; u8 pxw[16]; int i; @@ -6216,10 +6497,8 @@ static void intel_init_emon(struct drm_device *dev) dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); } -void intel_init_gt_powersave(struct drm_device *dev) +void intel_init_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* * RPM depends on RC6 to save restore the GT HW context, so make RC6 a * requirement. 
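The conversions in these hunks all follow one pattern: functions that only touch GT/PM state now take struct drm_i915_private directly instead of struct drm_device, dropping the dev->dev_private chase at the top of each body. A minimal standalone model of the two calling conventions (stand-in struct layouts, not the real i915 headers):

	/* Stand-in types only; the real structs live in i915_drv.h. */
	struct drm_device { void *dev_private; };
	struct drm_i915_private { struct drm_device *dev; };

	static struct drm_i915_private *to_i915_(struct drm_device *dev)
	{
		return dev->dev_private;
	}

	/* Old convention: take drm_device, immediately dig out the private. */
	static void old_convention(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = to_i915_(dev);
		(void)dev_priv;
	}

	/* New convention: take drm_i915_private; reach back through
	 * dev_priv->dev only where a drm_device really is needed, as
	 * valleyview_setup_pctx() above does for struct_mutex. */
	static void new_convention(struct drm_i915_private *dev_priv)
	{
		struct drm_device *dev = dev_priv->dev;
		(void)dev;
	}

	int main(void)
	{
		struct drm_device dev;
		struct drm_i915_private i915 = { .dev = &dev };

		dev.dev_private = &i915;
		old_convention(&dev);
		new_convention(&i915);
		return 0;
	}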
@@ -6229,74 +6508,66 @@ void intel_init_gt_powersave(struct drm_device *dev) intel_runtime_pm_get(dev_priv); } - if (IS_CHERRYVIEW(dev)) - cherryview_init_gt_powersave(dev); - else if (IS_VALLEYVIEW(dev)) - valleyview_init_gt_powersave(dev); + if (IS_CHERRYVIEW(dev_priv)) + cherryview_init_gt_powersave(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_init_gt_powersave(dev_priv); } -void intel_cleanup_gt_powersave(struct drm_device *dev) +void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev_priv)) return; - else if (IS_VALLEYVIEW(dev)) - valleyview_cleanup_gt_powersave(dev); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_cleanup_gt_powersave(dev_priv); if (!i915.enable_rc6) intel_runtime_pm_put(dev_priv); } -static void gen6_suspend_rps(struct drm_device *dev) +static void gen6_suspend_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - gen6_disable_rps_interrupts(dev); + gen6_disable_rps_interrupts(dev_priv); } /** * intel_suspend_gt_powersave - suspend PM work and helper threads - * @dev: drm device + * @dev_priv: i915 device * * We don't want to disable RC6 or other features here, we just want * to make sure any work we've queued has finished and won't bother * us while we're suspended. */ -void intel_suspend_gt_powersave(struct drm_device *dev) +void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) return; - gen6_suspend_rps(dev); + gen6_suspend_rps(dev_priv); /* Force GPU to min freq during suspend */ gen6_rps_idle(dev_priv); } -void intel_disable_gt_powersave(struct drm_device *dev) +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_IRONLAKE_M(dev)) { - ironlake_disable_drps(dev); - } else if (INTEL_INFO(dev)->gen >= 6) { - intel_suspend_gt_powersave(dev); + if (IS_IRONLAKE_M(dev_priv)) { + ironlake_disable_drps(dev_priv); + } else if (INTEL_INFO(dev_priv)->gen >= 6) { + intel_suspend_gt_powersave(dev_priv); mutex_lock(&dev_priv->rps.hw_lock); - if (INTEL_INFO(dev)->gen >= 9) { - gen9_disable_rc6(dev); - gen9_disable_rps(dev); - } else if (IS_CHERRYVIEW(dev)) - cherryview_disable_rps(dev); - else if (IS_VALLEYVIEW(dev)) - valleyview_disable_rps(dev); + if (INTEL_INFO(dev_priv)->gen >= 9) { + gen9_disable_rc6(dev_priv); + gen9_disable_rps(dev_priv); + } else if (IS_CHERRYVIEW(dev_priv)) + cherryview_disable_rps(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_disable_rps(dev_priv); else - gen6_disable_rps(dev); + gen6_disable_rps(dev_priv); dev_priv->rps.enabled = false; mutex_unlock(&dev_priv->rps.hw_lock); @@ -6308,27 +6579,26 @@ static void intel_gen6_powersave_work(struct work_struct *work) struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, rps.delayed_resume_work.work); - struct drm_device *dev = dev_priv->dev; mutex_lock(&dev_priv->rps.hw_lock); - gen6_reset_rps_interrupts(dev); + gen6_reset_rps_interrupts(dev_priv); - if (IS_CHERRYVIEW(dev)) { - cherryview_enable_rps(dev); - } else if (IS_VALLEYVIEW(dev)) { - valleyview_enable_rps(dev); - } else if (INTEL_INFO(dev)->gen >= 9) { - gen9_enable_rc6(dev); - gen9_enable_rps(dev); - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) - 
__gen6_update_ring_freq(dev); - } else if (IS_BROADWELL(dev)) { - gen8_enable_rps(dev); - __gen6_update_ring_freq(dev); + if (IS_CHERRYVIEW(dev_priv)) { + cherryview_enable_rps(dev_priv); + } else if (IS_VALLEYVIEW(dev_priv)) { + valleyview_enable_rps(dev_priv); + } else if (INTEL_INFO(dev_priv)->gen >= 9) { + gen9_enable_rc6(dev_priv); + gen9_enable_rps(dev_priv); + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + __gen6_update_ring_freq(dev_priv); + } else if (IS_BROADWELL(dev_priv)) { + gen8_enable_rps(dev_priv); + __gen6_update_ring_freq(dev_priv); } else { - gen6_enable_rps(dev); - __gen6_update_ring_freq(dev); + gen6_enable_rps(dev_priv); + __gen6_update_ring_freq(dev_priv); } WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); @@ -6339,27 +6609,25 @@ static void intel_gen6_powersave_work(struct work_struct *work) dev_priv->rps.enabled = true; - gen6_enable_rps_interrupts(dev); + gen6_enable_rps_interrupts(dev_priv); mutex_unlock(&dev_priv->rps.hw_lock); intel_runtime_pm_put(dev_priv); } -void intel_enable_gt_powersave(struct drm_device *dev) +void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* Powersaving is controlled by the host when inside a VM */ - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) return; - if (IS_IRONLAKE_M(dev)) { - ironlake_enable_drps(dev); - mutex_lock(&dev->struct_mutex); - intel_init_emon(dev); - mutex_unlock(&dev->struct_mutex); - } else if (INTEL_INFO(dev)->gen >= 6) { + if (IS_IRONLAKE_M(dev_priv)) { + ironlake_enable_drps(dev_priv); + mutex_lock(&dev_priv->dev->struct_mutex); + intel_init_emon(dev_priv); + mutex_unlock(&dev_priv->dev->struct_mutex); + } else if (INTEL_INFO(dev_priv)->gen >= 6) { /* * PCU communication is slow and this doesn't need to be * done at any specific time, so do this out of our fast path @@ -6378,14 +6646,12 @@ void intel_enable_gt_powersave(struct drm_device *dev) } } -void intel_reset_gt_powersave(struct drm_device *dev) +void intel_reset_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_INFO(dev_priv)->gen < 6) return; - gen6_suspend_rps(dev); + gen6_suspend_rps(dev_priv); dev_priv->rps.enabled = false; } @@ -6698,11 +6964,69 @@ static void lpt_suspend_hw(struct drm_device *dev) } } +static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, + int general_prio_credits, + int high_prio_credits) +{ + u32 misccpctl; + + /* WaTempDisableDOPClkGating:bdw */ + misccpctl = I915_READ(GEN7_MISCCPCTL); + I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); + + I915_WRITE(GEN8_L3SQCREG1, + L3_GENERAL_PRIO_CREDITS(general_prio_credits) | + L3_HIGH_PRIO_CREDITS(high_prio_credits)); + + /* + * Wait at least 100 clocks before re-enabling clock gating. + * See the definition of L3SQCREG1 in BSpec. 
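The 100-clock wait the comment above asks for is implemented just below with a posting read followed by udelay(1). As a generic idiom (a sketch using stock kernel helpers, not code from this patch): reading the register back forces the posted MMIO write out to the device before the delay starts, and 1us covers at least 100 clocks for any clock running at 100MHz or faster.

	#include <linux/io.h>
	#include <linux/delay.h>
	#include <linux/types.h>

	static void write_then_settle(void __iomem *reg, u32 val)
	{
		writel(val, reg);
		(void)readl(reg);	/* posting read: flush the write */
		udelay(1);		/* >= 100 clocks at >= 100MHz */
	}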
+ */ + POSTING_READ(GEN8_L3SQCREG1); + udelay(1); + I915_WRITE(GEN7_MISCCPCTL, misccpctl); +} + +static void kabylake_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + gen9_init_clock_gating(dev); + + /* WaDisableSDEUnitClockGating:kbl */ + if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) + I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | + GEN8_SDEUNIT_CLOCK_GATE_DISABLE); + + /* WaDisableGamClockGating:kbl */ + if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) + I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | + GEN6_GAMUNIT_CLOCK_GATE_DISABLE); + + /* WaFbcNukeOnHostModify:kbl */ + I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + ILK_DPFC_NUKE_ON_ANY_MODIFICATION); +} + +static void skylake_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + gen9_init_clock_gating(dev); + + /* WAC6entrylatency:skl */ + I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | + FBC_LLC_FULLY_OPEN); + + /* WaFbcNukeOnHostModify:skl */ + I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + ILK_DPFC_NUKE_ON_ANY_MODIFICATION); +} + static void broadwell_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe; - uint32_t misccpctl; ilk_init_lp_watermarks(dev); @@ -6733,20 +7057,8 @@ static void broadwell_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); - /* - * WaProgramL3SqcReg1Default:bdw - * WaTempDisableDOPClkGating:bdw - */ - misccpctl = I915_READ(GEN7_MISCCPCTL); - I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); - I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT); - /* - * Wait at least 100 clocks before re-enabling clock gating. See - * the definition of L3SQCREG1 in BSpec. - */ - POSTING_READ(GEN8_L3SQCREG1); - udelay(1); - I915_WRITE(GEN7_MISCCPCTL, misccpctl); + /* WaProgramL3SqcReg1Default:bdw */ + gen8_set_l3sqc_credits(dev_priv, 30, 2); /* * WaGttCachingOffByDefault:bdw @@ -6755,6 +7067,10 @@ static void broadwell_init_clock_gating(struct drm_device *dev) */ I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); + /* WaKVMNotificationOnConfigChange:bdw */ + I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1) + | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT); + lpt_init_clock_gating(dev); } @@ -7017,6 +7333,13 @@ static void cherryview_init_clock_gating(struct drm_device *dev) GEN8_SDEUNIT_CLOCK_GATE_DISABLE); /* + * WaProgramL3SqcReg1Default:chv + * See gfxspecs/Related Documents/Performance Guide/ + * LSQC Setting Recommendations. + */ + gen8_set_l3sqc_credits(dev_priv, 38, 2); + + /* * GTT cache may not work with big pages, so if those * are ever enabled GTT cache may need to be disabled. 
*/ @@ -7163,9 +7486,9 @@ static void nop_init_clock_gating(struct drm_device *dev) void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { if (IS_SKYLAKE(dev_priv)) - dev_priv->display.init_clock_gating = nop_init_clock_gating; + dev_priv->display.init_clock_gating = skylake_init_clock_gating; else if (IS_KABYLAKE(dev_priv)) - dev_priv->display.init_clock_gating = nop_init_clock_gating; + dev_priv->display.init_clock_gating = kabylake_init_clock_gating; else if (IS_BROXTON(dev_priv)) dev_priv->display.init_clock_gating = bxt_init_clock_gating; else if (IS_BROADWELL(dev_priv)) @@ -7217,6 +7540,7 @@ void intel_init_pm(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 9) { skl_setup_wm_latency(dev); dev_priv->display.update_wm = skl_update_wm; + dev_priv->display.compute_global_watermarks = skl_compute_wm; } else if (HAS_PCH_SPLIT(dev)) { ilk_setup_wm_latency(dev); @@ -7390,19 +7714,17 @@ static void __intel_rps_boost_work(struct work_struct *work) struct drm_i915_gem_request *req = boost->req; if (!i915_gem_request_completed(req, true)) - gen6_rps_boost(to_i915(req->engine->dev), NULL, - req->emitted_jiffies); + gen6_rps_boost(req->i915, NULL, req->emitted_jiffies); - i915_gem_request_unreference__unlocked(req); + i915_gem_request_unreference(req); kfree(boost); } -void intel_queue_rps_boost_for_request(struct drm_device *dev, - struct drm_i915_gem_request *req) +void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req) { struct request_boost *boost; - if (req == NULL || INTEL_INFO(dev)->gen < 6) + if (req == NULL || INTEL_GEN(req->i915) < 6) return; if (i915_gem_request_completed(req, true)) @@ -7416,7 +7738,7 @@ void intel_queue_rps_boost_for_request(struct drm_device *dev, boost->req = req; INIT_WORK(&boost->work, __intel_rps_boost_work); - queue_work(to_i915(dev)->wq, &boost->work); + queue_work(req->i915->wq, &boost->work); } void intel_pm_setup(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index a788d1e9589b..29a09bf6bd18 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -176,7 +176,6 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t aux_clock_divider; i915_reg_t aux_ctl_reg; - int precharge = 0x3; static const uint8_t aux_msg[] = { [0] = DP_AUX_NATIVE_WRITE << 4, [1] = DP_SET_POWER >> 8, @@ -185,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) [4] = DP_SET_POWER_D0, }; enum port port = dig_port->port; + u32 aux_ctl; int i; BUILD_BUG_ON(sizeof(aux_msg) > 20); @@ -197,6 +197,13 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF, DP_AUX_FRAME_SYNC_ENABLE); + if (dev_priv->psr.link_standby) + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); + else + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE); + aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port); /* Setup AUX registers */ @@ -204,33 +211,9 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2), intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); - if (INTEL_INFO(dev)->gen >= 9) { - uint32_t val; - - val = I915_READ(aux_ctl_reg); - val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK; - val |= DP_AUX_CH_CTL_TIME_OUT_1600us; - val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK; - val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); - /* Use hardcoded data values for 
PSR, frame sync and GTC */ - val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL; - val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL; - val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL; - I915_WRITE(aux_ctl_reg, val); - } else { - I915_WRITE(aux_ctl_reg, - DP_AUX_CH_CTL_TIME_OUT_400us | - (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | - (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); - } - - if (dev_priv->psr.link_standby) - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, - DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); - else - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, - DP_PSR_ENABLE); + aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg), + aux_clock_divider); + I915_WRITE(aux_ctl_reg, aux_ctl); } static void vlv_psr_enable_source(struct intel_dp *intel_dp) @@ -272,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t max_sleep_time = 0x1f; - /* - * Let's respect VBT in case VBT asks a higher idle_frame value. - * Let's use 6 as the minimum to cover all known cases including - * the off-by-one issue that HW has in some cases. Also there are - * cases where sink should be able to train - * with the 5 or 6 idle patterns. + /* Lately it was identified that depending on panel idle frame count + * calculated at HW can be off by 1. So let's use what came + * from VBT + 1. + * There are also other cases where panel demands at least 4 + * but VBT is not being set. To cover these 2 cases lets use + * at least 5 when VBT isn't set to be on the safest side. */ - uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); + uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1; uint32_t val = EDP_PSR_ENABLE; val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 04402bb9d26b..fedd27049814 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -34,6 +34,11 @@ #include "i915_trace.h" #include "intel_drv.h" +/* Rough estimate of the typical request size, performing a flush, + * set-context and then emitting the batch. 
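The LEGACY_REQUEST_SIZE estimate defined below sizes the per-request reservation; free space itself is tracked with the wraparound arithmetic of __intel_ring_space(), which can be checked standalone. Invented numbers; the real function also subtracts a small reserved gap before returning.

	#include <assert.h>

	/* Restatement of the circular-buffer maths in __intel_ring_space()
	 * below; the driver additionally keeps a gap so head and tail
	 * never become equal while the ring is full. */
	static int ring_space(int head, int tail, int size)
	{
		int space = head - tail;
		if (space <= 0)
			space += size;	/* tail has wrapped past the end */
		return space;
	}

	int main(void)
	{
		/* tail at 4000 has nearly lapped head at 128 in a
		 * 4096-byte ring: 128 - 4000 + 4096 = 224 bytes remain. */
		assert(ring_space(128, 4000, 4096) == 224);
		return 0;
	}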
+ */ +#define LEGACY_REQUEST_SIZE 200 + int __intel_ring_space(int head, int tail, int size) { int space = head - tail; @@ -55,7 +60,7 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf) bool intel_engine_stopped(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine); } @@ -101,7 +106,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 flush_domains) { struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; u32 cmd; int ret; @@ -140,7 +144,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, cmd |= MI_EXE_FLUSH; if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && - (IS_G4X(dev) || IS_GEN5(dev))) + (IS_G4X(req->i915) || IS_GEN5(req->i915))) cmd |= MI_INVALIDATE_ISP; ret = intel_ring_begin(req, 2); @@ -426,19 +430,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, static void ring_write_tail(struct intel_engine_cs *engine, u32 value) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; I915_WRITE_TAIL(engine, value); } u64 intel_ring_get_active_head(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u64 acthd; - if (INTEL_INFO(engine->dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), RING_ACTHD_UDW(engine->mmio_base)); - else if (INTEL_INFO(engine->dev)->gen >= 4) + else if (INTEL_GEN(dev_priv) >= 4) acthd = I915_READ(RING_ACTHD(engine->mmio_base)); else acthd = I915_READ(ACTHD); @@ -448,25 +452,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine) static void ring_setup_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 addr; addr = dev_priv->status_page_dmah->busaddr; - if (INTEL_INFO(engine->dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; I915_WRITE(HWS_PGA, addr); } static void intel_ring_setup_status_page(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; i915_reg_t mmio; /* The ring status page addresses are no longer next to the rest of * the ring registers as of gen7. */ - if (IS_GEN7(dev)) { + if (IS_GEN7(dev_priv)) { switch (engine->id) { case RCS: mmio = RENDER_HWS_PGA_GEN7; @@ -486,7 +489,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) mmio = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN6(engine->dev)) { + } else if (IS_GEN6(dev_priv)) { mmio = RING_HWS_PGA_GEN6(engine->mmio_base); } else { /* XXX: gen8 returns to sanity */ @@ -503,7 +506,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) * arises: do we still need this and if so how should we go about * invalidating the TLB? 
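The conversion that follows replaces the open-coded gen >= 6 && gen < 8 test with IS_GEN(dev_priv, 6, 7). Note the helper's range is inclusive on both ends, so the half-open 6..<8 becomes 6..7. A stand-in model of the semantics only; the real macro is mask-based in i915_drv.h:

	/* Illustrative inclusive-range model of IS_GEN(dev_priv, from, until). */
	#define GEN_IN_RANGE(gen, from, until) \
		((gen) >= (from) && (gen) <= (until))

	_Static_assert(GEN_IN_RANGE(7, 6, 7), "gen7 is inside 6..7");
	_Static_assert(!GEN_IN_RANGE(8, 6, 7), "gen8 is outside 6..7");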
*/ - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { + if (IS_GEN(dev_priv, 6, 7)) { i915_reg_t reg = RING_INSTPM(engine->mmio_base); /* ring should be idle before issuing a sync flush*/ @@ -521,9 +524,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) static bool stop_ring(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; - if (!IS_GEN2(engine->dev)) { + if (!IS_GEN2(dev_priv)) { I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { DRM_ERROR("%s : timed out trying to stop ring\n", @@ -541,7 +544,7 @@ static bool stop_ring(struct intel_engine_cs *engine) I915_WRITE_HEAD(engine, 0); engine->write_tail(engine, 0); - if (!IS_GEN2(engine->dev)) { + if (!IS_GEN2(dev_priv)) { (void)I915_READ_CTL(engine); I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); } @@ -556,8 +559,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine) static int init_ring_common(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct intel_ringbuffer *ringbuf = engine->buffer; struct drm_i915_gem_object *obj = ringbuf->obj; int ret = 0; @@ -587,7 +589,7 @@ static int init_ring_common(struct intel_engine_cs *engine) } } - if (I915_NEED_GFX_HWS(dev)) + if (I915_NEED_GFX_HWS(dev_priv)) intel_ring_setup_status_page(engine); else ring_setup_phys_status_page(engine); @@ -644,12 +646,10 @@ out: void intel_fini_pipe_control(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - if (engine->scratch.obj == NULL) return; - if (INTEL_INFO(dev)->gen >= 5) { + if (INTEL_GEN(engine->i915) >= 5) { kunmap(sg_page(engine->scratch.obj->pages->sgl)); i915_gem_object_ggtt_unpin(engine->scratch.obj); } @@ -665,10 +665,11 @@ intel_init_pipe_control(struct intel_engine_cs *engine) WARN_ON(engine->scratch.obj); - engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096); - if (engine->scratch.obj == NULL) { + engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096); + if (IS_ERR(engine->scratch.obj)) { DRM_ERROR("Failed to allocate seqno page\n"); - ret = -ENOMEM; + ret = PTR_ERR(engine->scratch.obj); + engine->scratch.obj = NULL; goto err; } @@ -702,11 +703,9 @@ err: static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) { - int ret, i; struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_workarounds *w = &dev_priv->workarounds; + struct i915_workarounds *w = &req->i915->workarounds; + int ret, i; if (w->count == 0) return 0; @@ -795,7 +794,7 @@ static int wa_add(struct drm_i915_private *dev_priv, static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, i915_reg_t reg) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct i915_workarounds *wa = &dev_priv->workarounds; const uint32_t index = wa->hw_whitelist_count[engine->id]; @@ -811,8 +810,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, static int gen8_init_workarounds(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; WA_SET_BIT_MASKED(INSTPM, 
INSTPM_FORCE_ORDERING); @@ -863,9 +861,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine) static int bdw_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen8_init_workarounds(engine); if (ret) @@ -885,16 +882,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine) /* WaForceContextSaveRestoreNonCoherent:bdw */ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ - (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); + (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); return 0; } static int chv_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen8_init_workarounds(engine); if (ret) @@ -911,38 +907,39 @@ static int chv_init_workarounds(struct intel_engine_cs *engine) static int gen9_init_workarounds(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t tmp; + struct drm_i915_private *dev_priv = engine->i915; int ret; - /* WaEnableLbsSlaRetryTimerDecrement:skl */ + /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */ + I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE)); + + /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */ I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); - /* WaDisableKillLogic:bxt,skl */ + /* WaDisableKillLogic:bxt,skl,kbl */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | ECOCHK_DIS_TLB); - /* WaClearFlowControlGpgpuContextSave:skl,bxt */ - /* WaDisablePartialInstShootdown:skl,bxt */ + /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */ + /* WaDisablePartialInstShootdown:skl,bxt,kbl */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, FLOW_CONTROL_ENABLE | PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); - /* Syncing dependencies between camera and graphics:skl,bxt */ + /* Syncing dependencies between camera and graphics:skl,bxt,kbl */ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, GEN9_DG_MIRROR_FIX_ENABLE); /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, GEN9_RHWO_OPTIMIZATION_DISABLE); /* @@ -952,52 +949,78 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) */ } - /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */ - /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */ + /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */ + /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, GEN9_ENABLE_YV12_BUGFIX | GEN9_ENABLE_GPGPU_PREEMPTION); - /* Wa4x4STCOptimizationDisable:skl,bxt */ - /* WaDisablePartialResolveInVc:skl,bxt */ + /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */ + /* 
WaDisablePartialResolveInVc:skl,bxt,kbl */ WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE | GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE)); - /* WaCcsTlbPrefetchDisable:skl,bxt */ + /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, GEN9_CCS_TLB_PREFETCH_ENABLE); /* WaDisableMaskBasedCammingInRCC:skl,bxt */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, PIXEL_MASK_CAMMING_DISABLE); - /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ - tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; - if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) || - IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) - tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; - WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); + /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | + HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE); + + /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are + * both tied to WaForceContextSaveRestoreNonCoherent + * in some hsds for skl. We keep the tie for all gen9. The + * documentation is a bit hazy and so we want to get common behaviour, + * even though there is no clear evidence we would need both on kbl/bxt. + * This area has been source of system hangs so we play it safe + * and mimic the skl regardless of what bspec says. + * + * Use Force Non-Coherent whenever executing a 3D context. This + * is a workaround for a possible hang in the unlikely event + * a TLB invalidation occurs during a PSD flush. + */ + + /* WaForceEnableNonCoherent:skl,bxt,kbl */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FORCE_NON_COHERENT); - /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */ - if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0)) + /* WaDisableHDCInvalidation:skl,bxt,kbl */ + I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | + BDW_DISABLE_HDC_INVALIDATION); + + /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */ + if (IS_SKYLAKE(dev_priv) || + IS_KABYLAKE(dev_priv) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, GEN8_SAMPLER_POWER_BYPASS_DIS); - /* WaDisableSTUnitPowerOptimization:skl,bxt */ + /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); - /* WaOCLCoherentLineFlush:skl,bxt */ + /* WaOCLCoherentLineFlush:skl,bxt,kbl */ I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES)); - /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */ + /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */ + ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); + if (ret) + return ret; + + /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */ ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); if (ret) return ret; - /* WaAllowUMDToModifyHDCChicken1:skl,bxt */ + /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */ ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1); if (ret) return ret; @@ -1007,8 +1030,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) static int skl_tune_iz_hashing(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u8 vals[3] = { 0, 0, 0 }; unsigned int i; @@ -1049,9 +1071,8 @@ static int 
skl_tune_iz_hashing(struct intel_engine_cs *engine) static int skl_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen9_init_workarounds(engine); if (ret) @@ -1062,12 +1083,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) * until D0 which is the default case so this is equivalent to * !WaDisablePerCtxtPreemptionGranularityControl:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) { + if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) { I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); } - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) { /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ I915_WRITE(FF_SLICE_CS_CHICKEN2, _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); @@ -1076,50 +1097,37 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes * involving this register should also be added to WA batch as required. */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_E0)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) /* WaDisableLSQCROPERFforOCL:skl */ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_RO_PERF_DIS); /* WaEnableGapsTsvCreditFix:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) { + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) { I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE)); } /* WaDisablePowerCompilerClockGating:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0)) WA_SET_BIT_MASKED(HIZ_CHICKEN, BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); - /* This is tied to WaForceContextSaveRestoreNonCoherent */ - if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) { - /* - *Use Force Non-Coherent whenever executing a 3D context. This - * is a workaround for a possible hang in the unlikely event - * a TLB invalidation occurs during a PSD flush. 
- */ - /* WaForceEnableNonCoherent:skl */ - WA_SET_BIT_MASKED(HDC_CHICKEN0, - HDC_FORCE_NON_COHERENT); - - /* WaDisableHDCInvalidation:skl */ - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | - BDW_DISABLE_HDC_INVALIDATION); - } - /* WaBarrierPerformanceFixDisable:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0)) WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FENCE_DEST_SLM_DISABLE | HDC_BARRIER_PERFORMANCE_DISABLE); /* WaDisableSbeCacheDispatchPortSharing:skl */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0)) WA_SET_BIT_MASKED( GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); + /* WaDisableGafsUnitClkGating:skl */ + WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + /* WaDisableLSQCROPERFforOCL:skl */ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); if (ret) @@ -1130,9 +1138,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) static int bxt_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen9_init_workarounds(engine); if (ret) @@ -1140,11 +1147,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaStoreMultiplePTEenable:bxt */ /* This is a requirement according to Hardware specification */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); /* WaSetClckGatingDisableMedia:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); } @@ -1153,8 +1160,14 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); + /* WaDisablePooledEuLoadBalancingFix:bxt */ + if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { + WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2, + GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); + } + /* WaDisableSbeCacheDispatchPortSharing:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) { WA_SET_BIT_MASKED( GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); @@ -1164,7 +1177,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */ /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ /* WaDisableLSQCROPERFforOCL:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1); if (ret) return ret; @@ -1174,44 +1187,107 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) return ret; } + /* WaProgramL3SqcReg1DefaultForPerf:bxt */ + if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) + I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | + L3_HIGH_PRIO_CREDITS(2)); + + /* WaInsertDummyPushConstPs:bxt */ + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + + return 0; +} + +static int kbl_init_workarounds(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + int ret; + + ret = gen9_init_workarounds(engine); + if (ret) + return ret; + + /* WaEnableGapsTsvCreditFix:kbl */ + 
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | + GEN9_GAPS_TSV_CREDIT_DISABLE)); + + /* WaDisableDynamicCreditSharing:kbl */ + if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) + WA_SET_BIT(GAMT_CHKN_BIT_REG, + GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); + + /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */ + if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0)) + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FENCE_DEST_SLM_DISABLE); + + /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes + * involving this register should also be added to WA batch as required. + */ + if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) + /* WaDisableLSQCROPERFforOCL:kbl */ + I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | + GEN8_LQSC_RO_PERF_DIS); + + /* WaInsertDummyPushConstPs:kbl */ + if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + + /* WaDisableGafsUnitClkGating:kbl */ + WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + + /* WaDisableSbeCacheDispatchPortSharing:kbl */ + WA_SET_BIT_MASKED( + GEN7_HALF_SLICE_CHICKEN1, + GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); + + /* WaDisableLSQCROPERFforOCL:kbl */ + ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); + if (ret) + return ret; + return 0; } int init_workarounds_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; WARN_ON(engine->id != RCS); dev_priv->workarounds.count = 0; dev_priv->workarounds.hw_whitelist_count[RCS] = 0; - if (IS_BROADWELL(dev)) + if (IS_BROADWELL(dev_priv)) return bdw_init_workarounds(engine); - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev_priv)) return chv_init_workarounds(engine); - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev_priv)) return skl_init_workarounds(engine); - if (IS_BROXTON(dev)) + if (IS_BROXTON(dev_priv)) return bxt_init_workarounds(engine); + if (IS_KABYLAKE(dev_priv)) + return kbl_init_workarounds(engine); + return 0; } static int init_render_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret = init_ring_common(engine); if (ret) return ret; /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ - if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) + if (IS_GEN(dev_priv, 4, 6)) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); /* We need to disable the AsyncFlip performance optimisations in order @@ -1220,22 +1296,22 @@ static int init_render_ring(struct intel_engine_cs *engine) * * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv */ - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) + if (IS_GEN(dev_priv, 6, 7)) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); /* Required for the hardware to program scanline values for waiting */ /* WaEnableFlushTlbInvalidationMode:snb */ - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ - if (IS_GEN7(dev)) + if (IS_GEN7(dev_priv)) I915_WRITE(GFX_MODE_GEN7, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); - if (IS_GEN6(dev)) { + if (IS_GEN6(dev_priv)) { /* From the Sandybridge PRM, volume 1 part 3, page 24: * "If this bit is set, STCunit will have LRA as replacement * policy. [...] 
This bit must be reset. LRA replacement @@ -1245,19 +1321,18 @@ static int init_render_ring(struct intel_engine_cs *engine) _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); } - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) + if (IS_GEN(dev_priv, 6, 7)) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); - if (HAS_L3_DPF(dev)) - I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); + if (HAS_L3_DPF(dev_priv)) + I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv)); return init_workarounds_ring(engine); } static void render_ring_cleanup(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; if (dev_priv->semaphore_obj) { i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); @@ -1273,13 +1348,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, { #define MBOX_UPDATE_DWORDS 8 struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; enum intel_engine_id id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; #undef MBOX_UPDATE_DWORDS @@ -1297,7 +1371,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_FLUSH_ENABLE); + PIPE_CONTROL_CS_STALL); intel_ring_emit(signaller, lower_32_bits(gtt_offset)); intel_ring_emit(signaller, upper_32_bits(gtt_offset)); intel_ring_emit(signaller, seqno); @@ -1315,13 +1389,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, { #define MBOX_UPDATE_DWORDS 6 struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; enum intel_engine_id id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; #undef MBOX_UPDATE_DWORDS @@ -1354,14 +1427,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *useless; enum intel_engine_id id; int ret, num_rings; #define MBOX_UPDATE_DWORDS 3 - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2); #undef MBOX_UPDATE_DWORDS @@ -1420,10 +1492,38 @@ gen6_add_request(struct drm_i915_gem_request *req) return 0; } -static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, +static int +gen8_render_add_request(struct drm_i915_gem_request *req) +{ + struct intel_engine_cs *engine = req->engine; + int ret; + + if (engine->semaphore.signal) + ret = engine->semaphore.signal(req, 8); + else + ret = intel_ring_begin(req, 8); + if (ret) + return ret; + + 
intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE)); + intel_ring_emit(engine, intel_hws_seqno_address(req->engine)); + intel_ring_emit(engine, 0); + intel_ring_emit(engine, i915_gem_request_get_seqno(req)); + /* We're thrashing one dword of HWS. */ + intel_ring_emit(engine, 0); + intel_ring_emit(engine, MI_USER_INTERRUPT); + intel_ring_emit(engine, MI_NOOP); + __intel_ring_advance(engine); + + return 0; +} + +static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv, u32 seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; return dev_priv->last_seqno < seqno; } @@ -1441,7 +1541,8 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, u32 seqno) { struct intel_engine_cs *waiter = waiter_req->engine; - struct drm_i915_private *dev_priv = waiter->dev->dev_private; + struct drm_i915_private *dev_priv = waiter_req->i915; + struct i915_hw_ppgtt *ppgtt; int ret; ret = intel_ring_begin(waiter_req, 4); @@ -1450,7 +1551,6 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, intel_ring_emit(waiter, MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD); intel_ring_emit(waiter, seqno); intel_ring_emit(waiter, @@ -1458,6 +1558,15 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, intel_ring_emit(waiter, upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); intel_ring_advance(waiter); + + /* When the !RCS engines idle waiting upon a semaphore, they lose their + * pagetables and we must reload them before executing the batch. + * We do this on the i915_switch_context() following the wait and + * before the dispatch. + */ + ppgtt = waiter_req->ctx->ppgtt; + if (ppgtt && waiter_req->engine->id != RCS) + ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine); return 0; } @@ -1486,7 +1595,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req, return ret; /* If seqno wrap happened, omit the wait with no-ops */ - if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { + if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) { intel_ring_emit(waiter, dw1 | wait_mbox); intel_ring_emit(waiter, seqno); intel_ring_emit(waiter, 0); @@ -1567,7 +1676,7 @@ pc_render_add_request(struct drm_i915_gem_request *req) static void gen6_seqno_barrier(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like @@ -1616,8 +1725,7 @@ pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno) static bool gen5_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1634,8 +1742,7 @@ gen5_ring_get_irq(struct intel_engine_cs *engine) static void gen5_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1647,8 +1754,7 @@ gen5_ring_put_irq(struct intel_engine_cs *engine) static bool i9xx_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = 
engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (!intel_irqs_enabled(dev_priv)) @@ -1668,8 +1774,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *engine) static void i9xx_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1684,8 +1789,7 @@ i9xx_ring_put_irq(struct intel_engine_cs *engine) static bool i8xx_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (!intel_irqs_enabled(dev_priv)) @@ -1705,8 +1809,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *engine) static void i8xx_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1759,8 +1862,7 @@ i9xx_add_request(struct drm_i915_gem_request *req) static bool gen6_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1768,10 +1870,10 @@ gen6_ring_get_irq(struct intel_engine_cs *engine) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (engine->irq_refcount++ == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) + if (HAS_L3_DPF(dev_priv) && engine->id == RCS) I915_WRITE_IMR(engine, ~(engine->irq_enable_mask | - GT_PARITY_ERROR(dev))); + GT_PARITY_ERROR(dev_priv))); else I915_WRITE_IMR(engine, ~engine->irq_enable_mask); gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); @@ -1784,14 +1886,13 @@ gen6_ring_get_irq(struct intel_engine_cs *engine) static void gen6_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--engine->irq_refcount == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) - I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); + if (HAS_L3_DPF(dev_priv) && engine->id == RCS) + I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv)); else I915_WRITE_IMR(engine, ~0); gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); @@ -1802,8 +1903,7 @@ gen6_ring_put_irq(struct intel_engine_cs *engine) static bool hsw_vebox_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1822,8 +1922,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *engine) static void hsw_vebox_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1837,8 +1936,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *engine) static bool gen8_ring_get_irq(struct intel_engine_cs *engine) { - 
struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1846,7 +1944,7 @@ gen8_ring_get_irq(struct intel_engine_cs *engine) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (engine->irq_refcount++ == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) { + if (HAS_L3_DPF(dev_priv) && engine->id == RCS) { I915_WRITE_IMR(engine, ~(engine->irq_enable_mask | GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); @@ -1863,13 +1961,12 @@ gen8_ring_get_irq(struct intel_engine_cs *engine) static void gen8_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--engine->irq_refcount == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) { + if (HAS_L3_DPF(dev_priv) && engine->id == RCS) { I915_WRITE_IMR(engine, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); } else { @@ -1991,12 +2088,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req, static void cleanup_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; if (!dev_priv->status_page_dmah) return; - drm_pci_free(engine->dev, dev_priv->status_page_dmah); + drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah); engine->status_page.page_addr = NULL; } @@ -2022,10 +2119,10 @@ static int init_status_page(struct intel_engine_cs *engine) unsigned flags; int ret; - obj = i915_gem_alloc_object(engine->dev, 4096); - if (obj == NULL) { + obj = i915_gem_object_create(engine->i915->dev, 4096); + if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate status page\n"); - return -ENOMEM; + return PTR_ERR(obj); } ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); @@ -2033,7 +2130,7 @@ static int init_status_page(struct intel_engine_cs *engine) goto err_unref; flags = 0; - if (!HAS_LLC(engine->dev)) + if (!HAS_LLC(engine->i915)) /* On g33, we cannot place HWS above 256MiB, so * restrict its pinning to the low mappable arena. 
* Though this restriction is not documented for @@ -2067,11 +2164,11 @@ err_unref: static int init_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; if (!dev_priv->status_page_dmah) { dev_priv->status_page_dmah = - drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE); + drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE); if (!dev_priv->status_page_dmah) return -ENOMEM; } @@ -2084,20 +2181,22 @@ static int init_phys_status_page(struct intel_engine_cs *engine) void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) { + GEM_BUG_ON(ringbuf->vma == NULL); + GEM_BUG_ON(ringbuf->virtual_start == NULL); + if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen) i915_gem_object_unpin_map(ringbuf->obj); else - iounmap(ringbuf->virtual_start); + i915_vma_unpin_iomap(ringbuf->vma); ringbuf->virtual_start = NULL; - ringbuf->vma = NULL; + i915_gem_object_ggtt_unpin(ringbuf->obj); + ringbuf->vma = NULL; } -int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, +int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, struct intel_ringbuffer *ringbuf) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_object *obj = ringbuf->obj; /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ unsigned flags = PIN_OFFSET_BIAS | 4096; @@ -2131,10 +2230,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, /* Access through the GTT requires the device to be awake. */ assert_rpm_wakelock_held(dev_priv); - addr = ioremap_wc(ggtt->mappable_base + - i915_gem_obj_ggtt_offset(obj), ringbuf->size); - if (addr == NULL) { - ret = -ENOMEM; + addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj)); + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); goto err_unpin; } } @@ -2163,9 +2261,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev, if (!HAS_LLC(dev)) obj = i915_gem_object_create_stolen(dev, ringbuf->size); if (obj == NULL) - obj = i915_gem_alloc_object(dev, ringbuf->size); - if (obj == NULL) - return -ENOMEM; + obj = i915_gem_object_create(dev, ringbuf->size); + if (IS_ERR(obj)) + return PTR_ERR(obj); /* mark ring buffers as read-only from GPU side by default */ obj->gt_ro = 1; @@ -2197,13 +2295,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) * of the buffer. 
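Two allocator conversions run through these hunks: i915_gem_alloc_object(), which returned NULL on failure, becomes i915_gem_object_create(), which returns an ERR_PTR-encoded errno, and i915_vma_pin_iomap() reports failure the same way. A minimal self-contained sketch of the kernel idiom the callers switch to (foo is a hypothetical stand-in, not a driver type):

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int payload; };

/* The errno travels inside the pointer, so there is no NULL/errno pair. */
static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	return f ? f : ERR_PTR(-ENOMEM);
}

static int foo_use(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))			/* was: if (!f) */
		return PTR_ERR(f);	/* was: return -ENOMEM */

	kfree(f);
	return 0;
}

This is why the status-page and ringbuffer hunks swap the obj == NULL tests for IS_ERR(obj)/PTR_ERR(obj), and can propagate richer errors than a blanket -ENOMEM.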
*/ ring->effective_size = size; - if (IS_I830(engine->dev) || IS_845G(engine->dev)) + if (IS_I830(engine->i915) || IS_845G(engine->i915)) ring->effective_size -= 2 * CACHELINE_BYTES; ring->last_retired_head = -1; intel_ring_update_space(ring); - ret = intel_alloc_ringbuffer_obj(engine->dev, ring); + ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring); if (ret) { DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", engine->name, ret); @@ -2226,12 +2324,13 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring) static int intel_init_ring_buffer(struct drm_device *dev, struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_ringbuffer *ringbuf; int ret; WARN_ON(engine->buffer); - engine->dev = dev; + engine->i915 = dev_priv; INIT_LIST_HEAD(&engine->active_list); INIT_LIST_HEAD(&engine->request_list); INIT_LIST_HEAD(&engine->execlist_queue); @@ -2249,7 +2348,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, } engine->buffer = ringbuf; - if (I915_NEED_GFX_HWS(dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { ret = init_status_page(engine); if (ret) goto error; @@ -2260,7 +2359,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, goto error; } - ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); + ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf); if (ret) { DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", engine->name, ret); @@ -2286,11 +2385,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) if (!intel_engine_initialized(engine)) return; - dev_priv = to_i915(engine->dev); + dev_priv = engine->i915; if (engine->buffer) { intel_stop_engine(engine); - WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); + WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); intel_unpin_ringbuffer_obj(engine->buffer); intel_ringbuffer_free(engine->buffer); @@ -2300,7 +2399,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) if (engine->cleanup) engine->cleanup(engine); - if (I915_NEED_GFX_HWS(engine->dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { cleanup_status_page(engine); } else { WARN_ON(engine->id != RCS); @@ -2309,7 +2408,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) i915_cmd_parser_fini_ring(engine); i915_gem_batch_pool_fini(&engine->batch_pool); - engine->dev = NULL; + engine->i915 = NULL; } int intel_engine_idle(struct intel_engine_cs *engine) @@ -2332,46 +2431,22 @@ int intel_engine_idle(struct intel_engine_cs *engine) int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) { - request->ringbuf = request->engine->buffer; - return 0; -} + int ret; -int intel_ring_reserve_space(struct drm_i915_gem_request *request) -{ - /* - * The first call merely notes the reserve request and is common for - * all back ends. The subsequent localised _begin() call actually - * ensures that the reservation is available. Without the begin, if - * the request creator immediately submitted the request without - * adding any commands to it then there might not actually be - * sufficient room for the submission commands. + /* Flush enough space to reduce the likelihood of waiting after + * we start building the request - in which case we will just + * have to repeat work. 
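On the mapping side, the pin/unpin hunks a little above pair i915_vma_pin_iomap() with i915_vma_unpin_iomap() instead of open-coding ioremap_wc()/iounmap() against the aperture. Condensed from those hunks (GTT path only; on LLC platforms the object is unmapped with i915_gem_object_unpin_map() instead), teardown mirrors setup in reverse:

	void __iomem *addr;

	/* map: the vma is already pinned in the GGTT; grab its iomapping */
	addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	ringbuf->virtual_start = addr;

	/* unmap: release the iomap first, then the GGTT pin */
	i915_vma_unpin_iomap(ringbuf->vma);
	ringbuf->virtual_start = NULL;
	i915_gem_object_ggtt_unpin(ringbuf->obj);
	ringbuf->vma = NULL;

The new GEM_BUG_ON()s in intel_unpin_ringbuffer_obj() encode the contract: unpinning is only legal while vma and virtual_start are both set.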
*/ - intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); + request->reserved_space += LEGACY_REQUEST_SIZE; - return intel_ring_begin(request, 0); -} - -void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) -{ - GEM_BUG_ON(ringbuf->reserved_size); - ringbuf->reserved_size = size; -} - -void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) -{ - GEM_BUG_ON(!ringbuf->reserved_size); - ringbuf->reserved_size = 0; -} + request->ringbuf = request->engine->buffer; -void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) -{ - GEM_BUG_ON(!ringbuf->reserved_size); - ringbuf->reserved_size = 0; -} + ret = intel_ring_begin(request, 0); + if (ret) + return ret; -void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) -{ - GEM_BUG_ON(ringbuf->reserved_size); + request->reserved_space -= LEGACY_REQUEST_SIZE; + return 0; } static int wait_for_space(struct drm_i915_gem_request *req, int bytes) @@ -2393,7 +2468,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) * * See also i915_gem_request_alloc() and i915_add_request(). */ - GEM_BUG_ON(!ringbuf->reserved_size); + GEM_BUG_ON(!req->reserved_space); list_for_each_entry(target, &engine->request_list, list) { unsigned space; @@ -2428,7 +2503,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) int total_bytes, wait_bytes; bool need_wrap = false; - total_bytes = bytes + ringbuf->reserved_size; + total_bytes = bytes + req->reserved_space; if (unlikely(bytes > remain_usable)) { /* @@ -2444,7 +2519,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) * and only need to effectively wait for the reserved * size space from the start of ringbuffer. */ - wait_bytes = remain_actual + ringbuf->reserved_size; + wait_bytes = remain_actual + req->reserved_space; } else { /* No wrapping required, just waiting. */ wait_bytes = total_bytes; @@ -2501,7 +2576,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req) void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; /* Our semaphore implementation is strictly monotonic (i.e. we proceed * so long as the semaphore value in the register/page is greater @@ -2511,7 +2586,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) * the semaphore value, then when the seqno moves backwards all * future waits will complete instantly (causing rendering corruption). 
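Pulling the reservation rework together: the four global intel_ring_reserved_space_*() entry points and ringbuf->reserved_size collapse into one per-request counter, req->reserved_space, which intel_ring_begin() folds into every wait (total_bytes = bytes + req->reserved_space, and wait_bytes = remain_actual + req->reserved_space when wrapping). The add/subtract pair in intel_ring_alloc_request_extras() above then reads, in condensed form (a restatement of the hunks, not new code):

	/* Temporarily enlarge the reservation and flush that much space in
	 * one go, so that intel_ring_begin() calls made while building the
	 * request are unlikely to stall and force work to be repeated.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	ret = intel_ring_begin(request, 0);	/* waits, wrapping if needed */
	if (ret)
		return ret;

	/* Back to the standing reservation, which keeps guaranteeing room
	 * for the final add_request emission on every later ring_begin().
	 */
	request->reserved_space -= LEGACY_REQUEST_SIZE;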
*/ - if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) { + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); if (HAS_VEBOX(dev_priv)) @@ -2537,7 +2612,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, u32 value) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; /* Every tail move must follow the sequence below */ @@ -2579,7 +2654,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, return ret; cmd = MI_FLUSH_DW; - if (INTEL_INFO(engine->dev)->gen >= 8) + if (INTEL_GEN(req->i915) >= 8) cmd += 1; /* We always require a command barrier so that subsequent @@ -2601,7 +2676,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, intel_ring_emit(engine, cmd); intel_ring_emit(engine, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - if (INTEL_INFO(engine->dev)->gen >= 8) { + if (INTEL_GEN(req->i915) >= 8) { intel_ring_emit(engine, 0); /* upper addr */ intel_ring_emit(engine, 0); /* value */ } else { @@ -2692,7 +2767,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 invalidate, u32 flush) { struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; uint32_t cmd; int ret; @@ -2701,7 +2775,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, return ret; cmd = MI_FLUSH_DW; - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(req->i915) >= 8) cmd += 1; /* We always require a command barrier so that subsequent @@ -2722,7 +2796,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, intel_ring_emit(engine, cmd); intel_ring_emit(engine, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(req->i915) >= 8) { intel_ring_emit(engine, 0); /* upper addr */ intel_ring_emit(engine, 0); /* value */ } else { @@ -2747,10 +2821,10 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->hw_id = 0; engine->mmio_base = RENDER_RING_BASE; - if (INTEL_INFO(dev)->gen >= 8) { - if (i915_semaphore_is_enabled(dev)) { - obj = i915_gem_alloc_object(dev, 4096); - if (obj == NULL) { + if (INTEL_GEN(dev_priv) >= 8) { + if (i915_semaphore_is_enabled(dev_priv)) { + obj = i915_gem_object_create(dev, 4096); + if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate semaphore bo. 
Disabling semaphores\n"); i915.semaphores = 0; } else { @@ -2766,25 +2840,24 @@ int intel_init_render_ring_buffer(struct drm_device *dev) } engine->init_context = intel_rcs_ctx_init; - engine->add_request = gen6_add_request; + engine->add_request = gen8_render_add_request; engine->flush = gen8_render_ring_flush; engine->irq_get = gen8_ring_get_irq; engine->irq_put = gen8_ring_put_irq; engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; - engine->irq_seqno_barrier = gen6_seqno_barrier; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { WARN_ON(!dev_priv->semaphore_obj); engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_rcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); } - } else if (INTEL_INFO(dev)->gen >= 6) { + } else if (INTEL_GEN(dev_priv) >= 6) { engine->init_context = intel_rcs_ctx_init; engine->add_request = gen6_add_request; engine->flush = gen7_render_ring_flush; - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) engine->flush = gen6_render_ring_flush; engine->irq_get = gen6_ring_get_irq; engine->irq_put = gen6_ring_put_irq; @@ -2792,7 +2865,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->irq_seqno_barrier = gen6_seqno_barrier; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen6_ring_sync; engine->semaphore.signal = gen6_signal; /* @@ -2813,7 +2886,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; } - } else if (IS_GEN5(dev)) { + } else if (IS_GEN5(dev_priv)) { engine->add_request = pc_render_add_request; engine->flush = gen4_render_ring_flush; engine->get_seqno = pc_render_get_seqno; @@ -2824,13 +2897,13 @@ int intel_init_render_ring_buffer(struct drm_device *dev) GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; } else { engine->add_request = i9xx_add_request; - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_GEN(dev_priv) < 4) engine->flush = gen2_render_ring_flush; else engine->flush = gen4_render_ring_flush; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (IS_GEN2(dev)) { + if (IS_GEN2(dev_priv)) { engine->irq_get = i8xx_ring_get_irq; engine->irq_put = i8xx_ring_put_irq; } else { @@ -2841,15 +2914,15 @@ int intel_init_render_ring_buffer(struct drm_device *dev) } engine->write_tail = ring_write_tail; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; - else if (IS_GEN8(dev)) + else if (IS_GEN8(dev_priv)) engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - else if (INTEL_INFO(dev)->gen >= 6) + else if (INTEL_GEN(dev_priv) >= 6) engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - else if (INTEL_INFO(dev)->gen >= 4) + else if (INTEL_GEN(dev_priv) >= 4) engine->dispatch_execbuffer = i965_dispatch_execbuffer; - else if (IS_I830(dev) || IS_845G(dev)) + else if (IS_I830(dev_priv) || IS_845G(dev_priv)) engine->dispatch_execbuffer = i830_dispatch_execbuffer; else engine->dispatch_execbuffer = i915_dispatch_execbuffer; @@ -2857,11 +2930,11 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->cleanup = render_ring_cleanup; /* Workaround batchbuffer to combat CS tlb bug. 
*/ - if (HAS_BROKEN_CS_TLB(dev)) { - obj = i915_gem_alloc_object(dev, I830_WA_SIZE); - if (obj == NULL) { + if (HAS_BROKEN_CS_TLB(dev_priv)) { + obj = i915_gem_object_create(dev, I830_WA_SIZE); + if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate batch bo\n"); - return -ENOMEM; + return PTR_ERR(obj); } ret = i915_gem_obj_ggtt_pin(obj, 0, 0); @@ -2879,7 +2952,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) if (ret) return ret; - if (INTEL_INFO(dev)->gen >= 5) { + if (INTEL_GEN(dev_priv) >= 5) { ret = intel_init_pipe_control(engine); if (ret) return ret; @@ -2899,24 +2972,24 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) engine->hw_id = 1; engine->write_tail = ring_write_tail; - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_GEN(dev_priv) >= 6) { engine->mmio_base = GEN6_BSD_RING_BASE; /* gen6 bsd needs a special wa for tail updates */ - if (IS_GEN6(dev)) + if (IS_GEN6(dev_priv)) engine->write_tail = gen6_bsd_ring_write_tail; engine->flush = gen6_bsd_ring_flush; engine->add_request = gen6_add_request; engine->irq_seqno_barrier = gen6_seqno_barrier; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; engine->irq_get = gen8_ring_get_irq; engine->irq_put = gen8_ring_put_irq; engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_xcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); @@ -2927,7 +3000,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) engine->irq_put = gen6_ring_put_irq; engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen6_ring_sync; engine->semaphore.signal = gen6_signal; engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; @@ -2948,7 +3021,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) engine->add_request = i9xx_add_request; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (IS_GEN5(dev)) { + if (IS_GEN5(dev_priv)) { engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; engine->irq_get = gen5_ring_get_irq; engine->irq_put = gen5_ring_put_irq; @@ -2990,7 +3063,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) engine->irq_put = gen8_ring_put_irq; engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_xcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); @@ -3017,13 +3090,13 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) engine->irq_seqno_barrier = gen6_seqno_barrier; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; engine->irq_get = gen8_ring_get_irq; engine->irq_put = gen8_ring_put_irq; engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_xcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); @@ -3033,7 +3106,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) 
engine->irq_get = gen6_ring_get_irq; engine->irq_put = gen6_ring_put_irq; engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.signal = gen6_signal; engine->semaphore.sync_to = gen6_ring_sync; /* @@ -3078,13 +3151,13 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; engine->irq_get = gen8_ring_get_irq; engine->irq_put = gen8_ring_put_irq; engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_xcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); @@ -3094,7 +3167,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) engine->irq_get = hsw_vebox_get_irq; engine->irq_put = hsw_vebox_put_irq; engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen6_ring_sync; engine->semaphore.signal = gen6_signal; engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index ff126485d398..b33c876fed20 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -107,7 +107,6 @@ struct intel_ringbuffer { int space; int size; int effective_size; - int reserved_size; /** We track the position of the requests in the ring buffer, and * when each is retired we increment last_retired_head as the GPU @@ -120,7 +119,7 @@ struct intel_ringbuffer { u32 last_retired_head; }; -struct intel_context; +struct i915_gem_context; struct drm_i915_reg_table; /* @@ -142,7 +141,8 @@ struct i915_ctx_workarounds { struct drm_i915_gem_object *obj; }; -struct intel_engine_cs { +struct intel_engine_cs { + struct drm_i915_private *i915; const char *name; enum intel_engine_id { RCS = 0, @@ -157,7 +157,6 @@ struct intel_engine_cs { unsigned int hw_id; unsigned int guc_id; /* XXX same as hw_id? 
*/ u32 mmio_base; - struct drm_device *dev; struct intel_ringbuffer *buffer; struct list_head buffers; @@ -268,7 +267,6 @@ struct intel_engine_cs { struct tasklet_struct irq_tasklet; spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */ struct list_head execlist_queue; - struct list_head execlist_retired_req_list; unsigned int fw_domains; unsigned int next_context_status_buffer; unsigned int idle_lite_restore_wa; @@ -312,7 +310,7 @@ struct intel_engine_cs { wait_queue_head_t irq_queue; - struct intel_context *last_context; + struct i915_gem_context *last_context; struct intel_ring_hangcheck hangcheck; @@ -352,7 +350,7 @@ struct intel_engine_cs { static inline bool intel_engine_initialized(struct intel_engine_cs *engine) { - return engine->dev != NULL; + return engine->i915 != NULL; } static inline unsigned @@ -427,7 +425,7 @@ intel_write_status_page(struct intel_engine_cs *engine, struct intel_ringbuffer * intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size); -int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, +int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, struct intel_ringbuffer *ringbuf); void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); void intel_ringbuffer_free(struct intel_ringbuffer *ring); @@ -486,26 +484,15 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) /* * Arbitrary size for largest possible 'add request' sequence. The code paths * are complex and variable. Empirical measurement shows that the worst case - * is ILK at 136 words. Reserving too much is better than reserving too little - * as that allows for corner cases that might have been missed. So the figure - * has been rounded up to 160 words. + * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However, + * we need to allocate double the largest single packet within that emission + * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW). */ -#define MIN_SPACE_FOR_ADD_REQUEST 160 +#define MIN_SPACE_FOR_ADD_REQUEST 336 -/* - * Reserve space in the ring to guarantee that the i915_add_request() call - * will always have sufficient room to do its stuff. The request creation - * code calls this automatically. - */ -void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size); -/* Cancel the reservation, e.g. because the request is being discarded. */ -void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf); -/* Use the reserved space - for use by i915_add_request() only. */ -void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf); -/* Finish with the reserved space - for use by i915_add_request() only. 
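The new MIN_SPACE_FOR_ADD_REQUEST value follows directly from the dword counts in the comment above (one dword is 4 bytes); a worked check of the arithmetic:

/* BDW worst-case add_request emission:
 *	6 + 6 + 36 dwords = 48 dwords  ->  48 * 4 = 192 bytes
 * doubling the largest single packet (36 dwords) for tail wraparound:
 *	6 + 6 + 2 * 36    = 84 dwords  ->  84 * 4 = 336 bytes
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336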
*/ -void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf); - -/* Legacy ringbuffer specific portion of reservation code: */ -int intel_ring_reserve_space(struct drm_i915_gem_request *request); +static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine) +{ + return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR; +} #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 7fb1da4e7fc3..e856d49d6dc3 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -65,6 +65,9 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, int power_well_id); +static struct i915_power_well * +lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id); + const char * intel_display_power_domain_str(enum intel_display_power_domain domain) { @@ -151,6 +154,23 @@ static void intel_power_well_disable(struct drm_i915_private *dev_priv, power_well->ops->disable(dev_priv, power_well); } +static void intel_power_well_get(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (!power_well->count++) + intel_power_well_enable(dev_priv, power_well); +} + +static void intel_power_well_put(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + WARN(!power_well->count, "Use count on power well %s is already zero", + power_well->name); + + if (!--power_well->count) + intel_power_well_disable(dev_priv, power_well); +} + /* * We should only use the power well if we explicitly asked the hardware to * enable it, so check if it's enabled and also check if we've requested it to @@ -419,6 +439,16 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, BIT(POWER_DOMAIN_MODESET) | \ BIT(POWER_DOMAIN_AUX_A) | \ BIT(POWER_DOMAIN_INIT)) +#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ + BIT(POWER_DOMAIN_AUX_A) | \ + BIT(POWER_DOMAIN_INIT)) +#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ + BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT(POWER_DOMAIN_AUX_B) | \ + BIT(POWER_DOMAIN_AUX_C) | \ + BIT(POWER_DOMAIN_INIT)) static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) { @@ -800,21 +830,99 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv, skl_set_power_well(dev_priv, power_well, false); } +static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well) +{ + enum skl_disp_power_wells power_well_id = power_well->data; + + return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0; +} + +static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum skl_disp_power_wells power_well_id = power_well->data; + struct i915_power_well *cmn_a_well; + + if (power_well_id == BXT_DPIO_CMN_BC) { + /* + * We need to copy the GRC calibration value from the eDP PHY, + * so make sure it's powered up. 
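The new intel_power_well_get()/put() helpers centralise the refcount discipline that the runtime-pm code below switches to: enable the hardware only on the 0 to 1 transition, disable it only on 1 to 0, and WARN on underflow. Condensed from the hunks (a restatement, not new code):

	/* get: the first user powers the well up */
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);

	/* put: the last user powers it down; a put without a get is a bug */
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->name);
	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);

The first consumer with an ordering dependency follows immediately: enabling the BXT_DPIO_CMN_BC well takes a transient reference on BXT_DPIO_CMN_A, so the eDP PHY stays powered while its GRC calibration value is copied, then drops it again.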
+ */ + cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A); + intel_power_well_get(dev_priv, cmn_a_well); + } + + bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well)); + + if (power_well_id == BXT_DPIO_CMN_BC) + intel_power_well_put(dev_priv, cmn_a_well); +} + +static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well)); +} + +static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return bxt_ddi_phy_is_enabled(dev_priv, + bxt_power_well_to_phy(power_well)); +} + +static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (power_well->count > 0) + bxt_dpio_cmn_power_well_enable(dev_priv, power_well); + else + bxt_dpio_cmn_power_well_disable(dev_priv, power_well); +} + + +static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *power_well; + + power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A); + if (power_well->count > 0) + bxt_ddi_phy_verify_state(dev_priv, + bxt_power_well_to_phy(power_well)); + + power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC); + if (power_well->count > 0) + bxt_ddi_phy_verify_state(dev_priv, + bxt_power_well_to_phy(power_well)); +} + static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0; } +static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) +{ + u32 tmp = I915_READ(DBUF_CTL); + + WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) != + (DBUF_POWER_STATE | DBUF_POWER_REQUEST), + "Unexpected DBuf power state (0x%08x)\n", tmp); +} + static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - if (IS_BROXTON(dev_priv)) { - broxton_cdclk_verify_state(dev_priv); - broxton_ddi_phy_verify_state(dev_priv); - } + WARN_ON(dev_priv->cdclk_freq != + dev_priv->display.get_display_clock_speed(dev_priv->dev)); + + gen9_assert_dbuf_enabled(dev_priv); + + if (IS_BROXTON(dev_priv)) + bxt_verify_ddi_phy_power_wells(dev_priv); } static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, @@ -948,6 +1056,11 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) */ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); I915_WRITE(CBR1_VLV, 0); + + WARN_ON(dev_priv->rawclk_freq == 0); + + I915_WRITE(RAWCLK_FREQ_VLV, + DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000)); } static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) @@ -1501,10 +1614,8 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv, struct i915_power_well *power_well; int i; - for_each_power_well(i, power_well, BIT(domain), power_domains) { - if (!power_well->count++) - intel_power_well_enable(dev_priv, power_well); - } + for_each_power_well(i, power_well, BIT(domain), power_domains) + intel_power_well_get(dev_priv, power_well); power_domains->domain_use_count[domain]++; } @@ -1598,14 +1709,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, intel_display_power_domain_str(domain)); power_domains->domain_use_count[domain]--; - for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { - WARN(!power_well->count, "Use count on power 
well %s is already zero", - power_well->name); - - if (!--power_well->count) - intel_power_well_disable(dev_priv, power_well); - } + for_each_power_well_rev(i, power_well, BIT(domain), power_domains) + intel_power_well_put(dev_priv, power_well); mutex_unlock(&power_domains->lock); @@ -1776,6 +1881,13 @@ static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { .is_enabled = gen9_dc_off_power_well_enabled, }; +static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { + .sync_hw = bxt_dpio_cmn_power_well_sync_hw, + .enable = bxt_dpio_cmn_power_well_enable, + .disable = bxt_dpio_cmn_power_well_disable, + .is_enabled = bxt_dpio_cmn_power_well_enabled, +}; + static struct i915_power_well hsw_power_wells[] = { { .name = "always-on", @@ -2012,6 +2124,18 @@ static struct i915_power_well bxt_power_wells[] = { .ops = &skl_power_well_ops, .data = SKL_DISP_PW_2, }, + { + .name = "dpio-common-a", + .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .data = BXT_DPIO_CMN_A, + }, + { + .name = "dpio-common-bc", + .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .data = BXT_DPIO_CMN_BC, + }, }; static int @@ -2171,6 +2295,28 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) mutex_unlock(&power_domains->lock); } +static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); + POSTING_READ(DBUF_CTL); + + udelay(10); + + if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) + DRM_ERROR("DBuf power enable timeout\n"); +} + +static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); + POSTING_READ(DBUF_CTL); + + udelay(10); + + if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) + DRM_ERROR("DBuf power disable timeout!\n"); +} + static void skl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { @@ -2195,12 +2341,11 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); - if (!resume) - return; - skl_init_cdclk(dev_priv); - if (dev_priv->csr.dmc_payload) + gen9_dbuf_enable(dev_priv); + + if (resume && dev_priv->csr.dmc_payload) intel_csr_load_program(dev_priv); } @@ -2211,6 +2356,8 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv) gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + gen9_dbuf_disable(dev_priv); + skl_uninit_cdclk(dev_priv); /* The spec doesn't call for removing the reset handshake flag */ @@ -2254,11 +2401,9 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); - broxton_init_cdclk(dev_priv); - broxton_ddi_phy_init(dev_priv); + bxt_init_cdclk(dev_priv); - broxton_cdclk_verify_state(dev_priv); - broxton_ddi_phy_verify_state(dev_priv); + gen9_dbuf_enable(dev_priv); if (resume && dev_priv->csr.dmc_payload) intel_csr_load_program(dev_priv); @@ -2271,8 +2416,9 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv) gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - broxton_ddi_phy_uninit(dev_priv); - broxton_uninit_cdclk(dev_priv); + gen9_dbuf_disable(dev_priv); + + bxt_uninit_cdclk(dev_priv); /* The spec doesn't call for removing the reset handshake flag */ @@ -2403,6 +2549,7 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) /** * intel_power_domains_init_hw - initialize hardware power domain state * @dev_priv: i915 device instance + * @resume: Called from resume code paths or not * * This 
function initializes the hardware power domain state and enables all * power domains using intel_display_set_init_power(). diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 2128fae5687d..02b4a6695528 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2177,12 +2177,23 @@ done: #undef CHECK_PROPERTY } +static void +intel_sdvo_connector_unregister(struct drm_connector *connector) +{ + struct intel_sdvo *sdvo = intel_attached_sdvo(connector); + + sysfs_remove_link(&connector->kdev->kobj, + sdvo->ddc.dev.kobj.name); + intel_connector_unregister(connector); +} + static const struct drm_connector_funcs intel_sdvo_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = intel_sdvo_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_sdvo_set_property, .atomic_get_property = intel_connector_atomic_get_property, + .early_unregister = intel_sdvo_connector_unregister, .destroy = intel_sdvo_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -2191,7 +2202,6 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { .get_modes = intel_sdvo_get_modes, .mode_valid = intel_sdvo_mode_valid, - .best_encoder = intel_best_encoder, }; static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) @@ -2346,20 +2356,6 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) return 0x72; } -static void -intel_sdvo_connector_unregister(struct intel_connector *intel_connector) -{ - struct drm_connector *drm_connector; - struct intel_sdvo *sdvo_encoder; - - drm_connector = &intel_connector->base; - sdvo_encoder = intel_attached_sdvo(&intel_connector->base); - - sysfs_remove_link(&drm_connector->kdev->kobj, - sdvo_encoder->ddc.dev.kobj.name); - intel_connector_unregister(intel_connector); -} - static int intel_sdvo_connector_init(struct intel_sdvo_connector *connector, struct intel_sdvo *encoder) @@ -2382,7 +2378,6 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, connector->base.base.doublescan_allowed = 0; connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; connector->base.get_hw_state = intel_sdvo_connector_get_hw_state; - connector->base.unregister = intel_sdvo_connector_unregister; intel_connector_attach_encoder(&connector->base, &encoder->base); ret = drm_connector_register(drm_connector); @@ -2981,7 +2976,7 @@ bool intel_sdvo_init(struct drm_device *dev, intel_encoder = &intel_sdvo->base; intel_encoder->type = INTEL_OUTPUT_SDVO; drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0, - NULL); + "SDVO %c", port_name(port)); /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 0f3e2303e0e9..fc654173c491 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -80,9 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, */ void intel_pipe_update_start(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; - enum pipe pipe = crtc->pipe; long timeout = msecs_to_jiffies_timeout(1); int scanline, min, max, vblank_start; wait_queue_head_t 
*wq = drm_crtc_vblank_waitqueue(&crtc->base); @@ -139,8 +137,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc) crtc->debug.scanline_start = scanline; crtc->debug.start_vbl_time = ktime_get(); - crtc->debug.start_vbl_count = - dev->driver->get_vblank_counter(dev, pipe); + crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); trace_i915_pipe_update_vblank_evaded(crtc); } @@ -154,16 +151,35 @@ void intel_pipe_update_start(struct intel_crtc *crtc) * re-enables interrupts and verifies the update was actually completed * before a vblank using the value of @start_vbl_count. */ -void intel_pipe_update_end(struct intel_crtc *crtc) +void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work) { - struct drm_device *dev = crtc->base.dev; enum pipe pipe = crtc->pipe; int scanline_end = intel_get_crtc_scanline(crtc); - u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe); + u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc); ktime_t end_vbl_time = ktime_get(); + if (work) { + work->flip_queued_vblank = end_vbl_count; + smp_mb__before_atomic(); + atomic_set(&work->pending, 1); + } + trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end); + /* We're still in the vblank-evade critical section, this can't race. + * Would be slightly nice to just grab the vblank count and arm the + * event outside of the critical section - the spinlock might spin for a + * while ... */ + if (crtc->base.state->event) { + WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0); + + spin_lock(&crtc->base.dev->event_lock); + drm_crtc_arm_vblank_event(&crtc->base, crtc->base.state->event); + spin_unlock(&crtc->base.dev->event_lock); + + crtc->base.state->event = NULL; + } + local_irq_enable(); if (crtc->debug.start_vbl_count && @@ -203,8 +219,6 @@ skl_update_plane(struct drm_plane *drm_plane, uint32_t y = plane_state->src.y1 >> 16; uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; - const struct intel_scaler *scaler = - &crtc_state->scaler_state.scalers[plane_state->scaler_id]; plane_ctl = PLANE_CTL_ENABLE | PLANE_CTL_PIPE_GAMMA_ENABLE | @@ -260,13 +274,16 @@ skl_update_plane(struct drm_plane *drm_plane, /* program plane scaler */ if (plane_state->scaler_id >= 0) { - uint32_t ps_ctrl = 0; int scaler_id = plane_state->scaler_id; + const struct intel_scaler *scaler; DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane, PS_PLANE_SEL(plane)); - ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode; - I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); + + scaler = &crtc_state->scaler_state.scalers[scaler_id]; + + I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), + PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode); I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), @@ -1111,10 +1128,18 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) possible_crtcs = (1 << pipe); - ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, - &intel_plane_funcs, - plane_formats, num_plane_formats, - DRM_PLANE_TYPE_OVERLAY, NULL); + if (INTEL_INFO(dev)->gen >= 9) + ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, + &intel_plane_funcs, + plane_formats, num_plane_formats, + DRM_PLANE_TYPE_OVERLAY, + "plane %d%c", plane + 2, pipe_name(pipe)); + else + ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, + &intel_plane_funcs, + plane_formats, 
num_plane_formats, + DRM_PLANE_TYPE_OVERLAY, + "sprite %c", sprite_name(pipe, plane)); if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 223129d3c765..4ce70a9f9df2 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1501,6 +1501,7 @@ out: static const struct drm_connector_funcs intel_tv_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = intel_tv_detect, + .early_unregister = intel_connector_unregister, .destroy = intel_tv_destroy, .set_property = intel_tv_set_property, .atomic_get_property = intel_connector_atomic_get_property, @@ -1512,7 +1513,6 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = { static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { .mode_valid = intel_tv_mode_valid, .get_modes = intel_tv_get_modes, - .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_tv_enc_funcs = { @@ -1591,7 +1591,7 @@ intel_tv_init(struct drm_device *dev) DRM_MODE_CONNECTOR_SVIDEO); drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, - DRM_MODE_ENCODER_TVDAC, NULL); + DRM_MODE_ENCODER_TVDAC, "TV"); intel_encoder->compute_config = intel_tv_compute_config; intel_encoder->get_config = intel_tv_get_config; @@ -1600,7 +1600,6 @@ intel_tv_init(struct drm_device *dev) intel_encoder->disable = intel_disable_tv; intel_encoder->get_hw_state = intel_tv_get_hw_state; intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector->unregister = intel_connector_unregister; intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_TVOUT; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 4f1dfe616856..c1ca458d688e 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) return HRTIMER_NORESTART; } -void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) +void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, + bool restore) { - struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; struct intel_uncore_forcewake_domain *domain; int retry_count = 100; @@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) if (fw) dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); - if (IS_GEN6(dev) || IS_GEN7(dev)) + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv); } @@ -400,43 +400,42 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) return false; } -static void __intel_uncore_early_sanitize(struct drm_device *dev, +static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, bool restore_forcewake) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* clear out unclaimed reg detection bit */ if (check_for_unclaimed_mmio(dev_priv)) DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); /* clear out old GT FIFO errors */ - if (IS_GEN6(dev) || IS_GEN7(dev)) + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) __raw_i915_write32(dev_priv, GTFIFODBG, __raw_i915_read32(dev_priv, GTFIFODBG)); /* WaDisableShadowRegForCpd:chv */ - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { __raw_i915_write32(dev_priv, GTFIFOCTL, __raw_i915_read32(dev_priv, GTFIFOCTL) | GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | GT_FIFO_CTL_RC6_POLICY_STALL); } - 
intel_uncore_forcewake_reset(dev, restore_forcewake); + intel_uncore_forcewake_reset(dev_priv, restore_forcewake); } -void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) +void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, + bool restore_forcewake) { - __intel_uncore_early_sanitize(dev, restore_forcewake); - i915_check_and_clear_faults(dev); + __intel_uncore_early_sanitize(dev_priv, restore_forcewake); + i915_check_and_clear_faults(dev_priv); } -void intel_uncore_sanitize(struct drm_device *dev) +void intel_uncore_sanitize(struct drm_i915_private *dev_priv) { - i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); + i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6); /* BIOS often leaves RC6 enabled, but disable it for hw init */ - intel_disable_gt_powersave(dev); + intel_disable_gt_powersave(dev_priv); } static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, @@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, fw_domain_reset(d); } -static void intel_uncore_fw_domains_init(struct drm_device *dev) +static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (INTEL_INFO(dev_priv)->gen <= 5) return; - if (IS_GEN9(dev)) { + if (IS_GEN9(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get; dev_priv->uncore.funcs.force_wake_put = fw_domains_put; fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, @@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) FORCEWAKE_ACK_BLITTER_GEN9); fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); - } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get; - if (!IS_CHERRYVIEW(dev)) + if (!IS_CHERRYVIEW(dev_priv)) dev_priv->uncore.funcs.force_wake_put = fw_domains_put_with_fifo; else @@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); - } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get_with_thread_status; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) dev_priv->uncore.funcs.force_wake_put = fw_domains_put_with_fifo; else dev_priv->uncore.funcs.force_wake_put = fw_domains_put; fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_ACK_HSW); - } else if (IS_IVYBRIDGE(dev)) { + } else if (IS_IVYBRIDGE(dev_priv)) { u32 ecobus; /* IVB configs may use multi-threaded forcewake */ @@ -1302,11 +1299,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_MT_ACK); - mutex_lock(&dev->struct_mutex); fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL); ecobus = __raw_i915_read32(dev_priv, ECOBUS); fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL); - mutex_unlock(&dev->struct_mutex); if (!(ecobus & FORCEWAKE_MT_ENABLE)) { DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); @@ -1314,7 +1309,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); } - } else if (IS_GEN6(dev)) { + } 
else if (IS_GEN6(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get_with_thread_status; dev_priv->uncore.funcs.force_wake_put = @@ -1327,26 +1322,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) WARN_ON(dev_priv->uncore.fw_domains == 0); } -void intel_uncore_init(struct drm_device *dev) +void intel_uncore_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - i915_check_vgpu(dev); + i915_check_vgpu(dev_priv); intel_uncore_edram_detect(dev_priv); - intel_uncore_fw_domains_init(dev); - __intel_uncore_early_sanitize(dev, false); + intel_uncore_fw_domains_init(dev_priv); + __intel_uncore_early_sanitize(dev_priv, false); dev_priv->uncore.unclaimed_mmio_check = 1; - switch (INTEL_INFO(dev)->gen) { + switch (INTEL_INFO(dev_priv)->gen) { default: case 9: ASSIGN_WRITE_MMIO_VFUNCS(gen9); ASSIGN_READ_MMIO_VFUNCS(gen9); break; case 8: - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { ASSIGN_WRITE_MMIO_VFUNCS(chv); ASSIGN_READ_MMIO_VFUNCS(chv); @@ -1357,13 +1350,13 @@ void intel_uncore_init(struct drm_device *dev) break; case 7: case 6: - if (IS_HASWELL(dev)) { + if (IS_HASWELL(dev_priv)) { ASSIGN_WRITE_MMIO_VFUNCS(hsw); } else { ASSIGN_WRITE_MMIO_VFUNCS(gen6); } - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev_priv)) { ASSIGN_READ_MMIO_VFUNCS(vlv); } else { ASSIGN_READ_MMIO_VFUNCS(gen6); @@ -1381,24 +1374,24 @@ void intel_uncore_init(struct drm_device *dev) break; } - if (intel_vgpu_active(dev)) { + if (intel_vgpu_active(dev_priv)) { ASSIGN_WRITE_MMIO_VFUNCS(vgpu); ASSIGN_READ_MMIO_VFUNCS(vgpu); } - i915_check_and_clear_faults(dev); + i915_check_and_clear_faults(dev_priv); } #undef ASSIGN_WRITE_MMIO_VFUNCS #undef ASSIGN_READ_MMIO_VFUNCS -void intel_uncore_fini(struct drm_device *dev) +void intel_uncore_fini(struct drm_i915_private *dev_priv) { /* Paranoia: make sure we have disabled everything before we exit. 
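A note on the GEN_RANGE() redefinition in the register-whitelist hunk just below: the whitelist used to be matched with (1 << gen), so GENMASK(h, l) placed gen N at bit N; the match now uses INTEL_INFO(dev)->gen_mask, which encodes gen N at bit N - 1, hence both bounds shift down by one. A worked example for a hypothetical GEN_RANGE(6, 7) entry:

/* old: tested with (1 << gen)
 *	GEN_RANGE(6, 7) == GENMASK(7, 6) == 0xc0
 *	gen 6: (1 << 6) == 0x40, and 0x40 & 0xc0 != 0	-> match
 * new: tested with gen_mask, where gen N sits at bit N - 1
 *	GEN_RANGE(6, 7) == GENMASK(6, 5) == 0x60
 *	gen 6: BIT(6 - 1) == 0x20, and 0x20 & 0x60 != 0	-> match
 */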
*/ - intel_uncore_sanitize(dev); - intel_uncore_forcewake_reset(dev, false); + intel_uncore_sanitize(dev_priv); + intel_uncore_forcewake_reset(dev_priv, false); } -#define GEN_RANGE(l, h) GENMASK(h, l) +#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1) static const struct register_whitelist { i915_reg_t offset_ldw, offset_udw; @@ -1423,7 +1416,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && - (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) + (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask)) break; } @@ -1467,83 +1460,47 @@ out: return ret; } -int i915_get_reset_stats_ioctl(struct drm_device *dev, - void *data, struct drm_file *file) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_reset_stats *args = data; - struct i915_ctx_hang_stats *hs; - struct intel_context *ctx; - int ret; - - if (args->flags || args->pad) - return -EINVAL; - - if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN)) - return -EPERM; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - ctx = i915_gem_context_get(file->driver_priv, args->ctx_id); - if (IS_ERR(ctx)) { - mutex_unlock(&dev->struct_mutex); - return PTR_ERR(ctx); - } - hs = &ctx->hang_stats; - - if (capable(CAP_SYS_ADMIN)) - args->reset_count = i915_reset_count(&dev_priv->gpu_error); - else - args->reset_count = 0; - - args->batch_active = hs->batch_active; - args->batch_pending = hs->batch_pending; - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -static int i915_reset_complete(struct drm_device *dev) +static int i915_reset_complete(struct pci_dev *pdev) { u8 gdrst; - pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); + pci_read_config_byte(pdev, I915_GDRST, &gdrst); return (gdrst & GRDOM_RESET_STATUS) == 0; } -static int i915_do_reset(struct drm_device *dev, unsigned engine_mask) +static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { + struct pci_dev *pdev = dev_priv->dev->pdev; + /* assert reset for at least 20 usec */ - pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); udelay(20); - pci_write_config_byte(dev->pdev, I915_GDRST, 0); + pci_write_config_byte(pdev, I915_GDRST, 0); - return wait_for(i915_reset_complete(dev), 500); + return wait_for(i915_reset_complete(pdev), 500); } -static int g4x_reset_complete(struct drm_device *dev) +static int g4x_reset_complete(struct pci_dev *pdev) { u8 gdrst; - pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); + pci_read_config_byte(pdev, I915_GDRST, &gdrst); return (gdrst & GRDOM_RESET_ENABLE) == 0; } -static int g33_do_reset(struct drm_device *dev, unsigned engine_mask) +static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { - pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); - return wait_for(g4x_reset_complete(dev), 500); + struct pci_dev *pdev = dev_priv->dev->pdev; + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); + return wait_for(g4x_reset_complete(pdev), 500); } -static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) +static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = dev_priv->dev->pdev; int ret; - pci_write_config_byte(dev->pdev, I915_GDRST, + pci_write_config_byte(pdev, I915_GDRST, 
GRDOM_RENDER | GRDOM_RESET_ENABLE); - ret = wait_for(g4x_reset_complete(dev), 500); + ret = wait_for(g4x_reset_complete(pdev), 500); if (ret) return ret; @@ -1551,9 +1508,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); POSTING_READ(VDECCLK_GATE_D); - pci_write_config_byte(dev->pdev, I915_GDRST, + pci_write_config_byte(pdev, I915_GDRST, GRDOM_MEDIA | GRDOM_RESET_ENABLE); - ret = wait_for(g4x_reset_complete(dev), 500); + ret = wait_for(g4x_reset_complete(pdev), 500); if (ret) return ret; @@ -1561,14 +1518,14 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); POSTING_READ(VDECCLK_GATE_D); - pci_write_config_byte(dev->pdev, I915_GDRST, 0); + pci_write_config_byte(pdev, I915_GDRST, 0); return 0; } -static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask) +static int ironlake_do_reset(struct drm_i915_private *dev_priv, + unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret; I915_WRITE(ILK_GDSR, @@ -1612,7 +1569,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, /** * gen6_reset_engines - reset individual engines - * @dev: DRM device + * @dev_priv: i915 device * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset * * This function will reset the individual engines that are set in engine_mask. @@ -1623,9 +1580,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, * * Returns 0 on success, nonzero on error. */ -static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask) +static int gen6_reset_engines(struct drm_i915_private *dev_priv, + unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; const u32 hw_engine_mask[I915_NUM_ENGINES] = { [RCS] = GEN6_GRDOM_RENDER, @@ -1647,7 +1604,7 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask) ret = gen6_hw_domain_reset(dev_priv, hw_mask); - intel_uncore_forcewake_reset(dev, true); + intel_uncore_forcewake_reset(dev_priv, true); return ret; } @@ -1663,8 +1620,8 @@ static int wait_for_register_fw(struct drm_i915_private *dev_priv, static int gen8_request_engine_reset(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_i915_private *dev_priv = engine->dev->dev_private; I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); @@ -1682,22 +1639,22 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine) static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); } -static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask) +static int gen8_reset_engines(struct drm_i915_private *dev_priv, + unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; for_each_engine_masked(engine, dev_priv, engine_mask) if (gen8_request_engine_reset(engine)) goto not_ready; - return gen6_reset_engines(dev, engine_mask); + return gen6_reset_engines(dev_priv, engine_mask); not_ready: for_each_engine_masked(engine, dev_priv, engine_mask) @@ -1706,35 +1663,35 
@@ not_ready: return -EIO; } -static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *, - unsigned engine_mask) +typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask); + +static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) { if (!i915.reset) return NULL; - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_INFO(dev_priv)->gen >= 8) return gen8_reset_engines; - else if (INTEL_INFO(dev)->gen >= 6) + else if (INTEL_INFO(dev_priv)->gen >= 6) return gen6_reset_engines; - else if (IS_GEN5(dev)) + else if (IS_GEN5(dev_priv)) return ironlake_do_reset; - else if (IS_G4X(dev)) + else if (IS_G4X(dev_priv)) return g4x_do_reset; - else if (IS_G33(dev)) + else if (IS_G33(dev_priv)) return g33_do_reset; - else if (INTEL_INFO(dev)->gen >= 3) + else if (INTEL_INFO(dev_priv)->gen >= 3) return i915_do_reset; else return NULL; } -int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask) +int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { - struct drm_i915_private *dev_priv = to_i915(dev); - int (*reset)(struct drm_device *, unsigned); + reset_func reset; int ret; - reset = intel_get_gpu_reset(dev); + reset = intel_get_gpu_reset(dev_priv); if (reset == NULL) return -ENODEV; @@ -1742,15 +1699,15 @@ int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask) * request may be dropped and never completes (causing -EIO). */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - ret = reset(dev, engine_mask); + ret = reset(dev_priv, engine_mask); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return ret; } -bool intel_has_gpu_reset(struct drm_device *dev) +bool intel_has_gpu_reset(struct drm_i915_private *dev_priv) { - return intel_get_gpu_reset(dev) != NULL; + return intel_get_gpu_reset(dev_priv) != NULL; } int intel_guc_reset(struct drm_i915_private *dev_priv) @@ -1758,7 +1715,7 @@ int intel_guc_reset(struct drm_i915_private *dev_priv) int ret; unsigned long irqflags; - if (!i915.enable_guc_submission) + if (!HAS_GUC(dev_priv)) return -EINVAL; intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); @@ -1802,10 +1759,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, { enum forcewake_domains fw_domains; - if (intel_vgpu_active(dev_priv->dev)) + if (intel_vgpu_active(dev_priv)) return 0; - switch (INTEL_INFO(dev_priv)->gen) { + switch (INTEL_GEN(dev_priv)) { case 9: fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg)); break; @@ -1842,10 +1799,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, { enum forcewake_domains fw_domains; - if (intel_vgpu_active(dev_priv->dev)) + if (intel_vgpu_active(dev_priv)) return 0; - switch (INTEL_INFO(dev_priv)->gen) { + switch (INTEL_GEN(dev_priv)) { case 9: fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg)); break; diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index c15051de8023..68db9621f1f0 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -403,9 +403,10 @@ struct lvds_dvo_timing { u8 vsync_off:4; u8 rsvd0:6; u8 hsync_off_hi:2; - u8 h_image; - u8 v_image; - u8 max_hv; + u8 himage_lo; + u8 vimage_lo; + u8 vimage_hi:4; + u8 himage_hi:4; u8 h_border; u8 v_border; u8 rsvd1:3; @@ -446,10 +447,16 @@ struct bdb_lfp_backlight_data_entry { u8 obsolete3; } __packed; +struct bdb_lfp_backlight_control_method { + u8 type:4; + u8 controller:4; +} __packed; + struct bdb_lfp_backlight_data { u8 entry_size; struct 
bdb_lfp_backlight_data_entry data[16]; u8 level[16]; + struct bdb_lfp_backlight_control_method backlight_control[16]; } __packed; struct aimdb_header { diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 82656654fb21..7746418a4c08 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -407,7 +407,6 @@ static struct drm_driver imx_drm_driver = { .load = imx_drm_driver_load, .unload = imx_drm_driver_unload, .lastclose = imx_drm_driver_lastclose, - .set_busid = drm_platform_set_busid, .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index a6898d5dcc98..23ac8041c562 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -6,7 +6,6 @@ config DRM_MEDIATEK select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL - select IOMMU_DMA select MEMORY select MTK_SMI help diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index b1223d54d0ab..eebb7d881c2b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -91,7 +91,7 @@ static int mtk_atomic_commit(struct drm_device *drm, mutex_lock(&private->commit.lock); flush_work(&private->commit.work); - drm_atomic_helper_swap_state(drm, state); + drm_atomic_helper_swap_state(state, true); if (async) mtk_atomic_schedule(private, state); @@ -243,7 +243,7 @@ static struct drm_driver mtk_drm_driver = { .enable_vblank = mtk_drm_crtc_enable_vblank, .disable_vblank = mtk_drm_crtc_disable_vblank, - .gem_free_object = mtk_drm_gem_free_object, + .gem_free_object_unlocked = mtk_drm_gem_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = mtk_drm_gem_dumb_create, .dumb_map_offset = mtk_drm_gem_dumb_map_offset, @@ -280,8 +280,6 @@ static int mtk_drm_bind(struct device *dev) if (!drm) return -ENOMEM; - drm_dev_set_unique(drm, dev_name(dev)); - drm->dev_private = private; private->drm = drm; @@ -293,14 +291,8 @@ static int mtk_drm_bind(struct device *dev) if (ret < 0) goto err_deinit; - ret = drm_connector_register_all(drm); - if (ret < 0) - goto err_unregister; - return 0; -err_unregister: - drm_dev_unregister(drm); err_deinit: mtk_drm_kms_deinit(drm); err_free: @@ -455,7 +447,6 @@ static int mtk_drm_remove(struct platform_device *pdev) struct drm_device *drm = private->drm; int i; - drm_connector_unregister_all(drm); drm_dev_unregister(drm); mtk_drm_kms_deinit(drm); drm_dev_unref(drm); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index 51bc8988fc26..3995765a90dc 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -170,6 +170,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane, return drm_plane_helper_check_update(plane, state->crtc, fb, &src, &dest, &clip, + state->rotation, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, true, true, &visible); diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 769559124562..28b2044ed9f2 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -575,14 +575,6 @@ static int mtk_dsi_connector_get_modes(struct drm_connector *connector) return drm_panel_get_modes(dsi->panel); } -static struct drm_encoder *mtk_dsi_connector_best_encoder( - struct drm_connector *connector) -{ - struct mtk_dsi 
*dsi = connector_to_dsi(connector); - - return &dsi->encoder; -} - static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = { .mode_fixup = mtk_dsi_encoder_mode_fixup, .mode_set = mtk_dsi_encoder_mode_set, @@ -603,7 +595,6 @@ static const struct drm_connector_funcs mtk_dsi_connector_funcs = { static const struct drm_connector_helper_funcs mtk_dsi_connector_helper_funcs = { .get_modes = mtk_dsi_connector_get_modes, - .best_encoder = mtk_dsi_connector_best_encoder, }; static int mtk_drm_attach_bridge(struct drm_bridge *bridge, diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index ebb470ff7200..2b4b125eebc3 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -101,7 +101,7 @@ static struct drm_driver driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - .gem_free_object = mgag200_gem_free_object, + .gem_free_object_unlocked = mgag200_gem_free_object, .dumb_create = mgag200_dumb_create, .dumb_map_offset = mgag200_dumb_mmap_offset, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index d347dca17267..6b21cb27e1cc 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1352,19 +1352,20 @@ static void mga_crtc_commit(struct drm_crtc *crtc) * use this for 8-bit mode so can't perform smooth fades on deeper modes, * but it's a requirement that we provide the function */ -static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct mga_crtc *mga_crtc = to_mga_crtc(crtc); - int end = (start + size > MGAG200_LUT_SIZE) ? 
MGAG200_LUT_SIZE : start + size; int i; - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { mga_crtc->lut_r[i] = red[i] >> 8; mga_crtc->lut_g[i] = green[i] >> 8; mga_crtc->lut_b[i] = blue[i] >> 8; } mga_crtc_load_lut(crtc); + + return 0; } /* Simple cleanup function */ diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c index 72360cd038c0..5960628ceb93 100644 --- a/drivers/gpu/drm/msm/edp/edp_connector.c +++ b/drivers/gpu/drm/msm/edp/edp_connector.c @@ -91,15 +91,6 @@ static int edp_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -edp_connector_best_encoder(struct drm_connector *connector) -{ - struct edp_connector *edp_connector = to_edp_connector(connector); - - DBG(""); - return edp_connector->edp->encoder; -} - static const struct drm_connector_funcs edp_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = edp_connector_detect, @@ -113,7 +104,6 @@ static const struct drm_connector_funcs edp_connector_funcs = { static const struct drm_connector_helper_funcs edp_connector_helper_funcs = { .get_modes = edp_connector_get_modes, .mode_valid = edp_connector_mode_valid, - .best_encoder = edp_connector_best_encoder, }; /* initialize connector */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index b15d72683112..a2515b466ce5 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -406,13 +406,6 @@ static int msm_hdmi_connector_mode_valid(struct drm_connector *connector, return 0; } -static struct drm_encoder * -msm_hdmi_connector_best_encoder(struct drm_connector *connector) -{ - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - return hdmi_connector->hdmi->encoder; -} - static const struct drm_connector_funcs hdmi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = hdmi_connector_detect, @@ -426,7 +419,6 @@ static const struct drm_connector_funcs hdmi_connector_funcs = { static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = { .get_modes = msm_hdmi_connector_get_modes, .mode_valid = msm_hdmi_connector_mode_valid, - .best_encoder = msm_hdmi_connector_best_encoder, }; /* initialize connector */ diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 67442d50a6c2..f145d256e332 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -106,31 +106,27 @@ out: static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - int i, ncrtcs = state->dev->mode_config.num_crtc; + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; mdp4_enable(mdp4_kms); /* see 119ecb7fd */ - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - if (!crtc) - continue; + for_each_crtc_in_state(state, crtc, crtc_state, i) drm_crtc_vblank_get(crtc); - } } static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - int i, ncrtcs = state->dev->mode_config.num_crtc; + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; /* see 119ecb7fd */ - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - if (!crtc) - continue; + for_each_crtc_in_state(state, crtc, crtc_state, i) drm_crtc_vblank_put(crtc); - } 
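The mdp4 hunks above convert open-coded loops over state->crtcs, which had to skip NULL slots at every call site, to the for_each_crtc_in_state() helper. Below is a small sketch of that macro shape with invented types, assuming the state keeps fixed-size arrays where unused slots stay NULL; the NULL check moves into the macro so callers read straight through.

#include <stdio.h>
#include <stddef.h>

#define MAX_CRTC 4

struct crtc { int id; };
struct crtc_state { int active; };

struct atomic_state {
        struct crtc *crtcs[MAX_CRTC];
        struct crtc_state *crtc_states[MAX_CRTC];
};

/* Walk every populated slot, binding the crtc and its new state;
 * empty (NULL) slots are skipped inside the macro. */
#define for_each_crtc_in_state(st, c, cs, i)                    \
        for ((i) = 0; (i) < MAX_CRTC; (i)++)                    \
                if (!((c) = (st)->crtcs[(i)])) {} else          \
                if (!((cs) = (st)->crtc_states[(i)])) {} else

int main(void)
{
        struct crtc c0 = { 0 }, c2 = { 2 };
        struct crtc_state s0 = { 1 }, s2 = { 0 };
        struct atomic_state st = { { &c0, NULL, &c2, NULL },
                                   { &s0, NULL, &s2, NULL } };
        struct crtc *crtc;
        struct crtc_state *cs;
        int i;

        for_each_crtc_in_state(&st, crtc, cs, i)
                printf("crtc %d active=%d\n", crtc->id, cs->active);
        return 0;
}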
mdp4_disable(mdp4_kms); } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c index 2648cd7631ef..353429b05733 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c @@ -90,14 +90,6 @@ static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -mdp4_lvds_connector_best_encoder(struct drm_connector *connector) -{ - struct mdp4_lvds_connector *mdp4_lvds_connector = - to_mdp4_lvds_connector(connector); - return mdp4_lvds_connector->encoder; -} - static const struct drm_connector_funcs mdp4_lvds_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = mdp4_lvds_connector_detect, @@ -111,7 +103,6 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = { static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = { .get_modes = mdp4_lvds_connector_get_modes, .mode_valid = mdp4_lvds_connector_mode_valid, - .best_encoder = mdp4_lvds_connector_best_encoder, }; /* initialize connector */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 88fe256c1931..4e8ed739f558 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -374,6 +374,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct plane_state pstates[STAGE_MAX + 1]; const struct mdp5_cfg_hw *hw_cfg; + const struct drm_plane_state *pstate; int cnt = 0, i; DBG("%s: check", mdp5_crtc->name); @@ -382,20 +383,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, * and that we don't have conflicting mixer stages: */ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - drm_atomic_crtc_state_for_each_plane(plane, state) { - struct drm_plane_state *pstate; + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { if (cnt >= (hw_cfg->lm.nb_stages)) { dev_err(dev->dev, "too many planes!\n"); return -EINVAL; } - pstate = state->state->plane_states[drm_plane_index(plane)]; - /* plane might not have changed, in which case take - * current state: - */ - if (!pstate) - pstate = plane->state; pstates[cnt].plane = plane; pstates[cnt].state = to_mdp5_plane_state(pstate); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 484b4d15e71d..f0c285b1c027 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -78,17 +78,11 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s { int i; struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - int nplanes = mdp5_kms->dev->mode_config.num_total_plane; - - for (i = 0; i < nplanes; i++) { - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *plane_state = state->plane_states[i]; - - if (!plane) - continue; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + for_each_plane_in_state(state, plane, plane_state, i) mdp5_plane_complete_commit(plane, plane_state); - } mdp5_disable(mdp5_kms); } diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index e3892c263f27..4a8a6f1f1151 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -84,17 +84,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev, struct drm_atomic_state *old_state) { struct drm_crtc *crtc; + struct drm_crtc_state 
*crtc_state; struct msm_drm_private *priv = old_state->dev->dev_private; struct msm_kms *kms = priv->kms; - int ncrtcs = old_state->dev->mode_config.num_crtc; int i; - for (i = 0; i < ncrtcs; i++) { - crtc = old_state->crtcs[i]; - - if (!crtc) - continue; - + for_each_crtc_in_state(old_state, crtc, crtc_state, i) { if (!crtc->state->enable) continue; @@ -192,9 +187,11 @@ int msm_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock) { struct msm_drm_private *priv = dev->dev_private; - int nplanes = dev->mode_config.num_total_plane; - int ncrtcs = dev->mode_config.num_crtc; struct msm_commit *c; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_plane *plane; + struct drm_plane_state *plane_state; int i, ret; ret = drm_atomic_helper_prepare_planes(dev, state); @@ -210,28 +207,18 @@ int msm_atomic_commit(struct drm_device *dev, /* * Figure out what crtcs we have: */ - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - if (!crtc) - continue; - c->crtc_mask |= (1 << drm_crtc_index(crtc)); - } + for_each_crtc_in_state(state, crtc, crtc_state, i) + c->crtc_mask |= drm_crtc_mask(crtc); /* * Figure out what fence to wait for: */ - for (i = 0; i < nplanes; i++) { - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *new_state = state->plane_states[i]; - - if (!plane) - continue; - - if ((plane->state->fb != new_state->fb) && new_state->fb) { - struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0); + for_each_plane_in_state(state, plane, plane_state, i) { + if ((plane->state->fb != plane_state->fb) && plane_state->fb) { + struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0); struct msm_gem_object *msm_obj = to_msm_bo(obj); - new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv); + plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv); } } @@ -251,7 +238,7 @@ int msm_atomic_commit(struct drm_device *dev, * the software side now. 
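In msm_atomic_commit above, the per-index loop that built c->crtc_mask is replaced by for_each_crtc_in_state() plus drm_crtc_mask(). The mask itself is just one bit per CRTC index; a toy version, with the index lookup invented:

#include <stdint.h>
#include <stdio.h>

struct crtc { unsigned index; };

/* Equivalent of drm_crtc_mask(): one bit per CRTC, by index. */
static uint32_t crtc_mask(const struct crtc *crtc)
{
        return 1u << crtc->index;
}

int main(void)
{
        struct crtc a = { 0 }, b = { 2 };
        uint32_t mask = 0;

        mask |= crtc_mask(&a);
        mask |= crtc_mask(&b);
        printf("commit touches crtcs: 0x%x\n", mask); /* prints 0x5 */
        return 0;
}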
*/ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); /* * Everything below can be run asynchronously without the need to grab diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 9c654092ef78..a02dc2b27739 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -197,8 +197,6 @@ static int msm_drm_uninit(struct device *dev) drm_kms_helper_poll_fini(ddev); - drm_connector_unregister_all(ddev); - drm_dev_unregister(ddev); #ifdef CONFIG_DRM_FBDEV_EMULATION @@ -431,12 +429,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) if (ret) goto fail; - ret = drm_connector_register_all(ddev); - if (ret) { - dev_err(dev, "failed to register connectors\n"); - goto fail; - } - drm_mode_config_reset(ddev); #ifdef CONFIG_DRM_FBDEV_EMULATION @@ -730,7 +722,6 @@ static struct drm_driver msm_driver = { .open = msm_open, .preclose = msm_preclose, .lastclose = msm_lastclose, - .set_busid = drm_platform_set_busid, .irq_handler = msm_irq, .irq_preinstall = msm_irq_preinstall, .irq_postinstall = msm_irq_postinstall, diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 461dc8b873f0..7919c24c6ddd 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -56,17 +56,9 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb) kfree(msm_fb); } -static int msm_framebuffer_dirty(struct drm_framebuffer *fb, - struct drm_file *file_priv, unsigned flags, unsigned color, - struct drm_clip_rect *clips, unsigned num_clips) -{ - return 0; -} - static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { .create_handle = msm_framebuffer_create_handle, .destroy = msm_framebuffer_destroy, - .dirty = msm_framebuffer_dirty, }; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index d9759bf3482e..1a061e3e8b9e 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -184,21 +184,7 @@ fail: return ret; } -static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc, - u16 red, u16 green, u16 blue, int regno) -{ - DBG("fbdev: set gamma"); -} - -static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc, - u16 *red, u16 *green, u16 *blue, int regno) -{ - DBG("fbdev: get gamma"); -} - static const struct drm_fb_helper_funcs msm_fb_helper_funcs = { - .gamma_set = msm_crtc_fb_gamma_set, - .gamma_get = msm_crtc_fb_gamma_get, .fb_probe = msm_fbdev_create, }; diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 6f318c54da33..0cb7a18cde26 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -785,14 +785,14 @@ nv_crtc_disable(struct drm_crtc *crtc) nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); } -static void -nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, +static int +nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size) { - int end = (start + size > 256) ? 
256 : start + size, i; struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int i; - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { nv_crtc->lut.r[i] = r[i]; nv_crtc->lut.g[i] = g[i]; nv_crtc->lut.b[i] = b[i]; @@ -805,10 +805,12 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, */ if (!nv_crtc->base.primary->fb) { nv_crtc->lut.depth = 0; - return; + return 0; } nv_crtc_gamma_load(crtc); + + return 0; } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 7c77f960c8b8..6072fe292db8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -760,12 +760,11 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, /* Initialize a page flip struct */ *s = (struct nouveau_page_flip_state) - { { }, event, nouveau_crtc(crtc)->index, - fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y, + { { }, event, crtc, fb->bits_per_pixel, fb->pitches[0], new_bo->bo.offset }; /* Keep vblanks on during flip, for the target crtc of this flip */ - drm_vblank_get(dev, nouveau_crtc(crtc)->index); + drm_crtc_vblank_get(crtc); /* Emit a page flip */ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { @@ -810,7 +809,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, return 0; fail_unreserve: - drm_vblank_put(dev, nouveau_crtc(crtc)->index); + drm_crtc_vblank_put(crtc); ttm_bo_unreserve(&old_bo->bo); fail_unpin: mutex_unlock(&cli->mutex); @@ -842,17 +841,17 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); if (s->event) { if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { - drm_arm_vblank_event(dev, s->crtc, s->event); + drm_crtc_arm_vblank_event(s->crtc, s->event); } else { - drm_send_vblank_event(dev, s->crtc, s->event); + drm_crtc_send_vblank_event(s->crtc, s->event); /* Give up ownership of vblank for page-flipped crtc */ - drm_vblank_put(dev, s->crtc); + drm_crtc_vblank_put(s->crtc); } } else { /* Give up ownership of vblank for page-flipped crtc */ - drm_vblank_put(dev, s->crtc); + drm_crtc_vblank_put(s->crtc); } list_del(&s->head); @@ -873,9 +872,10 @@ nouveau_flip_complete(struct nvif_notify *notify) if (!nouveau_finish_page_flip(chan, &state)) { if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { - nv_set_crtc_base(drm->dev, state.crtc, state.offset + - state.y * state.pitch + - state.x * state.bpp / 8); + nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc), + state.offset + state.crtc->y * + state.pitch + state.crtc->x * + state.bpp / 8); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 24273bacd885..0420ee861ea4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -28,7 +28,8 @@ int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *, struct nouveau_page_flip_state { struct list_head head; struct drm_pending_vblank_event *event; - int crtc, bpp, pitch, x, y; + struct drm_crtc *crtc; + int bpp, pitch; u64 offset; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 11f8dd9c0edb..295e7621cc68 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -22,13 +22,11 @@ * Authors: Ben Skeggs */ -#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/delay.h> #include 
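Both the mgag200 and nouveau hunks above adapt .gamma_set to the new prototype: there is no start offset any more, the helper always hands in a table beginning at index 0, and the callback returns int. The mgag200 variant also narrows the 16-bit components to its 8-bit LUT by shifting. A standalone sketch of that conversion (table size and names invented, not driver code):

#include <stdint.h>

#define LUT_SIZE 256

static uint8_t lut_r[LUT_SIZE], lut_g[LUT_SIZE], lut_b[LUT_SIZE];

/* New-style gamma_set: the core always passes `size` entries starting
 * at index 0; 16-bit components are narrowed to the 8-bit hardware LUT. */
static int gamma_set(const uint16_t *r, const uint16_t *g,
                     const uint16_t *b, uint32_t size)
{
        uint32_t i;

        if (size > LUT_SIZE)
                return -1;

        for (i = 0; i < size; i++) {
                lut_r[i] = r[i] >> 8;
                lut_g[i] = g[i] >> 8;
                lut_b[i] = b[i] >> 8;
        }
        /* a real driver would now load the LUT into hardware */
        return 0;
}

int main(void)
{
        uint16_t ramp[LUT_SIZE];
        int i;

        for (i = 0; i < LUT_SIZE; i++)
                ramp[i] = (uint16_t)(i * 257); /* linear 0..0xffff */
        return gamma_set(ramp, ramp, ramp, LUT_SIZE);
}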
<linux/module.h> #include <linux/pci.h> #include <linux/pm_runtime.h> -#include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include "drmP.h" @@ -315,13 +313,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev, bool boot = false; int ret; - /* - * apple-gmux is needed on dual GPU MacBook Pro - * to probe the panel if we're the inactive GPU. - */ - if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) && - apple_gmux_present() && pdev != vga_default_device() && - !vga_switcheroo_handler_flags()) + if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; /* remove conflicting drivers (vesafb, efifb etc) */ @@ -970,7 +962,7 @@ driver_stub = { .gem_prime_vmap = nouveau_gem_prime_vmap, .gem_prime_vunmap = nouveau_gem_prime_vunmap, - .gem_free_object = nouveau_gem_object_del, + .gem_free_object_unlocked = nouveau_gem_object_del, .gem_open_object = nouveau_gem_object_open, .gem_close_object = nouveau_gem_object_close, @@ -1078,7 +1070,6 @@ nouveau_drm_init(void) driver_pci = driver_stub; driver_pci.set_busid = drm_pci_set_busid; driver_platform = driver_stub; - driver_platform.set_busid = drm_platform_set_busid; nouveau_display_options(); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 2e3a62d38fe9..64c4ce7115ad 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -57,7 +57,8 @@ struct nouveau_fence_priv { int (*context_new)(struct nouveau_channel *); void (*context_del)(struct nouveau_channel *); - u32 contexts, context_base; + u32 contexts; + u64 context_base; bool uevent; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 675e9e077a95..08f9c6fa0f7f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -212,7 +212,6 @@ usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) ntfy->p->base.event = &ntfy->p->e.base; ntfy->p->base.file_priv = f; ntfy->p->base.pid = current->pid; - ntfy->p->base.destroy =(void(*)(struct drm_pending_event *))kfree; ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF; ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 3ffc2b0057bf..7a7788212df7 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -1346,21 +1346,22 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) return 0; } -static void +static int nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size) + uint32_t size) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - u32 end = min_t(u32, start + size, 256); u32 i; - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { nv_crtc->lut.r[i] = r[i]; nv_crtc->lut.g[i] = g[i]; nv_crtc->lut.b[i] = b[i]; } nv50_crtc_lut_load(crtc); + + return 0; } static void diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig index 2a618afe0f53..c226da145fb3 100644 --- a/drivers/gpu/drm/omapdrm/displays/Kconfig +++ b/drivers/gpu/drm/omapdrm/displays/Kconfig @@ -1,80 +1,80 @@ menu "OMAPDRM External Display Device Drivers" -config DISPLAY_ENCODER_OPA362 +config DRM_OMAP_ENCODER_OPA362 tristate "OPA362 external analog amplifier" help Driver for OPA362 external analog TV amplifier controlled through a GPIO. 
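The nouveau_drm_probe hunk above replaces the open-coded apple-gmux/VGA-arbiter test with the new vga_switcheroo_client_probe_defer() helper, so the dual-GPU deferral policy lives in one place. The shape of that pattern, with the predicate and device type stubbed out for illustration:

#include <stdbool.h>
#include <stdio.h>

#define EPROBE_DEFER 517 /* same numeric value the kernel uses */

struct pci_dev_stub { bool inactive_gpu; };

/* Stand-in for vga_switcheroo_client_probe_defer(): decides whether
 * this GPU should wait, e.g. it is the inactive GPU on a dual-GPU
 * laptop and the switcheroo handler has not registered yet. */
static bool client_probe_defer(const struct pci_dev_stub *pdev)
{
        return pdev->inactive_gpu;
}

static int driver_probe(struct pci_dev_stub *pdev)
{
        if (client_probe_defer(pdev))
                return -EPROBE_DEFER; /* the core retries the probe later */

        printf("probing GPU\n");
        return 0;
}

int main(void)
{
        struct pci_dev_stub gpu = { .inactive_gpu = false };
        return driver_probe(&gpu);
}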
-config DISPLAY_ENCODER_TFP410 +config DRM_OMAP_ENCODER_TFP410 tristate "TFP410 DPI to DVI Encoder" help Driver for TFP410 DPI to DVI encoder. -config DISPLAY_ENCODER_TPD12S015 +config DRM_OMAP_ENCODER_TPD12S015 tristate "TPD12S015 HDMI ESD protection and level shifter" help Driver for TPD12S015, which offers HDMI ESD protection and level shifting. -config DISPLAY_CONNECTOR_DVI +config DRM_OMAP_CONNECTOR_DVI tristate "DVI Connector" depends on I2C help Driver for a generic DVI connector. -config DISPLAY_CONNECTOR_HDMI +config DRM_OMAP_CONNECTOR_HDMI tristate "HDMI Connector" help Driver for a generic HDMI connector. -config DISPLAY_CONNECTOR_ANALOG_TV +config DRM_OMAP_CONNECTOR_ANALOG_TV tristate "Analog TV Connector" help Driver for a generic analog TV connector. -config DISPLAY_PANEL_DPI +config DRM_OMAP_PANEL_DPI tristate "Generic DPI panel" help Driver for generic DPI panels. -config DISPLAY_PANEL_DSI_CM +config DRM_OMAP_PANEL_DSI_CM tristate "Generic DSI Command Mode Panel" depends on BACKLIGHT_CLASS_DEVICE help Driver for generic DSI command mode panels. -config DISPLAY_PANEL_SONY_ACX565AKM +config DRM_OMAP_PANEL_SONY_ACX565AKM tristate "ACX565AKM Panel" depends on SPI && BACKLIGHT_CLASS_DEVICE help This is the LCD panel used on Nokia N900 -config DISPLAY_PANEL_LGPHILIPS_LB035Q02 +config DRM_OMAP_PANEL_LGPHILIPS_LB035Q02 tristate "LG.Philips LB035Q02 LCD Panel" depends on SPI help LCD Panel used on the Gumstix Overo Palo35 -config DISPLAY_PANEL_SHARP_LS037V7DW01 +config DRM_OMAP_PANEL_SHARP_LS037V7DW01 tristate "Sharp LS037V7DW01 LCD Panel" depends on BACKLIGHT_CLASS_DEVICE help LCD Panel used in TI's SDP3430 and EVM boards -config DISPLAY_PANEL_TPO_TD028TTEC1 +config DRM_OMAP_PANEL_TPO_TD028TTEC1 tristate "TPO TD028TTEC1 LCD Panel" depends on SPI help LCD panel used in Openmoko. 
-config DISPLAY_PANEL_TPO_TD043MTEA1 +config DRM_OMAP_PANEL_TPO_TD043MTEA1 tristate "TPO TD043MTEA1 LCD Panel" depends on SPI help LCD Panel used in OMAP3 Pandora -config DISPLAY_PANEL_NEC_NL8048HL11 +config DRM_OMAP_PANEL_NEC_NL8048HL11 tristate "NEC NL8048HL11 Panel" depends on SPI depends on BACKLIGHT_CLASS_DEVICE diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile index 9aa176bfbf2e..46baafb1a83e 100644 --- a/drivers/gpu/drm/omapdrm/displays/Makefile +++ b/drivers/gpu/drm/omapdrm/displays/Makefile @@ -1,14 +1,14 @@ -obj-$(CONFIG_DISPLAY_ENCODER_OPA362) += encoder-opa362.o -obj-$(CONFIG_DISPLAY_ENCODER_TFP410) += encoder-tfp410.o -obj-$(CONFIG_DISPLAY_ENCODER_TPD12S015) += encoder-tpd12s015.o -obj-$(CONFIG_DISPLAY_CONNECTOR_DVI) += connector-dvi.o -obj-$(CONFIG_DISPLAY_CONNECTOR_HDMI) += connector-hdmi.o -obj-$(CONFIG_DISPLAY_CONNECTOR_ANALOG_TV) += connector-analog-tv.o -obj-$(CONFIG_DISPLAY_PANEL_DPI) += panel-dpi.o -obj-$(CONFIG_DISPLAY_PANEL_DSI_CM) += panel-dsi-cm.o -obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o -obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o -obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o -obj-$(CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o -obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o -obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o +obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o +obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o +obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o +obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o +obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o +obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o +obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o +obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o +obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o +obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o +obj-$(CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o +obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o +obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o +obj-$(CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 8511c648a15c..3485d1ecd655 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -14,9 +14,10 @@ #include <linux/platform_device.h> #include <linux/of.h> -#include <video/omapdss.h> #include <video/omap-panel-data.h> +#include "../dss/omapdss.h" + struct panel_drv_data { struct omap_dss_device dssdev; struct omap_dss_device *in; @@ -25,7 +26,6 @@ struct panel_drv_data { struct omap_video_timings timings; - enum omap_dss_venc_type connector_type; bool invert_polarity; }; @@ -45,10 +45,6 @@ static const struct omap_video_timings tvc_pal_timings = { static const struct of_device_id tvc_of_match[]; -struct tvc_of_data { - enum omap_dss_venc_type connector_type; -}; - #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) static int tvc_connect(struct omap_dss_device *dssdev) @@ -99,7 +95,7 @@ static int tvc_enable(struct omap_dss_device *dssdev) in->ops.atv->set_timings(in, &ddata->timings); if (!ddata->dev->of_node) { - 
in->ops.atv->set_type(in, ddata->connector_type); + in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE); in->ops.atv->invert_vid_out_polarity(in, ddata->invert_polarity); @@ -207,7 +203,6 @@ static int tvc_probe_pdata(struct platform_device *pdev) ddata->in = in; - ddata->connector_type = pdata->connector_type; ddata->invert_polarity = pdata->invert_polarity; dssdev = &ddata->dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 747f26a55e43..75f7827525cf 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -15,10 +15,10 @@ #include <linux/slab.h> #include <drm/drm_edid.h> - -#include <video/omapdss.h> #include <video/omap-panel-data.h> +#include "../dss/omapdss.h" + static const struct omap_video_timings dvic_default_timings = { .x_res = 640, .y_res = 480, diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 667ca4a24ece..7bdf83af9797 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -17,10 +17,10 @@ #include <linux/of_gpio.h> #include <drm/drm_edid.h> - -#include <video/omapdss.h> #include <video/omap-panel-data.h> +#include "../dss/omapdss.h" + static const struct omap_video_timings hdmic_default_timings = { .x_res = 640, .y_res = 480, diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 9594ff7a2b0c..fe4e7ec3bab0 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -18,9 +18,8 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <linux/of_gpio.h> -#include <video/omapdss.h> +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 671806ca7d6a..d768217cefe0 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -15,8 +15,7 @@ #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 916a89978387..46855c8f5cbf 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -16,8 +16,7 @@ #include <linux/platform_device.h> #include <linux/gpio/consumer.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 7c2331be8d15..7f16f985ab22 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -15,11 +15,13 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/of_gpio.h> +#include <linux/regulator/consumer.h> -#include <video/omapdss.h> #include <video/omap-panel-data.h> #include <video/of_display_timing.h> +#include "../dss/omapdss.h" + struct panel_drv_data { struct omap_dss_device dssdev; struct omap_dss_device *in; @@ -32,6 
+34,7 @@ struct panel_drv_data { int backlight_gpio; struct gpio_desc *enable_gpio; + struct regulator *vcc_supply; }; #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) @@ -83,6 +86,12 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev) if (r) return r; + r = regulator_enable(ddata->vcc_supply); + if (r) { + in->ops.dpi->disable(in); + return r; + } + gpiod_set_value_cansleep(ddata->enable_gpio, 1); if (gpio_is_valid(ddata->backlight_gpio)) @@ -105,6 +114,7 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev) gpio_set_value_cansleep(ddata->backlight_gpio, 0); gpiod_set_value_cansleep(ddata->enable_gpio, 0); + regulator_disable(ddata->vcc_supply); in->ops.dpi->disable(in); @@ -213,6 +223,20 @@ static int panel_dpi_probe_of(struct platform_device *pdev) ddata->enable_gpio = gpio; + /* + * Many different panels are supported by this driver and there are + * probably very different needs for their reset pins in regards to + * timing and order relative to the enable gpio. So for now it's just + * ensured that the reset line isn't active. + */ + gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc"); + if (IS_ERR(ddata->vcc_supply)) + return PTR_ERR(ddata->vcc_supply); + ddata->backlight_gpio = -ENOENT; r = of_get_display_timing(node, "panel-timing", &timing); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 2b118071b5a1..1b0cf2d8224b 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -25,10 +25,10 @@ #include <linux/of_device.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> #include <video/mipi_display.h> +#include "../dss/omapdss.h" + /* DSI Virtual channel. Hardcoded for now. 
*/ #define TCH 0 diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index ac680e1de603..6dfb96cea293 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -17,8 +17,7 @@ #include <linux/gpio.h> #include <linux/gpio/consumer.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include "../dss/omapdss.h" static struct omap_video_timings lb035q02_timings = { .x_res = 320, @@ -51,9 +50,6 @@ struct panel_drv_data { struct omap_video_timings videomode; - /* used for non-DT boot, to be removed */ - int backlight_gpio; - struct gpio_desc *enable_gpio; }; @@ -171,9 +167,6 @@ static int lb035q02_enable(struct omap_dss_device *dssdev) if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 1); - if (gpio_is_valid(ddata->backlight_gpio)) - gpio_set_value_cansleep(ddata->backlight_gpio, 1); - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; return 0; @@ -190,9 +183,6 @@ static void lb035q02_disable(struct omap_dss_device *dssdev) if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 0); - if (gpio_is_valid(ddata->backlight_gpio)) - gpio_set_value_cansleep(ddata->backlight_gpio, 0); - in->ops.dpi->disable(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; @@ -256,8 +246,6 @@ static int lb035q02_probe_of(struct spi_device *spi) ddata->enable_gpio = gpio; - ddata->backlight_gpio = -ENOENT; - in = omapdss_of_find_source_for_first_ep(node); if (IS_ERR(in)) { dev_err(&spi->dev, "failed to find video source\n"); @@ -290,13 +278,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) if (r) return r; - if (gpio_is_valid(ddata->backlight_gpio)) { - r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio, - GPIOF_OUT_INIT_LOW, "panel backlight"); - if (r) - goto err_gpio; - } - ddata->videomode = lb035q02_timings; dssdev = &ddata->dssdev; @@ -316,7 +297,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) return 0; err_reg: -err_gpio: omap_dss_put_device(ddata->in); return r; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 38d2920a95e6..fc4c238c9583 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -18,7 +18,7 @@ #include <linux/gpio/consumer.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 4363fffc87e3..3d3efc561ea9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -13,11 +13,11 @@ #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/regulator/consumer.h> -#include <video/omapdss.h> + +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index deb416736aad..157c512205d1 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -33,9 +33,10 @@ 
#include <linux/of.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> #include <video/omap-panel-data.h> +#include "../dss/omapdss.h" + #define MIPID_CMD_READ_DISP_ID 0x04 #define MIPID_CMD_READ_RED 0x06 #define MIPID_CMD_READ_GREEN 0x07 diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index bd8d85041926..e859b3f893f7 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -28,7 +28,8 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/gpio.h> -#include <video/omapdss.h> + +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index d93175b03a12..66c6bbe6472b 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -19,7 +19,7 @@ #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include "../dss/omapdss.h" #define TPO_R02_MODE(x) ((x) & 7) #define TPO_R02_MODE_800x480 7 diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c index 7e4e5bebabbe..6a3ebfcd7223 100644 --- a/drivers/gpu/drm/omapdrm/dss/core.c +++ b/drivers/gpu/drm/omapdrm/dss/core.c @@ -35,8 +35,7 @@ #include <linux/suspend.h> #include <linux/slab.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -196,8 +195,6 @@ static int __init omap_dss_probe(struct platform_device *pdev) core.default_display_name = def_disp_name; else if (pdata->default_display_name) core.default_display_name = pdata->default_display_name; - else if (pdata->default_device) - core.default_display_name = pdata->default_device->name; return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index f83608b69e68..535240fba671 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -41,8 +41,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" #include "dispc.h" @@ -113,9 +112,14 @@ struct dispc_features { * never both, we can just use this flag for now. 
*/ bool reverse_ilace_field_order:1; + + bool has_gamma_table:1; + + bool has_gamma_i734_bug:1; }; #define DISPC_MAX_NR_FIFOS 5 +#define DISPC_MAX_CHANNEL_GAMMA 4 static struct { struct platform_device *pdev; @@ -135,6 +139,8 @@ static struct { bool ctx_valid; u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; + u32 *gamma_table[DISPC_MAX_CHANNEL_GAMMA]; + const struct dispc_features *feat; bool is_enabled; @@ -178,11 +184,19 @@ struct dispc_reg_field { u8 low; }; +struct dispc_gamma_desc { + u32 len; + u32 bits; + u16 reg; + bool has_index; +}; + static const struct { const char *name; u32 vsync_irq; u32 framedone_irq; u32 sync_lost_irq; + struct dispc_gamma_desc gamma; struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM]; } mgr_desc[] = { [OMAP_DSS_CHANNEL_LCD] = { @@ -190,6 +204,12 @@ static const struct { .vsync_irq = DISPC_IRQ_VSYNC, .framedone_irq = DISPC_IRQ_FRAMEDONE, .sync_lost_irq = DISPC_IRQ_SYNC_LOST, + .gamma = { + .len = 256, + .bits = 8, + .reg = DISPC_GAMMA_TABLE0, + .has_index = true, + }, .reg_desc = { [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 }, [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 }, @@ -207,6 +227,12 @@ static const struct { .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN, .framedone_irq = DISPC_IRQ_FRAMEDONETV, .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT, + .gamma = { + .len = 1024, + .bits = 10, + .reg = DISPC_GAMMA_TABLE2, + .has_index = false, + }, .reg_desc = { [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 }, [DISPC_MGR_FLD_STNTFT] = { }, @@ -224,6 +250,12 @@ static const struct { .vsync_irq = DISPC_IRQ_VSYNC2, .framedone_irq = DISPC_IRQ_FRAMEDONE2, .sync_lost_irq = DISPC_IRQ_SYNC_LOST2, + .gamma = { + .len = 256, + .bits = 8, + .reg = DISPC_GAMMA_TABLE1, + .has_index = true, + }, .reg_desc = { [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 }, [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 }, @@ -241,6 +273,12 @@ static const struct { .vsync_irq = DISPC_IRQ_VSYNC3, .framedone_irq = DISPC_IRQ_FRAMEDONE3, .sync_lost_irq = DISPC_IRQ_SYNC_LOST3, + .gamma = { + .len = 256, + .bits = 8, + .reg = DISPC_GAMMA_TABLE3, + .has_index = true, + }, .reg_desc = { [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 }, [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 }, @@ -1084,20 +1122,6 @@ static u32 dispc_ovl_get_burst_size(enum omap_plane plane) return unit * 8; } -void dispc_enable_gamma_table(bool enable) -{ - /* - * This is partially implemented to support only disabling of - * the gamma table. 
- */ - if (enable) { - DSSWARN("Gamma table enabling for TV not yet supported"); - return; - } - - REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9); -} - static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable) { if (channel == OMAP_DSS_CHANNEL_DIGIT) @@ -3299,30 +3323,21 @@ static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div, static unsigned long dispc_fclk_rate(void) { - struct dss_pll *pll; - unsigned long r = 0; + unsigned long r; + enum dss_clk_source src; + + src = dss_get_dispc_clk_source(); - switch (dss_get_dispc_clk_source()) { - case OMAP_DSS_CLK_SRC_FCK: + if (src == DSS_CLK_SRC_FCK) { r = dss_get_dispc_clk_rate(); - break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: - pll = dss_pll_find("dsi0"); - if (!pll) - pll = dss_pll_find("video0"); + } else { + struct dss_pll *pll; + unsigned clkout_idx; - r = pll->cinfo.clkout[0]; - break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: - pll = dss_pll_find("dsi1"); - if (!pll) - pll = dss_pll_find("video1"); + pll = dss_pll_find_by_src(src); + clkout_idx = dss_pll_get_clkout_idx_for_src(src); - r = pll->cinfo.clkout[0]; - break; - default: - BUG(); - return 0; + r = pll->cinfo.clkout[clkout_idx]; } return r; @@ -3330,43 +3345,31 @@ static unsigned long dispc_fclk_rate(void) static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel) { - struct dss_pll *pll; int lcd; unsigned long r; - u32 l; - - if (dss_mgr_is_lcd(channel)) { - l = dispc_read_reg(DISPC_DIVISORo(channel)); + enum dss_clk_source src; - lcd = FLD_GET(l, 23, 16); + /* for TV, LCLK rate is the FCLK rate */ + if (!dss_mgr_is_lcd(channel)) + return dispc_fclk_rate(); - switch (dss_get_lcd_clk_source(channel)) { - case OMAP_DSS_CLK_SRC_FCK: - r = dss_get_dispc_clk_rate(); - break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: - pll = dss_pll_find("dsi0"); - if (!pll) - pll = dss_pll_find("video0"); + src = dss_get_lcd_clk_source(channel); - r = pll->cinfo.clkout[0]; - break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: - pll = dss_pll_find("dsi1"); - if (!pll) - pll = dss_pll_find("video1"); + if (src == DSS_CLK_SRC_FCK) { + r = dss_get_dispc_clk_rate(); + } else { + struct dss_pll *pll; + unsigned clkout_idx; - r = pll->cinfo.clkout[0]; - break; - default: - BUG(); - return 0; - } + pll = dss_pll_find_by_src(src); + clkout_idx = dss_pll_get_clkout_idx_for_src(src); - return r / lcd; - } else { - return dispc_fclk_rate(); + r = pll->cinfo.clkout[clkout_idx]; } + + lcd = REG_GET(DISPC_DIVISORo(channel), 23, 16); + + return r / lcd; } static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel) @@ -3426,15 +3429,14 @@ static unsigned long dispc_plane_lclk_rate(enum omap_plane plane) static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel) { int lcd, pcd; - enum omap_dss_clk_source lcd_clk_src; + enum dss_clk_source lcd_clk_src; seq_printf(s, "- %s -\n", mgr_desc[channel].name); lcd_clk_src = dss_get_lcd_clk_source(channel); - seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name, - dss_get_generic_clk_source_name(lcd_clk_src), - dss_feat_get_clk_source_name(lcd_clk_src)); + seq_printf(s, "%s clk source = %s\n", mgr_desc[channel].name, + dss_get_clk_source_name(lcd_clk_src)); dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd); @@ -3448,16 +3450,15 @@ void dispc_dump_clocks(struct seq_file *s) { int lcd; u32 l; - enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); + enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); if (dispc_runtime_get()) return; seq_printf(s, "- 
DISPC -\n"); - seq_printf(s, "dispc fclk source = %s (%s)\n", - dss_get_generic_clk_source_name(dispc_clk_src), - dss_feat_get_clk_source_name(dispc_clk_src)); + seq_printf(s, "dispc fclk source = %s\n", + dss_get_clk_source_name(dispc_clk_src)); seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate()); @@ -3814,6 +3815,139 @@ void dispc_disable_sidle(void) REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */ } +u32 dispc_mgr_gamma_size(enum omap_channel channel) +{ + const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma; + + if (!dispc.feat->has_gamma_table) + return 0; + + return gdesc->len; +} +EXPORT_SYMBOL(dispc_mgr_gamma_size); + +static void dispc_mgr_write_gamma_table(enum omap_channel channel) +{ + const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma; + u32 *table = dispc.gamma_table[channel]; + unsigned int i; + + DSSDBG("%s: channel %d\n", __func__, channel); + + for (i = 0; i < gdesc->len; ++i) { + u32 v = table[i]; + + if (gdesc->has_index) + v |= i << 24; + else if (i == 0) + v |= 1 << 31; + + dispc_write_reg(gdesc->reg, v); + } +} + +static void dispc_restore_gamma_tables(void) +{ + DSSDBG("%s()\n", __func__); + + if (!dispc.feat->has_gamma_table) + return; + + dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD); + + dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_DIGIT); + + if (dss_has_feature(FEAT_MGR_LCD2)) + dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD2); + + if (dss_has_feature(FEAT_MGR_LCD3)) + dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD3); +} + +static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = { + { .red = 0, .green = 0, .blue = 0, }, + { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, }, +}; + +void dispc_mgr_set_gamma(enum omap_channel channel, + const struct drm_color_lut *lut, + unsigned int length) +{ + const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma; + u32 *table = dispc.gamma_table[channel]; + uint i; + + DSSDBG("%s: channel %d, lut len %u, hw len %u\n", __func__, + channel, length, gdesc->len); + + if (!dispc.feat->has_gamma_table) + return; + + if (lut == NULL || length < 2) { + lut = dispc_mgr_gamma_default_lut; + length = ARRAY_SIZE(dispc_mgr_gamma_default_lut); + } + + for (i = 0; i < length - 1; ++i) { + uint first = i * (gdesc->len - 1) / (length - 1); + uint last = (i + 1) * (gdesc->len - 1) / (length - 1); + uint w = last - first; + u16 r, g, b; + uint j; + + if (w == 0) + continue; + + for (j = 0; j <= w; j++) { + r = (lut[i].red * (w - j) + lut[i+1].red * j) / w; + g = (lut[i].green * (w - j) + lut[i+1].green * j) / w; + b = (lut[i].blue * (w - j) + lut[i+1].blue * j) / w; + + r >>= 16 - gdesc->bits; + g >>= 16 - gdesc->bits; + b >>= 16 - gdesc->bits; + + table[first + j] = (r << (gdesc->bits * 2)) | + (g << gdesc->bits) | b; + } + } + + if (dispc.is_enabled) + dispc_mgr_write_gamma_table(channel); +} +EXPORT_SYMBOL(dispc_mgr_set_gamma); + +static int dispc_init_gamma_tables(void) +{ + int channel; + + if (!dispc.feat->has_gamma_table) + return 0; + + for (channel = 0; channel < ARRAY_SIZE(dispc.gamma_table); channel++) { + const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma; + u32 *gt; + + if (channel == OMAP_DSS_CHANNEL_LCD2 && + !dss_has_feature(FEAT_MGR_LCD2)) + continue; + + if (channel == OMAP_DSS_CHANNEL_LCD3 && + !dss_has_feature(FEAT_MGR_LCD3)) + continue; + + gt = devm_kmalloc_array(&dispc.pdev->dev, gdesc->len, + sizeof(u32), GFP_KERNEL); + if (!gt) + return -ENOMEM; + + dispc.gamma_table[channel] = gt; + + dispc_mgr_set_gamma(channel, NULL, 0); + } + return 0; 
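dispc_mgr_set_gamma() above expands a short LUT (possibly just the two-entry default) into the full hardware table by linear interpolation between neighbouring entries, then narrows each component to the channel's bit width and packs R:G:B into one word. The same arithmetic in standalone form (8-bit table, names invented); the index math distributes the hardware slots evenly across the input segments:

#include <stdint.h>
#include <stdio.h>

struct color_lut { uint16_t red, green, blue; };

#define HW_LEN  256 /* hardware table length, e.g. an 8-bit LCD gamma */
#define HW_BITS 8

static uint32_t hw_table[HW_LEN];

/* Expand `length` 16-bit LUT entries to HW_LEN packed R:G:B words by
 * piecewise-linear interpolation, mirroring dispc_mgr_set_gamma(). */
static void set_gamma(const struct color_lut *lut, unsigned length)
{
        unsigned i, j;

        if (length < 2)
                return; /* the kernel falls back to a default LUT here */

        for (i = 0; i < length - 1; i++) {
                unsigned first = i * (HW_LEN - 1) / (length - 1);
                unsigned last = (i + 1) * (HW_LEN - 1) / (length - 1);
                unsigned w = last - first;

                if (w == 0)
                        continue;

                for (j = 0; j <= w; j++) {
                        uint16_t r = (lut[i].red * (w - j) + lut[i + 1].red * j) / w;
                        uint16_t g = (lut[i].green * (w - j) + lut[i + 1].green * j) / w;
                        uint16_t b = (lut[i].blue * (w - j) + lut[i + 1].blue * j) / w;

                        r >>= 16 - HW_BITS;
                        g >>= 16 - HW_BITS;
                        b >>= 16 - HW_BITS;

                        hw_table[first + j] = ((uint32_t)r << (HW_BITS * 2)) |
                                              ((uint32_t)g << HW_BITS) | b;
                }
        }
}

int main(void)
{
        /* identity ramp from two entries, like the default LUT in the patch */
        struct color_lut lut[2] = { { 0, 0, 0 }, { 0xffff, 0xffff, 0xffff } };

        set_gamma(lut, 2);
        printf("entry 0   = 0x%06x\n", hw_table[0]);          /* 0x000000 */
        printf("entry 255 = 0x%06x\n", hw_table[HW_LEN - 1]); /* 0xffffff */
        return 0;
}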
+} + static void _omap_dispc_initial_config(void) { u32 l; @@ -3829,8 +3963,15 @@ static void _omap_dispc_initial_config(void) dispc.core_clk_rate = dispc_fclk_rate(); } - /* FUNCGATED */ - if (dss_has_feature(FEAT_FUNCGATED)) + /* Use gamma table mode, instead of palette mode */ + if (dispc.feat->has_gamma_table) + REG_FLD_MOD(DISPC_CONFIG, 1, 3, 3); + + /* For older DSS versions (FEAT_FUNCGATED) this enables + * func-clock auto-gating. For newer versions + * (dispc.feat->has_gamma_table) this enables tv-out gamma tables. + */ + if (dss_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table) REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9); dispc_setup_color_conv_coef(); @@ -3934,6 +4075,8 @@ static const struct dispc_features omap44xx_dispc_feats = { .has_writeback = true, .supports_double_pixel = true, .reverse_ilace_field_order = true, + .has_gamma_table = true, + .has_gamma_i734_bug = true, }; static const struct dispc_features omap54xx_dispc_feats = { @@ -3959,6 +4102,8 @@ static const struct dispc_features omap54xx_dispc_feats = { .has_writeback = true, .supports_double_pixel = true, .reverse_ilace_field_order = true, + .has_gamma_table = true, + .has_gamma_i734_bug = true, }; static int dispc_init_features(struct platform_device *pdev) @@ -4050,6 +4195,168 @@ void dispc_free_irq(void *dev_id) } EXPORT_SYMBOL(dispc_free_irq); +/* + * Workaround for errata i734 in DSS dispc + * - LCD1 Gamma Correction Is Not Working When GFX Pipe Is Disabled + * + * For gamma tables to work on LCD1 the GFX plane has to be used at + * least once after DSS HW has come out of reset. The workaround + * sets up a minimal LCD setup with GFX plane and waits for one + * vertical sync irq before disabling the setup and continuing with + * the context restore. The physical outputs are gated during the + * operation. This workaround requires that gamma table's LOADMODE + * is set to 0x2 in DISPC_CONTROL1 register. + * + * For details see: + * OMAP543x Multimedia Device Silicon Revision 2.0 Silicon Errata + * Literature Number: SWPZ037E + * Or some other relevant errata document for the DSS IP version. 
+ */ + +static const struct dispc_errata_i734_data { + struct omap_video_timings timings; + struct omap_overlay_info ovli; + struct omap_overlay_manager_info mgri; + struct dss_lcd_mgr_config lcd_conf; +} i734 = { + .timings = { + .x_res = 8, .y_res = 1, + .pixelclock = 16000000, + .hsw = 8, .hfp = 4, .hbp = 4, + .vsw = 1, .vfp = 1, .vbp = 1, + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .interlace = false, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .double_pixel = false, + }, + .ovli = { + .screen_width = 1, + .width = 1, .height = 1, + .color_mode = OMAP_DSS_COLOR_RGB24U, + .rotation = OMAP_DSS_ROT_0, + .rotation_type = OMAP_DSS_ROT_DMA, + .mirror = 0, + .pos_x = 0, .pos_y = 0, + .out_width = 0, .out_height = 0, + .global_alpha = 0xff, + .pre_mult_alpha = 0, + .zorder = 0, + }, + .mgri = { + .default_color = 0, + .trans_enabled = false, + .partial_alpha_enabled = false, + .cpr_enable = false, + }, + .lcd_conf = { + .io_pad_mode = DSS_IO_PAD_MODE_BYPASS, + .stallmode = false, + .fifohandcheck = false, + .clock_info = { + .lck_div = 1, + .pck_div = 2, + }, + .video_port_width = 24, + .lcden_sig_polarity = 0, + }, +}; + +static struct i734_buf { + size_t size; + dma_addr_t paddr; + void *vaddr; +} i734_buf; + +static int dispc_errata_i734_wa_init(void) +{ + if (!dispc.feat->has_gamma_i734_bug) + return 0; + + i734_buf.size = i734.ovli.width * i734.ovli.height * + color_mode_to_bpp(i734.ovli.color_mode) / 8; + + i734_buf.vaddr = dma_alloc_writecombine(&dispc.pdev->dev, i734_buf.size, + &i734_buf.paddr, GFP_KERNEL); + if (!i734_buf.vaddr) { + dev_err(&dispc.pdev->dev, "%s: dma_alloc_writecombine failed", + __func__); + return -ENOMEM; + } + + return 0; +} + +static void dispc_errata_i734_wa_fini(void) +{ + if (!dispc.feat->has_gamma_i734_bug) + return; + + dma_free_writecombine(&dispc.pdev->dev, i734_buf.size, i734_buf.vaddr, + i734_buf.paddr); +} + +static void dispc_errata_i734_wa(void) +{ + u32 framedone_irq = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_LCD); + struct omap_overlay_info ovli; + struct dss_lcd_mgr_config lcd_conf; + u32 gatestate; + unsigned int count; + + if (!dispc.feat->has_gamma_i734_bug) + return; + + gatestate = REG_GET(DISPC_CONFIG, 8, 4); + + ovli = i734.ovli; + ovli.paddr = i734_buf.paddr; + lcd_conf = i734.lcd_conf; + + /* Gate all LCD1 outputs */ + REG_FLD_MOD(DISPC_CONFIG, 0x1f, 8, 4); + + /* Set up and enable GFX plane */ + dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD); + dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false); + dispc_ovl_enable(OMAP_DSS_GFX, true); + + /* Set up and enable display manager for LCD1 */ + dispc_mgr_setup(OMAP_DSS_CHANNEL_LCD, &i734.mgri); + dispc_calc_clock_rates(dss_get_dispc_clk_rate(), + &lcd_conf.clock_info); + dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf); + dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings); + + dispc_clear_irqstatus(framedone_irq); + + /* Enable and shut the channel to produce just one frame */ + dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, true); + dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, false); + + /* Busy wait for framedone. We can't fiddle with irq handlers + * in PM resume. Typically the loop runs less than 5 times and + * waits less than a microsecond.

+ */ + count = 0; + while (!(dispc_read_irqstatus() & framedone_irq)) { + if (count++ > 10000) { + dev_err(&dispc.pdev->dev, "%s: framedone timeout\n", + __func__); + break; + } + } + dispc_ovl_enable(OMAP_DSS_GFX, false); + + /* Clear all irq bits before continuing */ + dispc_clear_irqstatus(0xffffffff); + + /* Restore the original state to LCD1 output gates */ + REG_FLD_MOD(DISPC_CONFIG, gatestate, 8, 4); +} + /* DISPC HW IP initialisation */ static int dispc_bind(struct device *dev, struct device *master, void *data) { @@ -4067,6 +4374,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) if (r) return r; + r = dispc_errata_i734_wa_init(); + if (r) + return r; + dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0); if (!dispc_mem) { DSSERR("can't get IORESOURCE_MEM DISPC\n"); @@ -4100,6 +4411,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) } } + r = dispc_init_gamma_tables(); + if (r) + return r; + pm_runtime_enable(&pdev->dev); r = dispc_runtime_get(); @@ -4127,6 +4442,8 @@ static void dispc_unbind(struct device *dev, struct device *master, void *data) { pm_runtime_disable(dev); + + dispc_errata_i734_wa_fini(); } static const struct component_ops dispc_component_ops = { @@ -4169,7 +4486,11 @@ static int dispc_runtime_resume(struct device *dev) if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) { _omap_dispc_initial_config(); + dispc_errata_i734_wa(); + dispc_restore_context(); + + dispc_restore_gamma_tables(); } dispc.is_enabled = true; diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.h b/drivers/gpu/drm/omapdrm/dss/dispc.h index 483744223dd1..bc1d8126ee87 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.h +++ b/drivers/gpu/drm/omapdrm/dss/dispc.h @@ -42,6 +42,11 @@ #define DISPC_MSTANDBY_CTRL 0x0858 #define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C +#define DISPC_GAMMA_TABLE0 0x0630 +#define DISPC_GAMMA_TABLE1 0x0634 +#define DISPC_GAMMA_TABLE2 0x0638 +#define DISPC_GAMMA_TABLE3 0x0850 + /* DISPC overlay registers */ #define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \ DISPC_BA0_OFFSET(n)) diff --git a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c index 038c15b04215..34fad2376f8d 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c @@ -18,8 +18,8 @@ */ #include <linux/kernel.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dispc.h" static const struct dispc_coef coef3_M8[8] = { diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index 9f3dd09b0a6c..8dcdd7cf9937 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -28,7 +28,7 @@ #include <linux/platform_device.h> #include <linux/of.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dss.h" #include "dss_features.h" diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 97ea60257884..b268295b76cf 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -34,17 +34,15 @@ #include <linux/clk.h> #include <linux/component.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" -#define HSDIV_DISPC 0 - struct dpi_data { struct platform_device *pdev; struct regulator *vdds_dsi_reg; + enum dss_clk_source clk_src; struct dss_pll *pll; struct mutex lock; @@ -69,7 +67,7 @@ static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev) return 
dev_get_drvdata(&pdev->dev); } -static struct dss_pll *dpi_get_pll(enum omap_channel channel) +static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel) { /* * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL @@ -83,64 +81,51 @@ static struct dss_pll *dpi_get_pll(enum omap_channel channel) case OMAPDSS_VER_OMAP3630: case OMAPDSS_VER_AM35xx: case OMAPDSS_VER_AM43xx: - return NULL; + return DSS_CLK_SRC_FCK; case OMAPDSS_VER_OMAP4430_ES1: case OMAPDSS_VER_OMAP4430_ES2: case OMAPDSS_VER_OMAP4: switch (channel) { case OMAP_DSS_CHANNEL_LCD: - return dss_pll_find("dsi0"); + return DSS_CLK_SRC_PLL1_1; case OMAP_DSS_CHANNEL_LCD2: - return dss_pll_find("dsi1"); + return DSS_CLK_SRC_PLL2_1; default: - return NULL; + return DSS_CLK_SRC_FCK; } case OMAPDSS_VER_OMAP5: switch (channel) { case OMAP_DSS_CHANNEL_LCD: - return dss_pll_find("dsi0"); + return DSS_CLK_SRC_PLL1_1; case OMAP_DSS_CHANNEL_LCD3: - return dss_pll_find("dsi1"); + return DSS_CLK_SRC_PLL2_1; + case OMAP_DSS_CHANNEL_LCD2: default: - return NULL; + return DSS_CLK_SRC_FCK; } case OMAPDSS_VER_DRA7xx: switch (channel) { case OMAP_DSS_CHANNEL_LCD: + return DSS_CLK_SRC_PLL1_1; case OMAP_DSS_CHANNEL_LCD2: - return dss_pll_find("video0"); + return DSS_CLK_SRC_PLL1_3; case OMAP_DSS_CHANNEL_LCD3: - return dss_pll_find("video1"); + return DSS_CLK_SRC_PLL2_1; default: - return NULL; + return DSS_CLK_SRC_FCK; } default: - return NULL; - } -} - -static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel) -{ - switch (channel) { - case OMAP_DSS_CHANNEL_LCD: - return OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC; - case OMAP_DSS_CHANNEL_LCD2: - return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC; - case OMAP_DSS_CHANNEL_LCD3: - return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC; - default: - /* this shouldn't happen */ - WARN_ON(1); - return OMAP_DSS_CLK_SRC_FCK; + return DSS_CLK_SRC_FCK; } } struct dpi_clk_calc_ctx { struct dss_pll *pll; + unsigned clkout_idx; /* inputs */ @@ -148,7 +133,7 @@ struct dpi_clk_calc_ctx { /* outputs */ - struct dss_pll_clock_info dsi_cinfo; + struct dss_pll_clock_info pll_cinfo; unsigned long fck; struct dispc_clock_info dispc_cinfo; }; @@ -193,8 +178,8 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc, if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000) return false; - ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc; - ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc; + ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc; + ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc; return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max, dpi_calc_dispc_cb, ctx); @@ -207,12 +192,12 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint, { struct dpi_clk_calc_ctx *ctx = data; - ctx->dsi_cinfo.n = n; - ctx->dsi_cinfo.m = m; - ctx->dsi_cinfo.fint = fint; - ctx->dsi_cinfo.clkdco = clkdco; + ctx->pll_cinfo.n = n; + ctx->pll_cinfo.m = m; + ctx->pll_cinfo.fint = fint; + ctx->pll_cinfo.clkdco = clkdco; - return dss_pll_hsdiv_calc(ctx->pll, clkdco, + return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), dpi_calc_hsdiv_cb, ctx); } @@ -227,25 +212,39 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data) dpi_calc_dispc_cb, ctx); } -static bool dpi_dsi_clk_calc(struct dpi_data *dpi, unsigned long pck, +static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck, struct dpi_clk_calc_ctx *ctx) { unsigned long clkin; - unsigned long pll_min, pll_max; memset(ctx, 0, sizeof(*ctx)); ctx->pll = dpi->pll; - ctx->pck_min = pck - 1000; - 
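
The dpi_get_pll() to dpi_get_clk_src() rewrite above replaces PLL-object lookups with a per-SoC table of generic clock sources. A standalone restatement of the DRA7xx branch, with the enums abbreviated and the values copied from the hunk:

#include <stdio.h>

enum dss_clk_source {
	DSS_CLK_SRC_FCK,
	DSS_CLK_SRC_PLL1_1,
	DSS_CLK_SRC_PLL1_3,
	DSS_CLK_SRC_PLL2_1,
};

enum omap_channel {
	OMAP_DSS_CHANNEL_LCD,
	OMAP_DSS_CHANNEL_LCD2,
	OMAP_DSS_CHANNEL_LCD3,
};

/* DRA7xx rows of dpi_get_clk_src(): every LCD manager gets a dedicated
 * PLL output, anything else falls back to DSS_FCK. */
static enum dss_clk_source dra7xx_dpi_clk_src(enum omap_channel ch)
{
	switch (ch) {
	case OMAP_DSS_CHANNEL_LCD:  return DSS_CLK_SRC_PLL1_1;
	case OMAP_DSS_CHANNEL_LCD2: return DSS_CLK_SRC_PLL1_3;
	case OMAP_DSS_CHANNEL_LCD3: return DSS_CLK_SRC_PLL2_1;
	default:                    return DSS_CLK_SRC_FCK;
	}
}

int main(void)
{
	printf("LCD2 -> %d\n", dra7xx_dpi_clk_src(OMAP_DSS_CHANNEL_LCD2));
	return 0;
}
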
ctx->pck_max = pck + 1000; + ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src); - pll_min = 0; - pll_max = 0; + clkin = clk_get_rate(dpi->pll->clkin); - clkin = clk_get_rate(ctx->pll->clkin); + if (dpi->pll->hw->type == DSS_PLL_TYPE_A) { + unsigned long pll_min, pll_max; - return dss_pll_calc(ctx->pll, clkin, - pll_min, pll_max, - dpi_calc_pll_cb, ctx); + ctx->pck_min = pck - 1000; + ctx->pck_max = pck + 1000; + + pll_min = 0; + pll_max = 0; + + return dss_pll_calc_a(ctx->pll, clkin, + pll_min, pll_max, + dpi_calc_pll_cb, ctx); + } else { /* DSS_PLL_TYPE_B */ + dss_pll_calc_b(dpi->pll, clkin, pck, &ctx->pll_cinfo); + + ctx->dispc_cinfo.lck_div = 1; + ctx->dispc_cinfo.pck_div = 1; + ctx->dispc_cinfo.lck = ctx->pll_cinfo.clkout[0]; + ctx->dispc_cinfo.pck = ctx->dispc_cinfo.lck; + + return true; + } } static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx) @@ -279,7 +278,7 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx) -static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel, +static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel, unsigned long pck_req, unsigned long *fck, int *lck_div, int *pck_div) { @@ -287,20 +286,19 @@ static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel, int r; bool ok; - ok = dpi_dsi_clk_calc(dpi, pck_req, &ctx); + ok = dpi_pll_clk_calc(dpi, pck_req, &ctx); if (!ok) return -EINVAL; - r = dss_pll_set_config(dpi->pll, &ctx.dsi_cinfo); + r = dss_pll_set_config(dpi->pll, &ctx.pll_cinfo); if (r) return r; - dss_select_lcd_clk_source(channel, - dpi_get_alt_clk_src(channel)); + dss_select_lcd_clk_source(channel, dpi->clk_src); dpi->mgr_config.clock_info = ctx.dispc_cinfo; - *fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; + *fck = ctx.pll_cinfo.clkout[ctx.clkout_idx]; *lck_div = ctx.dispc_cinfo.lck_div; *pck_div = ctx.dispc_cinfo.pck_div; @@ -342,7 +340,7 @@ static int dpi_set_mode(struct dpi_data *dpi) int r = 0; if (dpi->pll) - r = dpi_set_dsi_clk(dpi, channel, t->pixelclock, &fck, + r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck, &lck_div, &pck_div); else r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck, @@ -419,7 +417,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev) if (dpi->pll) { r = dss_pll_enable(dpi->pll); if (r) - goto err_dsi_pll_init; + goto err_pll_init; } r = dpi_set_mode(dpi); @@ -442,7 +440,7 @@ err_mgr_enable: err_set_mode: if (dpi->pll) dss_pll_disable(dpi->pll); -err_dsi_pll_init: +err_pll_init: err_src_sel: dispc_runtime_put(); err_get_dispc: @@ -465,7 +463,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev) dss_mgr_disable(channel); if (dpi->pll) { - dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); + dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK); dss_pll_disable(dpi->pll); } @@ -524,11 +522,11 @@ static int dpi_check_timings(struct omap_dss_device *dssdev, return -EINVAL; if (dpi->pll) { - ok = dpi_dsi_clk_calc(dpi, timings->pixelclock, &ctx); + ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx); if (!ok) return -EINVAL; - fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; + fck = ctx.pll_cinfo.clkout[ctx.clkout_idx]; } else { ok = dpi_dss_clk_calc(timings->pixelclock, &ctx); if (!ok) @@ -558,7 +556,7 @@ static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines) mutex_unlock(&dpi->lock); } -static int dpi_verify_dsi_pll(struct dss_pll *pll) +static int dpi_verify_pll(struct dss_pll *pll) { int r; @@ -602,16 +600,14 @@ static void dpi_init_pll(struct dpi_data *dpi) if 
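
The reworked dpi_pll_clk_calc() above branches on the new PLL type: type-A PLLs run the iterative divider search, accepting any pixel clock inside a +/-1 kHz window around the request, while type-B PLLs are programmed directly and feed DISPC with both dividers set to 1. A small sketch of the two outcomes, with an illustrative pixel clock:

#include <stdio.h>

struct dispc_clock_info {
	unsigned long lck, pck;
	unsigned lck_div, pck_div;
};

/* Type-B branch: the PLL's clkout[0] drives DISPC directly. */
static void type_b_shortcut(unsigned long clkout0,
			    struct dispc_clock_info *ci)
{
	ci->lck_div = 1;
	ci->pck_div = 1;
	ci->lck = clkout0;
	ci->pck = ci->lck;
}

int main(void)
{
	unsigned long pck = 74250000; /* requested pixel clock, illustrative */
	struct dispc_clock_info ci;

	/* Type-A branch: search PLL settings within a +/-1 kHz window. */
	printf("type-A window: %lu..%lu Hz\n", pck - 1000, pck + 1000);

	type_b_shortcut(pck, &ci);
	printf("type-B: lck_div=%u pck_div=%u pck=%lu Hz\n",
	       ci.lck_div, ci.pck_div, ci.pck);
	return 0;
}
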
(dpi->pll) return; - pll = dpi_get_pll(dpi->output.dispc_channel); + dpi->clk_src = dpi_get_clk_src(dpi->output.dispc_channel); + + pll = dss_pll_find_by_src(dpi->clk_src); if (!pll) return; - /* On DRA7 we need to set a mux to use the PLL */ - if (omapdss_get_version() == OMAPDSS_VER_DRA7xx) - dss_ctrl_pll_set_control_mux(pll->id, dpi->output.dispc_channel); - - if (dpi_verify_dsi_pll(pll)) { - DSSWARN("DSI PLL not operational\n"); + if (dpi_verify_pll(pll)) { + DSSWARN("PLL not operational\n"); return; } diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 9ed8272e54ae..6f45e9d00b41 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -42,9 +42,9 @@ #include <linux/of_platform.h> #include <linux/component.h> -#include <video/omapdss.h> #include <video/mipi_display.h> +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -1262,7 +1262,7 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev) unsigned long r; struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); - if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) { + if (dss_get_dsi_clk_source(dsi->module_id) == DSS_CLK_SRC_FCK) { /* DSI FCLK source is DSS_CLK_FCK */ r = clk_get_rate(dsi->dss_clk); } else { @@ -1475,7 +1475,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, { struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; - enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; + enum dss_clk_source dispc_clk_src, dsi_clk_src; int dsi_module = dsi->module_id; struct dss_pll *pll = &dsi->pll; @@ -1495,28 +1495,27 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, cinfo->clkdco, cinfo->m); seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n", - dss_feat_get_clk_source_name(dsi_module == 0 ? - OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC), + dss_get_clk_source_name(dsi_module == 0 ? + DSS_CLK_SRC_PLL1_1 : + DSS_CLK_SRC_PLL2_1), cinfo->clkout[HSDIV_DISPC], cinfo->mX[HSDIV_DISPC], - dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ? + dispc_clk_src == DSS_CLK_SRC_FCK ? "off" : "on"); seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n", - dss_feat_get_clk_source_name(dsi_module == 0 ? - OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI), + dss_get_clk_source_name(dsi_module == 0 ? + DSS_CLK_SRC_PLL1_2 : + DSS_CLK_SRC_PLL2_2), cinfo->clkout[HSDIV_DSI], cinfo->mX[HSDIV_DSI], - dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ? + dsi_clk_src == DSS_CLK_SRC_FCK ? "off" : "on"); seq_printf(s, "- DSI%d -\n", dsi_module + 1); - seq_printf(s, "dsi fclk source = %s (%s)\n", - dss_get_generic_clk_source_name(dsi_clk_src), - dss_feat_get_clk_source_name(dsi_clk_src)); + seq_printf(s, "dsi fclk source = %s\n", + dss_get_clk_source_name(dsi_clk_src)); seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev)); @@ -4102,8 +4101,8 @@ static int dsi_display_init_dispc(struct platform_device *dsidev, int r; dss_select_lcd_clk_source(channel, dsi->module_id == 0 ? 
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC); + DSS_CLK_SRC_PLL1_1 : + DSS_CLK_SRC_PLL2_1); if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) { r = dss_mgr_register_framedone_handler(channel, @@ -4150,7 +4149,7 @@ err1: dss_mgr_unregister_framedone_handler(channel, dsi_framedone_irq_callback, dsidev); err: - dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); + dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK); return r; } @@ -4163,7 +4162,7 @@ static void dsi_display_uninit_dispc(struct platform_device *dsidev, dss_mgr_unregister_framedone_handler(channel, dsi_framedone_irq_callback, dsidev); - dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); + dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK); } static int dsi_configure_dsi_clocks(struct platform_device *dsidev) @@ -4197,8 +4196,8 @@ static int dsi_display_init_dsi(struct platform_device *dsidev) goto err1; dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ? - OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI); + DSS_CLK_SRC_PLL1_2 : + DSS_CLK_SRC_PLL2_2); DSSDBG("PLL OK\n"); @@ -4230,7 +4229,7 @@ static int dsi_display_init_dsi(struct platform_device *dsidev) err3: dsi_cio_uninit(dsidev); err2: - dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); + dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK); err1: dss_pll_disable(&dsi->pll); err0: @@ -4252,7 +4251,7 @@ static void dsi_display_uninit_dsi(struct platform_device *dsidev, dsi_vc_enable(dsidev, 2, 0); dsi_vc_enable(dsidev, 3, 0); - dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); + dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK); dsi_cio_uninit(dsidev); dsi_pll_uninit(dsidev, disconnect_lanes); } @@ -4453,7 +4452,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint, ctx->dsi_cinfo.fint = fint; ctx->dsi_cinfo.clkdco = clkdco; - return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, + return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), dsi_cm_calc_hsdiv_cb, ctx); } @@ -4492,7 +4491,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi, pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4); pll_max = cfg->hs_clk_max * 4; - return dss_pll_calc(ctx->pll, clkin, + return dss_pll_calc_a(ctx->pll, clkin, pll_min, pll_max, dsi_cm_calc_pll_cb, ctx); } @@ -4751,7 +4750,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint, ctx->dsi_cinfo.fint = fint; ctx->dsi_cinfo.clkdco = clkdco; - return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, + return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), dsi_vm_calc_hsdiv_cb, ctx); } @@ -4793,7 +4792,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi, pll_max = byteclk_max * 4 * 4; } - return dss_pll_calc(ctx->pll, clkin, + return dss_pll_calc_a(ctx->pll, clkin, pll_min, pll_max, dsi_vm_calc_pll_cb, ctx); } @@ -5139,6 +5138,8 @@ static const struct dss_pll_ops dsi_pll_ops = { }; static const struct dss_pll_hw dss_omap3_dsi_pll_hw = { + .type = DSS_PLL_TYPE_A, + .n_max = (1 << 7) - 1, .m_max = (1 << 11) - 1, .mX_max = (1 << 4) - 1, @@ -5164,6 +5165,8 @@ static const struct dss_pll_hw dss_omap3_dsi_pll_hw = { }; static const struct dss_pll_hw dss_omap4_dsi_pll_hw = { + .type = DSS_PLL_TYPE_A, + .n_max = (1 << 8) - 1, .m_max = (1 << 12) - 1, .mX_max = (1 << 5) - 1, @@ -5189,6 +5192,8 @@ static const struct dss_pll_hw dss_omap4_dsi_pll_hw = { }; static const struct dss_pll_hw 
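
The dsi.c hunks above are largely a mechanical rename from the old DSI-centric clock-source names to generic "PLLn, output m" names. The full correspondence, collected from the substitutions in this diff:

/* Old name (enum omap_dss_clk_source)      New name (enum dss_clk_source)
 *
 * OMAP_DSS_CLK_SRC_FCK                     DSS_CLK_SRC_FCK
 * OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC     DSS_CLK_SRC_PLL1_1
 * OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI       DSS_CLK_SRC_PLL1_2
 * OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC    DSS_CLK_SRC_PLL2_1
 * OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI      DSS_CLK_SRC_PLL2_2
 */
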
dss_omap5_dsi_pll_hw = { + .type = DSS_PLL_TYPE_A, + .n_max = (1 << 8) - 1, .m_max = (1 << 12) - 1, .mX_max = (1 << 5) - 1, diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c index bf407b6ba15c..dfd4e9621e3b 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss-of.c +++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c @@ -18,8 +18,7 @@ #include <linux/of.h> #include <linux/seq_file.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" struct device_node * diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 3303cfad4838..14887d5b02e5 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -42,8 +42,7 @@ #include <linux/suspend.h> #include <linux/component.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -76,6 +75,8 @@ struct dss_features { const enum omap_display_type *ports; int num_ports; int (*dpi_select_source)(int port, enum omap_channel channel); + int (*select_lcd_source)(enum omap_channel channel, + enum dss_clk_source clk_src); }; static struct { @@ -92,9 +93,9 @@ static struct { unsigned long cache_prate; struct dispc_clock_info cache_dispc_cinfo; - enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI]; - enum omap_dss_clk_source dispc_clk_source; - enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; + enum dss_clk_source dsi_clk_source[MAX_NUM_DSI]; + enum dss_clk_source dispc_clk_source; + enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; bool ctx_valid; u32 ctx[DSS_SZ_REGS / sizeof(u32)]; @@ -106,11 +107,14 @@ static struct { } dss; static const char * const dss_generic_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI", - [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DSI_PLL2_HSDIV_DISPC", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DSI_PLL2_HSDIV_DSI", + [DSS_CLK_SRC_FCK] = "FCK", + [DSS_CLK_SRC_PLL1_1] = "PLL1:1", + [DSS_CLK_SRC_PLL1_2] = "PLL1:2", + [DSS_CLK_SRC_PLL1_3] = "PLL1:3", + [DSS_CLK_SRC_PLL2_1] = "PLL2:1", + [DSS_CLK_SRC_PLL2_2] = "PLL2:2", + [DSS_CLK_SRC_PLL2_3] = "PLL2:3", + [DSS_CLK_SRC_HDMI_PLL] = "HDMI PLL", }; static bool dss_initialized; @@ -203,68 +207,70 @@ void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable) 1 << shift, val << shift); } -void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id, +static int dss_ctrl_pll_set_control_mux(enum dss_clk_source clk_src, enum omap_channel channel) { unsigned shift, val; if (!dss.syscon_pll_ctrl) - return; + return -EINVAL; switch (channel) { case OMAP_DSS_CHANNEL_LCD: shift = 3; - switch (pll_id) { - case DSS_PLL_VIDEO1: + switch (clk_src) { + case DSS_CLK_SRC_PLL1_1: val = 0; break; - case DSS_PLL_HDMI: + case DSS_CLK_SRC_HDMI_PLL: val = 1; break; default: DSSERR("error in PLL mux config for LCD\n"); - return; + return -EINVAL; } break; case OMAP_DSS_CHANNEL_LCD2: shift = 5; - switch (pll_id) { - case DSS_PLL_VIDEO1: + switch (clk_src) { + case DSS_CLK_SRC_PLL1_3: val = 0; break; - case DSS_PLL_VIDEO2: + case DSS_CLK_SRC_PLL2_3: val = 1; break; - case DSS_PLL_HDMI: + case DSS_CLK_SRC_HDMI_PLL: val = 2; break; default: DSSERR("error in PLL mux config for LCD2\n"); - return; + return -EINVAL; } break; case OMAP_DSS_CHANNEL_LCD3: shift = 7; - switch (pll_id) { - case DSS_PLL_VIDEO1: - val = 1; break; - case DSS_PLL_VIDEO2: + switch (clk_src) { + case DSS_CLK_SRC_PLL2_1: val = 0; break; - case 
DSS_PLL_HDMI: + case DSS_CLK_SRC_PLL1_3: + val = 1; break; + case DSS_CLK_SRC_HDMI_PLL: val = 2; break; default: DSSERR("error in PLL mux config for LCD3\n"); - return; + return -EINVAL; } break; default: DSSERR("error in PLL mux config\n"); - return; + return -EINVAL; } regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset, 0x3 << shift, val << shift); + + return 0; } void dss_sdi_init(int datapairs) @@ -354,14 +360,14 @@ void dss_sdi_disable(void) REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */ } -const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src) +const char *dss_get_clk_source_name(enum dss_clk_source clk_src) { return dss_generic_clk_source_names[clk_src]; } void dss_dump_clocks(struct seq_file *s) { - const char *fclk_name, *fclk_real_name; + const char *fclk_name; unsigned long fclk_rate; if (dss_runtime_get()) @@ -369,12 +375,11 @@ void dss_dump_clocks(struct seq_file *s) seq_printf(s, "- DSS -\n"); - fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK); - fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK); + fclk_name = dss_get_clk_source_name(DSS_CLK_SRC_FCK); fclk_rate = clk_get_rate(dss.dss_clk); - seq_printf(s, "%s (%s) = %lu\n", - fclk_name, fclk_real_name, + seq_printf(s, "%s = %lu\n", + fclk_name, fclk_rate); dss_runtime_put(); @@ -403,19 +408,42 @@ static void dss_dump_regs(struct seq_file *s) #undef DUMPREG } -static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) +static int dss_get_channel_index(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0; + case OMAP_DSS_CHANNEL_LCD2: + return 1; + case OMAP_DSS_CHANNEL_LCD3: + return 2; + default: + WARN_ON(1); + return 0; + } +} + +static void dss_select_dispc_clk_source(enum dss_clk_source clk_src) { int b; u8 start, end; + /* + * We always use PRCM clock as the DISPC func clock, except on DSS3, + * where we don't have separate DISPC and LCD clock sources. 
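
dss_ctrl_pll_set_control_mux() now keys on the clock source rather than the PLL id and reports failures to its caller. Summarizing the switch above, these are the two-bit fields it programs in the syscon PLL-control register:

/* channel   shift   val=0               val=1               val=2
 * LCD       3       DSS_CLK_SRC_PLL1_1  DSS_CLK_SRC_HDMI_PLL       -
 * LCD2      5       DSS_CLK_SRC_PLL1_3  DSS_CLK_SRC_PLL2_3  DSS_CLK_SRC_HDMI_PLL
 * LCD3      7       DSS_CLK_SRC_PLL2_1  DSS_CLK_SRC_PLL1_3  DSS_CLK_SRC_HDMI_PLL
 *
 * Any other (channel, source) pair is rejected with -EINVAL instead of
 * being silently ignored as before.
 */
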
+ */ + if (WARN_ON(dss_has_feature(FEAT_LCD_CLK_SRC) && + clk_src != DSS_CLK_SRC_FCK)) + return; + switch (clk_src) { - case OMAP_DSS_CLK_SRC_FCK: + case DSS_CLK_SRC_FCK: b = 0; break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: + case DSS_CLK_SRC_PLL1_1: b = 1; break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: + case DSS_CLK_SRC_PLL2_1: b = 2; break; default: @@ -431,19 +459,19 @@ static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) } void dss_select_dsi_clk_source(int dsi_module, - enum omap_dss_clk_source clk_src) + enum dss_clk_source clk_src) { int b, pos; switch (clk_src) { - case OMAP_DSS_CLK_SRC_FCK: + case DSS_CLK_SRC_FCK: b = 0; break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: + case DSS_CLK_SRC_PLL1_2: BUG_ON(dsi_module != 0); b = 1; break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI: + case DSS_CLK_SRC_PLL2_2: BUG_ON(dsi_module != 1); b = 1; break; @@ -458,59 +486,125 @@ void dss_select_dsi_clk_source(int dsi_module, dss.dsi_clk_source[dsi_module] = clk_src; } +static int dss_lcd_clk_mux_dra7(enum omap_channel channel, + enum dss_clk_source clk_src) +{ + const u8 ctrl_bits[] = { + [OMAP_DSS_CHANNEL_LCD] = 0, + [OMAP_DSS_CHANNEL_LCD2] = 12, + [OMAP_DSS_CHANNEL_LCD3] = 19, + }; + + u8 ctrl_bit = ctrl_bits[channel]; + int r; + + if (clk_src == DSS_CLK_SRC_FCK) { + /* LCDx_CLK_SWITCH */ + REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit); + return -EINVAL; + } + + r = dss_ctrl_pll_set_control_mux(clk_src, channel); + if (r) + return r; + + REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit); + + return 0; +} + +static int dss_lcd_clk_mux_omap5(enum omap_channel channel, + enum dss_clk_source clk_src) +{ + const u8 ctrl_bits[] = { + [OMAP_DSS_CHANNEL_LCD] = 0, + [OMAP_DSS_CHANNEL_LCD2] = 12, + [OMAP_DSS_CHANNEL_LCD3] = 19, + }; + const enum dss_clk_source allowed_plls[] = { + [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1, + [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_FCK, + [OMAP_DSS_CHANNEL_LCD3] = DSS_CLK_SRC_PLL2_1, + }; + + u8 ctrl_bit = ctrl_bits[channel]; + + if (clk_src == DSS_CLK_SRC_FCK) { + /* LCDx_CLK_SWITCH */ + REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit); + return -EINVAL; + } + + if (WARN_ON(allowed_plls[channel] != clk_src)) + return -EINVAL; + + REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit); + + return 0; +} + +static int dss_lcd_clk_mux_omap4(enum omap_channel channel, + enum dss_clk_source clk_src) +{ + const u8 ctrl_bits[] = { + [OMAP_DSS_CHANNEL_LCD] = 0, + [OMAP_DSS_CHANNEL_LCD2] = 12, + }; + const enum dss_clk_source allowed_plls[] = { + [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1, + [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_PLL2_1, + }; + + u8 ctrl_bit = ctrl_bits[channel]; + + if (clk_src == DSS_CLK_SRC_FCK) { + /* LCDx_CLK_SWITCH */ + REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit); + return 0; + } + + if (WARN_ON(allowed_plls[channel] != clk_src)) + return -EINVAL; + + REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit); + + return 0; +} + void dss_select_lcd_clk_source(enum omap_channel channel, - enum omap_dss_clk_source clk_src) + enum dss_clk_source clk_src) { - int b, ix, pos; + int idx = dss_get_channel_index(channel); + int r; if (!dss_has_feature(FEAT_LCD_CLK_SRC)) { dss_select_dispc_clk_source(clk_src); + dss.lcd_clk_source[idx] = clk_src; return; } - switch (clk_src) { - case OMAP_DSS_CLK_SRC_FCK: - b = 0; - break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: - BUG_ON(channel != OMAP_DSS_CHANNEL_LCD); - b = 1; - break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: - BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 && - channel != 
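
The three dss_lcd_clk_mux_*() helpers above encode, per SoC, which single PLL output each LCD manager may use besides DSS_FCK, plus the LCDx_CLK_SWITCH bit in DSS_CONTROL that selects it. A compilable userspace restatement of the OMAP4 variant; set_clk_switch() stands in for REG_FLD_MOD on DSS_CONTROL:

#include <errno.h>
#include <stdio.h>

enum dss_clk_source { DSS_CLK_SRC_FCK, DSS_CLK_SRC_PLL1_1, DSS_CLK_SRC_PLL2_1 };
enum omap_channel { OMAP_DSS_CHANNEL_LCD, OMAP_DSS_CHANNEL_LCD2 };

/* Stub standing in for REG_FLD_MOD(DSS_CONTROL, val, bit, bit). */
static void set_clk_switch(unsigned bit, unsigned val)
{
	printf("DSS_CONTROL bit %u <- %u\n", bit, val);
}

static int lcd_clk_mux_omap4(enum omap_channel channel,
			     enum dss_clk_source clk_src)
{
	static const unsigned char ctrl_bits[] = {
		[OMAP_DSS_CHANNEL_LCD] = 0,
		[OMAP_DSS_CHANNEL_LCD2] = 12,
	};
	static const enum dss_clk_source allowed[] = {
		[OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
		[OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_PLL2_1,
	};

	if (clk_src == DSS_CLK_SRC_FCK) {
		set_clk_switch(ctrl_bits[channel], 0);
		return 0;
	}

	if (allowed[channel] != clk_src)
		return -EINVAL;

	set_clk_switch(ctrl_bits[channel], 1);
	return 0;
}

int main(void)
{
	return lcd_clk_mux_omap4(OMAP_DSS_CHANNEL_LCD, DSS_CLK_SRC_PLL1_1);
}
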
OMAP_DSS_CHANNEL_LCD3); - b = 1; - break; - default: - BUG(); + r = dss.feat->select_lcd_source(channel, clk_src); + if (r) return; - } - - pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : - (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19); - REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */ - ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : - (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); - dss.lcd_clk_source[ix] = clk_src; + dss.lcd_clk_source[idx] = clk_src; } -enum omap_dss_clk_source dss_get_dispc_clk_source(void) +enum dss_clk_source dss_get_dispc_clk_source(void) { return dss.dispc_clk_source; } -enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module) +enum dss_clk_source dss_get_dsi_clk_source(int dsi_module) { return dss.dsi_clk_source[dsi_module]; } -enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) +enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) { if (dss_has_feature(FEAT_LCD_CLK_SRC)) { - int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : - (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); - return dss.lcd_clk_source[ix]; + int idx = dss_get_channel_index(channel); + return dss.lcd_clk_source[idx]; } else { /* LCD_CLK source is the same as DISPC_FCLK source for * OMAP2 and OMAP3 */ @@ -859,6 +953,7 @@ static const struct dss_features omap44xx_dss_feats = { .dpi_select_source = &dss_dpi_select_source_omap4, .ports = omap2plus_ports, .num_ports = ARRAY_SIZE(omap2plus_ports), + .select_lcd_source = &dss_lcd_clk_mux_omap4, }; static const struct dss_features omap54xx_dss_feats = { @@ -868,6 +963,7 @@ static const struct dss_features omap54xx_dss_feats = { .dpi_select_source = &dss_dpi_select_source_omap5, .ports = omap2plus_ports, .num_ports = ARRAY_SIZE(omap2plus_ports), + .select_lcd_source = &dss_lcd_clk_mux_omap5, }; static const struct dss_features am43xx_dss_feats = { @@ -886,6 +982,7 @@ static const struct dss_features dra7xx_dss_feats = { .dpi_select_source = &dss_dpi_select_source_dra7xx, .ports = dra7xx_ports, .num_ports = ARRAY_SIZE(dra7xx_ports), + .select_lcd_source = &dss_lcd_clk_mux_dra7, }; static int dss_init_features(struct platform_device *pdev) @@ -1143,18 +1240,18 @@ static int dss_bind(struct device *dev) /* Select DPLL */ REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); - dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); + dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); #ifdef CONFIG_OMAP2_DSS_VENC REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ #endif - dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; - dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; - dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK; - dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; - dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; + dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK; + dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK; + dss.dispc_clk_source = DSS_CLK_SRC_FCK; + dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK; + dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK; rev = dss_read_reg(DSS_REVISION); printk(KERN_INFO "OMAP DSS rev %d.%d\n", diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index 38e6ab50142d..4fd06dc41cb3 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -102,6 +102,20 @@ enum dss_writeback_channel { DSS_WB_LCD3_MGR = 7, }; +enum dss_clk_source { + DSS_CLK_SRC_FCK = 0, + + DSS_CLK_SRC_PLL1_1, + DSS_CLK_SRC_PLL1_2, + DSS_CLK_SRC_PLL1_3, + + DSS_CLK_SRC_PLL2_1, + DSS_CLK_SRC_PLL2_2, + 
DSS_CLK_SRC_PLL2_3, + + DSS_CLK_SRC_HDMI_PLL, +}; + enum dss_pll_id { DSS_PLL_DSI1, DSS_PLL_DSI2, @@ -114,6 +128,11 @@ struct dss_pll; #define DSS_PLL_MAX_HSDIVS 4 +enum dss_pll_type { + DSS_PLL_TYPE_A, + DSS_PLL_TYPE_B, +}; + /* * Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7. * Type-B PLLs: clkout[0] refers to m2. @@ -140,6 +159,8 @@ struct dss_pll_ops { }; struct dss_pll_hw { + enum dss_pll_type type; + unsigned n_max; unsigned m_min; unsigned m_max; @@ -227,7 +248,7 @@ unsigned long dss_get_dispc_clk_rate(void); int dss_dpi_select_source(int port, enum omap_channel channel); void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void); -const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); +const char *dss_get_clk_source_name(enum dss_clk_source clk_src); void dss_dump_clocks(struct seq_file *s); /* DSS VIDEO PLL */ @@ -244,20 +265,18 @@ void dss_debug_dump_clocks(struct seq_file *s); #endif void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable); -void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id, - enum omap_channel channel); void dss_sdi_init(int datapairs); int dss_sdi_enable(void); void dss_sdi_disable(void); void dss_select_dsi_clk_source(int dsi_module, - enum omap_dss_clk_source clk_src); + enum dss_clk_source clk_src); void dss_select_lcd_clk_source(enum omap_channel channel, - enum omap_dss_clk_source clk_src); -enum omap_dss_clk_source dss_get_dispc_clk_source(void); -enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module); -enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel); + enum dss_clk_source clk_src); +enum dss_clk_source dss_get_dispc_clk_source(void); +enum dss_clk_source dss_get_dsi_clk_source(int dsi_module); +enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel); void dss_set_venc_output(enum omap_dss_venc_type type); void dss_set_dac_pwrdn_bgz(bool enable); @@ -409,17 +428,23 @@ typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc, int dss_pll_register(struct dss_pll *pll); void dss_pll_unregister(struct dss_pll *pll); struct dss_pll *dss_pll_find(const char *name); +struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src); +unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src); int dss_pll_enable(struct dss_pll *pll); void dss_pll_disable(struct dss_pll *pll); int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cinfo); -bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, +bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco, unsigned long out_min, unsigned long out_max, dss_hsdiv_calc_func func, void *data); -bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, +bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin, unsigned long pll_min, unsigned long pll_max, dss_pll_calc_func func, void *data); + +bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin, + unsigned long target_clkout, struct dss_pll_clock_info *cinfo); + int dss_pll_write_config_type_a(struct dss_pll *pll, const struct dss_pll_clock_info *cinfo); int dss_pll_write_config_type_b(struct dss_pll *pll, diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.c b/drivers/gpu/drm/omapdrm/dss/dss_features.c index c886a2927f73..ee5b93ce2763 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss_features.c +++ b/drivers/gpu/drm/omapdrm/dss/dss_features.c @@ -23,8 +23,7 @@ #include <linux/err.h> 
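
The dss_pll_calc_a()/dss_pll_calc_b() split declared above mirrors the new dss_pll_type: type-A PLLs are searched iteratively, invoking a callback for every candidate (n, m) pair until one is accepted, while type-B PLLs are computed in one shot. A toy model of the callback-driven type-A search; the ranges and the clkdco formula are invented, not hardware limits:

#include <stdbool.h>
#include <stdio.h>

typedef bool (*pll_calc_func)(int n, int m, unsigned long fint,
			      unsigned long clkdco, void *data);

/* Enumerate (n, m) candidates and let the caller's callback accept or
 * reject each one, the way dss_pll_calc_a() drives dpi_calc_pll_cb(). */
static bool toy_calc_a(unsigned long clkin, pll_calc_func func, void *data)
{
	for (int n = 1; n <= 4; ++n) {
		unsigned long fint = clkin / n;

		for (int m = 1; m <= 64; ++m)
			if (func(n, m, fint, 2 * m * fint, data))
				return true;
	}
	return false;
}

static bool accept_cb(int n, int m, unsigned long fint,
		      unsigned long clkdco, void *data)
{
	(void)fint; (void)data;
	if (clkdco < 750000000)
		return false;
	printf("accepted n=%d m=%d clkdco=%lu\n", n, m, clkdco);
	return true;
}

int main(void)
{
	return toy_calc_a(19200000, accept_cb, NULL) ? 0 : 1;
}
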
#include <linux/slab.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -50,7 +49,6 @@ struct omap_dss_features { const enum omap_dss_output_id *supported_outputs; const enum omap_color_mode *supported_color_modes; const enum omap_overlay_caps *overlay_caps; - const char * const *clksrc_names; const struct dss_param_range *dss_params; const enum omap_dss_rotation_type supported_rotation_types; @@ -389,34 +387,6 @@ static const enum omap_overlay_caps omap4_dss_overlay_caps[] = { OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION, }; -static const char * const omap2_dss_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A", - [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK1", -}; - -static const char * const omap3_dss_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK", - [OMAP_DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK", -}; - -static const char * const omap4_dss_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2", - [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "PLL2_CLK1", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "PLL2_CLK2", -}; - -static const char * const omap5_dss_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DPLL_DSI1_A_CLK1", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DPLL_DSI1_A_CLK2", - [OMAP_DSS_CLK_SRC_FCK] = "DSS_CLK", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DPLL_DSI1_C_CLK1", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DPLL_DSI1_C_CLK2", -}; - static const struct dss_param_range omap2_dss_param_range[] = { [FEAT_PARAM_DSS_FCK] = { 0, 133000000 }, [FEAT_PARAM_DSS_PCD] = { 2, 255 }, @@ -631,7 +601,6 @@ static const struct omap_dss_features omap2_dss_features = { .supported_outputs = omap2_dss_supported_outputs, .supported_color_modes = omap2_dss_supported_color_modes, .overlay_caps = omap2_dss_overlay_caps, - .clksrc_names = omap2_dss_clk_source_names, .dss_params = omap2_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, @@ -652,7 +621,6 @@ static const struct omap_dss_features omap3430_dss_features = { .supported_outputs = omap3430_dss_supported_outputs, .supported_color_modes = omap3_dss_supported_color_modes, .overlay_caps = omap3430_dss_overlay_caps, - .clksrc_names = omap3_dss_clk_source_names, .dss_params = omap3_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, @@ -676,7 +644,6 @@ static const struct omap_dss_features am35xx_dss_features = { .supported_outputs = omap3430_dss_supported_outputs, .supported_color_modes = omap3_dss_supported_color_modes, .overlay_caps = omap3430_dss_overlay_caps, - .clksrc_names = omap3_dss_clk_source_names, .dss_params = omap3_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, @@ -696,7 +663,6 @@ static const struct omap_dss_features am43xx_dss_features = { .supported_outputs = am43xx_dss_supported_outputs, .supported_color_modes = omap3_dss_supported_color_modes, .overlay_caps = omap3430_dss_overlay_caps, - .clksrc_names = omap2_dss_clk_source_names, .dss_params = am43xx_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA, .buffer_size_unit = 1, @@ -716,7 +682,6 @@ static const struct omap_dss_features omap3630_dss_features = { .supported_outputs = 
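
The per-SoC clksrc_names tables removed above existed only to pretty-print clock sources; after this patch a single generic table in dss.c serves all SoCs. Its shape, as a self-contained sketch with the values copied from the dss.c hunk earlier in this diff:

#include <stdio.h>

enum dss_clk_source {
	DSS_CLK_SRC_FCK = 0,
	DSS_CLK_SRC_PLL1_1, DSS_CLK_SRC_PLL1_2, DSS_CLK_SRC_PLL1_3,
	DSS_CLK_SRC_PLL2_1, DSS_CLK_SRC_PLL2_2, DSS_CLK_SRC_PLL2_3,
	DSS_CLK_SRC_HDMI_PLL,
};

static const char *const clk_src_names[] = {
	[DSS_CLK_SRC_FCK] = "FCK",
	[DSS_CLK_SRC_PLL1_1] = "PLL1:1",
	[DSS_CLK_SRC_PLL1_2] = "PLL1:2",
	[DSS_CLK_SRC_PLL1_3] = "PLL1:3",
	[DSS_CLK_SRC_PLL2_1] = "PLL2:1",
	[DSS_CLK_SRC_PLL2_2] = "PLL2:2",
	[DSS_CLK_SRC_PLL2_3] = "PLL2:3",
	[DSS_CLK_SRC_HDMI_PLL] = "HDMI PLL",
};

int main(void)
{
	printf("%s\n", clk_src_names[DSS_CLK_SRC_PLL1_2]); /* "PLL1:2" */
	return 0;
}
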
omap3630_dss_supported_outputs, .supported_color_modes = omap3_dss_supported_color_modes, .overlay_caps = omap3630_dss_overlay_caps, - .clksrc_names = omap3_dss_clk_source_names, .dss_params = omap3_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, @@ -738,7 +703,6 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = { .supported_outputs = omap4_dss_supported_outputs, .supported_color_modes = omap4_dss_supported_color_modes, .overlay_caps = omap4_dss_overlay_caps, - .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, @@ -759,7 +723,6 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = { .supported_outputs = omap4_dss_supported_outputs, .supported_color_modes = omap4_dss_supported_color_modes, .overlay_caps = omap4_dss_overlay_caps, - .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, @@ -780,7 +743,6 @@ static const struct omap_dss_features omap4_dss_features = { .supported_outputs = omap4_dss_supported_outputs, .supported_color_modes = omap4_dss_supported_color_modes, .overlay_caps = omap4_dss_overlay_caps, - .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, @@ -801,7 +763,6 @@ static const struct omap_dss_features omap5_dss_features = { .supported_outputs = omap5_dss_supported_outputs, .supported_color_modes = omap4_dss_supported_color_modes, .overlay_caps = omap4_dss_overlay_caps, - .clksrc_names = omap5_dss_clk_source_names, .dss_params = omap5_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, @@ -859,11 +820,6 @@ bool dss_feat_color_mode_supported(enum omap_plane plane, color_mode; } -const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id) -{ - return omap_current_dss_features->clksrc_names[id]; -} - u32 dss_feat_get_buffer_size_unit(void) { return omap_current_dss_features->buffer_size_unit; diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.h b/drivers/gpu/drm/omapdrm/dss/dss_features.h index 3d67d39f192f..bb4b7f0e642b 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss_features.h +++ b/drivers/gpu/drm/omapdrm/dss/dss_features.h @@ -91,7 +91,6 @@ unsigned long dss_feat_get_param_max(enum dss_range_param param); enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane); bool dss_feat_color_mode_supported(enum omap_plane plane, enum omap_color_mode color_mode); -const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id); u32 dss_feat_get_buffer_size_unit(void); /* in bytes */ u32 dss_feat_get_burst_size_unit(void); /* in bytes */ diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h index 53616b02b613..63e711545865 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi.h +++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h @@ -23,8 +23,9 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/hdmi.h> -#include <video/omapdss.h> +#include <sound/omap-hdmi-audio.h> +#include "omapdss.h" #include "dss.h" /* HDMI Wrapper */ @@ -240,6 +241,7 @@ struct hdmi_pll_data { void __iomem *base; + struct platform_device *pdev; struct hdmi_wp_data *wp; }; @@ -306,8 +308,6 @@ phys_addr_t hdmi_wp_get_audio_dma_addr(struct 
hdmi_wp_data *wp); /* HDMI PLL funcs */ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s); -void hdmi_pll_compute(struct hdmi_pll_data *pll, - unsigned long target_tmds, struct dss_pll_clock_info *pi); int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll, struct hdmi_wp_data *wp); void hdmi_pll_uninit(struct hdmi_pll_data *hpll); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 4d46cdf7a037..cbd28dfdb86a 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -34,9 +34,9 @@ #include <linux/regulator/consumer.h> #include <linux/component.h> #include <linux/of.h> -#include <video/omapdss.h> #include <sound/omap-hdmi-audio.h> +#include "omapdss.h" #include "hdmi4_core.h" #include "dss.h" #include "dss_features.h" @@ -177,7 +177,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev) if (p->double_pixel) pc *= 2; - hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo); + /* DSS_HDMI_TCLK is bitclk / 10 */ + pc *= 10; + + dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin), + pc, &hdmi_cinfo); r = dss_pll_enable(&hdmi.pll.pll); if (r) { @@ -204,9 +208,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev) hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg); - /* bypass TV gamma table */ - dispc_enable_gamma_table(0); - /* tv size */ dss_mgr_set_timings(channel, p); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index e129245eb8a9..061f9bab4c9b 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -39,9 +39,9 @@ #include <linux/regulator/consumer.h> #include <linux/component.h> #include <linux/of.h> -#include <video/omapdss.h> #include <sound/omap-hdmi-audio.h> +#include "omapdss.h" #include "hdmi5_core.h" #include "dss.h" #include "dss_features.h" @@ -190,7 +190,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev) if (p->double_pixel) pc *= 2; - hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo); + /* DSS_HDMI_TCLK is bitclk / 10 */ + pc *= 10; + + dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin), + pc, &hdmi_cinfo); /* disable and clear irqs */ hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff); @@ -222,9 +226,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev) hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg); - /* bypass TV gamma table */ - dispc_enable_gamma_table(0); - /* tv size */ dss_mgr_set_timings(channel, p); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c index 1b8fcc6c4ba1..4dfb67fe5f6d 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c @@ -4,8 +4,8 @@ #include <linux/kernel.h> #include <linux/err.h> #include <linux/of.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "hdmi.h" int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep, diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c index f98b750fc499..3ead47cccac5 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c @@ -14,8 +14,8 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/seq_file.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dss.h" #include "hdmi.h" diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c index f1015e8b8267..b8bf6a9e5557 100644 --- 
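
In the hdmi4.c and hdmi5.c hunks above, the drivers now ask the shared type-B PLL code for the serializer bit clock directly: the pixel clock, doubled in double-pixel modes, times 10, since DSS_HDMI_TCLK is bitclk / 10. The arithmetic for a 1080p60 example:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long pc = 148500000;	/* 1080p60 pixel clock, Hz */
	bool double_pixel = false;

	if (double_pixel)
		pc *= 2;

	/* DSS_HDMI_TCLK is bitclk / 10, so request the bit clock. */
	pc *= 10;

	printf("dss_pll_calc_b() target: %lu Hz\n", pc); /* 1485000000 */
	return 0;
}
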
a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c @@ -17,9 +17,9 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/seq_file.h> +#include <linux/pm_runtime.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "hdmi.h" @@ -39,71 +39,14 @@ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s) DUMPPLL(PLLCTRL_CFG4); } -void hdmi_pll_compute(struct hdmi_pll_data *pll, - unsigned long target_tmds, struct dss_pll_clock_info *pi) -{ - unsigned long fint, clkdco, clkout; - unsigned long target_bitclk, target_clkdco; - unsigned long min_dco; - unsigned n, m, mf, m2, sd; - unsigned long clkin; - const struct dss_pll_hw *hw = pll->pll.hw; - - clkin = clk_get_rate(pll->pll.clkin); - - DSSDBG("clkin %lu, target tmds %lu\n", clkin, target_tmds); - - target_bitclk = target_tmds * 10; - - /* Fint */ - n = DIV_ROUND_UP(clkin, hw->fint_max); - fint = clkin / n; - - /* adjust m2 so that the clkdco will be high enough */ - min_dco = roundup(hw->clkdco_min, fint); - m2 = DIV_ROUND_UP(min_dco, target_bitclk); - if (m2 == 0) - m2 = 1; - - target_clkdco = target_bitclk * m2; - m = target_clkdco / fint; - - clkdco = fint * m; - - /* adjust clkdco with fractional mf */ - if (WARN_ON(target_clkdco - clkdco > fint)) - mf = 0; - else - mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint); - - if (mf > 0) - clkdco += (u32)div_u64((u64)mf * fint, 262144); - - clkout = clkdco / m2; - - /* sigma-delta */ - sd = DIV_ROUND_UP(fint * m, 250000000); - - DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n", - n, m, mf, m2, sd); - DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout); - - pi->n = n; - pi->m = m; - pi->mf = mf; - pi->mX[0] = m2; - pi->sd = sd; - - pi->fint = fint; - pi->clkdco = clkdco; - pi->clkout[0] = clkout; -} - static int hdmi_pll_enable(struct dss_pll *dsspll) { struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); struct hdmi_wp_data *wp = pll->wp; - u16 r = 0; + int r; + + r = pm_runtime_get_sync(&pll->pdev->dev); + WARN_ON(r < 0); dss_ctrl_pll_enable(DSS_PLL_HDMI, true); @@ -118,10 +61,14 @@ static void hdmi_pll_disable(struct dss_pll *dsspll) { struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); struct hdmi_wp_data *wp = pll->wp; + int r; hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF); dss_ctrl_pll_enable(DSS_PLL_HDMI, false); + + r = pm_runtime_put_sync(&pll->pdev->dev); + WARN_ON(r < 0 && r != -ENOSYS); } static const struct dss_pll_ops dsi_pll_ops = { @@ -131,6 +78,8 @@ static const struct dss_pll_ops dsi_pll_ops = { }; static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = { + .type = DSS_PLL_TYPE_B, + .n_max = 255, .m_min = 20, .m_max = 4095, @@ -154,6 +103,8 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = { }; static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = { + .type = DSS_PLL_TYPE_B, + .n_max = 255, .m_min = 20, .m_max = 2045, @@ -225,6 +176,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll, int r; struct resource *res; + pll->pdev = pdev; pll->wp = wp; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll"); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c index 055f62fca5dc..203694a52d18 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c @@ -15,8 +15,8 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/seq_file.h> -#include <video/omapdss.h> +#include 
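
The hdmi_pll_compute() body deleted above is superseded by the shared dss_pll_calc_b(). For reference, here is the deleted algorithm run end to end on example numbers; clkin, fint_max and clkdco_min are assumed inputs for illustration only, since the real limits come from dss_pll_hw:

#include <stdio.h>

#define DIV_ROUND_UP(a, b)	(((a) + (b) - 1) / (b))
#define ROUND_UP(a, b)		(DIV_ROUND_UP(a, b) * (b))

int main(void)
{
	unsigned long clkin = 19200000;		/* 19.2 MHz reference */
	unsigned long fint_max = 2500000;	/* assumed */
	unsigned long clkdco_min = 500000000;	/* assumed */
	unsigned long target_tmds = 148500000;	/* 1080p60 */

	unsigned long target_bitclk = target_tmds * 10;
	unsigned n = DIV_ROUND_UP(clkin, fint_max);		/* 8 */
	unsigned long fint = clkin / n;				/* 2.4 MHz */
	unsigned long min_dco = ROUND_UP(clkdco_min, fint);
	unsigned m2 = DIV_ROUND_UP(min_dco, target_bitclk);
	if (m2 == 0)
		m2 = 1;						/* 1 */
	unsigned long target_clkdco = target_bitclk * m2;
	unsigned m = target_clkdco / fint;			/* 618 */
	unsigned long clkdco = fint * m;			/* 1483.2 MHz */
	unsigned long long mf =
		262144ull * (target_clkdco - clkdco) / fint;	/* 196608 */
	if (mf > 0)
		clkdco += (unsigned long)(mf * fint / 262144);	/* 1485 MHz */
	unsigned sd = DIV_ROUND_UP(fint * m, 250000000);	/* 6 */

	printf("n=%u m=%u mf=%llu m2=%u sd=%u clkdco=%lu\n",
	       n, m, mf, m2, sd, clkdco);
	return 0;
}
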
"omapdss.h" #include "dss.h" #include "hdmi.h" diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index d7e7c909bbc2..6eaf1adbd606 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -18,7 +18,872 @@ #ifndef __OMAP_DRM_DSS_H #define __OMAP_DRM_DSS_H -#include <video/omapdss.h> +#include <linux/list.h> +#include <linux/kobject.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <video/videomode.h> +#include <linux/platform_data/omapdss.h> +#include <uapi/drm/drm_mode.h> + +#define DISPC_IRQ_FRAMEDONE (1 << 0) +#define DISPC_IRQ_VSYNC (1 << 1) +#define DISPC_IRQ_EVSYNC_EVEN (1 << 2) +#define DISPC_IRQ_EVSYNC_ODD (1 << 3) +#define DISPC_IRQ_ACBIAS_COUNT_STAT (1 << 4) +#define DISPC_IRQ_PROG_LINE_NUM (1 << 5) +#define DISPC_IRQ_GFX_FIFO_UNDERFLOW (1 << 6) +#define DISPC_IRQ_GFX_END_WIN (1 << 7) +#define DISPC_IRQ_PAL_GAMMA_MASK (1 << 8) +#define DISPC_IRQ_OCP_ERR (1 << 9) +#define DISPC_IRQ_VID1_FIFO_UNDERFLOW (1 << 10) +#define DISPC_IRQ_VID1_END_WIN (1 << 11) +#define DISPC_IRQ_VID2_FIFO_UNDERFLOW (1 << 12) +#define DISPC_IRQ_VID2_END_WIN (1 << 13) +#define DISPC_IRQ_SYNC_LOST (1 << 14) +#define DISPC_IRQ_SYNC_LOST_DIGIT (1 << 15) +#define DISPC_IRQ_WAKEUP (1 << 16) +#define DISPC_IRQ_SYNC_LOST2 (1 << 17) +#define DISPC_IRQ_VSYNC2 (1 << 18) +#define DISPC_IRQ_VID3_END_WIN (1 << 19) +#define DISPC_IRQ_VID3_FIFO_UNDERFLOW (1 << 20) +#define DISPC_IRQ_ACBIAS_COUNT_STAT2 (1 << 21) +#define DISPC_IRQ_FRAMEDONE2 (1 << 22) +#define DISPC_IRQ_FRAMEDONEWB (1 << 23) +#define DISPC_IRQ_FRAMEDONETV (1 << 24) +#define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25) +#define DISPC_IRQ_WBUNCOMPLETEERROR (1 << 26) +#define DISPC_IRQ_SYNC_LOST3 (1 << 27) +#define DISPC_IRQ_VSYNC3 (1 << 28) +#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 29) +#define DISPC_IRQ_FRAMEDONE3 (1 << 30) + +struct omap_dss_device; +struct omap_overlay_manager; +struct dss_lcd_mgr_config; +struct snd_aes_iec958; +struct snd_cea_861_aud_if; +struct hdmi_avi_infoframe; + +enum omap_display_type { + OMAP_DISPLAY_TYPE_NONE = 0, + OMAP_DISPLAY_TYPE_DPI = 1 << 0, + OMAP_DISPLAY_TYPE_DBI = 1 << 1, + OMAP_DISPLAY_TYPE_SDI = 1 << 2, + OMAP_DISPLAY_TYPE_DSI = 1 << 3, + OMAP_DISPLAY_TYPE_VENC = 1 << 4, + OMAP_DISPLAY_TYPE_HDMI = 1 << 5, + OMAP_DISPLAY_TYPE_DVI = 1 << 6, +}; + +enum omap_plane { + OMAP_DSS_GFX = 0, + OMAP_DSS_VIDEO1 = 1, + OMAP_DSS_VIDEO2 = 2, + OMAP_DSS_VIDEO3 = 3, + OMAP_DSS_WB = 4, +}; + +enum omap_channel { + OMAP_DSS_CHANNEL_LCD = 0, + OMAP_DSS_CHANNEL_DIGIT = 1, + OMAP_DSS_CHANNEL_LCD2 = 2, + OMAP_DSS_CHANNEL_LCD3 = 3, + OMAP_DSS_CHANNEL_WB = 4, +}; + +enum omap_color_mode { + OMAP_DSS_COLOR_CLUT1 = 1 << 0, /* BITMAP 1 */ + OMAP_DSS_COLOR_CLUT2 = 1 << 1, /* BITMAP 2 */ + OMAP_DSS_COLOR_CLUT4 = 1 << 2, /* BITMAP 4 */ + OMAP_DSS_COLOR_CLUT8 = 1 << 3, /* BITMAP 8 */ + OMAP_DSS_COLOR_RGB12U = 1 << 4, /* RGB12, 16-bit container */ + OMAP_DSS_COLOR_ARGB16 = 1 << 5, /* ARGB16 */ + OMAP_DSS_COLOR_RGB16 = 1 << 6, /* RGB16 */ + OMAP_DSS_COLOR_RGB24U = 1 << 7, /* RGB24, 32-bit container */ + OMAP_DSS_COLOR_RGB24P = 1 << 8, /* RGB24, 24-bit container */ + OMAP_DSS_COLOR_YUV2 = 1 << 9, /* YUV2 4:2:2 co-sited */ + OMAP_DSS_COLOR_UYVY = 1 << 10, /* UYVY 4:2:2 co-sited */ + OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */ + OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */ + OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */ + OMAP_DSS_COLOR_NV12 = 1 << 14, /* NV12 format: YUV 4:2:0 */ + OMAP_DSS_COLOR_RGBA16 = 1 << 15, /* RGBA16 - 4444 */ + OMAP_DSS_COLOR_RGBX16 
= 1 << 16, /* RGBx16 - 4444 */ + OMAP_DSS_COLOR_ARGB16_1555 = 1 << 17, /* ARGB16 - 1555 */ + OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */ +}; + +enum omap_dss_load_mode { + OMAP_DSS_LOAD_CLUT_AND_FRAME = 0, + OMAP_DSS_LOAD_CLUT_ONLY = 1, + OMAP_DSS_LOAD_FRAME_ONLY = 2, + OMAP_DSS_LOAD_CLUT_ONCE_FRAME = 3, +}; + +enum omap_dss_trans_key_type { + OMAP_DSS_COLOR_KEY_GFX_DST = 0, + OMAP_DSS_COLOR_KEY_VID_SRC = 1, +}; + +enum omap_rfbi_te_mode { + OMAP_DSS_RFBI_TE_MODE_1 = 1, + OMAP_DSS_RFBI_TE_MODE_2 = 2, +}; + +enum omap_dss_signal_level { + OMAPDSS_SIG_ACTIVE_LOW, + OMAPDSS_SIG_ACTIVE_HIGH, +}; + +enum omap_dss_signal_edge { + OMAPDSS_DRIVE_SIG_FALLING_EDGE, + OMAPDSS_DRIVE_SIG_RISING_EDGE, +}; + +enum omap_dss_venc_type { + OMAP_DSS_VENC_TYPE_COMPOSITE, + OMAP_DSS_VENC_TYPE_SVIDEO, +}; + +enum omap_dss_dsi_pixel_format { + OMAP_DSS_DSI_FMT_RGB888, + OMAP_DSS_DSI_FMT_RGB666, + OMAP_DSS_DSI_FMT_RGB666_PACKED, + OMAP_DSS_DSI_FMT_RGB565, +}; + +enum omap_dss_dsi_mode { + OMAP_DSS_DSI_CMD_MODE = 0, + OMAP_DSS_DSI_VIDEO_MODE, +}; + +enum omap_display_caps { + OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0, + OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1, +}; + +enum omap_dss_display_state { + OMAP_DSS_DISPLAY_DISABLED = 0, + OMAP_DSS_DISPLAY_ACTIVE, +}; + +enum omap_dss_rotation_type { + OMAP_DSS_ROT_DMA = 1 << 0, + OMAP_DSS_ROT_VRFB = 1 << 1, + OMAP_DSS_ROT_TILER = 1 << 2, +}; + +/* clockwise rotation angle */ +enum omap_dss_rotation_angle { + OMAP_DSS_ROT_0 = 0, + OMAP_DSS_ROT_90 = 1, + OMAP_DSS_ROT_180 = 2, + OMAP_DSS_ROT_270 = 3, +}; + +enum omap_overlay_caps { + OMAP_DSS_OVL_CAP_SCALE = 1 << 0, + OMAP_DSS_OVL_CAP_GLOBAL_ALPHA = 1 << 1, + OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA = 1 << 2, + OMAP_DSS_OVL_CAP_ZORDER = 1 << 3, + OMAP_DSS_OVL_CAP_POS = 1 << 4, + OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5, +}; + +enum omap_overlay_manager_caps { + OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */ +}; + +enum omap_dss_clk_source { + OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK + * OMAP4: DSS_FCLK */ + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK + * OMAP4: PLL1_CLK1 */ + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK + * OMAP4: PLL1_CLK2 */ + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */ + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */ +}; + +enum omap_hdmi_flags { + OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0, +}; + +enum omap_dss_output_id { + OMAP_DSS_OUTPUT_DPI = 1 << 0, + OMAP_DSS_OUTPUT_DBI = 1 << 1, + OMAP_DSS_OUTPUT_SDI = 1 << 2, + OMAP_DSS_OUTPUT_DSI1 = 1 << 3, + OMAP_DSS_OUTPUT_DSI2 = 1 << 4, + OMAP_DSS_OUTPUT_VENC = 1 << 5, + OMAP_DSS_OUTPUT_HDMI = 1 << 6, +}; + +/* RFBI */ + +struct rfbi_timings { + int cs_on_time; + int cs_off_time; + int we_on_time; + int we_off_time; + int re_on_time; + int re_off_time; + int we_cycle_time; + int re_cycle_time; + int cs_pulse_width; + int access_time; + + int clk_div; + + u32 tim[5]; /* set by rfbi_convert_timings() */ + + int converted; +}; + +/* DSI */ + +enum omap_dss_dsi_trans_mode { + /* Sync Pulses: both sync start and end packets sent */ + OMAP_DSS_DSI_PULSE_MODE, + /* Sync Events: only sync start packets sent */ + OMAP_DSS_DSI_EVENT_MODE, + /* Burst: only sync start packets sent, pixels are time compressed */ + OMAP_DSS_DSI_BURST_MODE, +}; + +struct omap_dss_dsi_videomode_timings { + unsigned long hsclk; + + unsigned ndl; + unsigned bitspp; + + /* pixels */ + u16 hact; + /* lines */ + u16 vact; + + /* DSI video mode blanking data */ + /* Unit: byte clock cycles */ 
+ u16 hss; + u16 hsa; + u16 hse; + u16 hfp; + u16 hbp; + /* Unit: line clocks */ + u16 vsa; + u16 vfp; + u16 vbp; + + /* DSI blanking modes */ + int blanking_mode; + int hsa_blanking_mode; + int hbp_blanking_mode; + int hfp_blanking_mode; + + enum omap_dss_dsi_trans_mode trans_mode; + + bool ddr_clk_always_on; + int window_sync; +}; + +struct omap_dss_dsi_config { + enum omap_dss_dsi_mode mode; + enum omap_dss_dsi_pixel_format pixel_format; + const struct omap_video_timings *timings; + + unsigned long hs_clk_min, hs_clk_max; + unsigned long lp_clk_min, lp_clk_max; + + bool ddr_clk_always_on; + enum omap_dss_dsi_trans_mode trans_mode; +}; + +struct omap_video_timings { + /* Unit: pixels */ + u16 x_res; + /* Unit: pixels */ + u16 y_res; + /* Unit: Hz */ + u32 pixelclock; + /* Unit: pixel clocks */ + u16 hsw; /* Horizontal synchronization pulse width */ + /* Unit: pixel clocks */ + u16 hfp; /* Horizontal front porch */ + /* Unit: pixel clocks */ + u16 hbp; /* Horizontal back porch */ + /* Unit: line clocks */ + u16 vsw; /* Vertical synchronization pulse width */ + /* Unit: line clocks */ + u16 vfp; /* Vertical front porch */ + /* Unit: line clocks */ + u16 vbp; /* Vertical back porch */ + + /* Vsync logic level */ + enum omap_dss_signal_level vsync_level; + /* Hsync logic level */ + enum omap_dss_signal_level hsync_level; + /* Interlaced or Progressive timings */ + bool interlace; + /* Pixel clock edge to drive LCD data */ + enum omap_dss_signal_edge data_pclk_edge; + /* Data enable logic level */ + enum omap_dss_signal_level de_level; + /* Pixel clock edges to drive HSYNC and VSYNC signals */ + enum omap_dss_signal_edge sync_pclk_edge; + + bool double_pixel; +}; + +/* Hardcoded timings for tv modes. Venc only uses these to + * identify the mode, and does not actually use the configs + * itself. 
However, the configs should be something that + * a normal monitor can also show */ +extern const struct omap_video_timings omap_dss_pal_timings; +extern const struct omap_video_timings omap_dss_ntsc_timings; + +struct omap_dss_cpr_coefs { + s16 rr, rg, rb; + s16 gr, gg, gb; + s16 br, bg, bb; +}; + +struct omap_overlay_info { + dma_addr_t paddr; + dma_addr_t p_uv_addr; /* for NV12 format */ + u16 screen_width; + u16 width; + u16 height; + enum omap_color_mode color_mode; + u8 rotation; + enum omap_dss_rotation_type rotation_type; + bool mirror; + + u16 pos_x; + u16 pos_y; + u16 out_width; /* if 0, out_width == width */ + u16 out_height; /* if 0, out_height == height */ + u8 global_alpha; + u8 pre_mult_alpha; + u8 zorder; +}; + +struct omap_overlay { + struct kobject kobj; + struct list_head list; + + /* static fields */ + const char *name; + enum omap_plane id; + enum omap_color_mode supported_modes; + enum omap_overlay_caps caps; + + /* dynamic fields */ + struct omap_overlay_manager *manager; + + /* + * The following functions do not block: + * + * is_enabled + * set_overlay_info + * get_overlay_info + * + * The rest of the functions may block and cannot be called from + * interrupt context + */ + + int (*enable)(struct omap_overlay *ovl); + int (*disable)(struct omap_overlay *ovl); + bool (*is_enabled)(struct omap_overlay *ovl); + + int (*set_manager)(struct omap_overlay *ovl, + struct omap_overlay_manager *mgr); + int (*unset_manager)(struct omap_overlay *ovl); + + int (*set_overlay_info)(struct omap_overlay *ovl, + struct omap_overlay_info *info); + void (*get_overlay_info)(struct omap_overlay *ovl, + struct omap_overlay_info *info); + + int (*wait_for_go)(struct omap_overlay *ovl); + + struct omap_dss_device *(*get_device)(struct omap_overlay *ovl); +}; + +struct omap_overlay_manager_info { + u32 default_color; + + enum omap_dss_trans_key_type trans_key_type; + u32 trans_key; + bool trans_enabled; + + bool partial_alpha_enabled; + + bool cpr_enable; + struct omap_dss_cpr_coefs cpr_coefs; +}; + +struct omap_overlay_manager { + struct kobject kobj; + + /* static fields */ + const char *name; + enum omap_channel id; + enum omap_overlay_manager_caps caps; + struct list_head overlays; + enum omap_display_type supported_displays; + enum omap_dss_output_id supported_outputs; + + /* dynamic fields */ + struct omap_dss_device *output; + + /* + * The following functions do not block: + * + * set_manager_info + * get_manager_info + * apply + * + * The rest of the functions may block and cannot be called from + * interrupt context + */ + + int (*set_output)(struct omap_overlay_manager *mgr, + struct omap_dss_device *output); + int (*unset_output)(struct omap_overlay_manager *mgr); + + int (*set_manager_info)(struct omap_overlay_manager *mgr, + struct omap_overlay_manager_info *info); + void (*get_manager_info)(struct omap_overlay_manager *mgr, + struct omap_overlay_manager_info *info); + + int (*apply)(struct omap_overlay_manager *mgr); + int (*wait_for_go)(struct omap_overlay_manager *mgr); + int (*wait_for_vsync)(struct omap_overlay_manager *mgr); + + struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr); +}; + +/* 22 pins means 1 clk lane and 10 data lanes */ +#define OMAP_DSS_MAX_DSI_PINS 22 + +struct omap_dsi_pin_config { + int num_pins; + /* + * pin numbers in the following order: + * clk+, clk- + * data1+, data1- + * data2+, data2- + * ... 
+ */ + int pins[OMAP_DSS_MAX_DSI_PINS]; +}; + +struct omap_dss_writeback_info { + u32 paddr; + u32 p_uv_addr; + u16 buf_width; + u16 width; + u16 height; + enum omap_color_mode color_mode; + u8 rotation; + enum omap_dss_rotation_type rotation_type; + bool mirror; + u8 pre_mult_alpha; +}; + +struct omapdss_dpi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines); +}; + +struct omapdss_sdi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs); +}; + +struct omapdss_dvi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); +}; + +struct omapdss_atv_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + void (*set_type)(struct omap_dss_device *dssdev, + enum omap_dss_venc_type type); + void (*invert_vid_out_polarity)(struct omap_dss_device *dssdev, + bool invert_polarity); + + int (*set_wss)(struct omap_dss_device *dssdev, u32 wss); + u32 (*get_wss)(struct omap_dss_device *dssdev); +}; + +struct omapdss_hdmi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void 
(*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); + bool (*detect)(struct omap_dss_device *dssdev); + + int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); + int (*set_infoframe)(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi); +}; + +struct omapdss_dsi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes, + bool enter_ulps); + + /* bus configuration */ + int (*set_config)(struct omap_dss_device *dssdev, + const struct omap_dss_dsi_config *cfg); + int (*configure_pins)(struct omap_dss_device *dssdev, + const struct omap_dsi_pin_config *pin_cfg); + + void (*enable_hs)(struct omap_dss_device *dssdev, int channel, + bool enable); + int (*enable_te)(struct omap_dss_device *dssdev, bool enable); + + int (*update)(struct omap_dss_device *dssdev, int channel, + void (*callback)(int, void *), void *data); + + void (*bus_lock)(struct omap_dss_device *dssdev); + void (*bus_unlock)(struct omap_dss_device *dssdev); + + int (*enable_video_output)(struct omap_dss_device *dssdev, int channel); + void (*disable_video_output)(struct omap_dss_device *dssdev, + int channel); + + int (*request_vc)(struct omap_dss_device *dssdev, int *channel); + int (*set_vc_id)(struct omap_dss_device *dssdev, int channel, + int vc_id); + void (*release_vc)(struct omap_dss_device *dssdev, int channel); + + /* data transfer */ + int (*dcs_write)(struct omap_dss_device *dssdev, int channel, + u8 *data, int len); + int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel, + u8 *data, int len); + int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, + u8 *data, int len); + + int (*gen_write)(struct omap_dss_device *dssdev, int channel, + u8 *data, int len); + int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel, + u8 *data, int len); + int (*gen_read)(struct omap_dss_device *dssdev, int channel, + u8 *reqdata, int reqlen, + u8 *data, int len); + + int (*bta_sync)(struct omap_dss_device *dssdev, int channel); + + int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev, + int channel, u16 plen); +}; + +struct omap_dss_device { + struct kobject kobj; + struct device *dev; + + struct module *owner; + + struct list_head panel_list; + + /* alias in the form of "display%d" */ + char alias[16]; + + enum omap_display_type type; + enum omap_display_type output_type; + + union { + struct { + u8 data_lines; + } dpi; + + struct { + u8 channel; + u8 data_lines; + } rfbi; + + struct { + u8 datapairs; + } sdi; + + struct { + int module; + } dsi; + + struct { + enum omap_dss_venc_type type; + bool invert_polarity; + } venc; + } phy; + + struct { + struct omap_video_timings timings; + + enum omap_dss_dsi_pixel_format dsi_pix_fmt; + enum omap_dss_dsi_mode dsi_mode; + } panel; + + struct { + u8 pixel_size; + struct rfbi_timings rfbi_timings; + } ctrl; + + const char *name; + + /* used to match device to driver */ + const char *driver_name; + + void *data; + + struct omap_dss_driver *driver; + + union { + const struct omapdss_dpi_ops *dpi; + const struct omapdss_sdi_ops *sdi; + const struct omapdss_dvi_ops *dvi; + const struct omapdss_hdmi_ops *hdmi; + const struct omapdss_atv_ops *atv; + const struct omapdss_dsi_ops 
*dsi; + } ops; + + /* helper variable for driver suspend/resume */ + bool activate_after_resume; + + enum omap_display_caps caps; + + struct omap_dss_device *src; + + enum omap_dss_display_state state; + + /* OMAP DSS output specific fields */ + + struct list_head list; + + /* DISPC channel for this output */ + enum omap_channel dispc_channel; + bool dispc_channel_connected; + + /* output instance */ + enum omap_dss_output_id id; + + /* the port number in the DT node */ + int port_num; + + /* dynamic fields */ + struct omap_overlay_manager *manager; + + struct omap_dss_device *dst; +}; + +struct omap_dss_driver { + int (*probe)(struct omap_dss_device *); + void (*remove)(struct omap_dss_device *); + + int (*connect)(struct omap_dss_device *dssdev); + void (*disconnect)(struct omap_dss_device *dssdev); + + int (*enable)(struct omap_dss_device *display); + void (*disable)(struct omap_dss_device *display); + int (*run_test)(struct omap_dss_device *display, int test); + + int (*update)(struct omap_dss_device *dssdev, + u16 x, u16 y, u16 w, u16 h); + int (*sync)(struct omap_dss_device *dssdev); + + int (*enable_te)(struct omap_dss_device *dssdev, bool enable); + int (*get_te)(struct omap_dss_device *dssdev); + + u8 (*get_rotate)(struct omap_dss_device *dssdev); + int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate); + + bool (*get_mirror)(struct omap_dss_device *dssdev); + int (*set_mirror)(struct omap_dss_device *dssdev, bool enable); + + int (*memory_read)(struct omap_dss_device *dssdev, + void *buf, size_t size, + u16 x, u16 y, u16 w, u16 h); + + void (*get_resolution)(struct omap_dss_device *dssdev, + u16 *xres, u16 *yres); + void (*get_dimensions)(struct omap_dss_device *dssdev, + u32 *width, u32 *height); + int (*get_recommended_bpp)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + int (*set_wss)(struct omap_dss_device *dssdev, u32 wss); + u32 (*get_wss)(struct omap_dss_device *dssdev); + + int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); + bool (*detect)(struct omap_dss_device *dssdev); + + int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); + int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi); +}; + +enum omapdss_version omapdss_get_version(void); +bool omapdss_is_initialized(void); + +int omap_dss_register_driver(struct omap_dss_driver *); +void omap_dss_unregister_driver(struct omap_dss_driver *); + +int omapdss_register_display(struct omap_dss_device *dssdev); +void omapdss_unregister_display(struct omap_dss_device *dssdev); + +struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev); +void omap_dss_put_device(struct omap_dss_device *dssdev); +#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL) +struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from); +struct omap_dss_device *omap_dss_find_device(void *data, + int (*match)(struct omap_dss_device *dssdev, void *data)); +const char *omapdss_get_default_display_name(void); + +void videomode_to_omap_video_timings(const struct videomode *vm, + struct omap_video_timings *ovt); +void omap_video_timings_to_videomode(const struct omap_video_timings *ovt, + struct videomode *vm); + +int dss_feat_get_num_mgrs(void); +int 
dss_feat_get_num_ovls(void); +enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane); + + + +int omap_dss_get_num_overlay_managers(void); +struct omap_overlay_manager *omap_dss_get_overlay_manager(int num); + +int omap_dss_get_num_overlays(void); +struct omap_overlay *omap_dss_get_overlay(int num); + +int omapdss_register_output(struct omap_dss_device *output); +void omapdss_unregister_output(struct omap_dss_device *output); +struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id); +struct omap_dss_device *omap_dss_find_output(const char *name); +struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port); +int omapdss_output_set_device(struct omap_dss_device *out, + struct omap_dss_device *dssdev); +int omapdss_output_unset_device(struct omap_dss_device *out); + +struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev); +struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev); + +void omapdss_default_get_resolution(struct omap_dss_device *dssdev, + u16 *xres, u16 *yres); +int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev); +void omapdss_default_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + +typedef void (*omap_dispc_isr_t) (void *arg, u32 mask); +int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask); +int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask); + +int omapdss_compat_init(void); +void omapdss_compat_uninit(void); + +static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev) +{ + return dssdev->src; +} + +static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev) +{ + return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE; +} + +struct device_node * +omapdss_of_get_next_port(const struct device_node *parent, + struct device_node *prev); + +struct device_node * +omapdss_of_get_next_endpoint(const struct device_node *parent, + struct device_node *prev); + +struct device_node * +omapdss_of_get_first_endpoint(const struct device_node *parent); + +struct omap_dss_device * +omapdss_of_find_source_for_first_ep(struct device_node *node); u32 dispc_read_irqstatus(void); void dispc_clear_irqstatus(u32 mask); @@ -44,6 +909,10 @@ void dispc_mgr_set_timings(enum omap_channel channel, const struct omap_video_timings *timings); void dispc_mgr_setup(enum omap_channel channel, const struct omap_overlay_manager_info *info); +u32 dispc_mgr_gamma_size(enum omap_channel channel); +void dispc_mgr_set_gamma(enum omap_channel channel, + const struct drm_color_lut *lut, + unsigned int length); int dispc_ovl_enable(enum omap_plane plane, bool enable); bool dispc_ovl_enabled(enum omap_plane plane); diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 829232ad8c81..24f859488201 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -21,8 +21,7 @@ #include <linux/slab.h> #include <linux/of.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" static LIST_HEAD(output_list); diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c index f974ddcd3b6e..0a76c89cdc2e 100644 --- a/drivers/gpu/drm/omapdrm/dss/pll.c +++ b/drivers/gpu/drm/omapdrm/dss/pll.c @@ -22,8 +22,7 @@ #include <linux/regulator/consumer.h> #include <linux/sched.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #define PLL_CONTROL 0x0000 @@ -76,6 
+75,59 @@ struct dss_pll *dss_pll_find(const char *name) return NULL; } +struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src) +{ + struct dss_pll *pll; + + switch (src) { + default: + case DSS_CLK_SRC_FCK: + return NULL; + + case DSS_CLK_SRC_HDMI_PLL: + return dss_pll_find("hdmi"); + + case DSS_CLK_SRC_PLL1_1: + case DSS_CLK_SRC_PLL1_2: + case DSS_CLK_SRC_PLL1_3: + pll = dss_pll_find("dsi0"); + if (!pll) + pll = dss_pll_find("video0"); + return pll; + + case DSS_CLK_SRC_PLL2_1: + case DSS_CLK_SRC_PLL2_2: + case DSS_CLK_SRC_PLL2_3: + pll = dss_pll_find("dsi1"); + if (!pll) + pll = dss_pll_find("video1"); + return pll; + } +} + +unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src) +{ + switch (src) { + case DSS_CLK_SRC_HDMI_PLL: + return 0; + + case DSS_CLK_SRC_PLL1_1: + case DSS_CLK_SRC_PLL2_1: + return 0; + + case DSS_CLK_SRC_PLL1_2: + case DSS_CLK_SRC_PLL2_2: + return 1; + + case DSS_CLK_SRC_PLL1_3: + case DSS_CLK_SRC_PLL2_3: + return 2; + + default: + return 0; + } +} + int dss_pll_enable(struct dss_pll *pll) { int r; @@ -129,7 +181,7 @@ int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cin return 0; } -bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, +bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco, unsigned long out_min, unsigned long out_max, dss_hsdiv_calc_func func, void *data) { @@ -154,7 +206,11 @@ bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, return false; } -bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, +/* + * clkdco = clkin / n * m * 2 + * clkoutX = clkdco / mX + */ +bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin, unsigned long pll_min, unsigned long pll_max, dss_pll_calc_func func, void *data) { @@ -195,6 +251,71 @@ bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, return false; } +/* + * This calculates a PLL config that will provide the target_clkout rate + * for clkout. Additionally clkdco rate will be the same as clkout rate + * when clkout rate is >= min_clkdco. 
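+ * + * As a hypothetical worked example of the formulas below, assume + * hw->fint_max = 2500000 and hw->clkdco_min = 500000000: clkin = 20000000 + * with target_clkout = 100000000 then yields n = 8, fint = 2500000, + * m2 = 5, m = 200, mf = 0 and sd = 2, i.e. clkdco = 500 MHz and + * clkout = 100 MHz.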
+ * + * clkdco = clkin / n * m + clkin / n * mf / 262144 + * clkout = clkdco / m2 + */ +bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin, + unsigned long target_clkout, struct dss_pll_clock_info *cinfo) +{ + unsigned long fint, clkdco, clkout; + unsigned long target_clkdco; + unsigned long min_dco; + unsigned n, m, mf, m2, sd; + const struct dss_pll_hw *hw = pll->hw; + + DSSDBG("clkin %lu, target clkout %lu\n", clkin, target_clkout); + + /* Fint */ + n = DIV_ROUND_UP(clkin, hw->fint_max); + fint = clkin / n; + + /* adjust m2 so that the clkdco will be high enough */ + min_dco = roundup(hw->clkdco_min, fint); + m2 = DIV_ROUND_UP(min_dco, target_clkout); + if (m2 == 0) + m2 = 1; + + target_clkdco = target_clkout * m2; + m = target_clkdco / fint; + + clkdco = fint * m; + + /* adjust clkdco with fractional mf */ + if (WARN_ON(target_clkdco - clkdco > fint)) + mf = 0; + else + mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint); + + if (mf > 0) + clkdco += (u32)div_u64((u64)mf * fint, 262144); + + clkout = clkdco / m2; + + /* sigma-delta */ + sd = DIV_ROUND_UP(fint * m, 250000000); + + DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n", + n, m, mf, m2, sd); + DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout); + + cinfo->n = n; + cinfo->m = m; + cinfo->mf = mf; + cinfo->mX[0] = m2; + cinfo->sd = sd; + + cinfo->fint = fint; + cinfo->clkdco = clkdco; + cinfo->clkout[0] = clkout; + + return true; +} + static int wait_for_bit_change(void __iomem *reg, int bitnum, int value) { unsigned long timeout; diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c index 3796576dfadf..cd53566d75eb 100644 --- a/drivers/gpu/drm/omapdrm/dss/rfbi.c +++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c @@ -38,7 +38,7 @@ #include <linux/pm_runtime.h> #include <linux/component.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dss.h" struct rfbi_reg { u16 idx; }; diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index cd6d3bfb041d..0a96c321ce62 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -29,7 +29,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dss.h" static struct { diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 08a2cc778ba9..6eedf2118708 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -37,8 +37,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c index b1ec59e42940..7429de928d4e 100644 --- a/drivers/gpu/drm/omapdrm/dss/video-pll.c +++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c @@ -17,8 +17,7 @@ #include <linux/platform_device.h> #include <linux/sched.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -108,6 +107,8 @@ static const struct dss_pll_ops dss_pll_ops = { }; static const struct dss_pll_hw dss_dra7_video_pll_hw = { + .type = DSS_PLL_TYPE_A, + .n_max = (1 << 8) - 1, .m_max = (1 << 12) - 1, .mX_max = (1 << 5) - 1, @@ -124,6 +125,10 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = { .mX_lsb[0] = 21, .mX_msb[1] = 30, .mX_lsb[1] = 26, + .mX_msb[2] = 4, + .mX_lsb[2] = 0, + .mX_msb[3] = 9, + .mX_lsb[3] = 5, .has_refsel = true, }; diff --git 
a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index ce2d67b6a8c7..137fe690a0da 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -32,7 +32,6 @@ struct omap_connector { struct drm_connector base; struct omap_dss_device *dssdev; - struct drm_encoder *encoder; bool hdmi_mode; }; @@ -256,13 +255,6 @@ static int omap_connector_mode_valid(struct drm_connector *connector, return ret; } -struct drm_encoder *omap_connector_attached_encoder( - struct drm_connector *connector) -{ - struct omap_connector *omap_connector = to_omap_connector(connector); - return omap_connector->encoder; -} - static const struct drm_connector_funcs omap_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .reset = drm_atomic_helper_connector_reset, @@ -276,7 +268,6 @@ static const struct drm_connector_funcs omap_connector_funcs = { static const struct drm_connector_helper_funcs omap_connector_helper_funcs = { .get_modes = omap_connector_get_modes, .mode_valid = omap_connector_mode_valid, - .best_encoder = omap_connector_attached_encoder, }; /* initialize connector */ @@ -296,7 +287,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, goto fail; omap_connector->dssdev = dssdev; - omap_connector->encoder = encoder; connector = &omap_connector->base; diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 075f2bb44867..180f644e861e 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -372,6 +372,20 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) copy_timings_drm_to_omap(&omap_crtc->timings, mode); } +static int omap_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + if (state->color_mgmt_changed && state->gamma_lut) { + uint length = state->gamma_lut->length / + sizeof(struct drm_color_lut); + + if (length < 2) + return -EINVAL; + } + + return 0; +} + static void omap_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { @@ -384,6 +398,19 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc, WARN_ON(omap_crtc->vblank_irq.registered); + if (crtc->state->color_mgmt_changed) { + struct drm_color_lut *lut = NULL; + uint length = 0; + + if (crtc->state->gamma_lut) { + lut = (struct drm_color_lut *) + crtc->state->gamma_lut->data; + length = crtc->state->gamma_lut->length / + sizeof(*lut); + } + dispc_mgr_set_gamma(omap_crtc->channel, lut, length); + } + if (dispc_mgr_is_enabled(omap_crtc->channel)) { DBG("%s: GO", omap_crtc->name); @@ -460,6 +500,7 @@ static const struct drm_crtc_funcs omap_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = omap_crtc_destroy, .page_flip = drm_atomic_helper_page_flip, + .gamma_set = drm_atomic_helper_legacy_gamma_set, .set_property = drm_atomic_helper_crtc_set_property, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, @@ -471,6 +512,7 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = { .mode_set_nofb = omap_crtc_mode_set_nofb, .disable = omap_crtc_disable, .enable = omap_crtc_enable, +
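/* omap_crtc_atomic_check() above rejects gamma LUTs shorter than two entries */ +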
.atomic_check = omap_crtc_atomic_check, .atomic_begin = omap_crtc_atomic_begin, .atomic_flush = omap_crtc_atomic_flush, }; @@ -534,6 +576,20 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs); + /* The dispc API adapts to whatever size, but the HW supports + * 256 element gamma table for LCDs and 1024 element table for + * OMAP_DSS_CHANNEL_DIGIT. X server assumes 256 element gamma + * tables so let's use that. Size of HW gamma table can be + * extracted with dispc_mgr_gamma_size(). If it returns 0, + * the gamma table is not supported. + */ + if (dispc_mgr_gamma_size(channel)) { + uint gamma_lut_size = 256; + + drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size); + drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size); + } + omap_plane_install_properties(crtc->primary, &crtc->base); omap_crtcs[channel] = omap_crtc; diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index d86f5479345b..26c6134eb744 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -142,8 +142,9 @@ static int omap_atomic_commit(struct drm_device *dev, { struct omap_drm_private *priv = dev->dev_private; struct omap_atomic_state_commit *commit; - unsigned int i; - int ret; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int i, ret; ret = drm_atomic_helper_prepare_planes(dev, state); if (ret) @@ -163,10 +164,8 @@ static int omap_atomic_commit(struct drm_device *dev, /* Wait until all affected CRTCs have completed previous commits and * mark them as pending. */ - for (i = 0; i < dev->mode_config.num_crtc; ++i) { - if (state->crtcs[i]) - commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]); - } + for_each_crtc_in_state(state, crtc, crtc_state, i) + commit->crtcs |= drm_crtc_mask(crtc); wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit)); @@ -175,7 +174,7 @@ static int omap_atomic_commit(struct drm_device *dev, spin_unlock(&priv->commit.lock); /* Swap the state, this is the point of no return.
*/ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) schedule_work(&commit->work); @@ -203,6 +202,8 @@ static int get_connector_type(struct omap_dss_device *dssdev) return DRM_MODE_CONNECTOR_HDMIA; case OMAP_DISPLAY_TYPE_DVI: return DRM_MODE_CONNECTOR_DVID; + case OMAP_DISPLAY_TYPE_DSI: + return DRM_MODE_CONNECTOR_DSI; default: return DRM_MODE_CONNECTOR_Unknown; } @@ -800,7 +801,6 @@ static struct drm_driver omap_drm_driver = { .unload = dev_unload, .open = dev_open, .lastclose = dev_lastclose, - .set_busid = drm_platform_set_busid, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = omap_irq_enable_vblank, .disable_vblank = omap_irq_disable_vblank, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 3f823c368912..dcc30a98b9d4 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -24,7 +24,6 @@ #include <linux/platform_data/omap_drm.h> #include <linux/types.h> #include <linux/wait.h> -#include <video/omapdss.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> @@ -183,7 +182,6 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); -struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p); int omap_framebuffer_pin(struct drm_framebuffer *fb); void omap_framebuffer_unpin(struct drm_framebuffer *fb); void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, @@ -231,7 +229,6 @@ int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient, int x, int y, dma_addr_t *paddr); uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj); size_t omap_gem_mmap_size(struct drm_gem_object *obj); -int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h); int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient); struct dma_buf *omap_gem_prime_export(struct drm_device *dev, @@ -239,17 +236,6 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev, struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, struct dma_buf *buffer); -static inline int align_pitch(int pitch, int width, int bpp) -{ - int bytespp = (bpp + 7) / 8; - /* in case someone tries to feed us a completely bogus stride: */ - pitch = max(pitch, width * bytespp); - /* PVR needs alignment to 8 pixels.. right now that is the most - * restrictive stride requirement.. 
- */ - return roundup(pitch, 8 * bytespp); -} - /* map crtc to vblank mask */ uint32_t pipe2vbl(struct drm_crtc *crtc); struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index f84570d1636c..983c8cf2441c 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -122,17 +122,9 @@ static void omap_framebuffer_destroy(struct drm_framebuffer *fb) kfree(omap_fb); } -static int omap_framebuffer_dirty(struct drm_framebuffer *fb, - struct drm_file *file_priv, unsigned flags, unsigned color, - struct drm_clip_rect *clips, unsigned num_clips) -{ - return 0; -} - static const struct drm_framebuffer_funcs omap_framebuffer_funcs = { .create_handle = omap_framebuffer_create_handle, .destroy = omap_framebuffer_destroy, - .dirty = omap_framebuffer_dirty, }; static uint32_t get_linear_addr(struct plane *plane, @@ -320,14 +312,6 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb) mutex_unlock(&omap_fb->lock); } -struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p) -{ - struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); - if (p >= drm_format_num_planes(fb->pixel_format)) - return NULL; - return omap_fb->planes[p].bo; -} - /* iterate thru all the connectors, returning ones that are attached * to the same fb.. */ diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 89da41ac64d2..adb10fbe918d 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -125,9 +125,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; - mode_cmd.pitches[0] = align_pitch( - mode_cmd.width * ((sizes->surface_bpp + 7) / 8), - mode_cmd.width, sizes->surface_bpp); + mode_cmd.pitches[0] = + DIV_ROUND_UP(mode_cmd.width * sizes->surface_bpp, 8); fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled; if (fbdev->ywrap_enabled) { @@ -280,9 +279,6 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev) if (ret) goto fini; - /* disable all the possible outputs/crtcs before entering KMS mode */ - drm_helper_disable_unused_functions(dev); - ret = drm_fb_helper_initial_config(helper, 32); if (ret) goto fini; diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 03698b6c806c..9b3f565fd8d7 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -383,18 +383,6 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj) return size; } -/* get tiled size, returns -EINVAL if not tiled buffer */ -int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h) -{ - struct omap_gem_object *omap_obj = to_omap_bo(obj); - if (omap_obj->flags & OMAP_BO_TILED) { - *w = omap_obj->width; - *h = omap_obj->height; - return 0; - } - return -EINVAL; -} - /* ----------------------------------------------------------------------------- * Fault Handling */ @@ -661,7 +649,8 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, { union omap_gem_size gsize; - args->pitch = align_pitch(0, args->width, args->bpp); + args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + args->size = PAGE_ALIGN(args->pitch * args->height); gsize = (union omap_gem_size){ diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 8b5d54385892..ad429683fef7 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c 
+++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -221,7 +221,6 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct qxl_device *qdev = dev->dev_private; - struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb); struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb); struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj); @@ -252,14 +251,14 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0, &norect, one_clip_rect, inc); - drm_vblank_get(dev, qcrtc->index); + drm_crtc_vblank_get(crtc); if (event) { spin_lock_irqsave(&dev->event_lock, flags); - drm_send_vblank_event(dev, qcrtc->index, event); + drm_crtc_send_vblank_event(crtc, event); spin_unlock_irqrestore(&dev->event_lock, flags); } - drm_vblank_put(dev, qcrtc->index); + drm_crtc_vblank_put(crtc); ret = qxl_bo_reserve(bo, false); if (!ret) { @@ -730,7 +729,6 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id) drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs); qxl_crtc->index = crtc_id; - drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256); drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index dc9df5fe50ba..460bbceae297 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -256,7 +256,7 @@ static struct drm_driver qxl_driver = { .gem_prime_vmap = qxl_gem_prime_vmap, .gem_prime_vunmap = qxl_gem_prime_vunmap, .gem_prime_mmap = qxl_gem_prime_mmap, - .gem_free_object = qxl_gem_object_free, + .gem_free_object_unlocked = qxl_gem_object_free, .gem_open_object = qxl_gem_object_open, .gem_close_object = qxl_gem_object_close, .fops = &qxl_fops, diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 5ea57f6320b8..df2657051afd 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -131,10 +131,6 @@ static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev, int ret; int aligned_size, size; int height = mode_cmd->height; - int bpp; - int depth; - - drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth); size = mode_cmd->pitches[0] * height; aligned_size = ALIGN(size, PAGE_SIZE); diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 4efa8e261baf..f599cd073b72 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -96,7 +96,7 @@ retry: return 0; if (have_drawable_releases && sc > 300) { - FENCE_WARN(fence, "failed to wait on release %d " + FENCE_WARN(fence, "failed to wait on release %llu " "after spincount %d\n", fence->context & ~0xf0000000, sc); goto signaled; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 2e216e2ea78c..e91763d5d800 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -276,14 +276,14 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); atombios_blank_crtc(crtc, ATOM_DISABLE); if (dev->num_crtcs > radeon_crtc->crtc_id) - drm_vblank_on(dev, radeon_crtc->crtc_id); + drm_crtc_vblank_on(crtc); radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (dev->num_crtcs > radeon_crtc->crtc_id) - drm_vblank_off(dev, radeon_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (radeon_crtc->enabled) atombios_blank_crtc(crtc, 
ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 80b24a495d6c..5633ee3eb46e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -2386,7 +2386,7 @@ struct radeon_device { struct radeon_mman mman; struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; wait_queue_head_t fence_queue; - unsigned fence_context; + u64 fence_context; struct mutex ring_lock; struct radeon_ring ring[RADEON_NUM_RINGS]; bool ib_pool_ready; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 6a41b4982647..3965d1916b9c 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -231,19 +231,21 @@ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, *blue = radeon_crtc->lut_b[regno] << 6; } -static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - int end = (start + size > 256) ? 256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { radeon_crtc->lut_r[i] = red[i] >> 6; radeon_crtc->lut_g[i] = green[i] >> 6; radeon_crtc->lut_b[i] = blue[i] >> 6; } radeon_crtc_load_lut(crtc); + + return 0; } static void radeon_crtc_destroy(struct drm_crtc *crtc) @@ -381,7 +383,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); - drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); + drm_crtc_vblank_put(&radeon_crtc->base); radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); queue_work(radeon_crtc->flip_queue, &work->unpin_work); } @@ -598,7 +600,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, } work->base = base; - r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id); + r = drm_crtc_vblank_get(crtc); if (r) { DRM_ERROR("failed to get vblank before flip\n"); goto pflip_cleanup; @@ -625,7 +627,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, return 0; vblank_cleanup: - drm_vblank_put(crtc->dev, radeon_crtc->crtc_id); + drm_crtc_vblank_put(crtc); pflip_cleanup: if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) { @@ -688,6 +690,7 @@ radeon_crtc_set_config(struct drm_mode_set *set) pm_runtime_put_autosuspend(dev->dev); return ret; } + static const struct drm_crtc_funcs radeon_crtc_funcs = { .cursor_set2 = radeon_crtc_cursor_set2, .cursor_move = radeon_crtc_cursor_move, diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index b55aa740171f..a455dc7d4aa1 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -34,11 +34,9 @@ #include "radeon_drv.h" #include <drm/drm_pciids.h> -#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/module.h> #include <linux/pm_runtime.h> -#include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include <drm/drm_gem.h> @@ -340,13 +338,7 @@ static int radeon_pci_probe(struct pci_dev *pdev, if (ret == -EPROBE_DEFER) return ret; - /* - * apple-gmux is needed on dual GPU MacBook Pro - * to probe the panel if we're the inactive GPU. 
- */ - if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) && - apple_gmux_present() && pdev != vga_default_device() && - !vga_switcheroo_handler_flags()) + if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; /* Get rid of things like offb */ diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 478d4099b0d0..d0de4022fff9 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -332,14 +332,14 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); } if (dev->num_crtcs > radeon_crtc->crtc_id) - drm_vblank_on(dev, radeon_crtc->crtc_id); + drm_crtc_vblank_on(crtc); radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (dev->num_crtcs > radeon_crtc->crtc_id) - drm_vblank_off(dev, radeon_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (radeon_crtc->crtc_id) WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); else { diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 38226d925a5b..4b6542538ff9 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -246,6 +246,7 @@ static void radeon_set_power_state(struct radeon_device *rdev) static void radeon_pm_set_clocks(struct radeon_device *rdev) { + struct drm_crtc *crtc; int i, r; /* no need to take locks, etc. if nothing's going to change */ @@ -274,26 +275,30 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) radeon_unmap_vram_bos(rdev); if (rdev->irq.installed) { - for (i = 0; i < rdev->num_crtc; i++) { + i = 0; + drm_for_each_crtc(crtc, rdev->ddev) { if (rdev->pm.active_crtcs & (1 << i)) { /* This can fail if a modeset is in progress */ - if (drm_vblank_get(rdev->ddev, i) == 0) + if (drm_crtc_vblank_get(crtc) == 0) rdev->pm.req_vblank |= (1 << i); else DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n", i); } + i++; } } radeon_set_power_state(rdev); if (rdev->irq.installed) { - for (i = 0; i < rdev->num_crtc; i++) { + i = 0; + drm_for_each_crtc(crtc, rdev->ddev) { if (rdev->pm.req_vblank & (1 << i)) { rdev->pm.req_vblank &= ~(1 << i); - drm_vblank_put(rdev->ddev, i); + drm_crtc_vblank_put(crtc); } + i++; } } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index fb9242d27883..899ef7a2a7b4 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -217,7 +217,7 @@ static struct drm_driver rcar_du_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = rcar_du_enable_vblank, .disable_vblank = rcar_du_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, @@ -278,7 +278,6 @@ static int rcar_du_remove(struct platform_device *pdev) struct rcar_du_device *rcdu = platform_get_drvdata(pdev); struct drm_device *ddev = rcdu->ddev; - drm_connector_unregister_all(ddev); drm_dev_unregister(ddev); if (rcdu->fbdev) @@ -320,8 +319,6 @@ static int rcar_du_probe(struct platform_device *pdev) if (!ddev) return -ENOMEM; - drm_dev_set_unique(ddev, dev_name(&pdev->dev)); - rcdu->ddev = ddev; ddev->dev_private = rcdu; @@ -339,15 +336,15 @@ static int rcar_du_probe(struct platform_device *pdev) * 
disabled for all CRTCs. */ ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1); - if (ret < 0) { - dev_err(&pdev->dev, "failed to initialize vblank\n"); + if (ret < 0) goto error; - } /* DRM/KMS objects */ ret = rcar_du_modeset_init(rcdu); if (ret < 0) { - dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to initialize DRM/KMS (%d)\n", ret); goto error; } @@ -360,10 +357,6 @@ static int rcar_du_probe(struct platform_device *pdev) if (ret) goto error; - ret = drm_connector_register_all(ddev); - if (ret < 0) - goto error; - DRM_INFO("Device %s probed\n", dev_name(&pdev->dev)); return 0; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index 4e939e41f030..55149e9ce28e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -27,18 +27,6 @@ #include "rcar_du_vgacon.h" /* ----------------------------------------------------------------------------- - * Common connector functions - */ - -struct drm_encoder * -rcar_du_connector_best_encoder(struct drm_connector *connector) -{ - struct rcar_du_connector *rcon = to_rcar_connector(connector); - - return rcar_encoder_to_drm_encoder(rcon->encoder); -} - -/* ----------------------------------------------------------------------------- * Encoder */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h index 719b6f2a031c..a8669c3e0dd5 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h @@ -49,9 +49,6 @@ struct rcar_du_connector { #define to_rcar_connector(c) \ container_of(c, struct rcar_du_connector, connector) -struct drm_encoder * -rcar_du_connector_best_encoder(struct drm_connector *connector); - int rcar_du_encoder_init(struct rcar_du_device *rcdu, enum rcar_du_encoder_type type, enum rcar_du_output output, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c index 6c927144b5c9..612b4d5ae098 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c @@ -52,7 +52,6 @@ static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector, static const struct drm_connector_helper_funcs connector_helper_funcs = { .get_modes = rcar_du_hdmi_connector_get_modes, .mode_valid = rcar_du_hdmi_connector_mode_valid, - .best_encoder = rcar_du_connector_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index e70a4f33d970..6bb032d8ac6b 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -288,6 +288,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev, { struct rcar_du_device *rcdu = dev->dev_private; struct rcar_du_commit *commit; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; unsigned int i; int ret; @@ -309,10 +311,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev, /* Wait until all affected CRTCs have completed previous commits and * mark them as pending. 
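+ * drm_crtc_mask(crtc) evaluates to 1 << drm_crtc_index(crtc), so + * commit->crtcs ends up holding the same bitmask of affected CRTCs + * as the open-coded loop it replaces.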
*/ - for (i = 0; i < dev->mode_config.num_crtc; ++i) { - if (state->crtcs[i]) - commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]); - } + for_each_crtc_in_state(state, crtc, crtc_state, i) + commit->crtcs |= drm_crtc_mask(crtc); spin_lock(&rcdu->commit.wait.lock); ret = wait_event_interruptible_locked(rcdu->commit.wait, @@ -327,7 +327,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev, } /* Swap the state, this is the point of no return. */ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) schedule_work(&commit->work); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c index e905f5da7aaa..6afd0af312ba 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c @@ -59,7 +59,6 @@ static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector) static const struct drm_connector_helper_funcs connector_helper_funcs = { .get_modes = rcar_du_lvds_connector_get_modes, - .best_encoder = rcar_du_connector_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index d445e67f78e1..bfe31ca870cc 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -140,18 +140,17 @@ int rcar_du_atomic_check_planes(struct drm_device *dev, bool needs_realloc = false; unsigned int groups = 0; unsigned int i; + struct drm_plane *drm_plane; + struct drm_plane_state *drm_plane_state; /* Check if hardware planes need to be reallocated. */ - for (i = 0; i < dev->mode_config.num_total_plane; ++i) { + for_each_plane_in_state(state, drm_plane, drm_plane_state, i) { struct rcar_du_plane_state *plane_state; struct rcar_du_plane *plane; unsigned int index; - if (!state->planes[i]) - continue; - - plane = to_rcar_plane(state->planes[i]); - plane_state = to_rcar_plane_state(state->plane_states[i]); + plane = to_rcar_plane(drm_plane); + plane_state = to_rcar_plane_state(drm_plane_state); dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__, plane->group->index, plane - plane->group->planes); @@ -247,18 +246,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev, } /* Reallocate hardware planes for each plane that needs it. 
*/ - for (i = 0; i < dev->mode_config.num_total_plane; ++i) { + for_each_plane_in_state(state, drm_plane, drm_plane_state, i) { struct rcar_du_plane_state *plane_state; struct rcar_du_plane *plane; unsigned int crtc_planes; unsigned int free; int idx; - if (!state->planes[i]) - continue; - - plane = to_rcar_plane(state->planes[i]); - plane_state = to_rcar_plane_state(state->plane_states[i]); + plane = to_rcar_plane(drm_plane); + plane_state = to_rcar_plane_state(drm_plane_state); dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__, plane->group->index, plane - plane->group->planes); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index d2f66068e52c..fedb0161e234 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -195,9 +195,10 @@ #define DEFR6_ODPM12_DISP (2 << 8) #define DEFR6_ODPM12_CDE (3 << 8) #define DEFR6_ODPM12_MASK (3 << 8) -#define DEFR6_TCNE2 (1 << 6) +#define DEFR6_TCNE1 (1 << 6) +#define DEFR6_TCNE0 (1 << 4) #define DEFR6_MLOS1 (1 << 2) -#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2) +#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE1) /* ----------------------------------------------------------------------------- * R8A7790-only Control Registers diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c index 9d7e5c99caf6..8d6125c1c0f9 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c @@ -28,7 +28,6 @@ static int rcar_du_vga_connector_get_modes(struct drm_connector *connector) static const struct drm_connector_helper_funcs connector_helper_funcs = { .get_modes = rcar_du_vga_connector_get_modes, - .best_encoder = rcar_du_connector_best_encoder, }; static enum drm_connector_status @@ -79,7 +78,5 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, if (ret < 0) return ret; - rcon->encoder = renc; - return 0; } diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index d30bdc38a760..e48611e83c03 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -2,6 +2,7 @@ config DRM_ROCKCHIP tristate "DRM Support for Rockchip" depends on DRM && ROCKCHIP_IOMMU depends on RESET_CONTROLLER + select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index 7f6a55cae27a..c120172add5c 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -349,20 +349,11 @@ static int rockchip_dp_remove(struct platform_device *pdev) return 0; } +static const struct dev_pm_ops rockchip_dp_pm_ops = { #ifdef CONFIG_PM_SLEEP -static int rockchip_dp_suspend(struct device *dev) -{ - return analogix_dp_suspend(dev); -} - -static int rockchip_dp_resume(struct device *dev) -{ - return analogix_dp_resume(dev); -} + .suspend = analogix_dp_suspend, + .resume_early = analogix_dp_resume, #endif - -static const struct dev_pm_ops rockchip_dp_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(rockchip_dp_suspend, rockchip_dp_resume) }; static const struct of_device_id rockchip_dp_dt_ids[] = { diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c index dedc65b40f36..ca22e5ee89ca 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c @@ -964,18 +964,9 @@ static enum drm_mode_status dw_mipi_dsi_mode_valid( 
return mode_status; } -static struct drm_encoder *dw_mipi_dsi_connector_best_encoder( - struct drm_connector *connector) -{ - struct dw_mipi_dsi *dsi = con_to_dsi(connector); - - return &dsi->encoder; -} - static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = { .get_modes = dw_mipi_dsi_connector_get_modes, .mode_valid = dw_mipi_dsi_mode_valid, - .best_encoder = dw_mipi_dsi_connector_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index f8b4feb60b25..006260de9dbd 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -579,14 +579,6 @@ inno_hdmi_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -inno_hdmi_connector_best_encoder(struct drm_connector *connector) -{ - struct inno_hdmi *hdmi = to_inno_hdmi(connector); - - return &hdmi->encoder; -} - static int inno_hdmi_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY) @@ -613,7 +605,6 @@ static struct drm_connector_funcs inno_hdmi_connector_funcs = { static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = { .get_modes = inno_hdmi_connector_get_modes, .mode_valid = inno_hdmi_connector_mode_valid, - .best_encoder = inno_hdmi_connector_best_encoder, }; static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index a409d1f703cb..d665fb04d264 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -19,11 +19,13 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_gem_cma_helper.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/component.h> +#include <linux/console.h> #include "rockchip_drm_drv.h" #include "rockchip_drm_fb.h" @@ -37,6 +39,7 @@ #define DRIVER_MINOR 0 static bool is_support_iommu = true; +static struct drm_driver rockchip_drm_driver; /* * Attach a (component) device to the shared drm dma mapping from master drm @@ -132,20 +135,24 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, priv->crtc_funcs[pipe]->disable_vblank(crtc); } -static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) +static int rockchip_drm_bind(struct device *dev) { + struct drm_device *drm_dev; struct rockchip_drm_private *private; struct dma_iommu_mapping *mapping = NULL; - struct device *dev = drm_dev->dev; - struct drm_connector *connector; int ret; - private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL); - if (!private) + drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev); + if (!drm_dev) return -ENOMEM; - mutex_init(&private->commit.lock); - INIT_WORK(&private->commit.work, rockchip_drm_atomic_work); + dev_set_drvdata(dev, drm_dev); + + private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL); + if (!private) { + ret = -ENOMEM; + goto err_free; + } drm_dev->dev_private = private; @@ -186,23 +193,6 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) if (ret) goto err_detach_device; - /* - * All components are now added, we can publish the connector sysfs - * entries to userspace. This will generate hotplug events and so - * userspace will expect to be able to access DRM at this point. 
- */ - list_for_each_entry(connector, &drm_dev->mode_config.connector_list, - head) { - ret = drm_connector_register(connector); - if (ret) { - dev_err(drm_dev->dev, - "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n", - connector->base.id, - connector->name, ret); - goto err_unbind; - } - } - /* init kms poll for handling hpd */ drm_kms_helper_poll_init(drm_dev); @@ -222,14 +212,19 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) if (ret) goto err_vblank_cleanup; + ret = drm_dev_register(drm_dev, 0); + if (ret) + goto err_fbdev_fini; + if (is_support_iommu) arm_iommu_release_mapping(mapping); return 0; +err_fbdev_fini: + rockchip_drm_fbdev_fini(drm_dev); err_vblank_cleanup: drm_vblank_cleanup(drm_dev); err_kms_helper_poll_fini: drm_kms_helper_poll_fini(drm_dev); -err_unbind: component_unbind_all(dev, drm_dev); err_detach_device: if (is_support_iommu) @@ -240,12 +235,14 @@ err_release_mapping: err_config_cleanup: drm_mode_config_cleanup(drm_dev); drm_dev->dev_private = NULL; +err_free: + drm_dev_unref(drm_dev); return ret; } -static int rockchip_drm_unload(struct drm_device *drm_dev) +static void rockchip_drm_unbind(struct device *dev) { - struct device *dev = drm_dev->dev; + struct drm_device *drm_dev = dev_get_drvdata(dev); rockchip_drm_fbdev_fini(drm_dev); drm_vblank_cleanup(drm_dev); @@ -255,29 +252,9 @@ static int rockchip_drm_unload(struct drm_device *drm_dev) arm_iommu_detach_device(dev); drm_mode_config_cleanup(drm_dev); drm_dev->dev_private = NULL; - - return 0; -} - -static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc, - struct drm_file *file_priv) -{ - struct rockchip_drm_private *priv = crtc->dev->dev_private; - int pipe = drm_crtc_index(crtc); - - if (pipe < ROCKCHIP_MAX_CRTC && - priv->crtc_funcs[pipe] && - priv->crtc_funcs[pipe]->cancel_pending_vblank) - priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv); -} - -static void rockchip_drm_preclose(struct drm_device *dev, - struct drm_file *file_priv) -{ - struct drm_crtc *crtc; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) - rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv); + drm_dev_unregister(drm_dev); + drm_dev_unref(drm_dev); + dev_set_drvdata(dev, NULL); } void rockchip_drm_lastclose(struct drm_device *dev) @@ -300,23 +277,15 @@ static const struct file_operations rockchip_drm_driver_fops = { .release = drm_release, }; -const struct vm_operations_struct rockchip_drm_vm_ops = { - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static struct drm_driver rockchip_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, - .load = rockchip_drm_load, - .unload = rockchip_drm_unload, - .preclose = rockchip_drm_preclose, .lastclose = rockchip_drm_lastclose, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = rockchip_drm_crtc_enable_vblank, .disable_vblank = rockchip_drm_crtc_disable_vblank, - .gem_vm_ops = &rockchip_drm_vm_ops, - .gem_free_object = rockchip_gem_free_object, + .gem_vm_ops = &drm_gem_cma_vm_ops, + .gem_free_object_unlocked = rockchip_gem_free_object, .dumb_create = rockchip_gem_dumb_create, .dumb_map_offset = rockchip_gem_dumb_map_offset, .dumb_destroy = drm_gem_dumb_destroy, @@ -337,25 +306,38 @@ static struct drm_driver rockchip_drm_driver = { }; #ifdef CONFIG_PM_SLEEP -static int rockchip_drm_sys_suspend(struct device *dev) +void rockchip_drm_fb_suspend(struct drm_device *drm) { - struct drm_device *drm = dev_get_drvdata(dev); - struct drm_connector 
*connector; + struct rockchip_drm_private *priv = drm->dev_private; - if (!drm) - return 0; + console_lock(); + drm_fb_helper_set_suspend(&priv->fbdev_helper, 1); + console_unlock(); +} - drm_modeset_lock_all(drm); - list_for_each_entry(connector, &drm->mode_config.connector_list, head) { - int old_dpms = connector->dpms; +void rockchip_drm_fb_resume(struct drm_device *drm) +{ + struct rockchip_drm_private *priv = drm->dev_private; - if (connector->funcs->dpms) - connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); + console_lock(); + drm_fb_helper_set_suspend(&priv->fbdev_helper, 0); + console_unlock(); +} - /* Set the old mode back to the connector for resume */ - connector->dpms = old_dpms; +static int rockchip_drm_sys_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct rockchip_drm_private *priv = drm->dev_private; + + drm_kms_helper_poll_disable(drm); + rockchip_drm_fb_suspend(drm); + + priv->state = drm_atomic_helper_suspend(drm); + if (IS_ERR(priv->state)) { + rockchip_drm_fb_resume(drm); + drm_kms_helper_poll_enable(drm); + return PTR_ERR(priv->state); } - drm_modeset_unlock_all(drm); return 0; } @@ -363,47 +345,11 @@ static int rockchip_drm_sys_suspend(struct device *dev) static int rockchip_drm_sys_resume(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct drm_connector *connector; - enum drm_connector_status status; - bool changed = false; - - if (!drm) - return 0; + struct rockchip_drm_private *priv = drm->dev_private; - drm_modeset_lock_all(drm); - list_for_each_entry(connector, &drm->mode_config.connector_list, head) { - int desired_mode = connector->dpms; - - /* - * at suspend time, we save dpms to connector->dpms, - * restore the old_dpms, and at current time, the connector - * dpms status must be DRM_MODE_DPMS_OFF. - */ - connector->dpms = DRM_MODE_DPMS_OFF; - - /* - * If the connector has been disconnected during suspend, - * disconnect it from the encoder and leave it off. We'll notify - * userspace at the end. 
- */ - if (desired_mode == DRM_MODE_DPMS_ON) { - status = connector->funcs->detect(connector, true); - if (status == connector_status_disconnected) { - connector->encoder = NULL; - connector->status = status; - changed = true; - continue; - } - } - if (connector->funcs->dpms) - connector->funcs->dpms(connector, desired_mode); - } - drm_modeset_unlock_all(drm); - - drm_helper_resume_force_mode(drm); - - if (changed) - drm_kms_helper_hotplug_event(drm); + drm_atomic_helper_resume(drm, priv->state); + rockchip_drm_fb_resume(drm); + drm_kms_helper_poll_enable(drm); return 0; } @@ -444,37 +390,6 @@ static void rockchip_add_endpoints(struct device *dev, } } -static int rockchip_drm_bind(struct device *dev) -{ - struct drm_device *drm; - int ret; - - drm = drm_dev_alloc(&rockchip_drm_driver, dev); - if (!drm) - return -ENOMEM; - - ret = drm_dev_register(drm, 0); - if (ret) - goto err_free; - - dev_set_drvdata(dev, drm); - - return 0; - -err_free: - drm_dev_unref(drm); - return ret; -} - -static void rockchip_drm_unbind(struct device *dev) -{ - struct drm_device *drm = dev_get_drvdata(dev); - - drm_dev_unregister(drm); - drm_dev_unref(drm); - dev_set_drvdata(dev, NULL); -} - static const struct component_master_ops rockchip_drm_ops = { .bind = rockchip_drm_bind, .unbind = rockchip_drm_unbind, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 56f43a364c7f..ea3932940061 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -40,14 +40,6 @@ struct rockchip_crtc_funcs { int (*enable_vblank)(struct drm_crtc *crtc); void (*disable_vblank)(struct drm_crtc *crtc); void (*wait_for_update)(struct drm_crtc *crtc); - void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv); -}; - -struct rockchip_atomic_commit { - struct work_struct work; - struct drm_atomic_state *state; - struct drm_device *dev; - struct mutex lock; }; struct rockchip_crtc_state { @@ -68,11 +60,9 @@ struct rockchip_drm_private { struct drm_fb_helper fbdev_helper; struct drm_gem_object *fbdev_bo; const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC]; - - struct rockchip_atomic_commit commit; + struct drm_atomic_state *state; }; -void rockchip_drm_atomic_work(struct work_struct *work); int rockchip_register_crtc_funcs(struct drm_crtc *crtc, const struct rockchip_crtc_funcs *crtc_funcs); void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 755cfdba61cd..20f12bc5a386 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -228,87 +228,32 @@ rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_stat } static void -rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit) +rockchip_atomic_commit_tail(struct drm_atomic_state *state) { - struct drm_atomic_state *state = commit->state; - struct drm_device *dev = commit->dev; + struct drm_device *dev = state->dev; - /* - * TODO: do fence wait here. - */ - - /* - * Rockchip crtc support runtime PM, can't update display planes - * when crtc is disabled. 
- * - * drm_atomic_helper_commit comments detail that: - * For drivers supporting runtime PM the recommended sequence is - * - * drm_atomic_helper_commit_modeset_disables(dev, state); - * - * drm_atomic_helper_commit_modeset_enables(dev, state); - * - * drm_atomic_helper_commit_planes(dev, state, true); - * - * See the kerneldoc entries for these three functions for more details. - */ drm_atomic_helper_commit_modeset_disables(dev, state); drm_atomic_helper_commit_modeset_enables(dev, state); drm_atomic_helper_commit_planes(dev, state, true); + drm_atomic_helper_commit_hw_done(state); + rockchip_atomic_wait_for_complete(dev, state); drm_atomic_helper_cleanup_planes(dev, state); - - drm_atomic_state_free(state); -} - -void rockchip_drm_atomic_work(struct work_struct *work) -{ - struct rockchip_atomic_commit *commit = container_of(work, - struct rockchip_atomic_commit, work); - - rockchip_atomic_commit_complete(commit); } -int rockchip_drm_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, - bool nonblock) -{ - struct rockchip_drm_private *private = dev->dev_private; - struct rockchip_atomic_commit *commit = &private->commit; - int ret; - - ret = drm_atomic_helper_prepare_planes(dev, state); - if (ret) - return ret; - - /* serialize outstanding nonblocking commits */ - mutex_lock(&commit->lock); - flush_work(&commit->work); - - drm_atomic_helper_swap_state(dev, state); - - commit->dev = dev; - commit->state = state; - - if (nonblock) - schedule_work(&commit->work); - else - rockchip_atomic_commit_complete(commit); - - mutex_unlock(&commit->lock); - - return 0; -} +struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = { + .atomic_commit_tail = rockchip_atomic_commit_tail, +}; static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { .fb_create = rockchip_user_fb_create, .output_poll_changed = rockchip_drm_output_poll_changed, .atomic_check = drm_atomic_helper_check, - .atomic_commit = rockchip_drm_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; struct drm_framebuffer * @@ -339,4 +284,5 @@ void rockchip_drm_mode_config_init(struct drm_device *dev) dev->mode_config.max_height = 4096; dev->mode_config.funcs = &rockchip_drm_mode_config_funcs; + dev->mode_config.helper_private = &rockchip_mode_config_helpers; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c index f261512bb4a0..207e01de6e32 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c @@ -108,7 +108,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, fbi->screen_size = rk_obj->base.size; fbi->fix.smem_len = rk_obj->base.size; - DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n", + DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n", fb->width, fb->height, fb->depth, rk_obj->kvaddr, offset, size); @@ -156,9 +156,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev) goto err_drm_fb_helper_fini; } - /* disable all the possible outputs/crtcs before entering KMS mode */ - drm_helper_disable_unused_functions(dev); - ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); if (ret < 0) { dev_err(dev->dev, "Failed to set initial hw config - %d.\n", diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 9c2d8a894093..059e902f872d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -38,7 +38,7 @@ static int 
rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, &rk_obj->dma_addr, GFP_KERNEL, &rk_obj->dma_attrs); if (!rk_obj->kvaddr) { - DRM_ERROR("failed to allocate %#x byte dma buffer", obj->size); + DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size); return -ENOMEM; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 1c4d5b5a70a2..6255e5bcd954 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -98,7 +98,9 @@ struct vop_win { const struct vop_win_data *data; struct vop *vop; - struct vop_plane_state state; + /* protected by dev->event_lock */ + bool enable; + dma_addr_t yrgb_mst; }; struct vop { @@ -112,6 +114,8 @@ struct vop { bool vsync_work_pending; struct completion dsp_hold_completion; struct completion wait_update_complete; + + /* protected by dev->event_lock */ struct drm_pending_vblank_event *event; const struct vop_data *data; @@ -431,9 +435,6 @@ static void vop_enable(struct drm_crtc *crtc) struct vop *vop = to_vop(crtc); int ret; - if (vop->is_enabled) - return; - ret = pm_runtime_get_sync(vop->dev); if (ret < 0) { dev_err(vop->dev, "failed to get pm runtime: %d\n", ret); @@ -501,8 +502,7 @@ static void vop_crtc_disable(struct drm_crtc *crtc) struct vop *vop = to_vop(crtc); int i; - if (!vop->is_enabled) - return; + WARN_ON(vop->event); /* * We need to make sure that all windows are disabled before we @@ -553,6 +553,14 @@ static void vop_crtc_disable(struct drm_crtc *crtc) clk_disable(vop->aclk); clk_disable(vop->hclk); pm_runtime_put(vop->dev); + + if (crtc->state->event && !crtc->state->active) { + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + spin_unlock_irq(&crtc->dev->event_lock); + + crtc->state->event = NULL; + } } static void vop_plane_destroy(struct drm_plane *plane) @@ -618,6 +626,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, ret = drm_plane_helper_check_update(plane, crtc, state->fb, src, dest, &clip, + state->rotation, min_scale, max_scale, true, true, &visible); @@ -658,6 +667,11 @@ static void vop_plane_atomic_disable(struct drm_plane *plane, if (!old_state->crtc) return; + spin_lock_irq(&plane->dev->event_lock); + vop_win->enable = false; + vop_win->yrgb_mst = 0; + spin_unlock_irq(&plane->dev->event_lock); + spin_lock(&vop->reg_lock); VOP_WIN_SET(vop, win, enable, 0); @@ -692,7 +706,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane, /* * can't update plane when vop is disabled. 
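
The hunks around this function are the heart of the vop change: everything the vblank interrupt needs is shadowed into struct vop_win and only touched under dev->event_lock, so interrupt context never dereferences plane->state, which the atomic core may swap or free concurrently. Distilled from the hunks (fragments, names as in the patch):

        /* update side, in the plane's atomic_update (false in atomic_disable) */
        spin_lock_irq(&plane->dev->event_lock);
        vop_win->enable   = true;
        vop_win->yrgb_mst = vop_plane_state->yrgb_mst;
        spin_unlock_irq(&plane->dev->event_lock);

        /* interrupt side compares the shadow against the hardware register */
        static bool foo_win_pending_is_complete(struct vop_win *vop_win)
        {
                if (!vop_win->enable)
                        return VOP_WIN_GET(vop_win->vop, vop_win->data,
                                           enable) == 0;

                return VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data) ==
                       vop_win->yrgb_mst;
        }
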
*/ - if (!crtc) + if (WARN_ON(!crtc)) return; if (WARN_ON(!vop->is_enabled)) @@ -721,6 +735,11 @@ static void vop_plane_atomic_update(struct drm_plane *plane, offset += (src->y1 >> 16) * fb->pitches[0]; vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0]; + spin_lock_irq(&plane->dev->event_lock); + vop_win->enable = true; + vop_win->yrgb_mst = vop_plane_state->yrgb_mst; + spin_unlock_irq(&plane->dev->event_lock); + spin_lock(&vop->reg_lock); VOP_WIN_SET(vop, win, format, vop_plane_state->format); @@ -876,30 +895,10 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc) WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100)); } -static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc, - struct drm_file *file_priv) -{ - struct drm_device *drm = crtc->dev; - struct vop *vop = to_vop(crtc); - struct drm_pending_vblank_event *e; - unsigned long flags; - - spin_lock_irqsave(&drm->event_lock, flags); - e = vop->event; - if (e && e->base.file_priv == file_priv) { - vop->event = NULL; - - e->base.destroy(&e->base); - file_priv->event_space += sizeof(e->event); - } - spin_unlock_irqrestore(&drm->event_lock, flags); -} - static const struct rockchip_crtc_funcs private_crtc_funcs = { .enable_vblank = vop_crtc_enable_vblank, .disable_vblank = vop_crtc_disable_vblank, .wait_for_update = vop_crtc_wait_for_update, - .cancel_pending_vblank = vop_crtc_cancel_pending_vblank, }; static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, @@ -931,6 +930,8 @@ static void vop_crtc_enable(struct drm_crtc *crtc) u16 vact_end = vact_st + vdisplay; uint32_t val; + WARN_ON(vop->event); + vop_enable(crtc); /* * If dclk rate is zero, mean that scanout is stop, @@ -1027,12 +1028,15 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc, { struct vop *vop = to_vop(crtc); + spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event) { WARN_ON(drm_crtc_vblank_get(crtc) != 0); + WARN_ON(vop->event); vop->event = crtc->state->event; crtc->state->event = NULL; } + spin_unlock_irq(&crtc->dev->event_lock); } static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { @@ -1080,16 +1084,14 @@ static const struct drm_crtc_funcs vop_crtc_funcs = { static bool vop_win_pending_is_complete(struct vop_win *vop_win) { - struct drm_plane *plane = &vop_win->base; - struct vop_plane_state *state = to_vop_plane_state(plane->state); dma_addr_t yrgb_mst; - if (!state->enable) + if (!vop_win->enable) return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0; yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data); - return yrgb_mst == state->yrgb_mst; + return yrgb_mst == vop_win->yrgb_mst; } static void vop_handle_vblank(struct vop *vop) @@ -1104,15 +1106,16 @@ static void vop_handle_vblank(struct vop *vop) return; } + spin_lock_irqsave(&drm->event_lock, flags); if (vop->event) { - spin_lock_irqsave(&drm->event_lock, flags); drm_crtc_send_vblank_event(crtc, vop->event); drm_crtc_vblank_put(crtc); vop->event = NULL; - spin_unlock_irqrestore(&drm->event_lock, flags); } + spin_unlock_irqrestore(&drm->event_lock, flags); + if (!completion_done(&vop->wait_update_complete)) complete(&vop->wait_update_complete); } diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index 1e154fc779d5..6547b1db460a 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -441,7 +441,7 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc) scrtc->event = NULL; if (event) { 
drm_crtc_send_vblank_event(&scrtc->crtc, event); - drm_vblank_put(dev, 0); + drm_crtc_vblank_put(&scrtc->crtc); } spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -467,7 +467,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc, if (event) { event->pipe = 0; - drm_vblank_get(dev, 0); + drm_crtc_vblank_get(&scrtc->crtc); spin_lock_irqsave(&dev->event_lock, flags); scrtc->event = event; spin_unlock_irqrestore(&dev->event_lock, flags); diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 7700ff172079..f0492603ea88 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -259,12 +259,11 @@ static struct drm_driver shmob_drm_driver = { | DRIVER_PRIME, .load = shmob_drm_load, .unload = shmob_drm_unload, - .set_busid = drm_platform_set_busid, .irq_handler = shmob_drm_irq, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = shmob_drm_enable_vblank, .disable_vblank = shmob_drm_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c index 93ad8a5704d1..03defda77766 100644 --- a/drivers/gpu/drm/sis/sis_mm.c +++ b/drivers/gpu/drm/sis/sis_mm.c @@ -316,7 +316,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev, struct sis_file_private *file_priv = file->driver_priv; struct sis_memblock *entry, *next; - if (!(file->minor->master && file->master->lock.hw_lock)) + if (!(dev->master && file->master->lock.hw_lock)) return; drm_legacy_idlelock_take(&file->master->lock); diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c index 3d2fa3ab33df..794148ff0e57 100644 --- a/drivers/gpu/drm/sti/sti_compositor.c +++ b/drivers/gpu/drm/sti/sti_compositor.c @@ -55,6 +55,26 @@ struct sti_compositor_data stih416_compositor_data = { }, }; +int sti_compositor_debufs_init(struct sti_compositor *compo, + struct drm_minor *minor) +{ + int ret = 0, i; + + for (i = 0; compo->vid[i]; i++) { + ret = vid_debugfs_init(compo->vid[i], minor); + if (ret) + return ret; + } + + for (i = 0; compo->mixer[i]; i++) { + ret = sti_mixer_debugfs_init(compo->mixer[i], minor); + if (ret) + return ret; + } + + return 0; +} + static int sti_compositor_bind(struct device *dev, struct device *master, void *data) diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h index 1a4a73dab11e..24444ef42a98 100644 --- a/drivers/gpu/drm/sti/sti_compositor.h +++ b/drivers/gpu/drm/sti/sti_compositor.h @@ -81,4 +81,7 @@ struct sti_compositor { struct notifier_block vtg_vblank_nb; }; +int sti_compositor_debufs_init(struct sti_compositor *compo, + struct drm_minor *minor); + #endif diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index e04deedabd4a..7fab3af7473b 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -331,6 +331,17 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe) } } +static int sti_crtc_late_register(struct drm_crtc *crtc) +{ + struct sti_mixer *mixer = to_sti_mixer(crtc); + struct sti_compositor *compo = dev_get_drvdata(mixer->dev); + + if (drm_crtc_index(crtc) == 0) + return sti_compositor_debufs_init(compo, crtc->dev->primary); + + return 0; +} + static const struct drm_crtc_funcs sti_crtc_funcs = 
{ .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, @@ -339,6 +350,7 @@ static const struct drm_crtc_funcs sti_crtc_funcs = { .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .late_register = sti_crtc_late_register, }; bool sti_crtc_is_main(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index 4e990299735c..a263bbba4119 100644 --- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -105,12 +105,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "%s: (vaddr = 0x%p)", sti_plane_to_str(&cursor->plane), cursor->regs); @@ -129,7 +123,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data) DBGFS_DUMP(CUR_AWE); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -336,6 +329,33 @@ static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = { .atomic_disable = sti_cursor_atomic_disable, }; +static void sti_cursor_destroy(struct drm_plane *drm_plane) +{ + DRM_DEBUG_DRIVER("\n"); + + drm_plane_helper_disable(drm_plane); + drm_plane_cleanup(drm_plane); +} + +static int sti_cursor_late_register(struct drm_plane *drm_plane) +{ + struct sti_plane *plane = to_sti_plane(drm_plane); + struct sti_cursor *cursor = to_sti_cursor(plane); + + return cursor_debugfs_init(cursor, drm_plane->dev->primary); +} + +struct drm_plane_funcs sti_cursor_plane_helpers_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = sti_cursor_destroy, + .set_property = sti_plane_set_property, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .late_register = sti_cursor_late_register, +}; + struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, struct device *dev, int desc, void __iomem *baseaddr, @@ -370,7 +390,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane, possible_crtcs, - &sti_plane_helpers_funcs, + &sti_cursor_plane_helpers_funcs, cursor_supported_formats, ARRAY_SIZE(cursor_supported_formats), DRM_PLANE_TYPE_CURSOR, NULL); @@ -384,9 +404,6 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev, sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR); - if (cursor_debugfs_init(cursor, drm_dev->primary)) - DRM_ERROR("CURSOR debugfs setup failed\n"); - return &cursor->plane.drm_plane; err_plane: diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 872495e72294..96bd3d08b2d4 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -72,11 +72,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data) struct drm_info_node *node = s->private; struct drm_device *dev = node->minor->dev; struct drm_plane *p; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; list_for_each_entry(p, &dev->mode_config.plane_list, head) { struct sti_plane *plane = to_sti_plane(p); @@ -86,7 +81,6 @@ static int 
sti_drm_fps_dbg_show(struct seq_file *s, void *data) plane->fps_info.fips_str); } - mutex_unlock(&dev->struct_mutex); return 0; } @@ -221,7 +215,7 @@ static int sti_atomic_commit(struct drm_device *drm, * the software side now. */ - drm_atomic_helper_swap_state(drm, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) sti_atomic_schedule(private, state); @@ -232,8 +226,28 @@ static int sti_atomic_commit(struct drm_device *drm, return 0; } +static void sti_output_poll_changed(struct drm_device *ddev) +{ + struct sti_private *private = ddev->dev_private; + + if (!ddev->mode_config.num_connector) + return; + + if (private->fbdev) { + drm_fbdev_cma_hotplug_event(private->fbdev); + return; + } + + private->fbdev = drm_fbdev_cma_init(ddev, 32, + ddev->mode_config.num_crtc, + ddev->mode_config.num_connector); + if (IS_ERR(private->fbdev)) + private->fbdev = NULL; +} + static const struct drm_mode_config_funcs sti_mode_config_funcs = { .fb_create = drm_fb_cma_create, + .output_poll_changed = sti_output_poll_changed, .atomic_check = drm_atomic_helper_check, .atomic_commit = sti_atomic_commit, }; @@ -254,45 +268,6 @@ static void sti_mode_config_init(struct drm_device *dev) dev->mode_config.funcs = &sti_mode_config_funcs; } -static int sti_load(struct drm_device *dev, unsigned long flags) -{ - struct sti_private *private; - int ret; - - private = kzalloc(sizeof(*private), GFP_KERNEL); - if (!private) { - DRM_ERROR("Failed to allocate private\n"); - return -ENOMEM; - } - dev->dev_private = (void *)private; - private->drm_dev = dev; - - mutex_init(&private->commit.lock); - INIT_WORK(&private->commit.work, sti_atomic_work); - - drm_mode_config_init(dev); - drm_kms_helper_poll_init(dev); - - sti_mode_config_init(dev); - - ret = component_bind_all(dev->dev, dev); - if (ret) { - drm_kms_helper_poll_fini(dev); - drm_mode_config_cleanup(dev); - kfree(private); - return ret; - } - - drm_mode_config_reset(dev); - - drm_helper_disable_unused_functions(dev); - drm_fbdev_cma_init(dev, 32, - dev->mode_config.num_crtc, - dev->mode_config.num_connector); - - return 0; -} - static const struct file_operations sti_driver_fops = { .owner = THIS_MODULE, .open = drm_open, @@ -309,8 +284,7 @@ static const struct file_operations sti_driver_fops = { static struct drm_driver sti_driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, - .load = sti_load, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, @@ -346,14 +320,88 @@ static int compare_of(struct device *dev, void *data) return dev->of_node == data; } +static int sti_init(struct drm_device *ddev) +{ + struct sti_private *private; + + private = kzalloc(sizeof(*private), GFP_KERNEL); + if (!private) + return -ENOMEM; + + ddev->dev_private = (void *)private; + dev_set_drvdata(ddev->dev, ddev); + private->drm_dev = ddev; + + mutex_init(&private->commit.lock); + INIT_WORK(&private->commit.work, sti_atomic_work); + + drm_mode_config_init(ddev); + + sti_mode_config_init(ddev); + + drm_kms_helper_poll_init(ddev); + + return 0; +} + +static void sti_cleanup(struct drm_device *ddev) +{ + struct sti_private *private = ddev->dev_private; + + if (private->fbdev) { + drm_fbdev_cma_fini(private->fbdev); + private->fbdev = NULL; + } + + drm_kms_helper_poll_fini(ddev); + drm_vblank_cleanup(ddev); + kfree(private); + ddev->dev_private = NULL; +} + 
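
All of the sti debugfs churn in this series follows one pattern: drm_dev_register() now registers connectors, CRTCs and planes itself and invokes their .late_register hooks once the DRM minor exists, so debugfs creation moves out of bind time and the manual drm_connector_register() calls disappear. A sketch with hypothetical names (foo_debugfs_init and foo_from_connector stand in for the per-object helpers):

        static int foo_connector_late_register(struct drm_connector *connector)
        {
                /* called from drm_dev_register(), when the minor exists */
                return foo_debugfs_init(foo_from_connector(connector),
                                        connector->dev->primary);
        }

        static const struct drm_connector_funcs foo_connector_funcs = {
                .destroy       = drm_connector_cleanup,
                .late_register = foo_connector_late_register,
                /* remaining callbacks elided from this sketch */
        };
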
static int sti_bind(struct device *dev) { - return drm_platform_init(&sti_driver, to_platform_device(dev)); + struct drm_device *ddev; + int ret; + + ddev = drm_dev_alloc(&sti_driver, dev); + if (!ddev) + return -ENOMEM; + + ddev->platformdev = to_platform_device(dev); + + ret = sti_init(ddev); + if (ret) + goto err_drm_dev_unref; + + ret = component_bind_all(ddev->dev, ddev); + if (ret) + goto err_cleanup; + + ret = drm_dev_register(ddev, 0); + if (ret) + goto err_register; + + drm_mode_config_reset(ddev); + + return 0; + +err_register: + drm_mode_config_cleanup(ddev); +err_cleanup: + sti_cleanup(ddev); +err_drm_dev_unref: + drm_dev_unref(ddev); + return ret; } static void sti_unbind(struct device *dev) { - drm_put_dev(dev_get_drvdata(dev)); + struct drm_device *ddev = dev_get_drvdata(dev); + + drm_dev_unregister(ddev); + sti_cleanup(ddev); + drm_dev_unref(ddev); } static const struct component_master_ops sti_ops = { diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h index 30ddc20841c3..78ebe5e30f53 100644 --- a/drivers/gpu/drm/sti/sti_drv.h +++ b/drivers/gpu/drm/sti/sti_drv.h @@ -24,6 +24,7 @@ struct sti_private { struct sti_compositor *compo; struct drm_property *plane_zorder_property; struct drm_device *drm_dev; + struct drm_fbdev_cma *fbdev; struct { struct drm_atomic_state *state; diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index 25f76632002c..ec3108074350 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -177,12 +177,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs); DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL); @@ -193,7 +187,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data) dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -384,20 +377,10 @@ static int sti_dvo_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -struct drm_encoder *sti_dvo_best_encoder(struct drm_connector *connector) -{ - struct sti_dvo_connector *dvo_connector - = to_sti_dvo_connector(connector); - - /* Best encoder is the one associated during connector creation */ - return dvo_connector->encoder; -} - static const struct drm_connector_helper_funcs sti_dvo_connector_helper_funcs = { .get_modes = sti_dvo_connector_get_modes, .mode_valid = sti_dvo_connector_mode_valid, - .best_encoder = sti_dvo_best_encoder, }; static enum drm_connector_status @@ -421,24 +404,29 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force) return connector_status_disconnected; } -static void sti_dvo_connector_destroy(struct drm_connector *connector) +static int sti_dvo_late_register(struct drm_connector *connector) { struct sti_dvo_connector *dvo_connector = to_sti_dvo_connector(connector); + struct sti_dvo *dvo = dvo_connector->dvo; - drm_connector_unregister(connector); - drm_connector_cleanup(connector); - kfree(dvo_connector); + if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) { + DRM_ERROR("DVO debugfs setup failed\n"); + return -EINVAL; + } + + return 0; } static const struct drm_connector_funcs sti_dvo_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = 
sti_dvo_connector_detect, - .destroy = sti_dvo_connector_destroy, + .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .late_register = sti_dvo_late_register, }; static struct drm_encoder *sti_dvo_find_encoder(struct drm_device *dev) @@ -509,26 +497,16 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data) drm_connector_helper_add(drm_connector, &sti_dvo_connector_helper_funcs); - err = drm_connector_register(drm_connector); - if (err) - goto err_connector; - err = drm_mode_connector_attach_encoder(drm_connector, encoder); if (err) { DRM_ERROR("Failed to attach a connector to a encoder\n"); goto err_sysfs; } - if (dvo_debugfs_init(dvo, drm_dev->primary)) - DRM_ERROR("DVO debugfs setup failed\n"); - return 0; err_sysfs: - drm_connector_unregister(drm_connector); -err_connector: drm_bridge_remove(bridge); - drm_connector_cleanup(drm_connector); return -EINVAL; } diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index ff33c38da197..bf63086a3dc8 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -208,14 +208,8 @@ static int gdp_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; struct drm_plane *drm_plane = &gdp->plane.drm_plane; struct drm_crtc *crtc = drm_plane->crtc; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "%s: (vaddr = 0x%p)", sti_plane_to_str(&gdp->plane), gdp->regs); @@ -248,7 +242,6 @@ static int gdp_dbg_show(struct seq_file *s, void *data) seq_printf(s, " Connected to DRM CRTC #%d (%s)\n", crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc))); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -279,13 +272,7 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg) { struct drm_info_node *node = s->private; struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; unsigned int b; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; for (b = 0; b < GDP_NODE_NB_BANK; b++) { seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b); @@ -294,7 +281,6 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg) gdp_node_dump_node(s, gdp->node_list[b].btm_field); } - mutex_unlock(&dev->struct_mutex); return 0; } @@ -880,6 +866,33 @@ static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = { .atomic_disable = sti_gdp_atomic_disable, }; +static void sti_gdp_destroy(struct drm_plane *drm_plane) +{ + DRM_DEBUG_DRIVER("\n"); + + drm_plane_helper_disable(drm_plane); + drm_plane_cleanup(drm_plane); +} + +static int sti_gdp_late_register(struct drm_plane *drm_plane) +{ + struct sti_plane *plane = to_sti_plane(drm_plane); + struct sti_gdp *gdp = to_sti_gdp(plane); + + return gdp_debugfs_init(gdp, drm_plane->dev->primary); +} + +struct drm_plane_funcs sti_gdp_plane_helpers_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = sti_gdp_destroy, + .set_property = sti_plane_set_property, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .late_register = 
sti_gdp_late_register, +}; + struct drm_plane *sti_gdp_create(struct drm_device *drm_dev, struct device *dev, int desc, void __iomem *baseaddr, @@ -906,7 +919,7 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev, res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane, possible_crtcs, - &sti_plane_helpers_funcs, + &sti_gdp_plane_helpers_funcs, gdp_supported_formats, ARRAY_SIZE(gdp_supported_formats), type, NULL); @@ -919,9 +932,6 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev, sti_plane_init_property(&gdp->plane, type); - if (gdp_debugfs_init(gdp, drm_dev->primary)) - DRM_ERROR("GDP debugfs setup failed\n"); - return &gdp->plane.drm_plane; err: diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index f7d3464cdf09..8505569f75de 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -376,12 +376,6 @@ static int hda_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_hda *hda = (struct sti_hda *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs); DBGFS_DUMP(HDA_ANA_CFG); @@ -397,7 +391,6 @@ static int hda_dbg_show(struct seq_file *s, void *data) hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -676,20 +669,10 @@ static int sti_hda_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -struct drm_encoder *sti_hda_best_encoder(struct drm_connector *connector) -{ - struct sti_hda_connector *hda_connector - = to_sti_hda_connector(connector); - - /* Best encoder is the one associated during connector creation */ - return hda_connector->encoder; -} - static const struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = { .get_modes = sti_hda_connector_get_modes, .mode_valid = sti_hda_connector_mode_valid, - .best_encoder = sti_hda_best_encoder, }; static enum drm_connector_status @@ -698,24 +681,29 @@ sti_hda_connector_detect(struct drm_connector *connector, bool force) return connector_status_connected; } -static void sti_hda_connector_destroy(struct drm_connector *connector) +static int sti_hda_late_register(struct drm_connector *connector) { struct sti_hda_connector *hda_connector = to_sti_hda_connector(connector); + struct sti_hda *hda = hda_connector->hda; + + if (hda_debugfs_init(hda, hda->drm_dev->primary)) { + DRM_ERROR("HDA debugfs setup failed\n"); + return -EINVAL; + } - drm_connector_unregister(connector); - drm_connector_cleanup(connector); - kfree(hda_connector); + return 0; } static const struct drm_connector_funcs sti_hda_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = sti_hda_connector_detect, - .destroy = sti_hda_connector_destroy, + .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .late_register = sti_hda_late_register, }; static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev) @@ -773,10 +761,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data) drm_connector_helper_add(drm_connector, &sti_hda_connector_helper_funcs); - err = drm_connector_register(drm_connector); - if (err) - goto err_connector; - err = 
drm_mode_connector_attach_encoder(drm_connector, encoder); if (err) { DRM_ERROR("Failed to attach a connector to a encoder\n"); @@ -786,15 +770,10 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data) /* force to disable hd dacs at startup */ hda_enable_hd_dacs(hda, false); - if (hda_debugfs_init(hda, drm_dev->primary)) - DRM_ERROR("HDA debugfs setup failed\n"); - return 0; err_sysfs: - drm_connector_unregister(drm_connector); -err_connector: - drm_connector_cleanup(drm_connector); + drm_bridge_remove(bridge); return -EINVAL; } diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 6ef0715bd5b9..8d1402b245bf 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -628,12 +628,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs); DBGFS_DUMP("\n", HDMI_CFG); @@ -690,7 +684,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data) DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -897,20 +890,10 @@ static int sti_hdmi_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -struct drm_encoder *sti_hdmi_best_encoder(struct drm_connector *connector) -{ - struct sti_hdmi_connector *hdmi_connector - = to_sti_hdmi_connector(connector); - - /* Best encoder is the one associated during connector creation */ - return hdmi_connector->encoder; -} - static const struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = { .get_modes = sti_hdmi_connector_get_modes, .mode_valid = sti_hdmi_connector_mode_valid, - .best_encoder = sti_hdmi_best_encoder, }; /* get detection status of display device */ @@ -932,16 +915,6 @@ sti_hdmi_connector_detect(struct drm_connector *connector, bool force) return connector_status_disconnected; } -static void sti_hdmi_connector_destroy(struct drm_connector *connector) -{ - struct sti_hdmi_connector *hdmi_connector - = to_sti_hdmi_connector(connector); - - drm_connector_unregister(connector); - drm_connector_cleanup(connector); - kfree(hdmi_connector); -} - static void sti_hdmi_connector_init_property(struct drm_device *drm_dev, struct drm_connector *connector) { @@ -1024,17 +997,31 @@ sti_hdmi_connector_get_property(struct drm_connector *connector, return -EINVAL; } +static int sti_hdmi_late_register(struct drm_connector *connector) +{ + struct sti_hdmi_connector *hdmi_connector + = to_sti_hdmi_connector(connector); + struct sti_hdmi *hdmi = hdmi_connector->hdmi; + + if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) { + DRM_ERROR("HDMI debugfs setup failed\n"); + return -EINVAL; + } + + return 0; +} + static const struct drm_connector_funcs sti_hdmi_connector_funcs = { - .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = sti_hdmi_connector_detect, - .destroy = sti_hdmi_connector_destroy, + .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .set_property = drm_atomic_helper_connector_set_property, .atomic_set_property = sti_hdmi_connector_set_property, .atomic_get_property = sti_hdmi_connector_get_property, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = 
drm_atomic_helper_connector_destroy_state, + .late_register = sti_hdmi_late_register, }; static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev) @@ -1095,10 +1082,6 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) /* initialise property */ sti_hdmi_connector_init_property(drm_dev, drm_connector); - err = drm_connector_register(drm_connector); - if (err) - goto err_connector; - err = drm_mode_connector_attach_encoder(drm_connector, encoder); if (err) { DRM_ERROR("Failed to attach a connector to a encoder\n"); @@ -1108,16 +1091,10 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) /* Enable default interrupts */ hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN); - if (hdmi_debugfs_init(hdmi, drm_dev->primary)) - DRM_ERROR("HDMI debugfs setup failed\n"); - return 0; err_sysfs: - drm_connector_unregister(drm_connector); -err_connector: - drm_connector_cleanup(drm_connector); - + drm_bridge_remove(bridge); return -EINVAL; } diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 1edec29b9e45..33d2f42550cc 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -555,14 +555,8 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; int cmd, cmd_offset, infoxp70; void *virt; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "%s: (vaddr = 0x%p)", sti_plane_to_str(&hqvdp->plane), hqvdp->regs); @@ -630,7 +624,6 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data) seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -1241,6 +1234,33 @@ static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = { .atomic_disable = sti_hqvdp_atomic_disable, }; +static void sti_hqvdp_destroy(struct drm_plane *drm_plane) +{ + DRM_DEBUG_DRIVER("\n"); + + drm_plane_helper_disable(drm_plane); + drm_plane_cleanup(drm_plane); +} + +static int sti_hqvdp_late_register(struct drm_plane *drm_plane) +{ + struct sti_plane *plane = to_sti_plane(drm_plane); + struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane); + + return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary); +} + +struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = sti_hqvdp_destroy, + .set_property = sti_plane_set_property, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, + .late_register = sti_hqvdp_late_register, +}; + static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev, struct device *dev, int desc) { @@ -1253,7 +1273,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev, sti_hqvdp_init(hqvdp); res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1, - &sti_plane_helpers_funcs, + &sti_hqvdp_plane_helpers_funcs, hqvdp_supported_formats, ARRAY_SIZE(hqvdp_supported_formats), DRM_PLANE_TYPE_OVERLAY, NULL); @@ -1266,9 +1286,6 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev, sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY); - if (hqvdp_debugfs_init(hqvdp, drm_dev->primary)) - DRM_ERROR("HQVDP debugfs setup failed\n"); - return &hqvdp->plane.drm_plane; } diff --git 
a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c index aed7801b51f7..1885c7ab5a8b 100644 --- a/drivers/gpu/drm/sti/sti_mixer.c +++ b/drivers/gpu/drm/sti/sti_mixer.c @@ -151,12 +151,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg) { struct drm_info_node *node = s->private; struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "%s: (vaddr = 0x%p)", sti_mixer_to_str(mixer), mixer->regs); @@ -176,7 +170,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg) mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -188,7 +181,7 @@ static struct drm_info_list mixer1_debugfs_files[] = { { "mixer_aux", mixer_dbg_show, 0, NULL }, }; -static int mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor) +int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor) { unsigned int i; struct drm_info_list *mixer_debugfs_files; @@ -400,8 +393,5 @@ struct sti_mixer *sti_mixer_create(struct device *dev, DRM_DEBUG_DRIVER("%s created. Regs=%p\n", sti_mixer_to_str(mixer), mixer->regs); - if (mixer_debugfs_init(mixer, drm_dev->primary)) - DRM_ERROR("MIXER debugfs setup failed\n"); - return mixer; } diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h index 6f35fc086873..830a3c42d886 100644 --- a/drivers/gpu/drm/sti/sti_mixer.h +++ b/drivers/gpu/drm/sti/sti_mixer.h @@ -55,6 +55,8 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer, void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable); +int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor); + /* depth in Cross-bar control = z order */ #define GAM_MIXER_NB_DEPTH_LEVEL 6 diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c index f10c98d3f012..85cee9098439 100644 --- a/drivers/gpu/drm/sti/sti_plane.c +++ b/drivers/gpu/drm/sti/sti_plane.c @@ -106,17 +106,9 @@ void sti_plane_update_fps(struct sti_plane *plane, plane->fps_info.fips_str); } -static void sti_plane_destroy(struct drm_plane *drm_plane) -{ - DRM_DEBUG_DRIVER("\n"); - - drm_plane_helper_disable(drm_plane); - drm_plane_cleanup(drm_plane); -} - -static int sti_plane_set_property(struct drm_plane *drm_plane, - struct drm_property *property, - uint64_t val) +int sti_plane_set_property(struct drm_plane *drm_plane, + struct drm_property *property, + uint64_t val) { struct drm_device *dev = drm_plane->dev; struct sti_private *private = dev->dev_private; @@ -170,13 +162,3 @@ void sti_plane_init_property(struct sti_plane *plane, plane->drm_plane.base.id, sti_plane_to_str(plane), plane->zorder); } - -struct drm_plane_funcs sti_plane_helpers_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = sti_plane_destroy, - .set_property = sti_plane_set_property, - .reset = drm_atomic_helper_plane_reset, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, -}; diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h index c50a3b9f5d37..39d39f5b7dd9 100644 --- a/drivers/gpu/drm/sti/sti_plane.h +++ b/drivers/gpu/drm/sti/sti_plane.h @@ -11,8 +11,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_plane_helper.h> -extern struct drm_plane_funcs 
sti_plane_helpers_funcs; - #define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane) #define STI_PLANE_TYPE_SHIFT 8 @@ -83,6 +81,11 @@ const char *sti_plane_to_str(struct sti_plane *plane); void sti_plane_update_fps(struct sti_plane *plane, bool new_frame, bool new_field); + +int sti_plane_set_property(struct drm_plane *drm_plane, + struct drm_property *property, + uint64_t val); + void sti_plane_init_property(struct sti_plane *plane, enum drm_plane_type type); #endif diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index f983db5a59da..e25995b35715 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -112,6 +112,7 @@ struct sti_tvout { struct drm_encoder *hdmi; struct drm_encoder *hda; struct drm_encoder *dvo; + bool debugfs_registered; }; struct sti_tvout_encoder { @@ -515,13 +516,7 @@ static int tvout_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; struct drm_crtc *crtc; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs); @@ -587,7 +582,6 @@ static int tvout_dbg_show(struct seq_file *s, void *data) DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -632,8 +626,37 @@ static void sti_tvout_encoder_destroy(struct drm_encoder *encoder) kfree(sti_encoder); } +static int sti_tvout_late_register(struct drm_encoder *encoder) +{ + struct sti_tvout *tvout = to_sti_tvout(encoder); + int ret; + + if (tvout->debugfs_registered) + return 0; + + ret = tvout_debugfs_init(tvout, encoder->dev->primary); + if (ret) + return ret; + + tvout->debugfs_registered = true; + return 0; +} + +static void sti_tvout_early_unregister(struct drm_encoder *encoder) +{ + struct sti_tvout *tvout = to_sti_tvout(encoder); + + if (!tvout->debugfs_registered) + return; + + tvout_debugfs_exit(tvout, encoder->dev->primary); + tvout->debugfs_registered = false; +} + static const struct drm_encoder_funcs sti_tvout_encoder_funcs = { .destroy = sti_tvout_encoder_destroy, + .late_register = sti_tvout_late_register, + .early_unregister = sti_tvout_early_unregister, }; static void sti_dvo_encoder_enable(struct drm_encoder *encoder) @@ -820,9 +843,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data) sti_tvout_create_encoders(drm_dev, tvout); - if (tvout_debugfs_init(tvout, drm_dev->primary)) - DRM_ERROR("TVOUT debugfs setup failed\n"); - return 0; } @@ -830,11 +850,8 @@ static void sti_tvout_unbind(struct device *dev, struct device *master, void *data) { struct sti_tvout *tvout = dev_get_drvdata(dev); - struct drm_device *drm_dev = data; sti_tvout_destroy_encoders(tvout); - - tvout_debugfs_exit(tvout, drm_dev->primary); } static const struct component_ops sti_tvout_ops = { diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c index 523ed19f5ac6..47634a0251fc 100644 --- a/drivers/gpu/drm/sti/sti_vid.c +++ b/drivers/gpu/drm/sti/sti_vid.c @@ -92,12 +92,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg) { struct drm_info_node *node = s->private; struct sti_vid *vid = (struct sti_vid *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs); @@ -122,7 +116,6 @@ static 
int vid_dbg_show(struct seq_file *s, void *arg) DBGFS_DUMP(VID_CSAT); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -130,7 +123,7 @@ static struct drm_info_list vid_debugfs_files[] = { { "vid", vid_dbg_show, 0, NULL }, }; -static int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor) +int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor) { unsigned int i; @@ -227,8 +220,5 @@ struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev, sti_vid_init(vid); - if (vid_debugfs_init(vid, drm_dev->primary)) - DRM_ERROR("VID debugfs setup failed\n"); - return vid; } diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h index 6c842344f3d8..fdc90f922a05 100644 --- a/drivers/gpu/drm/sti/sti_vid.h +++ b/drivers/gpu/drm/sti/sti_vid.h @@ -26,4 +26,6 @@ void sti_vid_disable(struct sti_vid *vid); struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev, int id, void __iomem *baseaddr); +int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor); + #endif diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c index 4182a21f5923..f628b6d8f23f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.c +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c @@ -51,10 +51,22 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc, { struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); struct sun4i_drv *drv = scrtc->drv; + struct drm_pending_vblank_event *event = crtc->state->event; DRM_DEBUG_DRIVER("Committing plane changes\n"); sun4i_backend_commit(drv->backend); + + if (event) { + crtc->state->event = NULL; + + spin_lock_irq(&crtc->dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); + } } static void sun4i_crtc_disable(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 76e922bb60e5..9a67f927a53e 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -24,34 +24,6 @@ #include "sun4i_layer.h" #include "sun4i_tcon.h" -static int sun4i_drv_connector_plug_all(struct drm_device *drm) -{ - struct drm_connector *connector, *failed; - int ret; - - mutex_lock(&drm->mode_config.mutex); - list_for_each_entry(connector, &drm->mode_config.connector_list, head) { - ret = drm_connector_register(connector); - if (ret) { - failed = connector; - goto err; - } - } - mutex_unlock(&drm->mode_config.mutex); - return 0; - -err: - list_for_each_entry(connector, &drm->mode_config.connector_list, head) { - if (failed == connector) - break; - - drm_connector_unregister(connector); - } - mutex_unlock(&drm->mode_config.mutex); - - return ret; -} - static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe) { struct sun4i_drv *drv = drm->dev_private; @@ -103,7 +75,7 @@ static struct drm_driver sun4i_drv_driver = { .dumb_create = drm_gem_cma_dumb_create, .dumb_destroy = drm_gem_dumb_destroy, .dumb_map_offset = drm_gem_cma_dumb_map_offset, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, /* PRIME Operations */ @@ -135,10 +107,6 @@ static int sun4i_drv_bind(struct device *dev) if (!drm) return -ENOMEM; - ret = drm_dev_set_unique(drm, dev_name(drm->dev)); - if (ret) - goto free_drm; - drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); if (!drv) { ret = -ENOMEM; @@ -187,14 +155,8 @@ 
static int sun4i_drv_bind(struct device *dev) if (ret) goto free_drm; - ret = sun4i_drv_connector_plug_all(drm); - if (ret) - goto unregister_drm; - return 0; -unregister_drm: - drm_dev_unregister(drm); free_drm: drm_dev_unref(drm); return ret; diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index ab6494818050..442cfe271688 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -90,19 +90,9 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -sun4i_rgb_best_encoder(struct drm_connector *connector) -{ - struct sun4i_rgb *rgb = - drm_connector_to_sun4i_rgb(connector); - - return &rgb->encoder; -} - static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = { .get_modes = sun4i_rgb_get_modes, .mode_valid = sun4i_rgb_mode_valid, - .best_encoder = sun4i_rgb_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c index bc047f923508..b84147896294 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tv.c +++ b/drivers/gpu/drm/sun4i/sun4i_tv.c @@ -526,18 +526,9 @@ static int sun4i_tv_comp_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -sun4i_tv_comp_best_encoder(struct drm_connector *connector) -{ - struct sun4i_tv *tv = drm_connector_to_sun4i_tv(connector); - - return &tv->encoder; -} - static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = { .get_modes = sun4i_tv_comp_get_modes, .mode_valid = sun4i_tv_comp_mode_valid, - .best_encoder = sun4i_tv_comp_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index b59c3bf0df44..a177a42a9849 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -93,7 +93,7 @@ static int tegra_atomic_commit(struct drm_device *drm, * the software side now. 
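
tegra, like sti earlier, picks up the new drm_atomic_helper_swap_state() signature in the hunk just below: the helper now takes the state plus a stall flag instead of the device. A minimal sketch of the surrounding nonblocking commit, with hypothetical foo_* helpers standing in for the driver's scheduling functions:

        static int foo_atomic_commit(struct drm_device *drm,
                                     struct drm_atomic_state *state,
                                     bool nonblock)
        {
                int err;

                err = drm_atomic_helper_prepare_planes(drm, state);
                if (err < 0)
                        return err;

                /* stall == true: wait for any previous commit to swap first */
                drm_atomic_helper_swap_state(state, true);

                if (nonblock)
                        foo_atomic_schedule(drm, state); /* worker finishes it */
                else
                        foo_atomic_complete(drm, state);

                return 0;
        }
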
*/ - drm_atomic_helper_swap_state(drm, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) tegra_atomic_schedule(tegra, state); diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index f52d6cb24ff5..0ddcce1b420d 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -239,8 +239,6 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output); void tegra_output_exit(struct tegra_output *output); int tegra_output_connector_get_modes(struct drm_connector *connector); -struct drm_encoder * -tegra_output_connector_best_encoder(struct drm_connector *connector); enum drm_connector_status tegra_output_connector_detect(struct drm_connector *connector, bool force); void tegra_output_connector_destroy(struct drm_connector *connector); diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index d1239ebc190f..099cccb2fbcb 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -794,7 +794,6 @@ tegra_dsi_connector_mode_valid(struct drm_connector *connector, static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = { .get_modes = tegra_output_connector_get_modes, .mode_valid = tegra_dsi_connector_mode_valid, - .best_encoder = tegra_output_connector_best_encoder, }; static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index b7ef4929e347..2fdb8796443e 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -806,7 +806,6 @@ static const struct drm_connector_helper_funcs tegra_hdmi_connector_helper_funcs = { .get_modes = tegra_output_connector_get_modes, .mode_valid = tegra_hdmi_connector_mode_valid, - .best_encoder = tegra_output_connector_best_encoder, }; static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = { diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 46664b622270..1480f6aaffe4 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -42,14 +42,6 @@ int tegra_output_connector_get_modes(struct drm_connector *connector) return err; } -struct drm_encoder * -tegra_output_connector_best_encoder(struct drm_connector *connector) -{ - struct tegra_output *output = connector_to_output(connector); - - return &output->encoder; -} - enum drm_connector_status tegra_output_connector_detect(struct drm_connector *connector, bool force) { diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index e246334e0252..a131b44e2d6f 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -112,7 +112,6 @@ tegra_rgb_connector_mode_valid(struct drm_connector *connector, static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = { .get_modes = tegra_output_connector_get_modes, .mode_valid = tegra_rgb_connector_mode_valid, - .best_encoder = tegra_output_connector_best_encoder, }; static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = { diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 757c6e8603af..34958d71284b 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -1087,7 +1087,6 @@ tegra_sor_connector_mode_valid(struct drm_connector *connector, static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = { .get_modes = tegra_sor_connector_get_modes, .mode_valid = tegra_sor_connector_mode_valid, - .best_encoder = tegra_output_connector_best_encoder, }; static const struct 
drm_encoder_funcs tegra_sor_encoder_funcs = { diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 709bc903524d..d27809372d54 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -541,7 +541,6 @@ static struct drm_driver tilcdc_driver = { .load = tilcdc_load, .unload = tilcdc_unload, .lastclose = tilcdc_lastclose, - .set_busid = drm_platform_set_busid, .irq_handler = tilcdc_irq, .irq_preinstall = tilcdc_irq_preinstall, .irq_postinstall = tilcdc_irq_postinstall, @@ -549,7 +548,7 @@ static struct drm_driver tilcdc_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = tilcdc_enable_vblank, .disable_vblank = tilcdc_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index b87afee44995..f92ea9579674 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -376,7 +376,7 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc, spin_lock_irqsave(&dev->event_lock, flags); if (event) - drm_send_vblank_event(dev, 0, event); + drm_crtc_send_vblank_event(crtc, event); spin_unlock_irqrestore(&dev->event_lock, flags); crtc->primary->fb = fb; diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index e5a9d3aaf45f..59adcf8532dd 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -291,8 +291,6 @@ static void vc4_bo_cache_free_old(struct drm_device *dev) /* Called on the last userspace/kernel unreference of the BO. Returns * it to the BO cache if possible, otherwise frees it. - * - * Note that this is called with the struct_mutex held. */ void vc4_free_object(struct drm_gem_object *gem_bo) { diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 904d0754ad78..4c0f26a644a3 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -175,20 +175,22 @@ vc4_crtc_lut_load(struct drm_crtc *crtc) HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]); } -static void +static int vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size) + uint32_t size) { struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); u32 i; - for (i = start; i < start + size; i++) { + for (i = 0; i < size; i++) { vc4_crtc->lut_r[i] = r[i] >> 8; vc4_crtc->lut_g[i] = g[i] >> 8; vc4_crtc->lut_b[i] = b[i] >> 8; } vc4_crtc_lut_load(crtc); + + return 0; } static u32 vc4_get_fifo_full_level(u32 format) @@ -395,6 +397,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_plane *plane; unsigned long flags; + const struct drm_plane_state *plane_state; u32 dlist_count = 0; int ret; @@ -404,18 +407,8 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, if (hweight32(state->connector_mask) > 1) return -EINVAL; - drm_atomic_crtc_state_for_each_plane(plane, state) { - struct drm_plane_state *plane_state = - state->state->plane_states[drm_plane_index(plane)]; - - /* plane might not have changed, in which case take - * current state: - */ - if (!plane_state) - plane_state = plane->state; - + drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state) dlist_count += vc4_plane_dlist_size(plane_state); - } dlist_count++; /* Account for SCALER_CTL0_END. 
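
The vc4_crtc_atomic_check() hunk above swaps the open-coded walk over state->state->plane_states[] for drm_atomic_crtc_state_for_each_plane_state(), which already falls back to a plane's current state when this update does not touch it. Distilled:

        const struct drm_plane_state *plane_state;
        struct drm_plane *plane;
        u32 dlist_count = 1;    /* the extra slot for SCALER_CTL0_END */

        drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
                dlist_count += vc4_plane_dlist_size(plane_state);
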
*/ diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c index 9817dbfa4ac3..dba1114297e4 100644 --- a/drivers/gpu/drm/vc4/vc4_dpi.c +++ b/drivers/gpu/drm/vc4/vc4_dpi.c @@ -208,14 +208,6 @@ static int vc4_dpi_connector_get_modes(struct drm_connector *connector) return 0; } -static struct drm_encoder * -vc4_dpi_connector_best_encoder(struct drm_connector *connector) -{ - struct vc4_dpi_connector *dpi_connector = - to_vc4_dpi_connector(connector); - return dpi_connector->encoder; -} - static const struct drm_connector_funcs vc4_dpi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = vc4_dpi_connector_detect, @@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_dpi_connector_funcs = { static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = { .get_modes = vc4_dpi_connector_get_modes, - .best_encoder = vc4_dpi_connector_best_encoder, }; static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev, diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 3446ece21b4a..9e88231b8906 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -99,7 +99,7 @@ static struct drm_driver vc4_drm_driver = { #endif .gem_create_object = vc4_create_object, - .gem_free_object = vc4_free_object, + .gem_free_object_unlocked = vc4_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, @@ -176,7 +176,6 @@ static int vc4_drm_bind(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm; - struct drm_connector *connector; struct vc4_dev *vc4; int ret = 0; @@ -211,22 +210,10 @@ static int vc4_drm_bind(struct device *dev) if (ret < 0) goto unbind_all; - /* Connector registration has to occur after DRM device - * registration, because it creates sysfs entries based on the - * DRM device. - */ - list_for_each_entry(connector, &drm->mode_config.connector_list, head) { - ret = drm_connector_register(connector); - if (ret) - goto unregister; - } - vc4_kms_load(drm); return 0; -unregister: - drm_dev_unregister(drm); unbind_all: component_unbind_all(dev, drm); gem_destroy: diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 37cac59401d7..c799baabc008 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -469,7 +469,7 @@ int vc4_kms_load(struct drm_device *dev); struct drm_plane *vc4_plane_init(struct drm_device *dev, enum drm_plane_type type); u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist); -u32 vc4_plane_dlist_size(struct drm_plane_state *state); +u32 vc4_plane_dlist_size(const struct drm_plane_state *state); void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 46899d6de675..6155e8aca1c6 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -53,10 +53,8 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state) { unsigned int i; - mutex_lock(&dev->struct_mutex); for (i = 0; i < state->user_state.bo_count; i++) - drm_gem_object_unreference(state->bo[i]); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(state->bo[i]); kfree(state); } @@ -687,11 +685,9 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned i; - /* Need the struct lock for drm_gem_object_unreference(). 
*/ - mutex_lock(&dev->struct_mutex); if (exec->bo) { for (i = 0; i < exec->bo_count; i++) - drm_gem_object_unreference(&exec->bo[i]->base); + drm_gem_object_unreference_unlocked(&exec->bo[i]->base); kfree(exec->bo); } @@ -699,9 +695,8 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) struct vc4_bo *bo = list_first_entry(&exec->unref_list, struct vc4_bo, unref_head); list_del(&bo->unref_head); - drm_gem_object_unreference(&bo->base.base); + drm_gem_object_unreference_unlocked(&bo->base.base); } - mutex_unlock(&dev->struct_mutex); mutex_lock(&vc4->power_lock); if (--vc4->power_refcount == 0) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index fd2644d231ff..68df91c3f860 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -208,14 +208,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) return ret; } -static struct drm_encoder * -vc4_hdmi_connector_best_encoder(struct drm_connector *connector) -{ - struct vc4_hdmi_connector *hdmi_connector = - to_vc4_hdmi_connector(connector); - return hdmi_connector->encoder; -} - static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = vc4_hdmi_connector_detect, @@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = { .get_modes = vc4_hdmi_connector_get_modes, - .best_encoder = vc4_hdmi_connector_best_encoder, }; static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev, diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index cb37751bc99f..8f4d5ffc32be 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -111,6 +111,8 @@ static int vc4_atomic_commit(struct drm_device *dev, int i; uint64_t wait_seqno = 0; struct vc4_commit *c; + struct drm_plane *plane; + struct drm_plane_state *new_state; c = commit_init(state); if (!c) @@ -130,13 +132,7 @@ static int vc4_atomic_commit(struct drm_device *dev, return ret; } - for (i = 0; i < dev->mode_config.num_total_plane; i++) { - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *new_state = state->plane_states[i]; - - if (!plane) - continue; - + for_each_plane_in_state(state, plane, new_state, i) { if ((plane->state->fb != new_state->fb) && new_state->fb) { struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(new_state->fb, 0); @@ -152,7 +148,7 @@ static int vc4_atomic_commit(struct drm_device *dev, * the software side now. 
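 * [Editor's note: drm_atomic_helper_swap_state() changed signature in
 * this cycle: it no longer takes the drm_device, and instead takes a
 * bool asking the helper to stall for preceding commits before the old
 * state is swapped out. The tegra and vc4 call sites in this series
 * both pass true, preserving the old blocking behaviour:
 *
 *	drm_atomic_helper_swap_state(state, true);
 * ]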
*/ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); /* * Everything below can be run asynchronously without the need to grab diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 4037b52fde31..5d2c3d9fd17a 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -690,9 +690,10 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist) return vc4_state->dlist_count; } -u32 vc4_plane_dlist_size(struct drm_plane_state *state) +u32 vc4_plane_dlist_size(const struct drm_plane_state *state) { - struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); + const struct vc4_plane_state *vc4_state = + container_of(state, typeof(*vc4_state), base); return vc4_state->dlist_count; } diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 341f9be3dde6..35ea5d02a827 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -235,7 +235,7 @@ static const struct file_operations vgem_driver_fops = { static struct drm_driver vgem_driver = { .driver_features = DRIVER_GEM, - .gem_free_object = vgem_gem_free_object, + .gem_free_object_unlocked = vgem_gem_free_object, .gem_vm_ops = &vgem_gem_vm_ops, .ioctls = vgem_ioctls, .fops = &vgem_driver_fops, @@ -260,8 +260,6 @@ static int __init vgem_init(void) goto out; } - drm_dev_set_unique(vgem_device, "vgem"); - ret = drm_dev_register(vgem_device, 0); if (ret) diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c index 4f20742e7788..a04ef1c992d9 100644 --- a/drivers/gpu/drm/via/via_mm.c +++ b/drivers/gpu/drm/via/via_mm.c @@ -208,7 +208,7 @@ void via_reclaim_buffers_locked(struct drm_device *dev, struct via_file_private *file_priv = file->driver_priv; struct via_memblock *entry, *next; - if (!(file->minor->master && file->master->lock.hw_lock)) + if (!(dev->master && file->master->lock.hw_lock)) return; drm_legacy_idlelock_take(&file->master->lock); diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index d4305da88f44..ac758cdbc1bc 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -29,8 +29,8 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_atomic_helper.h> -#define XRES_MIN 320 -#define YRES_MIN 200 +#define XRES_MIN 32 +#define YRES_MIN 32 #define XRES_DEF 1024 #define YRES_DEF 768 @@ -38,138 +38,11 @@ #define XRES_MAX 8192 #define YRES_MAX 8192 -static void -virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev, - struct virtio_gpu_output *output) -{ - output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR); - output->cursor.resource_id = 0; - virtio_gpu_cursor_ping(vgdev, output); -} - -static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file_priv, - uint32_t handle, - uint32_t width, - uint32_t height, - int32_t hot_x, int32_t hot_y) -{ - struct virtio_gpu_device *vgdev = crtc->dev->dev_private; - struct virtio_gpu_output *output = - container_of(crtc, struct virtio_gpu_output, crtc); - struct drm_gem_object *gobj = NULL; - struct virtio_gpu_object *qobj = NULL; - struct virtio_gpu_fence *fence = NULL; - int ret = 0; - - if (handle == 0) { - virtio_gpu_hide_cursor(vgdev, output); - return 0; - } - - /* lookup the cursor */ - gobj = drm_gem_object_lookup(file_priv, handle); - if (gobj == NULL) - return -ENOENT; - - qobj = gem_to_virtio_gpu_obj(gobj); - - if (!qobj->hw_res_handle) { - ret = -EINVAL; - goto out; - } - - 
virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0, - cpu_to_le32(64), - cpu_to_le32(64), - 0, 0, &fence); - ret = virtio_gpu_object_reserve(qobj, false); - if (!ret) { - reservation_object_add_excl_fence(qobj->tbo.resv, - &fence->f); - fence_put(&fence->f); - virtio_gpu_object_unreserve(qobj); - virtio_gpu_object_wait(qobj, false); - } - - output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR); - output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle); - output->cursor.hot_x = cpu_to_le32(hot_x); - output->cursor.hot_y = cpu_to_le32(hot_y); - virtio_gpu_cursor_ping(vgdev, output); - ret = 0; - -out: - drm_gem_object_unreference_unlocked(gobj); - return ret; -} - -static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc, - int x, int y) -{ - struct virtio_gpu_device *vgdev = crtc->dev->dev_private; - struct virtio_gpu_output *output = - container_of(crtc, struct virtio_gpu_output, crtc); - - output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR); - output->cursor.pos.x = cpu_to_le32(x); - output->cursor.pos.y = cpu_to_le32(y); - virtio_gpu_cursor_ping(vgdev, output); - return 0; -} - -static int virtio_gpu_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t flags) -{ - struct virtio_gpu_device *vgdev = crtc->dev->dev_private; - struct virtio_gpu_output *output = - container_of(crtc, struct virtio_gpu_output, crtc); - struct drm_plane *plane = crtc->primary; - struct virtio_gpu_framebuffer *vgfb; - struct virtio_gpu_object *bo; - unsigned long irqflags; - uint32_t handle; - - plane->fb = fb; - vgfb = to_virtio_gpu_framebuffer(plane->fb); - bo = gem_to_virtio_gpu_obj(vgfb->obj); - handle = bo->hw_res_handle; - - DRM_DEBUG("handle 0x%x%s, crtc %dx%d\n", handle, - bo->dumb ? 
", dumb" : "", - crtc->mode.hdisplay, crtc->mode.vdisplay); - if (bo->dumb) { - virtio_gpu_cmd_transfer_to_host_2d - (vgdev, handle, 0, - cpu_to_le32(crtc->mode.hdisplay), - cpu_to_le32(crtc->mode.vdisplay), - 0, 0, NULL); - } - virtio_gpu_cmd_set_scanout(vgdev, output->index, handle, - crtc->mode.hdisplay, - crtc->mode.vdisplay, 0, 0); - virtio_gpu_cmd_resource_flush(vgdev, handle, 0, 0, - crtc->mode.hdisplay, - crtc->mode.vdisplay); - - if (event) { - spin_lock_irqsave(&crtc->dev->event_lock, irqflags); - drm_send_vblank_event(crtc->dev, -1, event); - spin_unlock_irqrestore(&crtc->dev->event_lock, irqflags); - } - - return 0; -} - static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = { - .cursor_set2 = virtio_gpu_crtc_cursor_set, - .cursor_move = virtio_gpu_crtc_cursor_move, .set_config = drm_atomic_helper_set_config, .destroy = drm_crtc_cleanup, - .page_flip = virtio_gpu_page_flip, + .page_flip = drm_atomic_helper_page_flip, .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, @@ -267,6 +140,7 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, spin_lock_irqsave(&crtc->dev->event_lock, flags); if (crtc->state->event) drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } @@ -341,15 +215,6 @@ static int virtio_gpu_conn_mode_valid(struct drm_connector *connector, return MODE_BAD; } -static struct drm_encoder* -virtio_gpu_best_encoder(struct drm_connector *connector) -{ - struct virtio_gpu_output *virtio_gpu_output = - drm_connector_to_virtio_gpu_output(connector); - - return &virtio_gpu_output->enc; -} - static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = { .mode_set = virtio_gpu_enc_mode_set, .enable = virtio_gpu_enc_enable, @@ -359,7 +224,6 @@ static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = { static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = { .get_modes = virtio_gpu_conn_get_modes, .mode_valid = virtio_gpu_conn_mode_valid, - .best_encoder = virtio_gpu_best_encoder, }; static enum drm_connector_status virtio_gpu_conn_detect( @@ -406,7 +270,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) struct drm_connector *connector = &output->conn; struct drm_encoder *encoder = &output->enc; struct drm_crtc *crtc = &output->crtc; - struct drm_plane *plane; + struct drm_plane *primary, *cursor; output->index = index; if (index == 0) { @@ -415,13 +279,17 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) output->info.r.height = cpu_to_le32(YRES_DEF); } - plane = virtio_gpu_plane_init(vgdev, index); - if (IS_ERR(plane)) - return PTR_ERR(plane); - drm_crtc_init_with_planes(dev, crtc, plane, NULL, + primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index); + if (IS_ERR(primary)) + return PTR_ERR(primary); + cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index); + if (IS_ERR(cursor)) + return PTR_ERR(cursor); + drm_crtc_init_with_planes(dev, crtc, primary, cursor, &virtio_gpu_crtc_funcs, NULL); drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs); - plane->crtc = crtc; + primary->crtc = crtc; + cursor->crtc = crtc; drm_connector_init(dev, connector, &virtio_gpu_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); @@ -466,6 +334,24 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev, return &virtio_gpu_fb->base; } +static void 
vgdev_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, state); + drm_atomic_helper_commit_modeset_enables(dev, state); + drm_atomic_helper_commit_planes(dev, state, true); + + drm_atomic_helper_commit_hw_done(state); + + drm_atomic_helper_wait_for_vblanks(dev, state); + drm_atomic_helper_cleanup_planes(dev, state); +} + +struct drm_mode_config_helper_funcs virtio_mode_config_helpers = { + .atomic_commit_tail = vgdev_atomic_commit_tail, +}; + static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = { .fb_create = virtio_gpu_user_framebuffer_create, .atomic_check = drm_atomic_helper_check, @@ -477,7 +363,8 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) int i; drm_mode_config_init(vgdev->ddev); - vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs; + vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs; + vgdev->ddev->mode_config.helper_private = &virtio_mode_config_helpers; /* modes will be validated against the framebuffer size */ vgdev->ddev->mode_config.min_width = XRES_MIN; diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c index 88a39165edd5..7f0e93f87a55 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c +++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c @@ -27,16 +27,6 @@ #include "virtgpu_drv.h" -int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master) -{ - struct pci_dev *pdev = dev->pdev; - - if (pdev) { - return drm_pci_set_busid(dev, master); - } - return 0; -} - static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev) { struct apertures_struct *ap; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 3cc7afa77a35..c13f70cfc461 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -117,7 +117,6 @@ static const struct file_operations virtio_gpu_driver_fops = { static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC, - .set_busid = drm_virtio_set_busid, .load = virtio_gpu_driver_load, .unload = virtio_gpu_driver_unload, .open = virtio_gpu_driver_open, @@ -143,7 +142,7 @@ static struct drm_driver driver = { .gem_prime_vunmap = virtgpu_gem_prime_vunmap, .gem_prime_mmap = virtgpu_gem_prime_mmap, - .gem_free_object = virtio_gpu_gem_free_object, + .gem_free_object_unlocked = virtio_gpu_gem_free_object, .gem_open_object = virtio_gpu_gem_object_open, .gem_close_object = virtio_gpu_gem_object_close, .fops = &virtio_gpu_driver_fops, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 0a54f43f846a..b18ef3111f0c 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -33,6 +33,7 @@ #include <drm/drmP.h> #include <drm/drm_gem.h> +#include <drm/drm_atomic.h> #include <drm/drm_crtc_helper.h> #include <ttm/ttm_bo_api.h> #include <ttm/ttm_bo_driver.h> @@ -48,7 +49,6 @@ #define DRIVER_PATCHLEVEL 1 /* virtgpu_drm_bus.c */ -int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master); int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev); struct virtio_gpu_object { @@ -335,6 +335,7 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); /* virtio_gpu_plane.c */ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, + enum drm_plane_type type, int index); /* virtio_gpu_ttm.c */ diff --git 
a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 70b44a2345ab..925ca25209df 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -38,6 +38,10 @@ static const uint32_t virtio_gpu_formats[] = { DRM_FORMAT_ABGR8888, }; +static const uint32_t virtio_gpu_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + static void virtio_gpu_plane_destroy(struct drm_plane *plane) { kfree(plane); @@ -58,16 +62,22 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, return 0; } -static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) +static void virtio_gpu_primary_plane_update(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct drm_device *dev = plane->dev; struct virtio_gpu_device *vgdev = dev->dev_private; - struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(plane->crtc); + struct virtio_gpu_output *output = NULL; struct virtio_gpu_framebuffer *vgfb; struct virtio_gpu_object *bo; uint32_t handle; + if (plane->state->crtc) + output = drm_crtc_to_virtio_gpu_output(plane->state->crtc); + if (old_state->crtc) + output = drm_crtc_to_virtio_gpu_output(old_state->crtc); + WARN_ON(!output); + if (plane->state->fb) { vgfb = to_virtio_gpu_framebuffer(plane->state->fb); bo = gem_to_virtio_gpu_obj(vgfb->obj); @@ -75,55 +85,149 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, if (bo->dumb) { virtio_gpu_cmd_transfer_to_host_2d (vgdev, handle, 0, - cpu_to_le32(plane->state->crtc_w), - cpu_to_le32(plane->state->crtc_h), - plane->state->crtc_x, plane->state->crtc_y, NULL); + cpu_to_le32(plane->state->src_w >> 16), + cpu_to_le32(plane->state->src_h >> 16), + plane->state->src_x >> 16, + plane->state->src_y >> 16, NULL); } } else { handle = 0; } - DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d\n", handle, + DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle, plane->state->crtc_w, plane->state->crtc_h, - plane->state->crtc_x, plane->state->crtc_y); + plane->state->crtc_x, plane->state->crtc_y, + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16); virtio_gpu_cmd_set_scanout(vgdev, output->index, handle, - plane->state->crtc_w, - plane->state->crtc_h, - plane->state->crtc_x, - plane->state->crtc_y); + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16); virtio_gpu_cmd_resource_flush(vgdev, handle, - plane->state->crtc_x, - plane->state->crtc_y, - plane->state->crtc_w, - plane->state->crtc_h); + plane->state->src_x >> 16, + plane->state->src_y >> 16, + plane->state->src_w >> 16, + plane->state->src_h >> 16); } +static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_device *dev = plane->dev; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_output *output = NULL; + struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_fence *fence = NULL; + struct virtio_gpu_object *bo = NULL; + uint32_t handle; + int ret = 0; -static const struct drm_plane_helper_funcs virtio_gpu_plane_helper_funcs = { + if (plane->state->crtc) + output = drm_crtc_to_virtio_gpu_output(plane->state->crtc); + if (old_state->crtc) + output = drm_crtc_to_virtio_gpu_output(old_state->crtc); + WARN_ON(!output); + + if (plane->state->fb) { + vgfb = to_virtio_gpu_framebuffer(plane->state->fb); + bo = gem_to_virtio_gpu_obj(vgfb->obj); + handle = 
bo->hw_res_handle; + } else { + handle = 0; + } + + if (bo && bo->dumb && (plane->state->fb != old_state->fb)) { + /* new cursor -- update & wait */ + virtio_gpu_cmd_transfer_to_host_2d + (vgdev, handle, 0, + cpu_to_le32(plane->state->crtc_w), + cpu_to_le32(plane->state->crtc_h), + 0, 0, &fence); + ret = virtio_gpu_object_reserve(bo, false); + if (!ret) { + reservation_object_add_excl_fence(bo->tbo.resv, + &fence->f); + fence_put(&fence->f); + fence = NULL; + virtio_gpu_object_unreserve(bo); + virtio_gpu_object_wait(bo, false); + } + } + + if (plane->state->fb != old_state->fb) { + DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle, + plane->state->crtc_x, + plane->state->crtc_y, + plane->state->fb ? plane->state->fb->hot_x : 0, + plane->state->fb ? plane->state->fb->hot_y : 0); + output->cursor.hdr.type = + cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR); + output->cursor.resource_id = cpu_to_le32(handle); + if (plane->state->fb) { + output->cursor.hot_x = + cpu_to_le32(plane->state->fb->hot_x); + output->cursor.hot_y = + cpu_to_le32(plane->state->fb->hot_y); + } else { + output->cursor.hot_x = cpu_to_le32(0); + output->cursor.hot_y = cpu_to_le32(0); + } + } else { + DRM_DEBUG("move +%d+%d\n", + plane->state->crtc_x, + plane->state->crtc_y); + output->cursor.hdr.type = + cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR); + } + output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x); + output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y); + virtio_gpu_cursor_ping(vgdev, output); +} + +static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = { + .atomic_check = virtio_gpu_plane_atomic_check, + .atomic_update = virtio_gpu_primary_plane_update, +}; + +static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = { .atomic_check = virtio_gpu_plane_atomic_check, - .atomic_update = virtio_gpu_plane_atomic_update, + .atomic_update = virtio_gpu_cursor_plane_update, }; struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, + enum drm_plane_type type, int index) { struct drm_device *dev = vgdev->ddev; + const struct drm_plane_helper_funcs *funcs; struct drm_plane *plane; - int ret; + const uint32_t *formats; + int ret, nformats; plane = kzalloc(sizeof(*plane), GFP_KERNEL); if (!plane) return ERR_PTR(-ENOMEM); + if (type == DRM_PLANE_TYPE_CURSOR) { + formats = virtio_gpu_cursor_formats; + nformats = ARRAY_SIZE(virtio_gpu_cursor_formats); + funcs = &virtio_gpu_cursor_helper_funcs; + } else { + formats = virtio_gpu_formats; + nformats = ARRAY_SIZE(virtio_gpu_formats); + funcs = &virtio_gpu_primary_helper_funcs; + } ret = drm_universal_plane_init(dev, plane, 1 << index, &virtio_gpu_plane_funcs, - virtio_gpu_formats, - ARRAY_SIZE(virtio_gpu_formats), - DRM_PLANE_TYPE_PRIMARY, NULL); + formats, nformats, + type, NULL); if (ret) goto err_plane_init; - drm_plane_helper_add(plane, &virtio_gpu_plane_helper_funcs); + drm_plane_helper_add(plane, funcs); return plane; err_plane_init: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 67cebb23c940..aa04fb0159a7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -293,13 +293,10 @@ static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header) struct vmw_cmdbuf_man *man = header->man; u32 val; - if (sizeof(header->handle) > 4) - val = (header->handle >> 32); - else - val = 0; + val = upper_32_bits(header->handle); vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val); - val = (header->handle & 0xFFFFFFFFULL); + 
val = lower_32_bits(header->handle); val |= header->cb_context & SVGA_CB_CONTEXT_MASK; vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 9fcd8200d485..60646644bef3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1049,7 +1049,7 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev, if (unlikely(ret != 0)) return ERR_PTR(-ERESTARTSYS); - if (file_priv->is_master) { + if (drm_is_current_master(file_priv)) { mutex_unlock(&dev->master_mutex); return NULL; } @@ -1228,8 +1228,7 @@ static int vmw_master_set(struct drm_device *dev, } static void vmw_master_drop(struct drm_device *dev, - struct drm_file *file_priv, - bool from_release) + struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 1980e2a28265..9a90f824814e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -32,6 +32,7 @@ #include <drm/drmP.h> #include <drm/vmwgfx_drm.h> #include <drm/drm_hashtab.h> +#include <drm/drm_auth.h> #include <linux/suspend.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_object.h> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index e959df6ede83..26ac8e80a478 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -46,7 +46,7 @@ struct vmw_fence_manager { bool goal_irq_on; /* Protected by @goal_irq_mutex */ bool seqno_valid; /* Protected by @lock, and may not be set to true without the @goal_irq_mutex held. */ - unsigned ctx; + u64 ctx; }; struct vmw_user_fence { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 55231cce73a0..8a69d4da40b5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1404,9 +1404,9 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, return 0; } -void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, - u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size) +int vmw_du_crtc_gamma_set(struct drm_crtc *crtc, + u16 *r, u16 *g, u16 *b, + uint32_t size) { struct vmw_private *dev_priv = vmw_priv(crtc->dev); int i; @@ -1418,6 +1418,8 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8); vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8); } + + return 0; } int vmw_du_connector_dpms(struct drm_connector *connector, int mode) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 57203212c501..ff4803c107bc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -195,9 +195,9 @@ struct vmw_display_unit { void vmw_du_cleanup(struct vmw_display_unit *du); void vmw_du_crtc_save(struct drm_crtc *crtc); void vmw_du_crtc_restore(struct drm_crtc *crtc); -void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, +int vmw_du_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size); + uint32_t size); int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height, int32_t hot_x, int32_t hot_y); diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index cbd7c986d926..2df216b39cc5 100644 
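[Editor's note: the vmwgfx_cmdbuf.c hunk above replaces an open-coded
sizeof()/shift/mask split of the 64-bit command-buffer handle with the
generic upper_32_bits()/lower_32_bits() helpers from <linux/kernel.h>.
upper_32_bits() is deliberately written as two 16-bit shifts so it stays
well-defined even when handed a 32-bit value, which is what the removed
sizeof(header->handle) > 4 test was guarding against. A minimal sketch,
values illustrative only:

	#include <linux/kernel.h>

	static void demo(void)
	{
		u64 handle = 0x123456789abcdef0ULL;
		u32 hi = upper_32_bits(handle);	/* 0x12345678 */
		u32 lo = lower_32_bits(handle);	/* 0x9abcdef0 */
	}
]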
--- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -30,6 +30,7 @@ #define pr_fmt(fmt) "vga_switcheroo: " fmt +#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/debugfs.h> #include <linux/fb.h> @@ -308,7 +309,8 @@ static int register_client(struct pci_dev *pdev, * * Register vga client (GPU). Enable vga_switcheroo if another GPU and a * handler have already registered. The power state of the client is assumed - * to be ON. + * to be ON. Beforehand, vga_switcheroo_client_probe_defer() shall be called + * to ensure that all prerequisites are met. * * Return: 0 on success, -ENOMEM on memory allocation error. */ @@ -329,7 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client); * @id: client identifier * * Register audio client (audio device on a GPU). The power state of the - * client is assumed to be ON. + * client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer() + * shall be called to ensure that all prerequisites are met. * * Return: 0 on success, -ENOMEM on memory allocation error. */ @@ -376,6 +379,33 @@ find_active_client(struct list_head *head) } /** + * vga_switcheroo_client_probe_defer() - whether to defer probing a given client + * @pdev: client pci device + * + * Determine whether any prerequisites are not fulfilled to probe a given + * client. Drivers shall invoke this early on in their ->probe callback + * and return %-EPROBE_DEFER if it evaluates to %true. Thou shalt not + * register the client ere thou hast called this. + * + * Return: %true if probing should be deferred, otherwise %false. + */ +bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) +{ + if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { + /* + * apple-gmux is needed on pre-retina MacBook Pro + * to probe the panel if pdev is the inactive GPU. 
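+	 * [Editor's note: deferral only triggers while the registered
+	 * handler flags are still zero, so the check passes once
+	 * apple-gmux has probed. The intended caller pattern, sketched
+	 * with a hypothetical GPU driver's PCI probe (the driver name
+	 * is illustrative only):
+	 *
+	 *	static int mydrv_pci_probe(struct pci_dev *pdev,
+	 *				   const struct pci_device_id *ent)
+	 *	{
+	 *		if (vga_switcheroo_client_probe_defer(pdev))
+	 *			return -EPROBE_DEFER;
+	 *		...
+	 *	}
+	 * ]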
+ */ + if (apple_gmux_present() && pdev != vga_default_device() && + !vgasr_priv.handler_flags) + return true; + } + + return false; +} +EXPORT_SYMBOL(vga_switcheroo_client_probe_defer); + +/** * vga_switcheroo_get_client_state() - obtain power state of a given client * @pdev: client pci device * diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c index 70c28d19ea04..22cf60991df6 100644 --- a/drivers/media/platform/omap/omap_vout.c +++ b/drivers/media/platform/omap/omap_vout.c @@ -45,7 +45,7 @@ #include <media/v4l2-ioctl.h> #include <video/omapvrfb.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "omap_voutlib.h" #include "omap_voutdef.h" diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h index 9ccfe1f475a4..94b5d65afb19 100644 --- a/drivers/media/platform/omap/omap_voutdef.h +++ b/drivers/media/platform/omap/omap_voutdef.h @@ -11,7 +11,7 @@ #ifndef OMAP_VOUTDEF_H #define OMAP_VOUTDEF_H -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omapvrfb.h> #define YUYV_BPP 2 diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c index 80b0d88f125c..58a25fdf0cce 100644 --- a/drivers/media/platform/omap/omap_voutlib.c +++ b/drivers/media/platform/omap/omap_voutlib.c @@ -26,7 +26,7 @@ #include <linux/dma-mapping.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "omap_voutlib.h" diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index b3cc3ab63799..6fc156a3918d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node) goto free_uar; } - uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); + uar->bf_map = io_mapping_map_wc(priv->bf_mapping, + uar->index << PAGE_SHIFT, + PAGE_SIZE); if (!uar->bf_map) { err = -ENOMEM; goto unamp_uar; diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h index b56885c14839..ebb34dca60df 100644 --- a/drivers/staging/android/sync.h +++ b/drivers/staging/android/sync.h @@ -68,7 +68,8 @@ struct sync_timeline { /* protected by child_list_lock */ bool destroyed; - int context, value; + u64 context; + int value; struct list_head child_list_head; spinlock_t child_list_lock; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c index 8511c648a15c..9d78411a3bf7 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c @@ -14,7 +14,7 @@ #include <linux/platform_device.h> #include <linux/of.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omap-panel-data.h> struct panel_drv_data { @@ -25,7 +25,6 @@ struct panel_drv_data { struct omap_video_timings timings; - enum omap_dss_venc_type connector_type; bool invert_polarity; }; @@ -45,10 +44,6 @@ static const struct omap_video_timings tvc_pal_timings = { static const struct of_device_id tvc_of_match[]; -struct tvc_of_data { - enum omap_dss_venc_type connector_type; -}; - #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) static int tvc_connect(struct omap_dss_device *dssdev) @@ -99,7 +94,7 @@ static int tvc_enable(struct omap_dss_device *dssdev) in->ops.atv->set_timings(in, &ddata->timings); if 
(!ddata->dev->of_node) { - in->ops.atv->set_type(in, ddata->connector_type); + in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE); in->ops.atv->invert_vid_out_polarity(in, ddata->invert_polarity); @@ -207,7 +202,6 @@ static int tvc_probe_pdata(struct platform_device *pdev) ddata->in = in; - ddata->connector_type = pdata->connector_type; ddata->invert_polarity = pdata->invert_polarity; dssdev = &ddata->dssdev; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c index d811e6dcaef7..06e1db34541e 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c @@ -16,8 +16,7 @@ #include <drm/drm_edid.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> static const struct omap_video_timings dvic_default_timings = { .x_res = 640, @@ -236,46 +235,6 @@ static struct omap_dss_driver dvic_driver = { .detect = dvic_detect, }; -static int dvic_probe_pdata(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct connector_dvi_platform_data *pdata; - struct omap_dss_device *in, *dssdev; - int i2c_bus_num; - - pdata = dev_get_platdata(&pdev->dev); - i2c_bus_num = pdata->i2c_bus_num; - - if (i2c_bus_num != -1) { - struct i2c_adapter *adapter; - - adapter = i2c_get_adapter(i2c_bus_num); - if (!adapter) { - dev_err(&pdev->dev, - "Failed to get I2C adapter, bus %d\n", - i2c_bus_num); - return -EPROBE_DEFER; - } - - ddata->i2c_adapter = adapter; - } - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - i2c_put_adapter(ddata->i2c_adapter); - - dev_err(&pdev->dev, "Failed to find video source\n"); - return -EPROBE_DEFER; - } - - ddata->in = in; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int dvic_probe_of(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); @@ -313,23 +272,18 @@ static int dvic_probe(struct platform_device *pdev) struct omap_dss_device *dssdev; int r; + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; platform_set_drvdata(pdev, ddata); - if (dev_get_platdata(&pdev->dev)) { - r = dvic_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = dvic_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = dvic_probe_of(pdev); + if (r) + return r; ddata->timings = dvic_default_timings; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c index 6ee4129bc0c0..58d5803ede67 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c @@ -17,8 +17,7 @@ #include <drm/drm_edid.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> static const struct omap_video_timings hdmic_default_timings = { .x_res = 640, @@ -206,30 +205,6 @@ static struct omap_dss_driver hdmic_driver = { .set_hdmi_infoframe = hdmic_set_infoframe, }; -static int hdmic_probe_pdata(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct connector_hdmi_platform_data *pdata; - struct omap_dss_device *in, *dssdev; - - pdata = dev_get_platdata(&pdev->dev); - - ddata->hpd_gpio = -ENODEV; - - in = omap_dss_find_output(pdata->source); - if (in == 
NULL) { - dev_err(&pdev->dev, "Failed to find video source\n"); - return -EPROBE_DEFER; - } - - ddata->in = in; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int hdmic_probe_of(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); @@ -261,6 +236,9 @@ static int hdmic_probe(struct platform_device *pdev) struct omap_dss_device *dssdev; int r; + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; @@ -268,17 +246,9 @@ static int hdmic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ddata); ddata->dev = &pdev->dev; - if (dev_get_platdata(&pdev->dev)) { - r = hdmic_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = hdmic_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = hdmic_probe_of(pdev); + if (r) + return r; if (gpio_is_valid(ddata->hpd_gpio)) { r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c index 8c246c213e06..a9a67167cc3d 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c @@ -20,7 +20,7 @@ #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c index d9048b3df495..8c0953d069b7 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c @@ -15,8 +15,7 @@ #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; @@ -166,32 +165,6 @@ static const struct omapdss_dvi_ops tfp410_dvi_ops = { .get_timings = tfp410_get_timings, }; -static int tfp410_probe_pdata(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct encoder_tfp410_platform_data *pdata; - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&pdev->dev); - - ddata->pd_gpio = pdata->power_down_gpio; - - ddata->data_lines = pdata->data_lines; - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&pdev->dev, "Failed to find video source\n"); - return -ENODEV; - } - - ddata->in = in; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int tfp410_probe_of(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); @@ -225,23 +198,18 @@ static int tfp410_probe(struct platform_device *pdev) struct omap_dss_device *dssdev; int r; + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; platform_set_drvdata(pdev, ddata); - if (dev_get_platdata(&pdev->dev)) { - r = tfp410_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = tfp410_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = tfp410_probe_of(pdev); + if (r) + return r; if (gpio_is_valid(ddata->pd_gpio)) { r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio, diff --git 
a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c index 677e2545fcbe..80dc47347e21 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c @@ -16,8 +16,7 @@ #include <linux/platform_device.h> #include <linux/gpio/consumer.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c index e780fd4f8b46..ace3d818afe5 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c @@ -16,7 +16,7 @@ #include <linux/of.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omap-panel-data.h> #include <video/of_display_timing.h> diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index 3414c2609320..b58012b82b6f 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -25,8 +25,7 @@ #include <linux/of_device.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> #include <video/mipi_display.h> /* DSI Virtual channel. Hardcoded for now. */ @@ -1127,40 +1126,6 @@ static struct omap_dss_driver dsicm_ops = { .memory_read = dsicm_memory_read, }; -static int dsicm_probe_pdata(struct platform_device *pdev) -{ - const struct panel_dsicm_platform_data *pdata; - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&pdev->dev); - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&pdev->dev, "failed to find video source\n"); - return -EPROBE_DEFER; - } - ddata->in = in; - - ddata->reset_gpio = pdata->reset_gpio; - - if (pdata->use_ext_te) - ddata->ext_te_gpio = pdata->ext_te_gpio; - else - ddata->ext_te_gpio = -1; - - ddata->ulps_timeout = pdata->ulps_timeout; - - ddata->use_dsi_backlight = pdata->use_dsi_backlight; - - ddata->pin_config = pdata->pin_config; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int dsicm_probe_of(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; @@ -1207,6 +1172,9 @@ static int dsicm_probe(struct platform_device *pdev) dev_dbg(dev, "probe\n"); + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; @@ -1214,17 +1182,9 @@ static int dsicm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ddata); ddata->pdev = pdev; - if (dev_get_platdata(dev)) { - r = dsicm_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = dsicm_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = dsicm_probe_of(pdev); + if (r) + return r; ddata->timings.x_res = 864; ddata->timings.y_res = 480; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c index 18eb60e9c9ec..f14691ce8d02 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c +++ 
b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c @@ -16,8 +16,7 @@ #include <linux/mutex.h> #include <linux/gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> static struct omap_video_timings lb035q02_timings = { .x_res = 320, @@ -240,44 +239,6 @@ static struct omap_dss_driver lb035q02_ops = { .get_resolution = omapdss_default_get_resolution, }; -static int lb035q02_probe_pdata(struct spi_device *spi) -{ - const struct panel_lb035q02_platform_data *pdata; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct omap_dss_device *dssdev, *in; - int r; - - pdata = dev_get_platdata(&spi->dev); - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&spi->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - r = devm_gpio_request_one(&spi->dev, pdata->enable_gpio, - GPIOF_OUT_INIT_LOW, "panel enable"); - if (r) - goto err_gpio; - - ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio); - - ddata->backlight_gpio = pdata->backlight_gpio; - - return 0; -err_gpio: - omap_dss_put_device(ddata->in); - return r; -} - static int lb035q02_probe_of(struct spi_device *spi) { struct device_node *node = spi->dev.of_node; @@ -312,6 +273,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) struct omap_dss_device *dssdev; int r; + if (!spi->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); if (ddata == NULL) return -ENOMEM; @@ -320,17 +284,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) ddata->spi = spi; - if (dev_get_platdata(&spi->dev)) { - r = lb035q02_probe_pdata(spi); - if (r) - return r; - } else if (spi->dev.of_node) { - r = lb035q02_probe_of(spi); - if (r) - return r; - } else { - return -ENODEV; - } + r = lb035q02_probe_of(spi); + if (r) + return r; if (gpio_is_valid(ddata->backlight_gpio)) { r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c index 8a928c9a2fc9..a2cbadd3eca3 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c @@ -18,8 +18,7 @@ #include <linux/gpio.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; @@ -233,33 +232,6 @@ static struct omap_dss_driver nec_8048_ops = { }; -static int nec_8048_probe_pdata(struct spi_device *spi) -{ - const struct panel_nec_nl8048hl11_platform_data *pdata; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&spi->dev); - - ddata->qvga_gpio = pdata->qvga_gpio; - ddata->res_gpio = pdata->res_gpio; - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&spi->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int nec_8048_probe_of(struct spi_device *spi) { struct device_node *node = spi->dev.of_node; @@ -296,6 +268,9 @@ static int nec_8048_probe(struct spi_device *spi) 
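[Editor's note: the omapfb panel and connector hunks in this series,
including this nec-nl8048hl11 one, all apply the same conversion: the
legacy *_probe_pdata() path that pulled configuration from platform
data is deleted, and probe bails out early on non-DT systems so only
the *_probe_of() path survives. The shared shape, sketched with
illustrative names:

	static int panel_probe(struct spi_device *spi)
	{
		int r;

		if (!spi->dev.of_node)
			return -ENODEV;	/* platform data no longer supported */

		r = panel_probe_of(spi);
		if (r)
			return r;
		/* ... timings, GPIOs, registration ... */
		return 0;
	}
]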
dev_dbg(&spi->dev, "%s\n", __func__); + if (!spi->dev.of_node) + return -ENODEV; + spi->mode = SPI_MODE_0; spi->bits_per_word = 32; @@ -315,17 +290,9 @@ static int nec_8048_probe(struct spi_device *spi) ddata->spi = spi; - if (dev_get_platdata(&spi->dev)) { - r = nec_8048_probe_pdata(spi); - if (r) - return r; - } else if (spi->dev.of_node) { - r = nec_8048_probe_of(spi); - if (r) - return r; - } else { - return -ENODEV; - } + r = nec_8048_probe_of(spi); + if (r) + return r; if (gpio_is_valid(ddata->qvga_gpio)) { r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c index 1954ec913ce5..a8be18a87fa0 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c @@ -17,8 +17,7 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/regulator/consumer.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; @@ -197,69 +196,6 @@ static struct omap_dss_driver sharp_ls_ops = { .get_resolution = omapdss_default_get_resolution, }; -static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags, - char *desc, struct gpio_desc **gpiod) -{ - int r; - - r = devm_gpio_request_one(dev, gpio, flags, desc); - if (r) { - *gpiod = NULL; - return r == -ENOENT ? 0 : r; - } - - *gpiod = gpio_to_desc(gpio); - - return 0; -} - -static int sharp_ls_probe_pdata(struct platform_device *pdev) -{ - const struct panel_sharp_ls037v7dw01_platform_data *pdata; - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev, *in; - int r; - - pdata = dev_get_platdata(&pdev->dev); - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&pdev->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - r = sharp_ls_get_gpio(&pdev->dev, pdata->mo_gpio, GPIOF_OUT_INIT_LOW, - "lcd MO", &ddata->mo_gpio); - if (r) - return r; - r = sharp_ls_get_gpio(&pdev->dev, pdata->lr_gpio, GPIOF_OUT_INIT_HIGH, - "lcd LR", &ddata->lr_gpio); - if (r) - return r; - r = sharp_ls_get_gpio(&pdev->dev, pdata->ud_gpio, GPIOF_OUT_INIT_HIGH, - "lcd UD", &ddata->ud_gpio); - if (r) - return r; - r = sharp_ls_get_gpio(&pdev->dev, pdata->resb_gpio, GPIOF_OUT_INIT_LOW, - "lcd RESB", &ddata->resb_gpio); - if (r) - return r; - r = sharp_ls_get_gpio(&pdev->dev, pdata->ini_gpio, GPIOF_OUT_INIT_LOW, - "lcd INI", &ddata->ini_gpio); - if (r) - return r; - - return 0; -} - static int sharp_ls_get_gpio_of(struct device *dev, int index, int val, const char *desc, struct gpio_desc **gpiod) { @@ -330,23 +266,18 @@ static int sharp_ls_probe(struct platform_device *pdev) struct omap_dss_device *dssdev; int r; + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (ddata == NULL) return -ENOMEM; platform_set_drvdata(pdev, ddata); - if (dev_get_platdata(&pdev->dev)) { - r = sharp_ls_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = sharp_ls_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = sharp_ls_probe_of(pdev); + if (r) + return r; ddata->videomode = sharp_ls_timings; diff --git 
a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c index 31efcca801bd..468560a6daae 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c @@ -33,7 +33,7 @@ #include <linux/of.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omap-panel-data.h> #define MIPID_CMD_READ_DISP_ID 0x04 diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c index 4d657f3ab679..b529a8c2b652 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c @@ -28,8 +28,7 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; @@ -365,31 +364,6 @@ static struct omap_dss_driver td028ttec1_ops = { .check_timings = td028ttec1_panel_check_timings, }; -static int td028ttec1_panel_probe_pdata(struct spi_device *spi) -{ - const struct panel_tpo_td028ttec1_platform_data *pdata; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&spi->dev); - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&spi->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int td028ttec1_probe_of(struct spi_device *spi) { struct device_node *node = spi->dev.of_node; @@ -415,6 +389,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi) dev_dbg(&spi->dev, "%s\n", __func__); + if (!spi->dev.of_node) + return -ENODEV; + spi->bits_per_word = 9; spi->mode = SPI_MODE_3; @@ -432,17 +409,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi) ddata->spi_dev = spi; - if (dev_get_platdata(&spi->dev)) { - r = td028ttec1_panel_probe_pdata(spi); - if (r) - return r; - } else if (spi->dev.of_node) { - r = td028ttec1_probe_of(spi); - if (r) - return r; - } else { - return -ENODEV; - } + r = td028ttec1_probe_of(spi); + if (r) + return r; ddata->videomode = td028ttec1_panel_timings; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c index 68e3b68a2920..51e628b85f4a 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c @@ -19,8 +19,7 @@ #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> #define TPO_R02_MODE(x) ((x) & 7) #define TPO_R02_MODE_800x480 7 @@ -465,32 +464,6 @@ static struct omap_dss_driver tpo_td043_ops = { }; -static int tpo_td043_probe_pdata(struct spi_device *spi) -{ - const struct panel_tpo_td043mtea1_platform_data *pdata; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&spi->dev); - - ddata->nreset_gpio = pdata->nreset_gpio; - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&spi->dev, "failed to find video 
source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int tpo_td043_probe_of(struct spi_device *spi) { struct device_node *node = spi->dev.of_node; @@ -524,6 +497,9 @@ static int tpo_td043_probe(struct spi_device *spi) dev_dbg(&spi->dev, "%s\n", __func__); + if (!spi->dev.of_node) + return -ENODEV; + spi->bits_per_word = 16; spi->mode = SPI_MODE_0; @@ -541,17 +517,9 @@ static int tpo_td043_probe(struct spi_device *spi) ddata->spi = spi; - if (dev_get_platdata(&spi->dev)) { - r = tpo_td043_probe_pdata(spi); - if (r) - return r; - } else if (spi->dev.of_node) { - r = tpo_td043_probe_of(spi); - if (r) - return r; - } else { - return -ENODEV; - } + r = tpo_td043_probe_of(spi); + if (r) + return r; ddata->mode = TPO_R02_MODE_800x480; memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma)); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/apply.c b/drivers/video/fbdev/omap2/omapfb/dss/apply.c index 663ccc3bf4e5..2481f4871f66 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/apply.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/apply.c @@ -23,7 +23,7 @@ #include <linux/spinlock.h> #include <linux/jiffies.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c index 5a87179b7312..29de4827589d 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/core.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c @@ -35,7 +35,7 @@ #include <linux/suspend.h> #include <linux/slab.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" @@ -208,8 +208,6 @@ static int __init omap_dss_probe(struct platform_device *pdev) core.default_display_name = def_disp_name; else if (pdata->default_display_name) core.default_display_name = pdata->default_display_name; - else if (pdata->default_device) - core.default_display_name = pdata->default_device->name; register_pm_notifier(&omap_dss_pm_notif_block); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c index 6607db37a5e4..3691bde4ce0a 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c @@ -26,7 +26,7 @@ #include <linux/interrupt.h> #include <linux/seq_file.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c index 5491e304f4fe..7a75dfda9845 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c @@ -41,7 +41,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c index 038c15b04215..59c9a5c47ca9 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c @@ -18,7 +18,7 @@ */ #include <linux/kernel.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dispc.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c index 
index 75b5286029ee..b3fdbfd0b82d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
@@ -25,7 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/sysfs.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 
 static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display.c b/drivers/video/fbdev/omap2/omapfb/dss/display.c
index ef5b9027985d..dd5468695c43 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display.c
@@ -28,7 +28,7 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
index 7953e6a52346..da09806b940c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
@@ -34,7 +34,7 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index d63e59807707..9e4800a4e3d1 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -42,7 +42,7 @@
 #include <linux/of_platform.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/mipi_display.h>
 
 #include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index bf407b6ba15c..d356a252ab4a 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -18,7 +18,7 @@
 #include <linux/of.h>
 #include <linux/seq_file.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
index 0078c4d1fc31..47d7f69ad9ad 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
@@ -41,7 +41,7 @@
 #include <linux/suspend.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
index 0184a8461df1..a3cc0ca8f9d2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
@@ -73,6 +73,17 @@
 #define FLD_MOD(orig, val, start, end) \
 	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
 
+enum omap_dss_clk_source {
+	OMAP_DSS_CLK_SRC_FCK = 0,		/* OMAP2/3: DSS1_ALWON_FCLK
+						 * OMAP4: DSS_FCLK */
+	OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC,	/* OMAP3: DSI1_PLL_FCLK
+						 * OMAP4: PLL1_CLK1 */
+	OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI,	/* OMAP3: DSI2_PLL_FCLK
+						 * OMAP4: PLL1_CLK2 */
+	OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC,	/* OMAP4: PLL2_CLK1 */
+	OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI,	/* OMAP4: PLL2_CLK2 */
+};
+
 enum dss_io_pad_mode {
 	DSS_IO_PAD_MODE_RESET,
 	DSS_IO_PAD_MODE_RFBI,
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
index c886a2927f73..8fc843b56b26 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
@@ -23,7 +23,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
index 53616b02b613..f6de87e078b0 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
@@ -23,7 +23,8 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/hdmi.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
+#include <sound/omap-hdmi-audio.h>
 
 #include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 2e71aec838b1..926a6f20dbb2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -33,7 +33,7 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <sound/omap-hdmi-audio.h>
 
 #include "hdmi4_core.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index aade6d99662a..0ee829a165c3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -38,7 +38,7 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <sound/omap-hdmi-audio.h>
 
 #include "hdmi5_core.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
index 1b8fcc6c4ba1..189a5ad125a3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
@@ -4,7 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/of.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
index 1f5d19c119ce..9a13c35fd6d8 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
@@ -13,7 +13,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
index 06e23a7c432c..eac3665aba6c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
@@ -17,7 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
index 7c544bc56fb5..705373e4cf38 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
@@ -14,7 +14,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
index a7414fb12830..9e2a67fdf4d2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
@@ -26,7 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/jiffies.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager.c b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
index 08a67f4f6a20..69f86d2cc274 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
@@ -28,7 +28,7 @@
 #include <linux/platform_device.h>
 #include <linux/jiffies.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
@@ -69,7 +69,6 @@ int dss_init_overlay_managers(void)
 			break;
 		}
 
-		mgr->caps = 0;
 		mgr->supported_displays =
 			dss_feat_get_supported_displays(mgr->id);
 		mgr->supported_outputs =
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/output.c b/drivers/video/fbdev/omap2/omapfb/dss/output.c
index 16072159bd24..bed9a978269d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/output.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/output.c
@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
index 4cc5ddebfb34..f1f6c0aea752 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
@@ -26,7 +26,7 @@
 #include <linux/kobject.h>
 #include <linux/platform_device.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
index 2f7cee985cdd..d6c5d75d2ef8 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
@@ -30,7 +30,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/pll.c b/drivers/video/fbdev/omap2/omapfb/dss/pll.c
index f974ddcd3b6e..0564c5606cd0 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/pll.c
@@ -22,7 +22,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/sched.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
index aea6a1d0fb20..562b0c4ae0c6 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
@@ -38,7 +38,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 
 struct rfbi_reg { u16 idx; };
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
index d747cc6b59e1..c4be732a4714 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
@@ -29,7 +29,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 
 static struct {
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
index 26e0ee30adf8..392464da12e4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
@@ -37,7 +37,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
index b1ec59e42940..a890540f2037 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
@@ -17,7 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 9ddfdd63b84c..ef69273074ba 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -30,7 +30,7 @@
 #include <linux/export.h>
 #include <linux/sizes.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index d3af01c94a58..2fb90cb6803f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -30,7 +30,7 @@
 #include <linux/platform_device.h>
 #include <linux/omapfb.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
index 18fa9e1d0033..8087a009c54f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
@@ -29,7 +29,7 @@
 #include <linux/mm.h>
 #include <linux/omapfb.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb.h b/drivers/video/fbdev/omap2/omapfb/omapfb.h
index 623cd872a367..bcb9ff4a607d 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb.h
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb.h
@@ -31,7 +31,7 @@
 #include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #ifdef DEBUG
 extern bool omapfb_debug;