Diffstat (limited to 'drivers/gpu')
207 files changed, 10935 insertions, 4697 deletions
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile index 70da9eb52a42..e9ed439a5b65 100644 --- a/drivers/gpu/Makefile +++ b/drivers/gpu/Makefile @@ -1,3 +1,6 @@ -obj-y += drm/ vga/ +# drm/tegra depends on host1x, so if both drivers are built-in care must be +# taken to initialize them in the correct order. Link order is the only way +# to ensure this currently. obj-$(CONFIG_TEGRA_HOST1X) += host1x/ +obj-y += drm/ vga/ obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/ diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 308c104ccdbd..151a050129e7 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -62,12 +62,13 @@ config DRM_TTM config DRM_GEM_CMA_HELPER bool - depends on DRM + depends on DRM && HAVE_DMA_ATTRS help Choose this if you need the GEM CMA helper functions config DRM_KMS_CMA_HELPER bool + depends on DRM && HAVE_DMA_ATTRS select DRM_GEM_CMA_HELPER select DRM_KMS_FB_HELPER select FB_SYS_FILLRECT diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index cf0eed8208b5..2c239b99de64 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -14,7 +14,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_info.o drm_debugfs.o drm_encoder_slave.o \ drm_trace_points.o drm_global.o drm_prime.o \ drm_rect.o drm_vma_manager.o drm_flip_work.o \ - drm_modeset_lock.o drm_atomic.o + drm_modeset_lock.o drm_atomic.o drm_bridge.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 1ba8332419fa..5bc32c26b989 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -183,16 +183,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->shared_resources = *gpu_resources; /* calculate max size of mqds needed for queues */ - size = max_num_of_processes * - max_num_of_queues_per_process * - kfd->device_info->mqd_size_aligned; + size = max_num_of_queues_per_device * + kfd->device_info->mqd_size_aligned; /* * calculate max size of runlist packet. 
* There can be only 2 packets at once */ - size += (max_num_of_processes * sizeof(struct pm4_map_process) + - max_num_of_processes * max_num_of_queues_per_process * + size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) + + max_num_of_queues_per_device * sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2; /* Add size of HIQ & DIQ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index b189f9791c90..b3589d0e39b9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -135,6 +135,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, mutex_lock(&dqm->lock); + if (dqm->total_queue_count >= max_num_of_queues_per_device) { + pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); + return -EPERM; + } + if (list_empty(&qpd->queues_list)) { retval = allocate_vmid(dqm, qpd, q); if (retval != 0) { @@ -160,9 +167,20 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, } list_add(&q->list, &qpd->queues_list); - dqm->queue_count++; + if (q->properties.is_active) + dqm->queue_count++; + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) dqm->sdma_queue_count++; + + /* + * Unconditionally increment this counter, regardless of the queue's + * type or whether the queue is active. + */ + dqm->total_queue_count++; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); return 0; } @@ -296,7 +314,17 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm, list_del(&q->list); if (list_empty(&qpd->queues_list)) deallocate_vmid(dqm, qpd, q); - dqm->queue_count--; + if (q->properties.is_active) + dqm->queue_count--; + + /* + * Unconditionally decrement this counter, regardless of the queue's + * type + */ + dqm->total_queue_count--; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + out: mutex_unlock(&dqm->lock); return retval; @@ -470,10 +498,14 @@ int init_pipelines(struct device_queue_manager *dqm, for (i = 0; i < pipes_num; i++) { inx = i + first_pipe; + /* + * HPD buffer on GTT is allocated by amdkfd, no need to waste + * space in GTT for pipelines we don't initialize + */ pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); /* = log2(bytes/4)-1 */ - kfd2kgd->init_pipeline(dqm->dev->kgd, i, + kfd2kgd->init_pipeline(dqm->dev->kgd, inx, CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); } @@ -488,8 +520,7 @@ static int init_scheduler(struct device_queue_manager *dqm) pr_debug("kfd: In %s\n", __func__); - retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE); - + retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); return retval; } @@ -744,6 +775,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, pr_debug("kfd: In func %s\n", __func__); mutex_lock(&dqm->lock); + if (dqm->total_queue_count >= max_num_of_queues_per_device) { + pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); + return -EPERM; + } + + /* + * Unconditionally increment this counter, regardless of the queue's + * type or whether the queue is active. 
+ */ + dqm->total_queue_count++; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + list_add(&kq->list, &qpd->priv_queue_list); dqm->queue_count++; qpd->is_debug = true; @@ -767,6 +813,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, dqm->queue_count--; qpd->is_debug = false; execute_queues_cpsch(dqm, false); + /* + * Unconditionally decrement this counter, regardless of the queue's + * type. + */ + dqm->total_queue_count--; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); mutex_unlock(&dqm->lock); } @@ -793,6 +846,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, mutex_lock(&dqm->lock); + if (dqm->total_queue_count >= max_num_of_queues_per_device) { + pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", + dqm->total_queue_count); + retval = -EPERM; + goto out; + } + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) select_sdma_engine_id(q); @@ -817,6 +877,14 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, if (q->properties.type == KFD_QUEUE_TYPE_SDMA) dqm->sdma_queue_count++; + /* + * Unconditionally increment this counter, regardless of the queue's + * type or whether the queue is active. + */ + dqm->total_queue_count++; + + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); out: mutex_unlock(&dqm->lock); @@ -952,12 +1020,21 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, dqm->sdma_queue_count--; list_del(&q->list); - dqm->queue_count--; + if (q->properties.is_active) + dqm->queue_count--; execute_queues_cpsch(dqm, false); mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); + /* + * Unconditionally decrement this counter, regardless of the queue's + * type + */ + dqm->total_queue_count--; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index e7b17b28330e..d64f86cda34f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -144,6 +144,7 @@ struct device_queue_manager { unsigned int processes_count; unsigned int queue_count; unsigned int sdma_queue_count; + unsigned int total_queue_count; unsigned int next_pipe_to_allocate; unsigned int *allocated_queues; unsigned int sdma_bitmap; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index ac5445415667..3f34ae16f075 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444); MODULE_PARM_DESC(sched_policy, "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)"); -int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT; -module_param(max_num_of_processes, int, 0444); -MODULE_PARM_DESC(max_num_of_processes, - "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes"); - -int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT; -module_param(max_num_of_queues_per_process, int, 0444); -MODULE_PARM_DESC(max_num_of_queues_per_process, - "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process"); +int max_num_of_queues_per_device = 
KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT; +module_param(max_num_of_queues_per_device, int, 0444); +MODULE_PARM_DESC(max_num_of_queues_per_device, + "Maximum number of supported queues per device (1 = Minimum, 4096 = default)"); bool kgd2kfd_init(unsigned interface_version, const struct kfd2kgd_calls *f2g, @@ -100,16 +95,10 @@ static int __init kfd_module_init(void) } /* Verify module parameters */ - if ((max_num_of_processes < 0) || - (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) { - pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n"); - return -1; - } - - if ((max_num_of_queues_per_process < 0) || - (max_num_of_queues_per_process > - KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) { - pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n"); + if ((max_num_of_queues_per_device < 1) || + (max_num_of_queues_per_device > + KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) { + pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); return -1; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index a318743cdcc2..a09e18a339f3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -94,6 +94,9 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, m->cp_hqd_pipe_priority = 1; m->cp_hqd_queue_priority = 15; + if (q->format == KFD_QUEUE_FORMAT_AQL) + m->cp_hqd_iq_rptr = AQL_ENABLE; + *mqd = m; if (gart_addr != NULL) *gart_addr = addr; @@ -187,7 +190,6 @@ static int update_mqd(struct mqd_manager *mm, void *mqd, m->cp_hqd_vmid = q->vmid; if (q->format == KFD_QUEUE_FORMAT_AQL) { - m->cp_hqd_iq_rptr = AQL_ENABLE; m->cp_hqd_pq_control |= NO_UPDATE_RPTR; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c index 4c25ef504f79..6cfe7f1f18cf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c @@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex); int kfd_pasid_init(void) { - pasid_limit = max_num_of_processes; + pasid_limit = KFD_MAX_NUM_OF_PROCESSES; pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); if (!pasid_bitmap) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 1b35a9c87437..5a44f2fecf38 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -52,20 +52,19 @@ #define kfd_alloc_struct(ptr_to_struct) \ ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) -/* Kernel module parameter to specify maximum number of supported processes */ -extern int max_num_of_processes; - -#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32 #define KFD_MAX_NUM_OF_PROCESSES 512 +#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 /* - * Kernel module parameter to specify maximum number of supported queues - * per process + * Kernel module parameter to specify maximum number of supported queues per + * device */ -extern int max_num_of_queues_per_process; +extern int max_num_of_queues_per_device; -#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128 -#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 +#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 +#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ + (KFD_MAX_NUM_OF_PROCESSES * \ + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) #define KFD_KERNEL_QUEUE_SIZE 2048 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 513eeb6e402a..530b82c4e78b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, pr_debug("kfd: in %s\n", __func__); found = find_first_zero_bit(pqm->queue_slot_bitmap, - max_num_of_queues_per_process); + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); pr_debug("kfd: the new slot id %lu\n", found); - if (found >= max_num_of_queues_per_process) { + if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { pr_info("amdkfd: Can not open more queues for process with pasid %d\n", pqm->process->pasid); return -ENOMEM; @@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p) INIT_LIST_HEAD(&pqm->queues); pqm->queue_slot_bitmap = - kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process, + kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_BYTE), GFP_KERNEL); if (pqm->queue_slot_bitmap == NULL) return -ENOMEM; @@ -206,6 +206,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, pqn->kq = NULL; retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, &q->properties.vmid); + pr_debug("DQM returned %d for create_queue\n", retval); print_queue(q); break; case KFD_QUEUE_TYPE_DIQ: @@ -226,7 +227,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, } if (retval != 0) { - pr_err("kfd: error dqm create queue\n"); + pr_debug("Error dqm create queue\n"); goto err_create_queue; } @@ -245,7 +246,10 @@ int pqm_create_queue(struct process_queue_manager *pqm, err_create_queue: kfree(pqn); err_allocate_pqn: + /* check if queues list is empty unregister process from device */ clear_bit(*qid, pqm->queue_slot_bitmap); + if (list_empty(&pqm->queues)) + dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd); return retval; } @@ -318,7 +322,11 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, BUG_ON(!pqm); pqn = get_queue_by_qid(pqm, qid); - BUG_ON(!pqn); + if (!pqn) { + pr_debug("amdkfd: No queue %d exists for update operation\n", + qid); + return -EFAULT; + } pqn->q->properties.queue_address = p->queue_address; pqn->q->properties.queue_size = p->queue_size; diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig index 1a085625538a..99b4f0698a30 100644 --- a/drivers/gpu/drm/atmel-hlcdc/Kconfig +++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig @@ -1,6 +1,6 @@ config DRM_ATMEL_HLCDC tristate "DRM Support for ATMEL HLCDC Display Controller" - depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC + depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER select DRM_KMS_FB_HELPER diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 0409b907de5d..b81cea6baeea 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -21,6 +21,7 @@ #include <linux/clk.h> #include <linux/pm.h> #include <linux/pm_runtime.h> +#include <linux/pinctrl/consumer.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> @@ -37,14 +38,14 @@ * @hlcdc: pointer to the atmel_hlcdc structure provided by the MFD device * @event: pointer to the current page flip event * @id: CRTC id (returned by drm_crtc_index) - * @dpms: DPMS mode + * @enabled: CRTC state */ struct atmel_hlcdc_crtc { struct drm_crtc base; struct atmel_hlcdc_dc *dc; struct drm_pending_vblank_event *event; 
int id; - int dpms; + bool enabled; }; static inline struct atmel_hlcdc_crtc * @@ -53,86 +54,17 @@ drm_crtc_to_atmel_hlcdc_crtc(struct drm_crtc *crtc) return container_of(crtc, struct atmel_hlcdc_crtc, base); } -static void atmel_hlcdc_crtc_dpms(struct drm_crtc *c, int mode) -{ - struct drm_device *dev = c->dev; - struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); - struct regmap *regmap = crtc->dc->hlcdc->regmap; - unsigned int status; - - if (mode != DRM_MODE_DPMS_ON) - mode = DRM_MODE_DPMS_OFF; - - if (crtc->dpms == mode) - return; - - pm_runtime_get_sync(dev->dev); - - if (mode != DRM_MODE_DPMS_ON) { - regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP); - while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) && - (status & ATMEL_HLCDC_DISP)) - cpu_relax(); - - regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC); - while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) && - (status & ATMEL_HLCDC_SYNC)) - cpu_relax(); - - regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK); - while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) && - (status & ATMEL_HLCDC_PIXEL_CLK)) - cpu_relax(); - - clk_disable_unprepare(crtc->dc->hlcdc->sys_clk); - - pm_runtime_allow(dev->dev); - } else { - pm_runtime_forbid(dev->dev); - - clk_prepare_enable(crtc->dc->hlcdc->sys_clk); - - regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PIXEL_CLK); - while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) && - !(status & ATMEL_HLCDC_PIXEL_CLK)) - cpu_relax(); - - - regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC); - while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) && - !(status & ATMEL_HLCDC_SYNC)) - cpu_relax(); - - regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP); - while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) && - !(status & ATMEL_HLCDC_DISP)) - cpu_relax(); - } - - pm_runtime_put_sync(dev->dev); - - crtc->dpms = mode; -} - -static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c, - struct drm_display_mode *mode, - struct drm_display_mode *adj, - int x, int y, - struct drm_framebuffer *old_fb) +static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) { struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); struct regmap *regmap = crtc->dc->hlcdc->regmap; - struct drm_plane *plane = c->primary; - struct drm_framebuffer *fb; + struct drm_display_mode *adj = &c->state->adjusted_mode; unsigned long mode_rate; struct videomode vm; unsigned long prate; unsigned int cfg; int div; - if (atmel_hlcdc_dc_mode_valid(crtc->dc, adj) != MODE_OK) - return -EINVAL; - vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay; vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end; vm.vsync_len = adj->crtc_vsync_end - adj->crtc_vsync_start; @@ -156,7 +88,7 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c, cfg = ATMEL_HLCDC_CLKPOL; prate = clk_get_rate(crtc->dc->hlcdc->sys_clk); - mode_rate = mode->crtc_clock * 1000; + mode_rate = adj->crtc_clock * 1000; if ((prate / 2) < mode_rate) { prate *= 2; cfg |= ATMEL_HLCDC_CLKSEL; @@ -174,10 +106,10 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c, cfg = 0; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) + if (adj->flags & DRM_MODE_FLAG_NVSYNC) cfg |= ATMEL_HLCDC_VSPOL; - if (mode->flags & DRM_MODE_FLAG_NHSYNC) + if (adj->flags & DRM_MODE_FLAG_NHSYNC) cfg |= ATMEL_HLCDC_HSPOL; regmap_update_bits(regmap, ATMEL_HLCDC_CFG(5), @@ -187,77 +119,134 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c, ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO | ATMEL_HLCDC_GUARDTIME_MASK, cfg); - - fb = plane->fb; - plane->fb = 
old_fb; - - return atmel_hlcdc_plane_update_with_mode(plane, c, fb, 0, 0, - adj->hdisplay, adj->vdisplay, - x << 16, y << 16, - adj->hdisplay << 16, - adj->vdisplay << 16, - adj); } -int atmel_hlcdc_crtc_mode_set_base(struct drm_crtc *c, int x, int y, - struct drm_framebuffer *old_fb) +static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - struct drm_plane *plane = c->primary; - struct drm_framebuffer *fb = plane->fb; - struct drm_display_mode *mode = &c->hwmode; - - plane->fb = old_fb; - - return plane->funcs->update_plane(plane, c, fb, - 0, 0, - mode->hdisplay, - mode->vdisplay, - x << 16, y << 16, - mode->hdisplay << 16, - mode->vdisplay << 16); + return true; } -static void atmel_hlcdc_crtc_prepare(struct drm_crtc *crtc) +static void atmel_hlcdc_crtc_disable(struct drm_crtc *c) { - atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + struct drm_device *dev = c->dev; + struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); + struct regmap *regmap = crtc->dc->hlcdc->regmap; + unsigned int status; + + if (!crtc->enabled) + return; + + drm_crtc_vblank_off(c); + + pm_runtime_get_sync(dev->dev); + + regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP); + while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) && + (status & ATMEL_HLCDC_DISP)) + cpu_relax(); + + regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC); + while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) && + (status & ATMEL_HLCDC_SYNC)) + cpu_relax(); + + regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK); + while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) && + (status & ATMEL_HLCDC_PIXEL_CLK)) + cpu_relax(); + + clk_disable_unprepare(crtc->dc->hlcdc->sys_clk); + pinctrl_pm_select_sleep_state(dev->dev); + + pm_runtime_allow(dev->dev); + + pm_runtime_put_sync(dev->dev); + + crtc->enabled = false; } -static void atmel_hlcdc_crtc_commit(struct drm_crtc *crtc) +static void atmel_hlcdc_crtc_enable(struct drm_crtc *c) { - atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + struct drm_device *dev = c->dev; + struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); + struct regmap *regmap = crtc->dc->hlcdc->regmap; + unsigned int status; + + if (crtc->enabled) + return; + + pm_runtime_get_sync(dev->dev); + + pm_runtime_forbid(dev->dev); + + pinctrl_pm_select_default_state(dev->dev); + clk_prepare_enable(crtc->dc->hlcdc->sys_clk); + + regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PIXEL_CLK); + while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) && + !(status & ATMEL_HLCDC_PIXEL_CLK)) + cpu_relax(); + + + regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC); + while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) && + !(status & ATMEL_HLCDC_SYNC)) + cpu_relax(); + + regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP); + while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) && + !(status & ATMEL_HLCDC_DISP)) + cpu_relax(); + + pm_runtime_put_sync(dev->dev); + + drm_crtc_vblank_on(c); + + crtc->enabled = true; } -static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c, + struct drm_crtc_state *s) { - return true; + struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); + + if (atmel_hlcdc_dc_mode_valid(crtc->dc, &s->adjusted_mode) != MODE_OK) + return -EINVAL; + + return atmel_hlcdc_plane_prepare_disc_area(s); } -static void atmel_hlcdc_crtc_disable(struct 
drm_crtc *crtc) +static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c) { - struct drm_plane *plane; + struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); - atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); - crtc->primary->funcs->disable_plane(crtc->primary); + if (c->state->event) { + c->state->event->pipe = drm_crtc_index(c); - drm_for_each_legacy_plane(plane, &crtc->dev->mode_config.plane_list) { - if (plane->crtc != crtc) - continue; + WARN_ON(drm_crtc_vblank_get(c) != 0); - plane->funcs->disable_plane(crtc->primary); - plane->crtc = NULL; + crtc->event = c->state->event; + c->state->event = NULL; } } +static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc) +{ + /* TODO: write common plane control register if available */ +} + static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = { .mode_fixup = atmel_hlcdc_crtc_mode_fixup, - .dpms = atmel_hlcdc_crtc_dpms, - .mode_set = atmel_hlcdc_crtc_mode_set, - .mode_set_base = atmel_hlcdc_crtc_mode_set_base, - .prepare = atmel_hlcdc_crtc_prepare, - .commit = atmel_hlcdc_crtc_commit, + .mode_set = drm_helper_crtc_mode_set, + .mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb, + .mode_set_base = drm_helper_crtc_mode_set_base, .disable = atmel_hlcdc_crtc_disable, + .enable = atmel_hlcdc_crtc_enable, + .atomic_check = atmel_hlcdc_crtc_atomic_check, + .atomic_begin = atmel_hlcdc_crtc_atomic_begin, + .atomic_flush = atmel_hlcdc_crtc_atomic_flush, }; static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c) @@ -306,61 +295,13 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c) atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c)); } -static int atmel_hlcdc_crtc_page_flip(struct drm_crtc *c, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) -{ - struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); - struct atmel_hlcdc_plane_update_req req; - struct drm_plane *plane = c->primary; - struct drm_device *dev = c->dev; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&dev->event_lock, flags); - if (crtc->event) - ret = -EBUSY; - spin_unlock_irqrestore(&dev->event_lock, flags); - - if (ret) - return ret; - - memset(&req, 0, sizeof(req)); - req.crtc_x = 0; - req.crtc_y = 0; - req.crtc_h = c->mode.crtc_vdisplay; - req.crtc_w = c->mode.crtc_hdisplay; - req.src_x = c->x << 16; - req.src_y = c->y << 16; - req.src_w = req.crtc_w << 16; - req.src_h = req.crtc_h << 16; - req.fb = fb; - - ret = atmel_hlcdc_plane_prepare_update_req(plane, &req, &c->hwmode); - if (ret) - return ret; - - if (event) { - drm_vblank_get(c->dev, crtc->id); - spin_lock_irqsave(&dev->event_lock, flags); - crtc->event = event; - spin_unlock_irqrestore(&dev->event_lock, flags); - } - - ret = atmel_hlcdc_plane_apply_update_req(plane, &req); - if (ret) - crtc->event = NULL; - else - plane->fb = fb; - - return ret; -} - static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = { - .page_flip = atmel_hlcdc_crtc_page_flip, - .set_config = drm_crtc_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .set_config = drm_atomic_helper_set_config, .destroy = atmel_hlcdc_crtc_destroy, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; int atmel_hlcdc_crtc_create(struct drm_device *dev) @@ -375,7 +316,6 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev) if (!crtc) return -ENOMEM; - crtc->dpms = DRM_MODE_DPMS_OFF; crtc->dc = dc; ret = 
drm_crtc_init_with_planes(dev, &crtc->base, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 7320a6c6613f..01afeafe17b1 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -222,6 +222,8 @@ static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev) static const struct drm_mode_config_funcs mode_config_funcs = { .fb_create = atmel_hlcdc_fb_create, .output_poll_changed = atmel_hlcdc_fb_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, }; static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev) @@ -319,6 +321,8 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) goto err_periph_clk_disable; } + drm_mode_config_reset(dev); + ret = drm_vblank_init(dev, 1); if (ret < 0) { dev_err(dev->dev, "failed to initialize vblank\n"); @@ -557,6 +561,52 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int atmel_hlcdc_dc_drm_suspend(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + struct drm_crtc *crtc; + + if (pm_runtime_suspended(dev)) + return 0; + + drm_modeset_lock_all(drm_dev); + list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) { + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + if (crtc->enabled) { + crtc_funcs->disable(crtc); + /* save enable state for resume */ + crtc->enabled = true; + } + } + drm_modeset_unlock_all(drm_dev); + return 0; +} + +static int atmel_hlcdc_dc_drm_resume(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + struct drm_crtc *crtc; + + if (pm_runtime_suspended(dev)) + return 0; + + drm_modeset_lock_all(drm_dev); + list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) { + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + if (crtc->enabled) { + crtc->enabled = false; + crtc_funcs->enable(crtc); + } + } + drm_modeset_unlock_all(drm_dev); + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops, + atmel_hlcdc_dc_drm_suspend, atmel_hlcdc_dc_drm_resume); + static const struct of_device_id atmel_hlcdc_dc_of_match[] = { { .compatible = "atmel,hlcdc-display-controller" }, { }, @@ -567,6 +617,7 @@ static struct platform_driver atmel_hlcdc_dc_platform_driver = { .remove = atmel_hlcdc_dc_drm_remove, .driver = { .name = "atmel-hlcdc-display-controller", + .pm = &atmel_hlcdc_dc_drm_pm_ops, .of_match_table = atmel_hlcdc_dc_of_match, }, }; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h index 7bc96af3397a..1ea9c2ccd8a7 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h @@ -26,11 +26,14 @@ #include <linux/irqdomain.h> #include <linux/pwm.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_panel.h> +#include <drm/drm_plane_helper.h> #include <drm/drmP.h> #include "atmel_hlcdc_layer.h" @@ -69,7 +72,6 @@ struct atmel_hlcdc_dc_desc { */ struct atmel_hlcdc_plane_properties { struct drm_property *alpha; - struct drm_property *rotation; }; /** @@ -84,7 +86,6 @@ struct atmel_hlcdc_plane { struct drm_plane base; struct atmel_hlcdc_layer layer; struct atmel_hlcdc_plane_properties *properties; - unsigned int rotation; }; static inline 
struct atmel_hlcdc_plane * @@ -100,43 +101,6 @@ atmel_hlcdc_layer_to_plane(struct atmel_hlcdc_layer *l) } /** - * Atmel HLCDC Plane update request structure. - * - * @crtc_x: x position of the plane relative to the CRTC - * @crtc_y: y position of the plane relative to the CRTC - * @crtc_w: visible width of the plane - * @crtc_h: visible height of the plane - * @src_x: x buffer position - * @src_y: y buffer position - * @src_w: buffer width - * @src_h: buffer height - * @fb: framebuffer object object - * @bpp: bytes per pixel deduced from pixel_format - * @offsets: offsets to apply to the GEM buffers - * @xstride: value to add to the pixel pointer between each line - * @pstride: value to add to the pixel pointer between each pixel - * @nplanes: number of planes (deduced from pixel_format) - */ -struct atmel_hlcdc_plane_update_req { - int crtc_x; - int crtc_y; - unsigned int crtc_w; - unsigned int crtc_h; - uint32_t src_x; - uint32_t src_y; - uint32_t src_w; - uint32_t src_h; - struct drm_framebuffer *fb; - - /* These fields are private and should not be touched */ - int bpp[ATMEL_HLCDC_MAX_PLANES]; - unsigned int offsets[ATMEL_HLCDC_MAX_PLANES]; - int xstride[ATMEL_HLCDC_MAX_PLANES]; - int pstride[ATMEL_HLCDC_MAX_PLANES]; - int nplanes; -}; - -/** * Atmel HLCDC Planes. * * This structure stores the instantiated HLCDC Planes and can be accessed by @@ -184,22 +148,7 @@ int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc, struct atmel_hlcdc_planes * atmel_hlcdc_create_planes(struct drm_device *dev); -int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p, - struct atmel_hlcdc_plane_update_req *req, - const struct drm_display_mode *mode); - -int atmel_hlcdc_plane_apply_update_req(struct drm_plane *p, - struct atmel_hlcdc_plane_update_req *req); - -int atmel_hlcdc_plane_update_with_mode(struct drm_plane *p, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, - unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h, - const struct drm_display_mode *mode); +int atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state); void atmel_hlcdc_crtc_irq(struct drm_crtc *c); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c index 063d2a7b941f..d1dca39b76dd 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c @@ -298,7 +298,7 @@ void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer) spin_unlock_irqrestore(&layer->lock, flags); } -int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer) +void atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer) { struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma; struct atmel_hlcdc_layer_update *upd = &layer->update; @@ -340,8 +340,6 @@ int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer) dma->status = ATMEL_HLCDC_LAYER_DISABLED; spin_unlock_irqrestore(&layer->lock, flags); - - return 0; } int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer) diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h index 27e56c0862ec..9beabc940bce 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.h @@ -120,6 +120,7 @@ #define ATMEL_HLCDC_LAYER_DISCEN BIT(11) #define ATMEL_HLCDC_LAYER_GA_SHIFT 16 #define ATMEL_HLCDC_LAYER_GA_MASK GENMASK(23, ATMEL_HLCDC_LAYER_GA_SHIFT) +#define ATMEL_HLCDC_LAYER_GA(x) ((x) << 
ATMEL_HLCDC_LAYER_GA_SHIFT) #define ATMEL_HLCDC_LAYER_CSC_CFG(p, o) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.csc + o) @@ -376,7 +377,7 @@ int atmel_hlcdc_layer_init(struct drm_device *dev, void atmel_hlcdc_layer_cleanup(struct drm_device *dev, struct atmel_hlcdc_layer *layer); -int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer); +void atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer); int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index c402192362c5..9c4513005310 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -86,25 +86,22 @@ atmel_hlcdc_rgb_output_to_panel(struct atmel_hlcdc_rgb_output *output) return container_of(output, struct atmel_hlcdc_panel, base); } -static void atmel_hlcdc_panel_encoder_dpms(struct drm_encoder *encoder, - int mode) +static void atmel_hlcdc_panel_encoder_enable(struct drm_encoder *encoder) { struct atmel_hlcdc_rgb_output *rgb = drm_encoder_to_atmel_hlcdc_rgb_output(encoder); struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb); - if (mode != DRM_MODE_DPMS_ON) - mode = DRM_MODE_DPMS_OFF; - - if (mode == rgb->dpms) - return; + drm_panel_enable(panel->panel); +} - if (mode != DRM_MODE_DPMS_ON) - drm_panel_disable(panel->panel); - else - drm_panel_enable(panel->panel); +static void atmel_hlcdc_panel_encoder_disable(struct drm_encoder *encoder) +{ + struct atmel_hlcdc_rgb_output *rgb = + drm_encoder_to_atmel_hlcdc_rgb_output(encoder); + struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb); - rgb->dpms = mode; + drm_panel_disable(panel->panel); } static bool @@ -115,16 +112,6 @@ atmel_hlcdc_panel_encoder_mode_fixup(struct drm_encoder *encoder, return true; } -static void atmel_hlcdc_panel_encoder_prepare(struct drm_encoder *encoder) -{ - atmel_hlcdc_panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); -} - -static void atmel_hlcdc_panel_encoder_commit(struct drm_encoder *encoder) -{ - atmel_hlcdc_panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON); -} - static void atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, @@ -156,11 +143,10 @@ atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder, } static struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = { - .dpms = atmel_hlcdc_panel_encoder_dpms, .mode_fixup = atmel_hlcdc_panel_encoder_mode_fixup, - .prepare = atmel_hlcdc_panel_encoder_prepare, - .commit = atmel_hlcdc_panel_encoder_commit, .mode_set = atmel_hlcdc_rgb_encoder_mode_set, + .disable = atmel_hlcdc_panel_encoder_disable, + .enable = atmel_hlcdc_panel_encoder_enable, }; static void atmel_hlcdc_rgb_encoder_destroy(struct drm_encoder *encoder) @@ -226,10 +212,13 @@ atmel_hlcdc_panel_connector_destroy(struct drm_connector *connector) } static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .detect = atmel_hlcdc_panel_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = atmel_hlcdc_panel_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static int atmel_hlcdc_create_panel_output(struct drm_device *dev, diff --git 
a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index c5892dcfd745..dbf97d999d40 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -19,6 +19,59 @@ #include "atmel_hlcdc_dc.h" +/** + * Atmel HLCDC Plane state structure. + * + * @base: DRM plane state + * @crtc_x: x position of the plane relative to the CRTC + * @crtc_y: y position of the plane relative to the CRTC + * @crtc_w: visible width of the plane + * @crtc_h: visible height of the plane + * @src_x: x buffer position + * @src_y: y buffer position + * @src_w: buffer width + * @src_h: buffer height + * @alpha: alpha blending of the plane + * @bpp: bytes per pixel deduced from pixel_format + * @offsets: offsets to apply to the GEM buffers + * @xstride: value to add to the pixel pointer between each line + * @pstride: value to add to the pixel pointer between each pixel + * @nplanes: number of planes (deduced from pixel_format) + */ +struct atmel_hlcdc_plane_state { + struct drm_plane_state base; + int crtc_x; + int crtc_y; + unsigned int crtc_w; + unsigned int crtc_h; + uint32_t src_x; + uint32_t src_y; + uint32_t src_w; + uint32_t src_h; + + u8 alpha; + + bool disc_updated; + + int disc_x; + int disc_y; + int disc_w; + int disc_h; + + /* These fields are private and should not be touched */ + int bpp[ATMEL_HLCDC_MAX_PLANES]; + unsigned int offsets[ATMEL_HLCDC_MAX_PLANES]; + int xstride[ATMEL_HLCDC_MAX_PLANES]; + int pstride[ATMEL_HLCDC_MAX_PLANES]; + int nplanes; +}; + +static inline struct atmel_hlcdc_plane_state * +drm_plane_state_to_atmel_hlcdc_plane_state(struct drm_plane_state *s) +{ + return container_of(s, struct atmel_hlcdc_plane_state, base); +} + #define SUBPIXEL_MASK 0xffff static uint32_t rgb_formats[] = { @@ -128,7 +181,7 @@ static int atmel_hlcdc_format_to_plane_mode(u32 format, u32 *mode) return 0; } -static bool atmel_hlcdc_format_embedds_alpha(u32 format) +static bool atmel_hlcdc_format_embeds_alpha(u32 format) { int i; @@ -204,7 +257,7 @@ static u32 heo_upscaling_ycoef[] = { static void atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane, - struct atmel_hlcdc_plane_update_req *req) + struct atmel_hlcdc_plane_state *state) { const struct atmel_hlcdc_layer_cfg_layout *layout = &plane->layer.desc->layout; @@ -213,69 +266,69 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane, atmel_hlcdc_layer_update_cfg(&plane->layer, layout->size, 0xffffffff, - (req->crtc_w - 1) | - ((req->crtc_h - 1) << 16)); + (state->crtc_w - 1) | + ((state->crtc_h - 1) << 16)); if (layout->memsize) atmel_hlcdc_layer_update_cfg(&plane->layer, layout->memsize, 0xffffffff, - (req->src_w - 1) | - ((req->src_h - 1) << 16)); + (state->src_w - 1) | + ((state->src_h - 1) << 16)); if (layout->pos) atmel_hlcdc_layer_update_cfg(&plane->layer, layout->pos, 0xffffffff, - req->crtc_x | - (req->crtc_y << 16)); + state->crtc_x | + (state->crtc_y << 16)); /* TODO: rework the rescaling part */ - if (req->crtc_w != req->src_w || req->crtc_h != req->src_h) { + if (state->crtc_w != state->src_w || state->crtc_h != state->src_h) { u32 factor_reg = 0; - if (req->crtc_w != req->src_w) { + if (state->crtc_w != state->src_w) { int i; u32 factor; u32 *coeff_tab = heo_upscaling_xcoef; u32 max_memsize; - if (req->crtc_w < req->src_w) + if (state->crtc_w < state->src_w) coeff_tab = heo_downscaling_xcoef; for (i = 0; i < ARRAY_SIZE(heo_upscaling_xcoef); i++) atmel_hlcdc_layer_update_cfg(&plane->layer, 17 + i, 0xffffffff, 
coeff_tab[i]); - factor = ((8 * 256 * req->src_w) - (256 * 4)) / - req->crtc_w; + factor = ((8 * 256 * state->src_w) - (256 * 4)) / + state->crtc_w; factor++; - max_memsize = ((factor * req->crtc_w) + (256 * 4)) / + max_memsize = ((factor * state->crtc_w) + (256 * 4)) / 2048; - if (max_memsize > req->src_w) + if (max_memsize > state->src_w) factor--; factor_reg |= factor | 0x80000000; } - if (req->crtc_h != req->src_h) { + if (state->crtc_h != state->src_h) { int i; u32 factor; u32 *coeff_tab = heo_upscaling_ycoef; u32 max_memsize; - if (req->crtc_w < req->src_w) + if (state->crtc_w < state->src_w) coeff_tab = heo_downscaling_ycoef; for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++) atmel_hlcdc_layer_update_cfg(&plane->layer, 33 + i, 0xffffffff, coeff_tab[i]); - factor = ((8 * 256 * req->src_w) - (256 * 4)) / - req->crtc_w; + factor = ((8 * 256 * state->src_w) - (256 * 4)) / + state->crtc_w; factor++; - max_memsize = ((factor * req->crtc_w) + (256 * 4)) / + max_memsize = ((factor * state->crtc_w) + (256 * 4)) / 2048; - if (max_memsize > req->src_w) + if (max_memsize > state->src_w) factor--; factor_reg |= (factor << 16) | 0x80000000; } @@ -287,7 +340,7 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane, static void atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane, - struct atmel_hlcdc_plane_update_req *req) + struct atmel_hlcdc_plane_state *state) { const struct atmel_hlcdc_layer_cfg_layout *layout = &plane->layer.desc->layout; @@ -297,10 +350,11 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane, cfg |= ATMEL_HLCDC_LAYER_OVR | ATMEL_HLCDC_LAYER_ITER2BL | ATMEL_HLCDC_LAYER_ITER; - if (atmel_hlcdc_format_embedds_alpha(req->fb->pixel_format)) + if (atmel_hlcdc_format_embeds_alpha(state->base.fb->pixel_format)) cfg |= ATMEL_HLCDC_LAYER_LAEN; else - cfg |= ATMEL_HLCDC_LAYER_GAEN; + cfg |= ATMEL_HLCDC_LAYER_GAEN | + ATMEL_HLCDC_LAYER_GA(state->alpha); } atmel_hlcdc_layer_update_cfg(&plane->layer, @@ -312,24 +366,26 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane, ATMEL_HLCDC_LAYER_ITER2BL | ATMEL_HLCDC_LAYER_ITER | ATMEL_HLCDC_LAYER_GAEN | + ATMEL_HLCDC_LAYER_GA_MASK | ATMEL_HLCDC_LAYER_LAEN | ATMEL_HLCDC_LAYER_OVR | ATMEL_HLCDC_LAYER_DMA, cfg); } static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane, - struct atmel_hlcdc_plane_update_req *req) + struct atmel_hlcdc_plane_state *state) { u32 cfg; int ret; - ret = atmel_hlcdc_format_to_plane_mode(req->fb->pixel_format, &cfg); + ret = atmel_hlcdc_format_to_plane_mode(state->base.fb->pixel_format, + &cfg); if (ret) return; - if ((req->fb->pixel_format == DRM_FORMAT_YUV422 || - req->fb->pixel_format == DRM_FORMAT_NV61) && - (plane->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270)))) + if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 || + state->base.fb->pixel_format == DRM_FORMAT_NV61) && + (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270)))) cfg |= ATMEL_HLCDC_YUV422ROT; atmel_hlcdc_layer_update_cfg(&plane->layer, @@ -341,7 +397,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane, * Rotation optimization is not working on RGB888 (rotation is still * working but without any optimization). 
*/ - if (req->fb->pixel_format == DRM_FORMAT_RGB888) + if (state->base.fb->pixel_format == DRM_FORMAT_RGB888) cfg = ATMEL_HLCDC_LAYER_DMA_ROTDIS; else cfg = 0; @@ -352,73 +408,142 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane, } static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane, - struct atmel_hlcdc_plane_update_req *req) + struct atmel_hlcdc_plane_state *state) { struct atmel_hlcdc_layer *layer = &plane->layer; const struct atmel_hlcdc_layer_cfg_layout *layout = &layer->desc->layout; int i; - atmel_hlcdc_layer_update_set_fb(&plane->layer, req->fb, req->offsets); + atmel_hlcdc_layer_update_set_fb(&plane->layer, state->base.fb, + state->offsets); - for (i = 0; i < req->nplanes; i++) { + for (i = 0; i < state->nplanes; i++) { if (layout->xstride[i]) { atmel_hlcdc_layer_update_cfg(&plane->layer, layout->xstride[i], 0xffffffff, - req->xstride[i]); + state->xstride[i]); } if (layout->pstride[i]) { atmel_hlcdc_layer_update_cfg(&plane->layer, layout->pstride[i], 0xffffffff, - req->pstride[i]); + state->pstride[i]); } } } -static int atmel_hlcdc_plane_check_update_req(struct drm_plane *p, - struct atmel_hlcdc_plane_update_req *req, - const struct drm_display_mode *mode) +int +atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state) { - struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); - const struct atmel_hlcdc_layer_cfg_layout *layout = - &plane->layer.desc->layout; + int disc_x = 0, disc_y = 0, disc_w = 0, disc_h = 0; + const struct atmel_hlcdc_layer_cfg_layout *layout; + struct atmel_hlcdc_plane_state *primary_state; + struct drm_plane_state *primary_s; + struct atmel_hlcdc_plane *primary; + struct drm_plane *ovl; + + primary = drm_plane_to_atmel_hlcdc_plane(c_state->crtc->primary); + layout = &primary->layer.desc->layout; + if (!layout->disc_pos || !layout->disc_size) + return 0; + + primary_s = drm_atomic_get_plane_state(c_state->state, + &primary->base); + if (IS_ERR(primary_s)) + return PTR_ERR(primary_s); + + primary_state = drm_plane_state_to_atmel_hlcdc_plane_state(primary_s); + + drm_atomic_crtc_state_for_each_plane(ovl, c_state) { + struct atmel_hlcdc_plane_state *ovl_state; + struct drm_plane_state *ovl_s; + + if (ovl == c_state->crtc->primary) + continue; - if (!layout->size && - (mode->hdisplay != req->crtc_w || - mode->vdisplay != req->crtc_h)) - return -EINVAL; + ovl_s = drm_atomic_get_plane_state(c_state->state, ovl); + if (IS_ERR(ovl_s)) + return PTR_ERR(ovl_s); - if (plane->layer.desc->max_height && - req->crtc_h > plane->layer.desc->max_height) - return -EINVAL; + ovl_state = drm_plane_state_to_atmel_hlcdc_plane_state(ovl_s); - if (plane->layer.desc->max_width && - req->crtc_w > plane->layer.desc->max_width) - return -EINVAL; + if (!ovl_s->fb || + atmel_hlcdc_format_embeds_alpha(ovl_s->fb->pixel_format) || + ovl_state->alpha != 255) + continue; - if ((req->crtc_h != req->src_h || req->crtc_w != req->src_w) && - (!layout->memsize || - atmel_hlcdc_format_embedds_alpha(req->fb->pixel_format))) - return -EINVAL; + /* TODO: implement a smarter hidden area detection */ + if (ovl_state->crtc_h * ovl_state->crtc_w < disc_h * disc_w) + continue; - if (req->crtc_x < 0 || req->crtc_y < 0) - return -EINVAL; + disc_x = ovl_state->crtc_x; + disc_y = ovl_state->crtc_y; + disc_h = ovl_state->crtc_h; + disc_w = ovl_state->crtc_w; + } - if (req->crtc_w + req->crtc_x > mode->hdisplay || - req->crtc_h + req->crtc_y > mode->vdisplay) - return -EINVAL; + if (disc_x == primary_state->disc_x && + disc_y == 
primary_state->disc_y && + disc_w == primary_state->disc_w && + disc_h == primary_state->disc_h) + return 0; + + + primary_state->disc_x = disc_x; + primary_state->disc_y = disc_y; + primary_state->disc_w = disc_w; + primary_state->disc_h = disc_h; + primary_state->disc_updated = true; return 0; } -int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p, - struct atmel_hlcdc_plane_update_req *req, - const struct drm_display_mode *mode) +static void +atmel_hlcdc_plane_update_disc_area(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_plane_state *state) +{ + const struct atmel_hlcdc_layer_cfg_layout *layout = + &plane->layer.desc->layout; + int disc_surface = 0; + + if (!state->disc_updated) + return; + + disc_surface = state->disc_h * state->disc_w; + + atmel_hlcdc_layer_update_cfg(&plane->layer, layout->general_config, + ATMEL_HLCDC_LAYER_DISCEN, + disc_surface ? ATMEL_HLCDC_LAYER_DISCEN : 0); + + if (!disc_surface) + return; + + atmel_hlcdc_layer_update_cfg(&plane->layer, + layout->disc_pos, + 0xffffffff, + state->disc_x | (state->disc_y << 16)); + + atmel_hlcdc_layer_update_cfg(&plane->layer, + layout->disc_size, + 0xffffffff, + (state->disc_w - 1) | + ((state->disc_h - 1) << 16)); +} + +static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, + struct drm_plane_state *s) { struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); + struct atmel_hlcdc_plane_state *state = + drm_plane_state_to_atmel_hlcdc_plane_state(s); + const struct atmel_hlcdc_layer_cfg_layout *layout = + &plane->layer.desc->layout; + struct drm_framebuffer *fb = state->base.fb; + const struct drm_display_mode *mode; + struct drm_crtc_state *crtc_state; unsigned int patched_crtc_w; unsigned int patched_crtc_h; unsigned int patched_src_w; @@ -430,196 +555,195 @@ int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p, int vsub = 1; int i; - if ((req->src_x | req->src_y | req->src_w | req->src_h) & + if (!state->base.crtc || !fb) + return 0; + + crtc_state = s->state->crtc_states[drm_crtc_index(s->crtc)]; + mode = &crtc_state->adjusted_mode; + + state->src_x = s->src_x; + state->src_y = s->src_y; + state->src_h = s->src_h; + state->src_w = s->src_w; + state->crtc_x = s->crtc_x; + state->crtc_y = s->crtc_y; + state->crtc_h = s->crtc_h; + state->crtc_w = s->crtc_w; + if ((state->src_x | state->src_y | state->src_w | state->src_h) & SUBPIXEL_MASK) return -EINVAL; - req->src_x >>= 16; - req->src_y >>= 16; - req->src_w >>= 16; - req->src_h >>= 16; + state->src_x >>= 16; + state->src_y >>= 16; + state->src_w >>= 16; + state->src_h >>= 16; - req->nplanes = drm_format_num_planes(req->fb->pixel_format); - if (req->nplanes > ATMEL_HLCDC_MAX_PLANES) + state->nplanes = drm_format_num_planes(fb->pixel_format); + if (state->nplanes > ATMEL_HLCDC_MAX_PLANES) return -EINVAL; /* * Swap width and size in case of 90 or 270 degrees rotation */ - if (plane->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { - tmp = req->crtc_w; - req->crtc_w = req->crtc_h; - req->crtc_h = tmp; - tmp = req->src_w; - req->src_w = req->src_h; - req->src_h = tmp; + if (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { + tmp = state->crtc_w; + state->crtc_w = state->crtc_h; + state->crtc_h = tmp; + tmp = state->src_w; + state->src_w = state->src_h; + state->src_h = tmp; } - if (req->crtc_x + req->crtc_w > mode->hdisplay) - patched_crtc_w = mode->hdisplay - req->crtc_x; + if (state->crtc_x + state->crtc_w > mode->hdisplay) + patched_crtc_w = mode->hdisplay - state->crtc_x; else - patched_crtc_w = 
req->crtc_w; + patched_crtc_w = state->crtc_w; - if (req->crtc_x < 0) { - patched_crtc_w += req->crtc_x; - x_offset = -req->crtc_x; - req->crtc_x = 0; + if (state->crtc_x < 0) { + patched_crtc_w += state->crtc_x; + x_offset = -state->crtc_x; + state->crtc_x = 0; } - if (req->crtc_y + req->crtc_h > mode->vdisplay) - patched_crtc_h = mode->vdisplay - req->crtc_y; + if (state->crtc_y + state->crtc_h > mode->vdisplay) + patched_crtc_h = mode->vdisplay - state->crtc_y; else - patched_crtc_h = req->crtc_h; + patched_crtc_h = state->crtc_h; - if (req->crtc_y < 0) { - patched_crtc_h += req->crtc_y; - y_offset = -req->crtc_y; - req->crtc_y = 0; + if (state->crtc_y < 0) { + patched_crtc_h += state->crtc_y; + y_offset = -state->crtc_y; + state->crtc_y = 0; } - patched_src_w = DIV_ROUND_CLOSEST(patched_crtc_w * req->src_w, - req->crtc_w); - patched_src_h = DIV_ROUND_CLOSEST(patched_crtc_h * req->src_h, - req->crtc_h); + patched_src_w = DIV_ROUND_CLOSEST(patched_crtc_w * state->src_w, + state->crtc_w); + patched_src_h = DIV_ROUND_CLOSEST(patched_crtc_h * state->src_h, + state->crtc_h); - hsub = drm_format_horz_chroma_subsampling(req->fb->pixel_format); - vsub = drm_format_vert_chroma_subsampling(req->fb->pixel_format); + hsub = drm_format_horz_chroma_subsampling(fb->pixel_format); + vsub = drm_format_vert_chroma_subsampling(fb->pixel_format); - for (i = 0; i < req->nplanes; i++) { + for (i = 0; i < state->nplanes; i++) { unsigned int offset = 0; int xdiv = i ? hsub : 1; int ydiv = i ? vsub : 1; - req->bpp[i] = drm_format_plane_cpp(req->fb->pixel_format, i); - if (!req->bpp[i]) + state->bpp[i] = drm_format_plane_cpp(fb->pixel_format, i); + if (!state->bpp[i]) return -EINVAL; - switch (plane->rotation & 0xf) { + switch (state->base.rotation & 0xf) { case BIT(DRM_ROTATE_90): - offset = ((y_offset + req->src_y + patched_src_w - 1) / - ydiv) * req->fb->pitches[i]; - offset += ((x_offset + req->src_x) / xdiv) * - req->bpp[i]; - req->xstride[i] = ((patched_src_w - 1) / ydiv) * - req->fb->pitches[i]; - req->pstride[i] = -req->fb->pitches[i] - req->bpp[i]; + offset = ((y_offset + state->src_y + patched_src_w - 1) / + ydiv) * fb->pitches[i]; + offset += ((x_offset + state->src_x) / xdiv) * + state->bpp[i]; + state->xstride[i] = ((patched_src_w - 1) / ydiv) * + fb->pitches[i]; + state->pstride[i] = -fb->pitches[i] - state->bpp[i]; break; case BIT(DRM_ROTATE_180): - offset = ((y_offset + req->src_y + patched_src_h - 1) / - ydiv) * req->fb->pitches[i]; - offset += ((x_offset + req->src_x + patched_src_w - 1) / - xdiv) * req->bpp[i]; - req->xstride[i] = ((((patched_src_w - 1) / xdiv) - 1) * - req->bpp[i]) - req->fb->pitches[i]; - req->pstride[i] = -2 * req->bpp[i]; + offset = ((y_offset + state->src_y + patched_src_h - 1) / + ydiv) * fb->pitches[i]; + offset += ((x_offset + state->src_x + patched_src_w - 1) / + xdiv) * state->bpp[i]; + state->xstride[i] = ((((patched_src_w - 1) / xdiv) - 1) * + state->bpp[i]) - fb->pitches[i]; + state->pstride[i] = -2 * state->bpp[i]; break; case BIT(DRM_ROTATE_270): - offset = ((y_offset + req->src_y) / ydiv) * - req->fb->pitches[i]; - offset += ((x_offset + req->src_x + patched_src_h - 1) / - xdiv) * req->bpp[i]; - req->xstride[i] = -(((patched_src_w - 1) / ydiv) * - req->fb->pitches[i]) - - (2 * req->bpp[i]); - req->pstride[i] = req->fb->pitches[i] - req->bpp[i]; + offset = ((y_offset + state->src_y) / ydiv) * + fb->pitches[i]; + offset += ((x_offset + state->src_x + patched_src_h - 1) / + xdiv) * state->bpp[i]; + state->xstride[i] = -(((patched_src_w - 1) / ydiv) * + 
fb->pitches[i]) - + (2 * state->bpp[i]); + state->pstride[i] = fb->pitches[i] - state->bpp[i]; break; case BIT(DRM_ROTATE_0): default: - offset = ((y_offset + req->src_y) / ydiv) * - req->fb->pitches[i]; - offset += ((x_offset + req->src_x) / xdiv) * - req->bpp[i]; - req->xstride[i] = req->fb->pitches[i] - + offset = ((y_offset + state->src_y) / ydiv) * + fb->pitches[i]; + offset += ((x_offset + state->src_x) / xdiv) * + state->bpp[i]; + state->xstride[i] = fb->pitches[i] - ((patched_src_w / xdiv) * - req->bpp[i]); - req->pstride[i] = 0; + state->bpp[i]); + state->pstride[i] = 0; break; } - req->offsets[i] = offset + req->fb->offsets[i]; + state->offsets[i] = offset + fb->offsets[i]; } - req->src_w = patched_src_w; - req->src_h = patched_src_h; - req->crtc_w = patched_crtc_w; - req->crtc_h = patched_crtc_h; + state->src_w = patched_src_w; + state->src_h = patched_src_h; + state->crtc_w = patched_crtc_w; + state->crtc_h = patched_crtc_h; - return atmel_hlcdc_plane_check_update_req(p, req, mode); -} + if (!layout->size && + (mode->hdisplay != state->crtc_w || + mode->vdisplay != state->crtc_h)) + return -EINVAL; -int atmel_hlcdc_plane_apply_update_req(struct drm_plane *p, - struct atmel_hlcdc_plane_update_req *req) -{ - struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); - int ret; + if (plane->layer.desc->max_height && + state->crtc_h > plane->layer.desc->max_height) + return -EINVAL; - ret = atmel_hlcdc_layer_update_start(&plane->layer); - if (ret) - return ret; + if (plane->layer.desc->max_width && + state->crtc_w > plane->layer.desc->max_width) + return -EINVAL; - atmel_hlcdc_plane_update_pos_and_size(plane, req); - atmel_hlcdc_plane_update_general_settings(plane, req); - atmel_hlcdc_plane_update_format(plane, req); - atmel_hlcdc_plane_update_buffers(plane, req); + if ((state->crtc_h != state->src_h || state->crtc_w != state->src_w) && + (!layout->memsize || + atmel_hlcdc_format_embeds_alpha(state->base.fb->pixel_format))) + return -EINVAL; - atmel_hlcdc_layer_update_commit(&plane->layer); + if (state->crtc_x < 0 || state->crtc_y < 0) + return -EINVAL; + + if (state->crtc_w + state->crtc_x > mode->hdisplay || + state->crtc_h + state->crtc_y > mode->vdisplay) + return -EINVAL; return 0; } -int atmel_hlcdc_plane_update_with_mode(struct drm_plane *p, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, - unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h, - const struct drm_display_mode *mode) +static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p, + struct drm_framebuffer *fb) { struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); - struct atmel_hlcdc_plane_update_req req; - int ret = 0; - - memset(&req, 0, sizeof(req)); - req.crtc_x = crtc_x; - req.crtc_y = crtc_y; - req.crtc_w = crtc_w; - req.crtc_h = crtc_h; - req.src_x = src_x; - req.src_y = src_y; - req.src_w = src_w; - req.src_h = src_h; - req.fb = fb; - - ret = atmel_hlcdc_plane_prepare_update_req(&plane->base, &req, mode); - if (ret) - return ret; - if (!req.crtc_h || !req.crtc_w) - return atmel_hlcdc_layer_disable(&plane->layer); - - return atmel_hlcdc_plane_apply_update_req(&plane->base, &req); + return atmel_hlcdc_layer_update_start(&plane->layer); } -static int atmel_hlcdc_plane_update(struct drm_plane *p, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h) +static 
void atmel_hlcdc_plane_atomic_update(struct drm_plane *p, + struct drm_plane_state *old_s) { - return atmel_hlcdc_plane_update_with_mode(p, crtc, fb, crtc_x, crtc_y, - crtc_w, crtc_h, src_x, src_y, - src_w, src_h, &crtc->hwmode); + struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); + struct atmel_hlcdc_plane_state *state = + drm_plane_state_to_atmel_hlcdc_plane_state(p->state); + + if (!p->state->crtc || !p->state->fb) + return; + + atmel_hlcdc_plane_update_pos_and_size(plane, state); + atmel_hlcdc_plane_update_general_settings(plane, state); + atmel_hlcdc_plane_update_format(plane, state); + atmel_hlcdc_plane_update_buffers(plane, state); + atmel_hlcdc_plane_update_disc_area(plane, state); + + atmel_hlcdc_layer_update_commit(&plane->layer); } -static int atmel_hlcdc_plane_disable(struct drm_plane *p) +static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p, + struct drm_plane_state *old_state) { struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); - return atmel_hlcdc_layer_disable(&plane->layer); + atmel_hlcdc_layer_disable(&plane->layer); } static void atmel_hlcdc_plane_destroy(struct drm_plane *p) @@ -635,38 +759,36 @@ static void atmel_hlcdc_plane_destroy(struct drm_plane *p) devm_kfree(p->dev->dev, plane); } -static int atmel_hlcdc_plane_set_alpha(struct atmel_hlcdc_plane *plane, - u8 alpha) +static int atmel_hlcdc_plane_atomic_set_property(struct drm_plane *p, + struct drm_plane_state *s, + struct drm_property *property, + uint64_t val) { - atmel_hlcdc_layer_update_start(&plane->layer); - atmel_hlcdc_layer_update_cfg(&plane->layer, - plane->layer.desc->layout.general_config, - ATMEL_HLCDC_LAYER_GA_MASK, - alpha << ATMEL_HLCDC_LAYER_GA_SHIFT); - atmel_hlcdc_layer_update_commit(&plane->layer); - - return 0; -} + struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); + struct atmel_hlcdc_plane_properties *props = plane->properties; + struct atmel_hlcdc_plane_state *state = + drm_plane_state_to_atmel_hlcdc_plane_state(s); -static int atmel_hlcdc_plane_set_rotation(struct atmel_hlcdc_plane *plane, - unsigned int rotation) -{ - plane->rotation = rotation; + if (property == props->alpha) + state->alpha = val; + else + return -EINVAL; return 0; } -static int atmel_hlcdc_plane_set_property(struct drm_plane *p, - struct drm_property *property, - uint64_t value) +static int atmel_hlcdc_plane_atomic_get_property(struct drm_plane *p, + const struct drm_plane_state *s, + struct drm_property *property, + uint64_t *val) { struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); struct atmel_hlcdc_plane_properties *props = plane->properties; + const struct atmel_hlcdc_plane_state *state = + container_of(s, const struct atmel_hlcdc_plane_state, base); if (property == props->alpha) - atmel_hlcdc_plane_set_alpha(plane, value); - else if (property == props->rotation) - atmel_hlcdc_plane_set_rotation(plane, value); + *val = state->alpha; else return -EINVAL; @@ -694,8 +816,8 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane, if (desc->layout.xstride && desc->layout.pstride) drm_object_attach_property(&plane->base.base, - props->rotation, - BIT(DRM_ROTATE_0)); + plane->base.dev->mode_config.rotation_property, + BIT(DRM_ROTATE_0)); if (desc->layout.csc) { /* @@ -717,11 +839,76 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane, } } +static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = { + .prepare_fb = atmel_hlcdc_plane_prepare_fb, + .atomic_check = 
atmel_hlcdc_plane_atomic_check, + .atomic_update = atmel_hlcdc_plane_atomic_update, + .atomic_disable = atmel_hlcdc_plane_atomic_disable, +}; + +static void atmel_hlcdc_plane_reset(struct drm_plane *p) +{ + struct atmel_hlcdc_plane_state *state; + + if (p->state) { + state = drm_plane_state_to_atmel_hlcdc_plane_state(p->state); + + if (state->base.fb) + drm_framebuffer_unreference(state->base.fb); + + kfree(state); + p->state = NULL; + } + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) { + state->alpha = 255; + p->state = &state->base; + p->state->plane = p; + } +} + +static struct drm_plane_state * +atmel_hlcdc_plane_atomic_duplicate_state(struct drm_plane *p) +{ + struct atmel_hlcdc_plane_state *state = + drm_plane_state_to_atmel_hlcdc_plane_state(p->state); + struct atmel_hlcdc_plane_state *copy; + + copy = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (!copy) + return NULL; + + copy->disc_updated = false; + + if (copy->base.fb) + drm_framebuffer_reference(copy->base.fb); + + return &copy->base; +} + +static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *plane, + struct drm_plane_state *s) +{ + struct atmel_hlcdc_plane_state *state = + drm_plane_state_to_atmel_hlcdc_plane_state(s); + + if (s->fb) + drm_framebuffer_unreference(s->fb); + + kfree(state); +} + static struct drm_plane_funcs layer_plane_funcs = { - .update_plane = atmel_hlcdc_plane_update, - .disable_plane = atmel_hlcdc_plane_disable, - .set_property = atmel_hlcdc_plane_set_property, + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .set_property = drm_atomic_helper_plane_set_property, .destroy = atmel_hlcdc_plane_destroy, + .reset = atmel_hlcdc_plane_reset, + .atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state, + .atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state, + .atomic_set_property = atmel_hlcdc_plane_atomic_set_property, + .atomic_get_property = atmel_hlcdc_plane_atomic_get_property, }; static struct atmel_hlcdc_plane * @@ -755,6 +942,9 @@ atmel_hlcdc_plane_create(struct drm_device *dev, if (ret) return ERR_PTR(ret); + drm_plane_helper_add(&plane->base, + &atmel_hlcdc_layer_plane_helper_funcs); + /* Set default property values*/ atmel_hlcdc_plane_init_properties(plane, desc, props); @@ -774,12 +964,13 @@ atmel_hlcdc_plane_create_properties(struct drm_device *dev) if (!props->alpha) return ERR_PTR(-ENOMEM); - props->rotation = drm_mode_create_rotation_property(dev, - BIT(DRM_ROTATE_0) | - BIT(DRM_ROTATE_90) | - BIT(DRM_ROTATE_180) | - BIT(DRM_ROTATE_270)); - if (!props->rotation) + dev->mode_config.rotation_property = + drm_mode_create_rotation_property(dev, + BIT(DRM_ROTATE_0) | + BIT(DRM_ROTATE_90) | + BIT(DRM_ROTATE_180) | + BIT(DRM_ROTATE_270)); + if (!dev->mode_config.rotation_property) return ERR_PTR(-ENOMEM); return props; diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index b70f3c8d4e8a..f38bbcdf929b 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -1,10 +1,13 @@ -config DRM_PTN3460 - tristate "PTN3460 DP/LVDS bridge" +config DRM_DW_HDMI + tristate depends on DRM select DRM_KMS_HELPER - ---help--- -config DRM_DW_HDMI - tristate +config DRM_PTN3460 + tristate "PTN3460 DP/LVDS bridge" depends on DRM + depends on OF select DRM_KMS_HELPER + select DRM_PANEL + ---help--- + ptn3460 eDP-LVDS bridge chip driver.
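A condensed sketch of the subclassed plane-state pattern the atmel-hlcdc conversion above relies on: the driver wraps struct drm_plane_state in its own state object so that driver properties (alpha, the disc_updated flag) follow the atomic duplicate/destroy life cycle. The struct below is reduced to the fields visible in these hunks and the cast helper is assumed from the patch, so read it as a schematic illustration, not the driver's actual definitions.

struct atmel_hlcdc_plane_state {
	struct drm_plane_state base;	/* must stay first so the cast helper below is valid */
	u8 alpha;			/* driver-specific "alpha" property */
	bool disc_updated;		/* per-commit flag, cleared on every duplication */
	/* ... layout/stride fields omitted in this sketch ... */
};

static inline struct atmel_hlcdc_plane_state *
drm_plane_state_to_atmel_hlcdc_plane_state(struct drm_plane_state *s)
{
	return container_of(s, struct atmel_hlcdc_plane_state, base);
}

/*
 * .atomic_duplicate_state: copy the whole driver state, clear per-commit
 * flags and take a framebuffer reference owned by the new state.
 */
static struct drm_plane_state *sketch_duplicate_state(struct drm_plane *p)
{
	struct atmel_hlcdc_plane_state *copy;

	copy = kmemdup(drm_plane_state_to_atmel_hlcdc_plane_state(p->state),
		       sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return NULL;

	copy->disc_updated = false;
	if (copy->base.fb)
		drm_framebuffer_reference(copy->base.fb);

	return &copy->base;
}

/* .atomic_destroy_state: drop the framebuffer reference, free the wrapper. */
static void sketch_destroy_state(struct drm_plane *p, struct drm_plane_state *s)
{
	if (s->fb)
		drm_framebuffer_unreference(s->fb);
	kfree(drm_plane_state_to_atmel_hlcdc_plane_state(s));
}

These two callbacks, together with .reset, are what the layer_plane_funcs table above plugs in next to the generic drm_atomic_helper_* entry points.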
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw_hdmi.c index 6ea000504173..cd6a70647e32 100644 --- a/drivers/gpu/drm/bridge/dw_hdmi.c +++ b/drivers/gpu/drm/bridge/dw_hdmi.c @@ -1373,12 +1373,6 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge) dw_hdmi_poweron(hdmi); } -static void dw_hdmi_bridge_destroy(struct drm_bridge *bridge) -{ - drm_bridge_cleanup(bridge); - kfree(bridge); -} - static void dw_hdmi_bridge_nop(struct drm_bridge *bridge) { /* do nothing */ @@ -1468,7 +1462,6 @@ struct drm_bridge_funcs dw_hdmi_bridge_funcs = { .post_disable = dw_hdmi_bridge_nop, .mode_set = dw_hdmi_bridge_mode_set, .mode_fixup = dw_hdmi_bridge_mode_fixup, - .destroy = dw_hdmi_bridge_destroy, }; static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id) @@ -1531,8 +1524,8 @@ static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi) hdmi->bridge = bridge; bridge->driver_private = hdmi; - - ret = drm_bridge_init(drm, bridge, &dw_hdmi_bridge_funcs); + bridge->funcs = &dw_hdmi_bridge_funcs; + ret = drm_bridge_attach(drm, bridge); if (ret) { DRM_ERROR("Failed to initialize bridge with drm\n"); return -EINVAL; @@ -1649,7 +1642,7 @@ int dw_hdmi_bind(struct device *dev, struct device *master, dw_hdmi_irq, IRQF_SHARED, dev_name(dev), hdmi); if (ret) - return ret; + goto err_iahb; /* * To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c index d466696ed5e8..826833e396f0 100644 --- a/drivers/gpu/drm/bridge/ptn3460.c +++ b/drivers/gpu/drm/bridge/ptn3460.c @@ -13,20 +13,23 @@ * GNU General Public License for more details. */ +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/i2c.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_gpio.h> -#include <linux/i2c.h> -#include <linux/gpio.h> -#include <linux/delay.h> +#include <linux/of_graph.h> -#include "drmP.h" -#include "drm_edid.h" -#include "drm_crtc.h" -#include "drm_crtc_helper.h" +#include <drm/drm_panel.h> #include "bridge/ptn3460.h" +#include "drm_crtc.h" +#include "drm_crtc_helper.h" +#include "drm_edid.h" +#include "drmP.h" + #define PTN3460_EDID_ADDR 0x0 #define PTN3460_EDID_EMULATION_ADDR 0x84 #define PTN3460_EDID_ENABLE_EMULATION 0 @@ -36,15 +39,27 @@ struct ptn3460_bridge { struct drm_connector connector; struct i2c_client *client; - struct drm_encoder *encoder; - struct drm_bridge *bridge; + struct drm_bridge bridge; struct edid *edid; - int gpio_pd_n; - int gpio_rst_n; + struct drm_panel *panel; + struct gpio_desc *gpio_pd_n; + struct gpio_desc *gpio_rst_n; u32 edid_emulation; bool enabled; }; +static inline struct ptn3460_bridge * + bridge_to_ptn3460(struct drm_bridge *bridge) +{ + return container_of(bridge, struct ptn3460_bridge, bridge); +} + +static inline struct ptn3460_bridge * + connector_to_ptn3460(struct drm_connector *connector) +{ + return container_of(connector, struct ptn3460_bridge, connector); +} + static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr, u8 *buf, int len) { @@ -92,7 +107,7 @@ static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge) ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_SRAM_LOAD_ADDR, ptn_bridge->edid_emulation); if (ret) { - DRM_ERROR("Failed to transfer edid to sram, ret=%d\n", ret); + DRM_ERROR("Failed to transfer EDID to sram, ret=%d\n", ret); return ret; } @@ -102,7 +117,7 @@ static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge) ret = ptn3460_write_byte(ptn_bridge, 
PTN3460_EDID_EMULATION_ADDR, val); if (ret) { - DRM_ERROR("Failed to write edid value, ret=%d\n", ret); + DRM_ERROR("Failed to write EDID value, ret=%d\n", ret); return ret; } @@ -111,19 +126,21 @@ static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge) static void ptn3460_pre_enable(struct drm_bridge *bridge) { - struct ptn3460_bridge *ptn_bridge = bridge->driver_private; + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); int ret; if (ptn_bridge->enabled) return; - if (gpio_is_valid(ptn_bridge->gpio_pd_n)) - gpio_set_value(ptn_bridge->gpio_pd_n, 1); + gpiod_set_value(ptn_bridge->gpio_pd_n, 1); + + gpiod_set_value(ptn_bridge->gpio_rst_n, 0); + usleep_range(10, 20); + gpiod_set_value(ptn_bridge->gpio_rst_n, 1); - if (gpio_is_valid(ptn_bridge->gpio_rst_n)) { - gpio_set_value(ptn_bridge->gpio_rst_n, 0); - udelay(10); - gpio_set_value(ptn_bridge->gpio_rst_n, 1); + if (drm_panel_prepare(ptn_bridge->panel)) { + DRM_ERROR("failed to prepare panel\n"); + return; } /* @@ -135,73 +152,67 @@ static void ptn3460_pre_enable(struct drm_bridge *bridge) ret = ptn3460_select_edid(ptn_bridge); if (ret) - DRM_ERROR("Select edid failed ret=%d\n", ret); + DRM_ERROR("Select EDID failed ret=%d\n", ret); ptn_bridge->enabled = true; } static void ptn3460_enable(struct drm_bridge *bridge) { + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); + + if (drm_panel_enable(ptn_bridge->panel)) { + DRM_ERROR("failed to enable panel\n"); + return; + } } static void ptn3460_disable(struct drm_bridge *bridge) { - struct ptn3460_bridge *ptn_bridge = bridge->driver_private; + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); if (!ptn_bridge->enabled) return; ptn_bridge->enabled = false; - if (gpio_is_valid(ptn_bridge->gpio_rst_n)) - gpio_set_value(ptn_bridge->gpio_rst_n, 1); + if (drm_panel_disable(ptn_bridge->panel)) { + DRM_ERROR("failed to disable panel\n"); + return; + } - if (gpio_is_valid(ptn_bridge->gpio_pd_n)) - gpio_set_value(ptn_bridge->gpio_pd_n, 0); + gpiod_set_value(ptn_bridge->gpio_rst_n, 1); + gpiod_set_value(ptn_bridge->gpio_pd_n, 0); } static void ptn3460_post_disable(struct drm_bridge *bridge) { -} + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); -void ptn3460_bridge_destroy(struct drm_bridge *bridge) -{ - struct ptn3460_bridge *ptn_bridge = bridge->driver_private; - - drm_bridge_cleanup(bridge); - if (gpio_is_valid(ptn_bridge->gpio_pd_n)) - gpio_free(ptn_bridge->gpio_pd_n); - if (gpio_is_valid(ptn_bridge->gpio_rst_n)) - gpio_free(ptn_bridge->gpio_rst_n); - /* Nothing else to free, we've got devm allocated memory */ + if (drm_panel_unprepare(ptn_bridge->panel)) { + DRM_ERROR("failed to unprepare panel\n"); + return; + } } -struct drm_bridge_funcs ptn3460_bridge_funcs = { - .pre_enable = ptn3460_pre_enable, - .enable = ptn3460_enable, - .disable = ptn3460_disable, - .post_disable = ptn3460_post_disable, - .destroy = ptn3460_bridge_destroy, -}; - -int ptn3460_get_modes(struct drm_connector *connector) +static int ptn3460_get_modes(struct drm_connector *connector) { struct ptn3460_bridge *ptn_bridge; u8 *edid; - int ret, num_modes; + int ret, num_modes = 0; bool power_off; - ptn_bridge = container_of(connector, struct ptn3460_bridge, connector); + ptn_bridge = connector_to_ptn3460(connector); if (ptn_bridge->edid) return drm_add_edid_modes(connector, ptn_bridge->edid); power_off = !ptn_bridge->enabled; - ptn3460_pre_enable(ptn_bridge->bridge); + ptn3460_pre_enable(&ptn_bridge->bridge); edid = kmalloc(EDID_LENGTH, GFP_KERNEL); if (!edid) { - 
DRM_ERROR("Failed to allocate edid\n"); + DRM_ERROR("Failed to allocate EDID\n"); return 0; } @@ -209,7 +220,6 @@ int ptn3460_get_modes(struct drm_connector *connector) EDID_LENGTH); if (ret) { kfree(edid); - num_modes = 0; goto out; } @@ -220,124 +230,188 @@ int ptn3460_get_modes(struct drm_connector *connector) out: if (power_off) - ptn3460_disable(ptn_bridge->bridge); + ptn3460_disable(&ptn_bridge->bridge); return num_modes; } -struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector) +static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector) { - struct ptn3460_bridge *ptn_bridge; - - ptn_bridge = container_of(connector, struct ptn3460_bridge, connector); + struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector); - return ptn_bridge->encoder; + return ptn_bridge->bridge.encoder; } -struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { +static struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { .get_modes = ptn3460_get_modes, .best_encoder = ptn3460_best_encoder, }; -enum drm_connector_status ptn3460_detect(struct drm_connector *connector, +static enum drm_connector_status ptn3460_detect(struct drm_connector *connector, bool force) { return connector_status_connected; } -void ptn3460_connector_destroy(struct drm_connector *connector) +static void ptn3460_connector_destroy(struct drm_connector *connector) { drm_connector_cleanup(connector); } -struct drm_connector_funcs ptn3460_connector_funcs = { +static struct drm_connector_funcs ptn3460_connector_funcs = { .dpms = drm_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = ptn3460_detect, .destroy = ptn3460_connector_destroy, }; -int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder, - struct i2c_client *client, struct device_node *node) +int ptn3460_bridge_attach(struct drm_bridge *bridge) { + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); int ret; - struct drm_bridge *bridge; - struct ptn3460_bridge *ptn_bridge; - bridge = devm_kzalloc(dev->dev, sizeof(*bridge), GFP_KERNEL); - if (!bridge) { - DRM_ERROR("Failed to allocate drm bridge\n"); - return -ENOMEM; + if (!bridge->encoder) { + DRM_ERROR("Parent encoder object not found"); + return -ENODEV; + } + + ptn_bridge->connector.polled = DRM_CONNECTOR_POLL_HPD; + ret = drm_connector_init(bridge->dev, &ptn_bridge->connector, + &ptn3460_connector_funcs, DRM_MODE_CONNECTOR_LVDS); + if (ret) { + DRM_ERROR("Failed to initialize connector with drm\n"); + return ret; } + drm_connector_helper_add(&ptn_bridge->connector, + &ptn3460_connector_helper_funcs); + drm_connector_register(&ptn_bridge->connector); + drm_mode_connector_attach_encoder(&ptn_bridge->connector, + bridge->encoder); - ptn_bridge = devm_kzalloc(dev->dev, sizeof(*ptn_bridge), GFP_KERNEL); + if (ptn_bridge->panel) + drm_panel_attach(ptn_bridge->panel, &ptn_bridge->connector); + + drm_helper_hpd_irq_event(ptn_bridge->connector.dev); + + return ret; +} + +static struct drm_bridge_funcs ptn3460_bridge_funcs = { + .pre_enable = ptn3460_pre_enable, + .enable = ptn3460_enable, + .disable = ptn3460_disable, + .post_disable = ptn3460_post_disable, + .attach = ptn3460_bridge_attach, +}; + +static int ptn3460_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct ptn3460_bridge *ptn_bridge; + struct device_node *endpoint, *panel_node; + int ret; + + ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL); if (!ptn_bridge) { - 
DRM_ERROR("Failed to allocate ptn bridge\n"); return -ENOMEM; } - ptn_bridge->client = client; - ptn_bridge->encoder = encoder; - ptn_bridge->bridge = bridge; - ptn_bridge->gpio_pd_n = of_get_named_gpio(node, "powerdown-gpio", 0); - if (gpio_is_valid(ptn_bridge->gpio_pd_n)) { - ret = gpio_request_one(ptn_bridge->gpio_pd_n, - GPIOF_OUT_INIT_HIGH, "PTN3460_PD_N"); - if (ret) { - DRM_ERROR("Request powerdown-gpio failed (%d)\n", ret); - return ret; + endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); + if (endpoint) { + panel_node = of_graph_get_remote_port_parent(endpoint); + if (panel_node) { + ptn_bridge->panel = of_drm_find_panel(panel_node); + of_node_put(panel_node); + if (!ptn_bridge->panel) + return -EPROBE_DEFER; } } - ptn_bridge->gpio_rst_n = of_get_named_gpio(node, "reset-gpio", 0); - if (gpio_is_valid(ptn_bridge->gpio_rst_n)) { - /* - * Request the reset pin low to avoid the bridge being - * initialized prematurely - */ - ret = gpio_request_one(ptn_bridge->gpio_rst_n, - GPIOF_OUT_INIT_LOW, "PTN3460_RST_N"); - if (ret) { - DRM_ERROR("Request reset-gpio failed (%d)\n", ret); - gpio_free(ptn_bridge->gpio_pd_n); - return ret; - } + ptn_bridge->client = client; + + ptn_bridge->gpio_pd_n = devm_gpiod_get(&client->dev, "powerdown"); + if (IS_ERR(ptn_bridge->gpio_pd_n)) { + ret = PTR_ERR(ptn_bridge->gpio_pd_n); + dev_err(dev, "cannot get gpio_pd_n %d\n", ret); + return ret; } - ret = of_property_read_u32(node, "edid-emulation", - &ptn_bridge->edid_emulation); + ret = gpiod_direction_output(ptn_bridge->gpio_pd_n, 1); if (ret) { - DRM_ERROR("Can't read edid emulation value\n"); - goto err; + DRM_ERROR("cannot configure gpio_pd_n\n"); + return ret; } - ret = drm_bridge_init(dev, bridge, &ptn3460_bridge_funcs); + ptn_bridge->gpio_rst_n = devm_gpiod_get(&client->dev, "reset"); + if (IS_ERR(ptn_bridge->gpio_rst_n)) { + ret = PTR_ERR(ptn_bridge->gpio_rst_n); + DRM_ERROR("cannot get gpio_rst_n %d\n", ret); + return ret; + } + /* + * Request the reset pin low to avoid the bridge being + * initialized prematurely + */ + ret = gpiod_direction_output(ptn_bridge->gpio_rst_n, 0); if (ret) { - DRM_ERROR("Failed to initialize bridge with drm\n"); - goto err; + DRM_ERROR("cannot configure gpio_rst_n\n"); + return ret; } - bridge->driver_private = ptn_bridge; - encoder->bridge = bridge; + ret = of_property_read_u32(dev->of_node, "edid-emulation", + &ptn_bridge->edid_emulation); + if (ret) { + dev_err(dev, "Can't read EDID emulation value\n"); + return ret; + } - ret = drm_connector_init(dev, &ptn_bridge->connector, - &ptn3460_connector_funcs, DRM_MODE_CONNECTOR_LVDS); + ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs; + ptn_bridge->bridge.of_node = dev->of_node; + ret = drm_bridge_add(&ptn_bridge->bridge); if (ret) { - DRM_ERROR("Failed to initialize connector with drm\n"); - goto err; + DRM_ERROR("Failed to add bridge\n"); + return ret; } - drm_connector_helper_add(&ptn_bridge->connector, - &ptn3460_connector_helper_funcs); - drm_connector_register(&ptn_bridge->connector); - drm_mode_connector_attach_encoder(&ptn_bridge->connector, encoder); + + i2c_set_clientdata(client, ptn_bridge); return 0; +} -err: - if (gpio_is_valid(ptn_bridge->gpio_pd_n)) - gpio_free(ptn_bridge->gpio_pd_n); - if (gpio_is_valid(ptn_bridge->gpio_rst_n)) - gpio_free(ptn_bridge->gpio_rst_n); - return ret; +static int ptn3460_remove(struct i2c_client *client) +{ + struct ptn3460_bridge *ptn_bridge = i2c_get_clientdata(client); + + drm_bridge_remove(&ptn_bridge->bridge); + + return 0; } -EXPORT_SYMBOL(ptn3460_init); + +static 
const struct i2c_device_id ptn3460_i2c_table[] = { + {"nxp,ptn3460", 0}, + {}, +}; +MODULE_DEVICE_TABLE(i2c, ptn3460_i2c_table); + +static const struct of_device_id ptn3460_match[] = { + { .compatible = "nxp,ptn3460" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ptn3460_match); + +static struct i2c_driver ptn3460_driver = { + .id_table = ptn3460_i2c_table, + .probe = ptn3460_probe, + .remove = ptn3460_remove, + .driver = { + .name = "nxp,ptn3460", + .owner = THIS_MODULE, + .of_match_table = ptn3460_match, + }, +}; +module_i2c_driver(ptn3460_driver); + +MODULE_AUTHOR("Sean Paul <seanpaul@chromium.org>"); +MODULE_DESCRIPTION("NXP ptn3460 eDP-LVDS converter driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index c2a1cba1e984..b9140032962d 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -16,9 +16,12 @@ #include "cirrus_drv.h" int cirrus_modeset = -1; +int cirrus_bpp = 24; MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); module_param_named(modeset, cirrus_modeset, int, 0400); +MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)"); +module_param_named(bpp, cirrus_bpp, int, 0400); /* * This is the generic driver code. This binds the driver to the drm core, diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index 693a4565c4ff..705061537a27 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo) int cirrus_bo_push_sysram(struct cirrus_bo *bo); int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr); + +extern int cirrus_bpp; + #endif /* __CIRRUS_DRV_H__ */ diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index 4c2d68e9102d..e4b976658087 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height, const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */ const int max_size = cdev->mc.vram_size; + if (bpp > cirrus_bpp) + return false; if (bpp > 32) return false; diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index 99d4a74ffeaf..61385f2298bf 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector) int count; /* Just add a static list of modes */ - count = drm_add_modes_noedid(connector, 1280, 1024); - drm_set_preferred_mode(connector, 1024, 768); + if (cirrus_bpp <= 24) { + count = drm_add_modes_noedid(connector, 1280, 1024); + drm_set_preferred_mode(connector, 1024, 768); + } else { + count = drm_add_modes_noedid(connector, 800, 600); + drm_set_preferred_mode(connector, 800, 600); + } return count; } diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 4c5c9c3899e0..321e098ddf04 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -92,7 +92,7 @@ drm_atomic_state_alloc(struct drm_device *dev) state->dev = dev; - DRM_DEBUG_KMS("Allocate atomic state %p\n", state); + DRM_DEBUG_ATOMIC("Allocate atomic state %p\n", state); return state; fail: @@ -122,7 +122,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state) struct drm_mode_config *config = &dev->mode_config; int i; - DRM_DEBUG_KMS("Clearing atomic state %p\n", state); + 
DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state); for (i = 0; i < state->num_connector; i++) { struct drm_connector *connector = state->connectors[i]; @@ -134,6 +134,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state) connector->funcs->atomic_destroy_state(connector, state->connector_states[i]); + state->connector_states[i] = NULL; } for (i = 0; i < config->num_crtc; i++) { @@ -144,6 +145,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state) crtc->funcs->atomic_destroy_state(crtc, state->crtc_states[i]); + state->crtc_states[i] = NULL; } for (i = 0; i < config->num_total_plane; i++) { @@ -154,6 +156,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state) plane->funcs->atomic_destroy_state(plane, state->plane_states[i]); + state->plane_states[i] = NULL; } } EXPORT_SYMBOL(drm_atomic_state_clear); @@ -169,7 +172,7 @@ void drm_atomic_state_free(struct drm_atomic_state *state) { drm_atomic_state_clear(state); - DRM_DEBUG_KMS("Freeing atomic state %p\n", state); + DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state); kfree_state(state); } @@ -214,8 +217,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, state->crtcs[index] = crtc; crtc_state->state = state; - DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n", - crtc->base.id, crtc_state, state); + DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n", + crtc->base.id, crtc_state, state); return crtc_state; } @@ -241,7 +244,13 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, struct drm_crtc_state *state, struct drm_property *property, uint64_t val) { - if (crtc->funcs->atomic_set_property) + struct drm_device *dev = crtc->dev; + struct drm_mode_config *config = &dev->mode_config; + + /* FIXME: Mode prop is missing, which also controls ->enable. */ + if (property == config->prop_active) { + state->active = val; + } else if (crtc->funcs->atomic_set_property) return crtc->funcs->atomic_set_property(crtc, state, property, val); return -EINVAL; } @@ -282,6 +291,13 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc, * * TODO: Add generic modeset state checks once we support those. 
*/ + + if (state->active && !state->enable) { + DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n", + crtc->base.id); + return -EINVAL; + } + return 0; } @@ -324,8 +340,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, state->planes[index] = plane; plane_state->state = state; - DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n", - plane->base.id, plane_state, state); + DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n", + plane->base.id, plane_state, state); if (plane_state->crtc) { struct drm_crtc_state *crtc_state; @@ -461,10 +477,10 @@ static int drm_atomic_plane_check(struct drm_plane *plane, /* either *both* CRTC and FB must be set, or neither */ if (WARN_ON(state->crtc && !state->fb)) { - DRM_DEBUG_KMS("CRTC set but no FB\n"); + DRM_DEBUG_ATOMIC("CRTC set but no FB\n"); return -EINVAL; } else if (WARN_ON(state->fb && !state->crtc)) { - DRM_DEBUG_KMS("FB set but no CRTC\n"); + DRM_DEBUG_ATOMIC("FB set but no CRTC\n"); return -EINVAL; } @@ -474,7 +490,7 @@ static int drm_atomic_plane_check(struct drm_plane *plane, /* Check whether this plane is usable on this CRTC */ if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) { - DRM_DEBUG_KMS("Invalid crtc for plane\n"); + DRM_DEBUG_ATOMIC("Invalid crtc for plane\n"); return -EINVAL; } @@ -483,8 +499,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane, if (state->fb->pixel_format == plane->format_types[i]) break; if (i == plane->format_count) { - DRM_DEBUG_KMS("Invalid pixel format %s\n", - drm_get_format_name(state->fb->pixel_format)); + DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", + drm_get_format_name(state->fb->pixel_format)); return -EINVAL; } @@ -493,9 +509,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane, state->crtc_x > INT_MAX - (int32_t) state->crtc_w || state->crtc_h > INT_MAX || state->crtc_y > INT_MAX - (int32_t) state->crtc_h) { - DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", - state->crtc_w, state->crtc_h, - state->crtc_x, state->crtc_y); + DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n", + state->crtc_w, state->crtc_h, + state->crtc_x, state->crtc_y); return -ERANGE; } @@ -507,12 +523,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane, state->src_x > fb_width - state->src_w || state->src_h > fb_height || state->src_y > fb_height - state->src_h) { - DRM_DEBUG_KMS("Invalid source coordinates " - "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n", - state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10, - state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10, - state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10, - state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10); + DRM_DEBUG_ATOMIC("Invalid source coordinates " + "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n", + state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10, + state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10, + state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10, + state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10); return -ENOSPC; } @@ -559,7 +575,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, * at most the array is a bit too large. 
*/ if (index >= state->num_connector) { - DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n"); + DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n"); return ERR_PTR(-EAGAIN); } @@ -574,8 +590,8 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, state->connectors[index] = connector; connector_state->state = state; - DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n", - connector->base.id, connector_state, state); + DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n", + connector->base.id, connector_state, state); if (connector_state->crtc) { struct drm_crtc_state *crtc_state; @@ -736,10 +752,11 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, } if (crtc) - DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n", - plane_state, crtc->base.id); + DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n", + plane_state, crtc->base.id); else - DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state); + DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n", + plane_state); return 0; } @@ -766,10 +783,11 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, plane_state->fb = fb; if (fb) - DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n", - fb->base.id, plane_state); + DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n", + fb->base.id, plane_state); else - DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state); + DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n", + plane_state); } EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); @@ -802,11 +820,11 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, conn_state->crtc = crtc; if (crtc) - DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n", - conn_state, crtc->base.id); + DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n", + conn_state, crtc->base.id); else - DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n", - conn_state); + DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n", + conn_state); return 0; } @@ -842,8 +860,8 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state, if (ret) return ret; - DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n", - crtc->base.id, state); + DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n", + crtc->base.id, state); /* * Changed connectors are already in @state, so only need to look at the @@ -885,8 +903,8 @@ drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, num_connected_connectors++; } - DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n", - state, num_connected_connectors, crtc->base.id); + DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n", + state, num_connected_connectors, crtc->base.id); return num_connected_connectors; } @@ -937,7 +955,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state) int ncrtcs = config->num_crtc; int i, ret = 0; - DRM_DEBUG_KMS("checking %p\n", state); + DRM_DEBUG_ATOMIC("checking %p\n", state); for (i = 0; i < nplanes; i++) { struct drm_plane *plane = state->planes[i]; @@ -947,8 +965,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) ret = drm_atomic_plane_check(plane, state->plane_states[i]); if (ret) { - DRM_DEBUG_KMS("[PLANE:%d] atomic core check failed\n", - plane->base.id); + DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n", + plane->base.id); return ret; } } @@ -961,8 +979,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) ret = drm_atomic_crtc_check(crtc, state->crtc_states[i]); if (ret) { - 
DRM_DEBUG_KMS("[CRTC:%d] atomic core check failed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n", + crtc->base.id); return ret; } } @@ -978,9 +996,10 @@ int drm_atomic_check_only(struct drm_atomic_state *state) if (!crtc) continue; - if (crtc_state->mode_changed) { - DRM_DEBUG_KMS("[CRTC:%d] requires full modeset\n", - crtc->base.id); + if (crtc_state->mode_changed || + crtc_state->active_changed) { + DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n", + crtc->base.id); return -EINVAL; } } @@ -1015,7 +1034,7 @@ int drm_atomic_commit(struct drm_atomic_state *state) if (ret) return ret; - DRM_DEBUG_KMS("commiting %p\n", state); + DRM_DEBUG_ATOMIC("commiting %p\n", state); return config->funcs->atomic_commit(state->dev, state, false); } @@ -1046,7 +1065,7 @@ int drm_atomic_async_commit(struct drm_atomic_state *state) if (ret) return ret; - DRM_DEBUG_KMS("commiting %p asynchronously\n", state); + DRM_DEBUG_ATOMIC("commiting %p asynchronously\n", state); return config->funcs->atomic_commit(state->dev, state, true); } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 541ba833ed36..28aa87510551 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -116,9 +116,9 @@ steal_encoder(struct drm_atomic_state *state, */ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); - DRM_DEBUG_KMS("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n", - encoder->base.id, encoder->name, - encoder_crtc->base.id); + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n", + encoder->base.id, encoder->name, + encoder_crtc->base.id); crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc); if (IS_ERR(crtc_state)) @@ -130,9 +130,9 @@ steal_encoder(struct drm_atomic_state *state, if (connector->state->best_encoder != encoder) continue; - DRM_DEBUG_KMS("Stealing encoder from [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + DRM_DEBUG_ATOMIC("Stealing encoder from [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); connector_state = drm_atomic_get_connector_state(state, connector); @@ -165,9 +165,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) if (!connector) return 0; - DRM_DEBUG_KMS("Updating routing for [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); if (connector->state->crtc != connector_state->crtc) { if (connector->state->crtc) { @@ -186,7 +186,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } if (!connector_state->crtc) { - DRM_DEBUG_KMS("Disabling [CONNECTOR:%d:%s]\n", + DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n", connector->base.id, connector->name); @@ -199,19 +199,19 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) new_encoder = funcs->best_encoder(connector); if (!new_encoder) { - DRM_DEBUG_KMS("No suitable encoder found for [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); return -EINVAL; } if (new_encoder == connector_state->best_encoder) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", - connector->base.id, - connector->name, - new_encoder->base.id, - new_encoder->name, - connector_state->crtc->base.id); + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on 
[CRTC:%d]\n", + connector->base.id, + connector->name, + new_encoder->base.id, + new_encoder->name, + connector_state->crtc->base.id); return 0; } @@ -222,9 +222,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) if (encoder_crtc) { ret = steal_encoder(state, new_encoder, encoder_crtc); if (ret) { - DRM_DEBUG_KMS("Encoder stealing failed for [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + DRM_DEBUG_ATOMIC("Encoder stealing failed for [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); return ret; } } @@ -235,12 +235,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) crtc_state = state->crtc_states[idx]; crtc_state->mode_changed = true; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n", - connector->base.id, - connector->name, - new_encoder->base.id, - new_encoder->name, - connector_state->crtc->base.id); + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n", + connector->base.id, + connector->name, + new_encoder->base.id, + new_encoder->name, + connector_state->crtc->base.id); return 0; } @@ -292,18 +292,27 @@ mode_fixup(struct drm_atomic_state *state) encoder->bridge, &crtc_state->mode, &crtc_state->adjusted_mode); if (!ret) { - DRM_DEBUG_KMS("Bridge fixup failed\n"); + DRM_DEBUG_ATOMIC("Bridge fixup failed\n"); return -EINVAL; } } - - ret = funcs->mode_fixup(encoder, &crtc_state->mode, - &crtc_state->adjusted_mode); - if (!ret) { - DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n", - encoder->base.id, encoder->name); - return -EINVAL; + if (funcs->atomic_check) { + ret = funcs->atomic_check(encoder, crtc_state, + conn_state); + if (ret) { + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n", + encoder->base.id, encoder->name); + return ret; + } + } else { + ret = funcs->mode_fixup(encoder, &crtc_state->mode, + &crtc_state->adjusted_mode); + if (!ret) { + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n", + encoder->base.id, encoder->name); + return -EINVAL; + } } } @@ -321,8 +330,8 @@ mode_fixup(struct drm_atomic_state *state) ret = funcs->mode_fixup(crtc, &crtc_state->mode, &crtc_state->adjusted_mode); if (!ret) { - DRM_DEBUG_KMS("[CRTC:%d] fixup failed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n", + crtc->base.id); return -EINVAL; } } @@ -330,6 +339,12 @@ mode_fixup(struct drm_atomic_state *state) return 0; } +static bool +needs_modeset(struct drm_crtc_state *state) +{ + return state->mode_changed || state->active_changed; +} + /** * drm_atomic_helper_check - validate state object for modeset changes * @dev: DRM device @@ -369,14 +384,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, continue; if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) { - DRM_DEBUG_KMS("[CRTC:%d] mode changed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n", + crtc->base.id); crtc_state->mode_changed = true; } if (crtc->state->enable != crtc_state->enable) { - DRM_DEBUG_KMS("[CRTC:%d] enable changed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n", + crtc->base.id); crtc_state->mode_changed = true; } } @@ -404,12 +419,27 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, crtc = state->crtcs[i]; crtc_state = state->crtc_states[i]; - if (!crtc || !crtc_state->mode_changed) + if (!crtc) continue; - DRM_DEBUG_KMS("[CRTC:%d] needs full modeset, enable: %c\n", - crtc->base.id, - crtc_state->enable ? 
'y' : 'n'); + /* + * We must set ->active_changed after walking connectors for + * otherwise an update that only changes active would result in + * a full modeset because update_connector_routing force that. + */ + if (crtc->state->active != crtc_state->active) { + DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n", + crtc->base.id); + crtc_state->active_changed = true; + } + + if (!needs_modeset(crtc_state)) + continue; + + DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n", + crtc->base.id, + crtc_state->enable ? 'y' : 'n', + crtc_state->active ? 'y' : 'n'); ret = drm_atomic_add_affected_connectors(state, crtc); if (ret != 0) @@ -419,8 +449,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, crtc); if (crtc_state->enable != !!num_connectors) { - DRM_DEBUG_KMS("[CRTC:%d] enabled/connectors mismatch\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n", + crtc->base.id); return -EINVAL; } @@ -467,8 +497,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev, ret = funcs->atomic_check(plane, plane_state); if (ret) { - DRM_DEBUG_KMS("[PLANE:%d] atomic driver check failed\n", - plane->base.id); + DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n", + plane->base.id); return ret; } } @@ -487,8 +517,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev, ret = funcs->atomic_check(crtc, state->crtc_states[i]); if (ret) { - DRM_DEBUG_KMS("[CRTC:%d] atomic driver check failed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n", + crtc->base.id); return ret; } } @@ -545,6 +575,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) struct drm_connector *connector; struct drm_encoder_helper_funcs *funcs; struct drm_encoder *encoder; + struct drm_crtc_state *old_crtc_state; old_conn_state = old_state->connector_states[i]; connector = old_state->connectors[i]; @@ -554,6 +585,11 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) if (!old_conn_state || !old_conn_state->crtc) continue; + old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)]; + + if (!old_crtc_state->active) + continue; + encoder = old_conn_state->best_encoder; /* We shouldn't get this far if we didn't previously have @@ -564,6 +600,9 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = encoder->helper_private; + DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); + /* * Each encoder has at most one connector (since we always steal * it away), so we won't call call disable hooks twice. @@ -572,7 +611,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) encoder->bridge->funcs->disable(encoder->bridge); /* Right function depends upon target state. */ - if (connector->state->crtc) + if (connector->state->crtc && funcs->prepare) funcs->prepare(encoder); else if (funcs->disable) funcs->disable(encoder); @@ -586,17 +625,26 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) for (i = 0; i < ncrtcs; i++) { struct drm_crtc_helper_funcs *funcs; struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; crtc = old_state->crtcs[i]; + old_crtc_state = old_state->crtc_states[i]; /* Shut down everything that needs a full modeset. 
*/ - if (!crtc || !crtc->state->mode_changed) + if (!crtc || !needs_modeset(crtc->state)) + continue; + + if (!old_crtc_state->active) continue; funcs = crtc->helper_private; + DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n", + crtc->base.id); + + /* Right function depends upon target state. */ - if (crtc->state->enable) + if (crtc->state->enable && funcs->prepare) funcs->prepare(crtc); else if (funcs->disable) funcs->disable(crtc); @@ -675,8 +723,12 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = crtc->helper_private; - if (crtc->state->enable) + if (crtc->state->enable && funcs->mode_set_nofb) { + DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n", + crtc->base.id); + funcs->mode_set_nofb(crtc); + } } for (i = 0; i < old_state->num_connector; i++) { @@ -697,11 +749,18 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) mode = &new_crtc_state->mode; adjusted_mode = &new_crtc_state->adjusted_mode; + if (!new_crtc_state->mode_changed) + continue; + + DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); + /* * Each encoder has at most one connector (since we always steal * it away), so we won't call call mode_set hooks twice. */ - funcs->mode_set(encoder, mode, adjusted_mode); + if (funcs->mode_set) + funcs->mode_set(encoder, mode, adjusted_mode); if (encoder->bridge && encoder->bridge->funcs->mode_set) encoder->bridge->funcs->mode_set(encoder->bridge, @@ -710,34 +769,44 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) } /** - * drm_atomic_helper_commit_pre_planes - modeset commit before plane updates + * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs * @dev: DRM device - * @state: atomic state + * @old_state: atomic state object with old state structures * - * This function commits the modeset changes that need to be committed before - * updating planes. It shuts down all the outputs that need to be shut down and + * This function shuts down all the outputs that need to be shut down and * prepares them (if required) with the new mode. + * + * For compatability with legacy crtc helpers this should be called before + * drm_atomic_helper_commit_planes(), which is what the default commit function + * does. But drivers with different needs can group the modeset commits together + * and do the plane commits at the end. This is useful for drivers doing runtime + * PM since planes updates then only happen when the CRTC is actually enabled. */ -void drm_atomic_helper_commit_pre_planes(struct drm_device *dev, - struct drm_atomic_state *state) +void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, + struct drm_atomic_state *old_state) { - disable_outputs(dev, state); - set_routing_links(dev, state); - crtc_set_mode(dev, state); + disable_outputs(dev, old_state); + set_routing_links(dev, old_state); + crtc_set_mode(dev, old_state); } -EXPORT_SYMBOL(drm_atomic_helper_commit_pre_planes); +EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables); /** - * drm_atomic_helper_commit_post_planes - modeset commit after plane updates + * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs * @dev: DRM device * @old_state: atomic state object with old state structures * - * This function commits the modeset changes that need to be committed after - * updating planes: It enables all the outputs with the new configuration which - * had to be turned off for the update. 
+ * This function enables all the outputs with the new configuration which had to + * be turned off for the update. + * + * For compatability with legacy crtc helpers this should be called after + * drm_atomic_helper_commit_planes(), which is what the default commit function + * does. But drivers with different needs can group the modeset commits together + * and do the plane commits at the end. This is useful for drivers doing runtime + * PM since planes updates then only happen when the CRTC is actually enabled. */ -void drm_atomic_helper_commit_post_planes(struct drm_device *dev, - struct drm_atomic_state *old_state) +void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, + struct drm_atomic_state *old_state) { int ncrtcs = old_state->dev->mode_config.num_crtc; int i; @@ -749,13 +818,23 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev, crtc = old_state->crtcs[i]; /* Need to filter out CRTCs where only planes change. */ - if (!crtc || !crtc->state->mode_changed) + if (!crtc || !needs_modeset(crtc->state)) + continue; + + if (!crtc->state->active) continue; funcs = crtc->helper_private; - if (crtc->state->enable) - funcs->commit(crtc); + if (crtc->state->enable) { + DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n", + crtc->base.id); + + if (funcs->enable) + funcs->enable(crtc); + else + funcs->commit(crtc); + } } for (i = 0; i < old_state->num_connector; i++) { @@ -768,9 +847,15 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev, if (!connector || !connector->state->best_encoder) continue; + if (!connector->state->crtc->state->active) + continue; + encoder = connector->state->best_encoder; funcs = encoder->helper_private; + DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); + /* * Each encoder has at most one connector (since we always steal * it away), so we won't call call enable hooks twice. @@ -778,13 +863,16 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev, if (encoder->bridge) encoder->bridge->funcs->pre_enable(encoder->bridge); - funcs->commit(encoder); + if (funcs->enable) + funcs->enable(encoder); + else + funcs->commit(encoder); if (encoder->bridge) encoder->bridge->funcs->enable(encoder->bridge); } } -EXPORT_SYMBOL(drm_atomic_helper_commit_post_planes); +EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables); static void wait_for_fences(struct drm_device *dev, struct drm_atomic_state *state) @@ -868,6 +956,11 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, if (!crtc->state->enable) continue; + /* Legacy cursor ioctls are completely unsynced, and userspace + * relies on that (by doing tons of cursor updates). */ + if (old_state->legacy_cursor_update) + continue; + if (!framebuffer_changed(dev, old_state, crtc)) continue; @@ -948,11 +1041,11 @@ int drm_atomic_helper_commit(struct drm_device *dev, wait_for_fences(dev, state); - drm_atomic_helper_commit_pre_planes(dev, state); + drm_atomic_helper_commit_modeset_disables(dev, state); drm_atomic_helper_commit_planes(dev, state); - drm_atomic_helper_commit_post_planes(dev, state); + drm_atomic_helper_commit_modeset_enables(dev, state); drm_atomic_helper_wait_for_vblanks(dev, state); @@ -1108,12 +1201,19 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, funcs = plane->helper_private; - if (!funcs || !funcs->atomic_update) + if (!funcs) continue; old_plane_state = old_state->plane_states[i]; - funcs->atomic_update(plane, old_plane_state); + /* + * Special-case disabling the plane if drivers support it. 
+ */ + if (drm_atomic_plane_disabling(plane, old_plane_state) && + funcs->atomic_disable) + funcs->atomic_disable(plane, old_plane_state); + else + funcs->atomic_update(plane, old_plane_state); } for (i = 0; i < ncrtcs; i++) { @@ -1294,6 +1394,9 @@ retry: if (ret != 0) goto fail; + if (plane == crtc->cursor) + state->legacy_cursor_update = true; + /* Driver takes ownership of state on successful commit. */ return 0; fail: @@ -1369,6 +1472,9 @@ retry: plane_state->src_h = 0; plane_state->src_w = 0; + if (plane == plane->crtc->cursor) + state->legacy_cursor_update = true; + ret = drm_atomic_commit(state); if (ret != 0) goto fail; @@ -1518,6 +1624,7 @@ retry: WARN_ON(set->num_connectors); crtc_state->enable = false; + crtc_state->active = false; ret = drm_atomic_set_crtc_for_plane(primary_state, NULL); if (ret != 0) @@ -1532,6 +1639,7 @@ retry: WARN_ON(!set->num_connectors); crtc_state->enable = true; + crtc_state->active = true; drm_mode_copy(&crtc_state->mode, set->mode); ret = drm_atomic_set_crtc_for_plane(primary_state, crtc); @@ -1581,12 +1689,13 @@ backoff: EXPORT_SYMBOL(drm_atomic_helper_set_config); /** - * drm_atomic_helper_crtc_set_property - helper for crtc prorties + * drm_atomic_helper_crtc_set_property - helper for crtc properties * @crtc: DRM crtc * @property: DRM property * @val: value of property * - * Provides a default plane disablle handler using the atomic driver interface. + * Provides a default crtc set_property handler using the atomic driver + * interface. * * RETURNS: * Zero on success, error code on failure @@ -1640,12 +1749,13 @@ backoff: EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property); /** - * drm_atomic_helper_plane_set_property - helper for plane prorties + * drm_atomic_helper_plane_set_property - helper for plane properties * @plane: DRM plane * @property: DRM property * @val: value of property * - * Provides a default plane disable handler using the atomic driver interface. + * Provides a default plane set_property handler using the atomic driver + * interface. * * RETURNS: * Zero on success, error code on failure @@ -1699,12 +1809,13 @@ backoff: EXPORT_SYMBOL(drm_atomic_helper_plane_set_property); /** - * drm_atomic_helper_connector_set_property - helper for connector prorties + * drm_atomic_helper_connector_set_property - helper for connector properties * @connector: DRM connector * @property: DRM property * @val: value of property * - * Provides a default plane disablle handler using the atomic driver interface. + * Provides a default connector set_property handler using the atomic driver + * interface. * * RETURNS: * Zero on success, error code on failure @@ -1844,6 +1955,83 @@ backoff: EXPORT_SYMBOL(drm_atomic_helper_page_flip); /** + * drm_atomic_helper_connector_dpms() - connector dpms helper implementation + * @connector: affected connector + * @mode: DPMS mode + * + * This is the main helper function provided by the atomic helper framework for + * implementing the legacy DPMS connector interface. It computes the new desired + * ->active state for the corresponding CRTC (if the connector is enabled) and + * updates it. 
+ */ +void drm_atomic_helper_connector_dpms(struct drm_connector *connector, + int mode) +{ + struct drm_mode_config *config = &connector->dev->mode_config; + struct drm_atomic_state *state; + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + struct drm_connector *tmp_connector; + int ret; + bool active = false; + + if (mode != DRM_MODE_DPMS_ON) + mode = DRM_MODE_DPMS_OFF; + + connector->dpms = mode; + crtc = connector->state->crtc; + + if (!crtc) + return; + + /* FIXME: ->dpms has no return value so can't forward the -ENOMEM. */ + state = drm_atomic_state_alloc(connector->dev); + if (!state) + return; + + state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); +retry: + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return; + + WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); + + list_for_each_entry(tmp_connector, &config->connector_list, head) { + if (connector->state->crtc != crtc) + continue; + + if (connector->dpms == DRM_MODE_DPMS_ON) { + active = true; + break; + } + } + crtc_state->active = active; + + ret = drm_atomic_commit(state); + if (ret != 0) + goto fail; + + /* Driver takes ownership of state on successful async commit. */ + return; +fail: + if (ret == -EDEADLK) + goto backoff; + + drm_atomic_state_free(state); + + WARN(1, "Driver bug: Changing ->active failed with ret=%i\n", ret); + + return; +backoff: + drm_atomic_state_clear(state); + drm_atomic_legacy_backoff(state); + + goto retry; +} +EXPORT_SYMBOL(drm_atomic_helper_connector_dpms); + +/** * DOC: atomic state reset and initialization * * Both the drm core and the atomic helpers assume that there is always the full @@ -1894,6 +2082,7 @@ drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc) if (state) { state->mode_changed = false; + state->active_changed = false; state->planes_changed = false; state->event = NULL; } diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c new file mode 100644 index 000000000000..d1187e571c6d --- /dev/null +++ b/drivers/gpu/drm/drm_bridge.c @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include <linux/err.h> +#include <linux/module.h> + +#include <drm/drm_crtc.h> + +#include "drm/drmP.h" + +static DEFINE_MUTEX(bridge_lock); +static LIST_HEAD(bridge_list); + +int drm_bridge_add(struct drm_bridge *bridge) +{ + mutex_lock(&bridge_lock); + list_add_tail(&bridge->list, &bridge_list); + mutex_unlock(&bridge_lock); + + return 0; +} +EXPORT_SYMBOL(drm_bridge_add); + +void drm_bridge_remove(struct drm_bridge *bridge) +{ + mutex_lock(&bridge_lock); + list_del_init(&bridge->list); + mutex_unlock(&bridge_lock); +} +EXPORT_SYMBOL(drm_bridge_remove); + +extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge) +{ + if (!dev || !bridge) + return -EINVAL; + + if (bridge->dev) + return -EBUSY; + + bridge->dev = dev; + + if (bridge->funcs->attach) + return bridge->funcs->attach(bridge); + + return 0; +} +EXPORT_SYMBOL(drm_bridge_attach); + +#ifdef CONFIG_OF +struct drm_bridge *of_drm_find_bridge(struct device_node *np) +{ + struct drm_bridge *bridge; + + mutex_lock(&bridge_lock); + + list_for_each_entry(bridge, &bridge_list, list) { + if (bridge->of_node == np) { + mutex_unlock(&bridge_lock); + return bridge; + } + } + + mutex_unlock(&bridge_lock); + return NULL; +} +EXPORT_SYMBOL(of_drm_find_bridge); +#endif + +MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>"); +MODULE_DESCRIPTION("DRM bridge infrastructure"); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index b15d720eda4c..29dbde5ecef7 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -691,6 +691,10 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, if (cursor) cursor->possible_crtcs = 1 << drm_crtc_index(crtc); + if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { + drm_object_attach_property(&crtc->base, config->prop_active, 0); + } + return 0; } EXPORT_SYMBOL(drm_crtc_init_with_planes); @@ -783,7 +787,7 @@ int drm_display_info_set_bus_formats(struct drm_display_info *info, if (formats && num_formats) { fmts = kmemdup(formats, sizeof(*formats) * num_formats, GFP_KERNEL); - if (!formats) + if (!fmts) return -ENOMEM; } @@ -1062,61 +1066,6 @@ void drm_connector_unplug_all(struct drm_device *dev) EXPORT_SYMBOL(drm_connector_unplug_all); /** - * drm_bridge_init - initialize a drm transcoder/bridge - * @dev: drm device - * @bridge: transcoder/bridge to set up - * @funcs: bridge function table - * - * Initialises a preallocated bridge. Bridges should be - * subclassed as part of driver connector objects. - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge, - const struct drm_bridge_funcs *funcs) -{ - int ret; - - drm_modeset_lock_all(dev); - - ret = drm_mode_object_get(dev, &bridge->base, DRM_MODE_OBJECT_BRIDGE); - if (ret) - goto out; - - bridge->dev = dev; - bridge->funcs = funcs; - - list_add_tail(&bridge->head, &dev->mode_config.bridge_list); - dev->mode_config.num_bridge++; - - out: - drm_modeset_unlock_all(dev); - return ret; -} -EXPORT_SYMBOL(drm_bridge_init); - -/** - * drm_bridge_cleanup - cleans up an initialised bridge - * @bridge: bridge to cleanup - * - * Cleans up the bridge but doesn't free the object. 
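(Annotation, not part of the patch.) For reference, the flow the new drm_bridge.c above enables, mirroring what the exynos_dp changes later in this series do. Driver names are hypothetical, error handling is trimmed, and the second part is a fragment only, so treat this as a sketch.

/* Bridge driver: register the bridge, keyed by its OF node. */
struct foo_bridge {
	struct drm_bridge bridge;
	/* ... device specific state ... */
};

static int foo_bridge_probe(struct platform_device *pdev)
{
	struct foo_bridge *ctx;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->bridge.funcs = &foo_bridge_funcs;	/* hypothetical funcs table */
	ctx->bridge.of_node = pdev->dev.of_node;
	drm_bridge_add(&ctx->bridge);

	platform_set_drvdata(pdev, ctx);
	return 0;
}

/* Encoder driver: look the bridge up by OF node and attach it. */
	bridge = of_drm_find_bridge(bridge_node);
	if (!bridge)
		return -EPROBE_DEFER;

	encoder->bridge = bridge;
	bridge->encoder = encoder;
	ret = drm_bridge_attach(drm_dev, bridge);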
- */ -void drm_bridge_cleanup(struct drm_bridge *bridge) -{ - struct drm_device *dev = bridge->dev; - - drm_modeset_lock_all(dev); - drm_mode_object_put(dev, &bridge->base); - list_del(&bridge->head); - dev->mode_config.num_bridge--; - drm_modeset_unlock_all(dev); - - memset(bridge, 0, sizeof(*bridge)); -} -EXPORT_SYMBOL(drm_bridge_cleanup); - -/** * drm_encoder_init - Init a preallocated encoder * @dev: drm device * @encoder: the encoder to init @@ -1481,6 +1430,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev) return -ENOMEM; dev->mode_config.prop_crtc_id = prop; + prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC, + "ACTIVE"); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_active = prop; + return 0; } @@ -1705,7 +1660,6 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr total_objects += dev->mode_config.num_crtc; total_objects += dev->mode_config.num_connector; total_objects += dev->mode_config.num_encoder; - total_objects += dev->mode_config.num_bridge; group->id_list = kcalloc(total_objects, sizeof(uint32_t), GFP_KERNEL); if (!group->id_list) @@ -1714,7 +1668,6 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr group->num_crtcs = 0; group->num_connectors = 0; group->num_encoders = 0; - group->num_bridges = 0; return 0; } @@ -1734,7 +1687,6 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_crtc *crtc; struct drm_encoder *encoder; struct drm_connector *connector; - struct drm_bridge *bridge; int ret; ret = drm_mode_group_init(dev, group); @@ -1752,11 +1704,6 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev, group->id_list[group->num_crtcs + group->num_encoders + group->num_connectors++] = connector->base.id; - list_for_each_entry(bridge, &dev->mode_config.bridge_list, head) - group->id_list[group->num_crtcs + group->num_encoders + - group->num_connectors + group->num_bridges++] = - bridge->base.id; - return 0; } EXPORT_SYMBOL(drm_mode_group_init_legacy_group); @@ -2062,21 +2009,32 @@ int drm_mode_getcrtc(struct drm_device *dev, return -ENOENT; drm_modeset_lock_crtc(crtc, crtc->primary); - crtc_resp->x = crtc->x; - crtc_resp->y = crtc->y; crtc_resp->gamma_size = crtc->gamma_size; if (crtc->primary->fb) crtc_resp->fb_id = crtc->primary->fb->base.id; else crtc_resp->fb_id = 0; - if (crtc->enabled) { - - drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode); - crtc_resp->mode_valid = 1; + if (crtc->state) { + crtc_resp->x = crtc->primary->state->src_x >> 16; + crtc_resp->y = crtc->primary->state->src_y >> 16; + if (crtc->state->enable) { + drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->state->mode); + crtc_resp->mode_valid = 1; + } else { + crtc_resp->mode_valid = 0; + } } else { - crtc_resp->mode_valid = 0; + crtc_resp->x = crtc->x; + crtc_resp->y = crtc->y; + if (crtc->enabled) { + drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode); + crtc_resp->mode_valid = 1; + + } else { + crtc_resp->mode_valid = 0; + } } drm_modeset_unlock_crtc(crtc); @@ -3822,7 +3780,7 @@ static struct drm_property *property_create_range(struct drm_device *dev, } /** - * drm_property_create_range - create a new ranged property type + * drm_property_create_range - create a new unsigned ranged property type * @dev: drm device * @flags: flags specifying the property type * @name: name of the property @@ -3833,8 +3791,8 @@ static struct drm_property *property_create_range(struct drm_device *dev, * object with drm_object_attach_property. 
The returned property object must be * freed with drm_property_destroy. * - * Userspace is allowed to set any integer value in the (min, max) range - * inclusive. + * Userspace is allowed to set any unsigned integer value in the (min, max) + * range inclusive. * * Returns: * A pointer to the newly created property on success, NULL on failure. @@ -3848,6 +3806,24 @@ struct drm_property *drm_property_create_range(struct drm_device *dev, int flags } EXPORT_SYMBOL(drm_property_create_range); +/** + * drm_property_create_signed_range - create a new signed ranged property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * @min: minimum value of the property + * @max: maximum value of the property + * + * This creates a new generic drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy. + * + * Userspace is allowed to set any signed integer value in the (min, max) + * range inclusive. + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. + */ struct drm_property *drm_property_create_signed_range(struct drm_device *dev, int flags, const char *name, int64_t min, int64_t max) @@ -3857,6 +3833,23 @@ struct drm_property *drm_property_create_signed_range(struct drm_device *dev, } EXPORT_SYMBOL(drm_property_create_signed_range); +/** + * drm_property_create_object - create a new object property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * @type: object type from DRM_MODE_OBJECT_* defines + * + * This creates a new generic drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy. + * + * Userspace is only allowed to set this to any property value of the given + * @type. Only useful for atomic properties, which is enforced. + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. + */ struct drm_property *drm_property_create_object(struct drm_device *dev, int flags, const char *name, uint32_t type) { @@ -3864,6 +3857,9 @@ struct drm_property *drm_property_create_object(struct drm_device *dev, flags |= DRM_MODE_PROP_OBJECT; + if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC))) + return NULL; + property = drm_property_create(dev, flags, name, 1); if (!property) return NULL; @@ -3875,6 +3871,28 @@ struct drm_property *drm_property_create_object(struct drm_device *dev, EXPORT_SYMBOL(drm_property_create_object); /** + * drm_property_create_bool - create a new boolean property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * + * This creates a new generic drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy. + * + * This is implemented as a ranged property with only {0, 1} as valid values. + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. 
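(Annotation, not part of the patch.) A short usage sketch for the helper documented above: the core uses it for the new "ACTIVE" CRTC property, and a driver could create its own atomic-only boolean the same way. The property name below is made up for illustration and the fragment assumes a plane is in scope.

	prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
					"foo-bypass");
	if (!prop)
		return -ENOMEM;
	drm_object_attach_property(&plane->base, prop, 0);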
+ */ +struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags, + const char *name) +{ + return drm_property_create_range(dev, flags, name, 0, 1); +} +EXPORT_SYMBOL(drm_property_create_bool); + +/** * drm_property_add_enum - add a possible value to an enumeration property * @property: enumeration property to change * @index: index of the new enumeration @@ -5385,7 +5403,6 @@ void drm_mode_config_init(struct drm_device *dev) INIT_LIST_HEAD(&dev->mode_config.fb_list); INIT_LIST_HEAD(&dev->mode_config.crtc_list); INIT_LIST_HEAD(&dev->mode_config.connector_list); - INIT_LIST_HEAD(&dev->mode_config.bridge_list); INIT_LIST_HEAD(&dev->mode_config.encoder_list); INIT_LIST_HEAD(&dev->mode_config.property_list); INIT_LIST_HEAD(&dev->mode_config.property_blob_list); @@ -5425,7 +5442,6 @@ void drm_mode_config_cleanup(struct drm_device *dev) struct drm_connector *connector, *ot; struct drm_crtc *crtc, *ct; struct drm_encoder *encoder, *enct; - struct drm_bridge *bridge, *brt; struct drm_framebuffer *fb, *fbt; struct drm_property *property, *pt; struct drm_property_blob *blob, *bt; @@ -5436,11 +5452,6 @@ void drm_mode_config_cleanup(struct drm_device *dev) encoder->funcs->destroy(encoder); } - list_for_each_entry_safe(bridge, brt, - &dev->mode_config.bridge_list, head) { - bridge->funcs->destroy(bridge); - } - list_for_each_entry_safe(connector, ot, &dev->mode_config.connector_list, head) { connector->funcs->destroy(connector); diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 79968e39c8d0..f1283878ff6d 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -354,6 +354,37 @@ int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link) EXPORT_SYMBOL(drm_dp_link_power_up); /** + * drm_dp_link_power_down() - power down a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_power_down); + +/** * drm_dp_link_configure() - configure a DisplayPort link * @aux: DisplayPort AUX channel * @link: pointer to a structure containing the link configuration diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index d5c1db55abf7..1e6a0c760c5d 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_ } EXPORT_SYMBOL(drm_fb_helper_add_one_connector); +static void remove_from_modeset(struct drm_mode_set *set, + struct drm_connector *connector) +{ + int i, j; + + for (i = 0; i < set->num_connectors; i++) { + if (set->connectors[i] == connector) + break; + } + + if (i == set->num_connectors) + return; + + for (j = i + 1; j < set->num_connectors; j++) { + set->connectors[j - 1] = set->connectors[j]; + } + set->num_connectors--; + + /* because i915 is pissy about this.. + * TODO maybe need to makes sure we set it back to !=NULL somewhere? 
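(Annotation, not part of the patch.) Looking back at the drm_dp_helper.c hunk above: drm_dp_link_power_down() is the counterpart of drm_dp_link_power_up(), so a driver's enable and disable paths can now be symmetric. A rough, hedged sketch, fragments only, with surrounding structure names assumed:

	/* enable path */
	drm_dp_link_probe(&dp->aux, &dp->link);
	drm_dp_link_power_up(&dp->aux, &dp->link);
	drm_dp_link_configure(&dp->aux, &dp->link);
	/* ... train the link, start the video stream ... */

	/* disable path */
	/* ... stop the video stream ... */
	drm_dp_link_power_down(&dp->aux, &dp->link);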
+ */ + if (set->num_connectors == 0) + set->fb = NULL; +} + int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector) { @@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, } fb_helper->connector_count--; kfree(fb_helper_connector); + + /* also cleanup dangling references to the connector: */ + for (i = 0; i < fb_helper->crtc_count; i++) + remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector); + return 0; } EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 75647e7f012b..c9f5453f20e7 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -185,8 +185,15 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) return; } - dev->driver->disable_vblank(dev, crtc); - vblank->enabled = false; + /* + * Only disable vblank interrupts if they're enabled. This avoids + * calling the ->disable_vblank() operation in atomic context with the + * hardware potentially runtime suspended. + */ + if (vblank->enabled) { + dev->driver->disable_vblank(dev, crtc); + vblank->enabled = false; + } /* No further vblank irq's will be processed after * this point. Get current hardware vblank count and @@ -1045,7 +1052,7 @@ EXPORT_SYMBOL(drm_vblank_get); * Acquire a reference count on vblank events to avoid having them disabled * while in use. * - * This is the native kms version of drm_vblank_off(). + * This is the native kms version of drm_vblank_get(). * * Returns: * Zero on success, nonzero on failure. diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index c0644bb865f2..2d5ca8eec13a 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -323,8 +323,6 @@ EXPORT_SYMBOL(mipi_dsi_packet_format_is_long); int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, const struct mipi_dsi_msg *msg) { - const u8 *tx = msg->tx_buf; - if (!packet || !msg) return -EINVAL; @@ -353,8 +351,10 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, packet->header[2] = (msg->tx_len >> 8) & 0xff; packet->payload_length = msg->tx_len; - packet->payload = tx; + packet->payload = msg->tx_buf; } else { + const u8 *tx = msg->tx_buf; + packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0; packet->header[2] = (msg->tx_len > 1) ? 
tx[1] : 0; } diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 20d977a52c58..487d0e35c134 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1011,6 +1011,62 @@ drm_mode_validate_size(const struct drm_display_mode *mode, } EXPORT_SYMBOL(drm_mode_validate_size); +#define MODE_STATUS(status) [MODE_ ## status + 3] = #status + +static const char * const drm_mode_status_names[] = { + MODE_STATUS(OK), + MODE_STATUS(HSYNC), + MODE_STATUS(VSYNC), + MODE_STATUS(H_ILLEGAL), + MODE_STATUS(V_ILLEGAL), + MODE_STATUS(BAD_WIDTH), + MODE_STATUS(NOMODE), + MODE_STATUS(NO_INTERLACE), + MODE_STATUS(NO_DBLESCAN), + MODE_STATUS(NO_VSCAN), + MODE_STATUS(MEM), + MODE_STATUS(VIRTUAL_X), + MODE_STATUS(VIRTUAL_Y), + MODE_STATUS(MEM_VIRT), + MODE_STATUS(NOCLOCK), + MODE_STATUS(CLOCK_HIGH), + MODE_STATUS(CLOCK_LOW), + MODE_STATUS(CLOCK_RANGE), + MODE_STATUS(BAD_HVALUE), + MODE_STATUS(BAD_VVALUE), + MODE_STATUS(BAD_VSCAN), + MODE_STATUS(HSYNC_NARROW), + MODE_STATUS(HSYNC_WIDE), + MODE_STATUS(HBLANK_NARROW), + MODE_STATUS(HBLANK_WIDE), + MODE_STATUS(VSYNC_NARROW), + MODE_STATUS(VSYNC_WIDE), + MODE_STATUS(VBLANK_NARROW), + MODE_STATUS(VBLANK_WIDE), + MODE_STATUS(PANEL), + MODE_STATUS(INTERLACE_WIDTH), + MODE_STATUS(ONE_WIDTH), + MODE_STATUS(ONE_HEIGHT), + MODE_STATUS(ONE_SIZE), + MODE_STATUS(NO_REDUCED), + MODE_STATUS(NO_STEREO), + MODE_STATUS(UNVERIFIED), + MODE_STATUS(BAD), + MODE_STATUS(ERROR), +}; + +#undef MODE_STATUS + +static const char *drm_get_mode_status_name(enum drm_mode_status status) +{ + int index = status + 3; + + if (WARN_ON(index < 0 || index >= ARRAY_SIZE(drm_mode_status_names))) + return ""; + + return drm_mode_status_names[index]; +} + /** * drm_mode_prune_invalid - remove invalid modes from mode list * @dev: DRM device @@ -1032,8 +1088,9 @@ void drm_mode_prune_invalid(struct drm_device *dev, list_del(&mode->head); if (verbose) { drm_mode_debug_printmodeline(mode); - DRM_DEBUG_KMS("Not using %s mode %d\n", - mode->name, mode->status); + DRM_DEBUG_KMS("Not using %s mode: %s\n", + mode->name, + drm_get_mode_status_name(mode->status)); } drm_mode_destroy(dev, mode); } diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index f24c4cfe674b..5ba5792bfdba 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -435,7 +435,8 @@ int drm_plane_helper_commit(struct drm_plane *plane, goto out; } - if (plane_funcs->prepare_fb && plane_state->fb) { + if (plane_funcs->prepare_fb && plane_state->fb && + plane_state->fb != old_fb) { ret = plane_funcs->prepare_fb(plane, plane_state->fb); if (ret) goto out; @@ -449,13 +450,28 @@ int drm_plane_helper_commit(struct drm_plane *plane, crtc_funcs[i]->atomic_begin(crtc[i]); } - plane_funcs->atomic_update(plane, plane_state); + /* + * Drivers may optionally implement the ->atomic_disable callback, so + * special-case that here. + */ + if (drm_atomic_plane_disabling(plane, plane_state) && + plane_funcs->atomic_disable) + plane_funcs->atomic_disable(plane, plane_state); + else + plane_funcs->atomic_update(plane, plane_state); for (i = 0; i < 2; i++) { if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush) crtc_funcs[i]->atomic_flush(crtc[i]); } + /* + * If we only moved the plane and didn't change fb's, there's no need to + * wait for vblank. 
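(Annotation, not part of the patch.) One note on the MODE_STATUS() table earlier in this file: the +3 offset exists because, assuming the usual enum drm_mode_status layout (MODE_UNVERIFIED = -3, MODE_BAD = -2, MODE_ERROR = -1, MODE_OK = 0, with the remaining statuses positive), adding 3 maps every value onto a valid array index, e.g. MODE_UNVERIFIED + 3 == 0 and MODE_OK + 3 == 3; drm_get_mode_status_name() applies the same offset before its bounds check.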
+ */ + if (plane->state->fb == old_fb) + goto out; + for (i = 0; i < 2; i++) { if (!crtc[i]) continue; @@ -484,7 +500,7 @@ out: } /** - * drm_plane_helper_update() - Helper for primary plane update + * drm_plane_helper_update() - Transitional helper for plane update * @plane: plane object to update * @crtc: owning CRTC of owning plane * @fb: framebuffer to flip onto plane @@ -541,7 +557,7 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, EXPORT_SYMBOL(drm_plane_helper_update); /** - * drm_plane_helper_disable() - Helper for primary plane disable + * drm_plane_helper_disable() - Transitional helper for plane disable * @plane: plane to disable * * Provides a default plane disable handler using the atomic plane update diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index cc3d6d6d67e0..5c99d3773212 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -339,19 +339,51 @@ static ssize_t select_subconnector_show(struct device *device, drm_get_dvi_i_select_name((int)subconnector)); } -static struct device_attribute connector_attrs[] = { - __ATTR_RO(status), - __ATTR_RO(enabled), - __ATTR_RO(dpms), - __ATTR_RO(modes), +static DEVICE_ATTR_RO(status); +static DEVICE_ATTR_RO(enabled); +static DEVICE_ATTR_RO(dpms); +static DEVICE_ATTR_RO(modes); + +static struct attribute *connector_dev_attrs[] = { + &dev_attr_status.attr, + &dev_attr_enabled.attr, + &dev_attr_dpms.attr, + &dev_attr_modes.attr, + NULL }; /* These attributes are for both DVI-I connectors and all types of tv-out. */ -static struct device_attribute connector_attrs_opt1[] = { - __ATTR_RO(subconnector), - __ATTR_RO(select_subconnector), +static DEVICE_ATTR_RO(subconnector); +static DEVICE_ATTR_RO(select_subconnector); + +static struct attribute *connector_opt_dev_attrs[] = { + &dev_attr_subconnector.attr, + &dev_attr_select_subconnector.attr, + NULL }; +static umode_t connector_opt_dev_is_visible(struct kobject *kobj, + struct attribute *attr, int idx) +{ + struct device *dev = kobj_to_dev(kobj); + struct drm_connector *connector = to_drm_connector(dev); + + /* + * In the long run it maybe a good idea to make one set of + * optionals per connector type. 
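(Annotation, not part of the patch.) The sysfs conversion below follows the standard attribute-group pattern; as a generic, hedged sketch with hypothetical names, the pieces fit together like this:

static ssize_t bar_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 0);	/* placeholder value */
}
static DEVICE_ATTR_RO(bar);

static struct attribute *foo_attrs[] = {
	&dev_attr_bar.attr,
	NULL
};

static umode_t foo_attr_is_visible(struct kobject *kobj,
				   struct attribute *attr, int idx)
{
	/*
	 * A real implementation would check per-device state here and
	 * return 0 to hide the file; returning attr->mode exposes it.
	 */
	return attr->mode;
}

static const struct attribute_group foo_group = {
	.attrs = foo_attrs,
	.is_visible = foo_attr_is_visible,
};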
+ */ + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_Composite: + case DRM_MODE_CONNECTOR_SVIDEO: + case DRM_MODE_CONNECTOR_Component: + case DRM_MODE_CONNECTOR_TV: + return attr->mode; + } + + return 0; +} + static struct bin_attribute edid_attr = { .attr.name = "edid", .attr.mode = 0444, @@ -359,6 +391,27 @@ static struct bin_attribute edid_attr = { .read = edid_show, }; +static struct bin_attribute *connector_bin_attrs[] = { + &edid_attr, + NULL +}; + +static const struct attribute_group connector_dev_group = { + .attrs = connector_dev_attrs, + .bin_attrs = connector_bin_attrs, +}; + +static const struct attribute_group connector_opt_dev_group = { + .attrs = connector_opt_dev_attrs, + .is_visible = connector_opt_dev_is_visible, +}; + +static const struct attribute_group *connector_dev_groups[] = { + &connector_dev_group, + &connector_opt_dev_group, + NULL +}; + /** * drm_sysfs_connector_add - add a connector to sysfs * @connector: connector to add @@ -371,73 +424,27 @@ static struct bin_attribute edid_attr = { int drm_sysfs_connector_add(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - int attr_cnt = 0; - int opt_cnt = 0; - int i; - int ret; if (connector->kdev) return 0; - connector->kdev = device_create(drm_class, dev->primary->kdev, - 0, connector, "card%d-%s", - dev->primary->index, connector->name); + connector->kdev = + device_create_with_groups(drm_class, dev->primary->kdev, 0, + connector, connector_dev_groups, + "card%d-%s", dev->primary->index, + connector->name); DRM_DEBUG("adding \"%s\" to sysfs\n", connector->name); if (IS_ERR(connector->kdev)) { DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev)); - ret = PTR_ERR(connector->kdev); - goto out; - } - - /* Standard attributes */ - - for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) { - ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]); - if (ret) - goto err_out_files; + return PTR_ERR(connector->kdev); } - /* Optional attributes */ - /* - * In the long run it maybe a good idea to make one set of - * optionals per connector type. 
- */ - switch (connector->connector_type) { - case DRM_MODE_CONNECTOR_DVII: - case DRM_MODE_CONNECTOR_Composite: - case DRM_MODE_CONNECTOR_SVIDEO: - case DRM_MODE_CONNECTOR_Component: - case DRM_MODE_CONNECTOR_TV: - for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) { - ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]); - if (ret) - goto err_out_files; - } - break; - default: - break; - } - - ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr); - if (ret) - goto err_out_files; - /* Let userspace know we have a new connector */ drm_sysfs_hotplug_event(dev); return 0; - -err_out_files: - for (i = 0; i < opt_cnt; i++) - device_remove_file(connector->kdev, &connector_attrs_opt1[i]); - for (i = 0; i < attr_cnt; i++) - device_remove_file(connector->kdev, &connector_attrs[i]); - device_unregister(connector->kdev); - -out: - return ret; } /** @@ -455,16 +462,11 @@ out: */ void drm_sysfs_connector_remove(struct drm_connector *connector) { - int i; - if (!connector->kdev) return; DRM_DEBUG("removing \"%s\" from sysfs\n", connector->name); - for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) - device_remove_file(connector->kdev, &connector_attrs[i]); - sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr); device_unregister(connector->kdev); connector->kdev = NULL; } diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c index 63b471205072..68c1f32fb086 100644 --- a/drivers/gpu/drm/drm_vma_manager.c +++ b/drivers/gpu/drm/drm_vma_manager.c @@ -50,8 +50,7 @@ * * You must not use multiple offset managers on a single address_space. * Otherwise, mm-core will be unable to tear down memory mappings as the VM will - * no longer be linear. Please use VM_NONLINEAR in that case and implement your - * own offset managers. + * no longer be linear. * * This offset manager works on page-based addresses. That is, every argument * and return code (with the exception of drm_vma_node_offset_addr()) is given diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index c072999b7e03..a5e74612100e 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -12,16 +12,9 @@ config DRM_EXYNOS If M is selected the module will be called exynosdrm. config DRM_EXYNOS_IOMMU - bool "EXYNOS DRM IOMMU Support" + bool depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU - help - Choose this option if you want to use IOMMU feature for DRM. - -config DRM_EXYNOS_DMABUF - bool "EXYNOS DRM DMABUF" - depends on DRM_EXYNOS - help - Choose this option if you want to use DMABUF feature for DRM. + default y config DRM_EXYNOS_FIMD bool "Exynos DRM FIMD" @@ -31,9 +24,16 @@ config DRM_EXYNOS_FIMD help Choose this option if you want to use Exynos FIMD for DRM. +config DRM_EXYNOS7_DECON + bool "Exynos DRM DECON" + depends on DRM_EXYNOS + select FB_MODE_HELPERS + help + Choose this option if you want to use Exynos DECON for DRM. 
+ config DRM_EXYNOS_DPI bool "EXYNOS DRM parallel output support" - depends on DRM_EXYNOS_FIMD + depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) select DRM_PANEL default n help @@ -41,7 +41,7 @@ config DRM_EXYNOS_DPI config DRM_EXYNOS_DSI bool "EXYNOS DRM MIPI-DSI driver support" - depends on DRM_EXYNOS_FIMD + depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) select DRM_MIPI_DSI select DRM_PANEL default n @@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI config DRM_EXYNOS_DP bool "EXYNOS DRM DP driver support" - depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) + depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) default DRM_EXYNOS select DRM_PANEL help diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile index 33ae3652b8da..cc90679cfc06 100644 --- a/drivers/gpu/drm/exynos/Makefile +++ b/drivers/gpu/drm/exynos/Makefile @@ -6,11 +6,11 @@ ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \ exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \ exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \ - exynos_drm_plane.o + exynos_drm_plane.o exynos_drm_dmabuf.o exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o -exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o +exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON) += exynos7_drm_decon.o exynosdrm-$(CONFIG_DRM_EXYNOS_DPI) += exynos_drm_dpi.o exynosdrm-$(CONFIG_DRM_EXYNOS_DSI) += exynos_drm_dsi.o exynosdrm-$(CONFIG_DRM_EXYNOS_DP) += exynos_dp_core.o exynos_dp_reg.o diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c new file mode 100644 index 000000000000..63f02e2380ae --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -0,0 +1,990 @@ +/* drivers/gpu/drm/exynos/exynos7_drm_decon.c + * + * Copyright (C) 2014 Samsung Electronics Co.Ltd + * Authors: + * Akshu Agarwal <akshua@gmail.com> + * Ajay Kumar <ajaykumar.rs@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#include <drm/drmP.h> +#include <drm/exynos_drm.h> + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include <video/of_display_timing.h> +#include <video/of_videomode.h> +#include <video/exynos7_decon.h> + +#include "exynos_drm_crtc.h" +#include "exynos_drm_drv.h" +#include "exynos_drm_fbdev.h" +#include "exynos_drm_iommu.h" + +/* + * DECON stands for Display and Enhancement controller. 
+ */ + +#define DECON_DEFAULT_FRAMERATE 60 +#define MIN_FB_WIDTH_FOR_16WORD_BURST 128 + +#define WINDOWS_NR 2 + +struct decon_win_data { + unsigned int ovl_x; + unsigned int ovl_y; + unsigned int offset_x; + unsigned int offset_y; + unsigned int ovl_width; + unsigned int ovl_height; + unsigned int fb_width; + unsigned int fb_height; + unsigned int bpp; + unsigned int pixel_format; + dma_addr_t dma_addr; + bool enabled; + bool resume; +}; + +struct decon_context { + struct device *dev; + struct drm_device *drm_dev; + struct exynos_drm_crtc *crtc; + struct clk *pclk; + struct clk *aclk; + struct clk *eclk; + struct clk *vclk; + void __iomem *regs; + struct decon_win_data win_data[WINDOWS_NR]; + unsigned int default_win; + unsigned long irq_flags; + bool i80_if; + bool suspended; + int pipe; + wait_queue_head_t wait_vsync_queue; + atomic_t wait_vsync_event; + + struct exynos_drm_panel_info panel; + struct exynos_drm_display *display; +}; + +static const struct of_device_id decon_driver_dt_match[] = { + {.compatible = "samsung,exynos7-decon"}, + {}, +}; +MODULE_DEVICE_TABLE(of, decon_driver_dt_match); + +static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc) +{ + struct decon_context *ctx = crtc->ctx; + + if (ctx->suspended) + return; + + atomic_set(&ctx->wait_vsync_event, 1); + + /* + * wait for DECON to signal VSYNC interrupt or return after + * timeout which is set to 50ms (refresh rate of 20). + */ + if (!wait_event_timeout(ctx->wait_vsync_queue, + !atomic_read(&ctx->wait_vsync_event), + HZ/20)) + DRM_DEBUG_KMS("vblank wait timed out.\n"); +} + +static void decon_clear_channel(struct decon_context *ctx) +{ + int win, ch_enabled = 0; + + DRM_DEBUG_KMS("%s\n", __FILE__); + + /* Check if any channel is enabled. */ + for (win = 0; win < WINDOWS_NR; win++) { + u32 val = readl(ctx->regs + WINCON(win)); + + if (val & WINCONx_ENWIN) { + val &= ~WINCONx_ENWIN; + writel(val, ctx->regs + WINCON(win)); + ch_enabled = 1; + } + } + + /* Wait for vsync, as disable channel takes effect at next vsync */ + if (ch_enabled) { + unsigned int state = ctx->suspended; + + ctx->suspended = 0; + decon_wait_for_vblank(ctx->crtc); + ctx->suspended = state; + } +} + +static int decon_ctx_initialize(struct decon_context *ctx, + struct drm_device *drm_dev) +{ + struct exynos_drm_private *priv = drm_dev->dev_private; + + ctx->drm_dev = drm_dev; + ctx->pipe = priv->pipe++; + + /* attach this sub driver to iommu mapping if supported. */ + if (is_drm_iommu_supported(ctx->drm_dev)) { + int ret; + + /* + * If any channel is already active, iommu will throw + * a PAGE FAULT when enabled. So clear any channel if enabled. + */ + decon_clear_channel(ctx); + ret = drm_iommu_attach_device(ctx->drm_dev, ctx->dev); + if (ret) { + DRM_ERROR("drm_iommu_attach failed.\n"); + return ret; + } + } + + return 0; +} + +static void decon_ctx_remove(struct decon_context *ctx) +{ + /* detach this sub driver from iommu mapping if supported. */ + if (is_drm_iommu_supported(ctx->drm_dev)) + drm_iommu_detach_device(ctx->drm_dev, ctx->dev); +} + +static u32 decon_calc_clkdiv(struct decon_context *ctx, + const struct drm_display_mode *mode) +{ + unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh; + u32 clkdiv; + + /* Find the clock divider value that gets us closest to ideal_clk */ + clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->vclk), ideal_clk); + + return (clkdiv < 0x100) ? 
clkdiv : 0xff; +} + +static bool decon_mode_fixup(struct exynos_drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + if (adjusted_mode->vrefresh == 0) + adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE; + + return true; +} + +static void decon_commit(struct exynos_drm_crtc *crtc) +{ + struct decon_context *ctx = crtc->ctx; + struct drm_display_mode *mode = &crtc->base.mode; + u32 val, clkdiv; + + if (ctx->suspended) + return; + + /* nothing to do if we haven't set the mode yet */ + if (mode->htotal == 0 || mode->vtotal == 0) + return; + + if (!ctx->i80_if) { + int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd; + /* setup vertical timing values. */ + vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; + vbpd = mode->crtc_vtotal - mode->crtc_vsync_end; + vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay; + + val = VIDTCON0_VBPD(vbpd - 1) | VIDTCON0_VFPD(vfpd - 1); + writel(val, ctx->regs + VIDTCON0); + + val = VIDTCON1_VSPW(vsync_len - 1); + writel(val, ctx->regs + VIDTCON1); + + /* setup horizontal timing values. */ + hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; + hbpd = mode->crtc_htotal - mode->crtc_hsync_end; + hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay; + + /* setup horizontal timing values. */ + val = VIDTCON2_HBPD(hbpd - 1) | VIDTCON2_HFPD(hfpd - 1); + writel(val, ctx->regs + VIDTCON2); + + val = VIDTCON3_HSPW(hsync_len - 1); + writel(val, ctx->regs + VIDTCON3); + } + + /* setup horizontal and vertical display size. */ + val = VIDTCON4_LINEVAL(mode->vdisplay - 1) | + VIDTCON4_HOZVAL(mode->hdisplay - 1); + writel(val, ctx->regs + VIDTCON4); + + writel(mode->vdisplay - 1, ctx->regs + LINECNT_OP_THRESHOLD); + + /* + * fields of register with prefix '_F' would be updated + * at vsync(same as dma start) + */ + val = VIDCON0_ENVID | VIDCON0_ENVID_F; + writel(val, ctx->regs + VIDCON0); + + clkdiv = decon_calc_clkdiv(ctx, mode); + if (clkdiv > 1) { + val = VCLKCON1_CLKVAL_NUM_VCLK(clkdiv - 1); + writel(val, ctx->regs + VCLKCON1); + writel(val, ctx->regs + VCLKCON2); + } + + val = readl(ctx->regs + DECON_UPDATE); + val |= DECON_UPDATE_STANDALONE_F; + writel(val, ctx->regs + DECON_UPDATE); +} + +static int decon_enable_vblank(struct exynos_drm_crtc *crtc) +{ + struct decon_context *ctx = crtc->ctx; + u32 val; + + if (ctx->suspended) + return -EPERM; + + if (!test_and_set_bit(0, &ctx->irq_flags)) { + val = readl(ctx->regs + VIDINTCON0); + + val |= VIDINTCON0_INT_ENABLE; + + if (!ctx->i80_if) { + val |= VIDINTCON0_INT_FRAME; + val &= ~VIDINTCON0_FRAMESEL0_MASK; + val |= VIDINTCON0_FRAMESEL0_VSYNC; + } + + writel(val, ctx->regs + VIDINTCON0); + } + + return 0; +} + +static void decon_disable_vblank(struct exynos_drm_crtc *crtc) +{ + struct decon_context *ctx = crtc->ctx; + u32 val; + + if (ctx->suspended) + return; + + if (test_and_clear_bit(0, &ctx->irq_flags)) { + val = readl(ctx->regs + VIDINTCON0); + + val &= ~VIDINTCON0_INT_ENABLE; + if (!ctx->i80_if) + val &= ~VIDINTCON0_INT_FRAME; + + writel(val, ctx->regs + VIDINTCON0); + } +} + +static void decon_win_mode_set(struct exynos_drm_crtc *crtc, + struct exynos_drm_plane *plane) +{ + struct decon_context *ctx = crtc->ctx; + struct decon_win_data *win_data; + int win, padding; + + if (!plane) { + DRM_ERROR("plane is NULL\n"); + return; + } + + win = plane->zpos; + if (win == DEFAULT_ZPOS) + win = ctx->default_win; + + if (win < 0 || win >= WINDOWS_NR) + return; + + + win_data = &ctx->win_data[win]; + + padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width; + 
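(Annotation, not part of the patch.) Worked example for the padding computation just above, with illustrative numbers only: an XRGB8888 buffer that is 1080 pixels wide but allocated with a 4352-byte pitch gives bpp >> 3 == 4, so pitch / 4 == 1088 pixels per line and padding == 8; fb_width below then carries the full 1088-pixel stride, which is what the scanout DMA needs to step from one line to the next.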
win_data->offset_x = plane->fb_x; + win_data->offset_y = plane->fb_y; + win_data->fb_width = plane->fb_width + padding; + win_data->fb_height = plane->fb_height; + win_data->ovl_x = plane->crtc_x; + win_data->ovl_y = plane->crtc_y; + win_data->ovl_width = plane->crtc_width; + win_data->ovl_height = plane->crtc_height; + win_data->dma_addr = plane->dma_addr[0]; + win_data->bpp = plane->bpp; + win_data->pixel_format = plane->pixel_format; + + DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", + win_data->offset_x, win_data->offset_y); + DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", + win_data->ovl_width, win_data->ovl_height); + DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr); + DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", + plane->fb_width, plane->crtc_width); +} + +static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) +{ + struct decon_win_data *win_data = &ctx->win_data[win]; + unsigned long val; + + val = readl(ctx->regs + WINCON(win)); + val &= ~WINCONx_BPPMODE_MASK; + + switch (win_data->pixel_format) { + case DRM_FORMAT_RGB565: + val |= WINCONx_BPPMODE_16BPP_565; + val |= WINCONx_BURSTLEN_16WORD; + break; + case DRM_FORMAT_XRGB8888: + val |= WINCONx_BPPMODE_24BPP_xRGB; + val |= WINCONx_BURSTLEN_16WORD; + break; + case DRM_FORMAT_XBGR8888: + val |= WINCONx_BPPMODE_24BPP_xBGR; + val |= WINCONx_BURSTLEN_16WORD; + break; + case DRM_FORMAT_RGBX8888: + val |= WINCONx_BPPMODE_24BPP_RGBx; + val |= WINCONx_BURSTLEN_16WORD; + break; + case DRM_FORMAT_BGRX8888: + val |= WINCONx_BPPMODE_24BPP_BGRx; + val |= WINCONx_BURSTLEN_16WORD; + break; + case DRM_FORMAT_ARGB8888: + val |= WINCONx_BPPMODE_32BPP_ARGB | WINCONx_BLD_PIX | + WINCONx_ALPHA_SEL; + val |= WINCONx_BURSTLEN_16WORD; + break; + case DRM_FORMAT_ABGR8888: + val |= WINCONx_BPPMODE_32BPP_ABGR | WINCONx_BLD_PIX | + WINCONx_ALPHA_SEL; + val |= WINCONx_BURSTLEN_16WORD; + break; + case DRM_FORMAT_RGBA8888: + val |= WINCONx_BPPMODE_32BPP_RGBA | WINCONx_BLD_PIX | + WINCONx_ALPHA_SEL; + val |= WINCONx_BURSTLEN_16WORD; + break; + case DRM_FORMAT_BGRA8888: + val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX | + WINCONx_ALPHA_SEL; + val |= WINCONx_BURSTLEN_16WORD; + break; + default: + DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n"); + + val |= WINCONx_BPPMODE_24BPP_xRGB; + val |= WINCONx_BURSTLEN_16WORD; + break; + } + + DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp); + + /* + * In case of exynos, setting dma-burst to 16Word causes permanent + * tearing for very small buffers, e.g. cursor buffer. Burst Mode + * switching which is based on plane size is not recommended as + * plane size varies a lot towards the end of the screen and rapid + * movement causes unstable DMA which results into iommu crash/tear. 
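(Annotation, not part of the patch.) Concretely, the fallback below covers cases like a 64x64 ARGB cursor: its 64-pixel width is below MIN_FB_WIDTH_FOR_16WORD_BURST (128), so the window is switched to the safer 8-word burst length instead of 16 words.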
+ */ + + if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) { + val &= ~WINCONx_BURSTLEN_MASK; + val |= WINCONx_BURSTLEN_8WORD; + } + + writel(val, ctx->regs + WINCON(win)); +} + +static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win) +{ + unsigned int keycon0 = 0, keycon1 = 0; + + keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F | + WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0); + + keycon1 = WxKEYCON1_COLVAL(0xffffffff); + + writel(keycon0, ctx->regs + WKEYCON0_BASE(win)); + writel(keycon1, ctx->regs + WKEYCON1_BASE(win)); +} + +/** + * shadow_protect_win() - disable updating values from shadow registers at vsync + * + * @win: window to protect registers for + * @protect: 1 to protect (disable updates) + */ +static void decon_shadow_protect_win(struct decon_context *ctx, + int win, bool protect) +{ + u32 bits, val; + + bits = SHADOWCON_WINx_PROTECT(win); + + val = readl(ctx->regs + SHADOWCON); + if (protect) + val |= bits; + else + val &= ~bits; + writel(val, ctx->regs + SHADOWCON); +} + +static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos) +{ + struct decon_context *ctx = crtc->ctx; + struct drm_display_mode *mode = &crtc->base.mode; + struct decon_win_data *win_data; + int win = zpos; + unsigned long val, alpha; + unsigned int last_x; + unsigned int last_y; + + if (ctx->suspended) + return; + + if (win == DEFAULT_ZPOS) + win = ctx->default_win; + + if (win < 0 || win >= WINDOWS_NR) + return; + + win_data = &ctx->win_data[win]; + + /* If suspended, enable this on resume */ + if (ctx->suspended) { + win_data->resume = true; + return; + } + + /* + * SHADOWCON/PRTCON register is used for enabling timing. + * + * for example, once only width value of a register is set, + * if the dma is started then decon hardware could malfunction so + * with protect window setting, the register fields with prefix '_F' + * wouldn't be updated at vsync also but updated once unprotect window + * is set. + */ + + /* protect windows */ + decon_shadow_protect_win(ctx, win, true); + + /* buffer start address */ + val = (unsigned long)win_data->dma_addr; + writel(val, ctx->regs + VIDW_BUF_START(win)); + + /* buffer size */ + writel(win_data->fb_width, ctx->regs + VIDW_WHOLE_X(win)); + writel(win_data->fb_height, ctx->regs + VIDW_WHOLE_Y(win)); + + /* offset from the start of the buffer to read */ + writel(win_data->offset_x, ctx->regs + VIDW_OFFSET_X(win)); + writel(win_data->offset_y, ctx->regs + VIDW_OFFSET_Y(win)); + + DRM_DEBUG_KMS("start addr = 0x%lx\n", + (unsigned long)win_data->dma_addr); + DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", + win_data->ovl_width, win_data->ovl_height); + + /* + * OSD position. + * In case the window layout goes of LCD layout, DECON fails. 
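(Annotation, not part of the patch.) The clamping below keeps the window inside the visible mode rather than letting DECON fault; for example, a 256-pixel-wide window requested at ovl_x == 1900 on a 1920-pixel-wide mode would be pulled back to ovl_x == 1664 (1920 - 256), and the vertical position is treated the same way.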
+ */ + if ((win_data->ovl_x + win_data->ovl_width) > mode->hdisplay) + win_data->ovl_x = mode->hdisplay - win_data->ovl_width; + if ((win_data->ovl_y + win_data->ovl_height) > mode->vdisplay) + win_data->ovl_y = mode->vdisplay - win_data->ovl_height; + + val = VIDOSDxA_TOPLEFT_X(win_data->ovl_x) | + VIDOSDxA_TOPLEFT_Y(win_data->ovl_y); + writel(val, ctx->regs + VIDOSD_A(win)); + + last_x = win_data->ovl_x + win_data->ovl_width; + if (last_x) + last_x--; + last_y = win_data->ovl_y + win_data->ovl_height; + if (last_y) + last_y--; + + val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y); + + writel(val, ctx->regs + VIDOSD_B(win)); + + DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n", + win_data->ovl_x, win_data->ovl_y, last_x, last_y); + + /* OSD alpha */ + alpha = VIDOSDxC_ALPHA0_R_F(0x0) | + VIDOSDxC_ALPHA0_G_F(0x0) | + VIDOSDxC_ALPHA0_B_F(0x0); + + writel(alpha, ctx->regs + VIDOSD_C(win)); + + alpha = VIDOSDxD_ALPHA1_R_F(0xff) | + VIDOSDxD_ALPHA1_G_F(0xff) | + VIDOSDxD_ALPHA1_B_F(0xff); + + writel(alpha, ctx->regs + VIDOSD_D(win)); + + decon_win_set_pixfmt(ctx, win); + + /* hardware window 0 doesn't support color key. */ + if (win != 0) + decon_win_set_colkey(ctx, win); + + /* wincon */ + val = readl(ctx->regs + WINCON(win)); + val |= WINCONx_TRIPLE_BUF_MODE; + val |= WINCONx_ENWIN; + writel(val, ctx->regs + WINCON(win)); + + /* Enable DMA channel and unprotect windows */ + decon_shadow_protect_win(ctx, win, false); + + val = readl(ctx->regs + DECON_UPDATE); + val |= DECON_UPDATE_STANDALONE_F; + writel(val, ctx->regs + DECON_UPDATE); + + win_data->enabled = true; +} + +static void decon_win_disable(struct exynos_drm_crtc *crtc, int zpos) +{ + struct decon_context *ctx = crtc->ctx; + struct decon_win_data *win_data; + int win = zpos; + u32 val; + + if (win == DEFAULT_ZPOS) + win = ctx->default_win; + + if (win < 0 || win >= WINDOWS_NR) + return; + + win_data = &ctx->win_data[win]; + + if (ctx->suspended) { + /* do not resume this window*/ + win_data->resume = false; + return; + } + + /* protect windows */ + decon_shadow_protect_win(ctx, win, true); + + /* wincon */ + val = readl(ctx->regs + WINCON(win)); + val &= ~WINCONx_ENWIN; + writel(val, ctx->regs + WINCON(win)); + + /* unprotect windows */ + decon_shadow_protect_win(ctx, win, false); + + val = readl(ctx->regs + DECON_UPDATE); + val |= DECON_UPDATE_STANDALONE_F; + writel(val, ctx->regs + DECON_UPDATE); + + win_data->enabled = false; +} + +static void decon_window_suspend(struct decon_context *ctx) +{ + struct decon_win_data *win_data; + int i; + + for (i = 0; i < WINDOWS_NR; i++) { + win_data = &ctx->win_data[i]; + win_data->resume = win_data->enabled; + if (win_data->enabled) + decon_win_disable(ctx->crtc, i); + } +} + +static void decon_window_resume(struct decon_context *ctx) +{ + struct decon_win_data *win_data; + int i; + + for (i = 0; i < WINDOWS_NR; i++) { + win_data = &ctx->win_data[i]; + win_data->enabled = win_data->resume; + win_data->resume = false; + } +} + +static void decon_apply(struct decon_context *ctx) +{ + struct decon_win_data *win_data; + int i; + + for (i = 0; i < WINDOWS_NR; i++) { + win_data = &ctx->win_data[i]; + if (win_data->enabled) + decon_win_commit(ctx->crtc, i); + else + decon_win_disable(ctx->crtc, i); + } + + decon_commit(ctx->crtc); +} + +static void decon_init(struct decon_context *ctx) +{ + u32 val; + + writel(VIDCON0_SWRESET, ctx->regs + VIDCON0); + + val = VIDOUTCON0_DISP_IF_0_ON; + if (!ctx->i80_if) + val |= VIDOUTCON0_RGBIF; + writel(val, ctx->regs + VIDOUTCON0); + + 
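(Annotation, not part of the patch.) Note that VIDOUTCON0_RGBIF is only set for the RGB interface; in i80 (command-mode) operation the sync-timing programming is skipped as well, which is why decon_commit() above only writes the VIDTCONx registers when ctx->i80_if is false.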
writel(VCLKCON0_CLKVALUP | VCLKCON0_VCLKFREE, ctx->regs + VCLKCON0); + + if (!ctx->i80_if) + writel(VIDCON1_VCLK_HOLD, ctx->regs + VIDCON1(0)); +} + +static int decon_poweron(struct decon_context *ctx) +{ + int ret; + + if (!ctx->suspended) + return 0; + + ctx->suspended = false; + + pm_runtime_get_sync(ctx->dev); + + ret = clk_prepare_enable(ctx->pclk); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret); + goto pclk_err; + } + + ret = clk_prepare_enable(ctx->aclk); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret); + goto aclk_err; + } + + ret = clk_prepare_enable(ctx->eclk); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret); + goto eclk_err; + } + + ret = clk_prepare_enable(ctx->vclk); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret); + goto vclk_err; + } + + decon_init(ctx); + + /* if vblank was enabled status, enable it again. */ + if (test_and_clear_bit(0, &ctx->irq_flags)) { + ret = decon_enable_vblank(ctx->crtc); + if (ret) { + DRM_ERROR("Failed to re-enable vblank [%d]\n", ret); + goto err; + } + } + + decon_window_resume(ctx); + + decon_apply(ctx); + + return 0; + +err: + clk_disable_unprepare(ctx->vclk); +vclk_err: + clk_disable_unprepare(ctx->eclk); +eclk_err: + clk_disable_unprepare(ctx->aclk); +aclk_err: + clk_disable_unprepare(ctx->pclk); +pclk_err: + ctx->suspended = true; + return ret; +} + +static int decon_poweroff(struct decon_context *ctx) +{ + if (ctx->suspended) + return 0; + + /* + * We need to make sure that all windows are disabled before we + * suspend that connector. Otherwise we might try to scan from + * a destroyed buffer later. + */ + decon_window_suspend(ctx); + + clk_disable_unprepare(ctx->vclk); + clk_disable_unprepare(ctx->eclk); + clk_disable_unprepare(ctx->aclk); + clk_disable_unprepare(ctx->pclk); + + pm_runtime_put_sync(ctx->dev); + + ctx->suspended = true; + return 0; +} + +static void decon_dpms(struct exynos_drm_crtc *crtc, int mode) +{ + DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode); + + switch (mode) { + case DRM_MODE_DPMS_ON: + decon_poweron(crtc->ctx); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + decon_poweroff(crtc->ctx); + break; + default: + DRM_DEBUG_KMS("unspecified mode %d\n", mode); + break; + } +} + +static struct exynos_drm_crtc_ops decon_crtc_ops = { + .dpms = decon_dpms, + .mode_fixup = decon_mode_fixup, + .commit = decon_commit, + .enable_vblank = decon_enable_vblank, + .disable_vblank = decon_disable_vblank, + .wait_for_vblank = decon_wait_for_vblank, + .win_mode_set = decon_win_mode_set, + .win_commit = decon_win_commit, + .win_disable = decon_win_disable, +}; + + +static irqreturn_t decon_irq_handler(int irq, void *dev_id) +{ + struct decon_context *ctx = (struct decon_context *)dev_id; + u32 val, clear_bit; + + val = readl(ctx->regs + VIDINTCON1); + + clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME; + if (val & clear_bit) + writel(clear_bit, ctx->regs + VIDINTCON1); + + /* check the crtc is detached already from encoder */ + if (ctx->pipe < 0 || !ctx->drm_dev) + goto out; + + if (!ctx->i80_if) { + drm_handle_vblank(ctx->drm_dev, ctx->pipe); + exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); + + /* set wait vsync event to zero and wake up queue. 
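(Annotation, not part of the patch.) The wake-up in the interrupt handler below is the other half of the handshake started by decon_wait_for_vblank() earlier in this file, which, copied from above for reference, does:

	atomic_set(&ctx->wait_vsync_event, 1);
	if (!wait_event_timeout(ctx->wait_vsync_queue,
				!atomic_read(&ctx->wait_vsync_event),
				HZ/20))
		DRM_DEBUG_KMS("vblank wait timed out.\n");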
*/ + if (atomic_read(&ctx->wait_vsync_event)) { + atomic_set(&ctx->wait_vsync_event, 0); + wake_up(&ctx->wait_vsync_queue); + } + } +out: + return IRQ_HANDLED; +} + +static int decon_bind(struct device *dev, struct device *master, void *data) +{ + struct decon_context *ctx = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + int ret; + + ret = decon_ctx_initialize(ctx, drm_dev); + if (ret) { + DRM_ERROR("decon_ctx_initialize failed.\n"); + return ret; + } + + ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, + EXYNOS_DISPLAY_TYPE_LCD, + &decon_crtc_ops, ctx); + if (IS_ERR(ctx->crtc)) { + decon_ctx_remove(ctx); + return PTR_ERR(ctx->crtc); + } + + if (ctx->display) + exynos_drm_create_enc_conn(drm_dev, ctx->display); + + return 0; + +} + +static void decon_unbind(struct device *dev, struct device *master, + void *data) +{ + struct decon_context *ctx = dev_get_drvdata(dev); + + decon_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); + + if (ctx->display) + exynos_dpi_remove(ctx->display); + + decon_ctx_remove(ctx); +} + +static const struct component_ops decon_component_ops = { + .bind = decon_bind, + .unbind = decon_unbind, +}; + +static int decon_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct decon_context *ctx; + struct device_node *i80_if_timings; + struct resource *res; + int ret; + + if (!dev->of_node) + return -ENODEV; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CRTC, + EXYNOS_DISPLAY_TYPE_LCD); + if (ret) + return ret; + + ctx->dev = dev; + ctx->suspended = true; + + i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings"); + if (i80_if_timings) + ctx->i80_if = true; + of_node_put(i80_if_timings); + + ctx->regs = of_iomap(dev->of_node, 0); + if (IS_ERR(ctx->regs)) { + ret = PTR_ERR(ctx->regs); + goto err_del_component; + } + + ctx->pclk = devm_clk_get(dev, "pclk_decon0"); + if (IS_ERR(ctx->pclk)) { + dev_err(dev, "failed to get bus clock pclk\n"); + ret = PTR_ERR(ctx->pclk); + goto err_iounmap; + } + + ctx->aclk = devm_clk_get(dev, "aclk_decon0"); + if (IS_ERR(ctx->aclk)) { + dev_err(dev, "failed to get bus clock aclk\n"); + ret = PTR_ERR(ctx->aclk); + goto err_iounmap; + } + + ctx->eclk = devm_clk_get(dev, "decon0_eclk"); + if (IS_ERR(ctx->eclk)) { + dev_err(dev, "failed to get eclock\n"); + ret = PTR_ERR(ctx->eclk); + goto err_iounmap; + } + + ctx->vclk = devm_clk_get(dev, "decon0_vclk"); + if (IS_ERR(ctx->vclk)) { + dev_err(dev, "failed to get vclock\n"); + ret = PTR_ERR(ctx->vclk); + goto err_iounmap; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + ctx->i80_if ? 
"lcd_sys" : "vsync"); + if (!res) { + dev_err(dev, "irq request failed.\n"); + ret = -ENXIO; + goto err_iounmap; + } + + ret = devm_request_irq(dev, res->start, decon_irq_handler, + 0, "drm_decon", ctx); + if (ret) { + dev_err(dev, "irq request failed.\n"); + goto err_iounmap; + } + + init_waitqueue_head(&ctx->wait_vsync_queue); + atomic_set(&ctx->wait_vsync_event, 0); + + platform_set_drvdata(pdev, ctx); + + ctx->display = exynos_dpi_probe(dev); + if (IS_ERR(ctx->display)) { + ret = PTR_ERR(ctx->display); + goto err_iounmap; + } + + pm_runtime_enable(dev); + + ret = component_add(dev, &decon_component_ops); + if (ret) + goto err_disable_pm_runtime; + + return ret; + +err_disable_pm_runtime: + pm_runtime_disable(dev); + +err_iounmap: + iounmap(ctx->regs); + +err_del_component: + exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CRTC); + return ret; +} + +static int decon_remove(struct platform_device *pdev) +{ + struct decon_context *ctx = dev_get_drvdata(&pdev->dev); + + pm_runtime_disable(&pdev->dev); + + iounmap(ctx->regs); + + component_del(&pdev->dev, &decon_component_ops); + exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC); + + return 0; +} + +struct platform_driver decon_driver = { + .probe = decon_probe, + .remove = decon_remove, + .driver = { + .name = "exynos-decon", + .of_match_table = decon_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index 34d46aa75416..bf17a60b40ed 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -18,6 +18,7 @@ #include <linux/interrupt.h> #include <linux/of.h> #include <linux/of_gpio.h> +#include <linux/of_graph.h> #include <linux/gpio.h> #include <linux/component.h> #include <linux/phy/phy.h> @@ -993,32 +994,20 @@ static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = { .best_encoder = exynos_dp_best_encoder, }; -static bool find_bridge(const char *compat, struct bridge_init *bridge) -{ - bridge->client = NULL; - bridge->node = of_find_compatible_node(NULL, NULL, compat); - if (!bridge->node) - return false; - - bridge->client = of_find_i2c_device_by_node(bridge->node); - if (!bridge->client) - return false; - - return true; -} - /* returns the number of bridges attached */ -static int exynos_drm_attach_lcd_bridge(struct drm_device *dev, +static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp, struct drm_encoder *encoder) { - struct bridge_init bridge; int ret; - if (find_bridge("nxp,ptn3460", &bridge)) { - ret = ptn3460_init(dev, encoder, bridge.client, bridge.node); - if (!ret) - return 1; + encoder->bridge = dp->bridge; + dp->bridge->encoder = encoder; + ret = drm_bridge_attach(encoder->dev, dp->bridge); + if (ret) { + DRM_ERROR("Failed to attach bridge to drm\n"); + return ret; } + return 0; } @@ -1032,9 +1021,11 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display, dp->encoder = encoder; /* Pre-empt DP connector creation if there's a bridge */ - ret = exynos_drm_attach_lcd_bridge(dp->drm_dev, encoder); - if (ret) - return 0; + if (dp->bridge) { + ret = exynos_drm_attach_lcd_bridge(dp, encoder); + if (!ret) + return 0; + } connector->polled = DRM_CONNECTOR_POLL_HPD; @@ -1067,10 +1058,8 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp) phy_power_off(dp->phy); } -static void exynos_dp_poweron(struct exynos_drm_display *display) +static void exynos_dp_poweron(struct exynos_dp_device *dp) { - struct exynos_dp_device *dp = display_to_dp(display); - if 
(dp->dpms_mode == DRM_MODE_DPMS_ON) return; @@ -1085,13 +1074,11 @@ static void exynos_dp_poweron(struct exynos_drm_display *display) exynos_dp_phy_init(dp); exynos_dp_init_dp(dp); enable_irq(dp->irq); - exynos_dp_commit(display); + exynos_dp_commit(&dp->display); } -static void exynos_dp_poweroff(struct exynos_drm_display *display) +static void exynos_dp_poweroff(struct exynos_dp_device *dp) { - struct exynos_dp_device *dp = display_to_dp(display); - if (dp->dpms_mode != DRM_MODE_DPMS_ON) return; @@ -1119,12 +1106,12 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode) switch (mode) { case DRM_MODE_DPMS_ON: - exynos_dp_poweron(display); + exynos_dp_poweron(dp); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - exynos_dp_poweroff(display); + exynos_dp_poweroff(dp); break; default: break; @@ -1241,7 +1228,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) } } - if (!dp->panel) { + if (!dp->panel && !dp->bridge) { ret = exynos_dp_dt_parse_panel(dp); if (ret) return ret; @@ -1325,7 +1312,7 @@ static const struct component_ops exynos_dp_ops = { static int exynos_dp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *panel_node; + struct device_node *panel_node, *bridge_node, *endpoint; struct exynos_dp_device *dp; int ret; @@ -1351,6 +1338,18 @@ static int exynos_dp_probe(struct platform_device *pdev) return -EPROBE_DEFER; } + endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); + if (endpoint) { + bridge_node = of_graph_get_remote_port_parent(endpoint); + if (bridge_node) { + dp->bridge = of_drm_find_bridge(bridge_node); + of_node_put(bridge_node); + if (!dp->bridge) + return -EPROBE_DEFER; + } else + return -EPROBE_DEFER; + } + ret = component_add(&pdev->dev, &exynos_dp_ops); if (ret) exynos_drm_component_del(&pdev->dev, diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h index 164f171168e7..a4e799679669 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.h +++ b/drivers/gpu/drm/exynos/exynos_dp_core.h @@ -153,6 +153,7 @@ struct exynos_dp_device { struct drm_connector connector; struct drm_encoder *encoder; struct drm_panel *panel; + struct drm_bridge *bridge; struct clk *clock; unsigned int irq; void __iomem *reg_base; diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c index 9c8088462c26..24994ba10e28 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c @@ -63,11 +63,11 @@ static int lowlevel_buffer_allocate(struct drm_device *dev, return -ENOMEM; } - buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev, + buf->cookie = dma_alloc_attrs(dev->dev, buf->size, &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs); - if (!buf->kvaddr) { + if (!buf->cookie) { DRM_ERROR("failed to allocate buffer.\n"); ret = -ENOMEM; goto err_free; @@ -132,7 +132,7 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev, buf->sgt = NULL; if (!is_drm_iommu_supported(dev)) { - dma_free_attrs(dev->dev, buf->size, buf->kvaddr, + dma_free_attrs(dev->dev, buf->size, buf->cookie, (dma_addr_t)buf->dma_addr, &buf->dma_attrs); drm_free_large(buf->pages); } else diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index a85c451ba392..48ccab7fdf63 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -66,8 +66,6 @@ static void exynos_drm_crtc_commit(struct 
drm_crtc *crtc) if (exynos_crtc->ops->commit) exynos_crtc->ops->commit(exynos_crtc); - - exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_ON); } static bool @@ -234,70 +232,12 @@ static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) kfree(exynos_crtc); } -static int exynos_drm_crtc_set_property(struct drm_crtc *crtc, - struct drm_property *property, - uint64_t val) -{ - struct drm_device *dev = crtc->dev; - struct exynos_drm_private *dev_priv = dev->dev_private; - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - - if (property == dev_priv->crtc_mode_property) { - enum exynos_crtc_mode mode = val; - - if (mode == exynos_crtc->mode) - return 0; - - exynos_crtc->mode = mode; - - switch (mode) { - case CRTC_MODE_NORMAL: - exynos_drm_crtc_commit(crtc); - break; - case CRTC_MODE_BLANK: - exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_OFF); - break; - default: - break; - } - - return 0; - } - - return -EINVAL; -} - static struct drm_crtc_funcs exynos_crtc_funcs = { .set_config = drm_crtc_helper_set_config, .page_flip = exynos_drm_crtc_page_flip, .destroy = exynos_drm_crtc_destroy, - .set_property = exynos_drm_crtc_set_property, -}; - -static const struct drm_prop_enum_list mode_names[] = { - { CRTC_MODE_NORMAL, "normal" }, - { CRTC_MODE_BLANK, "blank" }, }; -static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct exynos_drm_private *dev_priv = dev->dev_private; - struct drm_property *prop; - - prop = dev_priv->crtc_mode_property; - if (!prop) { - prop = drm_property_create_enum(dev, 0, "mode", mode_names, - ARRAY_SIZE(mode_names)); - if (!prop) - return; - - dev_priv->crtc_mode_property = prop; - } - - drm_object_attach_property(&crtc->base, prop, 0); -} - struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, int pipe, enum exynos_drm_output_type type, @@ -340,8 +280,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs); - exynos_drm_crtc_attach_mode_property(crtc); - return exynos_crtc; err_crtc: diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index 60192ed544f0..3833bf8ca025 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -279,7 +279,3 @@ err_buf_detach: return ERR_PTR(ret); } - -MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); -MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module"); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h index 49acfafb4fdb..886de9ff484d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h @@ -12,14 +12,9 @@ #ifndef _EXYNOS_DRM_DMABUF_H_ #define _EXYNOS_DRM_DMABUF_H_ -#ifdef CONFIG_DRM_EXYNOS_DMABUF struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev, struct drm_gem_object *obj, int flags); struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, struct dma_buf *dma_buf); -#else -#define exynos_dmabuf_prime_export NULL -#define exynos_dmabuf_prime_import NULL -#endif #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 1bcbe07cecfc..90168d7cf66a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -556,6 +556,9 @@ static struct platform_driver *const exynos_drm_kms_drivers[] = { #ifdef CONFIG_DRM_EXYNOS_FIMD 
&fimd_driver, #endif +#ifdef CONFIG_DRM_EXYNOS7_DECON + &decon_driver, +#endif #ifdef CONFIG_DRM_EXYNOS_DP &dp_driver, #endif @@ -612,6 +615,7 @@ static const char * const strings[] = { "samsung,exynos3", "samsung,exynos4", "samsung,exynos5", + "samsung,exynos7", }; static struct platform_driver exynos_drm_platform_driver = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index d490b49f71c9..9afd390d4674 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -197,11 +197,6 @@ struct exynos_drm_crtc_ops { void (*te_handler)(struct exynos_drm_crtc *crtc); }; -enum exynos_crtc_mode { - CRTC_MODE_NORMAL, /* normal mode */ - CRTC_MODE_BLANK, /* The private plane of crtc is blank */ -}; - /* * Exynos specific crtc structure. * @@ -215,7 +210,6 @@ enum exynos_crtc_mode { * we can refer to the crtc to current hardware interrupt occurred through * this pipe value. * @dpms: store the crtc dpms value - * @mode: store the crtc mode value * @ops: pointer to callbacks for exynos drm specific functionality * @ctx: A pointer to the crtc's implementation specific context */ @@ -224,7 +218,6 @@ struct exynos_drm_crtc { enum exynos_drm_output_type type; unsigned int pipe; unsigned int dpms; - enum exynos_crtc_mode mode; wait_queue_head_t pending_flip_queue; atomic_t pending_flip; struct exynos_drm_crtc_ops *ops; @@ -265,7 +258,6 @@ struct exynos_drm_private { */ struct drm_crtc *crtc[MAX_CRTC]; struct drm_property *plane_zpos_property; - struct drm_property *crtc_mode_property; unsigned long da_start; unsigned long da_space_size; @@ -352,6 +344,7 @@ void exynos_drm_component_del(struct device *dev, enum exynos_drm_device_type dev_type); extern struct platform_driver fimd_driver; +extern struct platform_driver decon_driver; extern struct platform_driver dp_driver; extern struct platform_driver dsi_driver; extern struct platform_driver mixer_driver; diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c index 7e282e3d6038..57de0bdc5a3b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c @@ -102,7 +102,7 @@ static void exynos_drm_encoder_disable(struct drm_encoder *encoder) /* all planes connected to this encoder should be also disabled. */ drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { - if (plane->crtc == encoder->crtc) + if (plane->crtc && (plane->crtc == encoder->crtc)) plane->funcs->disable_plane(plane); } } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index e12ea90c6237..84f8dfe1c5ec 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -79,9 +79,9 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, struct drm_framebuffer *fb) { struct fb_info *fbi = helper->fbdev; - struct drm_device *dev = helper->dev; struct exynos_drm_gem_buf *buffer; unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3); + unsigned int nr_pages; unsigned long offset; drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); @@ -94,25 +94,14 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, return -EFAULT; } - /* map pages with kernel virtual space. 
*/ + nr_pages = buffer->size >> PAGE_SHIFT; + + buffer->kvaddr = (void __iomem *) vmap(buffer->pages, + nr_pages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); if (!buffer->kvaddr) { - if (is_drm_iommu_supported(dev)) { - unsigned int nr_pages = buffer->size >> PAGE_SHIFT; - - buffer->kvaddr = (void __iomem *) vmap(buffer->pages, - nr_pages, VM_MAP, - pgprot_writecombine(PAGE_KERNEL)); - } else { - phys_addr_t dma_addr = buffer->dma_addr; - if (dma_addr) - buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr); - else - buffer->kvaddr = (void __iomem *)NULL; - } - if (!buffer->kvaddr) { - DRM_ERROR("failed to map pages to kernel space.\n"); - return -EIO; - } + DRM_ERROR("failed to map pages to kernel space.\n"); + return -EIO; } /* buffer count to framebuffer always is 1 at booting time. */ @@ -313,7 +302,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev, struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj; struct drm_framebuffer *fb; - if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr) + if (exynos_gem_obj->buffer->kvaddr) vunmap(exynos_gem_obj->buffer->kvaddr); /* release drm framebuffer and real buffer */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 835b6af00970..842d6b8dc3c4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -461,7 +461,6 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt) cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE; break; case DRM_FORMAT_NV12: - case DRM_FORMAT_NV12MT: case DRM_FORMAT_NV16: cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR | EXYNOS_MSCTRL_C_INT_IN_2PLANE); @@ -511,7 +510,6 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt) case DRM_FORMAT_YVU420: case DRM_FORMAT_NV12: case DRM_FORMAT_NV21: - case DRM_FORMAT_NV12MT: cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420; break; default: @@ -524,10 +522,7 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt) cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK; - if (fmt == DRM_FORMAT_NV12MT) - cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32; - else - cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR; + cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR; fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); @@ -812,7 +807,6 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt) cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE; break; case DRM_FORMAT_NV12: - case DRM_FORMAT_NV12MT: case DRM_FORMAT_NV16: cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR; cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE; @@ -867,7 +861,6 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt) case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_NV12: - case DRM_FORMAT_NV12MT: case DRM_FORMAT_NV21: cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420; break; @@ -883,10 +876,7 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt) cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK; - if (fmt == DRM_FORMAT_NV12MT) - cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32; - else - cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR; + cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR; fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 682806ef4d33..925fc69af1a0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -253,9 +253,8 @@ static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win, writel(val, ctx->regs + SHADOWCON); } -static void 
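The fbdev hunk above now maps the GEM pages with vmap() in all cases; a condensed sketch of that mapping step, with a hypothetical helper name, using the same write-combining protection as the patch:

#include <linux/vmalloc.h>
#include <asm/pgtable.h>

static void __iomem *example_map_fb(struct page **pages, unsigned int nr_pages)
{
	/* VM_MAP + write-combining; released later with vunmap() */
	return (void __iomem *)vmap(pages, nr_pages, VM_MAP,
				    pgprot_writecombine(PAGE_KERNEL));
}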
fimd_clear_channel(struct exynos_drm_crtc *crtc) +static void fimd_clear_channel(struct fimd_context *ctx) { - struct fimd_context *ctx = crtc->ctx; int win, ch_enabled = 0; DRM_DEBUG_KMS("%s\n", __FILE__); @@ -280,7 +279,7 @@ static void fimd_clear_channel(struct exynos_drm_crtc *crtc) unsigned int state = ctx->suspended; ctx->suspended = 0; - fimd_wait_for_vblank(crtc); + fimd_wait_for_vblank(ctx->crtc); ctx->suspended = state; } } @@ -302,7 +301,7 @@ static int fimd_ctx_initialize(struct fimd_context *ctx, * If any channel is already active, iommu will throw * a PAGE FAULT when enabled. So clear any channel if enabled. */ - fimd_clear_channel(ctx->crtc); + fimd_clear_channel(ctx); ret = drm_iommu_attach_device(ctx->drm_dev, ctx->dev); if (ret) { DRM_ERROR("drm_iommu_attach failed.\n"); @@ -823,9 +822,8 @@ static void fimd_win_disable(struct exynos_drm_crtc *crtc, int zpos) win_data->enabled = false; } -static void fimd_window_suspend(struct exynos_drm_crtc *crtc) +static void fimd_window_suspend(struct fimd_context *ctx) { - struct fimd_context *ctx = crtc->ctx; struct fimd_win_data *win_data; int i; @@ -833,13 +831,12 @@ static void fimd_window_suspend(struct exynos_drm_crtc *crtc) win_data = &ctx->win_data[i]; win_data->resume = win_data->enabled; if (win_data->enabled) - fimd_win_disable(crtc, i); + fimd_win_disable(ctx->crtc, i); } } -static void fimd_window_resume(struct exynos_drm_crtc *crtc) +static void fimd_window_resume(struct fimd_context *ctx) { - struct fimd_context *ctx = crtc->ctx; struct fimd_win_data *win_data; int i; @@ -850,26 +847,24 @@ static void fimd_window_resume(struct exynos_drm_crtc *crtc) } } -static void fimd_apply(struct exynos_drm_crtc *crtc) +static void fimd_apply(struct fimd_context *ctx) { - struct fimd_context *ctx = crtc->ctx; struct fimd_win_data *win_data; int i; for (i = 0; i < WINDOWS_NR; i++) { win_data = &ctx->win_data[i]; if (win_data->enabled) - fimd_win_commit(crtc, i); + fimd_win_commit(ctx->crtc, i); else - fimd_win_disable(crtc, i); + fimd_win_disable(ctx->crtc, i); } - fimd_commit(crtc); + fimd_commit(ctx->crtc); } -static int fimd_poweron(struct exynos_drm_crtc *crtc) +static int fimd_poweron(struct fimd_context *ctx) { - struct fimd_context *ctx = crtc->ctx; int ret; if (!ctx->suspended) @@ -893,16 +888,16 @@ static int fimd_poweron(struct exynos_drm_crtc *crtc) /* if vblank was enabled status, enable it again. */ if (test_and_clear_bit(0, &ctx->irq_flags)) { - ret = fimd_enable_vblank(crtc); + ret = fimd_enable_vblank(ctx->crtc); if (ret) { DRM_ERROR("Failed to re-enable vblank [%d]\n", ret); goto enable_vblank_err; } } - fimd_window_resume(crtc); + fimd_window_resume(ctx); - fimd_apply(crtc); + fimd_apply(ctx); return 0; @@ -915,10 +910,8 @@ bus_clk_err: return ret; } -static int fimd_poweroff(struct exynos_drm_crtc *crtc) +static int fimd_poweroff(struct fimd_context *ctx) { - struct fimd_context *ctx = crtc->ctx; - if (ctx->suspended) return 0; @@ -927,7 +920,7 @@ static int fimd_poweroff(struct exynos_drm_crtc *crtc) * suspend that connector. Otherwise we might try to scan from * a destroyed buffer later. 
*/ - fimd_window_suspend(crtc); + fimd_window_suspend(ctx); clk_disable_unprepare(ctx->lcd_clk); clk_disable_unprepare(ctx->bus_clk); @@ -944,12 +937,12 @@ static void fimd_dpms(struct exynos_drm_crtc *crtc, int mode) switch (mode) { case DRM_MODE_DPMS_ON: - fimd_poweron(crtc); + fimd_poweron(crtc->ctx); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - fimd_poweroff(crtc); + fimd_poweroff(crtc->ctx); break; default: DRM_DEBUG_KMS("unspecified mode %d\n", mode); @@ -1065,18 +1058,19 @@ static int fimd_bind(struct device *dev, struct device *master, void *data) struct drm_device *drm_dev = data; int ret; - ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, - EXYNOS_DISPLAY_TYPE_LCD, - &fimd_crtc_ops, ctx); - if (IS_ERR(ctx->crtc)) - return PTR_ERR(ctx->crtc); - ret = fimd_ctx_initialize(ctx, drm_dev); if (ret) { DRM_ERROR("fimd_ctx_initialize failed.\n"); return ret; } + ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, + EXYNOS_DISPLAY_TYPE_LCD, + &fimd_crtc_ops, ctx); + if (IS_ERR(ctx->crtc)) { + fimd_ctx_remove(ctx); + return PTR_ERR(ctx->crtc); + } if (ctx->display) exynos_drm_create_enc_conn(drm_dev, ctx->display); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index ec58fe9c40df..308173cb4f0a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -22,6 +22,7 @@ /* * exynos drm gem buffer structure. * + * @cookie: cookie returned by dma_alloc_attrs * @kvaddr: kernel virtual address to allocated memory region. * *userptr: user space address. * @dma_addr: bus address(accessed by dma) to allocated memory region. @@ -35,6 +36,7 @@ * VM_PFNMAP or not. */ struct exynos_drm_gem_buf { + void *cookie; void __iomem *kvaddr; unsigned long userptr; dma_addr_t dma_addr; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 0261468c8019..8040ed2a831f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -542,9 +542,6 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt) cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P); break; - case DRM_FORMAT_NV12MT: - cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE); - break; default: dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt); return -EINVAL; @@ -809,9 +806,6 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt) cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P); break; - case DRM_FORMAT_NV12MT: - cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE); - break; default: dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt); return -EINVAL; diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 358cff67e5ce..a5616872eee7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -23,7 +23,6 @@ static const uint32_t formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_NV12, - DRM_FORMAT_NV12MT, }; /* @@ -145,32 +144,6 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, exynos_crtc->ops->win_mode_set(exynos_crtc, exynos_plane); } -void exynos_plane_dpms(struct drm_plane *plane, int mode) -{ - struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); - - if (mode == DRM_MODE_DPMS_ON) { - if (exynos_plane->enabled) - return; - - if (exynos_crtc->ops->win_enable) - 
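On the @cookie field documented above: an illustrative sketch (struct and helper names are mine, not the driver's) of why the dma_alloc_attrs() return value is kept as an opaque cookie rather than treated as a kernel virtual address. With attributes such as DMA_ATTR_NO_KERNEL_MAPPING the returned pointer may not be dereferenceable by the CPU and is only valid to hand back to dma_free_attrs(); this rationale is an assumption drawn from the rename, not stated in the patch.

#include <linux/device.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct example_buf {
	void		*cookie;	/* opaque handle for dma_free_attrs() */
	dma_addr_t	dma_addr;	/* device-visible address */
	unsigned long	size;
	struct dma_attrs dma_attrs;
};

static int example_alloc(struct device *dev, struct example_buf *buf)
{
	buf->cookie = dma_alloc_attrs(dev, buf->size, &buf->dma_addr,
				      GFP_KERNEL, &buf->dma_attrs);
	return buf->cookie ? 0 : -ENOMEM;
}

static void example_free(struct device *dev, struct example_buf *buf)
{
	dma_free_attrs(dev, buf->size, buf->cookie, buf->dma_addr,
		       &buf->dma_attrs);
}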
exynos_crtc->ops->win_enable(exynos_crtc, - exynos_plane->zpos); - - exynos_plane->enabled = true; - } else { - if (!exynos_plane->enabled) - return; - - if (exynos_crtc->ops->win_disable) - exynos_crtc->ops->win_disable(exynos_crtc, - exynos_plane->zpos); - - exynos_plane->enabled = false; - } -} - int exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, @@ -199,7 +172,12 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, static int exynos_disable_plane(struct drm_plane *plane) { - exynos_plane_dpms(plane, DRM_MODE_DPMS_OFF); + struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); + struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); + + if (exynos_crtc->ops->win_disable) + exynos_crtc->ops->win_disable(exynos_crtc, + exynos_plane->zpos); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h index 59d40755095b..9d3c374e7b3e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.h +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h @@ -20,7 +20,6 @@ int exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h); -void exynos_plane_dpms(struct drm_plane *plane, int mode); struct drm_plane *exynos_plane_init(struct drm_device *dev, unsigned long possible_crtcs, enum drm_plane_type type); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 9c8300edd348..b886972b5888 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -97,17 +97,16 @@ static const char fake_edid_info[] = { 0x00, 0x00, 0x00, 0x06 }; -static void vidi_apply(struct exynos_drm_crtc *crtc) +static void vidi_apply(struct vidi_context *ctx) { - struct vidi_context *ctx = crtc->ctx; - struct exynos_drm_crtc_ops *crtc_ops = crtc->ops; + struct exynos_drm_crtc_ops *crtc_ops = ctx->crtc->ops; struct vidi_win_data *win_data; int i; for (i = 0; i < WINDOWS_NR; i++) { win_data = &ctx->win_data[i]; if (win_data->enabled && (crtc_ops && crtc_ops->win_commit)) - crtc_ops->win_commit(crtc, i); + crtc_ops->win_commit(ctx->crtc, i); } } @@ -240,10 +239,8 @@ static void vidi_win_disable(struct exynos_drm_crtc *crtc, int zpos) /* TODO. */ } -static int vidi_power_on(struct exynos_drm_crtc *crtc, bool enable) +static int vidi_power_on(struct vidi_context *ctx, bool enable) { - struct vidi_context *ctx = crtc->ctx; - DRM_DEBUG_KMS("%s\n", __FILE__); if (enable != false && enable != true) @@ -254,9 +251,9 @@ static int vidi_power_on(struct exynos_drm_crtc *crtc, bool enable) /* if vblank was enabled status, enable it again. 
*/ if (test_and_clear_bit(0, &ctx->irq_flags)) - vidi_enable_vblank(crtc); + vidi_enable_vblank(ctx->crtc); - vidi_apply(crtc); + vidi_apply(ctx); } else { ctx->suspended = true; } @@ -274,12 +271,12 @@ static void vidi_dpms(struct exynos_drm_crtc *crtc, int mode) switch (mode) { case DRM_MODE_DPMS_ON: - vidi_power_on(crtc, true); + vidi_power_on(ctx, true); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - vidi_power_on(crtc, false); + vidi_power_on(ctx, false); break; default: DRM_DEBUG_KMS("unspecified mode %d\n", mode); @@ -548,6 +545,8 @@ static int vidi_bind(struct device *dev, struct device *master, void *data) struct drm_device *drm_dev = data; int ret; + vidi_ctx_initialize(ctx, drm_dev); + ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, EXYNOS_DISPLAY_TYPE_VIDI, &vidi_crtc_ops, ctx); @@ -556,8 +555,6 @@ static int vidi_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(ctx->crtc); } - vidi_ctx_initialize(ctx, drm_dev); - ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display); if (ret) { ctx->crtc->base.funcs->destroy(&ctx->crtc->base); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 98051e8e855a..229b3613c60b 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -2032,9 +2032,8 @@ static void hdmi_commit(struct exynos_drm_display *display) hdmi_conf_apply(hdata); } -static void hdmi_poweron(struct exynos_drm_display *display) +static void hdmi_poweron(struct hdmi_context *hdata) { - struct hdmi_context *hdata = display_to_hdmi(display); struct hdmi_resources *res = &hdata->res; mutex_lock(&hdata->hdmi_mutex); @@ -2060,12 +2059,11 @@ static void hdmi_poweron(struct exynos_drm_display *display) clk_prepare_enable(res->sclk_hdmi); hdmiphy_poweron(hdata); - hdmi_commit(display); + hdmi_commit(&hdata->display); } -static void hdmi_poweroff(struct exynos_drm_display *display) +static void hdmi_poweroff(struct hdmi_context *hdata) { - struct hdmi_context *hdata = display_to_hdmi(display); struct hdmi_resources *res = &hdata->res; mutex_lock(&hdata->hdmi_mutex); @@ -2109,7 +2107,7 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode) switch (mode) { case DRM_MODE_DPMS_ON: - hdmi_poweron(display); + hdmi_poweron(hdata); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: @@ -2128,7 +2126,7 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode) if (funcs && funcs->dpms) (*funcs->dpms)(crtc, mode); - hdmi_poweroff(display); + hdmi_poweroff(hdata); break; default: DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode); diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index ed44cd4f01f7..3518bc4654c5 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -72,6 +72,7 @@ struct mixer_resources { spinlock_t reg_slock; struct clk *mixer; struct clk *vp; + struct clk *hdmi; struct clk *sclk_mixer; struct clk *sclk_hdmi; struct clk *mout_mixer; @@ -412,8 +413,6 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) win_data = &ctx->win_data[win]; switch (win_data->pixel_format) { - case DRM_FORMAT_NV12MT: - tiled_mode = true; case DRM_FORMAT_NV12: crcb_mode = false; buf_num = 2; @@ -582,8 +581,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win) /* setup display size */ if (ctx->mxr_ver == MXR_VER_128_0_0_184 && win == MIXER_DEFAULT_WIN) { - val = MXR_MXR_RES_HEIGHT(win_data->fb_height); - 
val |= MXR_MXR_RES_WIDTH(win_data->fb_width); + val = MXR_MXR_RES_HEIGHT(win_data->mode_height); + val |= MXR_MXR_RES_WIDTH(win_data->mode_width); mixer_reg_write(res, MXR_RESOLUTION, val); } @@ -769,6 +768,12 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx) return -ENODEV; } + mixer_res->hdmi = devm_clk_get(dev, "hdmi"); + if (IS_ERR(mixer_res->hdmi)) { + dev_err(dev, "failed to get clock 'hdmi'\n"); + return PTR_ERR(mixer_res->hdmi); + } + mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); if (IS_ERR(mixer_res->sclk_hdmi)) { dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); @@ -1047,23 +1052,21 @@ static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc) drm_vblank_put(mixer_ctx->drm_dev, mixer_ctx->pipe); } -static void mixer_window_suspend(struct exynos_drm_crtc *crtc) +static void mixer_window_suspend(struct mixer_context *ctx) { - struct mixer_context *ctx = crtc->ctx; struct hdmi_win_data *win_data; int i; for (i = 0; i < MIXER_WIN_NR; i++) { win_data = &ctx->win_data[i]; win_data->resume = win_data->enabled; - mixer_win_disable(crtc, i); + mixer_win_disable(ctx->crtc, i); } - mixer_wait_for_vblank(crtc); + mixer_wait_for_vblank(ctx->crtc); } -static void mixer_window_resume(struct exynos_drm_crtc *crtc) +static void mixer_window_resume(struct mixer_context *ctx) { - struct mixer_context *ctx = crtc->ctx; struct hdmi_win_data *win_data; int i; @@ -1072,13 +1075,12 @@ static void mixer_window_resume(struct exynos_drm_crtc *crtc) win_data->enabled = win_data->resume; win_data->resume = false; if (win_data->enabled) - mixer_win_commit(crtc, i); + mixer_win_commit(ctx->crtc, i); } } -static void mixer_poweron(struct exynos_drm_crtc *crtc) +static void mixer_poweron(struct mixer_context *ctx) { - struct mixer_context *ctx = crtc->ctx; struct mixer_resources *res = &ctx->mixer_res; mutex_lock(&ctx->mixer_mutex); @@ -1092,6 +1094,7 @@ static void mixer_poweron(struct exynos_drm_crtc *crtc) pm_runtime_get_sync(ctx->dev); clk_prepare_enable(res->mixer); + clk_prepare_enable(res->hdmi); if (ctx->vp_enabled) { clk_prepare_enable(res->vp); if (ctx->has_sclk) @@ -1107,12 +1110,11 @@ static void mixer_poweron(struct exynos_drm_crtc *crtc) mixer_reg_write(res, MXR_INT_EN, ctx->int_en); mixer_win_reset(ctx); - mixer_window_resume(crtc); + mixer_window_resume(ctx); } -static void mixer_poweroff(struct exynos_drm_crtc *crtc) +static void mixer_poweroff(struct mixer_context *ctx) { - struct mixer_context *ctx = crtc->ctx; struct mixer_resources *res = &ctx->mixer_res; mutex_lock(&ctx->mixer_mutex); @@ -1123,7 +1125,7 @@ static void mixer_poweroff(struct exynos_drm_crtc *crtc) mutex_unlock(&ctx->mixer_mutex); mixer_stop(ctx); - mixer_window_suspend(crtc); + mixer_window_suspend(ctx); ctx->int_en = mixer_reg_read(res, MXR_INT_EN); @@ -1131,6 +1133,7 @@ static void mixer_poweroff(struct exynos_drm_crtc *crtc) ctx->powered = false; mutex_unlock(&ctx->mixer_mutex); + clk_disable_unprepare(res->hdmi); clk_disable_unprepare(res->mixer); if (ctx->vp_enabled) { clk_disable_unprepare(res->vp); @@ -1145,12 +1148,12 @@ static void mixer_dpms(struct exynos_drm_crtc *crtc, int mode) { switch (mode) { case DRM_MODE_DPMS_ON: - mixer_poweron(crtc); + mixer_poweron(crtc->ctx); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - mixer_poweroff(crtc); + mixer_poweroff(crtc->ctx); break; default: DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode); @@ -1249,18 +1252,19 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data) struct 
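The mixer now also gates the "hdmi" clock; a minimal sketch of that clock-handling pattern with hypothetical helper names: acquire once with devm_clk_get(), then toggle it alongside the other clocks in the power paths.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *example_get_hdmi_clock(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "hdmi");

	if (IS_ERR(clk))
		dev_err(dev, "failed to get clock 'hdmi'\n");
	return clk;	/* managed lookup; no explicit clk_put() needed */
}

static int example_set_power(struct clk *clk, bool on)
{
	if (on)
		return clk_prepare_enable(clk);

	clk_disable_unprepare(clk);
	return 0;
}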
drm_device *drm_dev = data; int ret; + ret = mixer_initialize(ctx, drm_dev); + if (ret) + return ret; + ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, EXYNOS_DISPLAY_TYPE_HDMI, &mixer_crtc_ops, ctx); if (IS_ERR(ctx->crtc)) { + mixer_ctx_remove(ctx); ret = PTR_ERR(ctx->crtc); goto free_ctx; } - ret = mixer_initialize(ctx, drm_dev); - if (ret) - goto free_ctx; - return 0; free_ctx: diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index d4762799351d..5febffdb027d 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -25,6 +25,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_encoder_slave.h> #include <drm/drm_edid.h> +#include <drm/drm_of.h> #include <drm/i2c/tda998x.h> #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) @@ -32,6 +33,8 @@ struct tda998x_priv { struct i2c_client *cec; struct i2c_client *hdmi; + struct mutex mutex; + struct delayed_work dwork; uint16_t rev; uint8_t current_page; int dpms; @@ -385,7 +388,7 @@ set_page(struct tda998x_priv *priv, uint16_t reg) }; int ret = i2c_master_send(client, buf, sizeof(buf)); if (ret < 0) { - dev_err(&client->dev, "setpage %04x err %d\n", + dev_err(&client->dev, "%s %04x err %d\n", __func__, reg, ret); return ret; } @@ -402,9 +405,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) uint8_t addr = REG2ADDR(reg); int ret; + mutex_lock(&priv->mutex); ret = set_page(priv, reg); if (ret < 0) - return ret; + goto out; ret = i2c_master_send(client, &addr, sizeof(addr)); if (ret < 0) @@ -414,10 +418,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) if (ret < 0) goto fail; - return ret; + goto out; fail: dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg); +out: + mutex_unlock(&priv->mutex); return ret; } @@ -431,13 +437,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt) buf[0] = REG2ADDR(reg); memcpy(&buf[1], p, cnt); + mutex_lock(&priv->mutex); ret = set_page(priv, reg); if (ret < 0) - return; + goto out; ret = i2c_master_send(client, buf, cnt + 1); if (ret < 0) dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); +out: + mutex_unlock(&priv->mutex); } static int @@ -459,13 +468,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val) uint8_t buf[] = {REG2ADDR(reg), val}; int ret; + mutex_lock(&priv->mutex); ret = set_page(priv, reg); if (ret < 0) - return; + goto out; ret = i2c_master_send(client, buf, sizeof(buf)); if (ret < 0) dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); +out: + mutex_unlock(&priv->mutex); } static void @@ -475,13 +487,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val) uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; int ret; + mutex_lock(&priv->mutex); ret = set_page(priv, reg); if (ret < 0) - return; + goto out; ret = i2c_master_send(client, buf, sizeof(buf)); if (ret < 0) dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); +out: + mutex_unlock(&priv->mutex); } static void @@ -536,6 +551,17 @@ tda998x_reset(struct tda998x_priv *priv) reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24); } +/* handle HDMI connect/disconnect */ +static void tda998x_hpd(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct tda998x_priv *priv = + container_of(dwork, struct tda998x_priv, dwork); + + if (priv->encoder && priv->encoder->dev) + drm_kms_helper_hotplug_event(priv->encoder->dev); +} + /* * only 2 interrupts may occur: screen plug/unplug and EDID 
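The tda998x accessors above now hold priv->mutex across both the page select and the data transfer; a compact sketch of that idea (the names, and the page-select register address, are illustrative assumptions, not taken from the driver):

#include <linux/i2c.h>
#include <linux/mutex.h>

struct example_priv {
	struct i2c_client *client;
	struct mutex lock;		/* serialises page select + transfer */
};

static void example_reg_write(struct example_priv *priv, u16 reg, u8 val)
{
	u8 page[] = { 0xff, reg >> 8 };	/* page-select write (address assumed) */
	u8 data[] = { reg & 0xff, val };
	int ret;

	mutex_lock(&priv->lock);
	ret = i2c_master_send(priv->client, page, sizeof(page));
	if (ret < 0)
		goto out;

	ret = i2c_master_send(priv->client, data, sizeof(data));
	if (ret < 0)
		dev_err(&priv->client->dev, "error %d writing to 0x%x\n",
			ret, reg);
out:
	mutex_unlock(&priv->lock);
}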
read */ @@ -559,8 +585,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data) priv->wq_edid_wait = 0; wake_up(&priv->wq_edid); } else if (cec != 0) { /* HPD change */ - if (priv->encoder && priv->encoder->dev) - drm_helper_hpd_irq_event(priv->encoder->dev); + schedule_delayed_work(&priv->dwork, HZ/10); } return IRQ_HANDLED; } @@ -1011,8 +1036,9 @@ tda998x_encoder_detect(struct tda998x_priv *priv) connector_status_disconnected; } -static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk) +static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length) { + struct tda998x_priv *priv = data; uint8_t offset, segptr; int ret, i; @@ -1056,8 +1082,8 @@ static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk) return -ETIMEDOUT; } - ret = reg_read_range(priv, REG_EDID_DATA_0, buf, EDID_LENGTH); - if (ret != EDID_LENGTH) { + ret = reg_read_range(priv, REG_EDID_DATA_0, buf, length); + if (ret != length) { dev_err(&priv->hdmi->dev, "failed to read edid block %d: %d\n", blk, ret); return ret; @@ -1066,82 +1092,31 @@ static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk) return 0; } -static uint8_t *do_get_edid(struct tda998x_priv *priv) +static int +tda998x_encoder_get_modes(struct tda998x_priv *priv, + struct drm_connector *connector) { - int j, valid_extensions = 0; - uint8_t *block, *new; - bool print_bad_edid = drm_debug & DRM_UT_KMS; - - if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) - return NULL; + struct edid *edid; + int n; if (priv->rev == TDA19988) reg_clear(priv, REG_TX4, TX4_PD_RAM); - /* base block fetch */ - if (read_edid_block(priv, block, 0)) - goto fail; - - if (!drm_edid_block_valid(block, 0, print_bad_edid)) - goto fail; - - /* if there's no extensions, we're done */ - if (block[0x7e] == 0) - goto done; - - new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); - if (!new) - goto fail; - block = new; - - for (j = 1; j <= block[0x7e]; j++) { - uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH; - if (read_edid_block(priv, ext_block, j)) - goto fail; - - if (!drm_edid_block_valid(ext_block, j, print_bad_edid)) - goto fail; + edid = drm_do_get_edid(connector, read_edid_block, priv); - valid_extensions++; - } - - if (valid_extensions != block[0x7e]) { - block[EDID_LENGTH-1] += block[0x7e] - valid_extensions; - block[0x7e] = valid_extensions; - new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); - if (!new) - goto fail; - block = new; - } - -done: - if (priv->rev == TDA19988) - reg_set(priv, REG_TX4, TX4_PD_RAM); - - return block; - -fail: if (priv->rev == TDA19988) reg_set(priv, REG_TX4, TX4_PD_RAM); - dev_warn(&priv->hdmi->dev, "failed to read EDID\n"); - kfree(block); - return NULL; -} -static int -tda998x_encoder_get_modes(struct tda998x_priv *priv, - struct drm_connector *connector) -{ - struct edid *edid = (struct edid *)do_get_edid(priv); - int n = 0; - - if (edid) { - drm_mode_connector_update_edid_property(connector, edid); - n = drm_add_edid_modes(connector, edid); - priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid); - kfree(edid); + if (!edid) { + dev_warn(&priv->hdmi->dev, "failed to read EDID\n"); + return 0; } + drm_mode_connector_update_edid_property(connector, edid); + n = drm_add_edid_modes(connector, edid); + priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid); + kfree(edid); + return n; } @@ -1170,8 +1145,10 @@ static void tda998x_destroy(struct tda998x_priv *priv) /* disable all IRQs and free the IRQ handler */ cec_write(priv, 
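After the conversion above, EDID reading is reduced to a block-read callback handed to drm_do_get_edid(), which takes over the validation and extension handling the removed do_get_edid() open-coded. A trimmed sketch, where the callback body and all names are placeholders:

#include <linux/slab.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>

static int example_read_block(void *data, u8 *buf, unsigned int block,
			      size_t len)
{
	/* read 'len' bytes of EDID block 'block' into 'buf' from the HW */
	return 0;	/* 0 on success, negative errno on failure */
}

static int example_get_modes(struct drm_connector *connector, void *hw)
{
	struct edid *edid = drm_do_get_edid(connector, example_read_block, hw);
	int n = 0;

	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		n = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
	return n;
}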
REG_CEC_RXSHPDINTENA, 0); reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); - if (priv->hdmi->irq) + if (priv->hdmi->irq) { free_irq(priv->hdmi->irq, priv); + cancel_delayed_work_sync(&priv->dwork); + } i2c_unregister_device(priv->cec); } @@ -1255,6 +1232,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) struct device_node *np = client->dev.of_node; u32 video; int rev_lo, rev_hi, ret; + unsigned short cec_addr; priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); @@ -1262,12 +1240,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) priv->current_page = 0xff; priv->hdmi = client; - priv->cec = i2c_new_dummy(client->adapter, 0x34); + /* CEC I2C address bound to TDA998x I2C addr by configuration pins */ + cec_addr = 0x34 + (client->addr & 0x03); + priv->cec = i2c_new_dummy(client->adapter, cec_addr); if (!priv->cec) return -ENODEV; priv->dpms = DRM_MODE_DPMS_OFF; + mutex_init(&priv->mutex); /* protect the page access */ + /* wake up the device: */ cec_write(priv, REG_CEC_ENAMODS, CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); @@ -1323,8 +1305,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) if (client->irq) { int irqf_trigger; - /* init read EDID waitqueue */ + /* init read EDID waitqueue and HDP work */ init_waitqueue_head(&priv->wq_edid); + INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd); /* clear pending interrupts */ reg_read(priv, REG_INT_FLAGS_0); @@ -1515,6 +1498,7 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data) struct i2c_client *client = to_i2c_client(dev); struct drm_device *drm = data; struct tda998x_priv2 *priv; + uint32_t crtcs = 0; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -1523,9 +1507,18 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data) dev_set_drvdata(dev, priv); + if (dev->of_node) + crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); + + /* If no CRTCs were found, fall back to our old behaviour */ + if (crtcs == 0) { + dev_warn(dev, "Falling back to first CRTC\n"); + crtcs = 1 << 0; + } + priv->base.encoder = &priv->encoder; priv->connector.interlace_allowed = 1; - priv->encoder.possible_crtcs = 1 << 0; + priv->encoder.possible_crtcs = crtcs; ret = tda998x_create(client, &priv->base); if (ret) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 661527cb00ea..164fa8286fb9 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1223,8 +1223,11 @@ out: static int i915_hangcheck_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; - struct drm_i915_private *dev_priv = to_i915(node->minor->dev); + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *ring; + u64 acthd[I915_NUM_RINGS]; + u32 seqno[I915_NUM_RINGS]; int i; if (!i915.enable_hangcheck) { @@ -1232,6 +1235,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) return 0; } + intel_runtime_pm_get(dev_priv); + + for_each_ring(ring, dev_priv, i) { + seqno[i] = ring->get_seqno(ring, false); + acthd[i] = intel_ring_get_active_head(ring); + } + + intel_runtime_pm_put(dev_priv); + if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) { seq_printf(m, "Hangcheck active, fires in %dms\n", jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires 
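The interrupt path above no longer calls the hotplug helper directly; it schedules a delayed work item so the connect/disconnect event is reported from process context roughly 100ms later. A minimal sketch of that debounce arrangement, with hypothetical structure and function names:

#include <linux/workqueue.h>
#include <drm/drm_crtc_helper.h>

struct example_hpd {
	struct delayed_work dwork;
	struct drm_device *drm;
};

static void example_hpd_work(struct work_struct *work)
{
	struct example_hpd *priv = container_of(to_delayed_work(work),
						struct example_hpd, dwork);

	if (priv->drm)
		drm_kms_helper_hotplug_event(priv->drm);
}

/* at init:            INIT_DELAYED_WORK(&priv->dwork, example_hpd_work);  */
/* in the IRQ handler: schedule_delayed_work(&priv->dwork, HZ / 10);       */
/* at teardown:        cancel_delayed_work_sync(&priv->dwork);             */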
- @@ -1242,14 +1254,14 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) for_each_ring(ring, dev_priv, i) { seq_printf(m, "%s:\n", ring->name); seq_printf(m, "\tseqno = %x [current %x]\n", - ring->hangcheck.seqno, ring->get_seqno(ring, false)); - seq_printf(m, "\taction = %d\n", ring->hangcheck.action); - seq_printf(m, "\tscore = %d\n", ring->hangcheck.score); + ring->hangcheck.seqno, seqno[i]); seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", (long long)ring->hangcheck.acthd, - (long long)intel_ring_get_active_head(ring)); + (long long)acthd[i]); seq_printf(m, "\tmax ACTHD = 0x%08llx\n", (long long)ring->hangcheck.max_acthd); + seq_printf(m, "\tscore = %d\n", ring->hangcheck.score); + seq_printf(m, "\taction = %d\n", ring->hangcheck.action); } return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ba6862f5b6b2..4badb23f2c58 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -477,19 +477,13 @@ void intel_detect_pch(struct drm_device *dev) } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_LPT; DRM_DEBUG_KMS("Found LynxPoint PCH\n"); - WARN_ON(!IS_HASWELL(dev)); - WARN_ON(IS_HSW_ULT(dev)); - } else if (IS_BROADWELL(dev)) { - dev_priv->pch_type = PCH_LPT; - dev_priv->pch_id = - INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; - DRM_DEBUG_KMS("This is Broadwell, assuming " - "LynxPoint LP PCH\n"); + WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); + WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev)); } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_LPT; DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); - WARN_ON(!IS_HASWELL(dev)); - WARN_ON(!IS_HSW_ULT(dev)); + WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); + WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev)); } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_SPT; DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3f210aa7652e..39f7e5676c9b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2381,8 +2381,7 @@ struct drm_i915_cmd_table { #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ - ((INTEL_DEVID(dev) & 0xf) == 0x2 || \ - (INTEL_DEVID(dev) & 0xf) == 0x6 || \ + ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ (INTEL_DEVID(dev) & 0xf) == 0xe)) #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ (INTEL_DEVID(dev) & 0x00F0) == 0x0020) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f28f0dea6c96..61134ab0be81 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3174,6 +3174,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, u32 size = i915_gem_obj_ggtt_size(obj); uint64_t val; + /* Adjust fence size to match tiled area */ + if (obj->tiling_mode != I915_TILING_NONE) { + uint32_t row_size = obj->stride * + (obj->tiling_mode == I915_TILING_Y ? 32 : 8); + size = (size / row_size) * row_size; + } + val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & 0xfffff000) << 32; val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; @@ -4829,25 +4836,18 @@ i915_gem_init_hw(struct drm_device *dev) for (i = 0; i < NUM_L3_SLICES(dev); i++) i915_gem_l3_remap(&dev_priv->ring[RCS], i); - /* - * XXX: Contexts should only be initialized once. 
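On the fence-size adjustment added to i965_write_fence_reg() above: the fence is sized to a whole number of tile rows so it only covers the tiled area of the object. A one-function sketch of the arithmetic (helper name is hypothetical; the 32/8 row heights for Y/X tiling come from the hunk itself):

#include <linux/types.h>

static u32 example_tiled_fence_size(u32 ggtt_size, u32 stride, bool y_tiled)
{
	u32 row_size = stride * (y_tiled ? 32 : 8);	/* bytes per row of tiles */

	return (ggtt_size / row_size) * row_size;	/* round down */
}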
Doing a switch to the - * default context switch however is something we'd like to do after - * reset or thaw (the latter may not actually be necessary for HW, but - * goes with our code better). Context switching requires rings (for - * the do_switch), but before enabling PPGTT. So don't move this. - */ - ret = i915_gem_context_enable(dev_priv); + ret = i915_ppgtt_init_hw(dev); if (ret && ret != -EIO) { - DRM_ERROR("Context enable failed %d\n", ret); + DRM_ERROR("PPGTT enable failed %d\n", ret); i915_gem_cleanup_ringbuffer(dev); - - return ret; } - ret = i915_ppgtt_init_hw(dev); + ret = i915_gem_context_enable(dev_priv); if (ret && ret != -EIO) { - DRM_ERROR("PPGTT enable failed %d\n", ret); + DRM_ERROR("Context enable failed %d\n", ret); i915_gem_cleanup_ringbuffer(dev); + + return ret; } return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index d182058383a9..1719078c763a 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -113,7 +113,10 @@ restart: continue; obj = mo->obj; - drm_gem_object_reference(&obj->base); + + if (!kref_get_unless_zero(&obj->base.refcount)) + continue; + spin_unlock(&mn->lock); cancel_userptr(obj); @@ -149,7 +152,20 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, it = interval_tree_iter_first(&mn->objects, start, end); if (it != NULL) { obj = container_of(it, struct i915_mmu_object, it)->obj; - drm_gem_object_reference(&obj->base); + + /* The mmu_object is released late when destroying the + * GEM object so it is entirely possible to gain a + * reference on an object in the process of being freed + * since our serialisation is via the spinlock and not + * the struct_mutex - and consequently use it after it + * is freed and then double free it. 
+ */ + if (!kref_get_unless_zero(&obj->base.refcount)) { + spin_unlock(&mn->lock); + serial = 0; + continue; + } + serial = mn->serial; } spin_unlock(&mn->lock); diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 19a9dd5408f3..011b8960fd75 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -134,9 +134,9 @@ int intel_atomic_commit(struct drm_device *dev, * FIXME: The proper sequence here will eventually be: * * drm_atomic_helper_swap_state(dev, state) - * drm_atomic_helper_commit_pre_planes(dev, state); + * drm_atomic_helper_commit_modeset_disables(dev, state); * drm_atomic_helper_commit_planes(dev, state); - * drm_atomic_helper_commit_post_planes(dev, state); + * drm_atomic_helper_commit_modeset_enables(dev, state); * drm_atomic_helper_wait_for_vblanks(dev, state); * drm_atomic_helper_cleanup_planes(dev, state); * drm_atomic_state_free(state); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index dfd94c52a4cc..868a07ba5e59 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -3521,8 +3521,6 @@ intel_dp_link_down(struct intel_dp *intel_dp) enum port port = intel_dig_port->port; struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(intel_dig_port->base.base.crtc); uint32_t DP = intel_dp->DP; if (WARN_ON(HAS_DDI(dev))) @@ -3547,8 +3545,6 @@ intel_dp_link_down(struct intel_dp *intel_dp) if (HAS_PCH_IBX(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; - /* Hardware workaround: leaving our transcoder select * set to transcoder B while it's off will prevent the * corresponding HDMI output on transcoder A. @@ -3559,18 +3555,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) */ DP &= ~DP_PIPEB_SELECT; I915_WRITE(intel_dp->output_reg, DP); - - /* Changes to enable or select take place the vblank - * after being written. - */ - if (WARN_ON(crtc == NULL)) { - /* We should never try to disable a port without a crtc - * attached. For paranoia keep the code around for a - * bit. 
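A sketch of the lookup pattern the userptr MMU notifier switched to above, using a hypothetical object: when something found under a spinlock may already be on its way to destruction, kref_get_unless_zero() either takes a safe reference or reports that the object is dead, instead of resurrecting a zero refcount.

#include <linux/kref.h>
#include <linux/spinlock.h>

struct example_obj {
	struct kref refcount;
};

static bool example_try_get(spinlock_t *lock, struct example_obj *obj)
{
	bool alive;

	spin_lock(lock);
	/* fails, rather than re-animating the object, if the count hit zero */
	alive = kref_get_unless_zero(&obj->refcount);
	spin_unlock(lock);

	return alive;	/* caller owns a reference iff true */
}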
*/ - POSTING_READ(intel_dp->output_reg); - msleep(50); - } else - intel_wait_for_vblank(dev, intel_crtc->pipe); + POSTING_READ(intel_dp->output_reg); } DP &= ~DP_AUDIO_OUTPUT_ENABLE; @@ -4446,7 +4431,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) */ DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n", port_name(intel_dig_port->port)); - return false; + return IRQ_HANDLED; } DRM_DEBUG_KMS("got hpd irq on port %c - %s\n", diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 6ce9c4592fe4..c8c8b24e300c 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -360,12 +360,11 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER); usleep_range(2500, 3000); - val = I915_READ(MIPI_PORT_CTRL(port)); - /* Enable MIPI PHY transparent latch * Common bit for both MIPI Port A & MIPI Port C * No similar bit in MIPI Port C reg */ + val = I915_READ(MIPI_PORT_CTRL(PORT_A)); I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD); usleep_range(1000, 1500); @@ -543,10 +542,10 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) == 0x00000), 30)) DRM_ERROR("DSI LP not going Low\n"); - val = I915_READ(MIPI_PORT_CTRL(port)); /* Disable MIPI PHY transparent latch * Common bit for both MIPI Port A & MIPI Port C */ + val = I915_READ(MIPI_PORT_CTRL(PORT_A)); I915_WRITE(MIPI_PORT_CTRL(PORT_A), val & ~LP_OUTPUT_HOLD); usleep_range(1000, 1500); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index b355da4cae88..aafcef3b6b23 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1232,15 +1232,17 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf, cmd = MI_FLUSH_DW + 1; - if (ring == &dev_priv->ring[VCS]) { - if (invalidate_domains & I915_GEM_GPU_DOMAINS) - cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | - MI_FLUSH_DW_STORE_INDEX | - MI_FLUSH_DW_OP_STOREDW; - } else { - if (invalidate_domains & I915_GEM_DOMAIN_RENDER) - cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | - MI_FLUSH_DW_OP_STOREDW; + /* We always require a command barrier so that subsequent + * commands, such as breadcrumb interrupts, are strictly ordered + * wrt the contents of the write cache being flushed to memory + * (and thus being coherent from the CPU). 
+ */ + cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + + if (invalidate_domains & I915_GEM_GPU_DOMAINS) { + cmd |= MI_INVALIDATE_TLB; + if (ring == &dev_priv->ring[VCS]) + cmd |= MI_INVALIDATE_BSD; } intel_logical_ring_emit(ringbuf, cmd); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index d7be68a7bbda..d8686ce89160 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector) WARN_ON(panel->backlight.max == 0); - if (panel->backlight.level == 0) { + if (panel->backlight.level <= panel->backlight.min) { panel->backlight.level = panel->backlight.max; if (panel->backlight.device) panel->backlight.device->props.brightness = diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f68d12cb0246..f7c993843be8 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4028,7 +4028,10 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) &ddcc_status); if (0 == ret) dev_priv->rps.efficient_freq = - (ddcc_status >> 8) & 0xff; + clamp_t(u8, + ((ddcc_status >> 8) & 0xff), + dev_priv->rps.min_freq, + dev_priv->rps.max_freq); } /* Preserve min/max settings in case of re-init */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e758c0592675..d17e76d32e03 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -2319,6 +2319,14 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring, cmd = MI_FLUSH_DW; if (INTEL_INFO(ring->dev)->gen >= 8) cmd += 1; + + /* We always require a command barrier so that subsequent + * commands, such as breadcrumb interrupts, are strictly ordered + * wrt the contents of the write cache being flushed to memory + * (and thus being coherent from the CPU). + */ + cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + /* * Bspec vol 1c.5 - video engine command streamer: * "If ENABLED, all TLBs will be invalidated once the flush @@ -2326,8 +2334,8 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring, * Post-Sync Operation field is a value of 1h or 3h." */ if (invalidate & I915_GEM_GPU_DOMAINS) - cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | - MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; + intel_ring_emit(ring, cmd); intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); if (INTEL_INFO(ring->dev)->gen >= 8) { @@ -2423,6 +2431,14 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, cmd = MI_FLUSH_DW; if (INTEL_INFO(ring->dev)->gen >= 8) cmd += 1; + + /* We always require a command barrier so that subsequent + * commands, such as breadcrumb interrupts, are strictly ordered + * wrt the contents of the write cache being flushed to memory + * (and thus being coherent from the CPU). + */ + cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + /* * Bspec vol 1c.3 - blitter engine command streamer: * "If ENABLED, all TLBs will be invalidated once the flush @@ -2430,8 +2446,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, * Post-Sync Operation field is a value of 1h or 3h." 
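On the clamp added in gen6_init_rps_frequencies() above: the PCU-reported efficient frequency is only trusted inside the min/max range already read from hardware. The equivalent, as a hypothetical one-line helper:

#include <linux/kernel.h>

static u8 example_efficient_freq(u32 ddcc_status, u8 min_freq, u8 max_freq)
{
	return clamp_t(u8, (ddcc_status >> 8) & 0xff, min_freq, max_freq);
}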
*/ if (invalidate & I915_GEM_DOMAIN_RENDER) - cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | - MI_FLUSH_DW_OP_STOREDW; + cmd |= MI_INVALIDATE_TLB; intel_ring_emit(ring, cmd); intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); if (INTEL_INFO(ring->dev)->gen >= 8) { diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 3c42eeffa3cb..693ce8281970 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -82,7 +82,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr) WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); mutex_lock(&dev_priv->dpio_lock); - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, SB_CRRDDA_NP, addr, &val); mutex_unlock(&dev_priv->dpio_lock); @@ -94,7 +94,7 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val) WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); mutex_lock(&dev_priv->dpio_lock); - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, SB_CRWRDA_NP, addr, &val); mutex_unlock(&dev_priv->dpio_lock); } @@ -103,7 +103,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT, SB_CRRDDA_NP, reg, &val); return val; @@ -111,7 +111,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg) void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT, SB_CRWRDA_NP, reg, &val); } @@ -122,7 +122,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr) WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); mutex_lock(&dev_priv->dpio_lock); - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC, SB_CRRDDA_NP, addr, &val); mutex_unlock(&dev_priv->dpio_lock); @@ -132,56 +132,56 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr) u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC, SB_CRRDDA_NP, reg, &val); return val; } void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC, SB_CRWRDA_NP, reg, &val); } u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK, SB_CRRDDA_NP, reg, &val); return val; } void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK, SB_CRWRDA_NP, reg, &val); } u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU, SB_CRRDDA_NP, reg, &val); return val; } void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU, + 
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU, SB_CRWRDA_NP, reg, &val); } u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE, SB_CRRDDA_NP, reg, &val); return val; } void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE, SB_CRWRDA_NP, reg, &val); } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 27cc45849715..876e06360c36 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -167,7 +167,8 @@ fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_do struct intel_uncore_forcewake_domain *d; enum forcewake_domain_id id; - WARN_ON(dev_priv->uncore.fw_domains == 0); + if (dev_priv->uncore.fw_domains == 0) + return; for_each_fw_domain_mask(d, fw_domains, dev_priv, id) fw_domain_reset(d); @@ -1026,6 +1027,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + if (INTEL_INFO(dev_priv->dev)->gen <= 5) + return; + if (IS_GEN9(dev)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get; dev_priv->uncore.funcs.force_wake_put = fw_domains_put; @@ -1098,6 +1102,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); } + + /* All future platforms are expected to require complex power gating */ + WARN_ON(dev_priv->uncore.fw_domains == 0); } void intel_uncore_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 5d5e4092d40a..33cdddf26684 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -5,7 +5,7 @@ config DRM_IMX select VIDEOMODE_HELPERS select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER - depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) + depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS depends on IMX_IPUV3_CORE help enable i.MX graphics support diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index b63601d04601..4216e479a9be 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -191,10 +191,18 @@ static int tve_setup_vga(struct imx_tve *tve) /* set gain to (1 + 10/128) to provide 0.7V peak-to-peak amplitude */ ret = regmap_update_bits(tve->regmap, TVE_TVDAC0_CONT_REG, TVE_TVDAC_GAIN_MASK, 0x0a); + if (ret) + return ret; + ret = regmap_update_bits(tve->regmap, TVE_TVDAC1_CONT_REG, TVE_TVDAC_GAIN_MASK, 0x0a); + if (ret) + return ret; + ret = regmap_update_bits(tve->regmap, TVE_TVDAC2_CONT_REG, TVE_TVDAC_GAIN_MASK, 0x0a); + if (ret) + return ret; /* set configuration register */ mask = TVE_DATA_SOURCE_MASK | TVE_INP_VIDEO_FORM; @@ -204,16 +212,12 @@ static int tve_setup_vga(struct imx_tve *tve) mask |= TVE_TV_OUT_MODE_MASK | TVE_SYNC_CH_0_EN; val |= TVE_TV_OUT_RGB | TVE_SYNC_CH_0_EN; ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, mask, val); - if (ret < 0) { - dev_err(tve->dev, "failed to set configuration: %d\n", ret); + if (ret) return ret; - } /* set test mode (as documented) */ - ret = regmap_update_bits(tve->regmap, TVE_TST_MODE_REG, + return regmap_update_bits(tve->regmap, TVE_TST_MODE_REG, TVE_TVDAC_TEST_MODE_MASK, 1); - - return 0; } static enum 
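A short sketch of the error-handling style adopted in tve_setup_vga() above (register offsets and names here are placeholders): every regmap_update_bits() result is checked and propagated instead of being silently dropped, and the final write's status becomes the function's return value.

#include <linux/regmap.h>

static int example_setup(struct regmap *map)
{
	int ret;

	ret = regmap_update_bits(map, 0x28, 0x3f, 0x0a);
	if (ret)
		return ret;

	ret = regmap_update_bits(map, 0x2c, 0x3f, 0x0a);
	if (ret)
		return ret;

	/* last configuration write; its status is the overall result */
	return regmap_update_bits(map, 0x30, 0x01, 0x01);
}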
drm_connector_status imx_tve_connector_detect( @@ -335,9 +339,11 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder, } if (tve->mode == TVE_MODE_VGA) - tve_setup_vga(tve); + ret = tve_setup_vga(tve); else - tve_setup_tvout(tve); + ret = tve_setup_tvout(tve); + if (ret) + dev_err(tve->dev, "failed to set configuration: %d\n", ret); } static void imx_tve_encoder_commit(struct drm_encoder *encoder) @@ -671,6 +677,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) /* disable cable detection for VGA mode */ ret = regmap_write(tve->regmap, TVE_CD_CONT_REG, 0); + if (ret) + return ret; ret = imx_tve_register(drm, tve); if (ret) diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 5b2a1ff95d3d..bacbbb70f679 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -3,6 +3,7 @@ config DRM_MSM tristate "MSM DRM" depends on DRM depends on ARCH_QCOM || (ARM && COMPILE_TEST) + depends on OF && COMMON_CLK select REGULATOR select DRM_KMS_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 143d988f8add..674a132fd76e 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -1,7 +1,4 @@ ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -ifeq (, $(findstring -W,$(EXTRA_CFLAGS))) - ccflags-y += -Werror -endif msm-y := \ adreno/adreno_device.o \ @@ -16,6 +13,12 @@ msm-y := \ hdmi/hdmi_phy_8960.o \ hdmi/hdmi_phy_8x60.o \ hdmi/hdmi_phy_8x74.o \ + edp/edp.o \ + edp/edp_aux.o \ + edp/edp_bridge.o \ + edp/edp_connector.o \ + edp/edp_ctrl.o \ + edp/edp_phy.o \ mdp/mdp_format.o \ mdp/mdp_kms.o \ mdp/mdp4/mdp4_crtc.o \ diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h index 22882cc0a573..edc845fffdf4 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h @@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h index 109e9a263daf..e91a739452d7 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h @@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 
2014-06-02 15:21:30) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -58,111 +58,130 @@ enum a3xx_cache_opcode { }; enum a3xx_vtx_fmt { - VFMT_FLOAT_32 = 0, - VFMT_FLOAT_32_32 = 1, - VFMT_FLOAT_32_32_32 = 2, - VFMT_FLOAT_32_32_32_32 = 3, - VFMT_FLOAT_16 = 4, - VFMT_FLOAT_16_16 = 5, - VFMT_FLOAT_16_16_16 = 6, - VFMT_FLOAT_16_16_16_16 = 7, - VFMT_FIXED_32 = 8, - VFMT_FIXED_32_32 = 9, - VFMT_FIXED_32_32_32 = 10, - VFMT_FIXED_32_32_32_32 = 11, - VFMT_SHORT_16 = 16, - VFMT_SHORT_16_16 = 17, - VFMT_SHORT_16_16_16 = 18, - VFMT_SHORT_16_16_16_16 = 19, - VFMT_USHORT_16 = 20, - VFMT_USHORT_16_16 = 21, - VFMT_USHORT_16_16_16 = 22, - VFMT_USHORT_16_16_16_16 = 23, - VFMT_NORM_SHORT_16 = 24, - VFMT_NORM_SHORT_16_16 = 25, - VFMT_NORM_SHORT_16_16_16 = 26, - VFMT_NORM_SHORT_16_16_16_16 = 27, - VFMT_NORM_USHORT_16 = 28, - VFMT_NORM_USHORT_16_16 = 29, - VFMT_NORM_USHORT_16_16_16 = 30, - VFMT_NORM_USHORT_16_16_16_16 = 31, - VFMT_UINT_32 = 32, - VFMT_UINT_32_32 = 33, - VFMT_UINT_32_32_32 = 34, - VFMT_UINT_32_32_32_32 = 35, - VFMT_INT_32 = 36, - VFMT_INT_32_32 = 37, - VFMT_INT_32_32_32 = 38, - VFMT_INT_32_32_32_32 = 39, - VFMT_UBYTE_8 = 40, - VFMT_UBYTE_8_8 = 41, - VFMT_UBYTE_8_8_8 = 42, - VFMT_UBYTE_8_8_8_8 = 43, - VFMT_NORM_UBYTE_8 = 44, - VFMT_NORM_UBYTE_8_8 = 45, - VFMT_NORM_UBYTE_8_8_8 = 46, - VFMT_NORM_UBYTE_8_8_8_8 = 47, - VFMT_BYTE_8 = 48, - VFMT_BYTE_8_8 = 49, - VFMT_BYTE_8_8_8 = 50, - VFMT_BYTE_8_8_8_8 = 51, - VFMT_NORM_BYTE_8 = 52, - VFMT_NORM_BYTE_8_8 = 53, - VFMT_NORM_BYTE_8_8_8 = 54, - VFMT_NORM_BYTE_8_8_8_8 = 55, - VFMT_UINT_10_10_10_2 = 60, - VFMT_NORM_UINT_10_10_10_2 = 61, - VFMT_INT_10_10_10_2 = 62, - VFMT_NORM_INT_10_10_10_2 = 63, + VFMT_32_FLOAT = 0, + VFMT_32_32_FLOAT = 1, + VFMT_32_32_32_FLOAT = 2, + VFMT_32_32_32_32_FLOAT = 3, + VFMT_16_FLOAT = 4, + VFMT_16_16_FLOAT = 5, + VFMT_16_16_16_FLOAT = 6, + VFMT_16_16_16_16_FLOAT = 7, + VFMT_32_FIXED = 8, + VFMT_32_32_FIXED = 9, + VFMT_32_32_32_FIXED = 10, + VFMT_32_32_32_32_FIXED = 11, + VFMT_16_SINT = 16, + VFMT_16_16_SINT = 17, + VFMT_16_16_16_SINT = 18, + VFMT_16_16_16_16_SINT = 19, + VFMT_16_UINT = 20, + VFMT_16_16_UINT = 21, + VFMT_16_16_16_UINT = 22, + VFMT_16_16_16_16_UINT = 23, + VFMT_16_SNORM = 24, + VFMT_16_16_SNORM = 25, + VFMT_16_16_16_SNORM = 26, + VFMT_16_16_16_16_SNORM = 27, + VFMT_16_UNORM = 28, + VFMT_16_16_UNORM = 29, + VFMT_16_16_16_UNORM = 30, + VFMT_16_16_16_16_UNORM = 31, + VFMT_32_UINT = 32, + VFMT_32_32_UINT = 33, + VFMT_32_32_32_UINT = 34, + VFMT_32_32_32_32_UINT = 35, + VFMT_32_SINT = 36, + VFMT_32_32_SINT = 37, + VFMT_32_32_32_SINT = 38, + VFMT_32_32_32_32_SINT = 39, + VFMT_8_UINT = 40, + VFMT_8_8_UINT = 41, + VFMT_8_8_8_UINT = 42, + VFMT_8_8_8_8_UINT = 43, + VFMT_8_UNORM = 44, + VFMT_8_8_UNORM = 45, + VFMT_8_8_8_UNORM = 46, + 
VFMT_8_8_8_8_UNORM = 47, + VFMT_8_SINT = 48, + VFMT_8_8_SINT = 49, + VFMT_8_8_8_SINT = 50, + VFMT_8_8_8_8_SINT = 51, + VFMT_8_SNORM = 52, + VFMT_8_8_SNORM = 53, + VFMT_8_8_8_SNORM = 54, + VFMT_8_8_8_8_SNORM = 55, + VFMT_10_10_10_2_UINT = 60, + VFMT_10_10_10_2_UNORM = 61, + VFMT_10_10_10_2_SINT = 62, + VFMT_10_10_10_2_SNORM = 63, }; enum a3xx_tex_fmt { - TFMT_NORM_USHORT_565 = 4, - TFMT_NORM_USHORT_5551 = 6, - TFMT_NORM_USHORT_4444 = 7, - TFMT_NORM_USHORT_Z16 = 9, - TFMT_NORM_UINT_X8Z24 = 10, - TFMT_FLOAT_Z32 = 11, - TFMT_NORM_UINT_NV12_UV_TILED = 17, - TFMT_NORM_UINT_NV12_Y_TILED = 19, - TFMT_NORM_UINT_NV12_UV = 21, - TFMT_NORM_UINT_NV12_Y = 23, - TFMT_NORM_UINT_I420_Y = 24, - TFMT_NORM_UINT_I420_U = 26, - TFMT_NORM_UINT_I420_V = 27, - TFMT_NORM_UINT_2_10_10_10 = 41, - TFMT_FLOAT_9_9_9_E5 = 42, - TFMT_FLOAT_10_11_11 = 43, - TFMT_NORM_UINT_A8 = 44, - TFMT_NORM_UINT_L8_A8 = 47, - TFMT_NORM_UINT_8 = 48, - TFMT_NORM_UINT_8_8 = 49, - TFMT_NORM_UINT_8_8_8 = 50, - TFMT_NORM_UINT_8_8_8_8 = 51, - TFMT_NORM_SINT_8_8 = 53, - TFMT_NORM_SINT_8_8_8_8 = 55, - TFMT_UINT_8_8 = 57, - TFMT_UINT_8_8_8_8 = 59, - TFMT_SINT_8_8 = 61, - TFMT_SINT_8_8_8_8 = 63, - TFMT_FLOAT_16 = 64, - TFMT_FLOAT_16_16 = 65, - TFMT_FLOAT_16_16_16_16 = 67, - TFMT_UINT_16 = 68, - TFMT_UINT_16_16 = 69, - TFMT_UINT_16_16_16_16 = 71, - TFMT_SINT_16 = 72, - TFMT_SINT_16_16 = 73, - TFMT_SINT_16_16_16_16 = 75, - TFMT_FLOAT_32 = 84, - TFMT_FLOAT_32_32 = 85, - TFMT_FLOAT_32_32_32_32 = 87, - TFMT_UINT_32 = 88, - TFMT_UINT_32_32 = 89, - TFMT_UINT_32_32_32_32 = 91, - TFMT_SINT_32 = 92, - TFMT_SINT_32_32 = 93, - TFMT_SINT_32_32_32_32 = 95, + TFMT_5_6_5_UNORM = 4, + TFMT_5_5_5_1_UNORM = 5, + TFMT_4_4_4_4_UNORM = 7, + TFMT_Z16_UNORM = 9, + TFMT_X8Z24_UNORM = 10, + TFMT_Z32_FLOAT = 11, + TFMT_NV12_UV_TILED = 17, + TFMT_NV12_Y_TILED = 19, + TFMT_NV12_UV = 21, + TFMT_NV12_Y = 23, + TFMT_I420_Y = 24, + TFMT_I420_U = 26, + TFMT_I420_V = 27, + TFMT_DXT1 = 36, + TFMT_DXT3 = 37, + TFMT_DXT5 = 38, + TFMT_10_10_10_2_UNORM = 41, + TFMT_9_9_9_E5_FLOAT = 42, + TFMT_11_11_10_FLOAT = 43, + TFMT_A8_UNORM = 44, + TFMT_L8_A8_UNORM = 47, + TFMT_8_UNORM = 48, + TFMT_8_8_UNORM = 49, + TFMT_8_8_8_UNORM = 50, + TFMT_8_8_8_8_UNORM = 51, + TFMT_8_SNORM = 52, + TFMT_8_8_SNORM = 53, + TFMT_8_8_8_SNORM = 54, + TFMT_8_8_8_8_SNORM = 55, + TFMT_8_UINT = 56, + TFMT_8_8_UINT = 57, + TFMT_8_8_8_UINT = 58, + TFMT_8_8_8_8_UINT = 59, + TFMT_8_SINT = 60, + TFMT_8_8_SINT = 61, + TFMT_8_8_8_SINT = 62, + TFMT_8_8_8_8_SINT = 63, + TFMT_16_FLOAT = 64, + TFMT_16_16_FLOAT = 65, + TFMT_16_16_16_16_FLOAT = 67, + TFMT_16_UINT = 68, + TFMT_16_16_UINT = 69, + TFMT_16_16_16_16_UINT = 71, + TFMT_16_SINT = 72, + TFMT_16_16_SINT = 73, + TFMT_16_16_16_16_SINT = 75, + TFMT_16_UNORM = 76, + TFMT_16_16_UNORM = 77, + TFMT_16_16_16_16_UNORM = 79, + TFMT_16_SNORM = 80, + TFMT_16_16_SNORM = 81, + TFMT_16_16_16_16_SNORM = 83, + TFMT_32_FLOAT = 84, + TFMT_32_32_FLOAT = 85, + TFMT_32_32_32_32_FLOAT = 87, + TFMT_32_UINT = 88, + TFMT_32_32_UINT = 89, + TFMT_32_32_32_32_UINT = 91, + TFMT_32_SINT = 92, + TFMT_32_32_SINT = 93, + TFMT_32_32_32_32_SINT = 95, + TFMT_RGTC2_SNORM = 112, + TFMT_RGTC2_UNORM = 113, + TFMT_RGTC1_SNORM = 114, + TFMT_RGTC1_UNORM = 115, }; enum a3xx_tex_fetchsize { @@ -180,9 +199,11 @@ enum a3xx_color_fmt { RB_R4G4B4A4_UNORM = 3, RB_R8G8B8_UNORM = 4, RB_R8G8B8A8_UNORM = 8, + RB_R8G8B8A8_SNORM = 9, RB_R8G8B8A8_UINT = 10, RB_R8G8B8A8_SINT = 11, RB_R8G8_UNORM = 12, + RB_R8G8_SNORM = 13, RB_R8_UINT = 14, RB_R8_SINT = 15, RB_R10G10B10A2_UNORM = 16, @@ -258,6 +279,14 @@ enum a3xx_tex_clamp { 
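The a3xx format enums above are largely a rename from the old type-first spelling to a components-then-type spelling; most numeric values carry over unchanged, though a few are adjusted and several new formats (DXT, RGTC, more integer variants) appear. If out-of-tree code had to build against both spellings, simple aliases would suffice; the mapping below is hypothetical and only uses values visible in this hunk:

/* old spelling on the left, new spelling from this header on the right */
#define VFMT_FLOAT_32       VFMT_32_FLOAT    /* both 0  */
#define VFMT_NORM_UBYTE_8   VFMT_8_UNORM     /* both 44 */
#define TFMT_NORM_UINT_8_8  TFMT_8_8_UNORM   /* both 49 */
#define TFMT_FLOAT_16       TFMT_16_FLOAT    /* both 64 */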
A3XX_TEX_MIRROR_CLAMP = 4, }; +enum a3xx_tex_aniso { + A3XX_TEX_ANISO_1 = 0, + A3XX_TEX_ANISO_2 = 1, + A3XX_TEX_ANISO_4 = 2, + A3XX_TEX_ANISO_8 = 3, + A3XX_TEX_ANISO_16 = 4, +}; + enum a3xx_tex_swiz { A3XX_TEX_X = 0, A3XX_TEX_Y = 1, @@ -1563,12 +1592,13 @@ static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val) { return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK; } -#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80 +#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0000ff80 #define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7 static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val) { return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK; } +#define A3XX_VFD_FETCH_INSTR_0_INSTANCED 0x00010000 #define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000 #define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000 #define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18 @@ -2509,6 +2539,12 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val) { return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK; } +#define A3XX_TEX_SAMP_0_ANISO__MASK 0x00038000 +#define A3XX_TEX_SAMP_0_ANISO__SHIFT 15 +static inline uint32_t A3XX_TEX_SAMP_0_ANISO(enum a3xx_tex_aniso val) +{ + return ((val) << A3XX_TEX_SAMP_0_ANISO__SHIFT) & A3XX_TEX_SAMP_0_ANISO__MASK; +} #define A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK 0x00700000 #define A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT 20 static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val) diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h index 5a24c416d2dd..755723fd8ba5 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h @@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -63,72 +63,82 @@ enum a4xx_rb_blend_opcode { }; enum a4xx_vtx_fmt { - VFMT4_FLOAT_32 = 1, - VFMT4_FLOAT_32_32 = 2, - VFMT4_FLOAT_32_32_32 = 3, - VFMT4_FLOAT_32_32_32_32 = 4, - VFMT4_FLOAT_16 = 5, - VFMT4_FLOAT_16_16 = 6, - VFMT4_FLOAT_16_16_16 = 7, - VFMT4_FLOAT_16_16_16_16 = 8, - VFMT4_FIXED_32 = 9, - VFMT4_FIXED_32_32 = 10, - VFMT4_FIXED_32_32_32 = 11, - VFMT4_FIXED_32_32_32_32 = 12, - VFMT4_SHORT_16 = 16, - VFMT4_SHORT_16_16 = 17, - VFMT4_SHORT_16_16_16 = 18, - VFMT4_SHORT_16_16_16_16 = 19, - VFMT4_USHORT_16 = 20, - VFMT4_USHORT_16_16 = 21, - VFMT4_USHORT_16_16_16 = 22, - 
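These generated headers all follow the same pattern for register fields: a __MASK/__SHIFT pair plus a small inline packer, so a register word is built by OR-ing packed fields together. A self-contained sketch using the A3XX_TEX_SAMP_0_ANISO field added above (the wrapper function at the end is illustrative, not from the patch):

#include <stdint.h>

#define A3XX_TEX_SAMP_0_ANISO__MASK   0x00038000
#define A3XX_TEX_SAMP_0_ANISO__SHIFT  15

static inline uint32_t A3XX_TEX_SAMP_0_ANISO(uint32_t val)
{
        return (val << A3XX_TEX_SAMP_0_ANISO__SHIFT) & A3XX_TEX_SAMP_0_ANISO__MASK;
}

/* e.g. requesting 16x anisotropy (A3XX_TEX_ANISO_16 == 4) in TEX_SAMP_0 */
static inline uint32_t pack_aniso_16x(uint32_t other_fields)
{
        return other_fields | A3XX_TEX_SAMP_0_ANISO(4);  /* adds 0x00020000 */
}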
VFMT4_USHORT_16_16_16_16 = 23, - VFMT4_NORM_SHORT_16 = 24, - VFMT4_NORM_SHORT_16_16 = 25, - VFMT4_NORM_SHORT_16_16_16 = 26, - VFMT4_NORM_SHORT_16_16_16_16 = 27, - VFMT4_NORM_USHORT_16 = 28, - VFMT4_NORM_USHORT_16_16 = 29, - VFMT4_NORM_USHORT_16_16_16 = 30, - VFMT4_NORM_USHORT_16_16_16_16 = 31, - VFMT4_UBYTE_8 = 40, - VFMT4_UBYTE_8_8 = 41, - VFMT4_UBYTE_8_8_8 = 42, - VFMT4_UBYTE_8_8_8_8 = 43, - VFMT4_NORM_UBYTE_8 = 44, - VFMT4_NORM_UBYTE_8_8 = 45, - VFMT4_NORM_UBYTE_8_8_8 = 46, - VFMT4_NORM_UBYTE_8_8_8_8 = 47, - VFMT4_BYTE_8 = 48, - VFMT4_BYTE_8_8 = 49, - VFMT4_BYTE_8_8_8 = 50, - VFMT4_BYTE_8_8_8_8 = 51, - VFMT4_NORM_BYTE_8 = 52, - VFMT4_NORM_BYTE_8_8 = 53, - VFMT4_NORM_BYTE_8_8_8 = 54, - VFMT4_NORM_BYTE_8_8_8_8 = 55, - VFMT4_UINT_10_10_10_2 = 60, - VFMT4_NORM_UINT_10_10_10_2 = 61, - VFMT4_INT_10_10_10_2 = 62, - VFMT4_NORM_INT_10_10_10_2 = 63, + VFMT4_32_FLOAT = 1, + VFMT4_32_32_FLOAT = 2, + VFMT4_32_32_32_FLOAT = 3, + VFMT4_32_32_32_32_FLOAT = 4, + VFMT4_16_FLOAT = 5, + VFMT4_16_16_FLOAT = 6, + VFMT4_16_16_16_FLOAT = 7, + VFMT4_16_16_16_16_FLOAT = 8, + VFMT4_32_FIXED = 9, + VFMT4_32_32_FIXED = 10, + VFMT4_32_32_32_FIXED = 11, + VFMT4_32_32_32_32_FIXED = 12, + VFMT4_16_SINT = 16, + VFMT4_16_16_SINT = 17, + VFMT4_16_16_16_SINT = 18, + VFMT4_16_16_16_16_SINT = 19, + VFMT4_16_UINT = 20, + VFMT4_16_16_UINT = 21, + VFMT4_16_16_16_UINT = 22, + VFMT4_16_16_16_16_UINT = 23, + VFMT4_16_SNORM = 24, + VFMT4_16_16_SNORM = 25, + VFMT4_16_16_16_SNORM = 26, + VFMT4_16_16_16_16_SNORM = 27, + VFMT4_16_UNORM = 28, + VFMT4_16_16_UNORM = 29, + VFMT4_16_16_16_UNORM = 30, + VFMT4_16_16_16_16_UNORM = 31, + VFMT4_32_32_SINT = 37, + VFMT4_8_UINT = 40, + VFMT4_8_8_UINT = 41, + VFMT4_8_8_8_UINT = 42, + VFMT4_8_8_8_8_UINT = 43, + VFMT4_8_UNORM = 44, + VFMT4_8_8_UNORM = 45, + VFMT4_8_8_8_UNORM = 46, + VFMT4_8_8_8_8_UNORM = 47, + VFMT4_8_SINT = 48, + VFMT4_8_8_SINT = 49, + VFMT4_8_8_8_SINT = 50, + VFMT4_8_8_8_8_SINT = 51, + VFMT4_8_SNORM = 52, + VFMT4_8_8_SNORM = 53, + VFMT4_8_8_8_SNORM = 54, + VFMT4_8_8_8_8_SNORM = 55, + VFMT4_10_10_10_2_UINT = 60, + VFMT4_10_10_10_2_UNORM = 61, + VFMT4_10_10_10_2_SINT = 62, + VFMT4_10_10_10_2_SNORM = 63, }; enum a4xx_tex_fmt { - TFMT4_NORM_USHORT_565 = 11, - TFMT4_NORM_USHORT_5551 = 10, - TFMT4_NORM_USHORT_4444 = 8, - TFMT4_NORM_UINT_X8Z24 = 71, - TFMT4_NORM_UINT_2_10_10_10 = 33, - TFMT4_NORM_UINT_A8 = 3, - TFMT4_NORM_UINT_L8_A8 = 13, - TFMT4_NORM_UINT_8 = 4, - TFMT4_NORM_UINT_8_8_8_8 = 28, - TFMT4_FLOAT_16 = 20, - TFMT4_FLOAT_16_16 = 40, - TFMT4_FLOAT_16_16_16_16 = 53, - TFMT4_FLOAT_32 = 43, - TFMT4_FLOAT_32_32 = 56, - TFMT4_FLOAT_32_32_32_32 = 63, + TFMT4_5_6_5_UNORM = 11, + TFMT4_5_5_5_1_UNORM = 10, + TFMT4_4_4_4_4_UNORM = 8, + TFMT4_X8Z24_UNORM = 71, + TFMT4_10_10_10_2_UNORM = 33, + TFMT4_A8_UNORM = 3, + TFMT4_L8_A8_UNORM = 13, + TFMT4_8_UNORM = 4, + TFMT4_8_8_UNORM = 14, + TFMT4_8_8_8_8_UNORM = 28, + TFMT4_16_FLOAT = 20, + TFMT4_16_16_FLOAT = 40, + TFMT4_16_16_16_16_FLOAT = 53, + TFMT4_32_FLOAT = 43, + TFMT4_32_32_FLOAT = 56, + TFMT4_32_32_32_32_FLOAT = 63, +}; + +enum a4xx_tex_fetchsize { + TFETCH4_1_BYTE = 0, + TFETCH4_2_BYTE = 1, + TFETCH4_4_BYTE = 2, + TFETCH4_8_BYTE = 3, + TFETCH4_16_BYTE = 4, }; enum a4xx_depth_format { @@ -264,14 +274,19 @@ static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val) return ((val) << A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL_SAMPLES__MASK; } -#define REG_A4XX_RB_MSAA_CONTROL2 0x000020a3 -#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK 0x00000380 -#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT 7 -static inline 
uint32_t A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES(uint32_t val) +#define REG_A4XX_RB_RENDER_CONTROL2 0x000020a3 +#define A4XX_RB_RENDER_CONTROL2_XCOORD 0x00000001 +#define A4XX_RB_RENDER_CONTROL2_YCOORD 0x00000002 +#define A4XX_RB_RENDER_CONTROL2_ZCOORD 0x00000004 +#define A4XX_RB_RENDER_CONTROL2_WCOORD 0x00000008 +#define A4XX_RB_RENDER_CONTROL2_FACENESS 0x00000020 +#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK 0x00000380 +#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT 7 +static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val) { - return ((val) << A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK; + return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK; } -#define A4XX_RB_MSAA_CONTROL2_VARYING 0x00001000 +#define A4XX_RB_RENDER_CONTROL2_VARYING 0x00001000 static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; } @@ -362,7 +377,69 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK; } +#define REG_A4XX_RB_BLEND_RED 0x000020f3 +#define A4XX_RB_BLEND_RED_UINT__MASK 0x00007fff +#define A4XX_RB_BLEND_RED_UINT__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK; +} +#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000 +#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16 +static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val) +{ + return ((util_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK; +} + +#define REG_A4XX_RB_BLEND_GREEN 0x000020f4 +#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x00007fff +#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK; +} +#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000 +#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16 +static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val) +{ + return ((util_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK; +} + +#define REG_A4XX_RB_BLEND_BLUE 0x000020f5 +#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x00007fff +#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK; +} +#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000 +#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16 +static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val) +{ + return ((util_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK; +} + +#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6 +#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x00007fff +#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK; +} +#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000 +#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16 +static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val) +{ + return ((util_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK; +} + #define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8 +#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff +#define 
A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0 +static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val) +{ + return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK; +} #define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100 #define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00 #define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9 @@ -372,7 +449,7 @@ static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare } #define REG_A4XX_RB_FS_OUTPUT 0x000020f9 -#define A4XX_RB_FS_OUTPUT_ENABLE_COLOR_PIPE 0x00000001 +#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND 0x00000001 #define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100 #define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000 #define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16 @@ -416,11 +493,11 @@ static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val) } #define REG_A4XX_RB_COPY_DEST_BASE 0x000020fd -#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0 -#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 4 +#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xffffffe0 +#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 5 static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val) { - return ((val >> 4) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK; + return ((val >> 5) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK; } #define REG_A4XX_RB_COPY_DEST_PITCH 0x000020fe @@ -508,7 +585,7 @@ static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val) #define A4XX_RB_DEPTH_PITCH__SHIFT 0 static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val) { - return ((val >> 4) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK; + return ((val >> 5) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK; } #define REG_A4XX_RB_DEPTH_PITCH2 0x00002105 @@ -516,7 +593,7 @@ static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val) #define A4XX_RB_DEPTH_PITCH2__SHIFT 0 static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val) { - return ((val >> 4) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK; + return ((val >> 5) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK; } #define REG_A4XX_RB_STENCIL_CONTROL 0x00002106 @@ -630,7 +707,11 @@ static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val) return ((val) << A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK; } -#define REG_A4XX_RB_VPORT_Z_CLAMP_MAX_15 0x0000213f +static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP(uint32_t i0) { return 0x00002120 + 0x2*i0; } + +static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MIN(uint32_t i0) { return 0x00002120 + 0x2*i0; } + +static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MAX(uint32_t i0) { return 0x00002121 + 0x2*i0; } #define REG_A4XX_RBBM_HW_VERSION 0x00000000 @@ -1121,7 +1202,9 @@ static inline uint32_t A4XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val) { return ((val) << A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK; } +#define A4XX_SP_FS_CTRL_REG1_FACENESS 0x00080000 #define A4XX_SP_FS_CTRL_REG1_VARYING 0x00100000 +#define A4XX_SP_FS_CTRL_REG1_FRAGCOORD 0x00200000 #define REG_A4XX_SP_FS_OBJ_OFFSET_REG 0x000022ea #define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 @@ -1384,6 +1467,12 @@ static inline uint32_t A4XX_VFD_CONTROL_1_REGID4INST(uint32_t val) #define REG_A4XX_VFD_CONTROL_2 0x00002202 #define REG_A4XX_VFD_CONTROL_3 0x00002203 +#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK 0x0000ff00 +#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT 8 +static inline uint32_t 
A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val) +{ + return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK; +} #define REG_A4XX_VFD_CONTROL_4 0x00002204 @@ -1405,12 +1494,7 @@ static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val) return ((val) << A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK; } #define A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00080000 -#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000 -#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24 -static inline uint32_t A4XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val) -{ - return ((val) << A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK; -} +#define A4XX_VFD_FETCH_INSTR_0_INSTANCED 0x00100000 static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; } @@ -1423,6 +1507,12 @@ static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val) } static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; } +#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK 0x000001ff +#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT 0 +static inline uint32_t A4XX_VFD_FETCH_INSTR_3_STEPRATE(uint32_t val) +{ + return ((val) << A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK; +} static inline uint32_t REG_A4XX_VFD_DECODE(uint32_t i0) { return 0x0000228a + 0x1*i0; } @@ -1446,6 +1536,7 @@ static inline uint32_t A4XX_VFD_DECODE_INSTR_REGID(uint32_t val) { return ((val) << A4XX_VFD_DECODE_INSTR_REGID__SHIFT) & A4XX_VFD_DECODE_INSTR_REGID__MASK; } +#define A4XX_VFD_DECODE_INSTR_INT 0x00100000 #define A4XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000 #define A4XX_VFD_DECODE_INSTR_SWAP__SHIFT 22 static inline uint32_t A4XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val) @@ -1585,7 +1676,47 @@ static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val) return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK; } -#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f +#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077 +#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003 +#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0 +static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val) +{ + return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK; +} + +#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078 +#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001 +#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002 +#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004 +#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8 +#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3 +static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val) +{ + return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK; +} +#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800 +#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000 + +#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b +#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c +#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2 +static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val) +{ + return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK; +} +#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380 +#define 
A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7 +static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val) +{ + return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK; +} +#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800 +#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000 +#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12 +static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val) +{ + return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK; +} #define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL 0x0000207c #define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 @@ -1647,46 +1778,34 @@ static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val) return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK; } -#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077 -#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003 -#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0 -static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val) +#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_BR 0x0000209e +#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_WINDOW_OFFSET_DISABLE 0x80000000 +#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK 0x00007fff +#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT 0 +static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_X(uint32_t val) { - return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK; + return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK; } - -#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078 -#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001 -#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002 -#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004 -#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8 -#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3 -static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val) +#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK 0x7fff0000 +#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT 16 +static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y(uint32_t val) { - return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK; + return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK; } -#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800 -#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000 -#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b -#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c -#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2 -static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val) -{ - return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK; -} -#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380 -#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7 -static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val) +#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f +#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_WINDOW_OFFSET_DISABLE 0x80000000 +#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK 0x00007fff +#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT 0 +static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_X(uint32_t val) { - return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK; + return ((val) << 
A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK; } -#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800 -#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000 -#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12 -static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val) +#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK 0x7fff0000 +#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT 16 +static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val) { - return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK; + return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK; } #define REG_A4XX_UCHE_CACHE_MODE_CONTROL 0x00000e80 @@ -1742,6 +1861,12 @@ static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize } #define A4XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100 #define A4XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200 +#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK 0x00ff0000 +#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT 16 +static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_COORDREGID(uint32_t val) +{ + return ((val) << A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK; +} #define A4XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000 #define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2 @@ -1751,6 +1876,12 @@ static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val) { return ((val) << A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK; } +#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000003fc +#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 2 +static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val) +{ + return ((val) << A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK; +} #define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3 #define A4XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff @@ -1965,15 +2096,13 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val) #define REG_A4XX_UNKNOWN_20F2 0x000020f2 -#define REG_A4XX_UNKNOWN_20F3 0x000020f3 - -#define REG_A4XX_UNKNOWN_20F4 0x000020f4 - -#define REG_A4XX_UNKNOWN_20F5 0x000020f5 - -#define REG_A4XX_UNKNOWN_20F6 0x000020f6 - #define REG_A4XX_UNKNOWN_20F7 0x000020f7 +#define A4XX_UNKNOWN_20F7__MASK 0xffffffff +#define A4XX_UNKNOWN_20F7__SHIFT 0 +static inline uint32_t A4XX_UNKNOWN_20F7(float val) +{ + return ((fui(val)) << A4XX_UNKNOWN_20F7__SHIFT) & A4XX_UNKNOWN_20F7__MASK; +} #define REG_A4XX_UNKNOWN_2152 0x00002152 @@ -2000,6 +2129,7 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val) #define REG_A4XX_UNKNOWN_23A0 0x000023a0 #define REG_A4XX_TEX_SAMP_0 0x00000000 +#define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001 #define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006 #define A4XX_TEX_SAMP_0_XY_MAG__SHIFT 1 static inline uint32_t A4XX_TEX_SAMP_0_XY_MAG(enum a4xx_tex_filter val) @@ -2038,17 +2168,19 @@ static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val { return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK; } +#define A4XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020 +#define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040 #define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00 #define A4XX_TEX_SAMP_1_MAX_LOD__SHIFT 8 static inline uint32_t A4XX_TEX_SAMP_1_MAX_LOD(float val) { - return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & 
A4XX_TEX_SAMP_1_MAX_LOD__MASK; + return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK; } #define A4XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000 #define A4XX_TEX_SAMP_1_MIN_LOD__SHIFT 20 static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val) { - return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK; + return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK; } #define REG_A4XX_TEX_CONST_0 0x00000000 @@ -2077,6 +2209,12 @@ static inline uint32_t A4XX_TEX_CONST_0_SWIZ_W(enum a4xx_tex_swiz val) { return ((val) << A4XX_TEX_CONST_0_SWIZ_W__SHIFT) & A4XX_TEX_CONST_0_SWIZ_W__MASK; } +#define A4XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000 +#define A4XX_TEX_CONST_0_MIPLVLS__SHIFT 16 +static inline uint32_t A4XX_TEX_CONST_0_MIPLVLS(uint32_t val) +{ + return ((val) << A4XX_TEX_CONST_0_MIPLVLS__SHIFT) & A4XX_TEX_CONST_0_MIPLVLS__MASK; +} #define A4XX_TEX_CONST_0_FMT__MASK 0x1fc00000 #define A4XX_TEX_CONST_0_FMT__SHIFT 22 static inline uint32_t A4XX_TEX_CONST_0_FMT(enum a4xx_tex_fmt val) @@ -2105,6 +2243,12 @@ static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val) } #define REG_A4XX_TEX_CONST_2 0x00000002 +#define A4XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f +#define A4XX_TEX_CONST_2_FETCHSIZE__SHIFT 0 +static inline uint32_t A4XX_TEX_CONST_2_FETCHSIZE(enum a4xx_tex_fetchsize val) +{ + return ((val) << A4XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A4XX_TEX_CONST_2_FETCHSIZE__MASK; +} #define A4XX_TEX_CONST_2_PITCH__MASK 0x3ffffe00 #define A4XX_TEX_CONST_2_PITCH__SHIFT 9 static inline uint32_t A4XX_TEX_CONST_2_PITCH(uint32_t val) @@ -2119,19 +2263,31 @@ static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val) } #define REG_A4XX_TEX_CONST_3 0x00000003 -#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x0000000f +#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x00003fff #define A4XX_TEX_CONST_3_LAYERSZ__SHIFT 0 static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val) { return ((val >> 12) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK; } +#define A4XX_TEX_CONST_3_DEPTH__MASK 0x7ffc0000 +#define A4XX_TEX_CONST_3_DEPTH__SHIFT 18 +static inline uint32_t A4XX_TEX_CONST_3_DEPTH(uint32_t val) +{ + return ((val) << A4XX_TEX_CONST_3_DEPTH__SHIFT) & A4XX_TEX_CONST_3_DEPTH__MASK; +} #define REG_A4XX_TEX_CONST_4 0x00000004 -#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffff -#define A4XX_TEX_CONST_4_BASE__SHIFT 0 +#define A4XX_TEX_CONST_4_LAYERSZ__MASK 0x0000000f +#define A4XX_TEX_CONST_4_LAYERSZ__SHIFT 0 +static inline uint32_t A4XX_TEX_CONST_4_LAYERSZ(uint32_t val) +{ + return ((val >> 12) << A4XX_TEX_CONST_4_LAYERSZ__SHIFT) & A4XX_TEX_CONST_4_LAYERSZ__MASK; +} +#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffe0 +#define A4XX_TEX_CONST_4_BASE__SHIFT 5 static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val) { - return ((val) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK; + return ((val >> 5) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK; } #define REG_A4XX_TEX_CONST_5 0x00000005 diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h index a4b33af9338d..8531beb982e7 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h @@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - 
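The TEX_SAMP_1 MIN_LOD/MAX_LOD hunks above change the fixed-point scale from 64 to 256, i.e. from 6 to 8 fractional bits within the 12-bit fields. A quick arithmetic check of what a LOD clamp of 5.5 encodes to under each scale (standalone, illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        float lod = 5.5f;

        printf("old encoding (x64):  0x%03x\n", (uint32_t)(lod * 64.0f));   /* 0x160 */
        printf("new encoding (x256): 0x%03x\n", (uint32_t)(lod * 256.0f));  /* 0x580 */
        return 0;
}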
/home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h index 6a75cee94d81..6ffc4f6c8af1 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h @@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -172,7 +172,9 @@ enum adreno_pm4_type3_packets { CP_DRAW_INDIRECT = 40, CP_DRAW_INDX_INDIRECT = 41, CP_DRAW_AUTO = 36, + CP_UNKNOWN_19 = 25, CP_UNKNOWN_1A = 26, + CP_UNKNOWN_4E = 78, CP_WIDE_REG_WRITE = 116, IN_IB_PREFETCH_END = 23, IN_SUBBLK_PREFETCH = 31, @@ -203,6 +205,12 @@ enum adreno_state_src { SS_INDIRECT = 4, }; +enum a4xx_index_size { + INDEX4_SIZE_8_BIT = 0, + INDEX4_SIZE_16_BIT = 1, + INDEX4_SIZE_32_BIT = 2, +}; + #define REG_CP_LOAD_STATE_0 0x00000000 #define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff #define CP_LOAD_STATE_0_DST_OFF__SHIFT 0 @@ -374,29 +382,20 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va { return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK; } -#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000700 -#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8 -static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK; -} -#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000800 -#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 11 -static inline uint32_t 
CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size val) +#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00 +#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val) { return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK; } -#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000 -#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000 -#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000 -#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK 0xffff0000 -#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT 16 -static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK; -} #define REG_CP_DRAW_INDX_OFFSET_1 0x00000001 +#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK 0xffffffff +#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK; +} #define REG_CP_DRAW_INDX_OFFSET_2 0x00000002 #define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK 0xffffffff diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 448438b759b4..abf1bba520bf 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) Copyright (C) 2013 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h index c102a7f074ac..695f99d4bec2 100644 --- 
a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h +++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h @@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h index a900134bdf33..50ff9851d73d 100644 --- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h +++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h @@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml 
( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) Copyright (C) 2013 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c new file mode 100644 index 000000000000..0940e84b2821 --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp.c @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/of_irq.h> +#include "edp.h" + +static irqreturn_t edp_irq(int irq, void *dev_id) +{ + struct msm_edp *edp = dev_id; + + /* Process eDP irq */ + return msm_edp_ctrl_irq(edp->ctrl); +} + +static void edp_destroy(struct platform_device *pdev) +{ + struct msm_edp *edp = platform_get_drvdata(pdev); + + if (!edp) + return; + + if (edp->ctrl) { + msm_edp_ctrl_destroy(edp->ctrl); + edp->ctrl = NULL; + } + + platform_set_drvdata(pdev, NULL); +} + +/* construct eDP at bind/probe time, grab all the resources. 
*/ +static struct msm_edp *edp_init(struct platform_device *pdev) +{ + struct msm_edp *edp = NULL; + int ret; + + if (!pdev) { + pr_err("no eDP device\n"); + ret = -ENXIO; + goto fail; + } + + edp = devm_kzalloc(&pdev->dev, sizeof(*edp), GFP_KERNEL); + if (!edp) { + ret = -ENOMEM; + goto fail; + } + DBG("eDP probed=%p", edp); + + edp->pdev = pdev; + platform_set_drvdata(pdev, edp); + + ret = msm_edp_ctrl_init(edp); + if (ret) + goto fail; + + return edp; + +fail: + if (edp) + edp_destroy(pdev); + + return ERR_PTR(ret); +} + +static int edp_bind(struct device *dev, struct device *master, void *data) +{ + struct drm_device *drm = dev_get_drvdata(master); + struct msm_drm_private *priv = drm->dev_private; + struct msm_edp *edp; + + DBG(""); + edp = edp_init(to_platform_device(dev)); + if (IS_ERR(edp)) + return PTR_ERR(edp); + priv->edp = edp; + + return 0; +} + +static void edp_unbind(struct device *dev, struct device *master, void *data) +{ + struct drm_device *drm = dev_get_drvdata(master); + struct msm_drm_private *priv = drm->dev_private; + + DBG(""); + if (priv->edp) { + edp_destroy(to_platform_device(dev)); + priv->edp = NULL; + } +} + +static const struct component_ops edp_ops = { + .bind = edp_bind, + .unbind = edp_unbind, +}; + +static int edp_dev_probe(struct platform_device *pdev) +{ + DBG(""); + return component_add(&pdev->dev, &edp_ops); +} + +static int edp_dev_remove(struct platform_device *pdev) +{ + DBG(""); + component_del(&pdev->dev, &edp_ops); + return 0; +} + +static const struct of_device_id dt_match[] = { + { .compatible = "qcom,mdss-edp" }, + {} +}; + +static struct platform_driver edp_driver = { + .probe = edp_dev_probe, + .remove = edp_dev_remove, + .driver = { + .name = "msm_edp", + .of_match_table = dt_match, + }, +}; + +void __init msm_edp_register(void) +{ + DBG(""); + platform_driver_register(&edp_driver); +} + +void __exit msm_edp_unregister(void) +{ + DBG(""); + platform_driver_unregister(&edp_driver); +} + +/* Second part of initialization, the drm/kms level modeset_init */ +int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, + struct drm_encoder *encoder) +{ + struct platform_device *pdev = edp->pdev; + struct msm_drm_private *priv = dev->dev_private; + int ret; + + edp->encoder = encoder; + edp->dev = dev; + + edp->bridge = msm_edp_bridge_init(edp); + if (IS_ERR(edp->bridge)) { + ret = PTR_ERR(edp->bridge); + dev_err(dev->dev, "failed to create eDP bridge: %d\n", ret); + edp->bridge = NULL; + goto fail; + } + + edp->connector = msm_edp_connector_init(edp); + if (IS_ERR(edp->connector)) { + ret = PTR_ERR(edp->connector); + dev_err(dev->dev, "failed to create eDP connector: %d\n", ret); + edp->connector = NULL; + goto fail; + } + + edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (edp->irq < 0) { + ret = edp->irq; + dev_err(dev->dev, "failed to get IRQ: %d\n", ret); + goto fail; + } + + ret = devm_request_irq(&pdev->dev, edp->irq, + edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "edp_isr", edp); + if (ret < 0) { + dev_err(dev->dev, "failed to request IRQ%u: %d\n", + edp->irq, ret); + goto fail; + } + + encoder->bridge = edp->bridge; + + priv->bridges[priv->num_bridges++] = edp->bridge; + priv->connectors[priv->num_connectors++] = edp->connector; + + return 0; + +fail: + /* bridge/connector are normally destroyed by drm */ + if (edp->bridge) { + edp_bridge_destroy(edp->bridge); + edp->bridge = NULL; + } + if (edp->connector) { + edp->connector->funcs->destroy(edp->connector); + edp->connector = NULL; + } + + return ret; +} diff --git 
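The new edp.c above registers only a component: edp_dev_probe() calls component_add(), and the real setup runs in edp_bind() once the DRM master driver has gathered all of its pieces. For context, a rough sketch of how a master side typically collects such a component with the component framework; the compare helper, ops and probe names here are illustrative and not taken from this patch:

#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int compare_of(struct device *dev, void *data)
{
        return dev->of_node == data;    /* match by device-tree node */
}

static int master_bind(struct device *dev)
{
        return component_bind_all(dev, NULL);   /* invokes edp_bind() and friends */
}

static void master_unbind(struct device *dev)
{
        component_unbind_all(dev, NULL);
}

static const struct component_master_ops master_ops = {
        .bind = master_bind,
        .unbind = master_unbind,
};

static int master_probe(struct platform_device *pdev)
{
        struct component_match *match = NULL;
        struct device_node *np;

        np = of_find_compatible_node(NULL, NULL, "qcom,mdss-edp");
        if (np)
                component_match_add(&pdev->dev, &match, compare_of, np);

        return component_master_add_with_match(&pdev->dev, &master_ops, match);
}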
a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h new file mode 100644 index 000000000000..ba5bedde5241 --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __EDP_CONNECTOR_H__ +#define __EDP_CONNECTOR_H__ + +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> + +#include "drm_crtc.h" +#include "drm_dp_helper.h" +#include "msm_drv.h" + +#define edp_read(offset) msm_readl((offset)) +#define edp_write(offset, data) msm_writel((data), (offset)) + +struct edp_ctrl; +struct edp_aux; +struct edp_phy; + +struct msm_edp { + struct drm_device *dev; + struct platform_device *pdev; + + struct drm_connector *connector; + struct drm_bridge *bridge; + + /* the encoder we are hooked to (outside of eDP block) */ + struct drm_encoder *encoder; + + struct edp_ctrl *ctrl; + + int irq; +}; + +/* eDP bridge */ +struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp); +void edp_bridge_destroy(struct drm_bridge *bridge); + +/* eDP connector */ +struct drm_connector *msm_edp_connector_init(struct msm_edp *edp); + +/* AUX */ +void *msm_edp_aux_init(struct device *dev, void __iomem *regbase, + struct drm_dp_aux **drm_aux); +void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux); +irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr); +void msm_edp_aux_ctrl(struct edp_aux *aux, int enable); + +/* Phy */ +bool msm_edp_phy_ready(struct edp_phy *phy); +void msm_edp_phy_ctrl(struct edp_phy *phy, int enable); +void msm_edp_phy_vm_pe_init(struct edp_phy *phy); +void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1); +void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane); +void *msm_edp_phy_init(struct device *dev, void __iomem *regbase); + +/* Ctrl */ +irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl); +void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on); +int msm_edp_ctrl_init(struct msm_edp *edp); +void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl); +bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl); +int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl, + struct drm_connector *connector, struct edid **edid); +int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl, + const struct drm_display_mode *mode, + const struct drm_display_info *info); +/* @pixel_rate is in kHz */ +bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl, + u32 pixel_rate, u32 *pm, u32 *pn); + +#endif /* __EDP_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h new file mode 100644 index 000000000000..a29f1df15143 --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp.xml.h @@ -0,0 +1,292 @@ +#ifndef EDP_XML +#define EDP_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) + +Copyright (C) 2013-2014 by the following authors: +- Rob Clark <robdclark@gmail.com> (robclark) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum edp_color_depth { + EDP_6BIT = 0, + EDP_8BIT = 1, + EDP_10BIT = 2, + EDP_12BIT = 3, + EDP_16BIT = 4, +}; + +enum edp_component_format { + EDP_RGB = 0, + EDP_YUV422 = 1, + EDP_YUV444 = 2, +}; + +#define REG_EDP_MAINLINK_CTRL 0x00000004 +#define EDP_MAINLINK_CTRL_ENABLE 0x00000001 +#define EDP_MAINLINK_CTRL_RESET 0x00000002 + +#define REG_EDP_STATE_CTRL 0x00000008 +#define EDP_STATE_CTRL_TRAIN_PATTERN_1 0x00000001 +#define EDP_STATE_CTRL_TRAIN_PATTERN_2 0x00000002 +#define EDP_STATE_CTRL_TRAIN_PATTERN_3 0x00000004 +#define EDP_STATE_CTRL_SYMBOL_ERR_RATE_MEAS 0x00000008 +#define EDP_STATE_CTRL_PRBS7 0x00000010 +#define EDP_STATE_CTRL_CUSTOM_80_BIT_PATTERN 0x00000020 +#define EDP_STATE_CTRL_SEND_VIDEO 0x00000040 +#define EDP_STATE_CTRL_PUSH_IDLE 0x00000080 + +#define REG_EDP_CONFIGURATION_CTRL 0x0000000c +#define EDP_CONFIGURATION_CTRL_SYNC_CLK 0x00000001 +#define EDP_CONFIGURATION_CTRL_STATIC_MVID 0x00000002 +#define EDP_CONFIGURATION_CTRL_PROGRESSIVE 0x00000004 +#define EDP_CONFIGURATION_CTRL_LANES__MASK 0x00000030 +#define EDP_CONFIGURATION_CTRL_LANES__SHIFT 4 +static inline uint32_t EDP_CONFIGURATION_CTRL_LANES(uint32_t val) +{ + return ((val) << EDP_CONFIGURATION_CTRL_LANES__SHIFT) & EDP_CONFIGURATION_CTRL_LANES__MASK; +} +#define EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING 0x00000040 +#define EDP_CONFIGURATION_CTRL_COLOR__MASK 0x00000100 +#define EDP_CONFIGURATION_CTRL_COLOR__SHIFT 8 +static inline uint32_t EDP_CONFIGURATION_CTRL_COLOR(enum edp_color_depth val) +{ + return ((val) << EDP_CONFIGURATION_CTRL_COLOR__SHIFT) & EDP_CONFIGURATION_CTRL_COLOR__MASK; +} + +#define REG_EDP_SOFTWARE_MVID 0x00000014 + +#define REG_EDP_SOFTWARE_NVID 0x00000018 + +#define REG_EDP_TOTAL_HOR_VER 0x0000001c +#define EDP_TOTAL_HOR_VER_HORIZ__MASK 0x0000ffff +#define EDP_TOTAL_HOR_VER_HORIZ__SHIFT 0 +static inline uint32_t EDP_TOTAL_HOR_VER_HORIZ(uint32_t val) +{ + return ((val) << EDP_TOTAL_HOR_VER_HORIZ__SHIFT) & EDP_TOTAL_HOR_VER_HORIZ__MASK; +} +#define EDP_TOTAL_HOR_VER_VERT__MASK 0xffff0000 +#define EDP_TOTAL_HOR_VER_VERT__SHIFT 16 +static inline uint32_t EDP_TOTAL_HOR_VER_VERT(uint32_t val) +{ + return ((val) << EDP_TOTAL_HOR_VER_VERT__SHIFT) & EDP_TOTAL_HOR_VER_VERT__MASK; +} + +#define REG_EDP_START_HOR_VER_FROM_SYNC 0x00000020 +#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK 0x0000ffff +#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT 0 +static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_HORIZ(uint32_t val) +{ + return ((val) << EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK; +} +#define EDP_START_HOR_VER_FROM_SYNC_VERT__MASK 0xffff0000 +#define EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT 16 +static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_VERT(uint32_t val) +{ + return ((val) << EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_VERT__MASK; +} + +#define REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY 0x00000024 +#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK 0x00007fff +#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT 0 +static inline uint32_t EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ(uint32_t val) +{ + return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT) & EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK; +} +#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC 0x00008000 +#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK 0x7fff0000 +#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT 16 +static inline uint32_t EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT(uint32_t val) +{ + return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT) & 
EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK; +} +#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC 0x80000000 + +#define REG_EDP_ACTIVE_HOR_VER 0x00000028 +#define EDP_ACTIVE_HOR_VER_HORIZ__MASK 0x0000ffff +#define EDP_ACTIVE_HOR_VER_HORIZ__SHIFT 0 +static inline uint32_t EDP_ACTIVE_HOR_VER_HORIZ(uint32_t val) +{ + return ((val) << EDP_ACTIVE_HOR_VER_HORIZ__SHIFT) & EDP_ACTIVE_HOR_VER_HORIZ__MASK; +} +#define EDP_ACTIVE_HOR_VER_VERT__MASK 0xffff0000 +#define EDP_ACTIVE_HOR_VER_VERT__SHIFT 16 +static inline uint32_t EDP_ACTIVE_HOR_VER_VERT(uint32_t val) +{ + return ((val) << EDP_ACTIVE_HOR_VER_VERT__SHIFT) & EDP_ACTIVE_HOR_VER_VERT__MASK; +} + +#define REG_EDP_MISC1_MISC0 0x0000002c +#define EDP_MISC1_MISC0_MISC0__MASK 0x000000ff +#define EDP_MISC1_MISC0_MISC0__SHIFT 0 +static inline uint32_t EDP_MISC1_MISC0_MISC0(uint32_t val) +{ + return ((val) << EDP_MISC1_MISC0_MISC0__SHIFT) & EDP_MISC1_MISC0_MISC0__MASK; +} +#define EDP_MISC1_MISC0_SYNC 0x00000001 +#define EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK 0x00000006 +#define EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT 1 +static inline uint32_t EDP_MISC1_MISC0_COMPONENT_FORMAT(enum edp_component_format val) +{ + return ((val) << EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT) & EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK; +} +#define EDP_MISC1_MISC0_CEA 0x00000008 +#define EDP_MISC1_MISC0_BT709_5 0x00000010 +#define EDP_MISC1_MISC0_COLOR__MASK 0x000000e0 +#define EDP_MISC1_MISC0_COLOR__SHIFT 5 +static inline uint32_t EDP_MISC1_MISC0_COLOR(enum edp_color_depth val) +{ + return ((val) << EDP_MISC1_MISC0_COLOR__SHIFT) & EDP_MISC1_MISC0_COLOR__MASK; +} +#define EDP_MISC1_MISC0_MISC1__MASK 0x0000ff00 +#define EDP_MISC1_MISC0_MISC1__SHIFT 8 +static inline uint32_t EDP_MISC1_MISC0_MISC1(uint32_t val) +{ + return ((val) << EDP_MISC1_MISC0_MISC1__SHIFT) & EDP_MISC1_MISC0_MISC1__MASK; +} +#define EDP_MISC1_MISC0_INTERLACED_ODD 0x00000100 +#define EDP_MISC1_MISC0_STEREO__MASK 0x00000600 +#define EDP_MISC1_MISC0_STEREO__SHIFT 9 +static inline uint32_t EDP_MISC1_MISC0_STEREO(uint32_t val) +{ + return ((val) << EDP_MISC1_MISC0_STEREO__SHIFT) & EDP_MISC1_MISC0_STEREO__MASK; +} + +#define REG_EDP_PHY_CTRL 0x00000074 +#define EDP_PHY_CTRL_SW_RESET_PLL 0x00000001 +#define EDP_PHY_CTRL_SW_RESET 0x00000004 + +#define REG_EDP_MAINLINK_READY 0x00000084 +#define EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY 0x00000008 +#define EDP_MAINLINK_READY_TRAIN_PATTERN_2_READY 0x00000010 +#define EDP_MAINLINK_READY_TRAIN_PATTERN_3_READY 0x00000020 + +#define REG_EDP_AUX_CTRL 0x00000300 +#define EDP_AUX_CTRL_ENABLE 0x00000001 +#define EDP_AUX_CTRL_RESET 0x00000002 + +#define REG_EDP_INTERRUPT_REG_1 0x00000308 +#define EDP_INTERRUPT_REG_1_HPD 0x00000001 +#define EDP_INTERRUPT_REG_1_HPD_ACK 0x00000002 +#define EDP_INTERRUPT_REG_1_HPD_EN 0x00000004 +#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE 0x00000008 +#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_ACK 0x00000010 +#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_EN 0x00000020 +#define EDP_INTERRUPT_REG_1_WRONG_ADDR 0x00000040 +#define EDP_INTERRUPT_REG_1_WRONG_ADDR_ACK 0x00000080 +#define EDP_INTERRUPT_REG_1_WRONG_ADDR_EN 0x00000100 +#define EDP_INTERRUPT_REG_1_TIMEOUT 0x00000200 +#define EDP_INTERRUPT_REG_1_TIMEOUT_ACK 0x00000400 +#define EDP_INTERRUPT_REG_1_TIMEOUT_EN 0x00000800 +#define EDP_INTERRUPT_REG_1_NACK_DEFER 0x00001000 +#define EDP_INTERRUPT_REG_1_NACK_DEFER_ACK 0x00002000 +#define EDP_INTERRUPT_REG_1_NACK_DEFER_EN 0x00004000 +#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT 0x00008000 +#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_ACK 0x00010000 +#define 
EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_EN 0x00020000 +#define EDP_INTERRUPT_REG_1_I2C_NACK 0x00040000 +#define EDP_INTERRUPT_REG_1_I2C_NACK_ACK 0x00080000 +#define EDP_INTERRUPT_REG_1_I2C_NACK_EN 0x00100000 +#define EDP_INTERRUPT_REG_1_I2C_DEFER 0x00200000 +#define EDP_INTERRUPT_REG_1_I2C_DEFER_ACK 0x00400000 +#define EDP_INTERRUPT_REG_1_I2C_DEFER_EN 0x00800000 +#define EDP_INTERRUPT_REG_1_PLL_UNLOCK 0x01000000 +#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_ACK 0x02000000 +#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_EN 0x04000000 +#define EDP_INTERRUPT_REG_1_AUX_ERROR 0x08000000 +#define EDP_INTERRUPT_REG_1_AUX_ERROR_ACK 0x10000000 +#define EDP_INTERRUPT_REG_1_AUX_ERROR_EN 0x20000000 + +#define REG_EDP_INTERRUPT_REG_2 0x0000030c +#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO 0x00000001 +#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_ACK 0x00000002 +#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_EN 0x00000004 +#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT 0x00000008 +#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_ACK 0x00000010 +#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_EN 0x00000020 +#define EDP_INTERRUPT_REG_2_FRAME_END 0x00000200 +#define EDP_INTERRUPT_REG_2_FRAME_END_ACK 0x00000080 +#define EDP_INTERRUPT_REG_2_FRAME_END_EN 0x00000100 +#define EDP_INTERRUPT_REG_2_CRC_UPDATED 0x00000200 +#define EDP_INTERRUPT_REG_2_CRC_UPDATED_ACK 0x00000400 +#define EDP_INTERRUPT_REG_2_CRC_UPDATED_EN 0x00000800 + +#define REG_EDP_INTERRUPT_TRANS_NUM 0x00000310 + +#define REG_EDP_AUX_DATA 0x00000314 +#define EDP_AUX_DATA_READ 0x00000001 +#define EDP_AUX_DATA_DATA__MASK 0x0000ff00 +#define EDP_AUX_DATA_DATA__SHIFT 8 +static inline uint32_t EDP_AUX_DATA_DATA(uint32_t val) +{ + return ((val) << EDP_AUX_DATA_DATA__SHIFT) & EDP_AUX_DATA_DATA__MASK; +} +#define EDP_AUX_DATA_INDEX__MASK 0x00ff0000 +#define EDP_AUX_DATA_INDEX__SHIFT 16 +static inline uint32_t EDP_AUX_DATA_INDEX(uint32_t val) +{ + return ((val) << EDP_AUX_DATA_INDEX__SHIFT) & EDP_AUX_DATA_INDEX__MASK; +} +#define EDP_AUX_DATA_INDEX_WRITE 0x80000000 + +#define REG_EDP_AUX_TRANS_CTRL 0x00000318 +#define EDP_AUX_TRANS_CTRL_I2C 0x00000100 +#define EDP_AUX_TRANS_CTRL_GO 0x00000200 + +#define REG_EDP_AUX_STATUS 0x00000324 + +static inline uint32_t REG_EDP_PHY_LN(uint32_t i0) { return 0x00000400 + 0x40*i0; } + +static inline uint32_t REG_EDP_PHY_LN_PD_CTL(uint32_t i0) { return 0x00000404 + 0x40*i0; } + +#define REG_EDP_PHY_GLB_VM_CFG0 0x00000510 + +#define REG_EDP_PHY_GLB_VM_CFG1 0x00000514 + +#define REG_EDP_PHY_GLB_MISC9 0x00000518 + +#define REG_EDP_PHY_GLB_CFG 0x00000528 + +#define REG_EDP_PHY_GLB_PD_CTL 0x0000052c + +#define REG_EDP_PHY_GLB_PHY_STATUS 0x00000598 + + +#endif /* EDP_XML */ diff --git a/drivers/gpu/drm/msm/edp/edp_aux.c b/drivers/gpu/drm/msm/edp/edp_aux.c new file mode 100644 index 000000000000..5f5a84f6074c --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp_aux.c @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "edp.h" +#include "edp.xml.h" + +#define AUX_CMD_FIFO_LEN 144 +#define AUX_CMD_NATIVE_MAX 16 +#define AUX_CMD_I2C_MAX 128 + +#define EDP_INTR_AUX_I2C_ERR \ + (EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \ + EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \ + EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER) +#define EDP_INTR_TRANS_STATUS \ + (EDP_INTERRUPT_REG_1_AUX_I2C_DONE | EDP_INTR_AUX_I2C_ERR) + +struct edp_aux { + void __iomem *base; + bool msg_err; + + struct completion msg_comp; + + /* To prevent the message transaction routine from reentry. */ + struct mutex msg_mutex; + + struct drm_dp_aux drm_aux; +}; +#define to_edp_aux(x) container_of(x, struct edp_aux, drm_aux) + +static int edp_msg_fifo_tx(struct edp_aux *aux, struct drm_dp_aux_msg *msg) +{ + u32 data[4]; + u32 reg, len; + bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); + bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + u8 *msgdata = msg->buffer; + int i; + + if (read) + len = 4; + else + len = msg->size + 4; + + /* + * cmd fifo only has depth of 144 bytes + */ + if (len > AUX_CMD_FIFO_LEN) + return -EINVAL; + + /* Pack cmd and write to HW */ + data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */ + if (read) + data[0] |= BIT(4); /* R/W */ + + data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */ + data[2] = msg->address & 0xff; /* addr[7:0] */ + data[3] = (msg->size - 1) & 0xff; /* len[7:0] */ + + for (i = 0; i < len; i++) { + reg = (i < 4) ? data[i] : msgdata[i - 4]; + reg = EDP_AUX_DATA_DATA(reg); /* index = 0, write */ + if (i == 0) + reg |= EDP_AUX_DATA_INDEX_WRITE; + edp_write(aux->base + REG_EDP_AUX_DATA, reg); + } + + reg = 0; /* Transaction number is always 1 */ + if (!native) /* i2c */ + reg |= EDP_AUX_TRANS_CTRL_I2C; + + reg |= EDP_AUX_TRANS_CTRL_GO; + edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, reg); + + return 0; +} + +static int edp_msg_fifo_rx(struct edp_aux *aux, struct drm_dp_aux_msg *msg) +{ + u32 data; + u8 *dp; + int i; + u32 len = msg->size; + + edp_write(aux->base + REG_EDP_AUX_DATA, + EDP_AUX_DATA_INDEX_WRITE | EDP_AUX_DATA_READ); /* index = 0 */ + + dp = msg->buffer; + + /* discard first byte */ + data = edp_read(aux->base + REG_EDP_AUX_DATA); + for (i = 0; i < len; i++) { + data = edp_read(aux->base + REG_EDP_AUX_DATA); + dp[i] = (u8)((data >> 8) & 0xff); + } + + return 0; +} + +/* + * This function does the real job to process an AUX transaction. + * It will call msm_edp_aux_ctrl() function to reset the AUX channel, + * if the waiting is timeout. + * The caller who triggers the transaction should avoid the + * msm_edp_aux_ctrl() running concurrently in other threads, i.e. + * start transaction only when AUX channel is fully enabled. + */ +ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg) +{ + struct edp_aux *aux = to_edp_aux(drm_aux); + ssize_t ret; + bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); + bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + + /* Ignore address only message */ + if ((msg->size == 0) || (msg->buffer == NULL)) { + msg->reply = native ? 
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + return msg->size; + } + + /* msg sanity check */ + if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) || + (msg->size > AUX_CMD_I2C_MAX)) { + pr_err("%s: invalid msg: size(%d), request(%x)\n", + __func__, msg->size, msg->request); + return -EINVAL; + } + + mutex_lock(&aux->msg_mutex); + + aux->msg_err = false; + reinit_completion(&aux->msg_comp); + + ret = edp_msg_fifo_tx(aux, msg); + if (ret < 0) + goto unlock_exit; + + DBG("wait_for_completion"); + ret = wait_for_completion_timeout(&aux->msg_comp, 300); + if (ret <= 0) { + /* + * Clear GO and reset AUX channel + * to cancel the current transaction. + */ + edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0); + msm_edp_aux_ctrl(aux, 1); + pr_err("%s: aux timeout, %d\n", __func__, ret); + goto unlock_exit; + } + DBG("completion"); + + if (!aux->msg_err) { + if (read) { + ret = edp_msg_fifo_rx(aux, msg); + if (ret < 0) + goto unlock_exit; + } + + msg->reply = native ? + DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + } else { + /* Reply defer to retry */ + msg->reply = native ? + DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER; + /* + * The sleep time in caller is not long enough to make sure + * our H/W completes transactions. Add more defer time here. + */ + msleep(100); + } + + /* Return requested size for success or retry */ + ret = msg->size; + +unlock_exit: + mutex_unlock(&aux->msg_mutex); + return ret; +} + +void *msm_edp_aux_init(struct device *dev, void __iomem *regbase, + struct drm_dp_aux **drm_aux) +{ + struct edp_aux *aux = NULL; + int ret; + + DBG(""); + aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL); + if (!aux) + return NULL; + + aux->base = regbase; + mutex_init(&aux->msg_mutex); + init_completion(&aux->msg_comp); + + aux->drm_aux.name = "msm_edp_aux"; + aux->drm_aux.dev = dev; + aux->drm_aux.transfer = edp_aux_transfer; + ret = drm_dp_aux_register(&aux->drm_aux); + if (ret) { + pr_err("%s: failed to register drm aux: %d\n", __func__, ret); + mutex_destroy(&aux->msg_mutex); + } + + if (drm_aux && aux) + *drm_aux = &aux->drm_aux; + + return aux; +} + +void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux) +{ + if (aux) { + drm_dp_aux_unregister(&aux->drm_aux); + mutex_destroy(&aux->msg_mutex); + } +} + +irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr) +{ + if (isr & EDP_INTR_TRANS_STATUS) { + DBG("isr=%x", isr); + edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0); + + if (isr & EDP_INTR_AUX_I2C_ERR) + aux->msg_err = true; + else + aux->msg_err = false; + + complete(&aux->msg_comp); + } + + return IRQ_HANDLED; +} + +void msm_edp_aux_ctrl(struct edp_aux *aux, int enable) +{ + u32 data; + + DBG("enable=%d", enable); + data = edp_read(aux->base + REG_EDP_AUX_CTRL); + + if (enable) { + data |= EDP_AUX_CTRL_RESET; + edp_write(aux->base + REG_EDP_AUX_CTRL, data); + /* Make sure full reset */ + wmb(); + usleep_range(500, 1000); + + data &= ~EDP_AUX_CTRL_RESET; + data |= EDP_AUX_CTRL_ENABLE; + edp_write(aux->base + REG_EDP_AUX_CTRL, data); + } else { + data &= ~EDP_AUX_CTRL_ENABLE; + edp_write(aux->base + REG_EDP_AUX_CTRL, data); + } +} + diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c new file mode 100644 index 000000000000..2bc73f82f3f5 --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp_bridge.c @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "edp.h"
+
+struct edp_bridge {
+ struct drm_bridge base;
+ struct msm_edp *edp;
+};
+#define to_edp_bridge(x) container_of(x, struct edp_bridge, base)
+
+void edp_bridge_destroy(struct drm_bridge *bridge)
+{
+}
+
+static void edp_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
+ struct msm_edp *edp = edp_bridge->edp;
+
+ DBG("");
+ msm_edp_ctrl_power(edp->ctrl, true);
+}
+
+static void edp_bridge_enable(struct drm_bridge *bridge)
+{
+ DBG("");
+}
+
+static void edp_bridge_disable(struct drm_bridge *bridge)
+{
+ DBG("");
+}
+
+static void edp_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
+ struct msm_edp *edp = edp_bridge->edp;
+
+ DBG("");
+ msm_edp_ctrl_power(edp->ctrl, false);
+}
+
+static void edp_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = bridge->dev;
+ struct drm_connector *connector;
+ struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
+ struct msm_edp *edp = edp_bridge->edp;
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if ((connector->encoder != NULL) &&
+ (connector->encoder->bridge == bridge)) {
+ msm_edp_ctrl_timing_cfg(edp->ctrl,
+ adjusted_mode, &connector->display_info);
+ break;
+ }
+ }
+}
+
+static const struct drm_bridge_funcs edp_bridge_funcs = {
+ .pre_enable = edp_bridge_pre_enable,
+ .enable = edp_bridge_enable,
+ .disable = edp_bridge_disable,
+ .post_disable = edp_bridge_post_disable,
+ .mode_set = edp_bridge_mode_set,
+};
+
+/* initialize bridge */
+struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp)
+{
+ struct drm_bridge *bridge = NULL;
+ struct edp_bridge *edp_bridge;
+ int ret;
+
+ edp_bridge = devm_kzalloc(edp->dev->dev,
+ sizeof(*edp_bridge), GFP_KERNEL);
+ if (!edp_bridge) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ edp_bridge->edp = edp;
+
+ bridge = &edp_bridge->base;
+ bridge->funcs = &edp_bridge_funcs;
+
+ ret = drm_bridge_attach(edp->dev, bridge);
+ if (ret)
+ goto fail;
+
+ return bridge;
+
+fail:
+ if (bridge)
+ edp_bridge_destroy(bridge);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
new file mode 100644
index 000000000000..d8812e84da54
--- /dev/null
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "drm/drm_edid.h" +#include "msm_kms.h" +#include "edp.h" + +struct edp_connector { + struct drm_connector base; + struct msm_edp *edp; +}; +#define to_edp_connector(x) container_of(x, struct edp_connector, base) + +static enum drm_connector_status edp_connector_detect( + struct drm_connector *connector, bool force) +{ + struct edp_connector *edp_connector = to_edp_connector(connector); + struct msm_edp *edp = edp_connector->edp; + + DBG(""); + return msm_edp_ctrl_panel_connected(edp->ctrl) ? + connector_status_connected : connector_status_disconnected; +} + +static void edp_connector_destroy(struct drm_connector *connector) +{ + struct edp_connector *edp_connector = to_edp_connector(connector); + + DBG(""); + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + + kfree(edp_connector); +} + +static int edp_connector_get_modes(struct drm_connector *connector) +{ + struct edp_connector *edp_connector = to_edp_connector(connector); + struct msm_edp *edp = edp_connector->edp; + + struct edid *drm_edid = NULL; + int ret = 0; + + DBG(""); + ret = msm_edp_ctrl_get_panel_info(edp->ctrl, connector, &drm_edid); + if (ret) + return ret; + + drm_mode_connector_update_edid_property(connector, drm_edid); + if (drm_edid) + ret = drm_add_edid_modes(connector, drm_edid); + + return ret; +} + +static int edp_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct edp_connector *edp_connector = to_edp_connector(connector); + struct msm_edp *edp = edp_connector->edp; + struct msm_drm_private *priv = connector->dev->dev_private; + struct msm_kms *kms = priv->kms; + long actual, requested; + + requested = 1000 * mode->clock; + actual = kms->funcs->round_pixclk(kms, + requested, edp_connector->edp->encoder); + + DBG("requested=%ld, actual=%ld", requested, actual); + if (actual != requested) + return MODE_CLOCK_RANGE; + + if (!msm_edp_ctrl_pixel_clock_valid( + edp->ctrl, mode->clock, NULL, NULL)) + return MODE_CLOCK_RANGE; + + /* Invalidate all modes if color format is not supported */ + if (connector->display_info.bpc > 8) + return MODE_BAD; + + return MODE_OK; +} + +static struct drm_encoder * +edp_connector_best_encoder(struct drm_connector *connector) +{ + struct edp_connector *edp_connector = to_edp_connector(connector); + + DBG(""); + return edp_connector->edp->encoder; +} + +static const struct drm_connector_funcs edp_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .detect = edp_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = edp_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_helper_funcs edp_connector_helper_funcs = { + .get_modes = edp_connector_get_modes, + .mode_valid = edp_connector_mode_valid, + .best_encoder = edp_connector_best_encoder, +}; + +/* initialize connector */ +struct drm_connector *msm_edp_connector_init(struct msm_edp *edp) +{ + struct drm_connector *connector = NULL; + struct edp_connector *edp_connector; + int ret; + + edp_connector = kzalloc(sizeof(*edp_connector), GFP_KERNEL); + if (!edp_connector) { 
+ ret = -ENOMEM; + goto fail; + } + + edp_connector->edp = edp; + + connector = &edp_connector->base; + + ret = drm_connector_init(edp->dev, connector, &edp_connector_funcs, + DRM_MODE_CONNECTOR_eDP); + if (ret) + goto fail; + + drm_connector_helper_add(connector, &edp_connector_helper_funcs); + + /* We don't support HPD, so only poll status until connected. */ + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + /* Display driver doesn't support interlace now. */ + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + + ret = drm_connector_register(connector); + if (ret) + goto fail; + + return connector; + +fail: + if (connector) + edp_connector_destroy(connector); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c new file mode 100644 index 000000000000..3e246210c46f --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c @@ -0,0 +1,1373 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk.h> +#include <linux/gpio/consumer.h> +#include <linux/regulator/consumer.h> + +#include "drm_crtc.h" +#include "drm_dp_helper.h" +#include "drm_edid.h" +#include "edp.h" +#include "edp.xml.h" + +#define VDDA_MIN_UV 1800000 /* uV units */ +#define VDDA_MAX_UV 1800000 /* uV units */ +#define VDDA_UA_ON_LOAD 100000 /* uA units */ +#define VDDA_UA_OFF_LOAD 100 /* uA units */ + +#define DPCD_LINK_VOLTAGE_MAX 4 +#define DPCD_LINK_PRE_EMPHASIS_MAX 4 + +#define EDP_LINK_BW_MAX DP_LINK_BW_2_7 + +/* Link training return value */ +#define EDP_TRAIN_FAIL -1 +#define EDP_TRAIN_SUCCESS 0 +#define EDP_TRAIN_RECONFIG 1 + +#define EDP_CLK_MASK_AHB BIT(0) +#define EDP_CLK_MASK_AUX BIT(1) +#define EDP_CLK_MASK_LINK BIT(2) +#define EDP_CLK_MASK_PIXEL BIT(3) +#define EDP_CLK_MASK_MDP_CORE BIT(4) +#define EDP_CLK_MASK_LINK_CHAN (EDP_CLK_MASK_LINK | EDP_CLK_MASK_PIXEL) +#define EDP_CLK_MASK_AUX_CHAN \ + (EDP_CLK_MASK_AHB | EDP_CLK_MASK_AUX | EDP_CLK_MASK_MDP_CORE) +#define EDP_CLK_MASK_ALL (EDP_CLK_MASK_AUX_CHAN | EDP_CLK_MASK_LINK_CHAN) + +#define EDP_BACKLIGHT_MAX 255 + +#define EDP_INTR_STATUS1 \ + (EDP_INTERRUPT_REG_1_HPD | EDP_INTERRUPT_REG_1_AUX_I2C_DONE | \ + EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \ + EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \ + EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER | \ + EDP_INTERRUPT_REG_1_PLL_UNLOCK | EDP_INTERRUPT_REG_1_AUX_ERROR) +#define EDP_INTR_MASK1 (EDP_INTR_STATUS1 << 2) +#define EDP_INTR_STATUS2 \ + (EDP_INTERRUPT_REG_2_READY_FOR_VIDEO | \ + EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT | \ + EDP_INTERRUPT_REG_2_FRAME_END | EDP_INTERRUPT_REG_2_CRC_UPDATED) +#define EDP_INTR_MASK2 (EDP_INTR_STATUS2 << 2) + +struct edp_ctrl { + struct platform_device *pdev; + + void __iomem *base; + + /* regulators */ + struct regulator *vdda_vreg; + struct regulator *lvl_vreg; + + /* clocks */ + struct clk *aux_clk; + struct clk *pixel_clk; + struct clk *ahb_clk; + struct clk *link_clk; + struct clk *mdp_core_clk; + + /* gpios */ + struct 
gpio_desc *panel_en_gpio; + struct gpio_desc *panel_hpd_gpio; + + /* completion and mutex */ + struct completion idle_comp; + struct mutex dev_mutex; /* To protect device power status */ + + /* work queue */ + struct work_struct on_work; + struct work_struct off_work; + struct workqueue_struct *workqueue; + + /* Interrupt register lock */ + spinlock_t irq_lock; + + bool edp_connected; + bool power_on; + + /* edid raw data */ + struct edid *edid; + + struct drm_dp_link dp_link; + struct drm_dp_aux *drm_aux; + + /* dpcd raw data */ + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + + /* Link status */ + u8 link_rate; + u8 lane_cnt; + u8 v_level; + u8 p_level; + + /* Timing status */ + u8 interlaced; + u32 pixel_rate; /* in kHz */ + u32 color_depth; + + struct edp_aux *aux; + struct edp_phy *phy; +}; + +struct edp_pixel_clk_div { + u32 rate; /* in kHz */ + u32 m; + u32 n; +}; + +#define EDP_PIXEL_CLK_NUM 8 +static const struct edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = { + { /* Link clock = 162MHz, source clock = 810MHz */ + {119000, 31, 211}, /* WSXGA+ 1680x1050@60Hz CVT */ + {130250, 32, 199}, /* UXGA 1600x1200@60Hz CVT */ + {148500, 11, 60}, /* FHD 1920x1080@60Hz */ + {154000, 50, 263}, /* WUXGA 1920x1200@60Hz CVT */ + {209250, 31, 120}, /* QXGA 2048x1536@60Hz CVT */ + {268500, 119, 359}, /* WQXGA 2560x1600@60Hz CVT */ + {138530, 33, 193}, /* AUO B116HAN03.0 Panel */ + {141400, 48, 275}, /* AUO B133HTN01.2 Panel */ + }, + { /* Link clock = 270MHz, source clock = 675MHz */ + {119000, 52, 295}, /* WSXGA+ 1680x1050@60Hz CVT */ + {130250, 11, 57}, /* UXGA 1600x1200@60Hz CVT */ + {148500, 11, 50}, /* FHD 1920x1080@60Hz */ + {154000, 47, 206}, /* WUXGA 1920x1200@60Hz CVT */ + {209250, 31, 100}, /* QXGA 2048x1536@60Hz CVT */ + {268500, 107, 269}, /* WQXGA 2560x1600@60Hz CVT */ + {138530, 63, 307}, /* AUO B116HAN03.0 Panel */ + {141400, 53, 253}, /* AUO B133HTN01.2 Panel */ + }, +}; + +static int edp_clk_init(struct edp_ctrl *ctrl) +{ + struct device *dev = &ctrl->pdev->dev; + int ret; + + ctrl->aux_clk = devm_clk_get(dev, "core_clk"); + if (IS_ERR(ctrl->aux_clk)) { + ret = PTR_ERR(ctrl->aux_clk); + pr_err("%s: Can't find aux_clk, %d\n", __func__, ret); + ctrl->aux_clk = NULL; + return ret; + } + + ctrl->pixel_clk = devm_clk_get(dev, "pixel_clk"); + if (IS_ERR(ctrl->pixel_clk)) { + ret = PTR_ERR(ctrl->pixel_clk); + pr_err("%s: Can't find pixel_clk, %d\n", __func__, ret); + ctrl->pixel_clk = NULL; + return ret; + } + + ctrl->ahb_clk = devm_clk_get(dev, "iface_clk"); + if (IS_ERR(ctrl->ahb_clk)) { + ret = PTR_ERR(ctrl->ahb_clk); + pr_err("%s: Can't find ahb_clk, %d\n", __func__, ret); + ctrl->ahb_clk = NULL; + return ret; + } + + ctrl->link_clk = devm_clk_get(dev, "link_clk"); + if (IS_ERR(ctrl->link_clk)) { + ret = PTR_ERR(ctrl->link_clk); + pr_err("%s: Can't find link_clk, %d\n", __func__, ret); + ctrl->link_clk = NULL; + return ret; + } + + /* need mdp core clock to receive irq */ + ctrl->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk"); + if (IS_ERR(ctrl->mdp_core_clk)) { + ret = PTR_ERR(ctrl->mdp_core_clk); + pr_err("%s: Can't find mdp_core_clk, %d\n", __func__, ret); + ctrl->mdp_core_clk = NULL; + return ret; + } + + return 0; +} + +static int edp_clk_enable(struct edp_ctrl *ctrl, u32 clk_mask) +{ + int ret; + + DBG("mask=%x", clk_mask); + /* ahb_clk should be enabled first */ + if (clk_mask & EDP_CLK_MASK_AHB) { + ret = clk_prepare_enable(ctrl->ahb_clk); + if (ret) { + pr_err("%s: Failed to enable ahb clk\n", __func__); + goto f0; + } + } + if (clk_mask & EDP_CLK_MASK_AUX) { + ret = 
clk_set_rate(ctrl->aux_clk, 19200000); + if (ret) { + pr_err("%s: Failed to set rate aux clk\n", __func__); + goto f1; + } + ret = clk_prepare_enable(ctrl->aux_clk); + if (ret) { + pr_err("%s: Failed to enable aux clk\n", __func__); + goto f1; + } + } + /* Need to set rate and enable link_clk prior to pixel_clk */ + if (clk_mask & EDP_CLK_MASK_LINK) { + DBG("edp->link_clk, set_rate %ld", + (unsigned long)ctrl->link_rate * 27000000); + ret = clk_set_rate(ctrl->link_clk, + (unsigned long)ctrl->link_rate * 27000000); + if (ret) { + pr_err("%s: Failed to set rate to link clk\n", + __func__); + goto f2; + } + + ret = clk_prepare_enable(ctrl->link_clk); + if (ret) { + pr_err("%s: Failed to enable link clk\n", __func__); + goto f2; + } + } + if (clk_mask & EDP_CLK_MASK_PIXEL) { + DBG("edp->pixel_clk, set_rate %ld", + (unsigned long)ctrl->pixel_rate * 1000); + ret = clk_set_rate(ctrl->pixel_clk, + (unsigned long)ctrl->pixel_rate * 1000); + if (ret) { + pr_err("%s: Failed to set rate to pixel clk\n", + __func__); + goto f3; + } + + ret = clk_prepare_enable(ctrl->pixel_clk); + if (ret) { + pr_err("%s: Failed to enable pixel clk\n", __func__); + goto f3; + } + } + if (clk_mask & EDP_CLK_MASK_MDP_CORE) { + ret = clk_prepare_enable(ctrl->mdp_core_clk); + if (ret) { + pr_err("%s: Failed to enable mdp core clk\n", __func__); + goto f4; + } + } + + return 0; + +f4: + if (clk_mask & EDP_CLK_MASK_PIXEL) + clk_disable_unprepare(ctrl->pixel_clk); +f3: + if (clk_mask & EDP_CLK_MASK_LINK) + clk_disable_unprepare(ctrl->link_clk); +f2: + if (clk_mask & EDP_CLK_MASK_AUX) + clk_disable_unprepare(ctrl->aux_clk); +f1: + if (clk_mask & EDP_CLK_MASK_AHB) + clk_disable_unprepare(ctrl->ahb_clk); +f0: + return ret; +} + +static void edp_clk_disable(struct edp_ctrl *ctrl, u32 clk_mask) +{ + if (clk_mask & EDP_CLK_MASK_MDP_CORE) + clk_disable_unprepare(ctrl->mdp_core_clk); + if (clk_mask & EDP_CLK_MASK_PIXEL) + clk_disable_unprepare(ctrl->pixel_clk); + if (clk_mask & EDP_CLK_MASK_LINK) + clk_disable_unprepare(ctrl->link_clk); + if (clk_mask & EDP_CLK_MASK_AUX) + clk_disable_unprepare(ctrl->aux_clk); + if (clk_mask & EDP_CLK_MASK_AHB) + clk_disable_unprepare(ctrl->ahb_clk); +} + +static int edp_regulator_init(struct edp_ctrl *ctrl) +{ + struct device *dev = &ctrl->pdev->dev; + + DBG(""); + ctrl->vdda_vreg = devm_regulator_get(dev, "vdda"); + if (IS_ERR(ctrl->vdda_vreg)) { + pr_err("%s: Could not get vdda reg, ret = %ld\n", __func__, + PTR_ERR(ctrl->vdda_vreg)); + ctrl->vdda_vreg = NULL; + return PTR_ERR(ctrl->vdda_vreg); + } + ctrl->lvl_vreg = devm_regulator_get(dev, "lvl-vdd"); + if (IS_ERR(ctrl->lvl_vreg)) { + pr_err("Could not get lvl-vdd reg, %ld", + PTR_ERR(ctrl->lvl_vreg)); + ctrl->lvl_vreg = NULL; + return PTR_ERR(ctrl->lvl_vreg); + } + + return 0; +} + +static int edp_regulator_enable(struct edp_ctrl *ctrl) +{ + int ret; + + ret = regulator_set_voltage(ctrl->vdda_vreg, VDDA_MIN_UV, VDDA_MAX_UV); + if (ret) { + pr_err("%s:vdda_vreg set_voltage failed, %d\n", __func__, ret); + goto vdda_set_fail; + } + + ret = regulator_set_optimum_mode(ctrl->vdda_vreg, VDDA_UA_ON_LOAD); + if (ret < 0) { + pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__); + goto vdda_set_fail; + } + + ret = regulator_enable(ctrl->vdda_vreg); + if (ret) { + pr_err("%s: Failed to enable vdda_vreg regulator.\n", __func__); + goto vdda_enable_fail; + } + + ret = regulator_enable(ctrl->lvl_vreg); + if (ret) { + pr_err("Failed to enable lvl-vdd reg regulator, %d", ret); + goto lvl_enable_fail; + } + + DBG("exit"); + return 0; + 
+lvl_enable_fail: + regulator_disable(ctrl->vdda_vreg); +vdda_enable_fail: + regulator_set_optimum_mode(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD); +vdda_set_fail: + return ret; +} + +static void edp_regulator_disable(struct edp_ctrl *ctrl) +{ + regulator_disable(ctrl->lvl_vreg); + regulator_disable(ctrl->vdda_vreg); + regulator_set_optimum_mode(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD); +} + +static int edp_gpio_config(struct edp_ctrl *ctrl) +{ + struct device *dev = &ctrl->pdev->dev; + int ret; + + ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd"); + if (IS_ERR(ctrl->panel_hpd_gpio)) { + ret = PTR_ERR(ctrl->panel_hpd_gpio); + ctrl->panel_hpd_gpio = NULL; + pr_err("%s: cannot get panel-hpd-gpios, %d\n", __func__, ret); + return ret; + } + + ret = gpiod_direction_input(ctrl->panel_hpd_gpio); + if (ret) { + pr_err("%s: Set direction for hpd failed, %d\n", __func__, ret); + return ret; + } + + ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en"); + if (IS_ERR(ctrl->panel_en_gpio)) { + ret = PTR_ERR(ctrl->panel_en_gpio); + ctrl->panel_en_gpio = NULL; + pr_err("%s: cannot get panel-en-gpios, %d\n", __func__, ret); + return ret; + } + + ret = gpiod_direction_output(ctrl->panel_en_gpio, 0); + if (ret) { + pr_err("%s: Set direction for panel_en failed, %d\n", + __func__, ret); + return ret; + } + + DBG("gpio on"); + + return 0; +} + +static void edp_ctrl_irq_enable(struct edp_ctrl *ctrl, int enable) +{ + unsigned long flags; + + DBG("%d", enable); + spin_lock_irqsave(&ctrl->irq_lock, flags); + if (enable) { + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, EDP_INTR_MASK1); + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, EDP_INTR_MASK2); + } else { + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, 0x0); + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, 0x0); + } + spin_unlock_irqrestore(&ctrl->irq_lock, flags); + DBG("exit"); +} + +static void edp_fill_link_cfg(struct edp_ctrl *ctrl) +{ + u32 prate; + u32 lrate; + u32 bpp; + u8 max_lane = ctrl->dp_link.num_lanes; + u8 lane; + + prate = ctrl->pixel_rate; + bpp = ctrl->color_depth * 3; + + /* + * By default, use the maximum link rate and minimum lane count, + * so that we can do rate down shift during link training. 
+ */ + ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate); + + prate *= bpp; + prate /= 8; /* in kByte */ + + lrate = 270000; /* in kHz */ + lrate *= ctrl->link_rate; + lrate /= 10; /* in kByte, 10 bits --> 8 bits */ + + for (lane = 1; lane <= max_lane; lane <<= 1) { + if (lrate >= prate) + break; + lrate <<= 1; + } + + ctrl->lane_cnt = lane; + DBG("rate=%d lane=%d", ctrl->link_rate, ctrl->lane_cnt); +} + +static void edp_config_ctrl(struct edp_ctrl *ctrl) +{ + u32 data; + enum edp_color_depth depth; + + data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1); + + if (ctrl->dp_link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING; + + depth = EDP_6BIT; + if (ctrl->color_depth == 8) + depth = EDP_8BIT; + + data |= EDP_CONFIGURATION_CTRL_COLOR(depth); + + if (!ctrl->interlaced) /* progressive */ + data |= EDP_CONFIGURATION_CTRL_PROGRESSIVE; + + data |= (EDP_CONFIGURATION_CTRL_SYNC_CLK | + EDP_CONFIGURATION_CTRL_STATIC_MVID); + + edp_write(ctrl->base + REG_EDP_CONFIGURATION_CTRL, data); +} + +static void edp_state_ctrl(struct edp_ctrl *ctrl, u32 state) +{ + edp_write(ctrl->base + REG_EDP_STATE_CTRL, state); + /* Make sure H/W status is set */ + wmb(); +} + +static int edp_lane_set_write(struct edp_ctrl *ctrl, + u8 voltage_level, u8 pre_emphasis_level) +{ + int i; + u8 buf[4]; + + if (voltage_level >= DPCD_LINK_VOLTAGE_MAX) + voltage_level |= 0x04; + + if (pre_emphasis_level >= DPCD_LINK_PRE_EMPHASIS_MAX) + pre_emphasis_level |= 0x04; + + pre_emphasis_level <<= 3; + + for (i = 0; i < 4; i++) + buf[i] = voltage_level | pre_emphasis_level; + + DBG("%s: p|v=0x%x", __func__, voltage_level | pre_emphasis_level); + if (drm_dp_dpcd_write(ctrl->drm_aux, 0x103, buf, 4) < 4) { + pr_err("%s: Set sw/pe to panel failed\n", __func__); + return -ENOLINK; + } + + return 0; +} + +static int edp_train_pattern_set_write(struct edp_ctrl *ctrl, u8 pattern) +{ + u8 p = pattern; + + DBG("pattern=%x", p); + if (drm_dp_dpcd_write(ctrl->drm_aux, + DP_TRAINING_PATTERN_SET, &p, 1) < 1) { + pr_err("%s: Set training pattern to panel failed\n", __func__); + return -ENOLINK; + } + + return 0; +} + +static void edp_sink_train_set_adjust(struct edp_ctrl *ctrl, + const u8 *link_status) +{ + int i; + u8 max = 0; + u8 data; + + /* use the max level across lanes */ + for (i = 0; i < ctrl->lane_cnt; i++) { + data = drm_dp_get_adjust_request_voltage(link_status, i); + DBG("lane=%d req_voltage_swing=0x%x", i, data); + if (max < data) + max = data; + } + + ctrl->v_level = max >> DP_TRAIN_VOLTAGE_SWING_SHIFT; + + /* use the max level across lanes */ + max = 0; + for (i = 0; i < ctrl->lane_cnt; i++) { + data = drm_dp_get_adjust_request_pre_emphasis(link_status, i); + DBG("lane=%d req_pre_emphasis=0x%x", i, data); + if (max < data) + max = data; + } + + ctrl->p_level = max >> DP_TRAIN_PRE_EMPHASIS_SHIFT; + DBG("v_level=%d, p_level=%d", ctrl->v_level, ctrl->p_level); +} + +static void edp_host_train_set(struct edp_ctrl *ctrl, u32 train) +{ + int cnt = 10; + u32 data; + u32 shift = train - 1; + + DBG("train=%d", train); + + edp_state_ctrl(ctrl, EDP_STATE_CTRL_TRAIN_PATTERN_1 << shift); + while (--cnt) { + data = edp_read(ctrl->base + REG_EDP_MAINLINK_READY); + if (data & (EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY << shift)) + break; + } + + if (cnt == 0) + pr_err("%s: set link_train=%d failed\n", __func__, train); +} + +static const u8 vm_pre_emphasis[4][4] = { + {0x03, 0x06, 0x09, 0x0C}, /* pe0, 0 db */ + {0x03, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */ + {0x03, 0x06, 0xFF, 0xFF}, /* 
pe2, 6.0 db */ + {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */ +}; + +/* voltage swing, 0.2v and 1.0v are not support */ +static const u8 vm_voltage_swing[4][4] = { + {0x14, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */ + {0x18, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */ + {0x1A, 0x1E, 0xFF, 0xFF}, /* sw1, 0.8 v */ + {0x1E, 0xFF, 0xFF, 0xFF} /* sw1, 1.2 v, optional */ +}; + +static int edp_voltage_pre_emphasise_set(struct edp_ctrl *ctrl) +{ + u32 value0; + u32 value1; + + DBG("v=%d p=%d", ctrl->v_level, ctrl->p_level); + + value0 = vm_pre_emphasis[(int)(ctrl->v_level)][(int)(ctrl->p_level)]; + value1 = vm_voltage_swing[(int)(ctrl->v_level)][(int)(ctrl->p_level)]; + + /* Configure host and panel only if both values are allowed */ + if (value0 != 0xFF && value1 != 0xFF) { + msm_edp_phy_vm_pe_cfg(ctrl->phy, value0, value1); + return edp_lane_set_write(ctrl, ctrl->v_level, ctrl->p_level); + } + + return -EINVAL; +} + +static int edp_start_link_train_1(struct edp_ctrl *ctrl) +{ + u8 link_status[DP_LINK_STATUS_SIZE]; + u8 old_v_level; + int tries; + int ret; + int rlen; + + DBG(""); + + edp_host_train_set(ctrl, DP_TRAINING_PATTERN_1); + ret = edp_voltage_pre_emphasise_set(ctrl); + if (ret) + return ret; + ret = edp_train_pattern_set_write(ctrl, + DP_TRAINING_PATTERN_1 | DP_RECOVERED_CLOCK_OUT_EN); + if (ret) + return ret; + + tries = 0; + old_v_level = ctrl->v_level; + while (1) { + drm_dp_link_train_clock_recovery_delay(ctrl->dpcd); + + rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status); + if (rlen < DP_LINK_STATUS_SIZE) { + pr_err("%s: read link status failed\n", __func__); + return -ENOLINK; + } + if (drm_dp_clock_recovery_ok(link_status, ctrl->lane_cnt)) { + ret = 0; + break; + } + + if (ctrl->v_level == DPCD_LINK_VOLTAGE_MAX) { + ret = -1; + break; + } + + if (old_v_level == ctrl->v_level) { + tries++; + if (tries >= 5) { + ret = -1; + break; + } + } else { + tries = 0; + old_v_level = ctrl->v_level; + } + + edp_sink_train_set_adjust(ctrl, link_status); + ret = edp_voltage_pre_emphasise_set(ctrl); + if (ret) + return ret; + } + + return ret; +} + +static int edp_start_link_train_2(struct edp_ctrl *ctrl) +{ + u8 link_status[DP_LINK_STATUS_SIZE]; + int tries = 0; + int ret; + int rlen; + + DBG(""); + + edp_host_train_set(ctrl, DP_TRAINING_PATTERN_2); + ret = edp_voltage_pre_emphasise_set(ctrl); + if (ret) + return ret; + + ret = edp_train_pattern_set_write(ctrl, + DP_TRAINING_PATTERN_2 | DP_RECOVERED_CLOCK_OUT_EN); + if (ret) + return ret; + + while (1) { + drm_dp_link_train_channel_eq_delay(ctrl->dpcd); + + rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status); + if (rlen < DP_LINK_STATUS_SIZE) { + pr_err("%s: read link status failed\n", __func__); + return -ENOLINK; + } + if (drm_dp_channel_eq_ok(link_status, ctrl->lane_cnt)) { + ret = 0; + break; + } + + tries++; + if (tries > 10) { + ret = -1; + break; + } + + edp_sink_train_set_adjust(ctrl, link_status); + ret = edp_voltage_pre_emphasise_set(ctrl); + if (ret) + return ret; + } + + return ret; +} + +static int edp_link_rate_down_shift(struct edp_ctrl *ctrl) +{ + u32 prate, lrate, bpp; + u8 rate, lane, max_lane; + int changed = 0; + + rate = ctrl->link_rate; + lane = ctrl->lane_cnt; + max_lane = ctrl->dp_link.num_lanes; + + bpp = ctrl->color_depth * 3; + prate = ctrl->pixel_rate; + prate *= bpp; + prate /= 8; /* in kByte */ + + if (rate > DP_LINK_BW_1_62 && rate <= EDP_LINK_BW_MAX) { + rate -= 4; /* reduce rate */ + changed++; + } + + if (changed) { + if (lane >= 1 && lane < max_lane) + lane <<= 1; /* increase lane */ + + lrate = 270000; 
/* in kHz */ + lrate *= rate; + lrate /= 10; /* kByte, 10 bits --> 8 bits */ + lrate *= lane; + + DBG("new lrate=%u prate=%u(kHz) rate=%d lane=%d p=%u b=%d", + lrate, prate, rate, lane, + ctrl->pixel_rate, + bpp); + + if (lrate > prate) { + ctrl->link_rate = rate; + ctrl->lane_cnt = lane; + DBG("new rate=%d %d", rate, lane); + return 0; + } + } + + return -EINVAL; +} + +static int edp_clear_training_pattern(struct edp_ctrl *ctrl) +{ + int ret; + + ret = edp_train_pattern_set_write(ctrl, 0); + + drm_dp_link_train_channel_eq_delay(ctrl->dpcd); + + return ret; +} + +static int edp_do_link_train(struct edp_ctrl *ctrl) +{ + int ret; + struct drm_dp_link dp_link; + + DBG(""); + /* + * Set the current link rate and lane cnt to panel. They may have been + * adjusted and the values are different from them in DPCD CAP + */ + dp_link.num_lanes = ctrl->lane_cnt; + dp_link.rate = drm_dp_bw_code_to_link_rate(ctrl->link_rate); + dp_link.capabilities = ctrl->dp_link.capabilities; + if (drm_dp_link_configure(ctrl->drm_aux, &dp_link) < 0) + return EDP_TRAIN_FAIL; + + ctrl->v_level = 0; /* start from default level */ + ctrl->p_level = 0; + + edp_state_ctrl(ctrl, 0); + if (edp_clear_training_pattern(ctrl)) + return EDP_TRAIN_FAIL; + + ret = edp_start_link_train_1(ctrl); + if (ret < 0) { + if (edp_link_rate_down_shift(ctrl) == 0) { + DBG("link reconfig"); + ret = EDP_TRAIN_RECONFIG; + goto clear; + } else { + pr_err("%s: Training 1 failed", __func__); + ret = EDP_TRAIN_FAIL; + goto clear; + } + } + DBG("Training 1 completed successfully"); + + edp_state_ctrl(ctrl, 0); + if (edp_clear_training_pattern(ctrl)) + return EDP_TRAIN_FAIL; + + ret = edp_start_link_train_2(ctrl); + if (ret < 0) { + if (edp_link_rate_down_shift(ctrl) == 0) { + DBG("link reconfig"); + ret = EDP_TRAIN_RECONFIG; + goto clear; + } else { + pr_err("%s: Training 2 failed", __func__); + ret = EDP_TRAIN_FAIL; + goto clear; + } + } + DBG("Training 2 completed successfully"); + + edp_state_ctrl(ctrl, EDP_STATE_CTRL_SEND_VIDEO); +clear: + edp_clear_training_pattern(ctrl); + + return ret; +} + +static void edp_clock_synchrous(struct edp_ctrl *ctrl, int sync) +{ + u32 data; + enum edp_color_depth depth; + + data = edp_read(ctrl->base + REG_EDP_MISC1_MISC0); + + if (sync) + data |= EDP_MISC1_MISC0_SYNC; + else + data &= ~EDP_MISC1_MISC0_SYNC; + + /* only legacy rgb mode supported */ + depth = EDP_6BIT; /* Default */ + if (ctrl->color_depth == 8) + depth = EDP_8BIT; + else if (ctrl->color_depth == 10) + depth = EDP_10BIT; + else if (ctrl->color_depth == 12) + depth = EDP_12BIT; + else if (ctrl->color_depth == 16) + depth = EDP_16BIT; + + data |= EDP_MISC1_MISC0_COLOR(depth); + + edp_write(ctrl->base + REG_EDP_MISC1_MISC0, data); +} + +static int edp_sw_mvid_nvid(struct edp_ctrl *ctrl, u32 m, u32 n) +{ + u32 n_multi, m_multi = 5; + + if (ctrl->link_rate == DP_LINK_BW_1_62) { + n_multi = 1; + } else if (ctrl->link_rate == DP_LINK_BW_2_7) { + n_multi = 2; + } else { + pr_err("%s: Invalid link rate, %d\n", __func__, + ctrl->link_rate); + return -EINVAL; + } + + edp_write(ctrl->base + REG_EDP_SOFTWARE_MVID, m * m_multi); + edp_write(ctrl->base + REG_EDP_SOFTWARE_NVID, n * n_multi); + + return 0; +} + +static void edp_mainlink_ctrl(struct edp_ctrl *ctrl, int enable) +{ + u32 data = 0; + + edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, EDP_MAINLINK_CTRL_RESET); + /* Make sure fully reset */ + wmb(); + usleep_range(500, 1000); + + if (enable) + data |= EDP_MAINLINK_CTRL_ENABLE; + + edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, data); +} + +static void 
edp_ctrl_phy_aux_enable(struct edp_ctrl *ctrl, int enable) +{ + if (enable) { + edp_regulator_enable(ctrl); + edp_clk_enable(ctrl, EDP_CLK_MASK_AUX_CHAN); + msm_edp_phy_ctrl(ctrl->phy, 1); + msm_edp_aux_ctrl(ctrl->aux, 1); + gpiod_set_value(ctrl->panel_en_gpio, 1); + } else { + gpiod_set_value(ctrl->panel_en_gpio, 0); + msm_edp_aux_ctrl(ctrl->aux, 0); + msm_edp_phy_ctrl(ctrl->phy, 0); + edp_clk_disable(ctrl, EDP_CLK_MASK_AUX_CHAN); + edp_regulator_disable(ctrl); + } +} + +static void edp_ctrl_link_enable(struct edp_ctrl *ctrl, int enable) +{ + u32 m, n; + + if (enable) { + /* Enable link channel clocks */ + edp_clk_enable(ctrl, EDP_CLK_MASK_LINK_CHAN); + + msm_edp_phy_lane_power_ctrl(ctrl->phy, true, ctrl->lane_cnt); + + msm_edp_phy_vm_pe_init(ctrl->phy); + + /* Make sure phy is programed */ + wmb(); + msm_edp_phy_ready(ctrl->phy); + + edp_config_ctrl(ctrl); + msm_edp_ctrl_pixel_clock_valid(ctrl, ctrl->pixel_rate, &m, &n); + edp_sw_mvid_nvid(ctrl, m, n); + edp_mainlink_ctrl(ctrl, 1); + } else { + edp_mainlink_ctrl(ctrl, 0); + + msm_edp_phy_lane_power_ctrl(ctrl->phy, false, 0); + edp_clk_disable(ctrl, EDP_CLK_MASK_LINK_CHAN); + } +} + +static int edp_ctrl_training(struct edp_ctrl *ctrl) +{ + int ret; + + /* Do link training only when power is on */ + if (!ctrl->power_on) + return -EINVAL; + +train_start: + ret = edp_do_link_train(ctrl); + if (ret == EDP_TRAIN_RECONFIG) { + /* Re-configure main link */ + edp_ctrl_irq_enable(ctrl, 0); + edp_ctrl_link_enable(ctrl, 0); + msm_edp_phy_ctrl(ctrl->phy, 0); + + /* Make sure link is fully disabled */ + wmb(); + usleep_range(500, 1000); + + msm_edp_phy_ctrl(ctrl->phy, 1); + edp_ctrl_link_enable(ctrl, 1); + edp_ctrl_irq_enable(ctrl, 1); + goto train_start; + } + + return ret; +} + +static void edp_ctrl_on_worker(struct work_struct *work) +{ + struct edp_ctrl *ctrl = container_of( + work, struct edp_ctrl, on_work); + int ret; + + mutex_lock(&ctrl->dev_mutex); + + if (ctrl->power_on) { + DBG("already on"); + goto unlock_ret; + } + + edp_ctrl_phy_aux_enable(ctrl, 1); + edp_ctrl_link_enable(ctrl, 1); + + edp_ctrl_irq_enable(ctrl, 1); + ret = drm_dp_link_power_up(ctrl->drm_aux, &ctrl->dp_link); + if (ret) + goto fail; + + ctrl->power_on = true; + + /* Start link training */ + ret = edp_ctrl_training(ctrl); + if (ret != EDP_TRAIN_SUCCESS) + goto fail; + + DBG("DONE"); + goto unlock_ret; + +fail: + edp_ctrl_irq_enable(ctrl, 0); + edp_ctrl_link_enable(ctrl, 0); + edp_ctrl_phy_aux_enable(ctrl, 0); + ctrl->power_on = false; +unlock_ret: + mutex_unlock(&ctrl->dev_mutex); +} + +static void edp_ctrl_off_worker(struct work_struct *work) +{ + struct edp_ctrl *ctrl = container_of( + work, struct edp_ctrl, off_work); + int ret; + + mutex_lock(&ctrl->dev_mutex); + + if (!ctrl->power_on) { + DBG("already off"); + goto unlock_ret; + } + + reinit_completion(&ctrl->idle_comp); + edp_state_ctrl(ctrl, EDP_STATE_CTRL_PUSH_IDLE); + + ret = wait_for_completion_timeout(&ctrl->idle_comp, + msecs_to_jiffies(500)); + if (ret <= 0) + DBG("%s: idle pattern timedout, %d\n", + __func__, ret); + + edp_state_ctrl(ctrl, 0); + + drm_dp_link_power_down(ctrl->drm_aux, &ctrl->dp_link); + + edp_ctrl_irq_enable(ctrl, 0); + + edp_ctrl_link_enable(ctrl, 0); + + edp_ctrl_phy_aux_enable(ctrl, 0); + + ctrl->power_on = false; + +unlock_ret: + mutex_unlock(&ctrl->dev_mutex); +} + +irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl) +{ + u32 isr1, isr2, mask1, mask2; + u32 ack; + + DBG(""); + spin_lock(&ctrl->irq_lock); + isr1 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_1); + isr2 = 
edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_2); + + mask1 = isr1 & EDP_INTR_MASK1; + mask2 = isr2 & EDP_INTR_MASK2; + + isr1 &= ~mask1; /* remove masks bit */ + isr2 &= ~mask2; + + DBG("isr=%x mask=%x isr2=%x mask2=%x", + isr1, mask1, isr2, mask2); + + ack = isr1 & EDP_INTR_STATUS1; + ack <<= 1; /* ack bits */ + ack |= mask1; + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, ack); + + ack = isr2 & EDP_INTR_STATUS2; + ack <<= 1; /* ack bits */ + ack |= mask2; + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, ack); + spin_unlock(&ctrl->irq_lock); + + if (isr1 & EDP_INTERRUPT_REG_1_HPD) + DBG("edp_hpd"); + + if (isr2 & EDP_INTERRUPT_REG_2_READY_FOR_VIDEO) + DBG("edp_video_ready"); + + if (isr2 & EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT) { + DBG("idle_patterns_sent"); + complete(&ctrl->idle_comp); + } + + msm_edp_aux_irq(ctrl->aux, isr1); + + return IRQ_HANDLED; +} + +void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on) +{ + if (on) + queue_work(ctrl->workqueue, &ctrl->on_work); + else + queue_work(ctrl->workqueue, &ctrl->off_work); +} + +int msm_edp_ctrl_init(struct msm_edp *edp) +{ + struct edp_ctrl *ctrl = NULL; + struct device *dev = &edp->pdev->dev; + int ret; + + if (!edp) { + pr_err("%s: edp is NULL!\n", __func__); + return -EINVAL; + } + + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + edp->ctrl = ctrl; + ctrl->pdev = edp->pdev; + + ctrl->base = msm_ioremap(ctrl->pdev, "edp", "eDP"); + if (IS_ERR(ctrl->base)) + return PTR_ERR(ctrl->base); + + /* Get regulator, clock, gpio, pwm */ + ret = edp_regulator_init(ctrl); + if (ret) { + pr_err("%s:regulator init fail\n", __func__); + return ret; + } + ret = edp_clk_init(ctrl); + if (ret) { + pr_err("%s:clk init fail\n", __func__); + return ret; + } + ret = edp_gpio_config(ctrl); + if (ret) { + pr_err("%s:failed to configure GPIOs: %d", __func__, ret); + return ret; + } + + /* Init aux and phy */ + ctrl->aux = msm_edp_aux_init(dev, ctrl->base, &ctrl->drm_aux); + if (!ctrl->aux || !ctrl->drm_aux) { + pr_err("%s:failed to init aux\n", __func__); + return ret; + } + + ctrl->phy = msm_edp_phy_init(dev, ctrl->base); + if (!ctrl->phy) { + pr_err("%s:failed to init phy\n", __func__); + goto err_destory_aux; + } + + spin_lock_init(&ctrl->irq_lock); + mutex_init(&ctrl->dev_mutex); + init_completion(&ctrl->idle_comp); + + /* setup workqueue */ + ctrl->workqueue = alloc_ordered_workqueue("edp_drm_work", 0); + INIT_WORK(&ctrl->on_work, edp_ctrl_on_worker); + INIT_WORK(&ctrl->off_work, edp_ctrl_off_worker); + + return 0; + +err_destory_aux: + msm_edp_aux_destroy(dev, ctrl->aux); + ctrl->aux = NULL; + return ret; +} + +void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl) +{ + if (!ctrl) + return; + + if (ctrl->workqueue) { + flush_workqueue(ctrl->workqueue); + destroy_workqueue(ctrl->workqueue); + ctrl->workqueue = NULL; + } + + if (ctrl->aux) { + msm_edp_aux_destroy(&ctrl->pdev->dev, ctrl->aux); + ctrl->aux = NULL; + } + + kfree(ctrl->edid); + ctrl->edid = NULL; + + mutex_destroy(&ctrl->dev_mutex); +} + +bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl) +{ + mutex_lock(&ctrl->dev_mutex); + DBG("connect status = %d", ctrl->edp_connected); + if (ctrl->edp_connected) { + mutex_unlock(&ctrl->dev_mutex); + return true; + } + + if (!ctrl->power_on) { + edp_ctrl_phy_aux_enable(ctrl, 1); + edp_ctrl_irq_enable(ctrl, 1); + } + + if (drm_dp_dpcd_read(ctrl->drm_aux, DP_DPCD_REV, ctrl->dpcd, + DP_RECEIVER_CAP_SIZE) < DP_RECEIVER_CAP_SIZE) { + pr_err("%s: AUX channel is NOT ready\n", __func__); + memset(ctrl->dpcd, 0, 
DP_RECEIVER_CAP_SIZE); + } else { + ctrl->edp_connected = true; + } + + if (!ctrl->power_on) { + edp_ctrl_irq_enable(ctrl, 0); + edp_ctrl_phy_aux_enable(ctrl, 0); + } + + DBG("exit: connect status=%d", ctrl->edp_connected); + + mutex_unlock(&ctrl->dev_mutex); + + return ctrl->edp_connected; +} + +int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl, + struct drm_connector *connector, struct edid **edid) +{ + int ret = 0; + + mutex_lock(&ctrl->dev_mutex); + + if (ctrl->edid) { + if (edid) { + DBG("Just return edid buffer"); + *edid = ctrl->edid; + } + goto unlock_ret; + } + + if (!ctrl->power_on) { + edp_ctrl_phy_aux_enable(ctrl, 1); + edp_ctrl_irq_enable(ctrl, 1); + } + + ret = drm_dp_link_probe(ctrl->drm_aux, &ctrl->dp_link); + if (ret) { + pr_err("%s: read dpcd cap failed, %d\n", __func__, ret); + goto disable_ret; + } + + /* Initialize link rate as panel max link rate */ + ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate); + + ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc); + if (!ctrl->edid) { + pr_err("%s: edid read fail\n", __func__); + goto disable_ret; + } + + if (edid) + *edid = ctrl->edid; + +disable_ret: + if (!ctrl->power_on) { + edp_ctrl_irq_enable(ctrl, 0); + edp_ctrl_phy_aux_enable(ctrl, 0); + } +unlock_ret: + mutex_unlock(&ctrl->dev_mutex); + return ret; +} + +int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl, + const struct drm_display_mode *mode, + const struct drm_display_info *info) +{ + u32 hstart_from_sync, vstart_from_sync; + u32 data; + int ret = 0; + + mutex_lock(&ctrl->dev_mutex); + /* + * Need to keep color depth, pixel rate and + * interlaced information in ctrl context + */ + ctrl->color_depth = info->bpc; + ctrl->pixel_rate = mode->clock; + ctrl->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); + + /* Fill initial link config based on passed in timing */ + edp_fill_link_cfg(ctrl); + + if (edp_clk_enable(ctrl, EDP_CLK_MASK_AHB)) { + pr_err("%s, fail to prepare enable ahb clk\n", __func__); + ret = -EINVAL; + goto unlock_ret; + } + edp_clock_synchrous(ctrl, 1); + + /* Configure eDP timing to HW */ + edp_write(ctrl->base + REG_EDP_TOTAL_HOR_VER, + EDP_TOTAL_HOR_VER_HORIZ(mode->htotal) | + EDP_TOTAL_HOR_VER_VERT(mode->vtotal)); + + vstart_from_sync = mode->vtotal - mode->vsync_start; + hstart_from_sync = mode->htotal - mode->hsync_start; + edp_write(ctrl->base + REG_EDP_START_HOR_VER_FROM_SYNC, + EDP_START_HOR_VER_FROM_SYNC_HORIZ(hstart_from_sync) | + EDP_START_HOR_VER_FROM_SYNC_VERT(vstart_from_sync)); + + data = EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT( + mode->vsync_end - mode->vsync_start); + data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ( + mode->hsync_end - mode->hsync_start); + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC; + edp_write(ctrl->base + REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY, data); + + edp_write(ctrl->base + REG_EDP_ACTIVE_HOR_VER, + EDP_ACTIVE_HOR_VER_HORIZ(mode->hdisplay) | + EDP_ACTIVE_HOR_VER_VERT(mode->vdisplay)); + + edp_clk_disable(ctrl, EDP_CLK_MASK_AHB); + +unlock_ret: + mutex_unlock(&ctrl->dev_mutex); + return ret; +} + +bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl, + u32 pixel_rate, u32 *pm, u32 *pn) +{ + const struct edp_pixel_clk_div *divs; + u32 err = 1; /* 1% error tolerance */ + u32 clk_err; + int i; + + if (ctrl->link_rate == DP_LINK_BW_1_62) { + divs = clk_divs[0]; + } else if (ctrl->link_rate == DP_LINK_BW_2_7) { + divs = clk_divs[1]; + } else { + pr_err("%s: 
Invalid link rate,%d\n", __func__, ctrl->link_rate); + return false; + } + + for (i = 0; i < EDP_PIXEL_CLK_NUM; i++) { + clk_err = abs(divs[i].rate - pixel_rate); + if ((divs[i].rate * err / 100) >= clk_err) { + if (pm) + *pm = divs[i].m; + if (pn) + *pn = divs[i].n; + return true; + } + } + + DBG("pixel clock %d(kHz) not supported", pixel_rate); + + return false; +} + diff --git a/drivers/gpu/drm/msm/edp/edp_phy.c b/drivers/gpu/drm/msm/edp/edp_phy.c new file mode 100644 index 000000000000..36bb8933e9ee --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp_phy.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "edp.h" +#include "edp.xml.h" + +#define EDP_MAX_LANE 4 + +struct edp_phy { + void __iomem *base; +}; + +bool msm_edp_phy_ready(struct edp_phy *phy) +{ + u32 status; + int cnt = 100; + + while (--cnt) { + status = edp_read(phy->base + + REG_EDP_PHY_GLB_PHY_STATUS); + if (status & 0x01) + break; + usleep_range(500, 1000); + } + + if (cnt == 0) { + pr_err("%s: PHY NOT ready\n", __func__); + return false; + } else { + return true; + } +} + +void msm_edp_phy_ctrl(struct edp_phy *phy, int enable) +{ + DBG("enable=%d", enable); + if (enable) { + /* Reset */ + edp_write(phy->base + REG_EDP_PHY_CTRL, + EDP_PHY_CTRL_SW_RESET | EDP_PHY_CTRL_SW_RESET_PLL); + /* Make sure fully reset */ + wmb(); + usleep_range(500, 1000); + edp_write(phy->base + REG_EDP_PHY_CTRL, 0x000); + edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0x3f); + edp_write(phy->base + REG_EDP_PHY_GLB_CFG, 0x1); + } else { + edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0xc0); + } +} + +/* voltage mode and pre emphasis cfg */ +void msm_edp_phy_vm_pe_init(struct edp_phy *phy) +{ + edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, 0x3); + edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, 0x64); + edp_write(phy->base + REG_EDP_PHY_GLB_MISC9, 0x6c); +} + +void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1) +{ + edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, v0); + edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, v1); +} + +void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane) +{ + u32 i; + u32 data; + + if (up) + data = 0; /* power up */ + else + data = 0x7; /* power down */ + + for (i = 0; i < max_lane; i++) + edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data); + + /* power down unused lane */ + data = 0x7; /* power down */ + for (i = max_lane; i < EDP_MAX_LANE; i++) + edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data); +} + +void *msm_edp_phy_init(struct device *dev, void __iomem *regbase) +{ + struct edp_phy *phy = NULL; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return NULL; + + phy->base = regbase; + return phy; +} + diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 062c68725376..814536202efe 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. 
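msm_edp_ctrl_pixel_clock_valid() above picks a divider table for the negotiated link rate and accepts the first entry whose rate lies within 1% of the requested pixel clock, returning that entry's M/N pair. Below is a stand-alone model of the tolerance check; the table contents are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical divider table: rates in kHz with their M/N values. */
struct pixel_clk_div {
	uint32_t rate;
	uint32_t m;
	uint32_t n;
};

static const struct pixel_clk_div divs[] = {
	{  65000, 13, 54 },
	{  74250, 10, 33 },
	{ 148500, 20, 33 },
};

/* Accept the first entry within tolerance_pct percent of the requested
 * rate, as the driver does with a 1% bound. */
static bool pixel_clock_valid(uint32_t rate_khz, uint32_t tolerance_pct,
			      uint32_t *m, uint32_t *n)
{
	for (size_t i = 0; i < sizeof(divs) / sizeof(divs[0]); i++) {
		uint32_t err = (uint32_t)labs((long)divs[i].rate - (long)rate_khz);

		if (divs[i].rate * tolerance_pct / 100 >= err) {
			if (m)
				*m = divs[i].m;
			if (n)
				*n = divs[i].n;
			return true;
		}
	}
	return false;
}

int main(void)
{
	uint32_t m, n;

	if (pixel_clock_valid(74000, 1, &m, &n))
		printf("ok: m=%u n=%u\n", m, n);
	else
		printf("unsupported pixel clock\n");
	return 0;
}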
* Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -106,7 +107,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) goto fail; } - BUG_ON(config->hpd_reg_cnt > ARRAY_SIZE(hdmi->hpd_regs)); + hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) * + config->hpd_reg_cnt, GFP_KERNEL); + if (!hdmi->hpd_regs) { + ret = -ENOMEM; + goto fail; + } for (i = 0; i < config->hpd_reg_cnt; i++) { struct regulator *reg; @@ -122,7 +128,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) hdmi->hpd_regs[i] = reg; } - BUG_ON(config->pwr_reg_cnt > ARRAY_SIZE(hdmi->pwr_regs)); + hdmi->pwr_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_regs[0]) * + config->pwr_reg_cnt, GFP_KERNEL); + if (!hdmi->pwr_regs) { + ret = -ENOMEM; + goto fail; + } for (i = 0; i < config->pwr_reg_cnt; i++) { struct regulator *reg; @@ -138,7 +149,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) hdmi->pwr_regs[i] = reg; } - BUG_ON(config->hpd_clk_cnt > ARRAY_SIZE(hdmi->hpd_clks)); + hdmi->hpd_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_clks[0]) * + config->hpd_clk_cnt, GFP_KERNEL); + if (!hdmi->hpd_clks) { + ret = -ENOMEM; + goto fail; + } for (i = 0; i < config->hpd_clk_cnt; i++) { struct clk *clk; @@ -153,7 +169,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) hdmi->hpd_clks[i] = clk; } - BUG_ON(config->pwr_clk_cnt > ARRAY_SIZE(hdmi->pwr_clks)); + hdmi->pwr_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_clks[0]) * + config->pwr_clk_cnt, GFP_KERNEL); + if (!hdmi->pwr_clks) { + ret = -ENOMEM; + goto fail; + } for (i = 0; i < config->pwr_clk_cnt; i++) { struct clk *clk; @@ -247,9 +268,9 @@ int hdmi_modeset_init(struct hdmi *hdmi, return 0; fail: - /* bridge/connector are normally destroyed by drm: */ + /* bridge is normally destroyed by drm: */ if (hdmi->bridge) { - hdmi->bridge->funcs->destroy(hdmi->bridge); + hdmi_bridge_destroy(hdmi->bridge); hdmi->bridge = NULL; } if (hdmi->connector) { @@ -266,6 +287,57 @@ fail: #include <linux/of_gpio.h> +#define HDMI_CFG(item, entry) \ + .item ## _names = item ##_names_ ## entry, \ + .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry) + +static struct hdmi_platform_config hdmi_tx_8660_config = { + .phy_init = hdmi_phy_8x60_init, +}; + +static const char *hpd_reg_names_8960[] = {"core-vdda", "hdmi-mux"}; +static const char *hpd_clk_names_8960[] = {"core_clk", "master_iface_clk", "slave_iface_clk"}; + +static struct hdmi_platform_config hdmi_tx_8960_config = { + .phy_init = hdmi_phy_8960_init, + HDMI_CFG(hpd_reg, 8960), + HDMI_CFG(hpd_clk, 8960), +}; + +static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"}; +static const char *hpd_reg_names_8x74[] = {"hpd-gdsc", "hpd-5v"}; +static const char *pwr_clk_names_8x74[] = {"extp_clk", "alt_iface_clk"}; +static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"}; +static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0}; + +static struct hdmi_platform_config hdmi_tx_8074_config = { + .phy_init = hdmi_phy_8x74_init, + HDMI_CFG(pwr_reg, 8x74), + HDMI_CFG(hpd_reg, 8x74), + HDMI_CFG(pwr_clk, 8x74), + HDMI_CFG(hpd_clk, 8x74), + .hpd_freq = hpd_clk_freq_8x74, +}; + +static const char *hpd_reg_names_8084[] = {"hpd-gdsc", "hpd-5v", "hpd-5v-en"}; + +static struct hdmi_platform_config hdmi_tx_8084_config = { + .phy_init = hdmi_phy_8x74_init, + HDMI_CFG(pwr_reg, 8x74), + HDMI_CFG(hpd_reg, 8084), + HDMI_CFG(pwr_clk, 8x74), + HDMI_CFG(hpd_clk, 8x74), + .hpd_freq = hpd_clk_freq_8x74, +}; + +static const struct of_device_id 
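hdmi_init() above stops relying on fixed-size regulator/clock arrays guarded by BUG_ON() and instead sizes the arrays from the per-SoC counts with devm_kzalloc(). A sketch of that shape, using the overflow-checked devm_kcalloc() spelling of the same allocation, is below; the structure and field names are simplified placeholders.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

struct my_cfg {
	const char * const *hpd_reg_names;
	int hpd_reg_cnt;
};

struct my_hdmi {
	struct regulator **hpd_regs;	/* sized at runtime, not a fixed array */
};

static int alloc_hpd_regs(struct device *dev, struct my_hdmi *hdmi,
			  const struct my_cfg *cfg)
{
	int i;

	hdmi->hpd_regs = devm_kcalloc(dev, cfg->hpd_reg_cnt,
				      sizeof(*hdmi->hpd_regs), GFP_KERNEL);
	if (!hdmi->hpd_regs)
		return -ENOMEM;

	for (i = 0; i < cfg->hpd_reg_cnt; i++) {
		struct regulator *reg;

		reg = devm_regulator_get(dev, cfg->hpd_reg_names[i]);
		if (IS_ERR(reg))
			return PTR_ERR(reg);

		hdmi->hpd_regs[i] = reg;
	}
	return 0;
}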
dt_match[] = { + { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config }, + { .compatible = "qcom,hdmi-tx-8074", .data = &hdmi_tx_8074_config }, + { .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config }, + { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config }, + {} +}; + #ifdef CONFIG_OF static int get_gpio(struct device *dev, struct device_node *of_node, const char *name) { @@ -288,50 +360,31 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = dev_get_drvdata(master); struct msm_drm_private *priv = drm->dev_private; - static struct hdmi_platform_config config = {}; + static struct hdmi_platform_config *hdmi_cfg; struct hdmi *hdmi; #ifdef CONFIG_OF struct device_node *of_node = dev->of_node; + const struct of_device_id *match; - if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) { - static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"}; - static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"}; - static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"}; - static unsigned long hpd_clk_freq[] = {0, 19200000, 0}; - static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"}; - config.phy_init = hdmi_phy_8x74_init; - config.hpd_reg_names = hpd_reg_names; - config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); - config.pwr_reg_names = pwr_reg_names; - config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names); - config.hpd_clk_names = hpd_clk_names; - config.hpd_freq = hpd_clk_freq; - config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); - config.pwr_clk_names = pwr_clk_names; - config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names); - } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) { - static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"}; - static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"}; - config.phy_init = hdmi_phy_8960_init; - config.hpd_reg_names = hpd_reg_names; - config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); - config.hpd_clk_names = hpd_clk_names; - config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); - } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8660")) { - config.phy_init = hdmi_phy_8x60_init; + match = of_match_node(dt_match, of_node); + if (match && match->data) { + hdmi_cfg = (struct hdmi_platform_config *)match->data; + DBG("hdmi phy: %s", match->compatible); } else { dev_err(dev, "unknown phy: %s\n", of_node->name); + return -ENXIO; } - config.mmio_name = "core_physical"; - config.ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk"); - config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data"); - config.hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd"); - config.mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en"); - config.mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel"); - config.mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm"); + hdmi_cfg->mmio_name = "core_physical"; + hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk"); + hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data"); + hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd"); + hdmi_cfg->mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en"); + hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel"); + hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm"); #else + static struct hdmi_platform_config config = {}; static const char *hpd_clk_names[] = { "core_clk", "master_iface_clk", 
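The dt_match[] table above attaches a per-SoC hdmi_platform_config to each compatible string through .data, so hdmi_bind() needs a single of_match_node() lookup instead of a chain of of_device_is_compatible() tests. The isolated sketch below shows the same pattern; of_device_get_match_data() is an equivalent, shorter way to do the lookup, and the names here are placeholders.

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct soc_cfg {
	int hpd_reg_cnt;
};

static const struct soc_cfg cfg_a = { .hpd_reg_cnt = 2 };
static const struct soc_cfg cfg_b = { .hpd_reg_cnt = 3 };

static const struct of_device_id my_dt_match[] = {
	{ .compatible = "vendor,block-a", .data = &cfg_a },
	{ .compatible = "vendor,block-b", .data = &cfg_b },
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct soc_cfg *cfg;

	/* One lookup replaces a chain of of_device_is_compatible() tests. */
	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -ENXIO;

	dev_info(&pdev->dev, "hpd_reg_cnt=%d\n", cfg->hpd_reg_cnt);
	return 0;
}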
"slave_iface_clk", }; @@ -377,12 +430,15 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) config.mux_en_gpio = -1; config.mux_sel_gpio = -1; } + hdmi_cfg = &config; #endif - dev->platform_data = &config; + dev->platform_data = hdmi_cfg; + hdmi = hdmi_init(to_platform_device(dev)); if (IS_ERR(hdmi)) return PTR_ERR(hdmi); priv->hdmi = hdmi; + return 0; } @@ -413,13 +469,6 @@ static int hdmi_dev_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id dt_match[] = { - { .compatible = "qcom,hdmi-tx-8074" }, - { .compatible = "qcom,hdmi-tx-8960" }, - { .compatible = "qcom,hdmi-tx-8660" }, - {} -}; - static struct platform_driver hdmi_driver = { .probe = hdmi_dev_probe, .remove = hdmi_dev_remove, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 43e654f751b7..68fdfb3622a5 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -52,10 +52,10 @@ struct hdmi { void __iomem *mmio; - struct regulator *hpd_regs[2]; - struct regulator *pwr_regs[2]; - struct clk *hpd_clks[3]; - struct clk *pwr_clks[2]; + struct regulator **hpd_regs; + struct regulator **pwr_regs; + struct clk **hpd_clks; + struct clk **pwr_clks; struct hdmi_phy *phy; struct i2c_adapter *i2c; @@ -146,6 +146,7 @@ void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate); */ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi); +void hdmi_bridge_destroy(struct drm_bridge *bridge); /* * hdmi connector: diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h index 5b0844befbab..350988740e9f 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h @@ -8,18 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) -Copyright (C) 2013-2014 by the 
following authors: +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -45,12 +46,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. enum hdmi_hdcp_key_state { - NO_KEYS = 0, - NOT_CHECKED = 1, - CHECKING = 2, - KEYS_VALID = 3, - AKSV_INVALID = 4, - CHECKSUM_MISMATCH = 5, + HDCP_KEYS_STATE_NO_KEYS = 0, + HDCP_KEYS_STATE_NOT_CHECKED = 1, + HDCP_KEYS_STATE_CHECKING = 2, + HDCP_KEYS_STATE_VALID = 3, + HDCP_KEYS_STATE_AKSV_NOT_VALID = 4, + HDCP_KEYS_STATE_CHKSUM_MISMATCH = 5, + HDCP_KEYS_STATE_PROD_AKSV = 6, + HDCP_KEYS_STATE_RESERVED = 7, }; enum hdmi_ddc_read_write { @@ -199,11 +202,29 @@ static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val) #define HDMI_HDCP_CTRL_ENABLE 0x00000001 #define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100 +#define REG_HDMI_HDCP_DEBUG_CTRL 0x00000114 +#define HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER 0x00000004 + #define REG_HDMI_HDCP_INT_CTRL 0x00000118 +#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT 0x00000001 +#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK 0x00000002 +#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK 0x00000004 +#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT 0x00000010 +#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK 0x00000020 +#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK 0x00000040 +#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK 0x00000080 +#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT 0x00000100 +#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_ACK 0x00000200 +#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_MASK 0x00000400 +#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT 0x00001000 +#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_ACK 0x00002000 +#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_MASK 0x00004000 #define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c #define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100 #define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200 +#define HDMI_HDCP_LINK0_STATUS_RI_MATCHES 0x00001000 +#define HDMI_HDCP_LINK0_STATUS_V_MATCHES 0x00100000 #define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000 #define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28 static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val) @@ -211,9 +232,56 @@ static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK; } +#define REG_HDMI_HDCP_DDC_CTRL_0 0x00000120 +#define HDMI_HDCP_DDC_CTRL_0_DISABLE 0x00000001 + +#define REG_HDMI_HDCP_DDC_CTRL_1 0x00000124 +#define HDMI_HDCP_DDC_CTRL_1_FAILED_ACK 0x00000001 + +#define REG_HDMI_HDCP_DDC_STATUS 0x00000128 +#define HDMI_HDCP_DDC_STATUS_XFER_REQ 0x00000010 +#define HDMI_HDCP_DDC_STATUS_XFER_DONE 0x00000400 +#define HDMI_HDCP_DDC_STATUS_ABORTED 0x00001000 +#define HDMI_HDCP_DDC_STATUS_TIMEOUT 0x00002000 +#define HDMI_HDCP_DDC_STATUS_NACK0 0x00004000 +#define HDMI_HDCP_DDC_STATUS_NACK1 0x00008000 +#define HDMI_HDCP_DDC_STATUS_FAILED 0x00010000 + +#define REG_HDMI_HDCP_ENTROPY_CTRL0 0x0000012c + +#define REG_HDMI_HDCP_ENTROPY_CTRL1 0x0000025c + #define REG_HDMI_HDCP_RESET 0x00000130 #define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001 +#define REG_HDMI_HDCP_RCVPORT_DATA0 0x00000134 + +#define REG_HDMI_HDCP_RCVPORT_DATA1 0x00000138 + +#define REG_HDMI_HDCP_RCVPORT_DATA2_0 0x0000013c + +#define REG_HDMI_HDCP_RCVPORT_DATA2_1 0x00000140 + +#define REG_HDMI_HDCP_RCVPORT_DATA3 0x00000144 + +#define REG_HDMI_HDCP_RCVPORT_DATA4 0x00000148 + +#define REG_HDMI_HDCP_RCVPORT_DATA5 0x0000014c + +#define 
REG_HDMI_HDCP_RCVPORT_DATA6 0x00000150 + +#define REG_HDMI_HDCP_RCVPORT_DATA7 0x00000154 + +#define REG_HDMI_HDCP_RCVPORT_DATA8 0x00000158 + +#define REG_HDMI_HDCP_RCVPORT_DATA9 0x0000015c + +#define REG_HDMI_HDCP_RCVPORT_DATA10 0x00000160 + +#define REG_HDMI_HDCP_RCVPORT_DATA11 0x00000164 + +#define REG_HDMI_HDCP_RCVPORT_DATA12 0x00000168 + #define REG_HDMI_VENSPEC_INFO0 0x0000016c #define REG_HDMI_VENSPEC_INFO1 0x00000170 @@ -266,6 +334,7 @@ static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val) #define HDMI_DDC_SW_STATUS_NACK3 0x00008000 #define REG_HDMI_DDC_HW_STATUS 0x0000021c +#define HDMI_DDC_HW_STATUS_DONE 0x00000008 #define REG_HDMI_DDC_SPEED 0x00000220 #define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003 @@ -329,6 +398,15 @@ static inline uint32_t HDMI_DDC_DATA_INDEX(uint32_t val) } #define HDMI_DDC_DATA_INDEX_WRITE 0x80000000 +#define REG_HDMI_HDCP_SHA_CTRL 0x0000023c + +#define REG_HDMI_HDCP_SHA_STATUS 0x00000240 +#define HDMI_HDCP_SHA_STATUS_BLOCK_DONE 0x00000001 +#define HDMI_HDCP_SHA_STATUS_COMP_DONE 0x00000010 + +#define REG_HDMI_HDCP_SHA_DATA 0x00000244 +#define HDMI_HDCP_SHA_DATA_DONE 0x00000001 + #define REG_HDMI_HPD_INT_STATUS 0x00000250 #define HDMI_HPD_INT_STATUS_INT 0x00000001 #define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002 @@ -359,6 +437,10 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val) return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK; } +#define REG_HDMI_HDCP_SW_UPPER_AKSV 0x00000284 + +#define REG_HDMI_HDCP_SW_LOWER_AKSV 0x00000288 + #define REG_HDMI_CEC_STATUS 0x00000298 #define REG_HDMI_CEC_INT 0x0000029c diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 6902ad6da710..a7a1d8267cf0 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -23,11 +23,8 @@ struct hdmi_bridge { }; #define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base) -static void hdmi_bridge_destroy(struct drm_bridge *bridge) +void hdmi_bridge_destroy(struct drm_bridge *bridge) { - struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); - drm_bridge_cleanup(bridge); - kfree(hdmi_bridge); } static void power_on(struct drm_bridge *bridge) @@ -200,7 +197,6 @@ static const struct drm_bridge_funcs hdmi_bridge_funcs = { .disable = hdmi_bridge_disable, .post_disable = hdmi_bridge_post_disable, .mode_set = hdmi_bridge_mode_set, - .destroy = hdmi_bridge_destroy, }; @@ -211,7 +207,8 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi) struct hdmi_bridge *hdmi_bridge; int ret; - hdmi_bridge = kzalloc(sizeof(*hdmi_bridge), GFP_KERNEL); + hdmi_bridge = devm_kzalloc(hdmi->dev->dev, + sizeof(*hdmi_bridge), GFP_KERNEL); if (!hdmi_bridge) { ret = -ENOMEM; goto fail; @@ -220,8 +217,11 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi) hdmi_bridge->hdmi = hdmi; bridge = &hdmi_bridge->base; + bridge->funcs = &hdmi_bridge_funcs; - drm_bridge_init(hdmi->dev, bridge, &hdmi_bridge_funcs); + ret = drm_bridge_attach(hdmi->dev, bridge); + if (ret) + goto fail; return bridge; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index b4e70e0e3cfa..b62cdb968614 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -386,7 +386,7 @@ hdmi_connector_best_encoder(struct drm_connector *connector) } static const struct drm_connector_funcs hdmi_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .detect = 
hdmi_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = hdmi_connector_destroy, @@ -426,7 +426,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi) connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - connector->interlace_allowed = 1; + connector->interlace_allowed = 0; connector->doublescan_allowed = 0; drm_connector_register(connector); diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h index 29bd796797de..43bb54a9afbf 100644 --- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h +++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h @@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) Copyright (C) 2013 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h index a4a7f8c7122a..1d39174d91fb 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h @@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- 
/home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -72,6 +73,18 @@ enum mdp4_cursor_format { CURSOR_XRGB = 2, }; +enum mdp4_frame_format { + FRAME_LINEAR = 0, + FRAME_TILE_ARGB_4X4 = 1, + FRAME_TILE_YCBCR_420 = 2, +}; + +enum mdp4_scale_unit { + SCALE_FIR = 0, + SCALE_MN_PHASE = 1, + SCALE_PIXEL_RPT = 2, +}; + enum mdp4_dma { DMA_P = 0, DMA_S = 1, @@ -637,6 +650,8 @@ static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00 static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; } +static inline uint32_t REG_MDP4_PIPE_SRCP3_BASE(enum mdp4_pipe i0) { return 0x0002001c + 0x10000*i0; } + static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; } #define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff #define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0 @@ -720,7 +735,25 @@ static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) } #define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 #define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 +#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK 0x00180000 +#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT 19 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT) & MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK; +} #define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000 +#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x0c000000 +#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 26 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK 0x60000000 +#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT 29 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(enum mdp4_frame_format val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT) & MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK; +} static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; } #define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff @@ -751,6 +784,18 @@ static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val) static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; } #define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001 #define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002 +#define 
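The generated helpers above all share one convention: a field value is placed with ((val) << FIELD__SHIFT) & FIELD__MASK, and a register image is the OR of its fields. A tiny stand-alone example of composing a value that way, with invented field positions, is below.

#include <stdint.h>
#include <stdio.h>

/* Invented two-bit fields at bits 19 and 26, mirroring the header's style. */
#define FMT_FETCH_PLANES__MASK  0x00180000u
#define FMT_FETCH_PLANES__SHIFT 19
#define FMT_CHROMA_SAMP__MASK   0x0c000000u
#define FMT_CHROMA_SAMP__SHIFT  26

static inline uint32_t FMT_FETCH_PLANES(uint32_t val)
{
	return (val << FMT_FETCH_PLANES__SHIFT) & FMT_FETCH_PLANES__MASK;
}

static inline uint32_t FMT_CHROMA_SAMP(uint32_t val)
{
	return (val << FMT_CHROMA_SAMP__SHIFT) & FMT_CHROMA_SAMP__MASK;
}

int main(void)
{
	/* Compose a register value from independent fields, as the driver
	 * does when programming PIPE_SRC_FORMAT. */
	uint32_t reg = FMT_FETCH_PLANES(2) | FMT_CHROMA_SAMP(3);

	printf("SRC_FORMAT = 0x%08x\n", reg);
	return 0;
}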
MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK 0x0000000c +#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT 2 +static inline uint32_t MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(enum mdp4_scale_unit val) +{ + return ((val) << MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK; +} +#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK 0x00000030 +#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT 4 +static inline uint32_t MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(enum mdp4_scale_unit val) +{ + return ((val) << MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK; +} #define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200 #define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400 #define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800 diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 20ae50385e5b..73afa21822b4 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -140,26 +140,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc) kfree(mdp4_crtc); } -static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - bool enabled = (mode == DRM_MODE_DPMS_ON); - - DBG("%s: mode=%d", mdp4_crtc->name, mode); - - if (enabled != mdp4_crtc->enabled) { - if (enabled) { - mdp4_enable(mdp4_kms); - mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err); - } else { - mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err); - mdp4_disable(mdp4_kms); - } - mdp4_crtc->enabled = enabled; - } -} - static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -304,23 +284,38 @@ static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc) } } -static void mdp4_crtc_prepare(struct drm_crtc *crtc) +static void mdp4_crtc_disable(struct drm_crtc *crtc) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + DBG("%s", mdp4_crtc->name); - /* make sure we hold a ref to mdp clks while setting up mode: */ - drm_crtc_vblank_get(crtc); - mdp4_enable(get_kms(crtc)); - mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + + if (WARN_ON(!mdp4_crtc->enabled)) + return; + + mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err); + mdp4_disable(mdp4_kms); + + mdp4_crtc->enabled = false; } -static void mdp4_crtc_commit(struct drm_crtc *crtc) +static void mdp4_crtc_enable(struct drm_crtc *crtc) { - mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + + DBG("%s", mdp4_crtc->name); + + if (WARN_ON(mdp4_crtc->enabled)) + return; + + mdp4_enable(mdp4_kms); + mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err); + crtc_flush(crtc); - /* drop the ref to mdp clk's that we got in prepare: */ - mdp4_disable(get_kms(crtc)); - drm_crtc_vblank_put(crtc); + + mdp4_crtc->enabled = true; } static int mdp4_crtc_atomic_check(struct drm_crtc *crtc, @@ -504,13 +499,10 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = { }; static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { - .dpms = mdp4_crtc_dpms, .mode_fixup = mdp4_crtc_mode_fixup, .mode_set_nofb = mdp4_crtc_mode_set_nofb, - .mode_set = drm_helper_crtc_mode_set, - .mode_set_base = drm_helper_crtc_mode_set_base, - .prepare = mdp4_crtc_prepare, - .commit = mdp4_crtc_commit, + .disable = mdp4_crtc_disable, + .enable = mdp4_crtc_enable, .atomic_check = mdp4_crtc_atomic_check, .atomic_begin = 
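mdp4_crtc above trades its .dpms callback for the atomic-helper .enable/.disable pair, each one guarded by WARN_ON() on the cached state so a double enable or disable is caught instead of unbalancing the clock and IRQ references. A minimal sketch of that shape follows; the hardware callback is a stand-in.

#include <linux/bug.h>
#include <linux/types.h>

struct my_crtc_state {
	bool enabled;
};

/* Stand-in for the clock/IRQ bookkeeping the real callbacks do. */
static void hw_power(bool on) { (void)on; }

static void my_crtc_enable(struct my_crtc_state *st)
{
	if (WARN_ON(st->enabled))
		return;		/* already on: refuse to double-count refs */

	hw_power(true);
	st->enabled = true;
}

static void my_crtc_disable(struct my_crtc_state *st)
{
	if (WARN_ON(!st->enabled))
		return;

	hw_power(false);
	st->enabled = false;
}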
mdp4_crtc_atomic_begin, .atomic_flush = mdp4_crtc_atomic_flush, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c index c3878420180b..7896323b2631 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c @@ -94,61 +94,6 @@ static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = { .destroy = mdp4_dtv_encoder_destroy, }; -static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - struct drm_device *dev = encoder->dev; - struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); - struct mdp4_kms *mdp4_kms = get_kms(encoder); - bool enabled = (mode == DRM_MODE_DPMS_ON); - - DBG("mode=%d", mode); - - if (enabled == mdp4_dtv_encoder->enabled) - return; - - if (enabled) { - unsigned long pc = mdp4_dtv_encoder->pixclock; - int ret; - - bs_set(mdp4_dtv_encoder, 1); - - DBG("setting src_clk=%lu", pc); - - ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc); - if (ret) - dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret); - clk_prepare_enable(mdp4_dtv_encoder->src_clk); - ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk); - if (ret) - dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret); - ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk); - if (ret) - dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret); - - mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1); - } else { - mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); - - /* - * Wait for a vsync so we know the ENABLE=0 latched before - * the (connector) source of the vsync's gets disabled, - * otherwise we end up in a funny state if we re-enable - * before the disable latches, which results that some of - * the settings changes for the new modeset (like new - * scanout buffer) don't latch properly.. - */ - mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC); - - clk_disable_unprepare(mdp4_dtv_encoder->src_clk); - clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); - clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); - - bs_set(mdp4_dtv_encoder, 0); - } - - mdp4_dtv_encoder->enabled = enabled; -} - static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -221,28 +166,78 @@ static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder, mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0); } -static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder) +static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder) { - mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + + if (WARN_ON(!mdp4_dtv_encoder->enabled)) + return; + + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. 
+ */ + mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC); + + clk_disable_unprepare(mdp4_dtv_encoder->src_clk); + clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); + clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); + + bs_set(mdp4_dtv_encoder, 0); + + mdp4_dtv_encoder->enabled = false; } -static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder) +static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder) { + struct drm_device *dev = encoder->dev; + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + unsigned long pc = mdp4_dtv_encoder->pixclock; + int ret; + + if (WARN_ON(mdp4_dtv_encoder->enabled)) + return; + mdp4_crtc_set_config(encoder->crtc, MDP4_DMA_CONFIG_R_BPC(BPC8) | MDP4_DMA_CONFIG_G_BPC(BPC8) | MDP4_DMA_CONFIG_B_BPC(BPC8) | MDP4_DMA_CONFIG_PACK(0x21)); mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1); - mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON); + + bs_set(mdp4_dtv_encoder, 1); + + DBG("setting src_clk=%lu", pc); + + ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc); + if (ret) + dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret); + clk_prepare_enable(mdp4_dtv_encoder->src_clk); + ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk); + if (ret) + dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret); + ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk); + if (ret) + dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret); + + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1); + + mdp4_dtv_encoder->enabled = true; } static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = { - .dpms = mdp4_dtv_encoder_dpms, .mode_fixup = mdp4_dtv_encoder_mode_fixup, .mode_set = mdp4_dtv_encoder_mode_set, - .prepare = mdp4_dtv_encoder_prepare, - .commit = mdp4_dtv_encoder_commit, + .enable = mdp4_dtv_encoder_enable, + .disable = mdp4_dtv_encoder_disable, }; long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index a62109e4ae0d..d847b9436194 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -125,6 +125,38 @@ out: return ret; } +static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + int i, ncrtcs = state->dev->mode_config.num_crtc; + + mdp4_enable(mdp4_kms); + + /* see 119ecb7fd */ + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc *crtc = state->crtcs[i]; + if (!crtc) + continue; + drm_crtc_vblank_get(crtc); + } +} + +static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + int i, ncrtcs = state->dev->mode_config.num_crtc; + + /* see 119ecb7fd */ + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc *crtc = state->crtcs[i]; + if (!crtc) + continue; + drm_crtc_vblank_put(crtc); + } + + mdp4_disable(mdp4_kms); +} + static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder) { @@ -161,6 +193,8 @@ static const struct mdp_kms_funcs kms_funcs = { .irq = mdp4_irq, .enable_vblank = mdp4_enable_vblank, .disable_vblank = mdp4_disable_vblank, + .prepare_commit = mdp4_prepare_commit, + .complete_commit = mdp4_complete_commit, .get_format = mdp_get_format, .round_pixclk = mdp4_round_pixclk, .preclose = mdp4_preclose, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h 
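The new mdp4_prepare_commit()/mdp4_complete_commit() hooks above bracket each atomic commit: the MDP clocks are enabled and a vblank reference taken for every CRTC in the state before the flush, and both are released symmetrically when the commit completes. The sketch below keeps only that bracketing discipline; the clock helpers are placeholders and the kms plumbing is omitted.

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>

/* Placeholder clock hooks standing in for mdp4_enable()/mdp4_disable(). */
static void my_clk_enable(void) { }
static void my_clk_disable(void) { }

static void my_prepare_commit(struct drm_atomic_state *state)
{
	int i, ncrtcs = state->dev->mode_config.num_crtc;

	my_clk_enable();

	/* Hold a vblank reference on every CRTC touched by this commit. */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];

		if (crtc)
			drm_crtc_vblank_get(crtc);
	}
}

static void my_complete_commit(struct drm_atomic_state *state)
{
	int i, ncrtcs = state->dev->mode_config.num_crtc;

	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];

		if (crtc)
			drm_crtc_vblank_put(crtc);
	}

	my_clk_disable();
}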
b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index cbd77bc626d5..0a5c58bde7a9 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -175,14 +175,25 @@ irqreturn_t mdp4_irq(struct msm_kms *kms); int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); +static inline bool pipe_supports_yuv(enum mdp4_pipe pipe) +{ + switch (pipe) { + case VG1: + case VG2: + case VG3: + case VG4: + return true; + default: + return false; + } +} + static inline uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats, uint32_t max_formats) { - /* TODO when we have YUV, we need to filter supported formats - * based on pipe_id.. - */ - return mdp_get_formats(pixel_formats, max_formats); + return mdp_get_formats(pixel_formats, max_formats, + !pipe_supports_yuv(pipe_id)); } void mdp4_plane_install_properties(struct drm_plane *plane, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c index 41f6436754fc..60ec8222c9f6 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c @@ -259,77 +259,6 @@ static void setup_phy(struct drm_encoder *encoder) mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0); } -static void mdp4_lcdc_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - struct drm_device *dev = encoder->dev; - struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = - to_mdp4_lcdc_encoder(encoder); - struct mdp4_kms *mdp4_kms = get_kms(encoder); - struct drm_panel *panel = mdp4_lcdc_encoder->panel; - bool enabled = (mode == DRM_MODE_DPMS_ON); - int i, ret; - - DBG("mode=%d", mode); - - if (enabled == mdp4_lcdc_encoder->enabled) - return; - - if (enabled) { - unsigned long pc = mdp4_lcdc_encoder->pixclock; - int ret; - - bs_set(mdp4_lcdc_encoder, 1); - - for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { - ret = regulator_enable(mdp4_lcdc_encoder->regs[i]); - if (ret) - dev_err(dev->dev, "failed to enable regulator: %d\n", ret); - } - - DBG("setting lcdc_clk=%lu", pc); - ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc); - if (ret) - dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret); - ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk); - if (ret) - dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret); - - if (panel) - drm_panel_enable(panel); - - setup_phy(encoder); - - mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1); - } else { - mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); - - if (panel) - drm_panel_disable(panel); - - /* - * Wait for a vsync so we know the ENABLE=0 latched before - * the (connector) source of the vsync's gets disabled, - * otherwise we end up in a funny state if we re-enable - * before the disable latches, which results that some of - * the settings changes for the new modeset (like new - * scanout buffer) don't latch properly.. 
- */ - mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC); - - clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk); - - for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { - ret = regulator_disable(mdp4_lcdc_encoder->regs[i]); - if (ret) - dev_err(dev->dev, "failed to disable regulator: %d\n", ret); - } - - bs_set(mdp4_lcdc_encoder, 0); - } - - mdp4_lcdc_encoder->enabled = enabled; -} - static bool mdp4_lcdc_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -403,13 +332,59 @@ static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder, mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VEND, 0); } -static void mdp4_lcdc_encoder_prepare(struct drm_encoder *encoder) +static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) { - mdp4_lcdc_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + struct drm_device *dev = encoder->dev; + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + struct drm_panel *panel = mdp4_lcdc_encoder->panel; + int i, ret; + + if (WARN_ON(!mdp4_lcdc_encoder->enabled)) + return; + + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); + + if (panel) + drm_panel_disable(panel); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. + */ + mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC); + + clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk); + + for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { + ret = regulator_disable(mdp4_lcdc_encoder->regs[i]); + if (ret) + dev_err(dev->dev, "failed to disable regulator: %d\n", ret); + } + + bs_set(mdp4_lcdc_encoder, 0); + + mdp4_lcdc_encoder->enabled = false; } -static void mdp4_lcdc_encoder_commit(struct drm_encoder *encoder) +static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) { + struct drm_device *dev = encoder->dev; + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + unsigned long pc = mdp4_lcdc_encoder->pixclock; + struct mdp4_kms *mdp4_kms = get_kms(encoder); + struct drm_panel *panel = mdp4_lcdc_encoder->panel; + int i, ret; + + if (WARN_ON(mdp4_lcdc_encoder->enabled)) + return; + /* TODO: hard-coded for 18bpp: */ mdp4_crtc_set_config(encoder->crtc, MDP4_DMA_CONFIG_R_BPC(BPC6) | @@ -420,15 +395,38 @@ static void mdp4_lcdc_encoder_commit(struct drm_encoder *encoder) MDP4_DMA_CONFIG_DEFLKR_EN | MDP4_DMA_CONFIG_DITHER_EN); mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0); - mdp4_lcdc_encoder_dpms(encoder, DRM_MODE_DPMS_ON); + + bs_set(mdp4_lcdc_encoder, 1); + + for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { + ret = regulator_enable(mdp4_lcdc_encoder->regs[i]); + if (ret) + dev_err(dev->dev, "failed to enable regulator: %d\n", ret); + } + + DBG("setting lcdc_clk=%lu", pc); + ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc); + if (ret) + dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret); + ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk); + if (ret) + dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret); + + if (panel) + drm_panel_enable(panel); + + setup_phy(encoder); + + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1); + + mdp4_lcdc_encoder->enabled = true; } static const struct 
drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = { - .dpms = mdp4_lcdc_encoder_dpms, .mode_fixup = mdp4_lcdc_encoder_mode_fixup, .mode_set = mdp4_lcdc_encoder_mode_set, - .prepare = mdp4_lcdc_encoder_prepare, - .commit = mdp4_lcdc_encoder_commit, + .disable = mdp4_lcdc_encoder_disable, + .enable = mdp4_lcdc_encoder_enable, }; long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c index 4ddc28e1275b..921185133d38 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c @@ -94,7 +94,7 @@ mdp4_lvds_connector_best_encoder(struct drm_connector *connector) } static const struct drm_connector_funcs mdp4_lvds_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .detect = mdp4_lvds_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = mdp4_lvds_connector_destroy, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 1e5ebe83647d..cde25009203a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -17,6 +17,8 @@ #include "mdp4_kms.h" +#define DOWN_SCALE_MAX 8 +#define UP_SCALE_MAX 8 struct mdp4_plane { struct drm_plane base; @@ -136,10 +138,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane, struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); struct mdp4_kms *mdp4_kms = get_kms(plane); enum mdp4_pipe pipe = mdp4_plane->pipe; - uint32_t iova = msm_framebuffer_iova(fb, mdp4_kms->id, 0); - - DBG("%s: set_scanout: %08x (%u)", mdp4_plane->name, - iova, fb->pitches[0]); mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe), MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | @@ -149,11 +147,45 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane, MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) | MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), + msm_framebuffer_iova(fb, mdp4_kms->id, 0)); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe), + msm_framebuffer_iova(fb, mdp4_kms->id, 1)); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe), + msm_framebuffer_iova(fb, mdp4_kms->id, 2)); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe), + msm_framebuffer_iova(fb, mdp4_kms->id, 3)); plane->fb = fb; } +static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms, + enum mdp4_pipe pipe, struct csc_cfg *csc) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(csc->matrix); i++) { + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_MV(pipe, i), + csc->matrix[i]); + } + + for (i = 0; i < ARRAY_SIZE(csc->post_bias) ; i++) { + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_BV(pipe, i), + csc->pre_bias[i]); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_BV(pipe, i), + csc->post_bias[i]); + } + + for (i = 0; i < ARRAY_SIZE(csc->post_clamp) ; i++) { + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_LV(pipe, i), + csc->pre_clamp[i]); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_LV(pipe, i), + csc->post_clamp[i]); + } +} + #define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000 static int mdp4_plane_mode_set(struct drm_plane *plane, @@ -163,6 +195,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { + struct drm_device *dev = plane->dev; struct mdp4_plane *mdp4_plane = 
to_mdp4_plane(plane); struct mdp4_kms *mdp4_kms = get_kms(plane); enum mdp4_pipe pipe = mdp4_plane->pipe; @@ -186,14 +219,59 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, fb->base.id, src_x, src_y, src_w, src_h, crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); + format = to_mdp_format(msm_framebuffer_format(fb)); + + if (src_w > (crtc_w * DOWN_SCALE_MAX)) { + dev_err(dev->dev, "Width down scaling exceeds limits!\n"); + return -ERANGE; + } + + if (src_h > (crtc_h * DOWN_SCALE_MAX)) { + dev_err(dev->dev, "Height down scaling exceeds limits!\n"); + return -ERANGE; + } + + if (crtc_w > (src_w * UP_SCALE_MAX)) { + dev_err(dev->dev, "Width up scaling exceeds limits!\n"); + return -ERANGE; + } + + if (crtc_h > (src_h * UP_SCALE_MAX)) { + dev_err(dev->dev, "Height up scaling exceeds limits!\n"); + return -ERANGE; + } + if (src_w != crtc_w) { + uint32_t sel_unit = SCALE_FIR; op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN; - /* TODO calc phasex_step */ + + if (MDP_FORMAT_IS_YUV(format)) { + if (crtc_w > src_w) + sel_unit = SCALE_PIXEL_RPT; + else if (crtc_w <= (src_w / 4)) + sel_unit = SCALE_MN_PHASE; + + op_mode |= MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(sel_unit); + phasex_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT, + src_w, crtc_w); + } } if (src_h != crtc_h) { + uint32_t sel_unit = SCALE_FIR; op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN; - /* TODO calc phasey_step */ + + if (MDP_FORMAT_IS_YUV(format)) { + + if (crtc_h > src_h) + sel_unit = SCALE_PIXEL_RPT; + else if (crtc_h <= (src_h / 4)) + sel_unit = SCALE_MN_PHASE; + + op_mode |= MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(sel_unit); + phasey_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT, + src_h, crtc_h); + } } mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe), @@ -214,8 +292,6 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, mdp4_plane_set_scanout(plane, fb); - format = to_mdp_format(msm_framebuffer_format(fb)); - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | @@ -224,6 +300,8 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) | MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | + MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) | + MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) | COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT)); mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe), @@ -232,6 +310,14 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); + if (MDP_FORMAT_IS_YUV(format)) { + struct csc_cfg *csc = mdp_get_default_csc_cfg(CSC_YUV2RGB); + + op_mode |= MDP4_PIPE_OP_MODE_SRC_YCBCR; + op_mode |= MDP4_PIPE_OP_MODE_CSC_EN; + mdp4_write_csc_config(mdp4_kms, pipe, csc); + } + mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode); mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step); mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index e87ef5512cb0..09b4a25eb553 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h @@ -8,18 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- 
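For YUV sources, mdp4_plane_mode_set() above chooses a scale unit per axis: pixel repeat when upscaling, M/N phase when downscaling by more than 4x, FIR otherwise, and derives the phase step by scaling a fixed default by src/dst with mult_frac(), after rejecting anything beyond the 8x up/down limits. The stand-alone version below models the unit selection and the phase-step arithmetic with a 64-bit intermediate, which gives the same result as mult_frac() at these magnitudes.

#include <stdint.h>
#include <stdio.h>

#define PHASE_STEP_DEFAULT 0x20000000u

enum scale_unit { SCALE_FIR, SCALE_MN_PHASE, SCALE_PIXEL_RPT };

/* mult_frac(x, n, d) in the kernel computes x * n / d while avoiding
 * overflow of the intermediate product; a 64-bit intermediate is enough
 * for these values and yields the same quotient. */
static uint32_t phase_step(uint32_t src, uint32_t dst)
{
	return (uint32_t)((uint64_t)PHASE_STEP_DEFAULT * src / dst);
}

static enum scale_unit pick_unit(uint32_t src, uint32_t dst)
{
	if (dst > src)
		return SCALE_PIXEL_RPT;	/* upscale: repeat pixels */
	if (dst <= src / 4)
		return SCALE_MN_PHASE;	/* heavy downscale */
	return SCALE_FIR;
}

int main(void)
{
	uint32_t src_w = 1920, crtc_w = 1280;

	printf("unit=%d phasex_step=0x%08x\n",
	       pick_unit(src_w, crtc_w), phase_step(src_w, crtc_w));
	return 0;
}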
/home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) -Copyright (C) 2013-2014 by the following authors: +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -88,13 +89,6 @@ enum mdp5_pack_3d { PACK_3D_COL_INT = 3, }; -enum mdp5_chroma_samp_type { - CHROMA_RGB = 0, - CHROMA_H2V1 = 1, - CHROMA_H1V2 = 2, - CHROMA_420 = 3, -}; - enum mdp5_scale_filter { SCALE_FILTER_NEAREST = 0, SCALE_FILTER_BIL = 1, @@ -135,6 +129,17 @@ enum mdp5_client_id { CID_MAX = 23, }; +enum mdp5_cursor_format { + CURSOR_FMT_ARGB8888 = 0, + CURSOR_FMT_ARGB1555 = 2, + CURSOR_FMT_ARGB4444 = 4, +}; + +enum mdp5_cursor_alpha { + CURSOR_ALPHA_CONST = 0, + CURSOR_ALPHA_PER_PIXEL = 2, +}; + enum mdp5_igc_type { IGC_VIG = 0, IGC_RGB = 1, @@ -142,6 +147,11 @@ enum mdp5_igc_type { IGC_DSPP = 3, }; +enum mdp5_data_format { + DATA_FORMAT_RGB = 0, + DATA_FORMAT_YUV = 1, +}; + #define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001 #define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002 #define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004 @@ -463,12 +473,143 @@ static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) } static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); } +static inline uint32_t REG_MDP5_PIPE_OP_MODE(enum mdp5_pipe i0) { return 0x00000200 + __offset_PIPE(i0); } +#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00080000 +#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 19 +static inline uint32_t MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(enum mdp5_data_format val) +{ + return ((val) << MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK; +} +#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00040000 +#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 18 +static inline uint32_t MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(enum mdp5_data_format val) +{ + return ((val) << 
MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK; +} +#define MDP5_PIPE_OP_MODE_CSC_1_EN 0x00020000 + static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); } static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); } static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); } +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(enum mdp5_pipe i0) { return 0x00000320 + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK; +} +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000 +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT 16 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(enum mdp5_pipe i0) { return 0x00000324 + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK; +} +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000 +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT 16 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(enum mdp5_pipe i0) { return 0x00000328 + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK; +} +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000 +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT 16 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(enum mdp5_pipe i0) { return 0x0000032c + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK; +} +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000 +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT 16 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT) & 
MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(enum mdp5_pipe i0) { return 0x00000330 + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; } +#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK 0x000000ff +#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK; +} +#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK 0x0000ff00 +#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT 8 +static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; } +#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK 0x000000ff +#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK; +} +#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK 0x0000ff00 +#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT 8 +static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; } +#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK 0x000001ff +#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; } +#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK 0x000001ff +#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK; +} + static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); } #define 
MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 #define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 @@ -618,15 +759,15 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) } #define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 #define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 -#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00780000 +#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00180000 #define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(uint32_t val) +static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(enum mdp_sspp_fetch_type val) { return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK; } #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000 #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_type val) +static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val) { return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; } @@ -753,6 +894,10 @@ static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { ret static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); } +static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000218 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x0000021c + __offset_PIPE(i0); } + static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); } static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); } @@ -839,20 +984,88 @@ static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; } static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff +#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_W(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK; +} +#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK 0xffff0000 +#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT 16 +static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_H(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK; +} static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_SIZE_ROI_W__MASK 0x0000ffff +#define MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_W(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_W__MASK; +} +#define MDP5_LM_CURSOR_SIZE_ROI_H__MASK 0xffff0000 +#define MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT 16 +static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_H(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_H__MASK; +} static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_XY_SRC_X__MASK 0x0000ffff +#define MDP5_LM_CURSOR_XY_SRC_X__SHIFT 0 +static inline uint32_t 
MDP5_LM_CURSOR_XY_SRC_X(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_XY_SRC_X__SHIFT) & MDP5_LM_CURSOR_XY_SRC_X__MASK; +} +#define MDP5_LM_CURSOR_XY_SRC_Y__MASK 0xffff0000 +#define MDP5_LM_CURSOR_XY_SRC_Y__SHIFT 16 +static inline uint32_t MDP5_LM_CURSOR_XY_SRC_Y(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_XY_SRC_Y__SHIFT) & MDP5_LM_CURSOR_XY_SRC_Y__MASK; +} static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); } +#define MDP5_LM_CURSOR_STRIDE_STRIDE__MASK 0x0000ffff +#define MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_STRIDE_STRIDE(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT) & MDP5_LM_CURSOR_STRIDE_STRIDE__MASK; +} static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); } +#define MDP5_LM_CURSOR_FORMAT_FORMAT__MASK 0x00000007 +#define MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_FORMAT_FORMAT(enum mdp5_cursor_format val) +{ + return ((val) << MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT) & MDP5_LM_CURSOR_FORMAT_FORMAT__MASK; +} static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); } static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_START_XY_X_START__MASK 0x0000ffff +#define MDP5_LM_CURSOR_START_XY_X_START__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_START_XY_X_START(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_START_XY_X_START__SHIFT) & MDP5_LM_CURSOR_START_XY_X_START__MASK; +} +#define MDP5_LM_CURSOR_START_XY_Y_START__MASK 0xffff0000 +#define MDP5_LM_CURSOR_START_XY_Y_START__SHIFT 16 +static inline uint32_t MDP5_LM_CURSOR_START_XY_Y_START(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_START_XY_Y_START__SHIFT) & MDP5_LM_CURSOR_START_XY_Y_START__MASK; +} static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN 0x00000001 +#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK 0x00000006 +#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT 1 +static inline uint32_t MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(enum mdp5_cursor_alpha val) +{ + return ((val) << MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT) & MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK; +} +#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN 0x00000008 static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 6b25f9f731ed..46fac545dc2b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -24,6 +24,9 @@ #include "drm_crtc_helper.h" #include "drm_flip_work.h" +#define CURSOR_WIDTH 64 +#define CURSOR_HEIGHT 64 + #define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */ struct mdp5_crtc { @@ -47,8 +50,21 @@ struct mdp5_crtc { #define PENDING_FLIP 0x2 atomic_t pending; + /* for unref'ing cursor bo's after scanout completes: */ + struct drm_flip_work unref_cursor_work; + struct mdp_irq vblank; struct mdp_irq err; + + struct { + /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/ + spinlock_t lock; + + /* current cursor being scanned out: */ + struct drm_gem_object *scanout_bo; + uint32_t width; + uint32_t height; + } cursor; }; #define to_mdp5_crtc(x) 
container_of(x, struct mdp5_crtc, base) @@ -129,37 +145,26 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) } } +static void unref_cursor_worker(struct drm_flip_work *work, void *val) +{ + struct mdp5_crtc *mdp5_crtc = + container_of(work, struct mdp5_crtc, unref_cursor_work); + struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base); + + msm_gem_put_iova(val, mdp5_kms->id); + drm_gem_object_unreference_unlocked(val); +} + static void mdp5_crtc_destroy(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); drm_crtc_cleanup(crtc); + drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work); kfree(mdp5_crtc); } -static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_kms *mdp5_kms = get_kms(crtc); - bool enabled = (mode == DRM_MODE_DPMS_ON); - - DBG("%s: mode=%d", mdp5_crtc->name, mode); - - if (enabled != mdp5_crtc->enabled) { - if (enabled) { - mdp5_enable(mdp5_kms); - mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); - } else { - /* set STAGE_UNUSED for all layers */ - mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000); - mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); - mdp5_disable(mdp5_kms); - } - mdp5_crtc->enabled = enabled; - } -} - static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -256,23 +261,41 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc) spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); } -static void mdp5_crtc_prepare(struct drm_crtc *crtc) +static void mdp5_crtc_disable(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + DBG("%s", mdp5_crtc->name); - /* make sure we hold a ref to mdp clks while setting up mode: */ - mdp5_enable(get_kms(crtc)); - mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + + if (WARN_ON(!mdp5_crtc->enabled)) + return; + + /* set STAGE_UNUSED for all layers */ + mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000); + + mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); + mdp5_disable(mdp5_kms); + + mdp5_crtc->enabled = false; } -static void mdp5_crtc_commit(struct drm_crtc *crtc) +static void mdp5_crtc_enable(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + DBG("%s", mdp5_crtc->name); - mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + + if (WARN_ON(mdp5_crtc->enabled)) + return; + + mdp5_enable(mdp5_kms); + mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); + crtc_flush_all(crtc); - /* drop the ref to mdp clk's that we got in prepare: */ - mdp5_disable(get_kms(crtc)); + + mdp5_crtc->enabled = true; } struct plane_state { @@ -380,6 +403,132 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc, return -EINVAL; } +static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file, uint32_t handle, + uint32_t width, uint32_t height) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct drm_gem_object *cursor_bo, *old_bo; + uint32_t blendcfg, cursor_addr, stride; + int ret, bpp, lm; + unsigned int depth; + enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; + uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); + unsigned long flags; + + if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { + dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height); + return -EINVAL; + } + + 
if (NULL == mdp5_crtc->ctl) + return -EINVAL; + + if (!handle) { + DBG("Cursor off"); + return mdp5_ctl_set_cursor(mdp5_crtc->ctl, false); + } + + cursor_bo = drm_gem_object_lookup(dev, file, handle); + if (!cursor_bo) + return -ENOENT; + + ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr); + if (ret) + return -EINVAL; + + lm = mdp5_crtc->lm; + drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp); + stride = width * (bpp >> 3); + + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); + old_bo = mdp5_crtc->cursor.scanout_bo; + + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), + MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm), + MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | + MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), + MDP5_LM_CURSOR_SIZE_ROI_H(height) | + MDP5_LM_CURSOR_SIZE_ROI_W(width)); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr); + + + blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; + blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN; + blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); + + mdp5_crtc->cursor.scanout_bo = cursor_bo; + mdp5_crtc->cursor.width = width; + mdp5_crtc->cursor.height = height; + spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); + + ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); + if (ret) + goto end; + + flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl); + crtc_flush(crtc, flush_mask); + +end: + if (old_bo) { + drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); + /* enable vblank to complete cursor work: */ + request_pending(crtc, PENDING_CURSOR); + } + return ret; +} + +static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); + uint32_t xres = crtc->mode.hdisplay; + uint32_t yres = crtc->mode.vdisplay; + uint32_t roi_w; + uint32_t roi_h; + unsigned long flags; + + x = (x > 0) ? x : 0; + y = (y > 0) ? y : 0; + + /* + * Cursor Region Of Interest (ROI) is a plane read from cursor + * buffer to render. The ROI region is determined by the visiblity of + * the cursor point. In the default Cursor image the cursor point will + * be at the top left of the cursor image, unless it is specified + * otherwise using hotspot feature. + * + * If the cursor point reaches the right (xres - x < cursor.width) or + * bottom (yres - y < cursor.height) boundary of the screen, then ROI + * width and ROI height need to be evaluated to crop the cursor image + * accordingly. 
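The comment above, which continues just below with the exact width/height formulas, describes how the cursor region of interest shrinks when the cursor nears the right or bottom edge of the screen. As a quick stand-alone sketch of the same min() clamping, with made-up screen and cursor values that are not taken from the patch:

/*
 * Illustrative only, not part of the patch: the clamping done by
 * mdp5_crtc_cursor_move() below, with assumed sample values.
 */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int cursor_w = 64, cursor_h = 64;	/* CURSOR_WIDTH/HEIGHT */
	unsigned int xres = 1920, yres = 1080;		/* mode hdisplay/vdisplay */
	unsigned int x = 1900, y = 40;			/* cursor near the right edge */

	/* same clamping as roi_w/roi_h in the patch */
	unsigned int roi_w = MIN(cursor_w, xres - x);	/* 20: only 20 px still visible */
	unsigned int roi_h = MIN(cursor_h, yres - y);	/* 64: fully visible vertically */

	printf("ROI %ux%u\n", roi_w, roi_h);
	return 0;
}
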
+ * (xres-x) will be new cursor width when x > (xres - cursor.width) + * (yres-y) will be new cursor height when y > (yres - cursor.height) + */ + roi_w = min(mdp5_crtc->cursor.width, xres - x); + roi_h = min(mdp5_crtc->cursor.height, yres - y); + + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), + MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | + MDP5_LM_CURSOR_SIZE_ROI_W(roi_w)); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm), + MDP5_LM_CURSOR_START_XY_Y_START(y) | + MDP5_LM_CURSOR_START_XY_X_START(x)); + spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); + + crtc_flush(crtc, flush_mask); + + return 0; +} + static const struct drm_crtc_funcs mdp5_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = mdp5_crtc_destroy, @@ -388,16 +537,15 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = { .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .cursor_set = mdp5_crtc_cursor_set, + .cursor_move = mdp5_crtc_cursor_move, }; static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { - .dpms = mdp5_crtc_dpms, .mode_fixup = mdp5_crtc_mode_fixup, .mode_set_nofb = mdp5_crtc_mode_set_nofb, - .mode_set = drm_helper_crtc_mode_set, - .mode_set_base = drm_helper_crtc_mode_set_base, - .prepare = mdp5_crtc_prepare, - .commit = mdp5_crtc_commit, + .prepare = mdp5_crtc_disable, + .commit = mdp5_crtc_enable, .atomic_check = mdp5_crtc_atomic_check, .atomic_begin = mdp5_crtc_atomic_begin, .atomic_flush = mdp5_crtc_atomic_flush, @@ -407,6 +555,7 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) { struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank); struct drm_crtc *crtc = &mdp5_crtc->base; + struct msm_drm_private *priv = crtc->dev->dev_private; unsigned pending; mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank); @@ -416,6 +565,9 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) if (pending & PENDING_FLIP) { complete_flip(crtc, NULL); } + + if (pending & PENDING_CURSOR) + drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq); } static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) @@ -515,6 +667,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, mdp5_crtc->lm = GET_LM_ID(id); spin_lock_init(&mdp5_crtc->lm_lock); + spin_lock_init(&mdp5_crtc->cursor.lock); mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; mdp5_crtc->err.irq = mdp5_crtc_err_irq; @@ -523,6 +676,10 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, pipe2name(mdp5_plane_pipe(plane)), id); drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs); + + drm_flip_work_init(&mdp5_crtc->unref_cursor_work, + "unref cursor", unref_cursor_worker); + drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); plane->crtc = crtc; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c index dea4505ac963..151129032d16 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c @@ -95,7 +95,7 @@ u32 ctl_read(struct mdp5_ctl *ctl, u32 reg) } -int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf) +int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf) { unsigned long flags; static const enum mdp5_intfnum intfnum[] = { diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h index 
1018519b6af2..ad48788efeea 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h @@ -34,7 +34,7 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); */ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc); -int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf); +int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf); int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index 0254bfdeb92f..d6a14bb99988 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -110,45 +111,6 @@ static const struct drm_encoder_funcs mdp5_encoder_funcs = { .destroy = mdp5_encoder_destroy, }; -static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_kms *mdp5_kms = get_kms(encoder); - int intf = mdp5_encoder->intf; - bool enabled = (mode == DRM_MODE_DPMS_ON); - unsigned long flags; - - DBG("mode=%d", mode); - - if (enabled == mdp5_encoder->enabled) - return; - - if (enabled) { - bs_set(mdp5_encoder, 1); - spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); - spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - } else { - spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0); - spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - - /* - * Wait for a vsync so we know the ENABLE=0 latched before - * the (connector) source of the vsync's gets disabled, - * otherwise we end up in a funny state if we re-enable - * before the disable latches, which results that some of - * the settings changes for the new modeset (like new - * scanout buffer) don't latch properly.. - */ - mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf)); - - bs_set(mdp5_encoder, 0); - } - - mdp5_encoder->enabled = enabled; -} - static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -162,11 +124,13 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct drm_device *dev = encoder->dev; + struct drm_connector *connector; int intf = mdp5_encoder->intf; uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; uint32_t display_v_start, display_v_end; uint32_t hsync_start_x, hsync_end_x; - uint32_t format; + uint32_t format = 0x2100; unsigned long flags; mode = adjusted_mode; @@ -188,7 +152,28 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, /* probably need to get DATA_EN polarity from panel.. */ dtv_hsync_skew = 0; /* get this from panel? */ - format = 0x213f; /* get this from panel? 
*/ + + /* Get color format from panel, default is 8bpc */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + switch (connector->display_info.bpc) { + case 4: + format |= 0; + break; + case 5: + format |= 0x15; + break; + case 6: + format |= 0x2A; + break; + case 8: + default: + format |= 0x3F; + break; + } + break; + } + } hsync_start_x = (mode->htotal - mode->hsync_start); hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; @@ -198,6 +183,16 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; + /* + * For edp only: + * DISPLAY_V_START = (VBP * HCYCLE) + HBP + * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP + */ + if (mdp5_encoder->intf_id == INTF_eDP) { + display_v_start += mode->htotal - mode->hsync_start; + display_v_end -= mode->hsync_start - mode->hdisplay; + } + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf), @@ -225,25 +220,61 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); } -static void mdp5_encoder_prepare(struct drm_encoder *encoder) +static void mdp5_encoder_disable(struct drm_encoder *encoder) { - mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + int intf = mdp5_encoder->intf; + unsigned long flags; + + if (WARN_ON(!mdp5_encoder->enabled)) + return; + + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0); + spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. 
+ */ + mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf)); + + bs_set(mdp5_encoder, 0); + + mdp5_encoder->enabled = false; } -static void mdp5_encoder_commit(struct drm_encoder *encoder) +static void mdp5_encoder_enable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + int intf = mdp5_encoder->intf; + unsigned long flags; + + if (WARN_ON(mdp5_encoder->enabled)) + return; + mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf, mdp5_encoder->intf_id); - mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_ON); + + bs_set(mdp5_encoder, 1); + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); + spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + + mdp5_encoder->enabled = false; } static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { - .dpms = mdp5_encoder_dpms, .mode_fixup = mdp5_encoder_mode_fixup, .mode_set = mdp5_encoder_mode_set, - .prepare = mdp5_encoder_prepare, - .commit = mdp5_encoder_commit, + .prepare = mdp5_encoder_disable, + .commit = mdp5_encoder_enable, }; /* initialize encoder */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 9f01a4f21af2..92b61db5754c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -68,6 +68,18 @@ static int mdp5_hw_init(struct msm_kms *kms) return 0; } +static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + mdp5_enable(mdp5_kms); +} + +static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + mdp5_disable(mdp5_kms); +} + static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder) { @@ -115,6 +127,8 @@ static const struct mdp_kms_funcs kms_funcs = { .irq = mdp5_irq, .enable_vblank = mdp5_enable_vblank, .disable_vblank = mdp5_disable_vblank, + .prepare_commit = mdp5_prepare_commit, + .complete_commit = mdp5_complete_commit, .get_format = mdp_get_format, .round_pixclk = mdp5_round_pixclk, .preclose = mdp5_preclose, @@ -208,19 +222,18 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) } } - /* Construct encoder for HDMI: */ - encoder = mdp5_encoder_init(dev, 3, INTF_HDMI); - if (IS_ERR(encoder)) { - dev_err(dev->dev, "failed to construct encoder\n"); - ret = PTR_ERR(encoder); - goto fail; - } + if (priv->hdmi) { + /* Construct encoder for HDMI: */ + encoder = mdp5_encoder_init(dev, 3, INTF_HDMI); + if (IS_ERR(encoder)) { + dev_err(dev->dev, "failed to construct encoder\n"); + ret = PTR_ERR(encoder); + goto fail; + } - encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;; - priv->encoders[priv->num_encoders++] = encoder; + encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;; + priv->encoders[priv->num_encoders++] = encoder; - /* Construct bridge/connector for HDMI: */ - if (priv->hdmi) { ret = hdmi_modeset_init(priv->hdmi, dev, encoder); if (ret) { dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret); @@ -228,6 +241,27 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) } } + if (priv->edp) { + /* Construct encoder for eDP: */ + encoder = mdp5_encoder_init(dev, 0, INTF_eDP); + if (IS_ERR(encoder)) { + dev_err(dev->dev, "failed to construct eDP encoder\n"); + ret = PTR_ERR(encoder); + goto fail; + } + + encoder->possible_crtcs = (1 << priv->num_crtcs) - 
1; + priv->encoders[priv->num_encoders++] = encoder; + + /* Construct bridge/connector for eDP: */ + ret = msm_edp_modeset_init(priv->edp, dev, encoder); + if (ret) { + dev_err(dev->dev, "failed to initialize eDP: %d\n", + ret); + goto fail; + } + } + return 0; fail: diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index dd69c77c0d64..49d011e8835b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -165,14 +165,25 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); +static inline bool pipe_supports_yuv(enum mdp5_pipe pipe) +{ + switch (pipe) { + case SSPP_VIG0: + case SSPP_VIG1: + case SSPP_VIG2: + case SSPP_VIG3: + return true; + default: + return false; + } +} + static inline uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats, uint32_t max_formats) { - /* TODO when we have YUV, we need to filter supported formats - * based on pipe id.. - */ - return mdp_get_formats(pixel_formats, max_formats); + return mdp_get_formats(pixel_formats, max_formats, + !pipe_supports_yuv(pipe)); } void mdp5_plane_install_properties(struct drm_plane *plane, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index fc76f630e5b1..05cf9ab2a876 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -18,8 +18,6 @@ #include "mdp5_kms.h" -#define MAX_PLANE 4 - struct mdp5_plane { struct drm_plane base; const char *name; @@ -278,6 +276,155 @@ static void set_scanout_locked(struct drm_plane *plane, plane->fb = fb; } +/* Note: mdp5_plane->pipe_lock must be locked */ +static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe) +{ + uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) & + ~MDP5_PIPE_OP_MODE_CSC_1_EN; + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value); +} + +/* Note: mdp5_plane->pipe_lock must be locked */ +static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, + struct csc_cfg *csc) +{ + uint32_t i, mode = 0; /* RGB, no CSC */ + uint32_t *matrix; + + if (unlikely(!csc)) + return; + + if ((csc->type == CSC_YUV2RGB) || (CSC_YUV2YUV == csc->type)) + mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV); + if ((csc->type == CSC_RGB2YUV) || (CSC_YUV2YUV == csc->type)) + mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV); + mode |= MDP5_PIPE_OP_MODE_CSC_1_EN; + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode); + + matrix = csc->matrix; + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8])); + + for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) { + 
uint32_t *pre_clamp = csc->pre_clamp; + uint32_t *post_clamp = csc->post_clamp; + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i), + MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) | + MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i), + MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) | + MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i), + MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i), + MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i])); + } +} + +#define PHASE_STEP_SHIFT 21 +#define DOWN_SCALE_RATIO_MAX 32 /* 2^(26-21) */ + +static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase) +{ + uint32_t unit; + + if (src == 0 || dst == 0) + return -EINVAL; + + /* + * PHASE_STEP_X/Y is coded on 26 bits (25:0), + * where 2^21 represents the unity "1" in fixed-point hardware design. + * This leaves 5 bits for the integer part (downscale case): + * -> maximum downscale ratio = 0b1_1111 = 31 + */ + if (src > (dst * DOWN_SCALE_RATIO_MAX)) + return -EOVERFLOW; + + unit = 1 << PHASE_STEP_SHIFT; + *out_phase = mult_frac(unit, src, dst); + + return 0; +} + +static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest, + uint32_t phasex_steps[2]) +{ + uint32_t phasex_step; + unsigned int hsub; + int ret; + + ret = calc_phase_step(src, dest, &phasex_step); + if (ret) + return ret; + + hsub = drm_format_horz_chroma_subsampling(pixel_format); + + phasex_steps[0] = phasex_step; + phasex_steps[1] = phasex_step / hsub; + + return 0; +} + +static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest, + uint32_t phasey_steps[2]) +{ + uint32_t phasey_step; + unsigned int vsub; + int ret; + + ret = calc_phase_step(src, dest, &phasey_step); + if (ret) + return ret; + + vsub = drm_format_vert_chroma_subsampling(pixel_format); + + phasey_steps[0] = phasey_step; + phasey_steps[1] = phasey_step / vsub; + + return 0; +} + +static uint32_t get_scalex_config(uint32_t src, uint32_t dest) +{ + uint32_t filter; + + filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + + return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(filter); +} + +static uint32_t get_scaley_config(uint32_t src, uint32_t dest) +{ + uint32_t filter; + + filter = (src <= dest) ? 
SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + + return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(filter); +} + static int mdp5_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, @@ -287,11 +434,14 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct mdp5_kms *mdp5_kms = get_kms(plane); + struct device *dev = mdp5_kms->dev->dev; enum mdp5_pipe pipe = mdp5_plane->pipe; const struct mdp_format *format; uint32_t nplanes, config = 0; - uint32_t phasex_step = 0, phasey_step = 0; + /* below array -> index 0: comp 0/3 ; index 1: comp 1/2 */ + uint32_t phasex_step[2] = {0,}, phasey_step[2] = {0,}; uint32_t hdecm = 0, vdecm = 0; + uint32_t pix_format; unsigned long flags; int ret; @@ -301,6 +451,9 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, if (WARN_ON(nplanes > pipe2nclients(pipe))) return -EINVAL; + format = to_mdp_format(msm_framebuffer_format(fb)); + pix_format = format->base.pixel_format; + /* src values are in Q16 fixed point, convert to integer: */ src_x = src_x >> 16; src_y = src_y >> 16; @@ -325,14 +478,28 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, */ mdp5_smp_configure(mdp5_kms->smp, pipe); - if (src_w != crtc_w) { - config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN; - /* TODO calc phasex_step, hdecm */ + /* SCALE is used to both scale and up-sample chroma components */ + + if ((src_w != crtc_w) || MDP_FORMAT_IS_YUV(format)) { + /* TODO calc hdecm */ + ret = calc_scalex_steps(pix_format, src_w, crtc_w, phasex_step); + if (ret) { + dev_err(dev, "X scaling (%d -> %d) failed: %d\n", + src_w, crtc_w, ret); + return ret; + } + config |= get_scalex_config(src_w, crtc_w); } - if (src_h != crtc_h) { - config |= MDP5_PIPE_SCALE_CONFIG_SCALEY_EN; - /* TODO calc phasey_step, vdecm */ + if ((src_h != crtc_h) || MDP_FORMAT_IS_YUV(format)) { + /* TODO calc vdecm */ + ret = calc_scaley_steps(pix_format, src_h, crtc_h, phasey_step); + if (ret) { + dev_err(dev, "Y scaling (%d -> %d) failed: %d\n", + src_h, crtc_h, ret); + return ret; + } + config |= get_scaley_config(src_h, crtc_h); } spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); @@ -357,8 +524,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, MDP5_PIPE_OUT_XY_X(crtc_x) | MDP5_PIPE_OUT_XY_Y(crtc_y)); - format = to_mdp_format(msm_framebuffer_format(fb)); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe), MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | @@ -368,8 +533,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | - MDP5_PIPE_SRC_FORMAT_NUM_PLANES(nplanes - 1) | - MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(CHROMA_RGB)); + MDP5_PIPE_SRC_FORMAT_NUM_PLANES(format->fetch_type) | + MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample)); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe), MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | @@ -383,18 +548,24 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, /* not using secure mode: */ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), 
phasey_step); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), + phasex_step[0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), + phasey_step[0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe), + phasex_step[1]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe), + phasey_step[1]); mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), MDP5_PIPE_DECIMATION_VERT(vdecm) | MDP5_PIPE_DECIMATION_HORZ(hdecm)); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), - MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(SCALE_FILTER_NEAREST) | - MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(SCALE_FILTER_NEAREST) | - MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(SCALE_FILTER_NEAREST) | - MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(SCALE_FILTER_NEAREST) | - MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) | - MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST)); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config); + + if (MDP_FORMAT_IS_YUV(format)) + csc_enable(mdp5_kms, pipe, + mdp_get_default_csc_cfg(CSC_YUV2RGB)); + else + csc_disable(mdp5_kms, pipe); set_scanout_locked(plane, fb); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c index bf551885e019..1f795af89680 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c @@ -119,9 +119,10 @@ static int smp_request_block(struct mdp5_smp *smp, spin_lock_irqsave(&smp->state_lock, flags); - nblks -= reserved; - if (reserved) + if (reserved) { + nblks = max(0, nblks - reserved); DBG("%d MMBs allocated (%d reserved)", nblks, reserved); + } avail = cnt - bitmap_weight(smp->state, cnt); if (nblks > avail) { diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h index 64c1afd6030a..a1d35f162c7f 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h @@ -8,18 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 
2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) -Copyright (C) 2013 by the following authors: +Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -44,6 +45,19 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +enum mdp_chroma_samp_type { + CHROMA_RGB = 0, + CHROMA_H2V1 = 1, + CHROMA_H1V2 = 2, + CHROMA_420 = 3, +}; + +enum mdp_sspp_fetch_type { + MDP_PLANE_INTERLEAVED = 0, + MDP_PLANE_PLANAR = 1, + MDP_PLANE_PSEUDO_PLANAR = 2, +}; + enum mdp_mixer_stage_id { STAGE_UNUSED = 0, STAGE_BASE = 1, diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c index e0a6ffbe6ab4..f683433b6727 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_format.c +++ b/drivers/gpu/drm/msm/mdp/mdp_format.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -19,7 +20,58 @@ #include "msm_drv.h" #include "mdp_kms.h" -#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \ +static struct csc_cfg csc_convert[CSC_MAX] = { + [CSC_RGB2RGB] = { + .type = CSC_RGB2RGB, + .matrix = { + 0x0200, 0x0000, 0x0000, + 0x0000, 0x0200, 0x0000, + 0x0000, 0x0000, 0x0200 + }, + .pre_bias = { 0x0, 0x0, 0x0 }, + .post_bias = { 0x0, 0x0, 0x0 }, + .pre_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff }, + .post_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff }, + }, + [CSC_YUV2RGB] = { + .type = CSC_YUV2RGB, + .matrix = { + 0x0254, 0x0000, 0x0331, + 0x0254, 0xff37, 0xfe60, + 0x0254, 0x0409, 0x0000 + }, + .pre_bias = { 0xfff0, 0xff80, 0xff80 }, + .post_bias = { 0x00, 0x00, 0x00 }, + .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + }, + [CSC_RGB2YUV] = { + .type = CSC_RGB2YUV, + .matrix = { + 0x0083, 0x0102, 0x0032, + 0x1fb5, 0x1f6c, 0x00e1, + 0x00e1, 0x1f45, 0x1fdc + }, + .pre_bias = { 0x00, 0x00, 0x00 }, + .post_bias = { 0x10, 0x80, 0x80 }, + .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + .post_clamp = { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0 }, + }, + [CSC_YUV2YUV] = { + .type = CSC_YUV2YUV, + .matrix = { + 0x0200, 0x0000, 0x0000, + 0x0000, 0x0200, 0x0000, + 0x0000, 0x0000, 0x0200 + }, + .pre_bias = { 0x00, 0x00, 0x00 }, + .post_bias = { 0x00, 0x00, 0x00 }, + .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + }, +}; + +#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs) { \ .base = { .pixel_format = DRM_FORMAT_ ## name }, \ .bpc_a = BPC ## a ## A, \ .bpc_r = BPC ## r, \ @@ -30,21 +82,46 @@ .unpack_tight = tight, \ .cpp = c, \ .unpack_count = cnt, \ - } + .fetch_type = fp, \ + .chroma_sample = cs \ +} #define BPC0A 0 +/* + * Note: Keep RGB formats 1st, followed by YUV formats to avoid breaking + * mdp_get_rgb_formats()'s implementation. 
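An aside on the csc_convert[] matrices added above: the coefficients look like 13-bit two's-complement fixed point with 0x200 representing 1.0 (the CSC_RGB2RGB identity matrix is 0x0200 on the diagonal). Under that assumption — it is an editor's reading, not stated in the patch — the CSC_YUV2RGB entries decode to the familiar BT.601 limited-range coefficients. A small decoder, not part of the patch:

/*
 * Illustrative decoder for the csc_convert[] values above.  Assumes
 * 13-bit two's-complement coefficients with 0x200 == 1.0, matching the
 * CSC_RGB2RGB identity diagonal; this interpretation is an assumption.
 */
#include <stdio.h>

static double csc_coeff(unsigned int raw)
{
	int v = raw & 0x1fff;		/* COEFF fields are 13 bits wide */

	if (v & 0x1000)			/* sign-extend bit 12 */
		v -= 0x2000;
	return v / 512.0;		/* 0x200 == 1.0 */
}

int main(void)
{
	/* first two rows of the CSC_YUV2RGB matrix from the table above */
	printf("R = %.3f*Y + %.3f*Cb + %.3f*Cr\n",
	       csc_coeff(0x0254), csc_coeff(0x0000), csc_coeff(0x0331));
	printf("G = %.3f*Y + %.3f*Cb + %.3f*Cr\n",
	       csc_coeff(0x0254), csc_coeff(0xff37), csc_coeff(0xfe60));
	/* prints ~1.164/1.596 and ~-0.392/-0.813: BT.601 limited range */
	return 0;
}
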
+ */ static const struct mdp_format formats[] = { - /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */ - FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4), - FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4), - FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3), - FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3), - FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3), - FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3), + /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */ + FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_RGB), + FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_RGB), + FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3, + MDP_PLANE_INTERLEAVED, CHROMA_RGB), + FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3, + MDP_PLANE_INTERLEAVED, CHROMA_RGB), + FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3, + MDP_PLANE_INTERLEAVED, CHROMA_RGB), + FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3, + MDP_PLANE_INTERLEAVED, CHROMA_RGB), + + /* --- RGB formats above / YUV formats below this line --- */ + + FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_420), + FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_420), }; -uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats) +/* + * Note: + * @rgb_only must be set to true, when requesting + * supported formats for RGB pipes. + */ +uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats, + bool rgb_only) { uint32_t i; for (i = 0; i < ARRAY_SIZE(formats); i++) { @@ -53,6 +130,9 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats) if (i == max_formats) break; + if (rgb_only && MDP_FORMAT_IS_YUV(f)) + break; + pixel_formats[i] = f->base.pixel_format; } @@ -69,3 +149,11 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format) } return NULL; } + +struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type type) +{ + if (unlikely(WARN_ON(type >= CSC_MAX))) + return NULL; + + return &csc_convert[type]; +} diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c index 2a731722d840..1988c243f437 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c @@ -34,7 +34,7 @@ static void update_irq(struct mdp_kms *mdp_kms) struct mdp_irq *irq; uint32_t irqmask = mdp_kms->vblank_mask; - BUG_ON(!spin_is_locked(&list_lock)); + assert_spin_locked(&list_lock); list_for_each_entry(irq, &mdp_kms->irq_list, node) irqmask |= irq->irqmask; diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h index b268ce95d394..5ae4039d68e4 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h @@ -88,10 +88,32 @@ struct mdp_format { uint8_t unpack[4]; bool alpha_enable, unpack_tight; uint8_t cpp, unpack_count; + enum mdp_sspp_fetch_type fetch_type; + enum mdp_chroma_samp_type chroma_sample; }; #define to_mdp_format(x) container_of(x, struct mdp_format, base) +#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->chroma_sample > CHROMA_RGB) -uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats); +uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); +enum csc_type { + CSC_RGB2RGB = 0, + CSC_YUV2RGB, + CSC_RGB2YUV, + CSC_YUV2YUV, + CSC_MAX +}; + +struct csc_cfg { + enum csc_type 
type; + uint32_t matrix[9]; + uint32_t pre_bias[3]; + uint32_t post_bias[3]; + uint32_t pre_clamp[6]; + uint32_t post_clamp[6]; +}; + +struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type); + #endif /* __MDP_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 2c396540e279..7c412292a0ff 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -20,6 +20,7 @@ #include "msm_gem.h" struct msm_commit { + struct drm_device *dev; struct drm_atomic_state *state; uint32_t fence; struct msm_fence_cb fence_cb; @@ -58,14 +59,16 @@ static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask) spin_unlock(&priv->pending_crtcs_event.lock); } -static struct msm_commit *new_commit(struct drm_atomic_state *state) +static struct msm_commit *commit_init(struct drm_atomic_state *state) { struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) return NULL; + c->dev = state->dev; c->state = state; + /* TODO we might need a way to indicate to run the cb on a * different wq so wait_for_vblanks() doesn't block retiring * bo's.. @@ -75,6 +78,12 @@ static struct msm_commit *new_commit(struct drm_atomic_state *state) return c; } +static void commit_destroy(struct msm_commit *c) +{ + end_atomic(c->dev->dev_private, c->crtc_mask); + kfree(c); +} + /* The (potentially) asynchronous part of the commit. At this point * nothing can fail short of armageddon. */ @@ -82,12 +91,16 @@ static void complete_commit(struct msm_commit *c) { struct drm_atomic_state *state = c->state; struct drm_device *dev = state->dev; + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; - drm_atomic_helper_commit_pre_planes(dev, state); + kms->funcs->prepare_commit(kms, state); + + drm_atomic_helper_commit_modeset_disables(dev, state); drm_atomic_helper_commit_planes(dev, state); - drm_atomic_helper_commit_post_planes(dev, state); + drm_atomic_helper_commit_modeset_enables(dev, state); /* NOTE: _wait_for_vblanks() only waits for vblank on * enabled CRTCs. So we end up faulting when disabling @@ -106,11 +119,11 @@ static void complete_commit(struct msm_commit *c) drm_atomic_helper_cleanup_planes(dev, state); - drm_atomic_state_free(state); + kms->funcs->complete_commit(kms, state); - end_atomic(dev->dev_private, c->crtc_mask); + drm_atomic_state_free(state); - kfree(c); + commit_destroy(c); } static void fence_cb(struct msm_fence_cb *cb) @@ -165,6 +178,7 @@ int msm_atomic_commit(struct drm_device *dev, { int nplanes = dev->mode_config.num_total_plane; int ncrtcs = dev->mode_config.num_crtc; + struct timespec timeout; struct msm_commit *c; int i, ret; @@ -172,7 +186,7 @@ int msm_atomic_commit(struct drm_device *dev, if (ret) return ret; - c = new_commit(state); + c = commit_init(state); if (!c) return -ENOMEM; @@ -237,10 +251,12 @@ int msm_atomic_commit(struct drm_device *dev, return 0; } - ret = msm_wait_fence_interruptable(dev, c->fence, NULL); + jiffies_to_timespec(jiffies + msecs_to_jiffies(1000), &timeout); + + ret = msm_wait_fence_interruptable(dev, c->fence, &timeout); if (ret) { WARN_ON(ret); // TODO unswap state back? or?? 
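One detail of the msm_atomic_commit() change above worth spelling out: the fence wait now gets a timeout roughly one second ahead of the current jiffies, built via jiffies_to_timespec(jiffies + msecs_to_jiffies(1000), &timeout), instead of the previous NULL (wait forever) argument, so a fence that never signals can no longer stall the commit path indefinitely. A userspace analogue of constructing such a deadline, purely as a sketch with the same assumed 1000 ms budget:

/*
 * Userspace analogue of the bounded wait added above.  Illustrative
 * only; the 1000 ms budget mirrors the value used in the patch.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec deadline;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += 1;		/* ~1000 ms from now */

	printf("give up waiting at %ld.%09ld\n",
	       (long)deadline.tv_sec, deadline.tv_nsec);
	return 0;
}
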
- kfree(c); + commit_destroy(c); return ret; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index f1ebedde6346..a4269119f9ea 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -54,6 +54,12 @@ module_param(reglog, bool, 0600); #define reglog 0 #endif +#ifdef CONFIG_DRM_MSM_FBDEV +static bool fbdev = true; +MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer"); +module_param(fbdev, bool, 0600); +#endif + static char *vram = "16m"; MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU"); module_param(vram, charp, 0); @@ -300,7 +306,8 @@ static int msm_load(struct drm_device *dev, unsigned long flags) drm_mode_config_reset(dev); #ifdef CONFIG_DRM_MSM_FBDEV - priv->fbdev = msm_fbdev_init(dev); + if (fbdev) + priv->fbdev = msm_fbdev_init(dev); #endif ret = msm_debugfs_late_init(dev); @@ -1023,6 +1030,7 @@ static struct platform_driver msm_platform_driver = { static int __init msm_drm_register(void) { DBG("init"); + msm_edp_register(); hdmi_register(); adreno_register(); return platform_driver_register(&msm_platform_driver); @@ -1034,6 +1042,7 @@ static void __exit msm_drm_unregister(void) platform_driver_unregister(&msm_platform_driver); hdmi_unregister(); adreno_unregister(); + msm_edp_unregister(); } module_init(msm_drm_register); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 22e5391a7ce8..9e8d441b61c3 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -76,6 +76,12 @@ struct msm_drm_private { */ struct hdmi *hdmi; + /* eDP is for mdp5 only, but kms has not been created + * when edp_bind() and edp_init() are called. Here is the only + * place to keep the edp instance. + */ + struct msm_edp *edp; + /* when we have more than one 'msm_gpu' these need to be an array: */ struct msm_gpu *gpu; struct msm_file_private *lastctx; @@ -224,6 +230,12 @@ int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev, void __init hdmi_register(void); void __exit hdmi_unregister(void); +struct msm_edp; +void __init msm_edp_register(void); +void __exit msm_edp_unregister(void); +int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, + struct drm_encoder *encoder); + #ifdef CONFIG_DEBUG_FS void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 84dec161d836..6b573e612f27 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -24,7 +24,7 @@ struct msm_framebuffer { struct drm_framebuffer base; const struct msm_format *format; - struct drm_gem_object *planes[3]; + struct drm_gem_object *planes[MAX_PLANE]; }; #define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base) @@ -122,7 +122,7 @@ uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane) struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); if (!msm_fb->planes[plane]) return 0; - return msm_gem_iova(msm_fb->planes[plane], id); + return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane]; } struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane) diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 115b509a4a00..df60f65728ff 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -245,9 +245,6 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev) if (ret) goto fini; 
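Stepping back to the msm_framebuffer_iova() change a little earlier: with NV12/NV21 now advertised, a framebuffer can carry several planes inside one GEM object, so the scanout address of each plane is the object's iova plus fb->offsets[plane]. A sketch of a plausible tightly packed NV12 layout — the pitches and packing here are assumptions for illustration, not something the patch mandates:

/*
 * Hypothetical NV12 layout in a single buffer, to illustrate why
 * msm_framebuffer_iova() now adds fb->offsets[plane].  Real allocators
 * may use different pitches and alignment.
 */
#include <stdio.h>

int main(void)
{
	unsigned int width = 1920, height = 1080;

	unsigned int pitches[2] = { width, width };		/* bytes per row: Y plane, then interleaved CbCr */
	unsigned int offsets[2] = { 0, pitches[0] * height };	/* CbCr plane packed right after the Y plane */

	/* scanout address of plane n = BO base iova + offsets[n] */
	printf("Y  plane at +%u bytes\n", offsets[0]);
	printf("UV plane at +%u bytes (after %u rows of %u bytes)\n",
	       offsets[1], height, pitches[0]);
	return 0;
}
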
- /* disable all the possible outputs/crtcs before entering KMS mode */ - drm_helper_disable_unused_functions(dev); - ret = drm_fb_helper_initial_config(helper, 32); if (ret) goto fini; diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 06437745bc2c..3a78cb48662b 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -23,6 +23,8 @@ #include "msm_drv.h" +#define MAX_PLANE 4 + /* As there are different display controller blocks depending on the * snapdragon version, the kms support is split out and the appropriate * implementation is loaded at runtime. The kms module is responsible @@ -38,6 +40,9 @@ struct msm_kms_funcs { irqreturn_t (*irq)(struct msm_kms *kms); int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); + /* modeset, bracketing atomic_commit(): */ + void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state); + void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state); /* misc: */ const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format); long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 024e98ef8e4d..d84583776d50 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -10,6 +10,7 @@ menu "Display Panels" config DRM_PANEL_SIMPLE tristate "support for simple panels" depends on OF + depends on BACKLIGHT_CLASS_DEVICE help DRM panel driver for dumb panels that need at most a regulator and a GPIO to be powered up. Optionally a backlight can be attached so @@ -31,6 +32,7 @@ config DRM_PANEL_SHARP_LQ101R1SX01 tristate "Sharp LQ101R1SX01 panel" depends on OF depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE help Say Y here if you want to enable support for Sharp LQ101R1SX01 TFT-LCD modules. 
The panel has a 2560x1600 resolution and uses diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c index 9d81759d82fc..3cce3ca19601 100644 --- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c +++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c @@ -19,8 +19,6 @@ #include <video/mipi_display.h> -#include <linux/host1x.h> - struct sharp_panel { struct drm_panel base; /* the datasheet refers to them as DSI-LINK1 and DSI-LINK2 */ @@ -41,6 +39,16 @@ static inline struct sharp_panel *to_sharp_panel(struct drm_panel *panel) return container_of(panel, struct sharp_panel, base); } +static void sharp_wait_frames(struct sharp_panel *sharp, unsigned int frames) +{ + unsigned int refresh = drm_mode_vrefresh(sharp->mode); + + if (WARN_ON(frames > refresh)) + return; + + msleep(1000 / (refresh / frames)); +} + static int sharp_panel_write(struct sharp_panel *sharp, u16 offset, u8 value) { u8 payload[3] = { offset >> 8, offset & 0xff, value }; @@ -106,6 +114,8 @@ static int sharp_panel_unprepare(struct drm_panel *panel) if (!sharp->prepared) return 0; + sharp_wait_frames(sharp, 4); + err = mipi_dsi_dcs_set_display_off(sharp->link1); if (err < 0) dev_err(panel->dev, "failed to set display off: %d\n", err); @@ -170,15 +180,13 @@ static int sharp_panel_prepare(struct drm_panel *panel) if (err < 0) return err; - usleep_range(10000, 20000); - - err = mipi_dsi_dcs_soft_reset(sharp->link1); - if (err < 0) { - dev_err(panel->dev, "soft reset failed: %d\n", err); - goto poweroff; - } - - msleep(120); + /* + * According to the datasheet, the panel needs around 10 ms to fully + * power up. At least another 120 ms is required before exiting sleep + * mode to make sure the panel is ready. Throw in another 20 ms for + * good measure. 
+ */ + msleep(150); err = mipi_dsi_dcs_exit_sleep_mode(sharp->link1); if (err < 0) { @@ -238,6 +246,9 @@ static int sharp_panel_prepare(struct drm_panel *panel) sharp->prepared = true; + /* wait for 6 frames before continuing */ + sharp_wait_frames(sharp, 6); + return 0; poweroff: diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 6049d245c20e..39806c335339 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -448,6 +448,34 @@ static const struct panel_desc auo_b133htn01 = { }, }; +static const struct drm_display_mode avic_tm070ddh03_mode = { + .clock = 51200, + .hdisplay = 1024, + .hsync_start = 1024 + 160, + .hsync_end = 1024 + 160 + 4, + .htotal = 1024 + 160 + 4 + 156, + .vdisplay = 600, + .vsync_start = 600 + 17, + .vsync_end = 600 + 17 + 1, + .vtotal = 600 + 17 + 1 + 17, + .vrefresh = 60, +}; + +static const struct panel_desc avic_tm070ddh03 = { + .modes = &avic_tm070ddh03_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 154, + .height = 90, + }, + .delay = { + .prepare = 20, + .enable = 200, + .disable = 200, + }, +}; + static const struct drm_display_mode chunghwa_claa101wa01a_mode = { .clock = 72070, .hdisplay = 1366, @@ -566,6 +594,29 @@ static const struct panel_desc foxlink_fl500wvr00_a0t = { .bus_format = MEDIA_BUS_FMT_RGB888_1X24, }; +static const struct drm_display_mode giantplus_gpg482739qs5_mode = { + .clock = 9000, + .hdisplay = 480, + .hsync_start = 480 + 5, + .hsync_end = 480 + 5 + 1, + .htotal = 480 + 5 + 1 + 40, + .vdisplay = 272, + .vsync_start = 272 + 8, + .vsync_end = 272 + 8 + 1, + .vtotal = 272 + 8 + 1 + 8, + .vrefresh = 60, +}; + +static const struct panel_desc giantplus_gpg482739qs5 = { + .modes = &giantplus_gpg482739qs5_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 95, + .height = 54, + }, +}; + static const struct drm_display_mode hannstar_hsd070pww1_mode = { .clock = 71100, .hdisplay = 1280, @@ -745,6 +796,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "auo,b133xtn01", .data = &auo_b133xtn01, }, { + .compatible = "avic,tm070ddh03", + .data = &avic_tm070ddh03, + }, { .compatible = "chunghwa,claa101wa01a", .data = &chunghwa_claa101wa01a }, { @@ -763,6 +817,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "foxlink,fl500wvr00-a0t", .data = &foxlink_fl500wvr00_a0t, }, { + .compatible = "giantplus,gpg482739qs5", + .data = &giantplus_gpg482739qs5 + }, { .compatible = "hannstar,hsd070pww1", .data = &hannstar_hsd070pww1, }, { diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index db42a670f995..5bf825dfaa09 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -623,10 +623,8 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) drm_dp_dpcd_writeb(dp_info->aux, DP_DOWNSPREAD_CTRL, 0); - if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) && - (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) { + if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE) drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1); - } /* set the lane count on the sink */ tmp = dp_info->dp_lane_count; diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index de77c27d8106..e6a4ba236c70 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3905,7 +3905,21 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring = 
&rdev->ring[fence->ring]; u64 addr = rdev->fence_drv[fence->ring].gpu_addr; - /* EVENT_WRITE_EOP - flush caches, send int */ + /* Workaround for cache flush problems. First send a dummy EOP + * event down the pipe with seq one below. + */ + radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | + EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | + EVENT_INDEX(5))); + radeon_ring_write(ring, addr & 0xfffffffc); + radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | + DATA_SEL(1) | INT_SEL(0)); + radeon_ring_write(ring, fence->seq - 1); + radeon_ring_write(ring, 0); + + /* Then send the real EOP event down the pipe. */ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN | @@ -7359,7 +7373,6 @@ int cik_irq_set(struct radeon_device *rdev) u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; u32 dma_cntl, dma_cntl1; - u32 thermal_int; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); @@ -7389,13 +7402,6 @@ int cik_irq_set(struct radeon_device *rdev) cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; - if (rdev->flags & RADEON_IS_IGP) - thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & - ~(THERM_INTH_MASK | THERM_INTL_MASK); - else - thermal_int = RREG32_SMC(CG_THERMAL_INT) & - ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); - /* enable CP interrupts on all rings */ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { DRM_DEBUG("cik_irq_set: sw int gfx\n"); @@ -7499,14 +7505,6 @@ int cik_irq_set(struct radeon_device *rdev) hpd6 |= DC_HPDx_INT_EN; } - if (rdev->irq.dpm_thermal) { - DRM_DEBUG("dpm thermal\n"); - if (rdev->flags & RADEON_IS_IGP) - thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; - else - thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; - } - WREG32(CP_INT_CNTL_RING0, cp_int_cntl); WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl); @@ -7553,11 +7551,6 @@ int cik_irq_set(struct radeon_device *rdev) WREG32(DC_HPD5_INT_CONTROL, hpd5); WREG32(DC_HPD6_INT_CONTROL, hpd6); - if (rdev->flags & RADEON_IS_IGP) - WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); - else - WREG32_SMC(CG_THERMAL_INT, thermal_int); - return 0; } diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 7d2ff31c35a5..f86eb54e7763 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -845,7 +845,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev, for (; ndw > 0; ndw -= 2, --count, pe += 8) { if (flags & R600_PTE_SYSTEM) { value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; } else if (flags & R600_PTE_VALID) { value = addr; } else { diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index c5eb286517a8..0e236d067d66 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -1169,6 +1169,19 @@ void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable) } } +static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable) +{ + u32 thermal_int; + + thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL); + if (enable) + thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; + else + thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK); + WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); + +} + int kv_dpm_enable(struct radeon_device *rdev) { struct kv_power_info *pi = kv_get_pi(rdev); @@ -1280,8 +1293,7 @@ int kv_dpm_late_enable(struct 
radeon_device *rdev) DRM_ERROR("kv_set_thermal_temperature_range failed\n"); return ret; } - rdev->irq.dpm_thermal = true; - radeon_irq_set(rdev); + kv_enable_thermal_int(rdev, true); } /* powerdown unused blocks for now */ @@ -1312,6 +1324,7 @@ void kv_dpm_disable(struct radeon_device *rdev) kv_stop_dpm(rdev); kv_enable_ulv(rdev, false); kv_reset_am(rdev); + kv_enable_thermal_int(rdev, false); kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); } diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index 4be2bb7cbef3..ce787a9f12c0 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c @@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev, for (; ndw > 0; ndw -= 2, --count, pe += 8) { if (flags & R600_PTE_SYSTEM) { value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; } else if (flags & R600_PTE_VALID) { value = addr; } else { diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 74f06d540591..279801ca5110 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) return r; rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; rdev->asic->gart.set_page = &r100_pci_gart_set_page; return radeon_gart_table_ram_alloc(rdev); } @@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev) WREG32(RADEON_AIC_HI_ADDR, 0); } +uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags) +{ + return addr; +} + void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) + uint64_t entry) { u32 *gtt = rdev->gart.ptr; - gtt[i] = cpu_to_le32(lower_32_bits(addr)); + gtt[i] = cpu_to_le32(lower_32_bits(entry)); } void r100_pci_gart_fini(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 064ad5569cca..08d68f3e13e9 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) #define R300_PTE_WRITEABLE (1 << 2) #define R300_PTE_READABLE (1 << 3) -void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) +uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags) { - void __iomem *ptr = rdev->gart.ptr; - addr = (lower_32_bits(addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 24); if (flags & RADEON_GART_PAGE_READ) @@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, addr |= R300_PTE_WRITEABLE; if (!(flags & RADEON_GART_PAGE_SNOOP)) addr |= R300_PTE_UNSNOOPED; + return addr; +} + +void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t entry) +{ + void __iomem *ptr = rdev->gart.ptr; + /* on x86 we want this to be CPU endian, on powerpc * on powerpc without HW swappers, it'll get swapped on way * into VRAM - so no need for cpu_to_le32 on VRAM tables */ - writel(addr, ((void __iomem *)ptr) + (i * 4)); + writel(entry, ((void __iomem *)ptr) + (i * 4)); } int rv370_pcie_gart_init(struct radeon_device *rdev) @@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; + 
rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; return radeon_gart_table_vram_alloc(rdev); } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 93e407b7e7a7..5587603b4a89 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev); * Dummy page */ struct radeon_dummy_page { + uint64_t entry; struct page *page; dma_addr_t addr; }; @@ -645,7 +646,7 @@ struct radeon_gart { unsigned num_cpu_pages; unsigned table_size; struct page **pages; - dma_addr_t *pages_addr; + uint64_t *pages_entry; bool ready; }; @@ -1858,8 +1859,9 @@ struct radeon_asic { /* gart */ struct { void (*tlb_flush)(struct radeon_device *rdev); + uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags); void (*set_page)(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); } gart; struct { int (*init)(struct radeon_device *rdev); @@ -2867,7 +2869,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) -#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) +#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f)) +#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e)) #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index f811ee14a237..c0ecd128b14b 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev) DRM_INFO("Forcing AGP to PCIE mode\n"); rdev->flags |= RADEON_IS_PCIE; rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; } else { DRM_INFO("Forcing AGP to PCI mode\n"); rdev->flags |= RADEON_IS_PCI; rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; rdev->asic->gart.set_page = &r100_pci_gart_set_page; } rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; @@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = { .mc_wait_for_idle = &r100_mc_wait_for_idle, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, + .get_page_entry = &r100_pci_gart_get_page_entry, .set_page = &r100_pci_gart_set_page, }, .ring = { @@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = { .mc_wait_for_idle = &r100_mc_wait_for_idle, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, + .get_page_entry = &r100_pci_gart_get_page_entry, .set_page = &r100_pci_gart_set_page, }, .ring = { @@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = { .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, + .get_page_entry = &r100_pci_gart_get_page_entry, .set_page = &r100_pci_gart_set_page, }, .ring = { @@ -425,6 +430,7 @@ static struct radeon_asic 
r300_asic_pcie = { .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = { .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = { .mc_wait_for_idle = &rs400_mc_wait_for_idle, .gart = { .tlb_flush = &rs400_gart_tlb_flush, + .get_page_entry = &rs400_gart_get_page_entry, .set_page = &rs400_gart_set_page, }, .ring = { @@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = { .mc_wait_for_idle = &rs600_mc_wait_for_idle, .gart = { .tlb_flush = &rs600_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -689,6 +698,7 @@ static struct radeon_asic rs690_asic = { .mc_wait_for_idle = &rs690_mc_wait_for_idle, .gart = { .tlb_flush = &rs400_gart_tlb_flush, + .get_page_entry = &rs400_gart_get_page_entry, .set_page = &rs400_gart_set_page, }, .ring = { @@ -755,6 +765,7 @@ static struct radeon_asic rv515_asic = { .mc_wait_for_idle = &rv515_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -821,6 +832,7 @@ static struct radeon_asic r520_asic = { .mc_wait_for_idle = &r520_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -915,6 +927,7 @@ static struct radeon_asic r600_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -998,6 +1011,7 @@ static struct radeon_asic rv6xx_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1087,6 +1101,7 @@ static struct radeon_asic rs780_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1189,6 +1204,7 @@ static struct radeon_asic rv770_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1305,6 +1321,7 @@ static struct radeon_asic evergreen_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1395,6 +1412,7 @@ static struct radeon_asic sumo_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1484,6 +1502,7 @@ static struct radeon_asic btc_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = 
&rs600_gart_set_page, }, .ring = { @@ -1617,6 +1636,7 @@ static struct radeon_asic cayman_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &cayman_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -1718,6 +1738,7 @@ static struct radeon_asic trinity_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &cayman_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -1849,6 +1870,7 @@ static struct radeon_asic si_asic = { .get_gpu_clock_counter = &si_get_gpu_clock_counter, .gart = { .tlb_flush = &si_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -2012,6 +2034,7 @@ static struct radeon_asic ci_asic = { .get_gpu_clock_counter = &cik_get_gpu_clock_counter, .gart = { .tlb_flush = &cik_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -2121,6 +2144,7 @@ static struct radeon_asic kv_asic = { .get_gpu_clock_counter = &cik_get_gpu_clock_counter, .gart = { .tlb_flush = &cik_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 4045a320a424..72bdd3bf0d8e 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); int r100_asic_reset(struct radeon_device *rdev); u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); void r100_pci_gart_tlb_flush(struct radeon_device *rdev); +uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags); void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); int r100_irq_set(struct radeon_device *rdev); int r100_irq_process(struct radeon_device *rdev); @@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); extern int r300_cs_parse(struct radeon_cs_parser *p); extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); +extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags); extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); extern int rv370_get_pcie_lanes(struct radeon_device *rdev); extern void r300_set_reg_safe(struct radeon_device *rdev); @@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev); extern int rs400_suspend(struct radeon_device *rdev); extern int rs400_resume(struct radeon_device *rdev); void rs400_gart_tlb_flush(struct radeon_device *rdev); +uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags); void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); int rs400_gart_init(struct radeon_device *rdev); @@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev); void rs600_irq_disable(struct radeon_device *rdev); u32 
rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); void rs600_gart_tlb_flush(struct radeon_device *rdev); +uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags); void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs600_bandwidth_update(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index dbc94f300297..fc1b3f34cf18 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -3289,6 +3289,7 @@ int radeon_atom_get_voltage_evv(struct radeon_device *rdev, args.in.ucVoltageType = VOLTAGE_TYPE_VDDC; args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; + args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id); args.in.ulSCLKFreq = cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk); diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 9e7f23dd14bd..87d5fb21cb61 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -34,7 +34,8 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, uint64_t saddr, uint64_t daddr, - int flag, int n) + int flag, int n, + struct reservation_object *resv) { unsigned long start_jiffies; unsigned long end_jiffies; @@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, case RADEON_BENCHMARK_COPY_DMA: fence = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, - NULL); + resv); break; case RADEON_BENCHMARK_COPY_BLIT: fence = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, - NULL); + resv); break; default: DRM_ERROR("Unknown copy method\n"); @@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, if (rdev->asic->copy.dma) { time = radeon_benchmark_do_move(rdev, size, saddr, daddr, - RADEON_BENCHMARK_COPY_DMA, n); + RADEON_BENCHMARK_COPY_DMA, n, + dobj->tbo.resv); if (time < 0) goto out_cleanup; if (time > 0) @@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, if (rdev->asic->copy.blit) { time = radeon_benchmark_do_move(rdev, size, saddr, daddr, - RADEON_BENCHMARK_COPY_BLIT, n); + RADEON_BENCHMARK_COPY_BLIT, n, + dobj->tbo.resv); if (time < 0) goto out_cleanup; if (time > 0) diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0ec65168f331..bd7519fdd3f4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev) rdev->dummy_page.page = NULL; return -ENOMEM; } + rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr, + RADEON_GART_PAGE_DUMMY); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 102116902a07..913fafa597ad 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -960,6 +960,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && pll->flags & RADEON_PLL_USE_REF_DIV) ref_div_max = pll->reference_div; + else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) + /* fix 
for problems on RS880 */ + ref_div_max = min(pll->max_ref_div, 7u); else ref_div_max = pll->max_ref_div; diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 84146d5901aa..5450fa95a47e 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) radeon_bo_unpin(rdev->gart.robj); radeon_bo_unreserve(rdev->gart.robj); rdev->gart.table_addr = gpu_addr; + + if (!r) { + int i; + + /* We might have dropped some GART table updates while it wasn't + * mapped, restore all entries + */ + for (i = 0; i < rdev->gart.num_gpu_pages; i++) + radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]); + mb(); + radeon_gart_tlb_flush(rdev); + } + return r; } @@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, unsigned t; unsigned p; int i, j; - u64 page_base; if (!rdev->gart.ready) { WARN(1, "trying to unbind memory from uninitialized GART !\n"); @@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, for (i = 0; i < pages; i++, p++) { if (rdev->gart.pages[p]) { rdev->gart.pages[p] = NULL; - rdev->gart.pages_addr[p] = rdev->dummy_page.addr; - page_base = rdev->gart.pages_addr[p]; for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { + rdev->gart.pages_entry[t] = rdev->dummy_page.entry; if (rdev->gart.ptr) { - radeon_gart_set_page(rdev, t, page_base, - RADEON_GART_PAGE_DUMMY); + radeon_gart_set_page(rdev, t, + rdev->dummy_page.entry); } - page_base += RADEON_GPU_PAGE_SIZE; } } } @@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, { unsigned t; unsigned p; - uint64_t page_base; + uint64_t page_base, page_entry; int i, j; if (!rdev->gart.ready) { @@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); for (i = 0; i < pages; i++, p++) { - rdev->gart.pages_addr[p] = dma_addr[i]; rdev->gart.pages[p] = pagelist[i]; - if (rdev->gart.ptr) { - page_base = rdev->gart.pages_addr[p]; - for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { - radeon_gart_set_page(rdev, t, page_base, flags); - page_base += RADEON_GPU_PAGE_SIZE; + page_base = dma_addr[i]; + for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { + page_entry = radeon_gart_get_page_entry(page_base, flags); + rdev->gart.pages_entry[t] = page_entry; + if (rdev->gart.ptr) { + radeon_gart_set_page(rdev, t, page_entry); } + page_base += RADEON_GPU_PAGE_SIZE; } } mb(); @@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev) radeon_gart_fini(rdev); return -ENOMEM; } - rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) * - rdev->gart.num_cpu_pages); - if (rdev->gart.pages_addr == NULL) { + rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) * + rdev->gart.num_gpu_pages); + if (rdev->gart.pages_entry == NULL) { radeon_gart_fini(rdev); return -ENOMEM; } /* set GART entry to point to the dummy page by default */ - for (i = 0; i < rdev->gart.num_cpu_pages; i++) { - rdev->gart.pages_addr[i] = rdev->dummy_page.addr; - } + for (i = 0; i < rdev->gart.num_gpu_pages; i++) + rdev->gart.pages_entry[i] = rdev->dummy_page.entry; return 0; } @@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev) */ void radeon_gart_fini(struct radeon_device *rdev) { - if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { + if (rdev->gart.ready) { /* unbind pages */ radeon_gart_unbind(rdev, 0, 
rdev->gart.num_cpu_pages); } rdev->gart.ready = false; vfree(rdev->gart.pages); - vfree(rdev->gart.pages_addr); + vfree(rdev->gart.pages_entry); rdev->gart.pages = NULL; - rdev->gart.pages_addr = NULL; + rdev->gart.pages_entry = NULL; radeon_dummy_page_fini(rdev); } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d0b4f7d1140d..ac3c1310b953 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri struct radeon_bo_va *bo_va; int r; - if (rdev->family < CHIP_CAYMAN) { + if ((rdev->family < CHIP_CAYMAN) || + (!rdev->accel_working)) { return 0; } @@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj, struct radeon_bo_va *bo_va; int r; - if (rdev->family < CHIP_CAYMAN) { + if ((rdev->family < CHIP_CAYMAN) || + (!rdev->accel_working)) { return; } diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 7b274205eeaf..061eaa9c19c7 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c @@ -392,7 +392,7 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t hpd_size, uint64_t hpd_gpu_addr) { - uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1; + uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1; uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); lock_srbm(kgd, mec, pipe, 0, 0); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 3cf9c1fa6475..686411e4e4f6 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -605,14 +605,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) return -ENOMEM; } - vm = &fpriv->vm; - r = radeon_vm_init(rdev, vm); - if (r) { - kfree(fpriv); - return r; - } - if (rdev->accel_working) { + vm = &fpriv->vm; + r = radeon_vm_init(rdev, vm); + if (r) { + kfree(fpriv); + return r; + } + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (r) { radeon_vm_fini(rdev, vm); @@ -668,9 +668,9 @@ void radeon_driver_postclose_kms(struct drm_device *dev, radeon_vm_bo_rmv(rdev, vm->ib_bo_va); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); } + radeon_vm_fini(rdev, vm); } - radeon_vm_fini(rdev, vm); kfree(fpriv); file_priv->driver_priv = NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 1d955776f4d0..43e09942823e 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -238,6 +238,18 @@ int radeon_bo_create(struct radeon_device *rdev, * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 */ bo->flags &= ~RADEON_GEM_GTT_WC; +#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) + /* Don't try to enable write-combining when it can't work, or things + * may be slow + * See https://bugs.freedesktop.org/show_bug.cgi?id=88758 + */ + +#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ + thanks to write-combining + + DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " + "better performance thanks to write-combining\n"); + bo->flags &= ~RADEON_GEM_GTT_WC; #endif radeon_ttm_placement_from_domain(bo, domain); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 91e1bd246cad..9f758d39420d 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ 
b/drivers/gpu/drm/radeon/radeon_pm.c @@ -585,7 +585,7 @@ static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev, if (err) return err; - switch(value) { + switch (value) { case 1: /* manual, percent-based */ rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC); break; @@ -608,7 +608,7 @@ static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%i\n", 100); /* pwm uses percent-based fan-control */ + return sprintf(buf, "%i\n", 255); } static ssize_t radeon_hwmon_set_pwm1(struct device *dev, @@ -623,6 +623,8 @@ static ssize_t radeon_hwmon_set_pwm1(struct device *dev, if (err) return err; + value = (value * 100) / 255; + err = rdev->asic->dpm.set_fan_speed_percent(rdev, value); if (err) return err; @@ -642,6 +644,8 @@ static ssize_t radeon_hwmon_get_pwm1(struct device *dev, if (err) return err; + speed = (speed * 255) / 100; + return sprintf(buf, "%i\n", speed); } diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 07b506b41008..791818165c76 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) if (ring == R600_RING_TYPE_DMA_INDEX) fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); else fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); if (IS_ERR(fence)) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); r = PTR_ERR(fence); @@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) if (ring == R600_RING_TYPE_DMA_INDEX) fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); else fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, - NULL); + vram_obj->tbo.resv); if (IS_ERR(fence)) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); r = PTR_ERR(fence); diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index cde48c42b30a..2a5a4a9e772d 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) uint64_t result; /* page table offset */ - result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; - - /* in case cpu page size != gpu page size*/ - result |= addr & (~PAGE_MASK); + result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT]; + result &= ~RADEON_GPU_PAGE_MASK; return result; } @@ -745,9 +743,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev, */ /* NI is optimized for 256KB fragments, SI and newer for 64KB */ - uint64_t frag_flags = rdev->family == CHIP_CAYMAN ? + uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) || + (rdev->family == CHIP_ARUBA)) ? R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB; - uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80; + uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) || + (rdev->family == CHIP_ARUBA)) ? 
0x200 : 0x80; uint64_t frag_start = ALIGN(pe_start, frag_align); uint64_t frag_end = pe_end & ~(frag_align - 1); diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index c5799f16aa4b..34e3235f41d2 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev) #define RS400_PTE_WRITEABLE (1 << 2) #define RS400_PTE_READABLE (1 << 3) -void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) +uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags) { uint32_t entry; - u32 *gtt = rdev->gart.ptr; entry = (lower_32_bits(addr) & PAGE_MASK) | ((upper_32_bits(addr) & 0xff) << 4); @@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, entry |= RS400_PTE_WRITEABLE; if (!(flags & RADEON_GART_PAGE_SNOOP)) entry |= RS400_PTE_UNSNOOPED; - entry = cpu_to_le32(entry); - gtt[i] = entry; + return entry; +} + +void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t entry) +{ + u32 *gtt = rdev->gart.ptr; + gtt[i] = cpu_to_le32(lower_32_bits(entry)); } int rs400_mc_wait_for_idle(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 68f154a451c0..d81182ad53ec 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -626,11 +626,8 @@ static void rs600_gart_fini(struct radeon_device *rdev) radeon_gart_table_vram_free(rdev); } -void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) +uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags) { - void __iomem *ptr = (void *)rdev->gart.ptr; - addr = addr & 0xFFFFFFFFFFFFF000ULL; addr |= R600_PTE_SYSTEM; if (flags & RADEON_GART_PAGE_VALID) @@ -641,7 +638,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, addr |= R600_PTE_WRITEABLE; if (flags & RADEON_GART_PAGE_SNOOP) addr |= R600_PTE_SNOOPED; - writeq(addr, ptr + (i * 8)); + return addr; +} + +void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t entry) +{ + void __iomem *ptr = (void *)rdev->gart.ptr; + writeq(entry, ptr + (i * 8)); } int rs600_irq_set(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index aa7b872b2c43..83207929fc62 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev, for (; ndw > 0; ndw -= 2, --count, pe += 8) { if (flags & R600_PTE_SYSTEM) { value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; } else if (flags & R600_PTE_VALID) { value = addr; } else { diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 2324a526de65..11485a4a16ae 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -1,6 +1,6 @@ config DRM_RCAR_DU tristate "DRM Support for R-Car Display Unit" - depends on DRM && ARM + depends on DRM && ARM && HAVE_DMA_ATTRS depends on ARCH_SHMOBILE || COMPILE_TEST select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 25c7a998fc2c..9e72133bb64b 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -15,6 +15,8 @@ #include <linux/mutex.h> #include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include 
<drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_cma_helper.h> @@ -99,9 +101,13 @@ static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc) clk_disable_unprepare(rcrtc->clock); } +/* ----------------------------------------------------------------------------- + * Hardware Setup + */ + static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) { - const struct drm_display_mode *mode = &rcrtc->crtc.mode; + const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode; unsigned long mode_clock = mode->clock * 1000; unsigned long clk; u32 value; @@ -187,9 +193,19 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc, rcdu->dpad0_source = rcrtc->index; } -void rcar_du_crtc_update_planes(struct drm_crtc *crtc) +static unsigned int plane_zpos(struct rcar_du_plane *plane) +{ + return to_rcar_du_plane_state(plane->plane.state)->zpos; +} + +static const struct rcar_du_format_info * +plane_format(struct rcar_du_plane *plane) +{ + return to_rcar_du_plane_state(plane->plane.state)->format; +} + +static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc) { - struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES]; unsigned int num_planes = 0; unsigned int prio = 0; @@ -201,29 +217,30 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc) struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i]; unsigned int j; - if (plane->crtc != &rcrtc->crtc || !plane->enabled) + if (plane->plane.state->crtc != &rcrtc->crtc) continue; /* Insert the plane in the sorted planes array. */ for (j = num_planes++; j > 0; --j) { - if (planes[j-1]->zpos <= plane->zpos) + if (plane_zpos(planes[j-1]) <= plane_zpos(plane)) break; planes[j] = planes[j-1]; } planes[j] = plane; - prio += plane->format->planes * 4; + prio += plane_format(plane)->planes * 4; } for (i = 0; i < num_planes; ++i) { struct rcar_du_plane *plane = planes[i]; - unsigned int index = plane->hwindex; + struct drm_plane_state *state = plane->plane.state; + unsigned int index = to_rcar_du_plane_state(state)->hwindex; prio -= 4; dspr |= (index + 1) << prio; dptsr |= DPTSR_PnDK(index) | DPTSR_PnTS(index); - if (plane->format->planes == 2) { + if (plane_format(plane)->planes == 2) { index = (index + 1) % 8; prio -= 4; @@ -236,8 +253,6 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc) * with superposition controller 2. */ if (rcrtc->index % 2) { - u32 value = rcar_du_group_read(rcrtc->group, DPTSR); - /* The DPTSR register is updated when the display controller is * stopped. We thus need to restart the DU. Once again, sorry * for the flicker. One way to mitigate the issue would be to @@ -245,29 +260,104 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc) * split, or through a module parameter). Flicker would then * occur only if we need to break the pre-association. */ - if (value != dptsr) { + mutex_lock(&rcrtc->group->lock); + if (rcar_du_group_read(rcrtc->group, DPTSR) != dptsr) { rcar_du_group_write(rcrtc->group, DPTSR, dptsr); if (rcrtc->group->used_crtcs) rcar_du_group_restart(rcrtc->group); } + mutex_unlock(&rcrtc->group->lock); } rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? 
DS2PR : DS1PR, dspr); } +/* ----------------------------------------------------------------------------- + * Page Flip + */ + +void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, + struct drm_file *file) +{ + struct drm_pending_vblank_event *event; + struct drm_device *dev = rcrtc->crtc.dev; + unsigned long flags; + + /* Destroy the pending vertical blanking event associated with the + * pending page flip, if any, and disable vertical blanking interrupts. + */ + spin_lock_irqsave(&dev->event_lock, flags); + event = rcrtc->event; + if (event && event->base.file_priv == file) { + rcrtc->event = NULL; + event->base.destroy(&event->base); + drm_crtc_vblank_put(&rcrtc->crtc); + } + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc) +{ + struct drm_pending_vblank_event *event; + struct drm_device *dev = rcrtc->crtc.dev; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + event = rcrtc->event; + rcrtc->event = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + if (event == NULL) + return; + + spin_lock_irqsave(&dev->event_lock, flags); + drm_send_vblank_event(dev, rcrtc->index, event); + wake_up(&rcrtc->flip_wait); + spin_unlock_irqrestore(&dev->event_lock, flags); + + drm_crtc_vblank_put(&rcrtc->crtc); +} + +static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc) +{ + struct drm_device *dev = rcrtc->crtc.dev; + unsigned long flags; + bool pending; + + spin_lock_irqsave(&dev->event_lock, flags); + pending = rcrtc->event != NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + return pending; +} + +static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc) +{ + struct rcar_du_device *rcdu = rcrtc->group->dev; + + if (wait_event_timeout(rcrtc->flip_wait, + !rcar_du_crtc_page_flip_pending(rcrtc), + msecs_to_jiffies(50))) + return; + + dev_warn(rcdu->dev, "page flip timeout\n"); + + rcar_du_crtc_finish_page_flip(rcrtc); +} + +/* ----------------------------------------------------------------------------- + * Start/Stop and Suspend/Resume + */ + static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) { struct drm_crtc *crtc = &rcrtc->crtc; bool interlaced; - unsigned int i; if (rcrtc->started) return; - if (WARN_ON(rcrtc->plane->format == NULL)) - return; - /* Set display off and background to black */ rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0)); rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0)); @@ -276,20 +366,8 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) rcar_du_crtc_set_display_timing(rcrtc); rcar_du_group_set_routing(rcrtc->group); - mutex_lock(&rcrtc->group->planes.lock); - rcrtc->plane->enabled = true; - rcar_du_crtc_update_planes(crtc); - mutex_unlock(&rcrtc->group->planes.lock); - - /* Setup planes. */ - for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) { - struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i]; - - if (plane->crtc != crtc || !plane->enabled) - continue; - - rcar_du_plane_setup(plane); - } + /* Start with all planes disabled. */ + rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0); /* Select master sync mode. This enables display operation in master * sync mode (with the HSYNC and VSYNC signals configured as outputs and @@ -302,6 +380,9 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) rcar_du_group_start_stop(rcrtc->group, true); + /* Turn vertical blanking interrupt reporting back on. 
*/ + drm_crtc_vblank_on(crtc); + rcrtc->started = true; } @@ -312,10 +393,12 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) if (!rcrtc->started) return; - mutex_lock(&rcrtc->group->planes.lock); - rcrtc->plane->enabled = false; - rcar_du_crtc_update_planes(crtc); - mutex_unlock(&rcrtc->group->planes.lock); + /* Disable vertical blanking interrupt reporting. We first need to wait + * for page flip completion before stopping the CRTC as userspace + * expects page flips to eventually complete. + */ + rcar_du_crtc_wait_page_flip(rcrtc); + drm_crtc_vblank_off(crtc); /* Select switch sync mode. This stops display operation and configures * the HSYNC and VSYNC signals as inputs. @@ -335,196 +418,111 @@ void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc) void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc) { - if (rcrtc->dpms != DRM_MODE_DPMS_ON) + unsigned int i; + + if (!rcrtc->enabled) return; rcar_du_crtc_get(rcrtc); rcar_du_crtc_start(rcrtc); -} - -static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc) -{ - struct drm_crtc *crtc = &rcrtc->crtc; - - rcar_du_plane_compute_base(rcrtc->plane, crtc->primary->fb); - rcar_du_plane_update_base(rcrtc->plane); -} - -static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); - if (mode != DRM_MODE_DPMS_ON) - mode = DRM_MODE_DPMS_OFF; + /* Commit the planes state. */ + for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) { + struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i]; - if (rcrtc->dpms == mode) - return; + if (plane->plane.state->crtc != &rcrtc->crtc) + continue; - if (mode == DRM_MODE_DPMS_ON) { - rcar_du_crtc_get(rcrtc); - rcar_du_crtc_start(rcrtc); - } else { - rcar_du_crtc_stop(rcrtc); - rcar_du_crtc_put(rcrtc); + rcar_du_plane_setup(plane); } - rcrtc->dpms = mode; + rcar_du_crtc_update_planes(rcrtc); } -static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - /* TODO Fixup modes */ - return true; -} +/* ----------------------------------------------------------------------------- + * CRTC Functions + */ -static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc) +static void rcar_du_crtc_enable(struct drm_crtc *crtc) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); - /* We need to access the hardware during mode set, acquire a reference - * to the CRTC. - */ - rcar_du_crtc_get(rcrtc); + if (rcrtc->enabled) + return; - /* Stop the CRTC and release the plane. Force the DPMS mode to off as a - * result. 
- */ - rcar_du_crtc_stop(rcrtc); - rcar_du_plane_release(rcrtc->plane); + rcar_du_crtc_get(rcrtc); + rcar_du_crtc_start(rcrtc); - rcrtc->dpms = DRM_MODE_DPMS_OFF; + rcrtc->enabled = true; } -static int rcar_du_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *old_fb) +static void rcar_du_crtc_disable(struct drm_crtc *crtc) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); - struct rcar_du_device *rcdu = rcrtc->group->dev; - const struct rcar_du_format_info *format; - int ret; - - format = rcar_du_format_info(crtc->primary->fb->pixel_format); - if (format == NULL) { - dev_dbg(rcdu->dev, "mode_set: unsupported format %08x\n", - crtc->primary->fb->pixel_format); - ret = -EINVAL; - goto error; - } - ret = rcar_du_plane_reserve(rcrtc->plane, format); - if (ret < 0) - goto error; - - rcrtc->plane->format = format; - - rcrtc->plane->src_x = x; - rcrtc->plane->src_y = y; - rcrtc->plane->width = mode->hdisplay; - rcrtc->plane->height = mode->vdisplay; + if (!rcrtc->enabled) + return; - rcar_du_plane_compute_base(rcrtc->plane, crtc->primary->fb); + rcar_du_crtc_stop(rcrtc); + rcar_du_crtc_put(rcrtc); + rcrtc->enabled = false; rcrtc->outputs = 0; - - return 0; - -error: - /* There's no rollback/abort operation to clean up in case of error. We - * thus need to release the reference to the CRTC acquired in prepare() - * here. - */ - rcar_du_crtc_put(rcrtc); - return ret; } -static void rcar_du_crtc_mode_commit(struct drm_crtc *crtc) +static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); - - /* We're done, restart the CRTC and set the DPMS mode to on. The - * reference to the DU acquired at prepare() time will thus be released - * by the DPMS handler (possibly called by the disable() handler). 
- */ - rcar_du_crtc_start(rcrtc); - rcrtc->dpms = DRM_MODE_DPMS_ON; + /* TODO Fixup modes */ + return true; } -static int rcar_du_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) +static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc) { + struct drm_pending_vblank_event *event = crtc->state->event; struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); + struct drm_device *dev = rcrtc->crtc.dev; + unsigned long flags; - rcrtc->plane->src_x = x; - rcrtc->plane->src_y = y; + if (event) { + event->pipe = rcrtc->index; - rcar_du_crtc_update_base(rcrtc); + WARN_ON(drm_crtc_vblank_get(crtc) != 0); - return 0; + spin_lock_irqsave(&dev->event_lock, flags); + rcrtc->event = event; + spin_unlock_irqrestore(&dev->event_lock, flags); + } } -static void rcar_du_crtc_disable(struct drm_crtc *crtc) +static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); - rcar_du_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); - rcar_du_plane_release(rcrtc->plane); + rcar_du_crtc_update_planes(rcrtc); } static const struct drm_crtc_helper_funcs crtc_helper_funcs = { - .dpms = rcar_du_crtc_dpms, .mode_fixup = rcar_du_crtc_mode_fixup, - .prepare = rcar_du_crtc_mode_prepare, - .commit = rcar_du_crtc_mode_commit, - .mode_set = rcar_du_crtc_mode_set, - .mode_set_base = rcar_du_crtc_mode_set_base, .disable = rcar_du_crtc_disable, + .enable = rcar_du_crtc_enable, + .atomic_begin = rcar_du_crtc_atomic_begin, + .atomic_flush = rcar_du_crtc_atomic_flush, }; -void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, - struct drm_file *file) -{ - struct drm_pending_vblank_event *event; - struct drm_device *dev = rcrtc->crtc.dev; - unsigned long flags; - - /* Destroy the pending vertical blanking event associated with the - * pending page flip, if any, and disable vertical blanking interrupts. 
- */ - spin_lock_irqsave(&dev->event_lock, flags); - event = rcrtc->event; - if (event && event->base.file_priv == file) { - rcrtc->event = NULL; - event->base.destroy(&event->base); - drm_vblank_put(dev, rcrtc->index); - } - spin_unlock_irqrestore(&dev->event_lock, flags); -} - -static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc) -{ - struct drm_pending_vblank_event *event; - struct drm_device *dev = rcrtc->crtc.dev; - unsigned long flags; - - spin_lock_irqsave(&dev->event_lock, flags); - event = rcrtc->event; - rcrtc->event = NULL; - spin_unlock_irqrestore(&dev->event_lock, flags); - - if (event == NULL) - return; - - spin_lock_irqsave(&dev->event_lock, flags); - drm_send_vblank_event(dev, rcrtc->index, event); - spin_unlock_irqrestore(&dev->event_lock, flags); +static const struct drm_crtc_funcs crtc_funcs = { + .reset = drm_atomic_helper_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; - drm_vblank_put(dev, rcrtc->index); -} +/* ----------------------------------------------------------------------------- + * Interrupt Handling + */ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg) { @@ -544,41 +542,9 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg) return ret; } -static int rcar_du_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) -{ - struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); - struct drm_device *dev = rcrtc->crtc.dev; - unsigned long flags; - - spin_lock_irqsave(&dev->event_lock, flags); - if (rcrtc->event != NULL) { - spin_unlock_irqrestore(&dev->event_lock, flags); - return -EBUSY; - } - spin_unlock_irqrestore(&dev->event_lock, flags); - - crtc->primary->fb = fb; - rcar_du_crtc_update_base(rcrtc); - - if (event) { - event->pipe = rcrtc->index; - drm_vblank_get(dev, rcrtc->index); - spin_lock_irqsave(&dev->event_lock, flags); - rcrtc->event = event; - spin_unlock_irqrestore(&dev->event_lock, flags); - } - - return 0; -} - -static const struct drm_crtc_funcs crtc_funcs = { - .destroy = drm_crtc_cleanup, - .set_config = drm_crtc_helper_set_config, - .page_flip = rcar_du_crtc_page_flip, -}; +/* ----------------------------------------------------------------------------- + * Initialization + */ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index) { @@ -620,20 +586,24 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index) return -EPROBE_DEFER; } + init_waitqueue_head(&rcrtc->flip_wait); + rcrtc->group = rgrp; rcrtc->mmio_offset = mmio_offsets[index]; rcrtc->index = index; - rcrtc->dpms = DRM_MODE_DPMS_OFF; - rcrtc->plane = &rgrp->planes.planes[index % 2]; - - rcrtc->plane->crtc = crtc; + rcrtc->enabled = false; - ret = drm_crtc_init(rcdu->ddev, crtc, &crtc_funcs); + ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, + &rgrp->planes.planes[index % 2].plane, + NULL, &crtc_funcs); if (ret < 0) return ret; drm_crtc_helper_add(crtc, &crtc_helper_funcs); + /* Start with vertical blanking interrupt reporting disabled. */ + drm_crtc_vblank_off(crtc); + /* Register the interrupt handler. 
*/ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) { irq = platform_get_irq(pdev, index); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index d2f89f7d2e5e..5d9aa9b33769 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -15,12 +15,12 @@ #define __RCAR_DU_CRTC_H__ #include <linux/mutex.h> +#include <linux/wait.h> #include <drm/drmP.h> #include <drm/drm_crtc.h> struct rcar_du_group; -struct rcar_du_plane; struct rcar_du_crtc { struct drm_crtc crtc; @@ -32,11 +32,12 @@ struct rcar_du_crtc { bool started; struct drm_pending_vblank_event *event; + wait_queue_head_t flip_wait; + unsigned int outputs; - int dpms; + bool enabled; struct rcar_du_group *group; - struct rcar_du_plane *plane; }; #define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc) @@ -59,6 +60,5 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc); void rcar_du_crtc_route_output(struct drm_crtc *crtc, enum rcar_du_output output); -void rcar_du_crtc_update_planes(struct drm_crtc *crtc); #endif /* __RCAR_DU_CRTC_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index e0d74f821416..1d9e4f8568ae 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -19,6 +19,7 @@ #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/slab.h> +#include <linux/wait.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> @@ -163,6 +164,8 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags) return -ENOMEM; } + init_waitqueue_head(&rcdu->commit.wait); + rcdu->dev = &pdev->dev; rcdu->info = np ? of_match_device(rcar_du_of_table, rcdu->dev)->data : (void *)platform_get_device_id(pdev)->driver_data; @@ -175,17 +178,19 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags) if (IS_ERR(rcdu->mmio)) return PTR_ERR(rcdu->mmio); - /* DRM/KMS objects */ - ret = rcar_du_modeset_init(rcdu); + /* Initialize vertical blanking interrupts handling. Start with vblank + * disabled for all CRTCs. 
+ */ + ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1); if (ret < 0) { - dev_err(&pdev->dev, "failed to initialize DRM/KMS\n"); + dev_err(&pdev->dev, "failed to initialize vblank\n"); goto done; } - /* vblank handling */ - ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1); + /* DRM/KMS objects */ + ret = rcar_du_modeset_init(rcdu); if (ret < 0) { - dev_err(&pdev->dev, "failed to initialize vblank\n"); + dev_err(&pdev->dev, "failed to initialize DRM/KMS\n"); goto done; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index c5b9ea6a7eaa..c7c538dd2e68 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -15,6 +15,7 @@ #define __RCAR_DU_DRV_H__ #include <linux/kernel.h> +#include <linux/wait.h> #include "rcar_du_crtc.h" #include "rcar_du_group.h" @@ -64,6 +65,10 @@ struct rcar_du_device_info { unsigned int num_lvds; }; +#define RCAR_DU_MAX_CRTCS 3 +#define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2) +#define RCAR_DU_MAX_LVDS 2 + struct rcar_du_device { struct device *dev; const struct rcar_du_device_info *info; @@ -73,13 +78,18 @@ struct rcar_du_device { struct drm_device *ddev; struct drm_fbdev_cma *fbdev; - struct rcar_du_crtc crtcs[3]; + struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS]; unsigned int num_crtcs; - struct rcar_du_group groups[2]; + struct rcar_du_group groups[RCAR_DU_MAX_GROUPS]; unsigned int dpad0_source; - struct rcar_du_lvdsenc *lvds[2]; + struct rcar_du_lvdsenc *lvds[RCAR_DU_MAX_LVDS]; + + struct { + wait_queue_head_t wait; + u32 pending; + } commit; }; static inline bool rcar_du_has(struct rcar_du_device *rcdu, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index 279167f783f6..d0ae1e8009c6 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -42,46 +42,40 @@ rcar_du_connector_best_encoder(struct drm_connector *connector) * Encoder */ -static void rcar_du_encoder_dpms(struct drm_encoder *encoder, int mode) +static void rcar_du_encoder_disable(struct drm_encoder *encoder) { struct rcar_du_encoder *renc = to_rcar_encoder(encoder); - if (mode != DRM_MODE_DPMS_ON) - mode = DRM_MODE_DPMS_OFF; + if (renc->lvds) + rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, false); +} + +static void rcar_du_encoder_enable(struct drm_encoder *encoder) +{ + struct rcar_du_encoder *renc = to_rcar_encoder(encoder); if (renc->lvds) - rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, mode); + rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, true); } -static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { struct rcar_du_encoder *renc = to_rcar_encoder(encoder); + struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; + const struct drm_display_mode *mode = &crtc_state->mode; const struct drm_display_mode *panel_mode; + struct drm_connector *connector = conn_state->connector; struct drm_device *dev = encoder->dev; - struct drm_connector *connector; - bool found = false; /* DAC encoders have currently no restriction on the mode. 
*/ if (encoder->encoder_type == DRM_MODE_ENCODER_DAC) - return true; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - found = true; - break; - } - } - - if (!found) { - dev_dbg(dev->dev, "mode_fixup: no connector found\n"); - return false; - } + return 0; if (list_empty(&connector->modes)) { - dev_dbg(dev->dev, "mode_fixup: empty modes list\n"); - return false; + dev_dbg(dev->dev, "encoder: empty modes list\n"); + return -EINVAL; } panel_mode = list_first_entry(&connector->modes, @@ -90,7 +84,7 @@ static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder, /* We're not allowed to modify the resolution. */ if (mode->hdisplay != panel_mode->hdisplay || mode->vdisplay != panel_mode->vdisplay) - return false; + return -EINVAL; /* The flat panel mode is fixed, just copy it to the adjusted mode. */ drm_mode_copy(adjusted_mode, panel_mode); @@ -102,25 +96,7 @@ static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder, adjusted_mode->clock = clamp(adjusted_mode->clock, 30000, 150000); - return true; -} - -static void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder) -{ - struct rcar_du_encoder *renc = to_rcar_encoder(encoder); - - if (renc->lvds) - rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, - DRM_MODE_DPMS_OFF); -} - -static void rcar_du_encoder_mode_commit(struct drm_encoder *encoder) -{ - struct rcar_du_encoder *renc = to_rcar_encoder(encoder); - - if (renc->lvds) - rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, - DRM_MODE_DPMS_ON); + return 0; } static void rcar_du_encoder_mode_set(struct drm_encoder *encoder, @@ -133,11 +109,10 @@ static void rcar_du_encoder_mode_set(struct drm_encoder *encoder, } static const struct drm_encoder_helper_funcs encoder_helper_funcs = { - .dpms = rcar_du_encoder_dpms, - .mode_fixup = rcar_du_encoder_mode_fixup, - .prepare = rcar_du_encoder_mode_prepare, - .commit = rcar_du_encoder_mode_commit, .mode_set = rcar_du_encoder_mode_set, + .disable = rcar_du_encoder_disable, + .enable = rcar_du_encoder_enable, + .atomic_check = rcar_du_encoder_atomic_check, }; static const struct drm_encoder_funcs encoder_funcs = { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h index 0c38cdcda4ca..ed36433fbe84 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h @@ -14,6 +14,8 @@ #ifndef __RCAR_DU_GROUP_H__ #define __RCAR_DU_GROUP_H__ +#include <linux/mutex.h> + #include "rcar_du_plane.h" struct rcar_du_device; @@ -25,6 +27,7 @@ struct rcar_du_device; * @index: group index * @use_count: number of users of the group (rcar_du_group_(get|put)) * @used_crtcs: number of CRTCs currently in use + * @lock: protects the DPTSR register * @planes: planes handled by the group */ struct rcar_du_group { @@ -35,6 +38,8 @@ struct rcar_du_group { unsigned int use_count; unsigned int used_crtcs; + struct mutex lock; + struct rcar_du_planes planes; }; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c index ca94b029ac80..96f2eb43713c 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c @@ -12,6 +12,7 @@ */ #include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_encoder_slave.h> @@ -74,10 +75,13 @@ rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force) } static const struct drm_connector_funcs connector_funcs = { - .dpms = 
drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, + .reset = drm_atomic_helper_connector_reset, .detect = rcar_du_hdmi_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = rcar_du_hdmi_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu, @@ -108,7 +112,7 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu, if (ret < 0) return ret; - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + connector->dpms = DRM_MODE_DPMS_OFF; drm_object_property_set_value(&connector->base, rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); @@ -116,7 +120,6 @@ int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu, if (ret < 0) return ret; - connector->encoder = encoder; rcon->encoder = renc; return 0; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c index 221f0a17fd6a..81da8419282b 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c @@ -26,41 +26,50 @@ struct rcar_du_hdmienc { struct rcar_du_encoder *renc; struct device *dev; - int dpms; + bool enabled; }; #define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi) #define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs) -static void rcar_du_hdmienc_dpms(struct drm_encoder *encoder, int mode) +static void rcar_du_hdmienc_disable(struct drm_encoder *encoder) { struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); - if (mode != DRM_MODE_DPMS_ON) - mode = DRM_MODE_DPMS_OFF; + if (sfuncs->dpms) + sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF); - if (hdmienc->dpms == mode) - return; + if (hdmienc->renc->lvds) + rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc, + false); - if (mode == DRM_MODE_DPMS_ON && hdmienc->renc->lvds) - rcar_du_lvdsenc_dpms(hdmienc->renc->lvds, encoder->crtc, mode); + hdmienc->enabled = false; +} - if (sfuncs->dpms) - sfuncs->dpms(encoder, mode); +static void rcar_du_hdmienc_enable(struct drm_encoder *encoder) +{ + struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); + struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); + + if (hdmienc->renc->lvds) + rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc, + true); - if (mode != DRM_MODE_DPMS_ON && hdmienc->renc->lvds) - rcar_du_lvdsenc_dpms(hdmienc->renc->lvds, encoder->crtc, mode); + if (sfuncs->dpms) + sfuncs->dpms(encoder, DRM_MODE_DPMS_ON); - hdmienc->dpms = mode; + hdmienc->enabled = true; } -static bool rcar_du_hdmienc_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder); + struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; + const struct drm_display_mode *mode = &crtc_state->mode; /* The internal LVDS encoder has a clock frequency operating range of * 30MHz to 150MHz. Clamp the clock accordingly. 
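
The encoder hunks above all follow the same conversion: the boolean .mode_fixup helper becomes an .atomic_check helper that reports failure as a negative error code and takes the connector from the passed-in state instead of walking the connector list. A minimal sketch of that shape (my_encoder_atomic_check is a made-up name; the 30000-150000 kHz clamp mirrors the comment above):

#include <linux/kernel.h>
#include <drm/drm_crtc.h>

static int my_encoder_atomic_check(struct drm_encoder *encoder,
				   struct drm_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state)
{
	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_connector *connector = conn_state->connector;

	/* A connector without any probed mode cannot be driven. */
	if (list_empty(&connector->modes))
		return -EINVAL;

	/* Keep the pixel clock within the encoder's operating range (kHz). */
	adjusted_mode->clock = clamp(adjusted_mode->clock, 30000, 150000);

	return 0;
}
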
@@ -70,19 +79,9 @@ static bool rcar_du_hdmienc_mode_fixup(struct drm_encoder *encoder, 30000, 150000); if (sfuncs->mode_fixup == NULL) - return true; - - return sfuncs->mode_fixup(encoder, mode, adjusted_mode); -} + return 0; -static void rcar_du_hdmienc_mode_prepare(struct drm_encoder *encoder) -{ - rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF); -} - -static void rcar_du_hdmienc_mode_commit(struct drm_encoder *encoder) -{ - rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_ON); + return sfuncs->mode_fixup(encoder, mode, adjusted_mode) ? 0 : -EINVAL; } static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder, @@ -99,18 +98,18 @@ static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder, } static const struct drm_encoder_helper_funcs encoder_helper_funcs = { - .dpms = rcar_du_hdmienc_dpms, - .mode_fixup = rcar_du_hdmienc_mode_fixup, - .prepare = rcar_du_hdmienc_mode_prepare, - .commit = rcar_du_hdmienc_mode_commit, .mode_set = rcar_du_hdmienc_mode_set, + .disable = rcar_du_hdmienc_disable, + .enable = rcar_du_hdmienc_enable, + .atomic_check = rcar_du_hdmienc_atomic_check, }; static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder) { struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF); + if (hdmienc->enabled) + rcar_du_hdmienc_disable(encoder); drm_encoder_cleanup(encoder); put_device(hdmienc->dev); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index cc9136e8ee9c..fb052bca574f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -12,12 +12,15 @@ */ #include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> #include <linux/of_graph.h> +#include <linux/wait.h> #include "rcar_du_crtc.h" #include "rcar_du_drv.h" @@ -185,9 +188,309 @@ static void rcar_du_output_poll_changed(struct drm_device *dev) drm_fbdev_cma_hotplug_event(rcdu->fbdev); } +/* ----------------------------------------------------------------------------- + * Atomic Check and Update + */ + +/* + * Atomic hardware plane allocator + * + * The hardware plane allocator is solely based on the atomic plane states + * without keeping any external state to avoid races between .atomic_check() + * and .atomic_commit(). + * + * The core idea is to avoid using a free planes bitmask that would need to be + * shared between check and commit handlers with a collective knowledge based on + * the allocated hardware plane(s) for each KMS plane. The allocator then loops + * over all plane states to compute the free planes bitmask, allocates hardware + * planes based on that bitmask, and stores the result back in the plane states. + * + * For this to work we need to access the current state of planes not touched by + * the atomic update. To ensure that it won't be modified, we need to lock all + * planes using drm_atomic_get_plane_state(). This effectively serializes atomic + * updates from .atomic_check() up to completion (when swapping the states if + * the check step has succeeded) or rollback (when freeing the states if the + * check step has failed). + * + * Allocation is performed in the .atomic_check() handler and applied + * automatically when the core swaps the old and new states. 
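
As a concrete illustration of the allocation scheme described above (not part of the patch): with hardware planes 0, 1, 3 and 4 in use the free mask is 0xe4, so a one-plane format gets index 2, while a two-plane format, which needs two consecutive indices modulo 8, gets 5 and 6. A standalone version of that search, mirroring the rcar_du_plane_hwalloc() helper the patch adds below:

#include <linux/errno.h>

/* Pick a hardware plane whose bit is set in 'free'; two-plane formats also
 * need the next index (modulo 8) to be free. Returns -EBUSY when nothing fits.
 */
static int hw_plane_alloc(unsigned int num_planes, unsigned int free)
{
	unsigned int i;

	for (i = 0; i < 8; ++i) {
		if (!(free & (1 << i)))
			continue;

		if (num_planes == 1 || (free & (1 << ((i + 1) % 8))))
			return i;
	}

	return -EBUSY;
}
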
+ */ + +static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane, + struct rcar_du_plane_state *state) +{ + const struct rcar_du_format_info *cur_format; + + cur_format = to_rcar_du_plane_state(plane->plane.state)->format; + + /* Lowering the number of planes doesn't strictly require reallocation + * as the extra hardware plane will be freed when committing, but doing + * so could lead to more fragmentation. + */ + return !cur_format || cur_format->planes != state->format->planes; +} + +static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state) +{ + unsigned int mask; + + if (state->hwindex == -1) + return 0; + + mask = 1 << state->hwindex; + if (state->format->planes == 2) + mask |= 1 << ((state->hwindex + 1) % 8); + + return mask; +} + +static int rcar_du_plane_hwalloc(unsigned int num_planes, unsigned int free) +{ + unsigned int i; + + for (i = 0; i < RCAR_DU_NUM_HW_PLANES; ++i) { + if (!(free & (1 << i))) + continue; + + if (num_planes == 1 || free & (1 << ((i + 1) % 8))) + break; + } + + return i == RCAR_DU_NUM_HW_PLANES ? -EBUSY : i; +} + +static int rcar_du_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct rcar_du_device *rcdu = dev->dev_private; + unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, }; + unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, }; + bool needs_realloc = false; + unsigned int groups = 0; + unsigned int i; + int ret; + + ret = drm_atomic_helper_check(dev, state); + if (ret < 0) + return ret; + + /* Check if hardware planes need to be reallocated. */ + for (i = 0; i < dev->mode_config.num_total_plane; ++i) { + struct rcar_du_plane_state *plane_state; + struct rcar_du_plane *plane; + unsigned int index; + + if (!state->planes[i]) + continue; + + plane = to_rcar_plane(state->planes[i]); + plane_state = to_rcar_du_plane_state(state->plane_states[i]); + + /* If the plane is being disabled we don't need to go through + * the full reallocation procedure. Just mark the hardware + * plane(s) as freed. + */ + if (!plane_state->format) { + index = plane - plane->group->planes.planes; + group_freed_planes[plane->group->index] |= 1 << index; + plane_state->hwindex = -1; + continue; + } + + /* If the plane needs to be reallocated mark it as such, and + * mark the hardware plane(s) as free. + */ + if (rcar_du_plane_needs_realloc(plane, plane_state)) { + groups |= 1 << plane->group->index; + needs_realloc = true; + + index = plane - plane->group->planes.planes; + group_freed_planes[plane->group->index] |= 1 << index; + plane_state->hwindex = -1; + } + } + + if (!needs_realloc) + return 0; + + /* Grab all plane states for the groups that need reallocation to ensure + * locking and avoid racy updates. This serializes the update operation, + * but there's not much we can do about it as that's the hardware + * design. + * + * Compute the used planes mask for each group at the same time to avoid + * looping over the planes separately later. + */ + while (groups) { + unsigned int index = ffs(groups) - 1; + struct rcar_du_group *group = &rcdu->groups[index]; + unsigned int used_planes = 0; + + for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) { + struct rcar_du_plane *plane = &group->planes.planes[i]; + struct rcar_du_plane_state *plane_state; + struct drm_plane_state *s; + + s = drm_atomic_get_plane_state(state, &plane->plane); + if (IS_ERR(s)) + return PTR_ERR(s); + + /* If the plane has been freed in the above loop its + * hardware planes must not be added to the used planes + * bitmask. 
However, the current state doesn't reflect + * the free state yet, as we've modified the new state + * above. Use the local freed planes list to check for + * that condition instead. + */ + if (group_freed_planes[index] & (1 << i)) + continue; + + plane_state = to_rcar_du_plane_state(plane->plane.state); + used_planes |= rcar_du_plane_hwmask(plane_state); + } + + group_free_planes[index] = 0xff & ~used_planes; + groups &= ~(1 << index); + } + + /* Reallocate hardware planes for each plane that needs it. */ + for (i = 0; i < dev->mode_config.num_total_plane; ++i) { + struct rcar_du_plane_state *plane_state; + struct rcar_du_plane *plane; + int idx; + + if (!state->planes[i]) + continue; + + plane = to_rcar_plane(state->planes[i]); + plane_state = to_rcar_du_plane_state(state->plane_states[i]); + + /* Skip planes that are being disabled or don't need to be + * reallocated. + */ + if (!plane_state->format || + !rcar_du_plane_needs_realloc(plane, plane_state)) + continue; + + idx = rcar_du_plane_hwalloc(plane_state->format->planes, + group_free_planes[plane->group->index]); + if (idx < 0) { + dev_dbg(rcdu->dev, "%s: no available hardware plane\n", + __func__); + return idx; + } + + plane_state->hwindex = idx; + + group_free_planes[plane->group->index] &= + ~rcar_du_plane_hwmask(plane_state); + } + + return 0; +} + +struct rcar_du_commit { + struct work_struct work; + struct drm_device *dev; + struct drm_atomic_state *state; + u32 crtcs; +}; + +static void rcar_du_atomic_complete(struct rcar_du_commit *commit) +{ + struct drm_device *dev = commit->dev; + struct rcar_du_device *rcdu = dev->dev_private; + struct drm_atomic_state *old_state = commit->state; + + /* Apply the atomic update. */ + drm_atomic_helper_commit_modeset_disables(dev, old_state); + drm_atomic_helper_commit_modeset_enables(dev, old_state); + drm_atomic_helper_commit_planes(dev, old_state); + + drm_atomic_helper_wait_for_vblanks(dev, old_state); + + drm_atomic_helper_cleanup_planes(dev, old_state); + + drm_atomic_state_free(old_state); + + /* Complete the commit, wake up any waiter. */ + spin_lock(&rcdu->commit.wait.lock); + rcdu->commit.pending &= ~commit->crtcs; + wake_up_all_locked(&rcdu->commit.wait); + spin_unlock(&rcdu->commit.wait.lock); + + kfree(commit); +} + +static void rcar_du_atomic_work(struct work_struct *work) +{ + struct rcar_du_commit *commit = + container_of(work, struct rcar_du_commit, work); + + rcar_du_atomic_complete(commit); +} + +static int rcar_du_atomic_commit(struct drm_device *dev, + struct drm_atomic_state *state, bool async) +{ + struct rcar_du_device *rcdu = dev->dev_private; + struct rcar_du_commit *commit; + unsigned int i; + int ret; + + ret = drm_atomic_helper_prepare_planes(dev, state); + if (ret) + return ret; + + /* Allocate the commit object. */ + commit = kzalloc(sizeof(*commit), GFP_KERNEL); + if (commit == NULL) + return -ENOMEM; + + INIT_WORK(&commit->work, rcar_du_atomic_work); + commit->dev = dev; + commit->state = state; + + /* Wait until all affected CRTCs have completed previous commits and + * mark them as pending. 
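
The commit path below serializes commits per CRTC through a pending bitmask guarded by the wait queue's own spinlock, as the comment above describes. A condensed sketch of the acquire/release pairing (my_device and the helper names are stand-ins; the commit.wait and commit.pending fields mirror the ones this patch adds to rcar_du_device):

#include <linux/types.h>
#include <linux/wait.h>

struct my_device {
	struct {
		wait_queue_head_t wait;
		u32 pending;
	} commit;
};

/* Wait until none of 'crtcs' has a commit in flight, then claim them. */
static int my_commit_acquire(struct my_device *mdev, u32 crtcs)
{
	int ret;

	spin_lock(&mdev->commit.wait.lock);
	ret = wait_event_interruptible_locked(mdev->commit.wait,
					      !(mdev->commit.pending & crtcs));
	if (ret == 0)
		mdev->commit.pending |= crtcs;
	spin_unlock(&mdev->commit.wait.lock);

	return ret;
}

/* Call once the commit has been applied; lets the next commit proceed. */
static void my_commit_release(struct my_device *mdev, u32 crtcs)
{
	spin_lock(&mdev->commit.wait.lock);
	mdev->commit.pending &= ~crtcs;
	wake_up_all_locked(&mdev->commit.wait);
	spin_unlock(&mdev->commit.wait.lock);
}
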
+ */ + for (i = 0; i < dev->mode_config.num_crtc; ++i) { + if (state->crtcs[i]) + commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]); + } + + spin_lock(&rcdu->commit.wait.lock); + ret = wait_event_interruptible_locked(rcdu->commit.wait, + !(rcdu->commit.pending & commit->crtcs)); + if (ret == 0) + rcdu->commit.pending |= commit->crtcs; + spin_unlock(&rcdu->commit.wait.lock); + + if (ret) { + kfree(commit); + return ret; + } + + /* Swap the state, this is the point of no return. */ + drm_atomic_helper_swap_state(dev, state); + + if (async) + schedule_work(&commit->work); + else + rcar_du_atomic_complete(commit); + + return 0; +} + +/* ----------------------------------------------------------------------------- + * Initialization + */ + static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = { .fb_create = rcar_du_fb_create, .output_poll_changed = rcar_du_output_poll_changed, + .atomic_check = rcar_du_atomic_check, + .atomic_commit = rcar_du_atomic_commit, }; static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, @@ -392,6 +695,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) for (i = 0; i < num_groups; ++i) { struct rcar_du_group *rgrp = &rcdu->groups[i]; + mutex_init(&rgrp->lock); + rgrp->dev = rcdu; rgrp->mmio_offset = mmio_offsets[i]; rgrp->index = i; @@ -439,27 +744,21 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) encoder->possible_clones = (1 << num_encoders) - 1; } - /* Now that the CRTCs have been initialized register the planes. */ - for (i = 0; i < num_groups; ++i) { - ret = rcar_du_planes_register(&rcdu->groups[i]); - if (ret < 0) - return ret; - } + drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - drm_helper_disable_unused_functions(dev); - - fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc, - dev->mode_config.num_connector); - if (IS_ERR(fbdev)) - return PTR_ERR(fbdev); + if (dev->mode_config.num_connector) { + fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc, + dev->mode_config.num_connector); + if (IS_ERR(fbdev)) + return PTR_ERR(fbdev); -#ifndef CONFIG_FRAMEBUFFER_CONSOLE - drm_fbdev_cma_restore_mode(fbdev); -#endif - - rcdu->fbdev = fbdev; + rcdu->fbdev = fbdev; + } else { + dev_info(rcdu->dev, + "no connector found, disabling fbdev emulation\n"); + } return 0; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c index 6d9811c052c4..0c43032fc693 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c @@ -12,6 +12,7 @@ */ #include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> @@ -74,10 +75,13 @@ rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force) } static const struct drm_connector_funcs connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, + .reset = drm_atomic_helper_connector_reset, .detect = rcar_du_lvds_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = rcar_du_lvds_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, @@ -117,7 +121,7 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, if (ret < 0) return ret; - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + connector->dpms = DRM_MODE_DPMS_OFF; 
drm_object_property_set_value(&connector->base, rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); @@ -125,7 +129,6 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, if (ret < 0) return ret; - connector->encoder = encoder; lvdscon->connector.encoder = renc; return 0; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c index 7cfb48ce1791..85043c5bad03 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c @@ -28,7 +28,7 @@ struct rcar_du_lvdsenc { unsigned int index; void __iomem *mmio; struct clk *clock; - int dpms; + bool enabled; enum rcar_lvds_input input; }; @@ -48,7 +48,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, u32 pllcr; int ret; - if (lvds->dpms == DRM_MODE_DPMS_ON) + if (lvds->enabled) return 0; ret = clk_prepare_enable(lvds->clock); @@ -110,13 +110,13 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, lvdcr0 |= LVDCR0_LVRES; rcar_lvds_write(lvds, LVDCR0, lvdcr0); - lvds->dpms = DRM_MODE_DPMS_ON; + lvds->enabled = true; return 0; } static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds) { - if (lvds->dpms == DRM_MODE_DPMS_OFF) + if (!lvds->enabled) return; rcar_lvds_write(lvds, LVDCR0, 0); @@ -124,13 +124,13 @@ static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds) clk_disable_unprepare(lvds->clock); - lvds->dpms = DRM_MODE_DPMS_OFF; + lvds->enabled = false; } -int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds, - struct drm_crtc *crtc, int mode) +int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds, struct drm_crtc *crtc, + bool enable) { - if (mode == DRM_MODE_DPMS_OFF) { + if (!enable) { rcar_du_lvdsenc_stop(lvds); return 0; } else if (crtc) { @@ -179,7 +179,7 @@ int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu) lvds->dev = rcdu; lvds->index = i; lvds->input = i ? 
RCAR_LVDS_INPUT_DU1 : RCAR_LVDS_INPUT_DU0; - lvds->dpms = DRM_MODE_DPMS_OFF; + lvds->enabled = false; ret = rcar_du_lvdsenc_get_resources(lvds, pdev); if (ret < 0) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h index f65aabda0796..9a6001c07303 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h @@ -28,15 +28,15 @@ enum rcar_lvds_input { #if IS_ENABLED(CONFIG_DRM_RCAR_LVDS) int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu); -int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds, - struct drm_crtc *crtc, int mode); +int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds, + struct drm_crtc *crtc, bool enable); #else static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu) { return 0; } -static inline int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds, - struct drm_crtc *crtc, int mode) +static inline int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds, + struct drm_crtc *crtc, bool enable) { return 0; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index 50f2f2b20d39..35a2f04ab799 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -12,10 +12,12 @@ */ #include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_plane_helper.h> #include "rcar_du_drv.h" #include "rcar_du_kms.h" @@ -26,16 +28,6 @@ #define RCAR_DU_COLORKEY_SOURCE (1 << 24) #define RCAR_DU_COLORKEY_MASK (1 << 24) -struct rcar_du_kms_plane { - struct drm_plane plane; - struct rcar_du_plane *hwplane; -}; - -static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane) -{ - return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane; -} - static u32 rcar_du_plane_read(struct rcar_du_group *rgrp, unsigned int index, u32 reg) { @@ -50,74 +42,31 @@ static void rcar_du_plane_write(struct rcar_du_group *rgrp, data); } -int rcar_du_plane_reserve(struct rcar_du_plane *plane, - const struct rcar_du_format_info *format) -{ - struct rcar_du_group *rgrp = plane->group; - unsigned int i; - int ret = -EBUSY; - - mutex_lock(&rgrp->planes.lock); - - for (i = 0; i < ARRAY_SIZE(rgrp->planes.planes); ++i) { - if (!(rgrp->planes.free & (1 << i))) - continue; - - if (format->planes == 1 || - rgrp->planes.free & (1 << ((i + 1) % 8))) - break; - } - - if (i == ARRAY_SIZE(rgrp->planes.planes)) - goto done; - - rgrp->planes.free &= ~(1 << i); - if (format->planes == 2) - rgrp->planes.free &= ~(1 << ((i + 1) % 8)); - - plane->hwindex = i; - - ret = 0; - -done: - mutex_unlock(&rgrp->planes.lock); - return ret; -} - -void rcar_du_plane_release(struct rcar_du_plane *plane) -{ - struct rcar_du_group *rgrp = plane->group; - - if (plane->hwindex == -1) - return; - - mutex_lock(&rgrp->planes.lock); - rgrp->planes.free |= 1 << plane->hwindex; - if (plane->format->planes == 2) - rgrp->planes.free |= 1 << ((plane->hwindex + 1) % 8); - mutex_unlock(&rgrp->planes.lock); - - plane->hwindex = -1; -} - -void rcar_du_plane_update_base(struct rcar_du_plane *plane) +static void rcar_du_plane_setup_fb(struct rcar_du_plane *plane) { + struct rcar_du_plane_state *state = + to_rcar_du_plane_state(plane->plane.state); + struct drm_framebuffer *fb = plane->plane.state->fb; struct rcar_du_group *rgrp = plane->group; - unsigned int index = plane->hwindex; + unsigned int src_x = state->state.src_x 
>> 16; + unsigned int src_y = state->state.src_y >> 16; + unsigned int index = state->hwindex; + struct drm_gem_cma_object *gem; bool interlaced; u32 mwr; - interlaced = plane->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE; + interlaced = state->state.crtc->state->adjusted_mode.flags + & DRM_MODE_FLAG_INTERLACE; /* Memory pitch (expressed in pixels). Must be doubled for interlaced * operation with 32bpp formats. */ - if (plane->format->planes == 2) - mwr = plane->pitch; + if (state->format->planes == 2) + mwr = fb->pitches[0]; else - mwr = plane->pitch * 8 / plane->format->bpp; + mwr = fb->pitches[0] * 8 / state->format->bpp; - if (interlaced && plane->format->bpp == 32) + if (interlaced && state->format->bpp == 32) mwr *= 2; rcar_du_plane_write(rgrp, index, PnMWR, mwr); @@ -134,42 +83,33 @@ void rcar_du_plane_update_base(struct rcar_du_plane *plane) * require a halved Y position value, in both progressive and interlaced * modes. */ - rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x); - rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y * - (!interlaced && plane->format->bpp == 32 ? 2 : 1)); - rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[0]); + rcar_du_plane_write(rgrp, index, PnSPXR, src_x); + rcar_du_plane_write(rgrp, index, PnSPYR, src_y * + (!interlaced && state->format->bpp == 32 ? 2 : 1)); - if (plane->format->planes == 2) { - index = (index + 1) % 8; - - rcar_du_plane_write(rgrp, index, PnMWR, plane->pitch); + gem = drm_fb_cma_get_gem_obj(fb, 0); + rcar_du_plane_write(rgrp, index, PnDSA0R, gem->paddr + fb->offsets[0]); - rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x); - rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y * - (plane->format->bpp == 16 ? 2 : 1) / 2); - rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[1]); - } -} + if (state->format->planes == 2) { + index = (index + 1) % 8; -void rcar_du_plane_compute_base(struct rcar_du_plane *plane, - struct drm_framebuffer *fb) -{ - struct drm_gem_cma_object *gem; + rcar_du_plane_write(rgrp, index, PnMWR, fb->pitches[0]); - plane->pitch = fb->pitches[0]; + rcar_du_plane_write(rgrp, index, PnSPXR, src_x); + rcar_du_plane_write(rgrp, index, PnSPYR, src_y * + (state->format->bpp == 16 ? 2 : 1) / 2); - gem = drm_fb_cma_get_gem_obj(fb, 0); - plane->dma[0] = gem->paddr + fb->offsets[0]; - - if (plane->format->planes == 2) { gem = drm_fb_cma_get_gem_obj(fb, 1); - plane->dma[1] = gem->paddr + fb->offsets[1]; + rcar_du_plane_write(rgrp, index, PnDSA0R, + gem->paddr + fb->offsets[1]); } } static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, unsigned int index) { + struct rcar_du_plane_state *state = + to_rcar_du_plane_state(plane->plane.state); struct rcar_du_group *rgrp = plane->group; u32 colorkey; u32 pnmr; @@ -183,47 +123,47 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, * For XRGB, set the alpha value to the plane-wide alpha value and * enable alpha-blending regardless of the X bit value. */ - if (plane->format->fourcc != DRM_FORMAT_XRGB1555) + if (state->format->fourcc != DRM_FORMAT_XRGB1555) rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0); else rcar_du_plane_write(rgrp, index, PnALPHAR, - PnALPHAR_ABIT_X | plane->alpha); + PnALPHAR_ABIT_X | state->alpha); - pnmr = PnMR_BM_MD | plane->format->pnmr; + pnmr = PnMR_BM_MD | state->format->pnmr; /* Disable color keying when requested. YUV formats have the * PnMR_SPIM_TP_OFF bit set in their pnmr field, disabling color keying * automatically. 
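
The switch statement below packs the user-supplied 24-bit color key into the destination pixel format. A standalone sketch of the RGB565 case using the same shifts (illustrative only); a key of 0x00ff8040, for instance, packs to 0xfc08:

#include <linux/types.h>

/* Pack an XRGB8888 color key into the RGB565 layout (5-6-5). */
static u16 colorkey_to_rgb565(u32 key)
{
	return ((key & 0xf80000) >> 8) |	/* R: bits 23-19 -> 15-11 */
	       ((key & 0x00fc00) >> 5) |	/* G: bits 15-10 -> 10-5 */
	       ((key & 0x0000f8) >> 3);		/* B: bits 7-3 -> 4-0 */
}
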
*/ - if ((plane->colorkey & RCAR_DU_COLORKEY_MASK) == RCAR_DU_COLORKEY_NONE) + if ((state->colorkey & RCAR_DU_COLORKEY_MASK) == RCAR_DU_COLORKEY_NONE) pnmr |= PnMR_SPIM_TP_OFF; /* For packed YUV formats we need to select the U/V order. */ - if (plane->format->fourcc == DRM_FORMAT_YUYV) + if (state->format->fourcc == DRM_FORMAT_YUYV) pnmr |= PnMR_YCDF_YUYV; rcar_du_plane_write(rgrp, index, PnMR, pnmr); - switch (plane->format->fourcc) { + switch (state->format->fourcc) { case DRM_FORMAT_RGB565: - colorkey = ((plane->colorkey & 0xf80000) >> 8) - | ((plane->colorkey & 0x00fc00) >> 5) - | ((plane->colorkey & 0x0000f8) >> 3); + colorkey = ((state->colorkey & 0xf80000) >> 8) + | ((state->colorkey & 0x00fc00) >> 5) + | ((state->colorkey & 0x0000f8) >> 3); rcar_du_plane_write(rgrp, index, PnTC2R, colorkey); break; case DRM_FORMAT_ARGB1555: case DRM_FORMAT_XRGB1555: - colorkey = ((plane->colorkey & 0xf80000) >> 9) - | ((plane->colorkey & 0x00f800) >> 6) - | ((plane->colorkey & 0x0000f8) >> 3); + colorkey = ((state->colorkey & 0xf80000) >> 9) + | ((state->colorkey & 0x00f800) >> 6) + | ((state->colorkey & 0x0000f8) >> 3); rcar_du_plane_write(rgrp, index, PnTC2R, colorkey); break; case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: rcar_du_plane_write(rgrp, index, PnTC3R, - PnTC3R_CODE | (plane->colorkey & 0xffffff)); + PnTC3R_CODE | (state->colorkey & 0xffffff)); break; } } @@ -231,6 +171,8 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane, static void __rcar_du_plane_setup(struct rcar_du_plane *plane, unsigned int index) { + struct rcar_du_plane_state *state = + to_rcar_du_plane_state(plane->plane.state); struct rcar_du_group *rgrp = plane->group; u32 ddcr2 = PnDDCR2_CODE; u32 ddcr4; @@ -242,17 +184,17 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane, */ ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4); ddcr4 &= ~PnDDCR4_EDF_MASK; - ddcr4 |= plane->format->edf | PnDDCR4_CODE; + ddcr4 |= state->format->edf | PnDDCR4_CODE; rcar_du_plane_setup_mode(plane, index); - if (plane->format->planes == 2) { - if (plane->hwindex != index) { - if (plane->format->fourcc == DRM_FORMAT_NV12 || - plane->format->fourcc == DRM_FORMAT_NV21) + if (state->format->planes == 2) { + if (state->hwindex != index) { + if (state->format->fourcc == DRM_FORMAT_NV12 || + state->format->fourcc == DRM_FORMAT_NV21) ddcr2 |= PnDDCR2_Y420; - if (plane->format->fourcc == DRM_FORMAT_NV21) + if (state->format->fourcc == DRM_FORMAT_NV21) ddcr2 |= PnDDCR2_NV21; ddcr2 |= PnDDCR2_DIVU; @@ -265,10 +207,10 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane, rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4); /* Destination position and size */ - rcar_du_plane_write(rgrp, index, PnDSXR, plane->width); - rcar_du_plane_write(rgrp, index, PnDSYR, plane->height); - rcar_du_plane_write(rgrp, index, PnDPXR, plane->dst_x); - rcar_du_plane_write(rgrp, index, PnDPYR, plane->dst_y); + rcar_du_plane_write(rgrp, index, PnDSXR, plane->plane.state->crtc_w); + rcar_du_plane_write(rgrp, index, PnDSYR, plane->plane.state->crtc_h); + rcar_du_plane_write(rgrp, index, PnDPXR, plane->plane.state->crtc_x); + rcar_du_plane_write(rgrp, index, PnDPYR, plane->plane.state->crtc_y); /* Wrap-around and blinking, disabled */ rcar_du_plane_write(rgrp, index, PnWASPR, 0); @@ -279,150 +221,140 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane, void rcar_du_plane_setup(struct rcar_du_plane *plane) { - __rcar_du_plane_setup(plane, plane->hwindex); - if (plane->format->planes == 2) - __rcar_du_plane_setup(plane, 
(plane->hwindex + 1) % 8); + struct rcar_du_plane_state *state = + to_rcar_du_plane_state(plane->plane.state); + + __rcar_du_plane_setup(plane, state->hwindex); + if (state->format->planes == 2) + __rcar_du_plane_setup(plane, (state->hwindex + 1) % 8); - rcar_du_plane_update_base(plane); + rcar_du_plane_setup_fb(plane); } -static int -rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h) +static int rcar_du_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) { + struct rcar_du_plane_state *rstate = to_rcar_du_plane_state(state); struct rcar_du_plane *rplane = to_rcar_plane(plane); struct rcar_du_device *rcdu = rplane->group->dev; - const struct rcar_du_format_info *format; - unsigned int nplanes; - int ret; - format = rcar_du_format_info(fb->pixel_format); - if (format == NULL) { - dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__, - fb->pixel_format); - return -EINVAL; + if (!state->fb || !state->crtc) { + rstate->format = NULL; + return 0; } - if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) { + if (state->src_w >> 16 != state->crtc_w || + state->src_h >> 16 != state->crtc_h) { dev_dbg(rcdu->dev, "%s: scaling not supported\n", __func__); return -EINVAL; } - nplanes = rplane->format ? rplane->format->planes : 0; - - /* Reallocate hardware planes if the number of required planes has - * changed. - */ - if (format->planes != nplanes) { - rcar_du_plane_release(rplane); - ret = rcar_du_plane_reserve(rplane, format); - if (ret < 0) - return ret; + rstate->format = rcar_du_format_info(state->fb->pixel_format); + if (rstate->format == NULL) { + dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__, + state->fb->pixel_format); + return -EINVAL; } - rplane->crtc = crtc; - rplane->format = format; - - rplane->src_x = src_x >> 16; - rplane->src_y = src_y >> 16; - rplane->dst_x = crtc_x; - rplane->dst_y = crtc_y; - rplane->width = crtc_w; - rplane->height = crtc_h; - - rcar_du_plane_compute_base(rplane, fb); - rcar_du_plane_setup(rplane); - - mutex_lock(&rplane->group->planes.lock); - rplane->enabled = true; - rcar_du_crtc_update_planes(rplane->crtc); - mutex_unlock(&rplane->group->planes.lock); - return 0; } -static int rcar_du_plane_disable(struct drm_plane *plane) +static void rcar_du_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct rcar_du_plane *rplane = to_rcar_plane(plane); - if (!rplane->enabled) - return 0; + if (plane->state->crtc) + rcar_du_plane_setup(rplane); +} - mutex_lock(&rplane->group->planes.lock); - rplane->enabled = false; - rcar_du_crtc_update_planes(rplane->crtc); - mutex_unlock(&rplane->group->planes.lock); +static const struct drm_plane_helper_funcs rcar_du_plane_helper_funcs = { + .atomic_check = rcar_du_plane_atomic_check, + .atomic_update = rcar_du_plane_atomic_update, +}; - rcar_du_plane_release(rplane); +static void rcar_du_plane_reset(struct drm_plane *plane) +{ + struct rcar_du_plane_state *state; - rplane->crtc = NULL; - rplane->format = NULL; + if (plane->state && plane->state->fb) + drm_framebuffer_unreference(plane->state->fb); - return 0; -} + kfree(plane->state); + plane->state = NULL; -/* Both the .set_property and the .update_plane operations are called with the - * mode_config lock held. There is this no need to explicitly protect access to - * the alpha and colorkey fields and the mode register. 
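
With the atomic conversion, the per-plane alpha, color key and z-position move out of rcar_du_plane into driver-specific plane state managed by the reset, duplicate and destroy hooks added further down. A minimal sketch of that subclassing pattern (my_plane_state and its helpers are made-up names; the framebuffer referencing follows the patch's duplicate handler):

#include <linux/slab.h>
#include <drm/drm_crtc.h>

struct my_plane_state {
	struct drm_plane_state state;	/* embedded base state */
	int hwindex;
	unsigned int alpha;
};

#define to_my_plane_state(s) container_of(s, struct my_plane_state, state)

static struct drm_plane_state *
my_plane_atomic_duplicate_state(struct drm_plane *plane)
{
	struct my_plane_state *copy;

	copy = kmemdup(to_my_plane_state(plane->state), sizeof(*copy),
		       GFP_KERNEL);
	if (copy == NULL)
		return NULL;

	/* The duplicated state takes its own framebuffer reference. */
	if (copy->state.fb)
		drm_framebuffer_reference(copy->state.fb);

	return &copy->state;
}

static void my_plane_atomic_destroy_state(struct drm_plane *plane,
					  struct drm_plane_state *state)
{
	kfree(to_my_plane_state(state));
}
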
- */ -static void rcar_du_plane_set_alpha(struct rcar_du_plane *plane, u32 alpha) -{ - if (plane->alpha == alpha) + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state == NULL) return; - plane->alpha = alpha; - if (!plane->enabled || plane->format->fourcc != DRM_FORMAT_XRGB1555) - return; + state->hwindex = -1; + state->alpha = 255; + state->colorkey = RCAR_DU_COLORKEY_NONE; + state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1; - rcar_du_plane_setup_mode(plane, plane->hwindex); + plane->state = &state->state; + plane->state->plane = plane; } -static void rcar_du_plane_set_colorkey(struct rcar_du_plane *plane, - u32 colorkey) +static struct drm_plane_state * +rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane) { - if (plane->colorkey == colorkey) - return; + struct rcar_du_plane_state *state; + struct rcar_du_plane_state *copy; - plane->colorkey = colorkey; - if (!plane->enabled) - return; + state = to_rcar_du_plane_state(plane->state); + copy = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (copy == NULL) + return NULL; + + if (copy->state.fb) + drm_framebuffer_reference(copy->state.fb); - rcar_du_plane_setup_mode(plane, plane->hwindex); + return ©->state; } -static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane, - unsigned int zpos) +static void rcar_du_plane_atomic_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) { - mutex_lock(&plane->group->planes.lock); - if (plane->zpos == zpos) - goto done; + kfree(to_rcar_du_plane_state(state)); +} - plane->zpos = zpos; - if (!plane->enabled) - goto done; +static int rcar_du_plane_atomic_set_property(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val) +{ + struct rcar_du_plane_state *rstate = to_rcar_du_plane_state(state); + struct rcar_du_plane *rplane = to_rcar_plane(plane); + struct rcar_du_group *rgrp = rplane->group; - rcar_du_crtc_update_planes(plane->crtc); + if (property == rgrp->planes.alpha) + rstate->alpha = val; + else if (property == rgrp->planes.colorkey) + rstate->colorkey = val; + else if (property == rgrp->planes.zpos) + rstate->zpos = val; + else + return -EINVAL; -done: - mutex_unlock(&plane->group->planes.lock); + return 0; } -static int rcar_du_plane_set_property(struct drm_plane *plane, - struct drm_property *property, - uint64_t value) +static int rcar_du_plane_atomic_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, struct drm_property *property, + uint64_t *val) { + const struct rcar_du_plane_state *rstate = + container_of(state, const struct rcar_du_plane_state, state); struct rcar_du_plane *rplane = to_rcar_plane(plane); struct rcar_du_group *rgrp = rplane->group; if (property == rgrp->planes.alpha) - rcar_du_plane_set_alpha(rplane, value); + *val = rstate->alpha; else if (property == rgrp->planes.colorkey) - rcar_du_plane_set_colorkey(rplane, value); + *val = rstate->colorkey; else if (property == rgrp->planes.zpos) - rcar_du_plane_set_zpos(rplane, value); + *val = rstate->zpos; else return -EINVAL; @@ -430,10 +362,15 @@ static int rcar_du_plane_set_property(struct drm_plane *plane, } static const struct drm_plane_funcs rcar_du_plane_funcs = { - .update_plane = rcar_du_plane_update, - .disable_plane = rcar_du_plane_disable, - .set_property = rcar_du_plane_set_property, + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .reset = rcar_du_plane_reset, + .set_property = drm_atomic_helper_plane_set_property, .destroy = 
drm_plane_cleanup, + .atomic_duplicate_state = rcar_du_plane_atomic_duplicate_state, + .atomic_destroy_state = rcar_du_plane_atomic_destroy_state, + .atomic_set_property = rcar_du_plane_atomic_set_property, + .atomic_get_property = rcar_du_plane_atomic_get_property, }; static const uint32_t formats[] = { @@ -453,10 +390,11 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp) { struct rcar_du_planes *planes = &rgrp->planes; struct rcar_du_device *rcdu = rgrp->dev; + unsigned int num_planes; + unsigned int num_crtcs; + unsigned int crtcs; unsigned int i; - - mutex_init(&planes->lock); - planes->free = 0xff; + int ret; planes->alpha = drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255); @@ -478,45 +416,34 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp) if (planes->zpos == NULL) return -ENOMEM; - for (i = 0; i < ARRAY_SIZE(planes->planes); ++i) { - struct rcar_du_plane *plane = &planes->planes[i]; - - plane->group = rgrp; - plane->hwindex = -1; - plane->alpha = 255; - plane->colorkey = RCAR_DU_COLORKEY_NONE; - plane->zpos = 0; - } - - return 0; -} - -int rcar_du_planes_register(struct rcar_du_group *rgrp) -{ - struct rcar_du_planes *planes = &rgrp->planes; - struct rcar_du_device *rcdu = rgrp->dev; - unsigned int crtcs; - unsigned int i; - int ret; + /* Create one primary plane per in this group CRTC and seven overlay + * planes. + */ + num_crtcs = min(rcdu->num_crtcs - 2 * rgrp->index, 2U); + num_planes = num_crtcs + 7; crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index)); - for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) { - struct rcar_du_kms_plane *plane; - - plane = devm_kzalloc(rcdu->dev, sizeof(*plane), GFP_KERNEL); - if (plane == NULL) - return -ENOMEM; + for (i = 0; i < num_planes; ++i) { + enum drm_plane_type type = i < num_crtcs + ? DRM_PLANE_TYPE_PRIMARY + : DRM_PLANE_TYPE_OVERLAY; + struct rcar_du_plane *plane = &planes->planes[i]; - plane->hwplane = &planes->planes[i + 2]; - plane->hwplane->zpos = 1; + plane->group = rgrp; - ret = drm_plane_init(rcdu->ddev, &plane->plane, crtcs, - &rcar_du_plane_funcs, formats, - ARRAY_SIZE(formats), false); + ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs, + &rcar_du_plane_funcs, formats, + ARRAY_SIZE(formats), type); if (ret < 0) return ret; + drm_plane_helper_add(&plane->plane, + &rcar_du_plane_helper_funcs); + + if (type == DRM_PLANE_TYPE_PRIMARY) + continue; + drm_object_attach_property(&plane->plane.base, planes->alpha, 255); drm_object_attach_property(&plane->plane.base, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h index 3021288b1a89..abff0ebeb195 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h @@ -14,68 +14,57 @@ #ifndef __RCAR_DU_PLANE_H__ #define __RCAR_DU_PLANE_H__ -#include <linux/mutex.h> - #include <drm/drmP.h> #include <drm/drm_crtc.h> struct rcar_du_format_info; struct rcar_du_group; -/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As - * using KMS planes requires at least one of the CRTCs being enabled, no more - * than 7 KMS planes can be available. We thus create 7 KMS planes and - * 9 software planes (one for each KMS planes and one for each CRTC). +/* The RCAR DU has 8 hardware planes, shared between primary and overlay planes. + * As using overlay planes requires at least one of the CRTCs being enabled, no + * more than 7 overlay planes can be available. We thus create 1 primary plane + * per CRTC and 7 overlay planes, for a total of up to 9 KMS planes. 
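
For the plane budget described in the comment above, each group drives at most two CRTCs, so it exposes min(CRTCs left for the group, 2) primary planes plus seven overlays. A one-function restatement of that count (hypothetical helper, mirroring the arithmetic in rcar_du_planes_init() above):

#include <linux/kernel.h>

/* KMS planes exposed by group 'index' of a device with 'num_crtcs' CRTCs. */
static unsigned int group_num_planes(unsigned int num_crtcs, unsigned int index)
{
	unsigned int crtcs_in_group = min(num_crtcs - 2 * index, 2U);

	return crtcs_in_group + 7;	/* at most 2 + 7 = 9 */
}
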
*/ - -#define RCAR_DU_NUM_KMS_PLANES 7 +#define RCAR_DU_NUM_KMS_PLANES 9 #define RCAR_DU_NUM_HW_PLANES 8 -#define RCAR_DU_NUM_SW_PLANES 9 struct rcar_du_plane { + struct drm_plane plane; struct rcar_du_group *group; - struct drm_crtc *crtc; - - bool enabled; - - int hwindex; /* 0-based, -1 means unused */ - unsigned int alpha; - unsigned int colorkey; - unsigned int zpos; - - const struct rcar_du_format_info *format; - - unsigned long dma[2]; - unsigned int pitch; - - unsigned int width; - unsigned int height; - - unsigned int src_x; - unsigned int src_y; - unsigned int dst_x; - unsigned int dst_y; }; +static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane) +{ + return container_of(plane, struct rcar_du_plane, plane); +} + struct rcar_du_planes { - struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES]; - unsigned int free; - struct mutex lock; + struct rcar_du_plane planes[RCAR_DU_NUM_KMS_PLANES]; struct drm_property *alpha; struct drm_property *colorkey; struct drm_property *zpos; }; +struct rcar_du_plane_state { + struct drm_plane_state state; + + const struct rcar_du_format_info *format; + int hwindex; /* 0-based, -1 means unused */ + + unsigned int alpha; + unsigned int colorkey; + unsigned int zpos; +}; + +static inline struct rcar_du_plane_state * +to_rcar_du_plane_state(struct drm_plane_state *state) +{ + return container_of(state, struct rcar_du_plane_state, state); +} + int rcar_du_planes_init(struct rcar_du_group *rgrp); -int rcar_du_planes_register(struct rcar_du_group *rgrp); void rcar_du_plane_setup(struct rcar_du_plane *plane); -void rcar_du_plane_update_base(struct rcar_du_plane *plane); -void rcar_du_plane_compute_base(struct rcar_du_plane *plane, - struct drm_framebuffer *fb); -int rcar_du_plane_reserve(struct rcar_du_plane *plane, - const struct rcar_du_format_info *format); -void rcar_du_plane_release(struct rcar_du_plane *plane); #endif /* __RCAR_DU_PLANE_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c index 9d4879921cc7..e0a5d8f93963 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c @@ -12,6 +12,7 @@ */ #include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> @@ -43,10 +44,13 @@ rcar_du_vga_connector_detect(struct drm_connector *connector, bool force) } static const struct drm_connector_funcs connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, + .reset = drm_atomic_helper_connector_reset, .detect = rcar_du_vga_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = rcar_du_vga_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, @@ -76,7 +80,7 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, if (ret < 0) return ret; - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + connector->dpms = DRM_MODE_DPMS_OFF; drm_object_property_set_value(&connector->base, rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); @@ -84,7 +88,6 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, if (ret < 0) return ret; - connector->encoder = encoder; rcon->encoder = renc; return 0; diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index cb21e3821244..35215f6867d3 100644 --- 
a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -1,6 +1,7 @@ config DRM_ROCKCHIP tristate "DRM Support for Rockchip" depends on DRM && ROCKCHIP_IOMMU + depends on RESET_CONTROLLER select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig index a50fe0eeaa0d..b9202aa6f8ab 100644 --- a/drivers/gpu/drm/shmobile/Kconfig +++ b/drivers/gpu/drm/shmobile/Kconfig @@ -1,8 +1,10 @@ config DRM_SHMOBILE tristate "DRM Support for SH Mobile" - depends on DRM && ARM + depends on DRM && ARM && HAVE_DMA_ATTRS depends on ARCH_SHMOBILE || COMPILE_TEST + depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM select BACKLIGHT_CLASS_DEVICE + select BACKLIGHT_LCD_SUPPORT select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select DRM_KMS_CMA_HELPER diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig index d6d6b705b8c1..fbccc105819b 100644 --- a/drivers/gpu/drm/sti/Kconfig +++ b/drivers/gpu/drm/sti/Kconfig @@ -1,10 +1,11 @@ config DRM_STI tristate "DRM Support for STMicroelectronics SoC stiH41x Series" - depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) + depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS select RESET_CONTROLLER select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER + select DRM_PANEL select FW_LOADER_USER_HELPER_FALLBACK help Choose this option to enable DRM on STM stiH41x chipset diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c index 9fde3ee8b1a5..6029a2e3db1d 100644 --- a/drivers/gpu/drm/sti/sti_awg_utils.c +++ b/drivers/gpu/drm/sti/sti_awg_utils.c @@ -60,8 +60,6 @@ static int awg_generate_instr(enum opcode opcode, * pixel. 
So we transform SKIP into SET * instruction */ opcode = SET; - arg = (arg << 24) >> 24; - arg &= (0x0ff); break; } diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index 651afad21f92..aeb5070c8363 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -91,6 +91,7 @@ struct sti_dvo { struct dvo_config *config; bool enabled; struct drm_encoder *encoder; + struct drm_bridge *bridge; }; struct sti_dvo_connector { @@ -272,19 +273,12 @@ static void sti_dvo_bridge_nope(struct drm_bridge *bridge) /* do nothing */ } -static void sti_dvo_brigde_destroy(struct drm_bridge *bridge) -{ - drm_bridge_cleanup(bridge); - kfree(bridge); -} - static const struct drm_bridge_funcs sti_dvo_bridge_funcs = { .pre_enable = sti_dvo_pre_enable, .enable = sti_dvo_bridge_nope, .disable = sti_dvo_disable, .post_disable = sti_dvo_bridge_nope, .mode_set = sti_dvo_set_mode, - .destroy = sti_dvo_brigde_destroy, }; static int sti_dvo_connector_get_modes(struct drm_connector *connector) @@ -416,8 +410,21 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data) return -ENOMEM; bridge->driver_private = dvo; - drm_bridge_init(drm_dev, bridge, &sti_dvo_bridge_funcs); + bridge->funcs = &sti_dvo_bridge_funcs; + bridge->of_node = dvo->dev.of_node; + err = drm_bridge_add(bridge); + if (err) { + DRM_ERROR("Failed to add bridge\n"); + return err; + } + err = drm_bridge_attach(drm_dev, bridge); + if (err) { + DRM_ERROR("Failed to attach bridge\n"); + return err; + } + + dvo->bridge = bridge; encoder->bridge = bridge; connector->encoder = encoder; dvo->encoder = encoder; @@ -446,7 +453,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data) err_sysfs: drm_connector_unregister(drm_connector); err_connector: - drm_bridge_cleanup(bridge); + drm_bridge_remove(bridge); drm_connector_cleanup(drm_connector); return -EINVAL; } @@ -454,7 +461,9 @@ err_connector: static void sti_dvo_unbind(struct device *dev, struct device *master, void *data) { - /* do nothing */ + struct sti_dvo *dvo = dev_get_drvdata(dev); + + drm_bridge_remove(dvo->bridge); } static const struct component_ops sti_dvo_ops = { diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 32448d1d1e8f..087906fd8846 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -14,15 +14,19 @@ #include "sti_layer.h" #include "sti_vtg.h" +#define ALPHASWITCH BIT(6) #define ENA_COLOR_FILL BIT(8) +#define BIGNOTLITTLE BIT(23) #define WAIT_NEXT_VSYNC BIT(31) /* GDP color formats */ #define GDP_RGB565 0x00 #define GDP_RGB888 0x01 #define GDP_RGB888_32 0x02 +#define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH) #define GDP_ARGB8565 0x04 #define GDP_ARGB8888 0x05 +#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH) #define GDP_ARGB1555 0x06 #define GDP_ARGB4444 0x07 #define GDP_CLUT8 0x0B @@ -103,7 +107,9 @@ struct sti_gdp { static const uint32_t gdp_supported_formats[] = { DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB4444, DRM_FORMAT_ARGB1555, DRM_FORMAT_RGB565, @@ -129,8 +135,12 @@ static int sti_gdp_fourcc2format(int fourcc) switch (fourcc) { case DRM_FORMAT_XRGB8888: return GDP_RGB888_32; + case DRM_FORMAT_XBGR8888: + return GDP_XBGR8888; case DRM_FORMAT_ARGB8888: return GDP_ARGB8888; + case DRM_FORMAT_ABGR8888: + return GDP_ABGR8888; case DRM_FORMAT_ARGB4444: return GDP_ARGB4444; case DRM_FORMAT_ARGB1555: @@ -157,6 +167,7 @@ static int 
sti_gdp_get_alpharange(int format) case GDP_ARGB8565: case GDP_ARGB8888: case GDP_AYCBR8888: + case GDP_ABGR8888: return GAM_GDP_ALPHARANGE_255; } return 0; diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 2ae9a9b73666..a9bbb081ecad 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -508,19 +508,12 @@ static void sti_hda_bridge_nope(struct drm_bridge *bridge) /* do nothing */ } -static void sti_hda_brigde_destroy(struct drm_bridge *bridge) -{ - drm_bridge_cleanup(bridge); - kfree(bridge); -} - static const struct drm_bridge_funcs sti_hda_bridge_funcs = { .pre_enable = sti_hda_pre_enable, .enable = sti_hda_bridge_nope, .disable = sti_hda_disable, .post_disable = sti_hda_bridge_nope, .mode_set = sti_hda_set_mode, - .destroy = sti_hda_brigde_destroy, }; static int sti_hda_connector_get_modes(struct drm_connector *connector) @@ -664,7 +657,8 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data) return -ENOMEM; bridge->driver_private = hda; - drm_bridge_init(drm_dev, bridge, &sti_hda_bridge_funcs); + bridge->funcs = &sti_hda_bridge_funcs; + drm_bridge_attach(drm_dev, bridge); encoder->bridge = bridge; connector->encoder = encoder; @@ -693,7 +687,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data) err_sysfs: drm_connector_unregister(drm_connector); err_connector: - drm_bridge_cleanup(bridge); drm_connector_cleanup(drm_connector); return -EINVAL; } diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index d032e024b0b8..1485ade98710 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -42,8 +42,17 @@ #define HDMI_SW_DI_1_PKT_WORD5 0x0228 #define HDMI_SW_DI_1_PKT_WORD6 0x022C #define HDMI_SW_DI_CFG 0x0230 +#define HDMI_SW_DI_2_HEAD_WORD 0x0600 +#define HDMI_SW_DI_2_PKT_WORD0 0x0604 +#define HDMI_SW_DI_2_PKT_WORD1 0x0608 +#define HDMI_SW_DI_2_PKT_WORD2 0x060C +#define HDMI_SW_DI_2_PKT_WORD3 0x0610 +#define HDMI_SW_DI_2_PKT_WORD4 0x0614 +#define HDMI_SW_DI_2_PKT_WORD5 0x0618 +#define HDMI_SW_DI_2_PKT_WORD6 0x061C #define HDMI_IFRAME_SLOT_AVI 1 +#define HDMI_IFRAME_SLOT_AUDIO 2 #define XCAT(prefix, x, suffix) prefix ## x ## suffix #define HDMI_SW_DI_N_HEAD_WORD(x) XCAT(HDMI_SW_DI_, x, _HEAD_WORD) @@ -99,6 +108,10 @@ #define HDMI_STA_SW_RST BIT(1) +#define HDMI_INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0) +#define HDMI_INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8) +#define HDMI_INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16) + struct sti_hdmi_connector { struct drm_connector drm_connector; struct drm_encoder *encoder; @@ -228,6 +241,90 @@ static void hdmi_config(struct sti_hdmi *hdmi) } /** + * Helper to concatenate infoframe in 32 bits word + * + * @ptr: pointer on the hdmi internal structure + * @data: infoframe to write + * @size: size to write + */ +static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size) +{ + unsigned long value = 0; + size_t i; + + for (i = size; i > 0; i--) + value = (value << 8) | ptr[i - 1]; + + return value; +} + +/** + * Helper to write info frame + * + * @hdmi: pointer on the hdmi internal structure + * @data: infoframe to write + * @size: size to write + */ +static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi, const u8 *data) +{ + const u8 *ptr = data; + u32 val, slot, mode, i; + u32 head_offset, pack_offset; + size_t size; + + switch (*ptr) { + case HDMI_INFOFRAME_TYPE_AVI: + slot = HDMI_IFRAME_SLOT_AVI; + mode = HDMI_IFRAME_FIELD; + head_offset = 
HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI); + pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI); + size = HDMI_AVI_INFOFRAME_SIZE; + break; + + case HDMI_INFOFRAME_TYPE_AUDIO: + slot = HDMI_IFRAME_SLOT_AUDIO; + mode = HDMI_IFRAME_FRAME; + head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AUDIO); + pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AUDIO); + size = HDMI_AUDIO_INFOFRAME_SIZE; + break; + + default: + DRM_ERROR("unsupported infoframe type: %#x\n", *ptr); + return; + } + + /* Disable transmission slot for updated infoframe */ + val = hdmi_read(hdmi, HDMI_SW_DI_CFG); + val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, slot); + hdmi_write(hdmi, val, HDMI_SW_DI_CFG); + + val = HDMI_INFOFRAME_HEADER_TYPE(*ptr++); + val |= HDMI_INFOFRAME_HEADER_VERSION(*ptr++); + val |= HDMI_INFOFRAME_HEADER_LEN(*ptr++); + writel(val, hdmi->regs + head_offset); + + /* + * Each subpack contains 4 bytes + * The First Bytes of the first subpacket must contain the checksum + * Packet size in increase by one. + */ + for (i = 0; i < size; i += sizeof(u32)) { + size_t num; + + num = min_t(size_t, size - i, sizeof(u32)); + val = hdmi_infoframe_subpack(ptr, num); + ptr += sizeof(u32); + writel(val, hdmi->regs + pack_offset + i); + } + + /* Enable transmission slot for updated infoframe */ + val = hdmi_read(hdmi, HDMI_SW_DI_CFG); + val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, slot); + hdmi_write(hdmi, val, HDMI_SW_DI_CFG); +} + +/** * Prepare and configure the AVI infoframe * * AVI infoframe are transmitted at least once per two video field and @@ -243,8 +340,6 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi) struct drm_display_mode *mode = &hdmi->mode; struct hdmi_avi_infoframe infoframe; u8 buffer[HDMI_INFOFRAME_SIZE(AVI)]; - u8 *frame = buffer + HDMI_INFOFRAME_HEADER_SIZE; - u32 val; int ret; DRM_DEBUG_DRIVER("\n"); @@ -266,47 +361,43 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi) return ret; } - /* Disable transmission slot for AVI infoframe */ - val = hdmi_read(hdmi, HDMI_SW_DI_CFG); - val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, HDMI_IFRAME_SLOT_AVI); - hdmi_write(hdmi, val, HDMI_SW_DI_CFG); + hdmi_infoframe_write_infopack(hdmi, buffer); - /* Infoframe header */ - val = buffer[0]; - val |= buffer[1] << 8; - val |= buffer[2] << 16; - hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI)); - - /* Infoframe packet bytes */ - val = buffer[3]; - val |= *(frame++) << 8; - val |= *(frame++) << 16; - val |= *(frame++) << 24; - hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI)); - - val = *(frame++); - val |= *(frame++) << 8; - val |= *(frame++) << 16; - val |= *(frame++) << 24; - hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI)); - - val = *(frame++); - val |= *(frame++) << 8; - val |= *(frame++) << 16; - val |= *(frame++) << 24; - hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI)); - - val = *(frame++); - val |= *(frame) << 8; - hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI)); - - /* Enable transmission slot for AVI infoframe - * According to the hdmi specification, AVI infoframe should be - * transmitted at least once per two video fields - */ - val = hdmi_read(hdmi, HDMI_SW_DI_CFG); - val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, HDMI_IFRAME_SLOT_AVI); - hdmi_write(hdmi, val, HDMI_SW_DI_CFG); + return 0; +} + +/** + * Prepare and configure the AUDIO infoframe + * + * AUDIO infoframe are transmitted once per frame and + * contains information about HDMI transmission mode such 
as audio codec, + * sample size, ... + * + * @hdmi: pointer on the hdmi internal structure + * + * Return negative value if error occurs + */ +static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi) +{ + struct hdmi_audio_infoframe infofame; + u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)]; + int ret; + + ret = hdmi_audio_infoframe_init(&infofame); + if (ret < 0) { + DRM_ERROR("failed to setup audio infoframe: %d\n", ret); + return ret; + } + + infofame.channels = 2; + + ret = hdmi_audio_infoframe_pack(&infofame, buffer, sizeof(buffer)); + if (ret < 0) { + DRM_ERROR("failed to pack audio infoframe: %d\n", ret); + return ret; + } + + hdmi_infoframe_write_infopack(hdmi, buffer); return 0; } @@ -427,6 +518,10 @@ static void sti_hdmi_pre_enable(struct drm_bridge *bridge) if (hdmi_avi_infoframe_config(hdmi)) DRM_ERROR("Unable to configure AVI infoframe\n"); + /* Program AUDIO infoframe */ + if (hdmi_audio_infoframe_config(hdmi)) + DRM_ERROR("Unable to configure AUDIO infoframe\n"); + /* Sw reset */ hdmi_swreset(hdmi); } @@ -463,19 +558,12 @@ static void sti_hdmi_bridge_nope(struct drm_bridge *bridge) /* do nothing */ } -static void sti_hdmi_brigde_destroy(struct drm_bridge *bridge) -{ - drm_bridge_cleanup(bridge); - kfree(bridge); -} - static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = { .pre_enable = sti_hdmi_pre_enable, .enable = sti_hdmi_bridge_nope, .disable = sti_hdmi_disable, .post_disable = sti_hdmi_bridge_nope, .mode_set = sti_hdmi_set_mode, - .destroy = sti_hdmi_brigde_destroy, }; static int sti_hdmi_connector_get_modes(struct drm_connector *connector) @@ -635,7 +723,8 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) goto err_adapt; bridge->driver_private = hdmi; - drm_bridge_init(drm_dev, bridge, &sti_hdmi_bridge_funcs); + bridge->funcs = &sti_hdmi_bridge_funcs; + drm_bridge_attach(drm_dev, bridge); encoder->bridge = bridge; connector->encoder = encoder; @@ -667,7 +756,6 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) err_sysfs: drm_connector_unregister(drm_connector); err_connector: - drm_bridge_cleanup(bridge); drm_connector_cleanup(drm_connector); err_adapt: put_device(&hdmi->ddc_adapt->dev); diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index f3db05dab0ab..b0eb62de1b2e 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1025,7 +1025,7 @@ static int sti_hqvdp_probe(struct platform_device *pdev) /* Get clock resources */ hqvdp->clk = devm_clk_get(dev, "hqvdp"); hqvdp->clk_pix_main = devm_clk_get(dev, "pix_main"); - if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk)) { + if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk_pix_main)) { DRM_ERROR("Cannot get clocks\n"); return -ENXIO; } diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index ae26cc054fff..3aaa84ae2681 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -18,9 +18,12 @@ #include "drm.h" #include "gem.h" +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_plane_helper.h> struct tegra_dc_soc_info { + bool supports_border_color; bool supports_interlacing; bool supports_cursor; bool supports_block_linear; @@ -38,63 +41,122 @@ static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane) return container_of(plane, struct tegra_plane, base); } -static void tegra_dc_window_commit(struct tegra_dc *dc, unsigned int index) +struct tegra_dc_state { + struct drm_crtc_state base; + + struct clk *clk; + unsigned long pclk; 
+ unsigned int div; + + u32 planes; +}; + +static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state) { - u32 value = WIN_A_ACT_REQ << index; + if (state) + return container_of(state, struct tegra_dc_state, base); - tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); + return NULL; } -static void tegra_dc_cursor_commit(struct tegra_dc *dc) +struct tegra_plane_state { + struct drm_plane_state base; + + struct tegra_bo_tiling tiling; + u32 format; + u32 swap; +}; + +static inline struct tegra_plane_state * +to_tegra_plane_state(struct drm_plane_state *state) { - tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL); + if (state) + return container_of(state, struct tegra_plane_state, base); + + return NULL; } -static void tegra_dc_commit(struct tegra_dc *dc) +/* + * Reads the active copy of a register. This takes the dc->lock spinlock to + * prevent races with the VBLANK processing which also needs access to the + * active copy of some registers. + */ +static u32 tegra_dc_readl_active(struct tegra_dc *dc, unsigned long offset) +{ + unsigned long flags; + u32 value; + + spin_lock_irqsave(&dc->lock, flags); + + tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS); + value = tegra_dc_readl(dc, offset); + tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS); + + spin_unlock_irqrestore(&dc->lock, flags); + return value; +} + +/* + * Double-buffered registers have two copies: ASSEMBLY and ACTIVE. When the + * *_ACT_REQ bits are set the ASSEMBLY copy is latched into the ACTIVE copy. + * Latching happens mmediately if the display controller is in STOP mode or + * on the next frame boundary otherwise. + * + * Triple-buffered registers have three copies: ASSEMBLY, ARM and ACTIVE. The + * ASSEMBLY copy is latched into the ARM copy immediately after *_UPDATE bits + * are written. When the *_ACT_REQ bits are written, the ARM copy is latched + * into the ACTIVE copy, either immediately if the display controller is in + * STOP mode, or at the next frame boundary otherwise. 
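The latching scheme described above is what the atomic conversion in this patch builds on: each plane records its WIN_x_ACT_REQ bit in the CRTC state (tegra_plane_state_add()) and the CRTC's atomic_flush writes the accumulated mask to DC_CMD_STATE_CONTROL twice. A minimal sketch of that flush sequence, using the driver's existing tegra_dc_writel() helper and register names; the function name itself is illustrative and not part of the patch:

/* Illustrative condensation of tegra_crtc_atomic_flush(): the *_UPDATE
 * bits sit 8 bits above their *_ACT_REQ counterparts in
 * DC_CMD_STATE_CONTROL, so writing the mask shifted by 8 latches the
 * ASSEMBLY copies into the ARM copies, and writing it unshifted requests
 * the ARM-to-ACTIVE latch at the next frame boundary (or immediately in
 * STOP mode). */
static void tegra_dc_flush_planes(struct tegra_dc *dc, u32 planes)
{
	tegra_dc_writel(dc, planes << 8, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, planes, DC_CMD_STATE_CONTROL);
}

/* e.g. flush windows A and B together, using the same shift idiom as
 * the driver:
 *   tegra_dc_flush_planes(dc, (WIN_A_ACT_REQ << 0) | (WIN_A_ACT_REQ << 1));
 */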
+ */ +void tegra_dc_commit(struct tegra_dc *dc) { tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); } -static unsigned int tegra_dc_format(uint32_t format, uint32_t *swap) +static int tegra_dc_format(u32 fourcc, u32 *format, u32 *swap) { /* assume no swapping of fetched data */ if (swap) *swap = BYTE_SWAP_NOSWAP; - switch (format) { + switch (fourcc) { case DRM_FORMAT_XBGR8888: - return WIN_COLOR_DEPTH_R8G8B8A8; + *format = WIN_COLOR_DEPTH_R8G8B8A8; + break; case DRM_FORMAT_XRGB8888: - return WIN_COLOR_DEPTH_B8G8R8A8; + *format = WIN_COLOR_DEPTH_B8G8R8A8; + break; case DRM_FORMAT_RGB565: - return WIN_COLOR_DEPTH_B5G6R5; + *format = WIN_COLOR_DEPTH_B5G6R5; + break; case DRM_FORMAT_UYVY: - return WIN_COLOR_DEPTH_YCbCr422; + *format = WIN_COLOR_DEPTH_YCbCr422; + break; case DRM_FORMAT_YUYV: if (swap) *swap = BYTE_SWAP_SWAP2; - return WIN_COLOR_DEPTH_YCbCr422; + *format = WIN_COLOR_DEPTH_YCbCr422; + break; case DRM_FORMAT_YUV420: - return WIN_COLOR_DEPTH_YCbCr420P; + *format = WIN_COLOR_DEPTH_YCbCr420P; + break; case DRM_FORMAT_YUV422: - return WIN_COLOR_DEPTH_YCbCr422P; + *format = WIN_COLOR_DEPTH_YCbCr422P; + break; default: - break; + return -EINVAL; } - WARN(1, "unsupported pixel format %u, using default\n", format); - return WIN_COLOR_DEPTH_B8G8R8A8; + return 0; } static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar) @@ -121,6 +183,9 @@ static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar) return true; } + if (planar) + *planar = false; + return false; } @@ -164,8 +229,8 @@ static inline u32 compute_initial_dda(unsigned int in) return dfixed_frac(inf); } -static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, - const struct tegra_dc_window *window) +static void tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, + const struct tegra_dc_window *window) { unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp; unsigned long value, flags; @@ -274,9 +339,11 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, break; case TEGRA_BO_TILING_MODE_BLOCK: - DRM_ERROR("hardware doesn't support block linear mode\n"); - spin_unlock_irqrestore(&dc->lock, flags); - return -EINVAL; + /* + * No need to handle this here because ->atomic_check + * will already have filtered it out. 
+ */ + break; } tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE); @@ -332,109 +399,245 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, break; } - tegra_dc_window_commit(dc, index); - spin_unlock_irqrestore(&dc->lock, flags); - - return 0; } -static int tegra_window_plane_disable(struct drm_plane *plane) +static void tegra_plane_destroy(struct drm_plane *plane) { - struct tegra_dc *dc = to_tegra_dc(plane->crtc); struct tegra_plane *p = to_tegra_plane(plane); - unsigned long flags; - u32 value; - if (!plane->crtc) - return 0; + drm_plane_cleanup(plane); + kfree(p); +} - spin_lock_irqsave(&dc->lock, flags); +static const u32 tegra_primary_plane_formats[] = { + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_RGB565, +}; - value = WINDOW_A_SELECT << p->index; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); +static void tegra_primary_plane_destroy(struct drm_plane *plane) +{ + tegra_plane_destroy(plane); +} - value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS); - value &= ~WIN_ENABLE; - tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); +static void tegra_plane_reset(struct drm_plane *plane) +{ + struct tegra_plane_state *state; - tegra_dc_window_commit(dc, p->index); + if (plane->state && plane->state->fb) + drm_framebuffer_unreference(plane->state->fb); - spin_unlock_irqrestore(&dc->lock, flags); + kfree(plane->state); + plane->state = NULL; - return 0; + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) { + plane->state = &state->base; + plane->state->plane = plane; + } } -static void tegra_plane_destroy(struct drm_plane *plane) +static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_plane *plane) { - struct tegra_plane *p = to_tegra_plane(plane); + struct tegra_plane_state *state = to_tegra_plane_state(plane->state); + struct tegra_plane_state *copy; - drm_plane_cleanup(plane); - kfree(p); + copy = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (!copy) + return NULL; + + if (copy->base.fb) + drm_framebuffer_reference(copy->base.fb); + + return ©->base; } -static const u32 tegra_primary_plane_formats[] = { - DRM_FORMAT_XBGR8888, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_RGB565, +static void tegra_plane_atomic_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + if (state->fb) + drm_framebuffer_unreference(state->fb); + + kfree(state); +} + +static const struct drm_plane_funcs tegra_primary_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = tegra_primary_plane_destroy, + .reset = tegra_plane_reset, + .atomic_duplicate_state = tegra_plane_atomic_duplicate_state, + .atomic_destroy_state = tegra_plane_atomic_destroy_state, }; -static int tegra_primary_plane_update(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, - int crtc_y, unsigned int crtc_w, - unsigned int crtc_h, uint32_t src_x, - uint32_t src_y, uint32_t src_w, - uint32_t src_h) +static int tegra_plane_prepare_fb(struct drm_plane *plane, + struct drm_framebuffer *fb) { - struct tegra_bo *bo = tegra_fb_get_plane(fb, 0); - struct tegra_plane *p = to_tegra_plane(plane); - struct tegra_dc *dc = to_tegra_dc(crtc); - struct tegra_dc_window window; + return 0; +} + +static void tegra_plane_cleanup_fb(struct drm_plane *plane, + struct drm_framebuffer *fb) +{ +} + +static int tegra_plane_state_add(struct tegra_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc_state *crtc_state; + struct tegra_dc_state *tegra; + + /* 
Propagate errors from allocation or locking failures. */ + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + tegra = to_dc_state(crtc_state); + + tegra->planes |= WIN_A_ACT_REQ << plane->index; + + return 0; +} + +static int tegra_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct tegra_plane_state *plane_state = to_tegra_plane_state(state); + struct tegra_bo_tiling *tiling = &plane_state->tiling; + struct tegra_plane *tegra = to_tegra_plane(plane); + struct tegra_dc *dc = to_tegra_dc(state->crtc); int err; - memset(&window, 0, sizeof(window)); - window.src.x = src_x >> 16; - window.src.y = src_y >> 16; - window.src.w = src_w >> 16; - window.src.h = src_h >> 16; - window.dst.x = crtc_x; - window.dst.y = crtc_y; - window.dst.w = crtc_w; - window.dst.h = crtc_h; - window.format = tegra_dc_format(fb->pixel_format, &window.swap); - window.bits_per_pixel = fb->bits_per_pixel; - window.bottom_up = tegra_fb_is_bottom_up(fb); + /* no need for further checks if the plane is being disabled */ + if (!state->crtc) + return 0; - err = tegra_fb_get_tiling(fb, &window.tiling); + err = tegra_dc_format(state->fb->pixel_format, &plane_state->format, + &plane_state->swap); if (err < 0) return err; - window.base[0] = bo->paddr + fb->offsets[0]; - window.stride[0] = fb->pitches[0]; + err = tegra_fb_get_tiling(state->fb, tiling); + if (err < 0) + return err; + + if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK && + !dc->soc->supports_block_linear) { + DRM_ERROR("hardware doesn't support block linear mode\n"); + return -EINVAL; + } + + /* + * Tegra doesn't support different strides for U and V planes so we + * error out if the user tries to display a framebuffer with such a + * configuration. 
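The restriction spelled out above can be stated as a tiny stand-alone predicate. The function below is not part of the patch (the real check sits just below in tegra_plane_atomic_check()); it only makes the rule for three-plane YUV framebuffers explicit:

#include <stdbool.h>
#include <stdint.h>

/* True if a framebuffer's per-plane line strides are acceptable for a
 * Tegra display window: packed and two-plane formats always pass, and
 * three-plane YUV passes only when both chroma planes share one stride,
 * since the hardware exposes a single UV stride per window. */
static bool uv_strides_supported(unsigned int num_planes,
				 const uint32_t pitches[])
{
	if (num_planes <= 2)
		return true;

	return pitches[2] == pitches[1];
}

For example, YUV420 pitches of {256, 128, 128} pass, while {256, 128, 64} are rejected; in the driver the failing case returns -EINVAL from atomic_check.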
+ */ + if (drm_format_num_planes(state->fb->pixel_format) > 2) { + if (state->fb->pitches[2] != state->fb->pitches[1]) { + DRM_ERROR("unsupported UV-plane configuration\n"); + return -EINVAL; + } + } - err = tegra_dc_setup_window(dc, p->index, &window); + err = tegra_plane_state_add(tegra, state); if (err < 0) return err; return 0; } -static void tegra_primary_plane_destroy(struct drm_plane *plane) +static void tegra_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) { - tegra_window_plane_disable(plane); - tegra_plane_destroy(plane); + struct tegra_plane_state *state = to_tegra_plane_state(plane->state); + struct tegra_dc *dc = to_tegra_dc(plane->state->crtc); + struct drm_framebuffer *fb = plane->state->fb; + struct tegra_plane *p = to_tegra_plane(plane); + struct tegra_dc_window window; + unsigned int i; + + /* rien ne va plus */ + if (!plane->state->crtc || !plane->state->fb) + return; + + memset(&window, 0, sizeof(window)); + window.src.x = plane->state->src_x >> 16; + window.src.y = plane->state->src_y >> 16; + window.src.w = plane->state->src_w >> 16; + window.src.h = plane->state->src_h >> 16; + window.dst.x = plane->state->crtc_x; + window.dst.y = plane->state->crtc_y; + window.dst.w = plane->state->crtc_w; + window.dst.h = plane->state->crtc_h; + window.bits_per_pixel = fb->bits_per_pixel; + window.bottom_up = tegra_fb_is_bottom_up(fb); + + /* copy from state */ + window.tiling = state->tiling; + window.format = state->format; + window.swap = state->swap; + + for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { + struct tegra_bo *bo = tegra_fb_get_plane(fb, i); + + window.base[i] = bo->paddr + fb->offsets[i]; + window.stride[i] = fb->pitches[i]; + } + + tegra_dc_setup_window(dc, p->index, &window); } -static const struct drm_plane_funcs tegra_primary_plane_funcs = { - .update_plane = tegra_primary_plane_update, - .disable_plane = tegra_window_plane_disable, - .destroy = tegra_primary_plane_destroy, +static void tegra_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct tegra_plane *p = to_tegra_plane(plane); + struct tegra_dc *dc; + unsigned long flags; + u32 value; + + /* rien ne va plus */ + if (!old_state || !old_state->crtc) + return; + + dc = to_tegra_dc(old_state->crtc); + + spin_lock_irqsave(&dc->lock, flags); + + value = WINDOW_A_SELECT << p->index; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); + + value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS); + value &= ~WIN_ENABLE; + tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); + + spin_unlock_irqrestore(&dc->lock, flags); +} + +static const struct drm_plane_helper_funcs tegra_primary_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, + .atomic_check = tegra_plane_atomic_check, + .atomic_update = tegra_plane_atomic_update, + .atomic_disable = tegra_plane_atomic_disable, }; static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm, struct tegra_dc *dc) { + /* + * Ideally this would use drm_crtc_mask(), but that would require the + * CRTC to already be in the mode_config's list of CRTCs. However, it + * will only be added to that list in the drm_crtc_init_with_planes() + * (in tegra_dc_init()), which in turn requires registration of these + * planes. So we have ourselves a nice little chicken and egg problem + * here. 
+ * + * We work around this by manually creating the mask from the number + * of CRTCs that have been registered, and should therefore always be + * the same as drm_crtc_index() after registration. + */ + unsigned long possible_crtcs = 1 << drm->mode_config.num_crtc; struct tegra_plane *plane; unsigned int num_formats; const u32 *formats; @@ -447,7 +650,7 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm, num_formats = ARRAY_SIZE(tegra_primary_plane_formats); formats = tegra_primary_plane_formats; - err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe, + err = drm_universal_plane_init(drm, &plane->base, possible_crtcs, &tegra_primary_plane_funcs, formats, num_formats, DRM_PLANE_TYPE_PRIMARY); if (err < 0) { @@ -455,6 +658,8 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm, return ERR_PTR(err); } + drm_plane_helper_add(&plane->base, &tegra_primary_plane_helper_funcs); + return &plane->base; } @@ -462,27 +667,49 @@ static const u32 tegra_cursor_plane_formats[] = { DRM_FORMAT_RGBA8888, }; -static int tegra_cursor_plane_update(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, - int crtc_y, unsigned int crtc_w, - unsigned int crtc_h, uint32_t src_x, - uint32_t src_y, uint32_t src_w, - uint32_t src_h) +static int tegra_cursor_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) { - struct tegra_bo *bo = tegra_fb_get_plane(fb, 0); - struct tegra_dc *dc = to_tegra_dc(crtc); - u32 value = CURSOR_CLIP_DISPLAY; + struct tegra_plane *tegra = to_tegra_plane(plane); + int err; + + /* no need for further checks if the plane is being disabled */ + if (!state->crtc) + return 0; /* scaling not supported for cursor */ - if ((src_w >> 16 != crtc_w) || (src_h >> 16 != crtc_h)) + if ((state->src_w >> 16 != state->crtc_w) || + (state->src_h >> 16 != state->crtc_h)) return -EINVAL; /* only square cursors supported */ - if (src_w != src_h) + if (state->src_w != state->src_h) + return -EINVAL; + + if (state->crtc_w != 32 && state->crtc_w != 64 && + state->crtc_w != 128 && state->crtc_w != 256) return -EINVAL; - switch (crtc_w) { + err = tegra_plane_state_add(tegra, state); + if (err < 0) + return err; + + return 0; +} + +static void tegra_cursor_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct tegra_bo *bo = tegra_fb_get_plane(plane->state->fb, 0); + struct tegra_dc *dc = to_tegra_dc(plane->state->crtc); + struct drm_plane_state *state = plane->state; + u32 value = CURSOR_CLIP_DISPLAY; + + /* rien ne va plus */ + if (!plane->state->crtc || !plane->state->fb) + return; + + switch (state->crtc_w) { case 32: value |= CURSOR_SIZE_32x32; break; @@ -500,7 +727,9 @@ static int tegra_cursor_plane_update(struct drm_plane *plane, break; default: - return -EINVAL; + WARN(1, "cursor size %ux%u not supported\n", state->crtc_w, + state->crtc_h); + return; } value |= (bo->paddr >> 10) & 0x3fffff; @@ -526,38 +755,43 @@ static int tegra_cursor_plane_update(struct drm_plane *plane, tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL); /* position the cursor */ - value = (crtc_y & 0x3fff) << 16 | (crtc_x & 0x3fff); + value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff); tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION); - /* apply changes */ - tegra_dc_cursor_commit(dc); - tegra_dc_commit(dc); - - return 0; } -static int tegra_cursor_plane_disable(struct drm_plane *plane) +static void tegra_cursor_atomic_disable(struct drm_plane *plane, + struct 
drm_plane_state *old_state) { - struct tegra_dc *dc = to_tegra_dc(plane->crtc); + struct tegra_dc *dc; u32 value; - if (!plane->crtc) - return 0; + /* rien ne va plus */ + if (!old_state || !old_state->crtc) + return; + + dc = to_tegra_dc(old_state->crtc); value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); value &= ~CURSOR_ENABLE; tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - - tegra_dc_cursor_commit(dc); - tegra_dc_commit(dc); - - return 0; } static const struct drm_plane_funcs tegra_cursor_plane_funcs = { - .update_plane = tegra_cursor_plane_update, - .disable_plane = tegra_cursor_plane_disable, + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, .destroy = tegra_plane_destroy, + .reset = tegra_plane_reset, + .atomic_duplicate_state = tegra_plane_atomic_duplicate_state, + .atomic_destroy_state = tegra_plane_atomic_destroy_state, +}; + +static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, + .atomic_check = tegra_cursor_atomic_check, + .atomic_update = tegra_cursor_atomic_update, + .atomic_disable = tegra_cursor_atomic_disable, }; static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm, @@ -572,6 +806,13 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm, if (!plane) return ERR_PTR(-ENOMEM); + /* + * We'll treat the cursor as an overlay plane with index 6 here so + * that the update and activation request bits in DC_CMD_STATE_CONTROL + * match up. + */ + plane->index = 6; + num_formats = ARRAY_SIZE(tegra_cursor_plane_formats); formats = tegra_cursor_plane_formats; @@ -583,71 +824,23 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm, return ERR_PTR(err); } - return &plane->base; -} - -static int tegra_overlay_plane_update(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, - int crtc_y, unsigned int crtc_w, - unsigned int crtc_h, uint32_t src_x, - uint32_t src_y, uint32_t src_w, - uint32_t src_h) -{ - struct tegra_plane *p = to_tegra_plane(plane); - struct tegra_dc *dc = to_tegra_dc(crtc); - struct tegra_dc_window window; - unsigned int i; - int err; - - memset(&window, 0, sizeof(window)); - window.src.x = src_x >> 16; - window.src.y = src_y >> 16; - window.src.w = src_w >> 16; - window.src.h = src_h >> 16; - window.dst.x = crtc_x; - window.dst.y = crtc_y; - window.dst.w = crtc_w; - window.dst.h = crtc_h; - window.format = tegra_dc_format(fb->pixel_format, &window.swap); - window.bits_per_pixel = fb->bits_per_pixel; - window.bottom_up = tegra_fb_is_bottom_up(fb); - - err = tegra_fb_get_tiling(fb, &window.tiling); - if (err < 0) - return err; - - for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { - struct tegra_bo *bo = tegra_fb_get_plane(fb, i); - - window.base[i] = bo->paddr + fb->offsets[i]; + drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs); - /* - * Tegra doesn't support different strides for U and V planes - * so we display a warning if the user tries to display a - * framebuffer with such a configuration. 
- */ - if (i >= 2) { - if (fb->pitches[i] != window.stride[1]) - DRM_ERROR("unsupported UV-plane configuration\n"); - } else { - window.stride[i] = fb->pitches[i]; - } - } - - return tegra_dc_setup_window(dc, p->index, &window); + return &plane->base; } static void tegra_overlay_plane_destroy(struct drm_plane *plane) { - tegra_window_plane_disable(plane); tegra_plane_destroy(plane); } static const struct drm_plane_funcs tegra_overlay_plane_funcs = { - .update_plane = tegra_overlay_plane_update, - .disable_plane = tegra_window_plane_disable, + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, .destroy = tegra_overlay_plane_destroy, + .reset = tegra_plane_reset, + .atomic_duplicate_state = tegra_plane_atomic_duplicate_state, + .atomic_destroy_state = tegra_plane_atomic_destroy_state, }; static const uint32_t tegra_overlay_plane_formats[] = { @@ -660,6 +853,14 @@ static const uint32_t tegra_overlay_plane_formats[] = { DRM_FORMAT_YUV422, }; +static const struct drm_plane_helper_funcs tegra_overlay_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, + .atomic_check = tegra_plane_atomic_check, + .atomic_update = tegra_plane_atomic_update, + .atomic_disable = tegra_plane_atomic_disable, +}; + static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm, struct tegra_dc *dc, unsigned int index) @@ -686,6 +887,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm, return ERR_PTR(err); } + drm_plane_helper_add(&plane->base, &tegra_overlay_plane_helper_funcs); + return &plane->base; } @@ -703,99 +906,6 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc) return 0; } -static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y, - struct drm_framebuffer *fb) -{ - struct tegra_bo *bo = tegra_fb_get_plane(fb, 0); - unsigned int h_offset = 0, v_offset = 0; - struct tegra_bo_tiling tiling; - unsigned long value, flags; - unsigned int format, swap; - int err; - - err = tegra_fb_get_tiling(fb, &tiling); - if (err < 0) - return err; - - spin_lock_irqsave(&dc->lock, flags); - - tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); - - value = fb->offsets[0] + y * fb->pitches[0] + - x * fb->bits_per_pixel / 8; - - tegra_dc_writel(dc, bo->paddr + value, DC_WINBUF_START_ADDR); - tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE); - - format = tegra_dc_format(fb->pixel_format, &swap); - tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH); - tegra_dc_writel(dc, swap, DC_WIN_BYTE_SWAP); - - if (dc->soc->supports_block_linear) { - unsigned long height = tiling.value; - - switch (tiling.mode) { - case TEGRA_BO_TILING_MODE_PITCH: - value = DC_WINBUF_SURFACE_KIND_PITCH; - break; - - case TEGRA_BO_TILING_MODE_TILED: - value = DC_WINBUF_SURFACE_KIND_TILED; - break; - - case TEGRA_BO_TILING_MODE_BLOCK: - value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) | - DC_WINBUF_SURFACE_KIND_BLOCK; - break; - } - - tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND); - } else { - switch (tiling.mode) { - case TEGRA_BO_TILING_MODE_PITCH: - value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV | - DC_WIN_BUFFER_ADDR_MODE_LINEAR; - break; - - case TEGRA_BO_TILING_MODE_TILED: - value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV | - DC_WIN_BUFFER_ADDR_MODE_TILE; - break; - - case TEGRA_BO_TILING_MODE_BLOCK: - DRM_ERROR("hardware doesn't support block linear mode\n"); - spin_unlock_irqrestore(&dc->lock, flags); - return -EINVAL; - } - - tegra_dc_writel(dc, value, 
DC_WIN_BUFFER_ADDR_MODE); - } - - /* make sure bottom-up buffers are properly displayed */ - if (tegra_fb_is_bottom_up(fb)) { - value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS); - value |= V_DIRECTION; - tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); - - v_offset += fb->height - 1; - } else { - value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS); - value &= ~V_DIRECTION; - tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); - } - - tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET); - tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET); - - value = GENERAL_ACT_REQ | WIN_A_ACT_REQ; - tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); - - spin_unlock_irqrestore(&dc->lock, flags); - - return 0; -} - void tegra_dc_enable_vblank(struct tegra_dc *dc) { unsigned long value, flags; @@ -838,7 +948,7 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc) bo = tegra_fb_get_plane(crtc->primary->fb, 0); - spin_lock_irqsave(&dc->lock, flags); + spin_lock(&dc->lock); /* check if new start address has been latched */ tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); @@ -846,7 +956,7 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc) base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR); tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS); - spin_unlock_irqrestore(&dc->lock, flags); + spin_unlock(&dc->lock); if (base == bo->paddr + crtc->primary->fb->offsets[0]) { drm_crtc_send_vblank_event(crtc, dc->event); @@ -874,64 +984,130 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file) spin_unlock_irqrestore(&drm->event_lock, flags); } -static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, uint32_t page_flip_flags) +static void tegra_dc_destroy(struct drm_crtc *crtc) { - unsigned int pipe = drm_crtc_index(crtc); - struct tegra_dc *dc = to_tegra_dc(crtc); - - if (dc->event) - return -EBUSY; + drm_crtc_cleanup(crtc); +} - if (event) { - event->pipe = pipe; - dc->event = event; - drm_crtc_vblank_get(crtc); - } +static void tegra_crtc_reset(struct drm_crtc *crtc) +{ + struct tegra_dc_state *state; - tegra_dc_set_base(dc, 0, 0, fb); - crtc->primary->fb = fb; + kfree(crtc->state); + crtc->state = NULL; - return 0; + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) + crtc->state = &state->base; } -static void drm_crtc_clear(struct drm_crtc *crtc) +static struct drm_crtc_state * +tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc) { - memset(crtc, 0, sizeof(*crtc)); + struct tegra_dc_state *state = to_dc_state(crtc->state); + struct tegra_dc_state *copy; + + copy = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (!copy) + return NULL; + + copy->base.mode_changed = false; + copy->base.planes_changed = false; + copy->base.event = NULL; + + return ©->base; } -static void tegra_dc_destroy(struct drm_crtc *crtc) +static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) { - drm_crtc_cleanup(crtc); - drm_crtc_clear(crtc); + kfree(state); } static const struct drm_crtc_funcs tegra_crtc_funcs = { - .page_flip = tegra_dc_page_flip, - .set_config = drm_crtc_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .set_config = drm_atomic_helper_set_config, .destroy = tegra_dc_destroy, + .reset = tegra_crtc_reset, + .atomic_duplicate_state = tegra_crtc_atomic_duplicate_state, + .atomic_destroy_state = tegra_crtc_atomic_destroy_state, }; +static void tegra_dc_stop(struct tegra_dc *dc) +{ + u32 value; 
+ + /* stop the display controller */ + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); + value &= ~DISP_CTRL_MODE_MASK; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); + + tegra_dc_commit(dc); +} + +static bool tegra_dc_idle(struct tegra_dc *dc) +{ + u32 value; + + value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND); + + return (value & DISP_CTRL_MODE_MASK) == 0; +} + +static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout) +{ + timeout = jiffies + msecs_to_jiffies(timeout); + + while (time_before(jiffies, timeout)) { + if (tegra_dc_idle(dc)) + return 0; + + usleep_range(1000, 2000); + } + + dev_dbg(dc->dev, "timeout waiting for DC to become idle\n"); + return -ETIMEDOUT; +} + static void tegra_crtc_disable(struct drm_crtc *crtc) { struct tegra_dc *dc = to_tegra_dc(crtc); - struct drm_device *drm = crtc->dev; - struct drm_plane *plane; + u32 value; - drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) { - if (plane->crtc == crtc) { - tegra_window_plane_disable(plane); - plane->crtc = NULL; + if (!tegra_dc_idle(dc)) { + tegra_dc_stop(dc); - if (plane->fb) { - drm_framebuffer_unreference(plane->fb); - plane->fb = NULL; - } - } + /* + * Ignore the return value, there isn't anything useful to do + * in case this fails. + */ + tegra_dc_wait_idle(dc, 100); + } + + /* + * This should really be part of the RGB encoder driver, but clearing + * these bits has the side-effect of stopping the display controller. + * When that happens no VBLANK interrupts will be raised. At the same + * time the encoder is disabled before the display controller, so the + * above code is always going to timeout waiting for the controller + * to go idle. + * + * Given the close coupling between the RGB encoder and the display + * controller doing it here is still kind of okay. None of the other + * encoder drivers require these bits to be cleared. + * + * XXX: Perhaps given that the display controller is switched off at + * this point anyway maybe clearing these bits isn't even useful for + * the RGB encoder? + */ + if (dc->rgb) { + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); + value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | + PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); } drm_crtc_vblank_off(crtc); - tegra_dc_commit(dc); } static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc, @@ -971,33 +1147,15 @@ static int tegra_dc_set_timings(struct tegra_dc *dc, return 0; } -static int tegra_crtc_setup_clk(struct drm_crtc *crtc, - struct drm_display_mode *mode) +int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent, + unsigned long pclk, unsigned int div) { - unsigned long pclk = mode->clock * 1000; - struct tegra_dc *dc = to_tegra_dc(crtc); - struct tegra_output *output = NULL; - struct drm_encoder *encoder; - unsigned int div; u32 value; - long err; - - list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head) - if (encoder->crtc == crtc) { - output = encoder_to_output(encoder); - break; - } - - if (!output) - return -ENODEV; + int err; - /* - * This assumes that the parent clock is pll_d_out0 or pll_d2_out - * respectively, each of which divides the base pll_d by 2. 
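tegra_dc_wait_idle(), added a few hunks above, is a standard bounded-poll loop. A userspace analogue of the same pattern, with hypothetical names and clock_gettime() standing in for jiffies, could look like this:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Current monotonic time in milliseconds. */
static int64_t monotonic_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll a predicate roughly once per millisecond until it succeeds or
 * the timeout expires. */
static int wait_until(bool (*idle)(void *ctx), void *ctx,
		      unsigned int timeout_ms)
{
	const int64_t deadline = monotonic_ms() + timeout_ms;
	const struct timespec step = { .tv_nsec = 1000 * 1000 }; /* 1 ms */

	while (monotonic_ms() < deadline) {
		if (idle(ctx))
			return 0;
		nanosleep(&step, NULL);
	}

	return -ETIMEDOUT;
}

The kernel version sleeps with usleep_range(1000, 2000) and only reports the timeout with dev_dbg(), since the caller in tegra_crtc_disable() cannot do anything useful about a controller that refuses to go idle.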
- */ - err = tegra_output_setup_clock(output, dc->clk, pclk, &div); + err = clk_set_parent(dc->clk, parent); if (err < 0) { - dev_err(dc->dev, "failed to setup clock: %ld\n", err); + dev_err(dc->dev, "failed to set parent clock: %d\n", err); return err; } @@ -1009,26 +1167,69 @@ static int tegra_crtc_setup_clk(struct drm_crtc *crtc, return 0; } -static int tegra_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted, - int x, int y, struct drm_framebuffer *old_fb) +int tegra_dc_state_setup_clock(struct tegra_dc *dc, + struct drm_crtc_state *crtc_state, + struct clk *clk, unsigned long pclk, + unsigned int div) +{ + struct tegra_dc_state *state = to_dc_state(crtc_state); + + state->clk = clk; + state->pclk = pclk; + state->div = div; + + return 0; +} + +static void tegra_dc_commit_state(struct tegra_dc *dc, + struct tegra_dc_state *state) { - struct tegra_bo *bo = tegra_fb_get_plane(crtc->primary->fb, 0); - struct tegra_dc *dc = to_tegra_dc(crtc); - struct tegra_dc_window window; u32 value; int err; - err = tegra_crtc_setup_clk(crtc, mode); - if (err) { - dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err); - return err; + err = clk_set_parent(dc->clk, state->clk); + if (err < 0) + dev_err(dc->dev, "failed to set parent clock: %d\n", err); + + /* + * Outputs may not want to change the parent clock rate. This is only + * relevant to Tegra20 where only a single display PLL is available. + * Since that PLL would typically be used for HDMI, an internal LVDS + * panel would need to be driven by some other clock such as PLL_P + * which is shared with other peripherals. Changing the clock rate + * should therefore be avoided. + */ + if (state->pclk > 0) { + err = clk_set_rate(state->clk, state->pclk); + if (err < 0) + dev_err(dc->dev, + "failed to set clock rate to %lu Hz\n", + state->pclk); } + DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), + state->div); + DRM_DEBUG_KMS("pclk: %lu\n", state->pclk); + + value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1; + tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); +} + +static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct tegra_dc_state *state = to_dc_state(crtc->state); + struct tegra_dc *dc = to_tegra_dc(crtc); + u32 value; + + tegra_dc_commit_state(dc, state); + /* program display mode */ tegra_dc_set_timings(dc, mode); + if (dc->soc->supports_border_color) + tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); + /* interlacing isn't supported yet, so disable it */ if (dc->soc->supports_interlacing) { value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL); @@ -1036,35 +1237,17 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc, tegra_dc_writel(dc, value, DC_DISP_INTERLACE_CONTROL); } - /* setup window parameters */ - memset(&window, 0, sizeof(window)); - window.src.x = 0; - window.src.y = 0; - window.src.w = mode->hdisplay; - window.src.h = mode->vdisplay; - window.dst.x = 0; - window.dst.y = 0; - window.dst.w = mode->hdisplay; - window.dst.h = mode->vdisplay; - window.format = tegra_dc_format(crtc->primary->fb->pixel_format, - &window.swap); - window.bits_per_pixel = crtc->primary->fb->bits_per_pixel; - window.stride[0] = crtc->primary->fb->pitches[0]; - window.base[0] = bo->paddr; - - err = tegra_dc_setup_window(dc, 0, &window); - if (err < 0) - dev_err(dc->dev, "failed to enable root plane\n"); - - return 0; -} + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); + value &= 
~DISP_CTRL_MODE_MASK; + value |= DISP_CTRL_MODE_C_DISPLAY; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); -static int tegra_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) -{ - struct tegra_dc *dc = to_tegra_dc(crtc); + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); + value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | + PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - return tegra_dc_set_base(dc, x, y, crtc->primary->fb); + tegra_dc_commit(dc); } static void tegra_crtc_prepare(struct drm_crtc *crtc) @@ -1075,10 +1258,6 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc) drm_crtc_vblank_off(crtc); - /* hardware initialization */ - reset_control_deassert(dc->rst); - usleep_range(10000, 20000); - if (dc->pipe) syncpt = SYNCPT_VBLANK1; else @@ -1113,19 +1292,49 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc) static void tegra_crtc_commit(struct drm_crtc *crtc) { + drm_crtc_vblank_on(crtc); +} + +static int tegra_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + return 0; +} + +static void tegra_crtc_atomic_begin(struct drm_crtc *crtc) +{ struct tegra_dc *dc = to_tegra_dc(crtc); - drm_crtc_vblank_on(crtc); - tegra_dc_commit(dc); + if (crtc->state->event) { + crtc->state->event->pipe = drm_crtc_index(crtc); + + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + dc->event = crtc->state->event; + crtc->state->event = NULL; + } +} + +static void tegra_crtc_atomic_flush(struct drm_crtc *crtc) +{ + struct tegra_dc_state *state = to_dc_state(crtc->state); + struct tegra_dc *dc = to_tegra_dc(crtc); + + tegra_dc_writel(dc, state->planes << 8, DC_CMD_STATE_CONTROL); + tegra_dc_writel(dc, state->planes, DC_CMD_STATE_CONTROL); } static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = { .disable = tegra_crtc_disable, .mode_fixup = tegra_crtc_mode_fixup, - .mode_set = tegra_crtc_mode_set, - .mode_set_base = tegra_crtc_mode_set_base, + .mode_set = drm_helper_crtc_mode_set, + .mode_set_nofb = tegra_crtc_mode_set_nofb, + .mode_set_base = drm_helper_crtc_mode_set_base, .prepare = tegra_crtc_prepare, .commit = tegra_crtc_commit, + .atomic_check = tegra_crtc_atomic_check, + .atomic_begin = tegra_crtc_atomic_begin, + .atomic_flush = tegra_crtc_atomic_flush, }; static irqreturn_t tegra_dc_irq(int irq, void *data) @@ -1571,6 +1780,7 @@ static const struct host1x_client_ops dc_client_ops = { }; static const struct tegra_dc_soc_info tegra20_dc_soc_info = { + .supports_border_color = true, .supports_interlacing = false, .supports_cursor = false, .supports_block_linear = false, @@ -1579,6 +1789,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = { }; static const struct tegra_dc_soc_info tegra30_dc_soc_info = { + .supports_border_color = true, .supports_interlacing = false, .supports_cursor = false, .supports_block_linear = false, @@ -1587,6 +1798,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = { }; static const struct tegra_dc_soc_info tegra114_dc_soc_info = { + .supports_border_color = true, .supports_interlacing = false, .supports_cursor = false, .supports_block_linear = false, @@ -1595,6 +1807,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = { }; static const struct tegra_dc_soc_info tegra124_dc_soc_info = { + .supports_border_color = false, .supports_interlacing = true, .supports_cursor = true, .supports_block_linear = true, diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 
d4f827593dfa..5f1880766110 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -10,6 +10,9 @@ #include <linux/host1x.h> #include <linux/iommu.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> + #include "drm.h" #include "gem.h" @@ -24,6 +27,92 @@ struct tegra_drm_file { struct list_head contexts; }; +static void tegra_atomic_schedule(struct tegra_drm *tegra, + struct drm_atomic_state *state) +{ + tegra->commit.state = state; + schedule_work(&tegra->commit.work); +} + +static void tegra_atomic_complete(struct tegra_drm *tegra, + struct drm_atomic_state *state) +{ + struct drm_device *drm = tegra->drm; + + /* + * Everything below can be run asynchronously without the need to grab + * any modeset locks at all under one condition: It must be guaranteed + * that the asynchronous work has either been cancelled (if the driver + * supports it, which at least requires that the framebuffers get + * cleaned up with drm_atomic_helper_cleanup_planes()) or completed + * before the new state gets committed on the software side with + * drm_atomic_helper_swap_state(). + * + * This scheme allows new atomic state updates to be prepared and + * checked in parallel to the asynchronous completion of the previous + * update. Which is important since compositors need to figure out the + * composition of the next frame right after having submitted the + * current layout. + */ + + drm_atomic_helper_commit_modeset_disables(drm, state); + drm_atomic_helper_commit_planes(drm, state); + drm_atomic_helper_commit_modeset_enables(drm, state); + + drm_atomic_helper_wait_for_vblanks(drm, state); + + drm_atomic_helper_cleanup_planes(drm, state); + drm_atomic_state_free(state); +} + +static void tegra_atomic_work(struct work_struct *work) +{ + struct tegra_drm *tegra = container_of(work, struct tegra_drm, + commit.work); + + tegra_atomic_complete(tegra, tegra->commit.state); +} + +static int tegra_atomic_commit(struct drm_device *drm, + struct drm_atomic_state *state, bool async) +{ + struct tegra_drm *tegra = drm->dev_private; + int err; + + err = drm_atomic_helper_prepare_planes(drm, state); + if (err) + return err; + + /* serialize outstanding asynchronous commits */ + mutex_lock(&tegra->commit.lock); + flush_work(&tegra->commit.work); + + /* + * This is the point of no return - everything below never fails except + * when the hw goes bonghits. Which means we can commit the new state on + * the software side now. 
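To make the ordering that this comment depends on explicit, here is the commit path reduced to its skeleton; this is purely a restatement of tegra_atomic_commit() in this hunk, with the steps numbered, and is not additional patch content:

static int tegra_atomic_commit_sketch(struct drm_device *drm,
				      struct drm_atomic_state *state,
				      bool async)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	/* 1. Prepare (pin) framebuffers; this step may still fail. */
	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* 2. Serialize with the previous asynchronous commit: the old
	 *    software state must not be swapped out from under a worker
	 *    that is still programming the hardware from it. */
	mutex_lock(&tegra->commit.lock);
	flush_work(&tegra->commit.work);

	/* 3. Point of no return: publish the new software state. */
	drm_atomic_helper_swap_state(drm, state);

	/* 4. Program the hardware, either from the worker (async page
	 *    flips) or synchronously (blocking commits). */
	if (async)
		tegra_atomic_schedule(tegra, state);
	else
		tegra_atomic_complete(tegra, state);

	mutex_unlock(&tegra->commit.lock);
	return 0;
}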
+ */ + + drm_atomic_helper_swap_state(drm, state); + + if (async) + tegra_atomic_schedule(tegra, state); + else + tegra_atomic_complete(tegra, state); + + mutex_unlock(&tegra->commit.lock); + return 0; +} + +static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { + .fb_create = tegra_fb_create, +#ifdef CONFIG_DRM_TEGRA_FBDEV + .output_poll_changed = tegra_fb_output_poll_changed, +#endif + .atomic_check = drm_atomic_helper_check, + .atomic_commit = tegra_atomic_commit, +}; + static int tegra_drm_load(struct drm_device *drm, unsigned long flags) { struct host1x_device *device = to_host1x_device(drm->dev); @@ -36,8 +125,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) if (iommu_present(&platform_bus_type)) { tegra->domain = iommu_domain_alloc(&platform_bus_type); - if (IS_ERR(tegra->domain)) { - err = PTR_ERR(tegra->domain); + if (!tegra->domain) { + err = -ENOMEM; goto free; } @@ -47,11 +136,23 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) mutex_init(&tegra->clients_lock); INIT_LIST_HEAD(&tegra->clients); + + mutex_init(&tegra->commit.lock); + INIT_WORK(&tegra->commit.work, tegra_atomic_work); + drm->dev_private = tegra; tegra->drm = drm; drm_mode_config_init(drm); + drm->mode_config.min_width = 0; + drm->mode_config.min_height = 0; + + drm->mode_config.max_width = 4096; + drm->mode_config.max_height = 4096; + + drm->mode_config.funcs = &tegra_drm_mode_funcs; + err = tegra_drm_fb_prepare(drm); if (err < 0) goto config; @@ -62,6 +163,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) if (err < 0) goto fbdev; + drm_mode_config_reset(drm); + /* * We don't use the drm_irq_install() helpers provided by the DRM * core, so we need to set this manually in order to allow the @@ -106,8 +209,8 @@ static int tegra_drm_unload(struct drm_device *drm) drm_kms_helper_poll_fini(drm); tegra_drm_fb_exit(drm); - drm_vblank_cleanup(drm); drm_mode_config_cleanup(drm); + drm_vblank_cleanup(drm); err = host1x_device_exit(device); if (err < 0) @@ -190,7 +293,7 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest, if (err < 0) return err; - err = get_user(dest->target.offset, &src->cmdbuf.offset); + err = get_user(dest->target.offset, &src->target.offset); if (err < 0) return err; @@ -893,6 +996,30 @@ static int host1x_drm_remove(struct host1x_device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int host1x_drm_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + + drm_kms_helper_poll_disable(drm); + + return 0; +} + +static int host1x_drm_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + + drm_kms_helper_poll_enable(drm); + + return 0; +} +#endif + +static const struct dev_pm_ops host1x_drm_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume) +}; + static const struct of_device_id host1x_drm_subdevs[] = { { .compatible = "nvidia,tegra20-dc", }, { .compatible = "nvidia,tegra20-hdmi", }, @@ -912,7 +1039,10 @@ static const struct of_device_id host1x_drm_subdevs[] = { }; static struct host1x_driver host1x_drm_driver = { - .name = "drm", + .driver = { + .name = "drm", + .pm = &host1x_drm_pm_ops, + }, .probe = host1x_drm_probe, .remove = host1x_drm_remove, .subdevs = host1x_drm_subdevs, diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 3a3b2e7b5b3f..8cb2dfeaa957 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -50,6 +50,12 @@ struct tegra_drm { #endif unsigned int pitch_align; + 
+ struct { + struct drm_atomic_state *state; + struct work_struct work; + struct mutex lock; + } commit; }; struct tegra_drm_client; @@ -164,45 +170,31 @@ struct tegra_dc_window { unsigned int h; } dst; unsigned int bits_per_pixel; - unsigned int format; - unsigned int swap; unsigned int stride[2]; unsigned long base[3]; bool bottom_up; struct tegra_bo_tiling tiling; + u32 format; + u32 swap; }; /* from dc.c */ void tegra_dc_enable_vblank(struct tegra_dc *dc); void tegra_dc_disable_vblank(struct tegra_dc *dc); void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file); - -struct tegra_output_ops { - int (*enable)(struct tegra_output *output); - int (*disable)(struct tegra_output *output); - int (*setup_clock)(struct tegra_output *output, struct clk *clk, - unsigned long pclk, unsigned int *div); - int (*check_mode)(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status); - enum drm_connector_status (*detect)(struct tegra_output *output); -}; - -enum tegra_output_type { - TEGRA_OUTPUT_RGB, - TEGRA_OUTPUT_HDMI, - TEGRA_OUTPUT_DSI, - TEGRA_OUTPUT_EDP, -}; +void tegra_dc_commit(struct tegra_dc *dc); +int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent, + unsigned long pclk, unsigned int div); +int tegra_dc_state_setup_clock(struct tegra_dc *dc, + struct drm_crtc_state *crtc_state, + struct clk *clk, unsigned long pclk, + unsigned int div); struct tegra_output { struct device_node *of_node; struct device *dev; - const struct tegra_output_ops *ops; - enum tegra_output_type type; - struct drm_panel *panel; struct i2c_adapter *ddc; const struct edid *edid; @@ -223,42 +215,6 @@ static inline struct tegra_output *connector_to_output(struct drm_connector *c) return container_of(c, struct tegra_output, connector); } -static inline int tegra_output_enable(struct tegra_output *output) -{ - if (output && output->ops && output->ops->enable) - return output->ops->enable(output); - - return output ? -ENOSYS : -EINVAL; -} - -static inline int tegra_output_disable(struct tegra_output *output) -{ - if (output && output->ops && output->ops->disable) - return output->ops->disable(output); - - return output ? -ENOSYS : -EINVAL; -} - -static inline int tegra_output_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk, - unsigned int *div) -{ - if (output && output->ops && output->ops->setup_clock) - return output->ops->setup_clock(output, clk, pclk, div); - - return output ? -ENOSYS : -EINVAL; -} - -static inline int tegra_output_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - if (output && output->ops && output->ops->check_mode) - return output->ops->check_mode(output, mode, status); - - return output ? 
-ENOSYS : -EINVAL; -} - /* from rgb.c */ int tegra_dc_rgb_probe(struct tegra_dc *dc); int tegra_dc_rgb_remove(struct tegra_dc *dc); @@ -267,9 +223,18 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc); /* from output.c */ int tegra_output_probe(struct tegra_output *output); -int tegra_output_remove(struct tegra_output *output); +void tegra_output_remove(struct tegra_output *output); int tegra_output_init(struct drm_device *drm, struct tegra_output *output); -int tegra_output_exit(struct tegra_output *output); +void tegra_output_exit(struct tegra_output *output); + +int tegra_output_connector_get_modes(struct drm_connector *connector); +struct drm_encoder * +tegra_output_connector_best_encoder(struct drm_connector *connector); +enum drm_connector_status +tegra_output_connector_detect(struct drm_connector *connector, bool force); +void tegra_output_connector_destroy(struct drm_connector *connector); + +void tegra_output_encoder_destroy(struct drm_encoder *encoder); /* from dpaux.c */ struct tegra_dpaux; @@ -291,12 +256,16 @@ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer); int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer, struct tegra_bo_tiling *tiling); +struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, + struct drm_file *file, + struct drm_mode_fb_cmd2 *cmd); int tegra_drm_fb_prepare(struct drm_device *drm); void tegra_drm_fb_free(struct drm_device *drm); int tegra_drm_fb_init(struct drm_device *drm); void tegra_drm_fb_exit(struct drm_device *drm); #ifdef CONFIG_DRM_TEGRA_FBDEV void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev); +void tegra_fb_output_poll_changed(struct drm_device *drm); #endif extern struct platform_driver tegra_dc_driver; diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 33f67fd601c6..ed970f622903 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -17,6 +17,7 @@ #include <linux/regulator/consumer.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_panel.h> @@ -27,6 +28,28 @@ #include "dsi.h" #include "mipi-phy.h" +struct tegra_dsi_state { + struct drm_connector_state base; + + struct mipi_dphy_timing timing; + unsigned long period; + + unsigned int vrefresh; + unsigned int lanes; + unsigned long pclk; + unsigned long bclk; + + enum tegra_dsi_format format; + unsigned int mul; + unsigned int div; +}; + +static inline struct tegra_dsi_state * +to_dsi_state(struct drm_connector_state *state) +{ + return container_of(state, struct tegra_dsi_state, base); +} + struct tegra_dsi { struct host1x_client client; struct tegra_output output; @@ -51,7 +74,6 @@ struct tegra_dsi { struct mipi_dsi_host host; struct regulator *vdd; - bool enabled; unsigned int video_fifo_depth; unsigned int host_fifo_depth; @@ -77,13 +99,17 @@ static inline struct tegra_dsi *to_dsi(struct tegra_output *output) return container_of(output, struct tegra_dsi, output); } -static inline unsigned long tegra_dsi_readl(struct tegra_dsi *dsi, - unsigned long reg) +static struct tegra_dsi_state *tegra_dsi_get_state(struct tegra_dsi *dsi) +{ + return to_dsi_state(dsi->output.connector.state); +} + +static inline u32 tegra_dsi_readl(struct tegra_dsi *dsi, unsigned long reg) { return readl(dsi->regs + (reg << 2)); } -static inline void tegra_dsi_writel(struct tegra_dsi *dsi, unsigned long value, +static inline void tegra_dsi_writel(struct tegra_dsi *dsi, u32 value, unsigned long reg) { writel(value, dsi->regs + (reg 
<< 2)); @@ -95,7 +121,7 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data) struct tegra_dsi *dsi = node->info_ent->data; #define DUMP_REG(name) \ - seq_printf(s, "%-32s %#05x %08lx\n", #name, name, \ + seq_printf(s, "%-32s %#05x %08x\n", #name, name, \ tegra_dsi_readl(dsi, name)) DUMP_REG(DSI_INCR_SYNCPT); @@ -230,7 +256,7 @@ remove: return err; } -static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi) +static void tegra_dsi_debugfs_exit(struct tegra_dsi *dsi) { drm_debugfs_remove_files(dsi->debugfs_files, ARRAY_SIZE(debugfs_files), dsi->minor); @@ -241,8 +267,6 @@ static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi) debugfs_remove(dsi->debugfs); dsi->debugfs = NULL; - - return 0; } #define PKT_ID0(id) ((((id) & 0x3f) << 3) | (1 << 9)) @@ -338,61 +362,36 @@ static const u32 pkt_seq_command_mode[NUM_PKT_SEQ] = { [11] = 0, }; -static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi) +static void tegra_dsi_set_phy_timing(struct tegra_dsi *dsi, + unsigned long period, + const struct mipi_dphy_timing *timing) { - struct mipi_dphy_timing timing; - unsigned long value, period; - long rate; - int err; - - rate = clk_get_rate(dsi->clk); - if (rate < 0) - return rate; - - period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate * 2); - - err = mipi_dphy_timing_get_default(&timing, period); - if (err < 0) - return err; - - err = mipi_dphy_timing_validate(&timing, period); - if (err < 0) { - dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err); - return err; - } - - /* - * The D-PHY timing fields below are expressed in byte-clock cycles, - * so multiply the period by 8. - */ - period *= 8; + u32 value; - value = DSI_TIMING_FIELD(timing.hsexit, period, 1) << 24 | - DSI_TIMING_FIELD(timing.hstrail, period, 0) << 16 | - DSI_TIMING_FIELD(timing.hszero, period, 3) << 8 | - DSI_TIMING_FIELD(timing.hsprepare, period, 1); + value = DSI_TIMING_FIELD(timing->hsexit, period, 1) << 24 | + DSI_TIMING_FIELD(timing->hstrail, period, 0) << 16 | + DSI_TIMING_FIELD(timing->hszero, period, 3) << 8 | + DSI_TIMING_FIELD(timing->hsprepare, period, 1); tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_0); - value = DSI_TIMING_FIELD(timing.clktrail, period, 1) << 24 | - DSI_TIMING_FIELD(timing.clkpost, period, 1) << 16 | - DSI_TIMING_FIELD(timing.clkzero, period, 1) << 8 | - DSI_TIMING_FIELD(timing.lpx, period, 1); + value = DSI_TIMING_FIELD(timing->clktrail, period, 1) << 24 | + DSI_TIMING_FIELD(timing->clkpost, period, 1) << 16 | + DSI_TIMING_FIELD(timing->clkzero, period, 1) << 8 | + DSI_TIMING_FIELD(timing->lpx, period, 1); tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_1); - value = DSI_TIMING_FIELD(timing.clkprepare, period, 1) << 16 | - DSI_TIMING_FIELD(timing.clkpre, period, 1) << 8 | + value = DSI_TIMING_FIELD(timing->clkprepare, period, 1) << 16 | + DSI_TIMING_FIELD(timing->clkpre, period, 1) << 8 | DSI_TIMING_FIELD(0xff * period, period, 0) << 0; tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_2); - value = DSI_TIMING_FIELD(timing.taget, period, 1) << 16 | - DSI_TIMING_FIELD(timing.tasure, period, 1) << 8 | - DSI_TIMING_FIELD(timing.tago, period, 1); + value = DSI_TIMING_FIELD(timing->taget, period, 1) << 16 | + DSI_TIMING_FIELD(timing->tasure, period, 1) << 8 | + DSI_TIMING_FIELD(timing->tago, period, 1); tegra_dsi_writel(dsi, value, DSI_BTA_TIMING); if (dsi->slave) - return tegra_dsi_set_phy_timing(dsi->slave); - - return 0; + tegra_dsi_set_phy_timing(dsi->slave, period, timing); } static int tegra_dsi_get_muldiv(enum mipi_dsi_pixel_format format, @@ -484,14 +483,22 @@ static unsigned int 
tegra_dsi_get_lanes(struct tegra_dsi *dsi) return dsi->lanes; } -static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, - const struct drm_display_mode *mode) +static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, + const struct drm_display_mode *mode) { unsigned int hact, hsw, hbp, hfp, i, mul, div; - enum tegra_dsi_format format; + struct tegra_dsi_state *state; const u32 *pkt_seq; u32 value; - int err; + + /* XXX: pass in state into this function? */ + if (dsi->master) + state = tegra_dsi_get_state(dsi->master); + else + state = tegra_dsi_get_state(dsi); + + mul = state->mul; + div = state->div; if (dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) { DRM_DEBUG_KMS("Non-burst video mode with sync pulses\n"); @@ -504,15 +511,8 @@ static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, pkt_seq = pkt_seq_command_mode; } - err = tegra_dsi_get_muldiv(dsi->format, &mul, &div); - if (err < 0) - return err; - - err = tegra_dsi_get_format(dsi->format, &format); - if (err < 0) - return err; - - value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(format) | + value = DSI_CONTROL_CHANNEL(0) | + DSI_CONTROL_FORMAT(state->format) | DSI_CONTROL_LANES(dsi->lanes - 1) | DSI_CONTROL_SOURCE(pipe); tegra_dsi_writel(dsi, value, DSI_CONTROL); @@ -591,8 +591,8 @@ static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, /* set SOL delay */ if (dsi->master || dsi->slave) { - unsigned int lanes = tegra_dsi_get_lanes(dsi); unsigned long delay, bclk, bclk_ganged; + unsigned int lanes = state->lanes; /* SOL to valid, valid to FIFO and FIFO write delay */ delay = 4 + 4 + 2; @@ -612,9 +612,7 @@ static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, } if (dsi->slave) { - err = tegra_dsi_configure(dsi->slave, pipe, mode); - if (err < 0) - return err; + tegra_dsi_configure(dsi->slave, pipe, mode); /* * TODO: Support modes other than symmetrical left-right @@ -624,49 +622,6 @@ static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, tegra_dsi_ganged_enable(dsi->slave, mode->hdisplay / 2, mode->hdisplay / 2); } - - return 0; -} - -static int tegra_output_dsi_enable(struct tegra_output *output) -{ - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - const struct drm_display_mode *mode = &dc->base.mode; - struct tegra_dsi *dsi = to_dsi(output); - u32 value; - int err; - - if (dsi->enabled) - return 0; - - err = tegra_dsi_configure(dsi, dc->pipe, mode); - if (err < 0) - return err; - - /* enable display controller */ - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); - value |= DSI_ENABLE; - tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - value |= DISP_CTRL_MODE_C_DISPLAY; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); - value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); - - /* enable DSI controller */ - tegra_dsi_enable(dsi); - - dsi->enabled = true; - - return 0; } static int tegra_dsi_wait_idle(struct tegra_dsi *dsi, unsigned long timeout) @@ -705,6 +660,29 @@ static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi) tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL); } +static void tegra_dsi_set_timeout(struct 
tegra_dsi *dsi, unsigned long bclk, + unsigned int vrefresh) +{ + unsigned int timeout; + u32 value; + + /* one frame high-speed transmission timeout */ + timeout = (bclk / vrefresh) / 512; + value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout); + tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0); + + /* 2 ms peripheral timeout for panel */ + timeout = 2 * bclk / 512 * 1000; + value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000); + tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1); + + value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0); + tegra_dsi_writel(dsi, value, DSI_TO_TALLY); + + if (dsi->slave) + tegra_dsi_set_timeout(dsi->slave, bclk, vrefresh); +} + static void tegra_dsi_disable(struct tegra_dsi *dsi) { u32 value; @@ -724,15 +702,149 @@ static void tegra_dsi_disable(struct tegra_dsi *dsi) usleep_range(5000, 10000); } -static int tegra_output_dsi_disable(struct tegra_output *output) +static void tegra_dsi_soft_reset(struct tegra_dsi *dsi) +{ + u32 value; + + value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL); + value &= ~DSI_POWER_CONTROL_ENABLE; + tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL); + + usleep_range(300, 1000); + + value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL); + value |= DSI_POWER_CONTROL_ENABLE; + tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL); + + usleep_range(300, 1000); + + value = tegra_dsi_readl(dsi, DSI_TRIGGER); + if (value) + tegra_dsi_writel(dsi, 0, DSI_TRIGGER); + + if (dsi->slave) + tegra_dsi_soft_reset(dsi->slave); +} + +static void tegra_dsi_connector_dpms(struct drm_connector *connector, int mode) +{ +} + +static void tegra_dsi_connector_reset(struct drm_connector *connector) +{ + struct tegra_dsi_state *state; + + kfree(connector->state); + connector->state = NULL; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) + connector->state = &state->base; +} + +static struct drm_connector_state * +tegra_dsi_connector_duplicate_state(struct drm_connector *connector) +{ + struct tegra_dsi_state *state = to_dsi_state(connector->state); + struct tegra_dsi_state *copy; + + copy = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (!copy) + return NULL; + + return ©->base; +} + +static const struct drm_connector_funcs tegra_dsi_connector_funcs = { + .dpms = tegra_dsi_connector_dpms, + .reset = tegra_dsi_connector_reset, + .detect = tegra_output_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = tegra_output_connector_destroy, + .atomic_duplicate_state = tegra_dsi_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static enum drm_mode_status +tegra_dsi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = { + .get_modes = tegra_output_connector_get_modes, + .mode_valid = tegra_dsi_connector_mode_valid, + .best_encoder = tegra_output_connector_best_encoder, +}; + +static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { + .destroy = tegra_output_encoder_destroy, +}; + +static void tegra_dsi_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static void tegra_dsi_encoder_prepare(struct drm_encoder *encoder) +{ +} + +static void tegra_dsi_encoder_commit(struct drm_encoder *encoder) { - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); +} + +static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct 
tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_dsi *dsi = to_dsi(output); + struct tegra_dsi_state *state; + u32 value; + + state = tegra_dsi_get_state(dsi); + + tegra_dsi_set_timeout(dsi, state->bclk, state->vrefresh); + + /* + * The D-PHY timing fields are expressed in byte-clock cycles, so + * multiply the period by 8. + */ + tegra_dsi_set_phy_timing(dsi, state->period * 8, &state->timing); + + if (output->panel) + drm_panel_prepare(output->panel); + + tegra_dsi_configure(dsi, dc->pipe, mode); + + /* enable display controller */ + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value |= DSI_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + + /* enable DSI controller */ + tegra_dsi_enable(dsi); + + if (output->panel) + drm_panel_enable(output->panel); + + return; +} + +static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); struct tegra_dsi *dsi = to_dsi(output); - unsigned long value; + u32 value; int err; - if (!dsi->enabled) - return 0; + if (output->panel) + drm_panel_disable(output->panel); tegra_dsi_video_disable(dsi); @@ -741,85 +853,78 @@ static int tegra_output_dsi_disable(struct tegra_output *output) * sure it's only executed when the output is attached to one. */ if (dc) { - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); - value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); value &= ~DSI_ENABLE; tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); + tegra_dc_commit(dc); } err = tegra_dsi_wait_idle(dsi, 100); if (err < 0) dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err); - tegra_dsi_disable(dsi); + tegra_dsi_soft_reset(dsi); - dsi->enabled = false; + if (output->panel) + drm_panel_unprepare(output->panel); - return 0; -} - -static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk, - unsigned int vrefresh) -{ - unsigned int timeout; - u32 value; - - /* one frame high-speed transmission timeout */ - timeout = (bclk / vrefresh) / 512; - value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout); - tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0); - - /* 2 ms peripheral timeout for panel */ - timeout = 2 * bclk / 512 * 1000; - value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000); - tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1); - - value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0); - tegra_dsi_writel(dsi, value, DSI_TO_TALLY); + tegra_dsi_disable(dsi); - if (dsi->slave) - tegra_dsi_set_timeout(dsi->slave, bclk, vrefresh); + return; } -static int tegra_output_dsi_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk, - unsigned int *divp) +static int +tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - struct drm_display_mode *mode = &dc->base.mode; + struct tegra_output *output = 
encoder_to_output(encoder); + struct tegra_dsi_state *state = to_dsi_state(conn_state); + struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); struct tegra_dsi *dsi = to_dsi(output); - unsigned int mul, div, vrefresh, lanes; - unsigned long bclk, plld; + unsigned int scdiv; + unsigned long plld; int err; - lanes = tegra_dsi_get_lanes(dsi); + state->pclk = crtc_state->mode.clock * 1000; + + err = tegra_dsi_get_muldiv(dsi->format, &state->mul, &state->div); + if (err < 0) + return err; + + state->lanes = tegra_dsi_get_lanes(dsi); - err = tegra_dsi_get_muldiv(dsi->format, &mul, &div); + err = tegra_dsi_get_format(dsi->format, &state->format); if (err < 0) return err; - DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", mul, div, lanes); - vrefresh = drm_mode_vrefresh(mode); - DRM_DEBUG_KMS("vrefresh: %u\n", vrefresh); + state->vrefresh = drm_mode_vrefresh(&crtc_state->mode); /* compute byte clock */ - bclk = (pclk * mul) / (div * lanes); + state->bclk = (state->pclk * state->mul) / (state->div * state->lanes); + + DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", state->mul, state->div, + state->lanes); + DRM_DEBUG_KMS("format: %u, vrefresh: %u\n", state->format, + state->vrefresh); + DRM_DEBUG_KMS("bclk: %lu\n", state->bclk); /* * Compute bit clock and round up to the next MHz. */ - plld = DIV_ROUND_UP(bclk * 8, USEC_PER_SEC) * USEC_PER_SEC; + plld = DIV_ROUND_UP(state->bclk * 8, USEC_PER_SEC) * USEC_PER_SEC; + state->period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, plld); + + err = mipi_dphy_timing_get_default(&state->timing, state->period); + if (err < 0) + return err; + + err = mipi_dphy_timing_validate(&state->timing, state->period); + if (err < 0) { + dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err); + return err; + } /* * We divide the frequency by two here, but we make up for that by @@ -828,19 +933,6 @@ static int tegra_output_dsi_setup_clock(struct tegra_output *output, */ plld /= 2; - err = clk_set_parent(clk, dsi->clk_parent); - if (err < 0) { - dev_err(dsi->dev, "failed to set parent clock: %d\n", err); - return err; - } - - err = clk_set_rate(dsi->clk_parent, plld); - if (err < 0) { - dev_err(dsi->dev, "failed to set base clock rate to %lu Hz\n", - plld); - return err; - } - /* * Derive pixel clock from bit clock using the shift clock divider. * Note that this is only half of what we would expect, but we need @@ -851,44 +943,30 @@ static int tegra_output_dsi_setup_clock(struct tegra_output *output, * not working properly otherwise. Perhaps the PLLs cannot generate * frequencies sufficiently high. */ - *divp = ((8 * mul) / (div * lanes)) - 2; + scdiv = ((8 * state->mul) / (state->div * state->lanes)) - 2; - /* - * XXX: Move the below somewhere else so that we don't need to have - * access to the vrefresh in this function? - */ - tegra_dsi_set_timeout(dsi, bclk, vrefresh); - - err = tegra_dsi_set_phy_timing(dsi); - if (err < 0) + err = tegra_dc_state_setup_clock(dc, crtc_state, dsi->clk_parent, + plld, scdiv); + if (err < 0) { + dev_err(output->dev, "failed to setup CRTC state: %d\n", err); return err; + } - return 0; -} - -static int tegra_output_dsi_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - /* - * FIXME: For now, always assume that the mode is okay. 
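The clock bookkeeping in tegra_dsi_encoder_atomic_check() above is easier to follow with numbers plugged in; the values below are illustrative only (a hypothetical 72 MHz pixel clock, four lanes, three bytes per pixel so mul/div = 3/1, and a 60 Hz mode):

	/*
	 * bclk    = (72000000 * 3) / (1 * 4)       = 54 MHz byte clock
	 * plld    = 54000000 * 8, rounded to 1 MHz = 432 MHz bit clock
	 * period  = round(10^9 / 432000000)        = 2 ns per bit
	 * plld   /= 2                              = 216 MHz for the clock setup
	 * scdiv   = ((8 * 3) / (1 * 4)) - 2        = 4
	 *
	 * and, in tegra_dsi_set_timeout() with the same byte clock:
	 *
	 * HTX     = (54000000 / 60) / 512          = 1757 (one-frame timeout)
	 */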
- */ - - *status = MODE_OK; - - return 0; + return err; } -static const struct tegra_output_ops dsi_ops = { - .enable = tegra_output_dsi_enable, - .disable = tegra_output_dsi_disable, - .setup_clock = tegra_output_dsi_setup_clock, - .check_mode = tegra_output_dsi_check_mode, +static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = { + .dpms = tegra_dsi_encoder_dpms, + .prepare = tegra_dsi_encoder_prepare, + .commit = tegra_dsi_encoder_commit, + .mode_set = tegra_dsi_encoder_mode_set, + .disable = tegra_dsi_encoder_disable, + .atomic_check = tegra_dsi_encoder_atomic_check, }; static int tegra_dsi_pad_enable(struct tegra_dsi *dsi) { - unsigned long value; + u32 value; value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0); tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0); @@ -923,17 +1001,44 @@ static int tegra_dsi_init(struct host1x_client *client) struct tegra_dsi *dsi = host1x_client_to_dsi(client); int err; + reset_control_deassert(dsi->rst); + + err = tegra_dsi_pad_calibrate(dsi); + if (err < 0) { + dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); + goto reset; + } + /* Gangsters must not register their own outputs. */ if (!dsi->master) { - dsi->output.type = TEGRA_OUTPUT_DSI; dsi->output.dev = client->dev; - dsi->output.ops = &dsi_ops; + + drm_connector_init(drm, &dsi->output.connector, + &tegra_dsi_connector_funcs, + DRM_MODE_CONNECTOR_DSI); + drm_connector_helper_add(&dsi->output.connector, + &tegra_dsi_connector_helper_funcs); + dsi->output.connector.dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(drm, &dsi->output.encoder, + &tegra_dsi_encoder_funcs, + DRM_MODE_ENCODER_DSI); + drm_encoder_helper_add(&dsi->output.encoder, + &tegra_dsi_encoder_helper_funcs); + + drm_mode_connector_attach_encoder(&dsi->output.connector, + &dsi->output.encoder); + drm_connector_register(&dsi->output.connector); err = tegra_output_init(drm, &dsi->output); if (err < 0) { - dev_err(client->dev, "output setup failed: %d\n", err); - return err; + dev_err(client->dev, + "failed to initialize output: %d\n", + err); + goto reset; } + + dsi->output.encoder.possible_crtcs = 0x3; } if (IS_ENABLED(CONFIG_DEBUG_FS)) { @@ -943,34 +1048,22 @@ static int tegra_dsi_init(struct host1x_client *client) } return 0; + +reset: + reset_control_assert(dsi->rst); + return err; } static int tegra_dsi_exit(struct host1x_client *client) { struct tegra_dsi *dsi = host1x_client_to_dsi(client); - int err; - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_dsi_debugfs_exit(dsi); - if (err < 0) - dev_err(dsi->dev, "debugfs cleanup failed: %d\n", err); - } + tegra_output_exit(&dsi->output); - if (!dsi->master) { - err = tegra_output_disable(&dsi->output); - if (err < 0) { - dev_err(client->dev, "output failed to disable: %d\n", - err); - return err; - } + if (IS_ENABLED(CONFIG_DEBUG_FS)) + tegra_dsi_debugfs_exit(dsi); - err = tegra_output_exit(&dsi->output); - if (err < 0) { - dev_err(client->dev, "output cleanup failed: %d\n", - err); - return err; - } - } + reset_control_assert(dsi->rst); return 0; } @@ -1398,13 +1491,6 @@ static int tegra_dsi_probe(struct platform_device *pdev) if (IS_ERR(dsi->rst)) return PTR_ERR(dsi->rst); - err = reset_control_deassert(dsi->rst); - if (err < 0) { - dev_err(&pdev->dev, "failed to bring DSI out of reset: %d\n", - err); - return err; - } - dsi->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(dsi->clk)) { dev_err(&pdev->dev, "cannot get DSI clock\n"); @@ -1470,12 +1556,6 @@ static int tegra_dsi_probe(struct platform_device *pdev) goto disable_vdd; } - err = 
tegra_dsi_pad_calibrate(dsi); - if (err < 0) { - dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); - goto mipi_free; - } - dsi->host.ops = &tegra_dsi_host_ops; dsi->host.dev = &pdev->dev; @@ -1527,6 +1607,8 @@ static int tegra_dsi_remove(struct platform_device *pdev) return err; } + tegra_output_remove(&dsi->output); + mipi_dsi_host_unregister(&dsi->host); tegra_mipi_free(dsi->mipi); @@ -1535,12 +1617,6 @@ static int tegra_dsi_remove(struct platform_device *pdev) clk_disable_unprepare(dsi->clk); reset_control_assert(dsi->rst); - err = tegra_output_remove(&dsi->output); - if (err < 0) { - dev_err(&pdev->dev, "failed to remove output: %d\n", err); - return err; - } - return 0; } diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index e9c715d89261..397fb34d5d5b 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -129,9 +129,9 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm, return fb; } -static struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, - struct drm_file *file, - struct drm_mode_fb_cmd2 *cmd) +struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, + struct drm_file *file, + struct drm_mode_fb_cmd2 *cmd) { unsigned int hsub, vsub, i; struct tegra_bo *planes[4]; @@ -377,7 +377,7 @@ void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev) drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->base); } -static void tegra_fb_output_poll_changed(struct drm_device *drm) +void tegra_fb_output_poll_changed(struct drm_device *drm) { struct tegra_drm *tegra = drm->dev_private; @@ -386,28 +386,11 @@ static void tegra_fb_output_poll_changed(struct drm_device *drm) } #endif -static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { - .fb_create = tegra_fb_create, -#ifdef CONFIG_DRM_TEGRA_FBDEV - .output_poll_changed = tegra_fb_output_poll_changed, -#endif -}; - int tegra_drm_fb_prepare(struct drm_device *drm) { #ifdef CONFIG_DRM_TEGRA_FBDEV struct tegra_drm *tegra = drm->dev_private; -#endif - drm->mode_config.min_width = 0; - drm->mode_config.min_height = 0; - - drm->mode_config.max_width = 4096; - drm->mode_config.max_height = 4096; - - drm->mode_config.funcs = &tegra_drm_mode_funcs; - -#ifdef CONFIG_DRM_TEGRA_FBDEV tegra->fbdev = tegra_fbdev_create(drm); if (IS_ERR(tegra->fbdev)) return PTR_ERR(tegra->fbdev); diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 8777b7f75791..cfb481943b6b 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -92,36 +92,6 @@ static const struct host1x_bo_ops tegra_bo_ops = { .kunmap = tegra_bo_kunmap, }; -/* - * A generic iommu_map_sg() function is being reviewed and will hopefully be - * merged soon. At that point this function can be dropped in favour of the - * one provided by the IOMMU API. 
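The comment above has since come true: a generic iommu_map_sg() is now part of the IOMMU API, so the driver-local __iommu_map_sg() copy is dropped in the hunk that follows and the buffer's scatterlist is mapped as one contiguous run of IOVA space starting at bo->paddr with a single call, along these lines (sketch mirroring the replacement in tegra_bo_iommu_map()):

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, IOMMU_READ | IOMMU_WRITE);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}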
- */ -static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, - struct scatterlist *sg, unsigned int nents, - int prot) -{ - struct scatterlist *s; - size_t offset = 0; - unsigned int i; - int err; - - for_each_sg(sg, s, nents, i) { - phys_addr_t phys = page_to_phys(sg_page(s)); - size_t length = s->offset + s->length; - - err = iommu_map(domain, iova + offset, phys, length, prot); - if (err < 0) { - iommu_unmap(domain, iova, offset); - return err; - } - - offset += length; - } - - return offset; -} - static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) { int prot = IOMMU_READ | IOMMU_WRITE; @@ -144,8 +114,8 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) bo->paddr = bo->mm->start; - err = __iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl, - bo->sgt->nents, prot); + err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl, + bo->sgt->nents, prot); if (err < 0) { dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err); goto remove; @@ -244,10 +214,8 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) for_each_sg(sgt->sgl, s, sgt->nents, i) sg_dma_address(s) = sg_phys(s); - if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) { - sgt = ERR_PTR(-ENOMEM); + if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) goto release_sgt; - } bo->sgt = sgt; @@ -256,6 +224,7 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) release_sgt: sg_free_table(sgt); kfree(sgt); + sgt = ERR_PTR(-ENOMEM); put_pages: drm_gem_put_pages(&bo->gem, bo->pages, false, false); return PTR_ERR(sgt); diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index ffe26547328d..7e06657ae58b 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -9,10 +9,15 @@ #include <linux/clk.h> #include <linux/debugfs.h> +#include <linux/gpio.h> #include <linux/hdmi.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> + #include "hdmi.h" #include "drm.h" #include "dc.h" @@ -31,7 +36,7 @@ struct tegra_hdmi_config { unsigned int num_tmds; unsigned long fuse_override_offset; - unsigned long fuse_override_value; + u32 fuse_override_value; bool has_sor_io_peak_current; }; @@ -40,7 +45,6 @@ struct tegra_hdmi { struct host1x_client client; struct tegra_output output; struct device *dev; - bool enabled; struct regulator *hdmi; struct regulator *pll; @@ -85,16 +89,16 @@ enum { HDA, }; -static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi, - unsigned long reg) +static inline u32 tegra_hdmi_readl(struct tegra_hdmi *hdmi, + unsigned long offset) { - return readl(hdmi->regs + (reg << 2)); + return readl(hdmi->regs + (offset << 2)); } -static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val, - unsigned long reg) +static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, u32 value, + unsigned long offset) { - writel(val, hdmi->regs + (reg << 2)); + writel(value, hdmi->regs + (offset << 2)); } struct tegra_hdmi_audio_config { @@ -455,8 +459,8 @@ static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi) for (i = 0; i < ARRAY_SIZE(freqs); i++) { unsigned int f = freqs[i]; unsigned int eight_half; - unsigned long value; unsigned int delta; + u32 value; if (f > 96000) delta = 2; @@ -477,7 +481,7 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk) struct 
device_node *node = hdmi->dev->of_node; const struct tegra_hdmi_audio_config *config; unsigned int offset = 0; - unsigned long value; + u32 value; switch (hdmi->audio_source) { case HDA: @@ -571,9 +575,9 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk) return 0; } -static inline unsigned long tegra_hdmi_subpack(const u8 *ptr, size_t size) +static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size) { - unsigned long value = 0; + u32 value = 0; size_t i; for (i = size; i > 0; i--) @@ -587,8 +591,8 @@ static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data, { const u8 *ptr = data; unsigned long offset; - unsigned long value; size_t i, j; + u32 value; switch (ptr[0]) { case HDMI_INFOFRAME_TYPE_AVI: @@ -707,9 +711,9 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi) static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) { struct hdmi_vendor_infoframe frame; - unsigned long value; u8 buffer[10]; ssize_t err; + u32 value; if (!hdmi->stereo) { value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); @@ -738,7 +742,7 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi, const struct tmds_config *tmds) { - unsigned long value; + u32 value; tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0); tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1); @@ -768,21 +772,78 @@ static bool tegra_output_is_hdmi(struct tegra_output *output) return drm_detect_hdmi_monitor(edid); } -static int tegra_output_hdmi_enable(struct tegra_output *output) +static void tegra_hdmi_connector_dpms(struct drm_connector *connector, + int mode) +{ +} + +static const struct drm_connector_funcs tegra_hdmi_connector_funcs = { + .dpms = tegra_hdmi_connector_dpms, + .reset = drm_atomic_helper_connector_reset, + .detect = tegra_output_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = tegra_output_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static enum drm_mode_status +tegra_hdmi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct tegra_output *output = connector_to_output(connector); + struct tegra_hdmi *hdmi = to_hdmi(output); + unsigned long pclk = mode->clock * 1000; + enum drm_mode_status status = MODE_OK; + struct clk *parent; + long err; + + parent = clk_get_parent(hdmi->clk_parent); + + err = clk_round_rate(parent, pclk * 4); + if (err <= 0) + status = MODE_NOCLOCK; + + return status; +} + +static const struct drm_connector_helper_funcs +tegra_hdmi_connector_helper_funcs = { + .get_modes = tegra_output_connector_get_modes, + .mode_valid = tegra_hdmi_connector_mode_valid, + .best_encoder = tegra_output_connector_best_encoder, +}; + +static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = { + .destroy = tegra_output_encoder_destroy, +}; + +static void tegra_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static void tegra_hdmi_encoder_prepare(struct drm_encoder *encoder) +{ +} + +static void tegra_hdmi_encoder_commit(struct drm_encoder *encoder) +{ +} + +static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) { unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey; - struct tegra_dc *dc = 
to_tegra_dc(output->encoder.crtc); - struct drm_display_mode *mode = &dc->base.mode; + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct device_node *node = output->dev->of_node; struct tegra_hdmi *hdmi = to_hdmi(output); - struct device_node *node = hdmi->dev->of_node; unsigned int pulse_start, div82, pclk; - unsigned long value; int retries = 1000; + u32 value; int err; - if (hdmi->enabled) - return 0; - hdmi->dvi = !tegra_output_is_hdmi(output); pclk = mode->clock * 1000; @@ -790,32 +851,6 @@ static int tegra_output_hdmi_enable(struct tegra_output *output) h_back_porch = mode->htotal - mode->hsync_end; h_front_porch = mode->hsync_start - mode->hdisplay; - err = regulator_enable(hdmi->pll); - if (err < 0) { - dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err); - return err; - } - - err = regulator_enable(hdmi->vdd); - if (err < 0) { - dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err); - return err; - } - - err = clk_set_rate(hdmi->clk, pclk); - if (err < 0) - return err; - - err = clk_prepare_enable(hdmi->clk); - if (err < 0) { - dev_err(hdmi->dev, "failed to enable clock: %d\n", err); - return err; - } - - reset_control_assert(hdmi->rst); - usleep_range(1000, 2000); - reset_control_deassert(hdmi->rst); - /* power up sequence */ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0); value &= ~SOR_PLL_PDBG; @@ -987,123 +1022,57 @@ static int tegra_output_hdmi_enable(struct tegra_output *output) value |= HDMI_ENABLE; tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - value |= DISP_CTRL_MODE_C_DISPLAY; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); - value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); + tegra_dc_commit(dc); /* TODO: add HDCP support */ - - hdmi->enabled = true; - - return 0; } -static int tegra_output_hdmi_disable(struct tegra_output *output) +static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) { - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - struct tegra_hdmi *hdmi = to_hdmi(output); - unsigned long value; - - if (!hdmi->enabled) - return 0; + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + u32 value; /* * The following accesses registers of the display controller, so make * sure it's only executed when the output is attached to one. */ if (dc) { - /* - * XXX: We can't do this here because it causes HDMI to go - * into an erroneous state with the result that HDMI won't - * properly work once disabled. See also a similar symptom - * for the SOR output. 
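The open-coded display-controller writes removed above (and in the DSI and RGB paths elsewhere in this series) do not simply disappear; judging by the deleted lines, the GENERAL_ACT_REQ latching sequence is what the new tegra_dc_commit() helper centralizes, so every output arms a pending update the same way. A sketch of what that helper presumably boils down to:

	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);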
- */ - /* - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); - value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - */ - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); value &= ~HDMI_ENABLE; tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); + tegra_dc_commit(dc); } - - clk_disable_unprepare(hdmi->clk); - reset_control_assert(hdmi->rst); - regulator_disable(hdmi->vdd); - regulator_disable(hdmi->pll); - - hdmi->enabled = false; - - return 0; } -static int tegra_output_hdmi_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk, - unsigned int *div) +static int +tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); + unsigned long pclk = crtc_state->mode.clock * 1000; struct tegra_hdmi *hdmi = to_hdmi(output); int err; - err = clk_set_parent(clk, hdmi->clk_parent); + err = tegra_dc_state_setup_clock(dc, crtc_state, hdmi->clk_parent, + pclk, 0); if (err < 0) { - dev_err(output->dev, "failed to set parent: %d\n", err); + dev_err(output->dev, "failed to setup CRTC state: %d\n", err); return err; } - err = clk_set_rate(hdmi->clk_parent, pclk); - if (err < 0) - dev_err(output->dev, "failed to set clock rate to %lu Hz\n", - pclk); - - *div = 0; - - return 0; -} - -static int tegra_output_hdmi_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - struct tegra_hdmi *hdmi = to_hdmi(output); - unsigned long pclk = mode->clock * 1000; - struct clk *parent; - long err; - - parent = clk_get_parent(hdmi->clk_parent); - - err = clk_round_rate(parent, pclk * 4); - if (err <= 0) - *status = MODE_NOCLOCK; - else - *status = MODE_OK; - - return 0; + return err; } -static const struct tegra_output_ops hdmi_ops = { - .enable = tegra_output_hdmi_enable, - .disable = tegra_output_hdmi_disable, - .setup_clock = tegra_output_hdmi_setup_clock, - .check_mode = tegra_output_hdmi_check_mode, +static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = { + .dpms = tegra_hdmi_encoder_dpms, + .prepare = tegra_hdmi_encoder_prepare, + .commit = tegra_hdmi_encoder_commit, + .mode_set = tegra_hdmi_encoder_mode_set, + .disable = tegra_hdmi_encoder_disable, + .atomic_check = tegra_hdmi_encoder_atomic_check, }; static int tegra_hdmi_show_regs(struct seq_file *s, void *data) @@ -1117,8 +1086,8 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data) return err; #define DUMP_REG(name) \ - seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \ - tegra_hdmi_readl(hdmi, name)) + seq_printf(s, "%-56s %#05x %08x\n", #name, name, \ + tegra_hdmi_readl(hdmi, name)) DUMP_REG(HDMI_CTXSW); DUMP_REG(HDMI_NV_PDISP_SOR_STATE0); @@ -1330,7 +1299,7 @@ remove: return err; } -static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi) +static void tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi) { drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files), hdmi->minor); @@ -1341,8 +1310,6 @@ static int tegra_hdmi_debugfs_exit(struct 
tegra_hdmi *hdmi) debugfs_remove(hdmi->debugfs); hdmi->debugfs = NULL; - - return 0; } static int tegra_hdmi_init(struct host1x_client *client) @@ -1351,16 +1318,32 @@ static int tegra_hdmi_init(struct host1x_client *client) struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); int err; - hdmi->output.type = TEGRA_OUTPUT_HDMI; hdmi->output.dev = client->dev; - hdmi->output.ops = &hdmi_ops; + + drm_connector_init(drm, &hdmi->output.connector, + &tegra_hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + drm_connector_helper_add(&hdmi->output.connector, + &tegra_hdmi_connector_helper_funcs); + hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&hdmi->output.encoder, + &tegra_hdmi_encoder_helper_funcs); + + drm_mode_connector_attach_encoder(&hdmi->output.connector, + &hdmi->output.encoder); + drm_connector_register(&hdmi->output.connector); err = tegra_output_init(drm, &hdmi->output); if (err < 0) { - dev_err(client->dev, "output setup failed: %d\n", err); + dev_err(client->dev, "failed to initialize output: %d\n", err); return err; } + hdmi->output.encoder.possible_crtcs = 0x3; + if (IS_ENABLED(CONFIG_DEBUG_FS)) { err = tegra_hdmi_debugfs_init(hdmi, drm->primary); if (err < 0) @@ -1374,34 +1357,44 @@ static int tegra_hdmi_init(struct host1x_client *client) return err; } + err = regulator_enable(hdmi->pll); + if (err < 0) { + dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err); + return err; + } + + err = regulator_enable(hdmi->vdd); + if (err < 0) { + dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err); + return err; + } + + err = clk_prepare_enable(hdmi->clk); + if (err < 0) { + dev_err(hdmi->dev, "failed to enable clock: %d\n", err); + return err; + } + + reset_control_deassert(hdmi->rst); + return 0; } static int tegra_hdmi_exit(struct host1x_client *client) { struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); - int err; - regulator_disable(hdmi->hdmi); + tegra_output_exit(&hdmi->output); - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_hdmi_debugfs_exit(hdmi); - if (err < 0) - dev_err(client->dev, "debugfs cleanup failed: %d\n", - err); - } + clk_disable_unprepare(hdmi->clk); + reset_control_assert(hdmi->rst); - err = tegra_output_disable(&hdmi->output); - if (err < 0) { - dev_err(client->dev, "output failed to disable: %d\n", err); - return err; - } + regulator_disable(hdmi->vdd); + regulator_disable(hdmi->pll); + regulator_disable(hdmi->hdmi); - err = tegra_output_exit(&hdmi->output); - if (err < 0) { - dev_err(client->dev, "output cleanup failed: %d\n", err); - return err; - } + if (IS_ENABLED(CONFIG_DEBUG_FS)) + tegra_hdmi_debugfs_exit(hdmi); return 0; } @@ -1559,11 +1552,7 @@ static int tegra_hdmi_remove(struct platform_device *pdev) return err; } - err = tegra_output_remove(&hdmi->output); - if (err < 0) { - dev_err(&pdev->dev, "failed to remove output: %d\n", err); - return err; - } + tegra_output_remove(&hdmi->output); clk_disable_unprepare(hdmi->clk_parent); clk_disable_unprepare(hdmi->clk); diff --git a/drivers/gpu/drm/tegra/mipi-phy.c b/drivers/gpu/drm/tegra/mipi-phy.c index 486d19d589c8..ba2ae6511957 100644 --- a/drivers/gpu/drm/tegra/mipi-phy.c +++ b/drivers/gpu/drm/tegra/mipi-phy.c @@ -12,9 +12,9 @@ #include "mipi-phy.h" /* - * Default D-PHY timings based on MIPI D-PHY specification. Derived from - * the valid ranges specified in Section 5.9 of the D-PHY specification - * with minor adjustments. 
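The D-PHY rework just below replaces the old T_HS-TRAIL default of max(8 * period, 60 + 4 * period) with the n = 4 form of the specification formula, max(4 * 8 * period, 60 + 4 * 4 * period), to cover reverse-direction HS operation. Numerically, for a hypothetical period of 2 ns:

	/*
	 * old default: max(8 * 2, 60 + 4 * 2)      = max(16, 68) = 68 ns
	 * new default: max(4 * 8 * 2, 60 + 16 * 2) = max(64, 92) = 92 ns
	 */

so the assumed-reverse-capable setting always yields the longer, more conservative trailing time.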
+ * Default D-PHY timings based on MIPI D-PHY specification. Derived from the + * valid ranges specified in Section 6.9, Table 14, Page 40 of the D-PHY + * specification (v1.2) with minor adjustments. */ int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing, unsigned long period) @@ -34,7 +34,20 @@ int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing, timing->hszero = 145 + 5 * period; timing->hssettle = 85 + 6 * period; timing->hsskip = 40; - timing->hstrail = max(8 * period, 60 + 4 * period); + + /* + * The MIPI D-PHY specification (Section 6.9, v1.2, Table 14, Page 40) + * contains this formula as: + * + * T_HS-TRAIL = max(n * 8 * period, 60 + n * 4 * period) + * + * where n = 1 for forward-direction HS mode and n = 4 for reverse- + * direction HS mode. There's only one setting and this function does + * not parameterize on anything other that period, so this code will + * assumes that reverse-direction HS mode is supported and uses n = 4. + */ + timing->hstrail = max(4 * 8 * period, 60 + 4 * 4 * period); + timing->init = 100000; timing->lpx = 60; timing->taget = 5 * timing->lpx; @@ -46,8 +59,8 @@ int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing, } /* - * Validate D-PHY timing according to MIPI Alliance Specification for D-PHY, - * Section 5.9 "Global Operation Timing Parameters". + * Validate D-PHY timing according to MIPI D-PHY specification (v1.2, Section + * Section 6.9 "Global Operation Timing Parameters"). */ int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing, unsigned long period) diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 6a5c7b81fbc5..37db47975d48 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -9,10 +9,11 @@ #include <linux/of_gpio.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_panel.h> #include "drm.h" -static int tegra_connector_get_modes(struct drm_connector *connector) +int tegra_output_connector_get_modes(struct drm_connector *connector) { struct tegra_output *output = connector_to_output(connector); struct edid *edid = NULL; @@ -43,43 +44,20 @@ static int tegra_connector_get_modes(struct drm_connector *connector) return err; } -static int tegra_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct tegra_output *output = connector_to_output(connector); - enum drm_mode_status status = MODE_OK; - int err; - - err = tegra_output_check_mode(output, mode, &status); - if (err < 0) - return MODE_ERROR; - - return status; -} - -static struct drm_encoder * -tegra_connector_best_encoder(struct drm_connector *connector) +struct drm_encoder * +tegra_output_connector_best_encoder(struct drm_connector *connector) { struct tegra_output *output = connector_to_output(connector); return &output->encoder; } -static const struct drm_connector_helper_funcs connector_helper_funcs = { - .get_modes = tegra_connector_get_modes, - .mode_valid = tegra_connector_mode_valid, - .best_encoder = tegra_connector_best_encoder, -}; - -static enum drm_connector_status -tegra_connector_detect(struct drm_connector *connector, bool force) +enum drm_connector_status +tegra_output_connector_detect(struct drm_connector *connector, bool force) { struct tegra_output *output = connector_to_output(connector); enum drm_connector_status status = connector_status_unknown; - if (output->ops->detect) - return output->ops->detect(output); - if (gpio_is_valid(output->hpd_gpio)) { if (gpio_get_value(output->hpd_gpio) == 0) status = 
connector_status_disconnected; @@ -90,95 +68,22 @@ tegra_connector_detect(struct drm_connector *connector, bool force) status = connector_status_disconnected; else status = connector_status_connected; - - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) - status = connector_status_connected; } return status; } -static void drm_connector_clear(struct drm_connector *connector) -{ - memset(connector, 0, sizeof(*connector)); -} - -static void tegra_connector_destroy(struct drm_connector *connector) +void tegra_output_connector_destroy(struct drm_connector *connector) { drm_connector_unregister(connector); drm_connector_cleanup(connector); - drm_connector_clear(connector); } -static const struct drm_connector_funcs connector_funcs = { - .dpms = drm_helper_connector_dpms, - .detect = tegra_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = tegra_connector_destroy, -}; - -static void drm_encoder_clear(struct drm_encoder *encoder) -{ - memset(encoder, 0, sizeof(*encoder)); -} - -static void tegra_encoder_destroy(struct drm_encoder *encoder) +void tegra_output_encoder_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); - drm_encoder_clear(encoder); } -static const struct drm_encoder_funcs encoder_funcs = { - .destroy = tegra_encoder_destroy, -}; - -static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - struct tegra_output *output = encoder_to_output(encoder); - struct drm_panel *panel = output->panel; - - if (mode != DRM_MODE_DPMS_ON) { - drm_panel_disable(panel); - tegra_output_disable(output); - drm_panel_unprepare(panel); - } else { - drm_panel_prepare(panel); - tegra_output_enable(output); - drm_panel_enable(panel); - } -} - -static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted) -{ - return true; -} - -static void tegra_encoder_prepare(struct drm_encoder *encoder) -{ - tegra_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); -} - -static void tegra_encoder_commit(struct drm_encoder *encoder) -{ - tegra_encoder_dpms(encoder, DRM_MODE_DPMS_ON); -} - -static void tegra_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted) -{ -} - -static const struct drm_encoder_helper_funcs encoder_helper_funcs = { - .dpms = tegra_encoder_dpms, - .mode_fixup = tegra_encoder_mode_fixup, - .prepare = tegra_encoder_prepare, - .commit = tegra_encoder_commit, - .mode_set = tegra_encoder_mode_set, -}; - static irqreturn_t hpd_irq(int irq, void *data) { struct tegra_output *output = data; @@ -268,7 +173,7 @@ int tegra_output_probe(struct tegra_output *output) return 0; } -int tegra_output_remove(struct tegra_output *output) +void tegra_output_remove(struct tegra_output *output) { if (gpio_is_valid(output->hpd_gpio)) { free_irq(output->hpd_irq, output); @@ -277,56 +182,17 @@ int tegra_output_remove(struct tegra_output *output) if (output->ddc) put_device(&output->ddc->dev); - - return 0; } int tegra_output_init(struct drm_device *drm, struct tegra_output *output) { - int connector, encoder; - - switch (output->type) { - case TEGRA_OUTPUT_RGB: - connector = DRM_MODE_CONNECTOR_LVDS; - encoder = DRM_MODE_ENCODER_LVDS; - break; - - case TEGRA_OUTPUT_HDMI: - connector = DRM_MODE_CONNECTOR_HDMIA; - encoder = DRM_MODE_ENCODER_TMDS; - break; - - case TEGRA_OUTPUT_DSI: - connector = DRM_MODE_CONNECTOR_DSI; - encoder = DRM_MODE_ENCODER_DSI; - break; - - case TEGRA_OUTPUT_EDP: - connector = 
DRM_MODE_CONNECTOR_eDP; - encoder = DRM_MODE_ENCODER_TMDS; - break; - - default: - connector = DRM_MODE_CONNECTOR_Unknown; - encoder = DRM_MODE_ENCODER_NONE; - break; - } - - drm_connector_init(drm, &output->connector, &connector_funcs, - connector); - drm_connector_helper_add(&output->connector, &connector_helper_funcs); - output->connector.dpms = DRM_MODE_DPMS_OFF; - - if (output->panel) - drm_panel_attach(output->panel, &output->connector); - - drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder); - drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs); - - drm_mode_connector_attach_encoder(&output->connector, &output->encoder); - drm_connector_register(&output->connector); + int err; - output->encoder.possible_crtcs = 0x3; + if (output->panel) { + err = drm_panel_attach(output->panel, &output->connector); + if (err < 0) + return err; + } /* * The connector is now registered and ready to receive hotplug events @@ -338,7 +204,7 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output) return 0; } -int tegra_output_exit(struct tegra_output *output) +void tegra_output_exit(struct tegra_output *output) { /* * The connector is going away, so the interrupt must be disabled to @@ -349,6 +215,4 @@ int tegra_output_exit(struct tegra_output *output) if (output->panel) drm_panel_detach(output->panel); - - return 0; } diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index d6af9be48f42..7cd833f5b5b5 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -9,6 +9,9 @@ #include <linux/clk.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_panel.h> + #include "drm.h" #include "dc.h" @@ -85,13 +88,65 @@ static void tegra_dc_write_regs(struct tegra_dc *dc, tegra_dc_writel(dc, table[i].value, table[i].offset); } -static int tegra_output_rgb_enable(struct tegra_output *output) +static void tegra_rgb_connector_dpms(struct drm_connector *connector, + int mode) +{ +} + +static const struct drm_connector_funcs tegra_rgb_connector_funcs = { + .dpms = tegra_rgb_connector_dpms, + .reset = drm_atomic_helper_connector_reset, + .detect = tegra_output_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = tegra_output_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static enum drm_mode_status +tegra_rgb_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + /* + * FIXME: For now, always assume that the mode is okay. There are + * unresolved issues with clk_round_rate(), which doesn't always + * reliably report whether a frequency can be set or not. 
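One behavioural change buried in the tegra_output_init() hunk above: with connector and encoder registration moved into the individual output drivers, little more is left here for panel-backed outputs than binding the panel, and the return value of drm_panel_attach() is now propagated instead of ignored; the call can legitimately fail, for instance if the panel is already attached to another connector. The remaining core of the function is simply:

	if (output->panel) {
		err = drm_panel_attach(output->panel, &output->connector);
		if (err < 0)
			return err;
	}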
+ */ + return MODE_OK; +} + +static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = { + .get_modes = tegra_output_connector_get_modes, + .mode_valid = tegra_rgb_connector_mode_valid, + .best_encoder = tegra_output_connector_best_encoder, +}; + +static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = { + .destroy = tegra_output_encoder_destroy, +}; + +static void tegra_rgb_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static void tegra_rgb_encoder_prepare(struct drm_encoder *encoder) { +} + +static void tegra_rgb_encoder_commit(struct drm_encoder *encoder) +{ +} + +static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct tegra_output *output = encoder_to_output(encoder); struct tegra_rgb *rgb = to_rgb(output); - unsigned long value; + u32 value; - if (rgb->enabled) - return 0; + if (output->panel) + drm_panel_prepare(output->panel); tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable)); @@ -113,64 +168,39 @@ static int tegra_output_rgb_enable(struct tegra_output *output) value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE; tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS); - value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - value |= DISP_CTRL_MODE_C_DISPLAY; - tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND); + tegra_dc_commit(rgb->dc); - value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL); - value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; - tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); - - rgb->enabled = true; - - return 0; + if (output->panel) + drm_panel_enable(output->panel); } -static int tegra_output_rgb_disable(struct tegra_output *output) +static void tegra_rgb_encoder_disable(struct drm_encoder *encoder) { + struct tegra_output *output = encoder_to_output(encoder); struct tegra_rgb *rgb = to_rgb(output); - unsigned long value; - - if (!rgb->enabled) - return 0; - - value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL); - value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); - tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND); - tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); + if (output->panel) + drm_panel_disable(output->panel); tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); + tegra_dc_commit(rgb->dc); - rgb->enabled = false; - - return 0; + if (output->panel) + drm_panel_unprepare(output->panel); } -static int tegra_output_rgb_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk, - unsigned int *div) +static int +tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); + unsigned long pclk = crtc_state->mode.clock * 1000; struct tegra_rgb *rgb = to_rgb(output); + unsigned int div; int err; - err = 
clk_set_parent(clk, rgb->clk_parent); - if (err < 0) { - dev_err(output->dev, "failed to set parent: %d\n", err); - return err; - } - /* * We may not want to change the frequency of the parent clock, since * it may be a parent for other peripherals. This is due to the fact @@ -187,32 +217,26 @@ static int tegra_output_rgb_setup_clock(struct tegra_output *output, * and hope that the desired frequency can be matched (or at least * matched sufficiently close that the panel will still work). */ + div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2; + pclk = 0; - *div = ((clk_get_rate(clk) * 2) / pclk) - 2; - - return 0; -} - -static int tegra_output_rgb_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - /* - * FIXME: For now, always assume that the mode is okay. There are - * unresolved issues with clk_round_rate(), which doesn't always - * reliably report whether a frequency can be set or not. - */ - - *status = MODE_OK; + err = tegra_dc_state_setup_clock(dc, crtc_state, rgb->clk_parent, + pclk, div); + if (err < 0) { + dev_err(output->dev, "failed to setup CRTC state: %d\n", err); + return err; + } - return 0; + return err; } -static const struct tegra_output_ops rgb_ops = { - .enable = tegra_output_rgb_enable, - .disable = tegra_output_rgb_disable, - .setup_clock = tegra_output_rgb_setup_clock, - .check_mode = tegra_output_rgb_check_mode, +static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = { + .dpms = tegra_rgb_encoder_dpms, + .prepare = tegra_rgb_encoder_prepare, + .commit = tegra_rgb_encoder_commit, + .mode_set = tegra_rgb_encoder_mode_set, + .disable = tegra_rgb_encoder_disable, + .atomic_check = tegra_rgb_encoder_atomic_check, }; int tegra_dc_rgb_probe(struct tegra_dc *dc) @@ -262,64 +286,58 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc) int tegra_dc_rgb_remove(struct tegra_dc *dc) { - int err; - if (!dc->rgb) return 0; - err = tegra_output_remove(dc->rgb); - if (err < 0) - return err; + tegra_output_remove(dc->rgb); + dc->rgb = NULL; return 0; } int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) { - struct tegra_rgb *rgb = to_rgb(dc->rgb); + struct tegra_output *output = dc->rgb; int err; if (!dc->rgb) return -ENODEV; - rgb->output.type = TEGRA_OUTPUT_RGB; - rgb->output.ops = &rgb_ops; + drm_connector_init(drm, &output->connector, &tegra_rgb_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + drm_connector_helper_add(&output->connector, + &tegra_rgb_connector_helper_funcs); + output->connector.dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs, + DRM_MODE_ENCODER_LVDS); + drm_encoder_helper_add(&output->encoder, + &tegra_rgb_encoder_helper_funcs); - err = tegra_output_init(dc->base.dev, &rgb->output); + drm_mode_connector_attach_encoder(&output->connector, + &output->encoder); + drm_connector_register(&output->connector); + + err = tegra_output_init(drm, output); if (err < 0) { - dev_err(dc->dev, "output setup failed: %d\n", err); + dev_err(output->dev, "failed to initialize output: %d\n", err); return err; } /* - * By default, outputs can be associated with each display controller. - * RGB outputs are an exception, so we make sure they can be attached - * to only their parent display controller. + * Other outputs can be attached to either display controller. The RGB + * outputs are an exception and work only with their parent display + * controller. 
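The divider computed in tegra_rgb_encoder_atomic_check() above comes from the display controller's shift clock divider which, judging by the formula, produces roughly pclk = parent * 2 / (div + 2); passing pclk = 0 to tegra_dc_state_setup_clock() asks the helper to leave the parent rate alone. A standalone arithmetic sketch, using made-up example rates:

#include <stdio.h>

int main(void)
{
	unsigned long parent = 216000000;	/* assumed parent clock rate, Hz */
	unsigned long pclk = 70600000;		/* requested pixel clock, Hz */

	/* same formula as in tegra_rgb_encoder_atomic_check() */
	unsigned int div = (parent * 2) / pclk - 2;

	/* effective pixel clock the divider actually produces */
	unsigned long actual = (parent * 2) / (div + 2);

	printf("div = %u, actual pixel clock = %lu Hz\n", div, actual);
	return 0;
}

With these numbers the divider is 4 and the achievable pixel clock is 72 MHz, slightly above the requested 70.6 MHz, which is the kind of mismatch the comment about clk_round_rate() alludes to.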
*/ - rgb->output.encoder.possible_crtcs = drm_crtc_mask(&dc->base); + output->encoder.possible_crtcs = drm_crtc_mask(&dc->base); return 0; } int tegra_dc_rgb_exit(struct tegra_dc *dc) { - if (dc->rgb) { - int err; - - err = tegra_output_disable(dc->rgb); - if (err < 0) { - dev_err(dc->dev, "output failed to disable: %d\n", err); - return err; - } - - err = tegra_output_exit(dc->rgb); - if (err < 0) { - dev_err(dc->dev, "output cleanup failed: %d\n", err); - return err; - } - - dc->rgb = NULL; - } + if (dc->rgb) + tegra_output_exit(dc->rgb); return 0; } diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 7829e81f065d..2afe478ded3b 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -8,13 +8,16 @@ #include <linux/clk.h> #include <linux/debugfs.h> +#include <linux/gpio.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <soc/tegra/pmc.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_dp_helper.h> +#include <drm/drm_panel.h> #include "dc.h" #include "drm.h" @@ -258,18 +261,8 @@ static int tegra_sor_attach(struct tegra_sor *sor) static int tegra_sor_wakeup(struct tegra_sor *sor) { - struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc); unsigned long value, timeout; - /* enable display controller outputs */ - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); - value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); - timeout = jiffies + msecs_to_jiffies(250); /* wait for head to wake up */ @@ -482,10 +475,317 @@ static int tegra_sor_calc_config(struct tegra_sor *sor, return 0; } -static int tegra_output_sor_enable(struct tegra_output *output) +static int tegra_sor_detach(struct tegra_sor *sor) +{ + unsigned long value, timeout; + + /* switch to safe mode */ + value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value &= ~SOR_SUPER_STATE_MODE_NORMAL; + tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_super_update(sor); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_PWR); + if (value & SOR_PWR_MODE_SAFE) + break; + } + + if ((value & SOR_PWR_MODE_SAFE) == 0) + return -ETIMEDOUT; + + /* go to sleep */ + value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK; + tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_super_update(sor); + + /* detach */ + value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value &= ~SOR_SUPER_STATE_ATTACHED; + tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_super_update(sor); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_TEST); + if ((value & SOR_TEST_ATTACHED) == 0) + break; + + usleep_range(25, 100); + } + + if ((value & SOR_TEST_ATTACHED) != 0) + return -ETIMEDOUT; + + return 0; +} + +static int tegra_sor_power_down(struct tegra_sor *sor) +{ + unsigned long value, timeout; + int err; + + value = tegra_sor_readl(sor, SOR_PWR); + value &= ~SOR_PWR_NORMAL_STATE_PU; + value |= SOR_PWR_TRIGGER; + tegra_sor_writel(sor, value, SOR_PWR); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_PWR); + if ((value & SOR_PWR_TRIGGER) == 0) + 
return 0; + + usleep_range(25, 100); + } + + if ((value & SOR_PWR_TRIGGER) != 0) + return -ETIMEDOUT; + + err = clk_set_parent(sor->clk, sor->clk_safe); + if (err < 0) + dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); + + value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | + SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2); + tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + + /* stop lane sequencer */ + value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP | + SOR_LANE_SEQ_CTL_POWER_STATE_DOWN; + tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) + break; + + usleep_range(25, 100); + } + + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) + return -ETIMEDOUT; + + value = tegra_sor_readl(sor, SOR_PLL_2); + value |= SOR_PLL_2_PORT_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL_2); + + usleep_range(20, 100); + + value = tegra_sor_readl(sor, SOR_PLL_0); + value |= SOR_PLL_0_POWER_OFF; + value |= SOR_PLL_0_VCOPD; + tegra_sor_writel(sor, value, SOR_PLL_0); + + value = tegra_sor_readl(sor, SOR_PLL_2); + value |= SOR_PLL_2_SEQ_PLLCAPPD; + value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE; + tegra_sor_writel(sor, value, SOR_PLL_2); + + usleep_range(20, 100); + + return 0; +} + +static int tegra_sor_crc_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + + return 0; +} + +static int tegra_sor_crc_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout) +{ + u32 value; + + timeout = jiffies + msecs_to_jiffies(timeout); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_CRC_A); + if (value & SOR_CRC_A_VALID) + return 0; + + usleep_range(100, 200); + } + + return -ETIMEDOUT; +} + +static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer, + size_t size, loff_t *ppos) +{ + struct tegra_sor *sor = file->private_data; + ssize_t num, err; + char buf[10]; + u32 value; + + mutex_lock(&sor->lock); + + if (!sor->enabled) { + err = -EAGAIN; + goto unlock; + } + + value = tegra_sor_readl(sor, SOR_STATE_1); + value &= ~SOR_STATE_ASY_CRC_MODE_MASK; + tegra_sor_writel(sor, value, SOR_STATE_1); + + value = tegra_sor_readl(sor, SOR_CRC_CNTRL); + value |= SOR_CRC_CNTRL_ENABLE; + tegra_sor_writel(sor, value, SOR_CRC_CNTRL); + + value = tegra_sor_readl(sor, SOR_TEST); + value &= ~SOR_TEST_CRC_POST_SERIALIZE; + tegra_sor_writel(sor, value, SOR_TEST); + + err = tegra_sor_crc_wait(sor, 100); + if (err < 0) + goto unlock; + + tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A); + value = tegra_sor_readl(sor, SOR_CRC_B); + + num = scnprintf(buf, sizeof(buf), "%08x\n", value); + + err = simple_read_from_buffer(buffer, size, ppos, buf, num); + +unlock: + mutex_unlock(&sor->lock); + return err; +} + +static const struct file_operations tegra_sor_crc_fops = { + .owner = THIS_MODULE, + .open = tegra_sor_crc_open, + .read = tegra_sor_crc_read, + .release = tegra_sor_crc_release, +}; + +static int tegra_sor_debugfs_init(struct tegra_sor *sor, + struct drm_minor *minor) +{ + struct dentry *entry; + int err = 0; + + sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root); + if (!sor->debugfs) + return -ENOMEM; + + entry = debugfs_create_file("crc", 0644, sor->debugfs, sor, + &tegra_sor_crc_fops); + if (!entry) 
{ + dev_err(sor->dev, + "cannot create /sys/kernel/debug/dri/%s/sor/crc\n", + minor->debugfs_root->d_name.name); + err = -ENOMEM; + goto remove; + } + + return err; + +remove: + debugfs_remove(sor->debugfs); + sor->debugfs = NULL; + return err; +} + +static void tegra_sor_debugfs_exit(struct tegra_sor *sor) +{ + debugfs_remove_recursive(sor->debugfs); + sor->debugfs = NULL; +} + +static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode) +{ +} + +static enum drm_connector_status +tegra_sor_connector_detect(struct drm_connector *connector, bool force) +{ + struct tegra_output *output = connector_to_output(connector); + struct tegra_sor *sor = to_sor(output); + + if (sor->dpaux) + return tegra_dpaux_detect(sor->dpaux); + + return connector_status_unknown; +} + +static const struct drm_connector_funcs tegra_sor_connector_funcs = { + .dpms = tegra_sor_connector_dpms, + .reset = drm_atomic_helper_connector_reset, + .detect = tegra_sor_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = tegra_output_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int tegra_sor_connector_get_modes(struct drm_connector *connector) { - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - struct drm_display_mode *mode = &dc->base.mode; + struct tegra_output *output = connector_to_output(connector); + struct tegra_sor *sor = to_sor(output); + int err; + + if (sor->dpaux) + tegra_dpaux_enable(sor->dpaux); + + err = tegra_output_connector_get_modes(connector); + + if (sor->dpaux) + tegra_dpaux_disable(sor->dpaux); + + return err; +} + +static enum drm_mode_status +tegra_sor_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = { + .get_modes = tegra_sor_connector_get_modes, + .mode_valid = tegra_sor_connector_mode_valid, + .best_encoder = tegra_output_connector_best_encoder, +}; + +static const struct drm_encoder_funcs tegra_sor_encoder_funcs = { + .destroy = tegra_output_encoder_destroy, +}; + +static void tegra_sor_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static void tegra_sor_encoder_prepare(struct drm_encoder *encoder) +{ +} + +static void tegra_sor_encoder_commit(struct drm_encoder *encoder) +{ +} + +static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); unsigned int vbe, vse, hbe, hse, vbs, hbs, i; struct tegra_sor *sor = to_sor(output); struct tegra_sor_config config; @@ -505,6 +805,9 @@ static int tegra_output_sor_enable(struct tegra_output *output) reset_control_deassert(sor->rst); + if (output->panel) + drm_panel_prepare(output->panel); + /* FIXME: properly convert to struct drm_dp_aux */ aux = (struct drm_dp_aux *)sor->dpaux; @@ -800,18 +1103,6 @@ static int tegra_output_sor_enable(struct tegra_output *output) goto unlock; } - /* start display controller in continuous mode */ - value = tegra_dc_readl(dc, DC_CMD_STATE_ACCESS); - value |= WRITE_MUX; - tegra_dc_writel(dc, value, DC_CMD_STATE_ACCESS); - - tegra_dc_writel(dc, VSYNC_H_POSITION(1), DC_DISP_DISP_TIMING_OPTIONS); - tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND); - - value = 
tegra_dc_readl(dc, DC_CMD_STATE_ACCESS); - value &= ~WRITE_MUX; - tegra_dc_writel(dc, value, DC_CMD_STATE_ACCESS); - /* * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete * raster, associate with display controller) @@ -886,11 +1177,13 @@ static int tegra_output_sor_enable(struct tegra_output *output) goto unlock; } + tegra_sor_update(sor); + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); value |= SOR_ENABLE; tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - tegra_sor_update(sor); + tegra_dc_commit(dc); err = tegra_sor_attach(sor); if (err < 0) { @@ -904,145 +1197,31 @@ static int tegra_output_sor_enable(struct tegra_output *output) goto unlock; } + if (output->panel) + drm_panel_enable(output->panel); + sor->enabled = true; unlock: mutex_unlock(&sor->lock); - return err; } -static int tegra_sor_detach(struct tegra_sor *sor) +static void tegra_sor_encoder_disable(struct drm_encoder *encoder) { - unsigned long value, timeout; - - /* switch to safe mode */ - value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); - value &= ~SOR_SUPER_STATE_MODE_NORMAL; - tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); - tegra_sor_super_update(sor); - - timeout = jiffies + msecs_to_jiffies(250); - - while (time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_PWR); - if (value & SOR_PWR_MODE_SAFE) - break; - } - - if ((value & SOR_PWR_MODE_SAFE) == 0) - return -ETIMEDOUT; - - /* go to sleep */ - value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); - value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK; - tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); - tegra_sor_super_update(sor); - - /* detach */ - value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); - value &= ~SOR_SUPER_STATE_ATTACHED; - tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); - tegra_sor_super_update(sor); - - timeout = jiffies + msecs_to_jiffies(250); - - while (time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_TEST); - if ((value & SOR_TEST_ATTACHED) == 0) - break; - - usleep_range(25, 100); - } - - if ((value & SOR_TEST_ATTACHED) != 0) - return -ETIMEDOUT; - - return 0; -} - -static int tegra_sor_power_down(struct tegra_sor *sor) -{ - unsigned long value, timeout; - int err; - - value = tegra_sor_readl(sor, SOR_PWR); - value &= ~SOR_PWR_NORMAL_STATE_PU; - value |= SOR_PWR_TRIGGER; - tegra_sor_writel(sor, value, SOR_PWR); - - timeout = jiffies + msecs_to_jiffies(250); - - while (time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_PWR); - if ((value & SOR_PWR_TRIGGER) == 0) - return 0; - - usleep_range(25, 100); - } - - if ((value & SOR_PWR_TRIGGER) != 0) - return -ETIMEDOUT; - - err = clk_set_parent(sor->clk, sor->clk_safe); - if (err < 0) - dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); - - value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); - value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | - SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2); - tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); - - /* stop lane sequencer */ - value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP | - SOR_LANE_SEQ_CTL_POWER_STATE_DOWN; - tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); - - timeout = jiffies + msecs_to_jiffies(250); - - while (time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); - if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) - break; - - usleep_range(25, 100); - } - - if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) - return -ETIMEDOUT; - - value = tegra_sor_readl(sor, SOR_PLL_2); - value |= SOR_PLL_2_PORT_POWERDOWN; - 
tegra_sor_writel(sor, value, SOR_PLL_2); - - usleep_range(20, 100); - - value = tegra_sor_readl(sor, SOR_PLL_0); - value |= SOR_PLL_0_POWER_OFF; - value |= SOR_PLL_0_VCOPD; - tegra_sor_writel(sor, value, SOR_PLL_0); - - value = tegra_sor_readl(sor, SOR_PLL_2); - value |= SOR_PLL_2_SEQ_PLLCAPPD; - value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE; - tegra_sor_writel(sor, value, SOR_PLL_2); - - usleep_range(20, 100); - - return 0; -} - -static int tegra_output_sor_disable(struct tegra_output *output) -{ - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); struct tegra_sor *sor = to_sor(output); - unsigned long value; - int err = 0; + u32 value; + int err; mutex_lock(&sor->lock); if (!sor->enabled) goto unlock; + if (output->panel) + drm_panel_disable(output->panel); + err = tegra_sor_detach(sor); if (err < 0) { dev_err(sor->dev, "failed to detach SOR: %d\n", err); @@ -1057,31 +1236,11 @@ static int tegra_output_sor_disable(struct tegra_output *output) * sure it's only executed when the output is attached to one. */ if (dc) { - /* - * XXX: We can't do this here because it causes the SOR to go - * into an erroneous state and the output will look scrambled - * the next time it is enabled. Presumably this is because we - * should be doing this only on the next VBLANK. A possible - * solution would be to queue a "power-off" event to trigger - * this code to be run during the next VBLANK. - */ - /* - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); - value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - */ - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); value &= ~SOR_ENABLE; tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); + tegra_dc_commit(dc); } err = tegra_sor_power_down(sor); @@ -1104,187 +1263,48 @@ static int tegra_output_sor_disable(struct tegra_output *output) goto unlock; } - reset_control_assert(sor->rst); + if (output->panel) + drm_panel_unprepare(output->panel); + clk_disable_unprepare(sor->clk); + reset_control_assert(sor->rst); sor->enabled = false; unlock: mutex_unlock(&sor->lock); - return err; } -static int tegra_output_sor_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk, - unsigned int *div) +static int +tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); + unsigned long pclk = crtc_state->mode.clock * 1000; struct tegra_sor *sor = to_sor(output); int err; - err = clk_set_parent(clk, sor->clk_parent); - if (err < 0) { - dev_err(sor->dev, "failed to set parent clock: %d\n", err); - return err; - } - - err = clk_set_rate(sor->clk_parent, pclk); + err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent, + pclk, 0); if (err < 0) { - dev_err(sor->dev, "failed to set clock rate to %lu Hz\n", pclk); + dev_err(output->dev, "failed to setup CRTC state: %d\n", err); return err; } - *div = 0; - return 0; } -static int 
tegra_output_sor_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - /* - * FIXME: For now, always assume that the mode is okay. - */ - - *status = MODE_OK; - - return 0; -} - -static enum drm_connector_status -tegra_output_sor_detect(struct tegra_output *output) -{ - struct tegra_sor *sor = to_sor(output); - - if (sor->dpaux) - return tegra_dpaux_detect(sor->dpaux); - - return connector_status_unknown; -} - -static const struct tegra_output_ops sor_ops = { - .enable = tegra_output_sor_enable, - .disable = tegra_output_sor_disable, - .setup_clock = tegra_output_sor_setup_clock, - .check_mode = tegra_output_sor_check_mode, - .detect = tegra_output_sor_detect, +static const struct drm_encoder_helper_funcs tegra_sor_encoder_helper_funcs = { + .dpms = tegra_sor_encoder_dpms, + .prepare = tegra_sor_encoder_prepare, + .commit = tegra_sor_encoder_commit, + .mode_set = tegra_sor_encoder_mode_set, + .disable = tegra_sor_encoder_disable, + .atomic_check = tegra_sor_encoder_atomic_check, }; -static int tegra_sor_crc_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - - return 0; -} - -static int tegra_sor_crc_release(struct inode *inode, struct file *file) -{ - return 0; -} - -static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout) -{ - u32 value; - - timeout = jiffies + msecs_to_jiffies(timeout); - - while (time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_CRC_A); - if (value & SOR_CRC_A_VALID) - return 0; - - usleep_range(100, 200); - } - - return -ETIMEDOUT; -} - -static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer, - size_t size, loff_t *ppos) -{ - struct tegra_sor *sor = file->private_data; - ssize_t num, err; - char buf[10]; - u32 value; - - mutex_lock(&sor->lock); - - if (!sor->enabled) { - err = -EAGAIN; - goto unlock; - } - - value = tegra_sor_readl(sor, SOR_STATE_1); - value &= ~SOR_STATE_ASY_CRC_MODE_MASK; - tegra_sor_writel(sor, value, SOR_STATE_1); - - value = tegra_sor_readl(sor, SOR_CRC_CNTRL); - value |= SOR_CRC_CNTRL_ENABLE; - tegra_sor_writel(sor, value, SOR_CRC_CNTRL); - - value = tegra_sor_readl(sor, SOR_TEST); - value &= ~SOR_TEST_CRC_POST_SERIALIZE; - tegra_sor_writel(sor, value, SOR_TEST); - - err = tegra_sor_crc_wait(sor, 100); - if (err < 0) - goto unlock; - - tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A); - value = tegra_sor_readl(sor, SOR_CRC_B); - - num = scnprintf(buf, sizeof(buf), "%08x\n", value); - - err = simple_read_from_buffer(buffer, size, ppos, buf, num); - -unlock: - mutex_unlock(&sor->lock); - return err; -} - -static const struct file_operations tegra_sor_crc_fops = { - .owner = THIS_MODULE, - .open = tegra_sor_crc_open, - .read = tegra_sor_crc_read, - .release = tegra_sor_crc_release, -}; - -static int tegra_sor_debugfs_init(struct tegra_sor *sor, - struct drm_minor *minor) -{ - struct dentry *entry; - int err = 0; - - sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root); - if (!sor->debugfs) - return -ENOMEM; - - entry = debugfs_create_file("crc", 0644, sor->debugfs, sor, - &tegra_sor_crc_fops); - if (!entry) { - dev_err(sor->dev, - "cannot create /sys/kernel/debug/dri/%s/sor/crc\n", - minor->debugfs_root->d_name.name); - err = -ENOMEM; - goto remove; - } - - return err; - -remove: - debugfs_remove(sor->debugfs); - sor->debugfs = NULL; - return err; -} - -static int tegra_sor_debugfs_exit(struct tegra_sor *sor) -{ - debugfs_remove_recursive(sor->debugfs); - sor->debugfs = NULL; - - return 0; 
-} - static int tegra_sor_init(struct host1x_client *client) { struct drm_device *drm = dev_get_drvdata(client->parent); @@ -1294,17 +1314,32 @@ static int tegra_sor_init(struct host1x_client *client) if (!sor->dpaux) return -ENODEV; - sor->output.type = TEGRA_OUTPUT_EDP; - sor->output.dev = sor->dev; - sor->output.ops = &sor_ops; + + drm_connector_init(drm, &sor->output.connector, + &tegra_sor_connector_funcs, + DRM_MODE_CONNECTOR_eDP); + drm_connector_helper_add(&sor->output.connector, + &tegra_sor_connector_helper_funcs); + sor->output.connector.dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&sor->output.encoder, + &tegra_sor_encoder_helper_funcs); + + drm_mode_connector_attach_encoder(&sor->output.connector, + &sor->output.encoder); + drm_connector_register(&sor->output.connector); err = tegra_output_init(drm, &sor->output); if (err < 0) { - dev_err(sor->dev, "output setup failed: %d\n", err); + dev_err(client->dev, "failed to initialize output: %d\n", err); return err; } + sor->output.encoder.possible_crtcs = 0x3; + if (IS_ENABLED(CONFIG_DEBUG_FS)) { err = tegra_sor_debugfs_init(sor, drm->primary); if (err < 0) @@ -1319,6 +1354,20 @@ static int tegra_sor_init(struct host1x_client *client) } } + err = clk_prepare_enable(sor->clk); + if (err < 0) { + dev_err(sor->dev, "failed to enable clock: %d\n", err); + return err; + } + + err = clk_prepare_enable(sor->clk_safe); + if (err < 0) + return err; + + err = clk_prepare_enable(sor->clk_dp); + if (err < 0) + return err; + return 0; } @@ -1327,11 +1376,7 @@ static int tegra_sor_exit(struct host1x_client *client) struct tegra_sor *sor = host1x_client_to_sor(client); int err; - err = tegra_output_disable(&sor->output); - if (err < 0) { - dev_err(sor->dev, "output failed to disable: %d\n", err); - return err; - } + tegra_output_exit(&sor->output); if (sor->dpaux) { err = tegra_dpaux_detach(sor->dpaux); @@ -1341,17 +1386,12 @@ static int tegra_sor_exit(struct host1x_client *client) } } - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_sor_debugfs_exit(sor); - if (err < 0) - dev_err(sor->dev, "debugfs cleanup failed: %d\n", err); - } + clk_disable_unprepare(sor->clk_safe); + clk_disable_unprepare(sor->clk_dp); + clk_disable_unprepare(sor->clk); - err = tegra_output_exit(&sor->output); - if (err < 0) { - dev_err(sor->dev, "output cleanup failed: %d\n", err); - return err; - } + if (IS_ENABLED(CONFIG_DEBUG_FS)) + tegra_sor_debugfs_exit(sor); return 0; } @@ -1404,26 +1444,14 @@ static int tegra_sor_probe(struct platform_device *pdev) if (IS_ERR(sor->clk_parent)) return PTR_ERR(sor->clk_parent); - err = clk_prepare_enable(sor->clk_parent); - if (err < 0) - return err; - sor->clk_safe = devm_clk_get(&pdev->dev, "safe"); if (IS_ERR(sor->clk_safe)) return PTR_ERR(sor->clk_safe); - err = clk_prepare_enable(sor->clk_safe); - if (err < 0) - return err; - sor->clk_dp = devm_clk_get(&pdev->dev, "dp"); if (IS_ERR(sor->clk_dp)) return PTR_ERR(sor->clk_dp); - err = clk_prepare_enable(sor->clk_dp); - if (err < 0) - return err; - INIT_LIST_HEAD(&sor->client.list); sor->client.ops = &sor_client_ops; sor->client.dev = &pdev->dev; @@ -1454,10 +1482,7 @@ static int tegra_sor_remove(struct platform_device *pdev) return err; } - clk_disable_unprepare(sor->clk_parent); - clk_disable_unprepare(sor->clk_safe); - clk_disable_unprepare(sor->clk_dp); - clk_disable_unprepare(sor->clk); + tegra_output_remove(&sor->output); return 0; } diff --git 
a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig index 7c3ef79fcb37..8394a0b3993e 100644 --- a/drivers/gpu/drm/tilcdc/Kconfig +++ b/drivers/gpu/drm/tilcdc/Kconfig @@ -1,6 +1,6 @@ config DRM_TILCDC tristate "DRM Support for TI LCDC Display Controller" - depends on DRM && OF && ARM + depends on DRM && OF && ARM && HAVE_DMA_ATTRS select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select DRM_KMS_CMA_HELPER diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index 1701f1dfb23f..677190a65e82 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -340,11 +340,11 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc, wrptr = udl_dummy_render(wrptr); - ufb->active_16 = true; if (old_fb) { struct udl_framebuffer *uold_fb = to_udl_fb(old_fb); uold_fb->active_16 = false; } + ufb->active_16 = true; udl->mode_buf_len = wrptr - buf; /* damage all of it */ @@ -373,6 +373,13 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; unsigned long flags; + struct drm_framebuffer *old_fb = crtc->primary->fb; + if (old_fb) { + struct udl_framebuffer *uold_fb = to_udl_fb(old_fb); + uold_fb->active_16 = false; + } + ufb->active_16 = true; + udl_handle_damage(ufb, 0, 0, fb->width, fb->height); spin_lock_irqsave(&dev->event_lock, flags); diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index f343db73e095..917dcb978c2c 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c @@ -82,12 +82,14 @@ static inline u16 pixel32_to_be16(const uint32_t pixel) ((pixel >> 8) & 0xf800)); } -static bool pixel_repeats(const void *pixel, const uint32_t repeat, int bpp) +static inline u16 get_pixel_val16(const uint8_t *pixel, int bpp) { + u16 pixel_val16 = 0; if (bpp == 2) - return *(const uint16_t *)pixel == repeat; - else - return *(const uint32_t *)pixel == repeat; + pixel_val16 = *(const uint16_t *)pixel; + else if (bpp == 4) + pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel); + return pixel_val16; } /* @@ -134,6 +136,7 @@ static void udl_compress_hline16( uint8_t *cmd_pixels_count_byte = NULL; const u8 *raw_pixel_start = NULL; const u8 *cmd_pixel_start, *cmd_pixel_end = NULL; + uint16_t pixel_val16; prefetchw((void *) cmd); /* pull in one cache line at least */ @@ -154,33 +157,29 @@ static void udl_compress_hline16( (int)(cmd_buffer_end - cmd) / 2))) * bpp; prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); + pixel_val16 = get_pixel_val16(pixel, bpp); while (pixel < cmd_pixel_end) { const u8 *const start = pixel; - u32 repeating_pixel; - - if (bpp == 2) { - repeating_pixel = *(uint16_t *)pixel; - *(uint16_t *)cmd = cpu_to_be16(repeating_pixel); - } else { - repeating_pixel = *(uint32_t *)pixel; - *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16(repeating_pixel)); - } + const uint16_t repeating_pixel_val16 = pixel_val16; + + *(uint16_t *)cmd = cpu_to_be16(pixel_val16); cmd += 2; pixel += bpp; - if (unlikely((pixel < cmd_pixel_end) && - (pixel_repeats(pixel, repeating_pixel, bpp)))) { + while (pixel < cmd_pixel_end) { + pixel_val16 = get_pixel_val16(pixel, bpp); + if (pixel_val16 != repeating_pixel_val16) + break; + pixel += bpp; + } + + if (unlikely(pixel > start + bpp)) { /* go back and fill in raw pixel count */ *raw_pixels_count_byte = (((start - raw_pixel_start) / bpp) + 1) & 0xFF; - while ((pixel < cmd_pixel_end) && - (pixel_repeats(pixel, repeating_pixel, bpp))) { - pixel += bpp; - } - /* immediately after raw data is repeat 
byte */ *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 7b5d22110f25..6c6b655defcf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv, if (unlikely(ret != 0)) --dev_priv->num_3d_resources; } else if (unhide_svga) { - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ENABLE, vmw_read(dev_priv, SVGA_REG_ENABLE) & ~SVGA_REG_ENABLE_HIDE); - mutex_unlock(&dev_priv->hw_mutex); } mutex_unlock(&dev_priv->release_mutex); @@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, mutex_lock(&dev_priv->release_mutex); if (unlikely(--dev_priv->num_3d_resources == 0)) vmw_release_device(dev_priv); - else if (hide_svga) { - mutex_lock(&dev_priv->hw_mutex); + else if (hide_svga) vmw_write(dev_priv, SVGA_REG_ENABLE, vmw_read(dev_priv, SVGA_REG_ENABLE) | SVGA_REG_ENABLE_HIDE); - mutex_unlock(&dev_priv->hw_mutex); - } n3d = (int32_t) dev_priv->num_3d_resources; mutex_unlock(&dev_priv->release_mutex); @@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->dev = dev; dev_priv->vmw_chipset = chipset; dev_priv->last_read_seqno = (uint32_t) -100; - mutex_init(&dev_priv->hw_mutex); mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->release_mutex); mutex_init(&dev_priv->binding_mutex); rwlock_init(&dev_priv->resource_lock); ttm_lock_init(&dev_priv->reservation_sem); + spin_lock_init(&dev_priv->hw_lock); + spin_lock_init(&dev_priv->waiter_lock); + spin_lock_init(&dev_priv->cap_lock); for (i = vmw_res_context; i < vmw_res_max; ++i) { idr_init(&dev_priv->res_idr[i]); @@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->enable_fb = enable_fbdev; - mutex_lock(&dev_priv->hw_mutex); - vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); svga_id = vmw_read(dev_priv, SVGA_REG_ID); if (svga_id != SVGA_ID_2) { ret = -ENOSYS; DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); - mutex_unlock(&dev_priv->hw_mutex); goto out_err0; } @@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->prim_bb_mem = dev_priv->vram_size; ret = vmw_dma_masks(dev_priv); - if (unlikely(ret != 0)) { - mutex_unlock(&dev_priv->hw_mutex); + if (unlikely(ret != 0)) goto out_err0; - } /* * Limit back buffer size to VRAM size. 
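Stepping back to the udl_compress_hline16() rework a few hunks up: the new inner loop reads each pixel exactly once through get_pixel_val16(), emits it, then keeps consuming pixels while they repeat the emitted value and patches the raw/repeat counts afterwards. A standalone sketch of the same run-detection idea, simplified and not using the driver's command-buffer layout:

#include <stdint.h>
#include <stdio.h>

/* Count how many consecutive 16-bit pixels repeat the first value. */
static size_t run_length(const uint16_t *pixels, size_t count)
{
	size_t n = 1;

	while (n < count && pixels[n] == pixels[0])
		n++;

	return n;
}

int main(void)
{
	const uint16_t line[] = { 0x1234, 0x1234, 0x1234, 0xabcd, 0xabcd, 0x0042 };
	size_t i = 0, total = sizeof(line) / sizeof(line[0]);

	while (i < total) {
		size_t run = run_length(&line[i], total - i);

		printf("%zu x %#06x\n", run, (unsigned int)line[i]);
		i += run;
	}

	return 0;
}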
Remove this once @@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) if (dev_priv->prim_bb_mem > dev_priv->vram_size) dev_priv->prim_bb_mem = dev_priv->vram_size; - mutex_unlock(&dev_priv->hw_mutex); - vmw_print_capabilities(dev_priv->capabilities); if (dev_priv->capabilities & SVGA_CAP_GMR2) { @@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev, if (unlikely(ret != 0)) return ret; vmw_kms_save_vga(dev_priv); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 0); - mutex_unlock(&dev_priv->hw_mutex); } if (active) { @@ -1196,9 +1184,7 @@ out_no_active_lock: if (!dev_priv->enable_fb) { vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, true); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); - mutex_unlock(&dev_priv->hw_mutex); } return ret; } @@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev, DRM_ERROR("Unable to clean VRAM on master drop.\n"); vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, true); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); - mutex_unlock(&dev_priv->hw_mutex); } dev_priv->active_master = &dev_priv->fbdev_master; @@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev) struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); (void) vmw_read(dev_priv, SVGA_REG_ID); - mutex_unlock(&dev_priv->hw_mutex); /** * Reclaim 3d reference held by fbdev and potentially diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 4ee799b43d5d..d26a6daa9719 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -399,7 +399,8 @@ struct vmw_private { uint32_t memory_size; bool has_gmr; bool has_mob; - struct mutex hw_mutex; + spinlock_t hw_lock; + spinlock_t cap_lock; /* * VGA registers. @@ -449,8 +450,9 @@ struct vmw_private { atomic_t marker_seq; wait_queue_head_t fence_queue; wait_queue_head_t fifo_queue; - int fence_queue_waiters; /* Protected by hw_mutex */ - int goal_queue_waiters; /* Protected by hw_mutex */ + spinlock_t waiter_lock; + int fence_queue_waiters; /* Protected by waiter_lock */ + int goal_queue_waiters; /* Protected by waiter_lock */ atomic_t fifo_queue_waiters; uint32_t last_read_seqno; spinlock_t irq_lock; @@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master) return (struct vmw_master *) master->driver_priv; } +/* + * The locking here is fine-grained, so that it is performed once + * for every read- and write operation. This is of course costly, but we + * don't perform much register access in the timing critical paths anyway. + * Instead we have the extra benefit of being sure that we don't forget + * the hw lock around register accesses. 
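To keep track of the split, the single hw_mutex is replaced by three narrower locks; the summary below reflects how the call sites in this series use them (the struct name is illustrative, only the field names match vmwgfx_drv.h):

#include <linux/spinlock.h>

/* Illustrative summary only; the real fields live in struct vmw_private. */
struct vmw_hw_locking {
	spinlock_t hw_lock;	/* index/value port pair in vmw_read()/vmw_write() */
	spinlock_t cap_lock;	/* SVGA_REG_DEV_CAP query sequences */
	spinlock_t waiter_lock;	/* fence/goal/fifo waiter counters */
};

Because vmw_write()/vmw_read() below take hw_lock with spin_lock_irqsave(), the accessors become safe from any context, which is also what lets the fence code further down drop its mutex_trylock()/ping_work fallback and ping the host directly.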
+ */ static inline void vmw_write(struct vmw_private *dev_priv, unsigned int offset, uint32_t value) { + unsigned long irq_flags; + + spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); + spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); } static inline uint32_t vmw_read(struct vmw_private *dev_priv, unsigned int offset) { - uint32_t val; + unsigned long irq_flags; + u32 val; + spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); + spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); + return val; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index b7594cb758af..945f1e0dad92 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -35,7 +35,7 @@ struct vmw_fence_manager { struct vmw_private *dev_priv; spinlock_t lock; struct list_head fence_list; - struct work_struct work, ping_work; + struct work_struct work; u32 user_fence_size; u32 fence_size; u32 event_fence_action_size; @@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f) return "svga"; } -static void vmw_fence_ping_func(struct work_struct *work) -{ - struct vmw_fence_manager *fman = - container_of(work, struct vmw_fence_manager, ping_work); - - vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC); -} - static bool vmw_fence_enable_signaling(struct fence *f) { struct vmw_fence_obj *fence = @@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f) if (seqno - fence->base.seqno < VMW_FENCE_WRAP) return false; - if (mutex_trylock(&dev_priv->hw_mutex)) { - vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC); - mutex_unlock(&dev_priv->hw_mutex); - } else - schedule_work(&fman->ping_work); + vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); return true; } @@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) INIT_LIST_HEAD(&fman->fence_list); INIT_LIST_HEAD(&fman->cleanup_list); INIT_WORK(&fman->work, &vmw_fence_work_func); - INIT_WORK(&fman->ping_work, &vmw_fence_ping_func); fman->fifo_down = true; fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); @@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman) bool lists_empty; (void) cancel_work_sync(&fman->work); - (void) cancel_work_sync(&fman->ping_work); spin_lock_irqsave(&fman->lock, irq_flags); lists_empty = list_empty(&fman->fence_list) && diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 09e10aefcd8e..39f2b03888e7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) if (!dev_priv->has_mob) return false; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->cap_lock); vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->cap_lock); return (result != 0); } @@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); - mutex_lock(&dev_priv->hw_mutex); 
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); @@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) mb(); vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); - mutex_unlock(&dev_priv->hw_mutex); max = ioread32(fifo_mem + SVGA_FIFO_MAX); min = ioread32(fifo_mem + SVGA_FIFO_MIN); @@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) return vmw_fifo_send_fence(dev_priv, &dummy); } -void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason) +void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) { __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + static DEFINE_SPINLOCK(ping_lock); + unsigned long irq_flags; + /* + * The ping_lock is needed because we don't have an atomic + * test-and-set of the SVGA_FIFO_BUSY register. + */ + spin_lock_irqsave(&ping_lock, irq_flags); if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); vmw_write(dev_priv, SVGA_REG_SYNC, reason); } -} - -void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) -{ - mutex_lock(&dev_priv->hw_mutex); - - vmw_fifo_ping_host_locked(dev_priv, reason); - - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock_irqrestore(&ping_lock, irq_flags); } void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) { __le32 __iomem *fifo_mem = dev_priv->mmio_virt; - mutex_lock(&dev_priv->hw_mutex); - vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) ; @@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) vmw_write(dev_priv, SVGA_REG_TRACES, dev_priv->traces_state); - mutex_unlock(&dev_priv->hw_mutex); vmw_marker_queue_takedown(&fifo->marker_queue); if (likely(fifo->static_buffer != NULL)) { @@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, return vmw_fifo_wait_noirq(dev_priv, bytes, interruptible, timeout); - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); outl(SVGA_IRQFLAG_FIFO_PROGRESS, @@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); if (interruptible) ret = wait_event_interruptible_timeout @@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, else if (likely(ret > 0)) ret = 0; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 37881ecf5d7a..69c8ce23123c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void 
*bounce, (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->cap_lock); for (i = 0; i < max_size; ++i) { vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); compat_cap->pairs[i][0] = i; compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->cap_lock); return 0; } @@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, if (num > SVGA3D_DEVCAP_MAX) num = SVGA3D_DEVCAP_MAX; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->cap_lock); for (i = 0; i < num; ++i) { vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->cap_lock); } else if (gb_objects) { ret = vmw_fill_compat_cap(dev_priv, bounce, size); if (unlikely(ret != 0)) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 0c423766c441..9fe9827ee499 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg) static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) { - uint32_t busy; - mutex_lock(&dev_priv->hw_mutex); - busy = vmw_read(dev_priv, SVGA_REG_BUSY); - mutex_unlock(&dev_priv->hw_mutex); - - return (busy == 0); + return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); } void vmw_update_seqno(struct vmw_private *dev_priv, @@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, void vmw_seqno_waiter_add(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (dev_priv->fence_queue_waiters++ == 0) { unsigned long irq_flags; @@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (--dev_priv->fence_queue_waiters == 0) { unsigned long irq_flags; @@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } void vmw_goal_waiter_add(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (dev_priv->goal_queue_waiters++ == 0) { unsigned long irq_flags; @@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } void vmw_goal_waiter_remove(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (--dev_priv->goal_queue_waiters == 0) { unsigned long irq_flags; @@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } int 
vmw_wait_seqno(struct vmw_private *dev_priv, @@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev) if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) return; - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); - mutex_unlock(&dev_priv->hw_mutex); status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 3725b521d931..8725b79e7847 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force) struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_display_unit *du = vmw_connector_to_du(connector); - mutex_lock(&dev_priv->hw_mutex); num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); - mutex_unlock(&dev_priv->hw_mutex); return ((vmw_connector_to_du(connector)->unit < num_displays && du->pref_active) ? diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index aaf54859adb0..4a99c6416e6a 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -72,13 +72,14 @@ static void host1x_subdev_del(struct host1x_subdev *subdev) /** * host1x_device_parse_dt() - scan device tree and add matching subdevices */ -static int host1x_device_parse_dt(struct host1x_device *device) +static int host1x_device_parse_dt(struct host1x_device *device, + struct host1x_driver *driver) { struct device_node *np; int err; for_each_child_of_node(device->dev.parent->of_node, np) { - if (of_match_node(device->driver->subdevs, np) && + if (of_match_node(driver->subdevs, np) && of_device_is_available(np)) { err = host1x_subdev_add(device, np); if (err < 0) @@ -109,14 +110,12 @@ static void host1x_subdev_register(struct host1x_device *device, mutex_unlock(&device->clients_lock); mutex_unlock(&device->subdevs_lock); - /* - * When all subdevices have been registered, the composite device is - * ready to be probed. - */ if (list_empty(&device->subdevs)) { - err = device->driver->probe(device); + err = device_add(&device->dev); if (err < 0) - dev_err(&device->dev, "probe failed: %d\n", err); + dev_err(&device->dev, "failed to add: %d\n", err); + else + device->registered = true; } } @@ -124,16 +123,16 @@ static void __host1x_subdev_unregister(struct host1x_device *device, struct host1x_subdev *subdev) { struct host1x_client *client = subdev->client; - int err; /* * If all subdevices have been activated, we're about to remove the * first active subdevice, so unload the driver first. 
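The effect of the host1x hunks above is that the composite device is no longer probed by calling driver->probe() directly: device_add() publishes it on the host1x bus once the last missing subdevice shows up, the driver core then probes it through the bus (see host1x_device_probe() added below), and the new registered flag remembers whether device_add() succeeded so teardown knows whether device_del() is required. A condensed sketch of that pairing, with illustrative function names:

/* Illustrative pairing of the publish/unpublish paths, not verbatim. */
static void composite_publish(struct host1x_device *device)
{
	int err;

	if (!list_empty(&device->subdevs))
		return;	/* still waiting for subdevices */

	err = device_add(&device->dev);
	if (err < 0)
		dev_err(&device->dev, "failed to add: %d\n", err);
	else
		device->registered = true;
}

static void composite_unpublish(struct host1x_device *device)
{
	if (device->registered) {
		device->registered = false;
		device_del(&device->dev);
	}
}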
*/ if (list_empty(&device->subdevs)) { - err = device->driver->remove(device); - if (err < 0) - dev_err(&device->dev, "remove failed: %d\n", err); + if (device->registered) { + device->registered = false; + device_del(&device->dev); + } } /* @@ -260,24 +259,113 @@ static int host1x_del_client(struct host1x *host1x, return -ENODEV; } -static struct bus_type host1x_bus_type = { - .name = "host1x", -}; +static int host1x_device_match(struct device *dev, struct device_driver *drv) +{ + return strcmp(dev_name(dev), drv->name) == 0; +} + +static int host1x_device_probe(struct device *dev) +{ + struct host1x_driver *driver = to_host1x_driver(dev->driver); + struct host1x_device *device = to_host1x_device(dev); + + if (driver->probe) + return driver->probe(device); + + return 0; +} -int host1x_bus_init(void) +static int host1x_device_remove(struct device *dev) { - return bus_register(&host1x_bus_type); + struct host1x_driver *driver = to_host1x_driver(dev->driver); + struct host1x_device *device = to_host1x_device(dev); + + if (driver->remove) + return driver->remove(device); + + return 0; +} + +static void host1x_device_shutdown(struct device *dev) +{ + struct host1x_driver *driver = to_host1x_driver(dev->driver); + struct host1x_device *device = to_host1x_device(dev); + + if (driver->shutdown) + driver->shutdown(device); } -void host1x_bus_exit(void) +static const struct dev_pm_ops host1x_device_pm_ops = { + .suspend = pm_generic_suspend, + .resume = pm_generic_resume, + .freeze = pm_generic_freeze, + .thaw = pm_generic_thaw, + .poweroff = pm_generic_poweroff, + .restore = pm_generic_restore, +}; + +struct bus_type host1x_bus_type = { + .name = "host1x", + .match = host1x_device_match, + .probe = host1x_device_probe, + .remove = host1x_device_remove, + .shutdown = host1x_device_shutdown, + .pm = &host1x_device_pm_ops, +}; + +static void __host1x_device_del(struct host1x_device *device) { - bus_unregister(&host1x_bus_type); + struct host1x_subdev *subdev, *sd; + struct host1x_client *client, *cl; + + mutex_lock(&device->subdevs_lock); + + /* unregister subdevices */ + list_for_each_entry_safe(subdev, sd, &device->active, list) { + /* + * host1x_subdev_unregister() will remove the client from + * any lists, so we'll need to manually add it back to the + * list of idle clients. + * + * XXX: Alternatively, perhaps don't remove the client from + * any lists in host1x_subdev_unregister() and instead do + * that explicitly from host1x_unregister_client()? 
+ */ + client = subdev->client; + + __host1x_subdev_unregister(device, subdev); + + /* add the client to the list of idle clients */ + mutex_lock(&clients_lock); + list_add_tail(&client->list, &clients); + mutex_unlock(&clients_lock); + } + + /* remove subdevices */ + list_for_each_entry_safe(subdev, sd, &device->subdevs, list) + host1x_subdev_del(subdev); + + mutex_unlock(&device->subdevs_lock); + + /* move clients to idle list */ + mutex_lock(&clients_lock); + mutex_lock(&device->clients_lock); + + list_for_each_entry_safe(client, cl, &device->clients, list) + list_move_tail(&client->list, &clients); + + mutex_unlock(&device->clients_lock); + mutex_unlock(&clients_lock); + + /* finally remove the device */ + list_del_init(&device->list); } static void host1x_device_release(struct device *dev) { struct host1x_device *device = to_host1x_device(dev); + __host1x_device_del(device); kfree(device); } @@ -293,6 +381,8 @@ static int host1x_device_add(struct host1x *host1x, if (!device) return -ENOMEM; + device_initialize(&device->dev); + mutex_init(&device->subdevs_lock); INIT_LIST_HEAD(&device->subdevs); INIT_LIST_HEAD(&device->active); @@ -303,24 +393,18 @@ static int host1x_device_add(struct host1x *host1x, device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; device->dev.dma_mask = &device->dev.coherent_dma_mask; + dev_set_name(&device->dev, "%s", driver->driver.name); device->dev.release = host1x_device_release; - dev_set_name(&device->dev, "%s", driver->name); device->dev.bus = &host1x_bus_type; device->dev.parent = host1x->dev; - err = device_register(&device->dev); - if (err < 0) - return err; - - err = host1x_device_parse_dt(device); + err = host1x_device_parse_dt(device, driver); if (err < 0) { - device_unregister(&device->dev); + kfree(device); return err; } - mutex_lock(&host1x->devices_lock); list_add_tail(&device->list, &host1x->devices); - mutex_unlock(&host1x->devices_lock); mutex_lock(&clients_lock); @@ -347,51 +431,12 @@ static int host1x_device_add(struct host1x *host1x, static void host1x_device_del(struct host1x *host1x, struct host1x_device *device) { - struct host1x_subdev *subdev, *sd; - struct host1x_client *client, *cl; - - mutex_lock(&device->subdevs_lock); - - /* unregister subdevices */ - list_for_each_entry_safe(subdev, sd, &device->active, list) { - /* - * host1x_subdev_unregister() will remove the client from - * any lists, so we'll need to manually add it back to the - * list of idle clients. - * - * XXX: Alternatively, perhaps don't remove the client from - * any lists in host1x_subdev_unregister() and instead do - * that explicitly from host1x_unregister_client()? 
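Worth noting about the lifecycle in host1x_device_add() and host1x_device_del(): device_initialize() makes the device refcounted immediately, device_add() only publishes it later, and the final put_device() is what ends up invoking host1x_device_release() and freeing the memory. The same pattern in isolation (all names here are illustrative, not from the patch):

#include <linux/device.h>
#include <linux/slab.h>

struct example_device {
	struct device dev;
};

static void example_release(struct device *dev)
{
	/* runs once the last reference is dropped, never called directly */
	kfree(container_of(dev, struct example_device, dev));
}

static struct example_device *example_create(struct bus_type *bus,
					     struct device *parent)
{
	struct example_device *ex = kzalloc(sizeof(*ex), GFP_KERNEL);

	if (!ex)
		return NULL;

	device_initialize(&ex->dev);	/* refcount starts at one */
	ex->dev.release = example_release;
	ex->dev.parent = parent;
	ex->dev.bus = bus;
	dev_set_name(&ex->dev, "example");

	return ex;
}

static void example_destroy(struct example_device *ex, bool added)
{
	if (added)
		device_del(&ex->dev);	/* undo device_add() */

	put_device(&ex->dev);		/* drop the initial reference */
}

That is also why host1x_device_del() below can be reduced to device_del() plus put_device(): the actual list and client cleanup in __host1x_device_del() now runs from the release callback.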
- */ - client = subdev->client; - - __host1x_subdev_unregister(device, subdev); - - /* add the client to the list of idle clients */ - mutex_lock(&clients_lock); - list_add_tail(&client->list, &clients); - mutex_unlock(&clients_lock); + if (device->registered) { + device->registered = false; + device_del(&device->dev); } - /* remove subdevices */ - list_for_each_entry_safe(subdev, sd, &device->subdevs, list) - host1x_subdev_del(subdev); - - mutex_unlock(&device->subdevs_lock); - - /* move clients to idle list */ - mutex_lock(&clients_lock); - mutex_lock(&device->clients_lock); - - list_for_each_entry_safe(client, cl, &device->clients, list) - list_move_tail(&client->list, &clients); - - mutex_unlock(&device->clients_lock); - mutex_unlock(&clients_lock); - - /* finally remove the device */ - list_del_init(&device->list); - device_unregister(&device->dev); + put_device(&device->dev); } static void host1x_attach_driver(struct host1x *host1x, @@ -409,11 +454,11 @@ static void host1x_attach_driver(struct host1x *host1x, } } - mutex_unlock(&host1x->devices_lock); - err = host1x_device_add(host1x, driver); if (err < 0) dev_err(host1x->dev, "failed to allocate device: %d\n", err); + + mutex_unlock(&host1x->devices_lock); } static void host1x_detach_driver(struct host1x *host1x, @@ -466,7 +511,8 @@ int host1x_unregister(struct host1x *host1x) return 0; } -int host1x_driver_register(struct host1x_driver *driver) +int host1x_driver_register_full(struct host1x_driver *driver, + struct module *owner) { struct host1x *host1x; @@ -483,9 +529,12 @@ int host1x_driver_register(struct host1x_driver *driver) mutex_unlock(&devices_lock); - return 0; + driver->driver.bus = &host1x_bus_type; + driver->driver.owner = owner; + + return driver_register(&driver->driver); } -EXPORT_SYMBOL(host1x_driver_register); +EXPORT_SYMBOL(host1x_driver_register_full); void host1x_driver_unregister(struct host1x_driver *driver) { diff --git a/drivers/gpu/host1x/bus.h b/drivers/gpu/host1x/bus.h index 4099e99212c8..88fb1c4aac68 100644 --- a/drivers/gpu/host1x/bus.h +++ b/drivers/gpu/host1x/bus.h @@ -18,10 +18,10 @@ #ifndef HOST1X_BUS_H #define HOST1X_BUS_H +struct bus_type; struct host1x; -int host1x_bus_init(void); -void host1x_bus_exit(void); +extern struct bus_type host1x_bus_type; int host1x_register(struct host1x *host1x); int host1x_unregister(struct host1x *host1x); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 2529908d304b..53d3d1d45b48 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -216,7 +216,7 @@ static int __init tegra_host1x_init(void) { int err; - err = host1x_bus_init(); + err = bus_register(&host1x_bus_type); if (err < 0) return err; @@ -233,7 +233,7 @@ static int __init tegra_host1x_init(void) unregister_host1x: platform_driver_unregister(&tegra_host1x_driver); unregister_bus: - host1x_bus_exit(); + bus_unregister(&host1x_bus_type); return err; } module_init(tegra_host1x_init); @@ -242,7 +242,7 @@ static void __exit tegra_host1x_exit(void) { platform_driver_unregister(&tegra_mipi_driver); platform_driver_unregister(&tegra_host1x_driver); - host1x_bus_exit(); + bus_unregister(&host1x_bus_type); } module_exit(tegra_host1x_exit); diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index f707d25ae78f..67bab5c36056 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -742,7 +742,7 @@ static struct ipu_devtype ipu_type_imx51 = { .tpm_ofs = 0x1f060000, .csi0_ofs = 0x1f030000, .csi1_ofs = 0x1f038000, - .ic_ofs = 
0x1f020000, + .ic_ofs = 0x1e020000, .disp0_ofs = 0x1e040000, .disp1_ofs = 0x1e048000, .dc_tmpl_ofs = 0x1f080000, @@ -758,7 +758,7 @@ static struct ipu_devtype ipu_type_imx53 = { .tpm_ofs = 0x07060000, .csi0_ofs = 0x07030000, .csi1_ofs = 0x07038000, - .ic_ofs = 0x07020000, + .ic_ofs = 0x06020000, .disp0_ofs = 0x06040000, .disp1_ofs = 0x06048000, .dc_tmpl_ofs = 0x07080000, diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c index 323203d0503a..4864f8300797 100644 --- a/drivers/gpu/ipu-v3/ipu-dc.c +++ b/drivers/gpu/ipu-v3/ipu-dc.c @@ -277,7 +277,8 @@ static irqreturn_t dc_irq_handler(int irq, void *dev_id) void ipu_dc_disable_channel(struct ipu_dc *dc) { struct ipu_dc_priv *priv = dc->priv; - int irq, ret; + int irq; + unsigned long ret; u32 val; /* TODO: Handle MEM_FG_SYNC differently from MEM_BG_SYNC */ @@ -292,7 +293,7 @@ void ipu_dc_disable_channel(struct ipu_dc *dc) enable_irq(irq); ret = wait_for_completion_timeout(&priv->comp, msecs_to_jiffies(50)); disable_irq(irq); - if (ret <= 0) { + if (ret == 0) { dev_warn(priv->dev, "DC stop timeout after 50 ms\n"); val = readl(dc->base + DC_WR_CH_CONF); |
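The ipu-dc change above is about the return type: wait_for_completion_timeout() returns an unsigned long that is 0 on timeout and otherwise the number of jiffies remaining, so storing it in an int and testing "<= 0" was misleading, as the value can never be negative. Minimal usage sketch, with an illustrative helper name:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_wait_for_stop(struct completion *comp)
{
	unsigned long remaining;

	remaining = wait_for_completion_timeout(comp, msecs_to_jiffies(50));
	if (remaining == 0)
		return -ETIMEDOUT;	/* timed out */

	return 0;			/* completed with 'remaining' jiffies to spare */
}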