Diffstat (limited to 'drivers/gpu')
468 files changed, 13384 insertions, 5436 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 64376dd298ed..0973f408d75f 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -232,6 +232,7 @@ config DRM_RADEON select FW_LOADER select DRM_KMS_HELPER select DRM_TTM + select DRM_TTM_HELPER select POWER_SUPPLY select HWMON select BACKLIGHT_CLASS_DEVICE @@ -252,6 +253,7 @@ config DRM_AMDGPU select DRM_KMS_HELPER select DRM_SCHED select DRM_TTM + select DRM_TTM_HELPER select POWER_SUPPLY select HWMON select BACKLIGHT_CLASS_DEVICE @@ -268,6 +270,8 @@ source "drivers/gpu/drm/nouveau/Kconfig" source "drivers/gpu/drm/i915/Kconfig" +source "drivers/gpu/drm/kmb/Kconfig" + config DRM_VGEM tristate "Virtual GEM provider" depends on DRM diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 81569009f884..fefaff4c832d 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/ obj-$(CONFIG_DRM_MGA) += mga/ obj-$(CONFIG_DRM_I810) += i810/ obj-$(CONFIG_DRM_I915) += i915/ +obj-$(CONFIG_DRM_KMB_DISPLAY) += kmb/ obj-$(CONFIG_DRM_MGAG200) += mgag200/ obj-$(CONFIG_DRM_V3D) += v3d/ obj-$(CONFIG_DRM_VC4) += vc4/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index cdef422de29c..e42175e1acf1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -42,42 +42,6 @@ #include <linux/pci-p2pdma.h> /** - * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation - * @obj: GEM BO - * - * Sets up an in-kernel virtual mapping of the BO's memory. - * - * Returns: - * The virtual address of the mapping or an error pointer. - */ -void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj) -{ - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - int ret; - - ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, - &bo->dma_buf_vmap); - if (ret) - return ERR_PTR(ret); - - return bo->dma_buf_vmap.virtual; -} - -/** - * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation - * @obj: GEM BO - * @vaddr: Virtual address (unused) - * - * Tears down the in-kernel virtual mapping of the BO's memory. 
- */ -void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - - ttm_bo_kunmap(&bo->dma_buf_vmap); -} - -/** * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation * @obj: GEM BO * @vma: Virtual memory area diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h index 2c5c84a06bb9..39b5b9616fd8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h @@ -31,8 +31,6 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, struct amdgpu_bo *bo); -void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); -void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 95634ed1622b..ebdab31f9de9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -40,6 +40,7 @@ #include "amdgpu.h" #include "amdgpu_irq.h" #include "amdgpu_dma_buf.h" +#include "amdgpu_sched.h" #include "amdgpu_amdkfd.h" @@ -1117,7 +1118,7 @@ static const struct pci_device_id pciidlist[] = { MODULE_DEVICE_TABLE(pci, pciidlist); -static struct drm_driver kms_driver; +static const struct drm_driver amdgpu_kms_driver; static int amdgpu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -1188,7 +1189,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, if (ret) return ret; - adev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*adev), ddev); + adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); if (IS_ERR(adev)) return PTR_ERR(adev); @@ -1532,7 +1533,29 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) return 0; } -static struct drm_driver kms_driver = { +int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); + +const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER), + DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + /* KMS */ + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, 
DRM_AUTH|DRM_RENDER_ALLOW), +}; + +static const struct drm_driver amdgpu_kms_driver = { .driver_features = DRIVER_ATOMIC | DRIVER_GEM | @@ -1543,6 +1566,7 @@ static struct drm_driver kms_driver = { .lastclose = amdgpu_driver_lastclose_kms, .irq_handler = amdgpu_irq_handler, .ioctls = amdgpu_ioctls_kms, + .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms), .dumb_create = amdgpu_mode_dumb_create, .dumb_map_offset = amdgpu_mode_dumb_mmap, .fops = &amdgpu_driver_kms_fops, @@ -1595,7 +1619,6 @@ static int __init amdgpu_init(void) goto error_fence; DRM_INFO("amdgpu kernel modesetting enabled.\n"); - kms_driver.num_ioctls = amdgpu_max_kms_ioctl; amdgpu_register_atpx_handler(); /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index cc12ac636aef..d0a1fee1f5f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -33,6 +33,7 @@ #include <drm/amdgpu_drm.h> #include <drm/drm_debugfs.h> +#include <drm/drm_gem_ttm_helper.h> #include "amdgpu.h" #include "amdgpu_display.h" @@ -206,8 +207,8 @@ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = { .open = amdgpu_gem_object_open, .close = amdgpu_gem_object_close, .export = amdgpu_gem_prime_export, - .vmap = amdgpu_gem_prime_vmap, - .vunmap = amdgpu_gem_prime_vunmap, + .vmap = drm_gem_ttm_vmap, + .vunmap = drm_gem_ttm_vunmap, }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index daa89bfe25ca..fc12fc72366f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -29,7 +29,6 @@ #include "amdgpu.h" #include <drm/drm_debugfs.h> #include <drm/amdgpu_drm.h> -#include "amdgpu_sched.h" #include "amdgpu_uvd.h" #include "amdgpu_vce.h" #include "atom.h" @@ -480,7 +479,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, * etc. (all asics). * Returns 0 on success, -EINVAL on failure. 
*/ -static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) +int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct amdgpu_device *adev = drm_to_adev(dev); struct drm_amdgpu_info *info = data; @@ -1249,27 +1248,6 @@ void amdgpu_disable_vblank_kms(struct drm_crtc *crtc) amdgpu_irq_put(adev, &adev->crtc_irq, idx); } -const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER), - DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - /* KMS */ - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW) -}; -const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms); - /* * Debugfs info */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 2bfef286fda4..79120ec41396 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -100,7 +100,6 @@ struct amdgpu_bo { struct amdgpu_bo *parent; struct amdgpu_bo *shadow; - struct ttm_bo_kmap_obj dma_buf_vmap; struct amdgpu_mn *mn; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b719b4281cc9..b848f9e97613 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -512,119 +512,6 @@ error: return r; } -/** - * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer - * - * Called by amdgpu_bo_move(). 
- */ -static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) -{ - struct ttm_resource *old_mem = &bo->mem; - struct ttm_resource tmp_mem; - struct ttm_place placements; - struct ttm_placement placement; - int r; - - /* create space/pages for new_mem in GTT space */ - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = 0; - placements.mem_type = TTM_PL_TT; - placements.flags = 0; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); - if (unlikely(r)) { - pr_err("Failed to find GTT space for blit from VRAM\n"); - return r; - } - - r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); - if (unlikely(r)) - goto out_cleanup; - - /* Bind the memory to the GTT space */ - r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem); - if (unlikely(r)) { - goto out_cleanup; - } - - /* blit VRAM to GTT */ - r = amdgpu_move_blit(bo, evict, &tmp_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } - - r = ttm_bo_wait_ctx(bo, ctx); - if (unlikely(r)) - goto out_cleanup; - - amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); - ttm_resource_free(bo, &bo->mem); - ttm_bo_assign_mem(bo, new_mem); -out_cleanup: - ttm_resource_free(bo, &tmp_mem); - return r; -} - -/** - * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM - * - * Called by amdgpu_bo_move(). - */ -static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) -{ - struct ttm_resource *old_mem = &bo->mem; - struct ttm_resource tmp_mem; - struct ttm_placement placement; - struct ttm_place placements; - int r; - - /* make space in GTT for old_mem buffer */ - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = 0; - placements.mem_type = TTM_PL_TT; - placements.flags = 0; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); - if (unlikely(r)) { - pr_err("Failed to find GTT space for blit to VRAM\n"); - return r; - } - - /* move/bind old memory to GTT space */ - r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); - if (unlikely(r)) - return r; - - r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem); - if (unlikely(r)) { - goto out_cleanup; - } - - ttm_bo_assign_mem(bo, &tmp_mem); - /* copy to VRAM */ - r = amdgpu_move_blit(bo, evict, new_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } -out_cleanup: - ttm_resource_free(bo, &tmp_mem); - return r; -} - /* * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy * @@ -656,13 +543,25 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, */ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) + struct ttm_resource *new_mem, + struct ttm_place *hop) { struct amdgpu_device *adev; struct amdgpu_bo *abo; struct ttm_resource *old_mem = &bo->mem; int r; + if ((old_mem->mem_type == TTM_PL_SYSTEM && + new_mem->mem_type == TTM_PL_VRAM) || + (old_mem->mem_type == TTM_PL_VRAM && + new_mem->mem_type == TTM_PL_SYSTEM)) { + hop->fpfn = 0; + hop->lpfn = 0; + hop->mem_type = TTM_PL_TT; + hop->flags = 0; + return -EMULTIHOP; + } + if (new_mem->mem_type == TTM_PL_TT) { r = 
amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); if (r) @@ -716,17 +615,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, goto memcpy; } - if (old_mem->mem_type == TTM_PL_VRAM && - new_mem->mem_type == TTM_PL_SYSTEM) { - r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem); - } else if (old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_VRAM) { - r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem); - } else { - r = amdgpu_move_blit(bo, evict, - new_mem, old_mem); - } - + r = amdgpu_move_blit(bo, evict, new_mem, old_mem); if (r) { memcpy: /* Check that all memory is CPU accessible */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 83ca5cbffe2c..2d51b7694d1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -47,11 +47,13 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) void amdgpu_virt_init_setting(struct amdgpu_device *adev) { + struct drm_device *ddev = adev_to_drm(adev); + /* enable virtual display */ if (adev->mode_info.num_crtc == 0) adev->mode_info.num_crtc = 1; adev->enable_virtual_display = true; - adev_to_drm(adev)->driver->driver_features &= ~DRIVER_ATOMIC; + ddev->driver_features &= ~DRIVER_ATOMIC; adev->cg_flags = 0; adev->pg_flags = 0; } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index df0b9eeb8933..4b485eb512e2 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -387,10 +387,12 @@ static void komeda_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state, crtc); /* commit with modeset will be handled in enable/disable */ - if (drm_atomic_crtc_needs_modeset(crtc->state)) + if (drm_atomic_crtc_needs_modeset(crtc_state)) return; komeda_crtc_do_flush(crtc, old); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index 1f6682032ca4..6b99df696384 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -58,7 +58,7 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data) return status; } -static struct drm_driver komeda_kms_driver = { +static const struct drm_driver komeda_kms_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .lastclose = drm_fb_helper_lastclose, DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_cma_dumb_create), diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index faa8a5a752da..81ae92390736 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -234,7 +234,7 @@ static void hdlcd_debugfs_init(struct drm_minor *minor) DEFINE_DRM_GEM_CMA_FOPS(fops); -static struct drm_driver hdlcd_driver = { +static const struct drm_driver hdlcd_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .irq_handler = hdlcd_irq, .irq_preinstall = hdlcd_irq_preinstall, diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 69fee05c256c..fceda010d65a 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -561,7 +561,7 @@ static void malidp_debugfs_init(struct drm_minor *minor) #endif //CONFIG_DEBUG_FS -static struct drm_driver 
malidp_driver = { +static const struct drm_driver malidp_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create), #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index ca643f4e2064..3ebcf5a52c8b 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -431,11 +431,13 @@ static int armada_drm_crtc_atomic_check(struct drm_crtc *crtc, static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); - if (crtc->state->color_mgmt_changed) + if (crtc_state->color_mgmt_changed) armada_drm_update_gamma(crtc); dcrtc->regs_idx = 0; @@ -445,6 +447,8 @@ static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc, static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); @@ -455,7 +459,7 @@ static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc, * If we aren't doing a full modeset, then we need to queue * the event here. */ - if (!drm_atomic_crtc_needs_modeset(crtc->state)) { + if (!drm_atomic_crtc_needs_modeset(crtc_state)) { dcrtc->update_pending = true; armada_drm_crtc_queue_state_event(crtc); spin_lock_irq(&dcrtc->irq_lock); diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 22247cfce80b..44fe9f994fc5 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -27,7 +27,7 @@ #include <drm/armada_drm.h> #include "armada_ioctlP.h" -static struct drm_ioctl_desc armada_ioctls[] = { +static const struct drm_ioctl_desc armada_ioctls[] = { DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,0), DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0), DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, 0), @@ -35,7 +35,7 @@ static struct drm_ioctl_desc armada_ioctls[] = { DEFINE_DRM_GEM_FOPS(armada_drm_fops); -static struct drm_driver armada_drm_driver = { +static const struct drm_driver armada_drm_driver = { .lastclose = drm_fb_helper_lastclose, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, @@ -48,6 +48,7 @@ static struct drm_driver armada_drm_driver = { .date = "20120730", .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .ioctls = armada_ioctls, + .num_ioctls = ARRAY_SIZE(armada_ioctls), .fops = &armada_drm_fops, }; @@ -275,8 +276,6 @@ static int __init armada_drm_init(void) { int ret; - armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls); - ret = platform_driver_register(&armada_lcd_platform_driver); if (ret) return ret; diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index 30e01101f59e..6346b890279a 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -325,7 +325,7 @@ static void armada_overlay_reset(struct drm_plane *plane) } } -struct drm_plane_state * +static struct drm_plane_state * armada_overlay_duplicate_state(struct drm_plane *plane) { struct armada_overlay_state *state; diff 
--git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 771ad71cd340..457ec04950f7 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -191,7 +191,7 @@ static void aspeed_gfx_unload(struct drm_device *drm) DEFINE_DRM_GEM_CMA_FOPS(fops); -static struct drm_driver aspeed_gfx_driver = { +static const struct drm_driver aspeed_gfx_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_CMA_DRIVER_OPS, .fops = &fops, diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c index e0f4613918ad..742d43a7edf4 100644 --- a/drivers/gpu/drm/ast/ast_cursor.c +++ b/drivers/gpu/drm/ast/ast_cursor.c @@ -39,7 +39,7 @@ static void ast_cursor_fini(struct ast_private *ast) for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) { gbo = ast->cursor.gbo[i]; - drm_gem_vram_vunmap(gbo, ast->cursor.vaddr[i]); + drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]); drm_gem_vram_unpin(gbo); drm_gem_vram_put(gbo); } @@ -60,7 +60,7 @@ int ast_cursor_init(struct ast_private *ast) struct drm_device *dev = &ast->base; size_t size, i; struct drm_gem_vram_object *gbo; - void __iomem *vaddr; + struct dma_buf_map map; int ret; size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); @@ -77,16 +77,15 @@ int ast_cursor_init(struct ast_private *ast) drm_gem_vram_put(gbo); goto err_drm_gem_vram_put; } - vaddr = drm_gem_vram_vmap(gbo); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); + ret = drm_gem_vram_vmap(gbo, &map); + if (ret) { drm_gem_vram_unpin(gbo); drm_gem_vram_put(gbo); goto err_drm_gem_vram_put; } ast->cursor.gbo[i] = gbo; - ast->cursor.vaddr[i] = vaddr; + ast->cursor.map[i] = map; } return drmm_add_action_or_reset(dev, ast_cursor_release, NULL); @@ -95,7 +94,7 @@ err_drm_gem_vram_put: while (i) { --i; gbo = ast->cursor.gbo[i]; - drm_gem_vram_vunmap(gbo, ast->cursor.vaddr[i]); + drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]); drm_gem_vram_unpin(gbo); drm_gem_vram_put(gbo); } @@ -170,6 +169,7 @@ int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb) { struct drm_device *dev = &ast->base; struct drm_gem_vram_object *gbo; + struct dma_buf_map map; int ret; void *src; void __iomem *dst; @@ -183,18 +183,17 @@ int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb) ret = drm_gem_vram_pin(gbo, 0); if (ret) return ret; - src = drm_gem_vram_vmap(gbo); - if (IS_ERR(src)) { - ret = PTR_ERR(src); + ret = drm_gem_vram_vmap(gbo, &map); + if (ret) goto err_drm_gem_vram_unpin; - } + src = map.vaddr; /* TODO: Use mapping abstraction properly */ - dst = ast->cursor.vaddr[ast->cursor.next_index]; + dst = ast->cursor.map[ast->cursor.next_index].vaddr_iomem; /* do data transfer to cursor BO */ update_cursor_image(dst, src, fb->width, fb->height); - drm_gem_vram_vunmap(gbo, src); + drm_gem_vram_vunmap(gbo, &map); drm_gem_vram_unpin(gbo); return 0; @@ -257,7 +256,7 @@ void ast_cursor_show(struct ast_private *ast, int x, int y, u8 __iomem *sig; u8 jreg; - dst = ast->cursor.vaddr[ast->cursor.next_index]; + dst = ast->cursor.map[ast->cursor.next_index].vaddr; sig = dst + AST_HWC_SIZE; writel(x, sig + AST_HWC_SIGNATURE_X); diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index f0b4af1c390a..667b450606ef 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -49,7 +49,7 @@ module_param_named(modeset, ast_modeset, int, 0400); DEFINE_DRM_GEM_FOPS(ast_fops); -static struct drm_driver ast_driver = { +static const struct drm_driver 
ast_driver = { .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 467049ca8430..ccaff81924ee 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -28,10 +28,11 @@ #ifndef __AST_DRV_H__ #define __AST_DRV_H__ -#include <linux/types.h> -#include <linux/io.h> +#include <linux/dma-buf-map.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> +#include <linux/io.h> +#include <linux/types.h> #include <drm/drm_connector.h> #include <drm/drm_crtc.h> @@ -63,6 +64,7 @@ enum ast_chip { AST2300, AST2400, AST2500, + AST2600, }; enum ast_tx_chip { @@ -131,7 +133,7 @@ struct ast_private { struct { struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM]; - void __iomem *vaddr[AST_DEFAULT_HWC_NUM]; + struct dma_buf_map map[AST_DEFAULT_HWC_NUM]; unsigned int next_index; } cursor; @@ -159,7 +161,7 @@ static inline struct ast_private *to_ast_private(struct drm_device *dev) return container_of(dev, struct ast_private, base); } -struct ast_private *ast_device_create(struct drm_driver *drv, +struct ast_private *ast_device_create(const struct drm_driver *drv, struct pci_dev *pdev, unsigned long flags); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 77066bca8793..1b13199858cb 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -143,7 +143,10 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast_detect_config_mode(dev, &scu_rev); /* Identify chipset */ - if (dev->pdev->revision >= 0x40) { + if (dev->pdev->revision >= 0x50) { + ast->chip = AST2600; + drm_info(dev, "AST 2600 detected\n"); + } else if (dev->pdev->revision >= 0x40) { ast->chip = AST2500; drm_info(dev, "AST 2500 detected\n"); } else if (dev->pdev->revision >= 0x30) { @@ -392,7 +395,7 @@ static void ast_device_release(void *data) ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04); } -struct ast_private *ast_device_create(struct drm_driver *drv, +struct ast_private *ast_device_create(const struct drm_driver *drv, struct pci_dev *pdev, unsigned long flags) { diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 22f0e65fbe9a..9db371f4054f 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -782,10 +782,12 @@ static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); struct ast_private *ast = to_ast_private(crtc->dev); - struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state); + struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state); /* diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index 3d013946714e..d9eb353a4bf0 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -295,10 +295,10 @@ static const struct ast_vbios_enhtable res_1600x900[] = { static const struct ast_vbios_enhtable res_1920x1080[] = { {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ - (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | AST2500PreCatchCRT), 60, 1, 0x38 }, {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ - (SyncNP | 
Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | AST2500PreCatchCRT), 0xFF, 1, 0x38 }, }; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index c17571a3cc2b..c58fa00b4848 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -25,7 +25,7 @@ #include "atmel_hlcdc_dc.h" /** - * Atmel HLCDC CRTC state structure + * struct atmel_hlcdc_crtc_state - Atmel HLCDC CRTC state structure * * @base: base CRTC state * @output_mode: RGBXXX output mode @@ -42,10 +42,10 @@ drm_crtc_state_to_atmel_hlcdc_crtc_state(struct drm_crtc_state *state) } /** - * Atmel HLCDC CRTC structure + * struct atmel_hlcdc_crtc - Atmel HLCDC CRTC structure * * @base: base DRM CRTC structure - * @hlcdc: pointer to the atmel_hlcdc structure provided by the MFD device + * @dc: pointer to the atmel_hlcdc structure provided by the MFD device * @event: pointer to the current page flip event * @id: CRTC id (returned by drm_crtc_index) */ diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 871293d1aeeb..98fb53b75f77 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -815,7 +815,7 @@ static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev) DEFINE_DRM_GEM_CMA_FOPS(fops); -static struct drm_driver atmel_hlcdc_dc_driver = { +static const struct drm_driver atmel_hlcdc_dc_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .irq_handler = atmel_hlcdc_dc_irq_handler, .irq_preinstall = atmel_hlcdc_dc_irq_uninstall, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 40800ec5700a..15bc93163833 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -19,7 +19,7 @@ #include "atmel_hlcdc_dc.h" /** - * Atmel HLCDC Plane state structure. + * struct atmel_hlcdc_plane_state - Atmel HLCDC Plane state structure. 
* * @base: DRM plane state * @crtc_x: x position of the plane relative to the CRTC @@ -34,6 +34,7 @@ * @disc_y: y discard position * @disc_w: discard width * @disc_h: discard height + * @ahb_id: AHB identification number * @bpp: bytes per pixel deduced from pixel_format * @offsets: offsets to apply to the GEM buffers * @xstride: value to add to the pixel pointer between each line @@ -280,8 +281,8 @@ atmel_hlcdc_plane_scaler_set_phicoeff(struct atmel_hlcdc_plane *plane, coeff_tab[i]); } -void atmel_hlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane, - struct atmel_hlcdc_plane_state *state) +static void atmel_hlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_plane_state *state) { const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc; u32 xfactor, yfactor; diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index e18c51de1196..fd454225fd19 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -57,7 +57,7 @@ err: DEFINE_DRM_GEM_FOPS(bochs_fops); -static struct drm_driver bochs_driver = { +static const struct drm_driver bochs_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &bochs_fops, .name = "bochs-drm", diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 13d0d04c4457..853081d186d5 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -151,7 +151,6 @@ int bochs_kms_init(struct bochs_device *bochs) bochs->dev->mode_config.preferred_depth = 24; bochs->dev->mode_config.prefer_shadow = 0; bochs->dev->mode_config.prefer_shadow_fbdev = 1; - bochs->dev->mode_config.fbdev_use_iomem = true; bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; bochs->dev->mode_config.funcs = &bochs_mode_funcs; diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index ef91646441b1..e4110d6ca7b3 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -61,6 +61,19 @@ config DRM_LONTIUM_LT9611 HDMI signals Please say Y if you have such hardware. +config DRM_LONTIUM_LT9611UXC + tristate "Lontium LT9611UXC DSI/HDMI bridge" + select SND_SOC_HDMI_CODEC if SND_SOC + depends on OF + select DRM_PANEL_BRIDGE + select DRM_KMS_HELPER + select REGMAP_I2C + help + Driver for the Lontium LT9611UXC DSI to HDMI bridge + chip, which converts dual DSI and I2S to + HDMI signals. + Please say Y if you have such hardware.
+ config DRM_LVDS_CODEC tristate "Transparent LVDS encoders and decoders support" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 2b3aff104e46..86e7acc76f8d 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o +obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index fafb4b492ea0..cab6c8b92efd 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -524,94 +524,6 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp) writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); } -static int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp) -{ - int reg; - int retval = 0; - int timeout_loop = 0; - - /* Enable AUX CH operation */ - reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); - reg |= AUX_EN; - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); - - /* Is AUX CH command reply received? */ - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - while (!(reg & RPLY_RECEIV)) { - timeout_loop++; - if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { - dev_err(dp->dev, "AUX CH command reply failed!\n"); - return -ETIMEDOUT; - } - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - usleep_range(10, 11); - } - - /* Clear interrupt source for AUX CH command reply */ - writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA); - - /* Clear interrupt source for AUX CH access error */ - reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); - if (reg & AUX_ERR) { - writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA); - return -EREMOTEIO; - } - - /* Check AUX CH error access status */ - reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA); - if ((reg & AUX_STATUS_MASK) != 0) { - dev_err(dp->dev, "AUX CH error happens: %d\n\n", - reg & AUX_STATUS_MASK); - return -EREMOTEIO; - } - - return retval; -} - -int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp, - unsigned int reg_addr, - unsigned char data) -{ - u32 reg; - int i; - int retval; - - for (i = 0; i < 3; i++) { - /* Clear AUX CH data buffer */ - reg = BUF_CLR; - writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL); - - /* Select DPCD device address */ - reg = AUX_ADDR_7_0(reg_addr); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0); - reg = AUX_ADDR_15_8(reg_addr); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8); - reg = AUX_ADDR_19_16(reg_addr); - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16); - - /* Write data buffer */ - reg = (unsigned int)data; - writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0); - - /* - * Set DisplayPort transaction and write 1 byte - * If bit 3 is 1, DisplayPort transaction. - * If Bit 3 is 0, I2C transaction. 
- */ - reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; - writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1); - - /* Start AUX transaction */ - retval = analogix_dp_start_aux_transaction(dp); - if (retval == 0) - break; - - dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); - } - - return retval; -} - void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype) { u32 reg; diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c new file mode 100644 index 000000000000..0c98d27f84ac --- /dev/null +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -0,0 +1,1002 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2020. Linaro Limited. + */ + +#include <linux/firmware.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_graph.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/wait.h> + +#include <sound/hdmi-codec.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> + +#define EDID_BLOCK_SIZE 128 +#define EDID_NUM_BLOCKS 2 + +struct lt9611uxc { + struct device *dev; + struct drm_bridge bridge; + struct drm_connector connector; + + struct regmap *regmap; + /* Protects all accesses to registers by stopping the on-chip MCU */ + struct mutex ocm_lock; + + struct wait_queue_head wq; + + struct device_node *dsi0_node; + struct device_node *dsi1_node; + struct mipi_dsi_device *dsi0; + struct mipi_dsi_device *dsi1; + struct platform_device *audio_pdev; + + struct gpio_desc *reset_gpio; + struct gpio_desc *enable_gpio; + + struct regulator_bulk_data supplies[2]; + + struct i2c_client *client; + + bool hpd_supported; + bool edid_read; + uint8_t fw_version; +}; + +#define LT9611_PAGE_CONTROL 0xff + +static const struct regmap_range_cfg lt9611uxc_ranges[] = { + { + .name = "register_range", + .range_min = 0, + .range_max = 0xd0ff, + .selector_reg = LT9611_PAGE_CONTROL, + .selector_mask = 0xff, + .selector_shift = 0, + .window_start = 0, + .window_len = 0x100, + }, +}; + +static const struct regmap_config lt9611uxc_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0xffff, + .ranges = lt9611uxc_ranges, + .num_ranges = ARRAY_SIZE(lt9611uxc_ranges), +}; + +struct lt9611uxc_mode { + u16 hdisplay; + u16 vdisplay; + u8 vrefresh; +}; + +/* + * This chip supports only a fixed set of modes. + * Enumerate them here to check whether the mode is supported. 
+ */ +static struct lt9611uxc_mode lt9611uxc_modes[] = { + { 1920, 1080, 60 }, + { 1920, 1080, 30 }, + { 1920, 1080, 25 }, + { 1366, 768, 60 }, + { 1360, 768, 60 }, + { 1280, 1024, 60 }, + { 1280, 800, 60 }, + { 1280, 720, 60 }, + { 1280, 720, 50 }, + { 1280, 720, 30 }, + { 1152, 864, 60 }, + { 1024, 768, 60 }, + { 800, 600, 60 }, + { 720, 576, 50 }, + { 720, 480, 60 }, + { 640, 480, 60 }, +}; + +static struct lt9611uxc *bridge_to_lt9611uxc(struct drm_bridge *bridge) +{ + return container_of(bridge, struct lt9611uxc, bridge); +} + +static struct lt9611uxc *connector_to_lt9611uxc(struct drm_connector *connector) +{ + return container_of(connector, struct lt9611uxc, connector); +} + +static void lt9611uxc_lock(struct lt9611uxc *lt9611uxc) +{ + mutex_lock(&lt9611uxc->ocm_lock); + regmap_write(lt9611uxc->regmap, 0x80ee, 0x01); +} + +static void lt9611uxc_unlock(struct lt9611uxc *lt9611uxc) +{ + regmap_write(lt9611uxc->regmap, 0x80ee, 0x00); + msleep(50); + mutex_unlock(&lt9611uxc->ocm_lock); +} + +static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id) +{ + struct lt9611uxc *lt9611uxc = dev_id; + unsigned int irq_status = 0; + unsigned int hpd_status = 0; + + lt9611uxc_lock(lt9611uxc); + + regmap_read(lt9611uxc->regmap, 0xb022, &irq_status); + regmap_read(lt9611uxc->regmap, 0xb023, &hpd_status); + if (irq_status) + regmap_write(lt9611uxc->regmap, 0xb022, 0); + + lt9611uxc_unlock(lt9611uxc); + + if (irq_status & BIT(0)) + lt9611uxc->edid_read = !!(hpd_status & BIT(0)); + + if (irq_status & BIT(1)) { + if (lt9611uxc->connector.dev) + drm_kms_helper_hotplug_event(lt9611uxc->connector.dev); + else + drm_bridge_hpd_notify(&lt9611uxc->bridge, !!(hpd_status & BIT(1))); + } + + return IRQ_HANDLED; +} + +static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc) +{ + gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1); + msleep(20); + + gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 0); + msleep(20); + + gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1); + msleep(300); +} + +static void lt9611uxc_assert_5v(struct lt9611uxc *lt9611uxc) +{ + if (!lt9611uxc->enable_gpio) + return; + + gpiod_set_value_cansleep(lt9611uxc->enable_gpio, 1); + msleep(20); +} + +static int lt9611uxc_regulator_init(struct lt9611uxc *lt9611uxc) +{ + int ret; + + lt9611uxc->supplies[0].supply = "vdd"; + lt9611uxc->supplies[1].supply = "vcc"; + + ret = devm_regulator_bulk_get(lt9611uxc->dev, 2, lt9611uxc->supplies); + if (ret < 0) + return ret; + + return regulator_set_load(lt9611uxc->supplies[0].consumer, 200000); +} + +static int lt9611uxc_regulator_enable(struct lt9611uxc *lt9611uxc) +{ + int ret; + + ret = regulator_enable(lt9611uxc->supplies[0].consumer); + if (ret < 0) + return ret; + + usleep_range(1000, 10000); /* 50000 according to dtsi */ + + ret = regulator_enable(lt9611uxc->supplies[1].consumer); + if (ret < 0) { + regulator_disable(lt9611uxc->supplies[0].consumer); + return ret; + } + + return 0; +} + +static struct lt9611uxc_mode *lt9611uxc_find_mode(const struct drm_display_mode *mode) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(lt9611uxc_modes); i++) { + if (lt9611uxc_modes[i].hdisplay == mode->hdisplay && + lt9611uxc_modes[i].vdisplay == mode->vdisplay && + lt9611uxc_modes[i].vrefresh == drm_mode_vrefresh(mode)) { + return &lt9611uxc_modes[i]; + } + } + + return NULL; +} + +static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc, + struct device_node *dsi_node) +{ + const struct mipi_dsi_device_info info = { "lt9611uxc", 0, NULL }; + struct mipi_dsi_device *dsi; + struct mipi_dsi_host
*host; + int ret; + + host = of_find_mipi_dsi_host_by_node(dsi_node); + if (!host) { + dev_err(lt9611uxc->dev, "failed to find dsi host\n"); + return ERR_PTR(-EPROBE_DEFER); + } + + dsi = mipi_dsi_device_register_full(host, &info); + if (IS_ERR(dsi)) { + dev_err(lt9611uxc->dev, "failed to create dsi device\n"); + return dsi; + } + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_MODE_VIDEO_HSE; + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(lt9611uxc->dev, "failed to attach dsi to host\n"); + mipi_dsi_device_unregister(dsi); + return ERR_PTR(ret); + } + + return dsi; +} + +static int lt9611uxc_connector_get_modes(struct drm_connector *connector) +{ + struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector); + unsigned int count; + struct edid *edid; + + edid = lt9611uxc->bridge.funcs->get_edid(&lt9611uxc->bridge, connector); + drm_connector_update_edid_property(connector, edid); + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + return count; +} + +static enum drm_connector_status lt9611uxc_connector_detect(struct drm_connector *connector, + bool force) +{ + struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector); + + return lt9611uxc->bridge.funcs->detect(&lt9611uxc->bridge); +} + +static enum drm_mode_status lt9611uxc_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct lt9611uxc_mode *lt9611uxc_mode = lt9611uxc_find_mode(mode); + + return lt9611uxc_mode ? MODE_OK : MODE_BAD; +} + +static const struct drm_connector_helper_funcs lt9611uxc_bridge_connector_helper_funcs = { + .get_modes = lt9611uxc_connector_get_modes, + .mode_valid = lt9611uxc_connector_mode_valid, +}; + +static const struct drm_connector_funcs lt9611uxc_bridge_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = lt9611uxc_connector_detect, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc *lt9611uxc) +{ + int ret; + + if (!bridge->encoder) { + DRM_ERROR("Parent encoder object not found"); + return -ENODEV; + } + + drm_connector_helper_add(&lt9611uxc->connector, + &lt9611uxc_bridge_connector_helper_funcs); + ret = drm_connector_init(bridge->dev, &lt9611uxc->connector, + &lt9611uxc_bridge_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + if (ret) { + DRM_ERROR("Failed to initialize connector with drm\n"); + return ret; + } + + return drm_connector_attach_encoder(&lt9611uxc->connector, bridge->encoder); +} + +static void lt9611uxc_bridge_detach(struct drm_bridge *bridge) +{ + struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + + if (lt9611uxc->dsi1) { + mipi_dsi_detach(lt9611uxc->dsi1); + mipi_dsi_device_unregister(lt9611uxc->dsi1); + } + + mipi_dsi_detach(lt9611uxc->dsi0); + mipi_dsi_device_unregister(lt9611uxc->dsi0); +} + +static int lt9611uxc_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + int ret; + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { + ret = lt9611uxc_connector_init(bridge, lt9611uxc); + if (ret < 0) + return ret; + } + + /* Attach primary DSI */ + lt9611uxc->dsi0 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi0_node); + if
(IS_ERR(lt9611uxc->dsi0)) + return PTR_ERR(lt9611uxc->dsi0); + + /* Attach secondary DSI, if specified */ + if (lt9611uxc->dsi1_node) { + lt9611uxc->dsi1 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi1_node); + if (IS_ERR(lt9611uxc->dsi1)) { + ret = PTR_ERR(lt9611uxc->dsi1); + goto err_unregister_dsi0; + } + } + + return 0; + +err_unregister_dsi0: + mipi_dsi_detach(lt9611uxc->dsi0); + mipi_dsi_device_unregister(lt9611uxc->dsi0); + + return ret; +} + +static enum drm_mode_status +lt9611uxc_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct lt9611uxc_mode *lt9611uxc_mode; + + lt9611uxc_mode = lt9611uxc_find_mode(mode); + + return lt9611uxc_mode ? MODE_OK : MODE_BAD; +} + +static void lt9611uxc_video_setup(struct lt9611uxc *lt9611uxc, + const struct drm_display_mode *mode) +{ + u32 h_total, hactive, hsync_len, hfront_porch; + u32 v_total, vactive, vsync_len, vfront_porch; + + h_total = mode->htotal; + v_total = mode->vtotal; + + hactive = mode->hdisplay; + hsync_len = mode->hsync_end - mode->hsync_start; + hfront_porch = mode->hsync_start - mode->hdisplay; + + vactive = mode->vdisplay; + vsync_len = mode->vsync_end - mode->vsync_start; + vfront_porch = mode->vsync_start - mode->vdisplay; + + regmap_write(lt9611uxc->regmap, 0xd00d, (u8)(v_total / 256)); + regmap_write(lt9611uxc->regmap, 0xd00e, (u8)(v_total % 256)); + + regmap_write(lt9611uxc->regmap, 0xd00f, (u8)(vactive / 256)); + regmap_write(lt9611uxc->regmap, 0xd010, (u8)(vactive % 256)); + + regmap_write(lt9611uxc->regmap, 0xd011, (u8)(h_total / 256)); + regmap_write(lt9611uxc->regmap, 0xd012, (u8)(h_total % 256)); + + regmap_write(lt9611uxc->regmap, 0xd013, (u8)(hactive / 256)); + regmap_write(lt9611uxc->regmap, 0xd014, (u8)(hactive % 256)); + + regmap_write(lt9611uxc->regmap, 0xd015, (u8)(vsync_len % 256)); + + regmap_update_bits(lt9611uxc->regmap, 0xd016, 0xf, (u8)(hsync_len / 256)); + regmap_write(lt9611uxc->regmap, 0xd017, (u8)(hsync_len % 256)); + + regmap_update_bits(lt9611uxc->regmap, 0xd018, 0xf, (u8)(vfront_porch / 256)); + regmap_write(lt9611uxc->regmap, 0xd019, (u8)(vfront_porch % 256)); + + regmap_update_bits(lt9611uxc->regmap, 0xd01a, 0xf, (u8)(hfront_porch / 256)); + regmap_write(lt9611uxc->regmap, 0xd01b, (u8)(hfront_porch % 256)); +} + +static void lt9611uxc_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adj_mode) +{ + struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + + lt9611uxc_lock(lt9611uxc); + lt9611uxc_video_setup(lt9611uxc, mode); + lt9611uxc_unlock(lt9611uxc); +} + +static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *bridge) +{ + struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + unsigned int reg_val = 0; + int ret; + int connected = 1; + + if (lt9611uxc->hpd_supported) { + lt9611uxc_lock(lt9611uxc); + ret = regmap_read(lt9611uxc->regmap, 0xb023, &reg_val); + lt9611uxc_unlock(lt9611uxc); + + if (ret) + dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret); + else + connected = reg_val & BIT(1); + } + + return connected ?
connector_status_connected : + connector_status_disconnected; +} + +static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc) +{ + return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read, + msecs_to_jiffies(100)); +} + +static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) +{ + struct lt9611uxc *lt9611uxc = data; + int ret; + + if (len > EDID_BLOCK_SIZE) + return -EINVAL; + + if (block >= EDID_NUM_BLOCKS) + return -EINVAL; + + lt9611uxc_lock(lt9611uxc); + + regmap_write(lt9611uxc->regmap, 0xb00b, 0x10); + + regmap_write(lt9611uxc->regmap, 0xb00a, block * EDID_BLOCK_SIZE); + + ret = regmap_noinc_read(lt9611uxc->regmap, 0xb0b0, buf, len); + if (ret) + dev_err(lt9611uxc->dev, "edid read failed: %d\n", ret); + + lt9611uxc_unlock(lt9611uxc); + + return 0; +}; + +static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + int ret; + + ret = lt9611uxc_wait_for_edid(lt9611uxc); + if (ret < 0) { + dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret); + return ERR_PTR(ret); + } + + return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc); +} + +static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = { + .attach = lt9611uxc_bridge_attach, + .detach = lt9611uxc_bridge_detach, + .mode_valid = lt9611uxc_bridge_mode_valid, + .mode_set = lt9611uxc_bridge_mode_set, + .detect = lt9611uxc_bridge_detect, + .get_edid = lt9611uxc_bridge_get_edid, +}; + +static int lt9611uxc_parse_dt(struct device *dev, + struct lt9611uxc *lt9611uxc) +{ + lt9611uxc->dsi0_node = of_graph_get_remote_node(dev->of_node, 0, -1); + if (!lt9611uxc->dsi0_node) { + dev_err(lt9611uxc->dev, "failed to get remote node for primary dsi\n"); + return -ENODEV; + } + + lt9611uxc->dsi1_node = of_graph_get_remote_node(dev->of_node, 1, -1); + + return 0; +} + +static int lt9611uxc_gpio_init(struct lt9611uxc *lt9611uxc) +{ + struct device *dev = lt9611uxc->dev; + + lt9611uxc->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(lt9611uxc->reset_gpio)) { + dev_err(dev, "failed to acquire reset gpio\n"); + return PTR_ERR(lt9611uxc->reset_gpio); + } + + lt9611uxc->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(lt9611uxc->enable_gpio)) { + dev_err(dev, "failed to acquire enable gpio\n"); + return PTR_ERR(lt9611uxc->enable_gpio); + } + + return 0; +} + +static int lt9611uxc_read_device_rev(struct lt9611uxc *lt9611uxc) +{ + unsigned int rev0, rev1, rev2; + int ret; + + lt9611uxc_lock(lt9611uxc); + + ret = regmap_read(lt9611uxc->regmap, 0x8100, &rev0); + ret |= regmap_read(lt9611uxc->regmap, 0x8101, &rev1); + ret |= regmap_read(lt9611uxc->regmap, 0x8102, &rev2); + if (ret) + dev_err(lt9611uxc->dev, "failed to read revision: %d\n", ret); + else + dev_info(lt9611uxc->dev, "LT9611 revision: 0x%02x.%02x.%02x\n", rev0, rev1, rev2); + + lt9611uxc_unlock(lt9611uxc); + + return ret; +} + +static int lt9611uxc_read_version(struct lt9611uxc *lt9611uxc) +{ + unsigned int rev; + int ret; + + lt9611uxc_lock(lt9611uxc); + + ret = regmap_read(lt9611uxc->regmap, 0xb021, &rev); + if (ret) + dev_err(lt9611uxc->dev, "failed to read revision: %d\n", ret); + else + dev_info(lt9611uxc->dev, "LT9611 version: 0x%02x\n", rev); + + lt9611uxc_unlock(lt9611uxc); + + return ret < 0 ? 
ret : rev; +} + +static int lt9611uxc_hdmi_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms) +{ + /* + * LT9611UXC will automatically detect rate and sample size, so no need + * to setup anything here. + */ + return 0; +} + +static void lt9611uxc_audio_shutdown(struct device *dev, void *data) +{ +} + +static int lt9611uxc_hdmi_i2s_get_dai_id(struct snd_soc_component *component, + struct device_node *endpoint) +{ + struct of_endpoint of_ep; + int ret; + + ret = of_graph_parse_endpoint(endpoint, &of_ep); + if (ret < 0) + return ret; + + /* + * HDMI sound should be located as reg = <2> + * Then, it is sound port 0 + */ + if (of_ep.port == 2) + return 0; + + return -EINVAL; +} + +static const struct hdmi_codec_ops lt9611uxc_codec_ops = { + .hw_params = lt9611uxc_hdmi_hw_params, + .audio_shutdown = lt9611uxc_audio_shutdown, + .get_dai_id = lt9611uxc_hdmi_i2s_get_dai_id, +}; + +static int lt9611uxc_audio_init(struct device *dev, struct lt9611uxc *lt9611uxc) +{ + struct hdmi_codec_pdata codec_data = { + .ops = &lt9611uxc_codec_ops, + .max_i2s_channels = 2, + .i2s = 1, + .data = lt9611uxc, + }; + + lt9611uxc->audio_pdev = + platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, sizeof(codec_data)); + + return PTR_ERR_OR_ZERO(lt9611uxc->audio_pdev); +} + +static void lt9611uxc_audio_exit(struct lt9611uxc *lt9611uxc) +{ + if (lt9611uxc->audio_pdev) { + platform_device_unregister(lt9611uxc->audio_pdev); + lt9611uxc->audio_pdev = NULL; + } +} + +#define LT9611UXC_FW_PAGE_SIZE 32 +static void lt9611uxc_firmware_write_page(struct lt9611uxc *lt9611uxc, u16 addr, const u8 *buf) +{ + struct reg_sequence seq_write_prepare[] = { + REG_SEQ0(0x805a, 0x04), + REG_SEQ0(0x805a, 0x00), + + REG_SEQ0(0x805e, 0xdf), + REG_SEQ0(0x805a, 0x20), + REG_SEQ0(0x805a, 0x00), + REG_SEQ0(0x8058, 0x21), + }; + + struct reg_sequence seq_write_addr[] = { + REG_SEQ0(0x805b, (addr >> 16) & 0xff), + REG_SEQ0(0x805c, (addr >> 8) & 0xff), + REG_SEQ0(0x805d, addr & 0xff), + REG_SEQ0(0x805a, 0x10), + REG_SEQ0(0x805a, 0x00), + }; + + regmap_write(lt9611uxc->regmap, 0x8108, 0xbf); + msleep(20); + regmap_write(lt9611uxc->regmap, 0x8108, 0xff); + msleep(20); + regmap_multi_reg_write(lt9611uxc->regmap, seq_write_prepare, ARRAY_SIZE(seq_write_prepare)); + regmap_noinc_write(lt9611uxc->regmap, 0x8059, buf, LT9611UXC_FW_PAGE_SIZE); + regmap_multi_reg_write(lt9611uxc->regmap, seq_write_addr, ARRAY_SIZE(seq_write_addr)); + msleep(20); +} + +static void lt9611uxc_firmware_read_page(struct lt9611uxc *lt9611uxc, u16 addr, char *buf) +{ + struct reg_sequence seq_read_page[] = { + REG_SEQ0(0x805a, 0xa0), + REG_SEQ0(0x805a, 0x80), + REG_SEQ0(0x805b, (addr >> 16) & 0xff), + REG_SEQ0(0x805c, (addr >> 8) & 0xff), + REG_SEQ0(0x805d, addr & 0xff), + REG_SEQ0(0x805a, 0x90), + REG_SEQ0(0x805a, 0x80), + REG_SEQ0(0x8058, 0x21), + }; + + regmap_multi_reg_write(lt9611uxc->regmap, seq_read_page, ARRAY_SIZE(seq_read_page)); + regmap_noinc_read(lt9611uxc->regmap, 0x805f, buf, LT9611UXC_FW_PAGE_SIZE); +} + +static char *lt9611uxc_firmware_read(struct lt9611uxc *lt9611uxc, size_t size) +{ + struct reg_sequence seq_read_setup[] = { + REG_SEQ0(0x805a, 0x84), + REG_SEQ0(0x805a, 0x80), + }; + + char *readbuf; + u16 offset; + + readbuf = kzalloc(ALIGN(size, 32), GFP_KERNEL); + if (!readbuf) + return NULL; + + regmap_multi_reg_write(lt9611uxc->regmap, seq_read_setup, ARRAY_SIZE(seq_read_setup)); + + for (offset = 0; + offset < size; + offset += LT9611UXC_FW_PAGE_SIZE) +
lt9611uxc_firmware_read_page(lt9611uxc, offset, &readbuf[offset]); + + return readbuf; +} + +static int lt9611uxc_firmware_update(struct lt9611uxc *lt9611uxc) +{ + int ret; + u16 offset; + size_t remain; + char *readbuf; + const struct firmware *fw; + + struct reg_sequence seq_setup[] = { + REG_SEQ0(0x805e, 0xdf), + REG_SEQ0(0x8058, 0x00), + REG_SEQ0(0x8059, 0x50), + REG_SEQ0(0x805a, 0x10), + REG_SEQ0(0x805a, 0x00), + }; + + + struct reg_sequence seq_block_erase[] = { + REG_SEQ0(0x805a, 0x04), + REG_SEQ0(0x805a, 0x00), + REG_SEQ0(0x805b, 0x00), + REG_SEQ0(0x805c, 0x00), + REG_SEQ0(0x805d, 0x00), + REG_SEQ0(0x805a, 0x01), + REG_SEQ0(0x805a, 0x00), + }; + + ret = request_firmware(&fw, "lt9611uxc_fw.bin", lt9611uxc->dev); + if (ret < 0) + return ret; + + dev_info(lt9611uxc->dev, "Updating firmware\n"); + lt9611uxc_lock(lt9611uxc); + + regmap_multi_reg_write(lt9611uxc->regmap, seq_setup, ARRAY_SIZE(seq_setup)); + + /* + * Need to erase the block 2 times here. Sometimes, block erase can fail. + * This is a workaround. + */ + regmap_multi_reg_write(lt9611uxc->regmap, seq_block_erase, ARRAY_SIZE(seq_block_erase)); + msleep(3000); + regmap_multi_reg_write(lt9611uxc->regmap, seq_block_erase, ARRAY_SIZE(seq_block_erase)); + msleep(3000); + + for (offset = 0, remain = fw->size; + remain >= LT9611UXC_FW_PAGE_SIZE; + offset += LT9611UXC_FW_PAGE_SIZE, remain -= LT9611UXC_FW_PAGE_SIZE) + lt9611uxc_firmware_write_page(lt9611uxc, offset, fw->data + offset); + + if (remain > 0) { + char buf[LT9611UXC_FW_PAGE_SIZE]; + + memset(buf, 0xff, LT9611UXC_FW_PAGE_SIZE); + memcpy(buf, fw->data + offset, remain); + lt9611uxc_firmware_write_page(lt9611uxc, offset, buf); + } + msleep(20); + + readbuf = lt9611uxc_firmware_read(lt9611uxc, fw->size); + if (!readbuf) { + ret = -ENOMEM; + goto out; + } + + if (memcmp(readbuf, fw->data, fw->size)) { + dev_err(lt9611uxc->dev, "Firmware update failed\n"); + print_hex_dump(KERN_ERR, "fw: ", DUMP_PREFIX_OFFSET, 16, 1, readbuf, fw->size, false); + ret = -EINVAL; + } else { + dev_info(lt9611uxc->dev, "Firmware updated successfully\n"); + ret = 0; + } + kfree(readbuf); + +out: + lt9611uxc_unlock(lt9611uxc); + lt9611uxc_reset(lt9611uxc); + release_firmware(fw); + + return ret; +} + +static ssize_t lt9611uxc_firmware_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) +{ + struct lt9611uxc *lt9611uxc = dev_get_drvdata(dev); + int ret; + + ret = lt9611uxc_firmware_update(lt9611uxc); + if (ret < 0) + return ret; + return len; +} + +static ssize_t lt9611uxc_firmware_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct lt9611uxc *lt9611uxc = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%02x\n", lt9611uxc->fw_version); +} + +static DEVICE_ATTR_RW(lt9611uxc_firmware); + +static struct attribute *lt9611uxc_attrs[] = { + &dev_attr_lt9611uxc_firmware.attr, + NULL, +}; + +static const struct attribute_group lt9611uxc_attr_group = { + .attrs = lt9611uxc_attrs, +}; + +static const struct attribute_group *lt9611uxc_attr_groups[] = { + &lt9611uxc_attr_group, + NULL, +}; + +static int lt9611uxc_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct lt9611uxc *lt9611uxc; + struct device *dev = &client->dev; + int ret; + bool fw_updated = false; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(dev, "device doesn't support I2C\n"); + return -ENODEV; + } + + lt9611uxc = devm_kzalloc(dev, sizeof(*lt9611uxc), GFP_KERNEL); + if (!lt9611uxc) + return -ENOMEM; + + lt9611uxc->dev =
+        lt9611uxc->client = client;
+        mutex_init(&lt9611uxc->ocm_lock);
+
+        lt9611uxc->regmap = devm_regmap_init_i2c(client, &lt9611uxc_regmap_config);
+        if (IS_ERR(lt9611uxc->regmap)) {
+                dev_err(lt9611uxc->dev, "regmap i2c init failed\n");
+                return PTR_ERR(lt9611uxc->regmap);
+        }
+
+        ret = lt9611uxc_parse_dt(&client->dev, lt9611uxc);
+        if (ret) {
+                dev_err(dev, "failed to parse device tree\n");
+                return ret;
+        }
+
+        ret = lt9611uxc_gpio_init(lt9611uxc);
+        if (ret < 0)
+                goto err_of_put;
+
+        ret = lt9611uxc_regulator_init(lt9611uxc);
+        if (ret < 0)
+                goto err_of_put;
+
+        lt9611uxc_assert_5v(lt9611uxc);
+
+        ret = lt9611uxc_regulator_enable(lt9611uxc);
+        if (ret)
+                goto err_of_put;
+
+        lt9611uxc_reset(lt9611uxc);
+
+        ret = lt9611uxc_read_device_rev(lt9611uxc);
+        if (ret) {
+                dev_err(dev, "failed to read chip rev\n");
+                goto err_disable_regulators;
+        }
+
+retry:
+        ret = lt9611uxc_read_version(lt9611uxc);
+        if (ret < 0) {
+                dev_err(dev, "failed to read FW version\n");
+                goto err_disable_regulators;
+        } else if (ret == 0) {
+                if (!fw_updated) {
+                        fw_updated = true;
+                        dev_err(dev, "FW version 0, enforcing firmware update\n");
+                        ret = lt9611uxc_firmware_update(lt9611uxc);
+                        if (ret < 0)
+                                goto err_disable_regulators;
+                        else
+                                goto retry;
+                } else {
+                        dev_err(dev, "FW version 0, update failed\n");
+                        ret = -EOPNOTSUPP;
+                        goto err_disable_regulators;
+                }
+        } else if (ret < 0x40) {
+                dev_info(dev, "FW version 0x%x, HPD not supported\n", ret);
+        } else {
+                lt9611uxc->hpd_supported = true;
+        }
+        lt9611uxc->fw_version = ret;
+
+        init_waitqueue_head(&lt9611uxc->wq);
+        ret = devm_request_threaded_irq(dev, client->irq, NULL,
+                                        lt9611uxc_irq_thread_handler,
+                                        IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
+        if (ret) {
+                dev_err(dev, "failed to request irq\n");
+                goto err_disable_regulators;
+        }
+
+        i2c_set_clientdata(client, lt9611uxc);
+
+        lt9611uxc->bridge.funcs = &lt9611uxc_bridge_funcs;
+        lt9611uxc->bridge.of_node = client->dev.of_node;
+        lt9611uxc->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+        if (lt9611uxc->hpd_supported)
+                lt9611uxc->bridge.ops |= DRM_BRIDGE_OP_HPD;
+        lt9611uxc->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+
+        drm_bridge_add(&lt9611uxc->bridge);
+
+        return lt9611uxc_audio_init(dev, lt9611uxc);
+
+err_disable_regulators:
+        regulator_bulk_disable(ARRAY_SIZE(lt9611uxc->supplies), lt9611uxc->supplies);
+
+err_of_put:
+        of_node_put(lt9611uxc->dsi1_node);
+        of_node_put(lt9611uxc->dsi0_node);
+
+        return ret;
+}
+
+static int lt9611uxc_remove(struct i2c_client *client)
+{
+        struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
+
+        disable_irq(client->irq);
+        lt9611uxc_audio_exit(lt9611uxc);
+        drm_bridge_remove(&lt9611uxc->bridge);
+
+        mutex_destroy(&lt9611uxc->ocm_lock);
+
+        regulator_bulk_disable(ARRAY_SIZE(lt9611uxc->supplies), lt9611uxc->supplies);
+
+        of_node_put(lt9611uxc->dsi1_node);
+        of_node_put(lt9611uxc->dsi0_node);
+
+        return 0;
+}
+
+static struct i2c_device_id lt9611uxc_id[] = {
+        { "lontium,lt9611uxc", 0 },
+        { /* sentinel */ }
+};
+
+static const struct of_device_id lt9611uxc_match_table[] = {
+        { .compatible = "lontium,lt9611uxc" },
+        { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lt9611uxc_match_table);
+
+static struct i2c_driver lt9611uxc_driver = {
+        .driver = {
+                .name = "lt9611uxc",
+                .of_match_table = lt9611uxc_match_table,
+                .dev_groups = lt9611uxc_attr_groups,
+        },
+        .probe = lt9611uxc_probe,
+        .remove = lt9611uxc_remove,
+        .id_table = lt9611uxc_id,
+};
+module_i2c_driver(lt9611uxc_driver);
+
+MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>");
+MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c index f52ccffc1bd1..dcf579a4cf83 100644 --- a/drivers/gpu/drm/bridge/lvds-codec.c +++ b/drivers/gpu/drm/bridge/lvds-codec.c @@ -80,7 +80,6 @@ static int lvds_codec_probe(struct platform_device *pdev) struct device_node *panel_node; struct drm_panel *panel; struct lvds_codec *lvds_codec; - int ret; lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL); if (!lvds_codec) @@ -90,13 +89,9 @@ static int lvds_codec_probe(struct platform_device *pdev) lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev); lvds_codec->vcc = devm_regulator_get(lvds_codec->dev, "power"); - if (IS_ERR(lvds_codec->vcc)) { - ret = PTR_ERR(lvds_codec->vcc); - if (ret != -EPROBE_DEFER) - dev_err(lvds_codec->dev, - "Unable to get \"vcc\" supply: %d\n", ret); - return ret; - } + if (IS_ERR(lvds_codec->vcc)) + return dev_err_probe(dev, PTR_ERR(lvds_codec->vcc), + "Unable to get \"vcc\" supply\n"); lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH); diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 33fd33f953ec..89558e581530 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -17,6 +17,7 @@ #include <linux/i2c.h> #include <linux/module.h> #include <linux/regmap.h> +#include <linux/regulator/consumer.h> #include <linux/clk.h> #include <drm/drm_atomic_helper.h> @@ -168,6 +169,7 @@ struct sii902x { struct drm_connector connector; struct gpio_desc *reset_gpio; struct i2c_mux_core *i2cmux; + struct regulator_bulk_data supplies[2]; /* * Mutex protects audio and video functions from interfering * each other, by keeping their i2c command sequences atomic. 
@@ -954,41 +956,13 @@ static const struct drm_bridge_timings default_sii902x_timings = { | DRM_BUS_FLAG_DE_HIGH, }; -static int sii902x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int sii902x_init(struct sii902x *sii902x) { - struct device *dev = &client->dev; + struct device *dev = &sii902x->i2c->dev; unsigned int status = 0; - struct sii902x *sii902x; u8 chipid[4]; int ret; - ret = i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_BYTE_DATA); - if (!ret) { - dev_err(dev, "I2C adapter not suitable\n"); - return -EIO; - } - - sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL); - if (!sii902x) - return -ENOMEM; - - sii902x->i2c = client; - sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config); - if (IS_ERR(sii902x->regmap)) - return PTR_ERR(sii902x->regmap); - - sii902x->reset_gpio = devm_gpiod_get_optional(dev, "reset", - GPIOD_OUT_LOW); - if (IS_ERR(sii902x->reset_gpio)) { - dev_err(dev, "Failed to retrieve/request reset gpio: %ld\n", - PTR_ERR(sii902x->reset_gpio)); - return PTR_ERR(sii902x->reset_gpio); - } - - mutex_init(&sii902x->mutex); - sii902x_reset(sii902x); ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0); @@ -1012,11 +986,11 @@ static int sii902x_probe(struct i2c_client *client, regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status); regmap_write(sii902x->regmap, SII902X_INT_STATUS, status); - if (client->irq > 0) { + if (sii902x->i2c->irq > 0) { regmap_write(sii902x->regmap, SII902X_INT_ENABLE, SII902X_HOTPLUG_EVENT); - ret = devm_request_threaded_irq(dev, client->irq, NULL, + ret = devm_request_threaded_irq(dev, sii902x->i2c->irq, NULL, sii902x_interrupt, IRQF_ONESHOT, dev_name(dev), sii902x); @@ -1031,9 +1005,9 @@ static int sii902x_probe(struct i2c_client *client, sii902x_audio_codec_init(sii902x, dev); - i2c_set_clientdata(client, sii902x); + i2c_set_clientdata(sii902x->i2c, sii902x); - sii902x->i2cmux = i2c_mux_alloc(client->adapter, dev, + sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev, 1, 0, I2C_MUX_GATE, sii902x_i2c_bypass_select, sii902x_i2c_bypass_deselect); @@ -1044,6 +1018,62 @@ static int sii902x_probe(struct i2c_client *client, return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0); } +static int sii902x_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct sii902x *sii902x; + int ret; + + ret = i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA); + if (!ret) { + dev_err(dev, "I2C adapter not suitable\n"); + return -EIO; + } + + sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL); + if (!sii902x) + return -ENOMEM; + + sii902x->i2c = client; + sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config); + if (IS_ERR(sii902x->regmap)) + return PTR_ERR(sii902x->regmap); + + sii902x->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(sii902x->reset_gpio)) { + dev_err(dev, "Failed to retrieve/request reset gpio: %ld\n", + PTR_ERR(sii902x->reset_gpio)); + return PTR_ERR(sii902x->reset_gpio); + } + + mutex_init(&sii902x->mutex); + + sii902x->supplies[0].supply = "iovcc"; + sii902x->supplies[1].supply = "cvcc12"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(sii902x->supplies), + sii902x->supplies); + if (ret < 0) + return ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(sii902x->supplies), + sii902x->supplies); + if (ret < 0) { + dev_err_probe(dev, ret, "Failed to enable supplies"); + return ret; + } + + ret = sii902x_init(sii902x); + if (ret < 0) { + 
regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies), + sii902x->supplies); + } + + return ret; +} + static int sii902x_remove(struct i2c_client *client) { @@ -1051,6 +1081,8 @@ static int sii902x_remove(struct i2c_client *client) i2c_mux_del_adapters(sii902x->i2cmux); drm_bridge_remove(&sii902x->bridge); + regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies), + sii902x->supplies); return 0; } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 6ca1debd0f88..f27306c51e4d 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -17,6 +17,8 @@ #include <linux/regmap.h> #include <linux/regulator/consumer.h> +#include <asm/unaligned.h> + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> @@ -72,6 +74,7 @@ #define SN_AUX_ADDR_19_16_REG 0x74 #define SN_AUX_ADDR_15_8_REG 0x75 #define SN_AUX_ADDR_7_0_REG 0x76 +#define SN_AUX_ADDR_MASK GENMASK(19, 0) #define SN_AUX_LENGTH_REG 0x77 #define SN_AUX_CMD_REG 0x78 #define AUX_CMD_SEND BIT(0) @@ -118,6 +121,7 @@ * @debugfs: Used for managing our debugfs. * @host_node: Remote DSI node. * @dsi: Our MIPI DSI source. + * @edid: Detected EDID of eDP panel. * @refclk: Our reference clock. * @panel: Our panel. * @enable_gpio: The GPIO we toggle to enable the bridge. @@ -143,6 +147,7 @@ struct ti_sn_bridge { struct drm_bridge bridge; struct drm_connector connector; struct dentry *debugfs; + struct edid *edid; struct device_node *host_node; struct mipi_dsi_device *dsi; struct clk *refclk; @@ -264,6 +269,23 @@ connector_to_ti_sn_bridge(struct drm_connector *connector) static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector) { struct ti_sn_bridge *pdata = connector_to_ti_sn_bridge(connector); + struct edid *edid = pdata->edid; + int num, ret; + + if (!edid) { + pm_runtime_get_sync(pdata->dev); + edid = pdata->edid = drm_get_edid(connector, &pdata->aux.ddc); + pm_runtime_put(pdata->dev); + } + + if (edid && drm_edid_is_valid(edid)) { + ret = drm_connector_update_edid_property(connector, edid); + if (!ret) { + num = drm_add_edid_modes(connector, edid); + if (num) + return num; + } + } return drm_panel_get_modes(pdata->panel, connector); } @@ -856,13 +878,15 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct ti_sn_bridge *pdata = aux_to_ti_sn_bridge(aux); - u32 request = msg->request & ~DP_AUX_I2C_MOT; + u32 request = msg->request & ~(DP_AUX_I2C_MOT | DP_AUX_I2C_WRITE_STATUS_UPDATE); u32 request_val = AUX_CMD_REQ(msg->request); - u8 *buf = (u8 *)msg->buffer; + u8 *buf = msg->buffer; + unsigned int len = msg->size; unsigned int val; - int ret, i; + int ret; + u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG]; - if (msg->size > SN_AUX_MAX_PAYLOAD_BYTES) + if (len > SN_AUX_MAX_PAYLOAD_BYTES) return -EINVAL; switch (request) { @@ -871,24 +895,21 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, case DP_AUX_NATIVE_READ: case DP_AUX_I2C_READ: regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val); + /* Assume it's good */ + msg->reply = 0; break; default: return -EINVAL; } - regmap_write(pdata->regmap, SN_AUX_ADDR_19_16_REG, - (msg->address >> 16) & 0xF); - regmap_write(pdata->regmap, SN_AUX_ADDR_15_8_REG, - (msg->address >> 8) & 0xFF); - regmap_write(pdata->regmap, SN_AUX_ADDR_7_0_REG, msg->address & 0xFF); - - regmap_write(pdata->regmap, SN_AUX_LENGTH_REG, msg->size); + BUILD_BUG_ON(sizeof(addr_len) != sizeof(__be32)); + put_unaligned_be32((msg->address & 
SN_AUX_ADDR_MASK) << 8 | len, + addr_len); + regmap_bulk_write(pdata->regmap, SN_AUX_ADDR_19_16_REG, addr_len, + ARRAY_SIZE(addr_len)); - if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE) { - for (i = 0; i < msg->size; i++) - regmap_write(pdata->regmap, SN_AUX_WDATA_REG(i), - buf[i]); - } + if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE) + regmap_bulk_write(pdata->regmap, SN_AUX_WDATA_REG(0), buf, len); /* Clear old status bits before start so we don't get confused */ regmap_write(pdata->regmap, SN_AUX_CMD_STATUS_REG, @@ -898,35 +919,52 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND); + /* Zero delay loop because i2c transactions are slow already */ ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val, - !(val & AUX_CMD_SEND), 200, - 50 * 1000); + !(val & AUX_CMD_SEND), 0, 50 * 1000); if (ret) return ret; ret = regmap_read(pdata->regmap, SN_AUX_CMD_STATUS_REG, &val); if (ret) return ret; - else if ((val & AUX_IRQ_STATUS_NAT_I2C_FAIL) - || (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT) - || (val & AUX_IRQ_STATUS_AUX_SHORT)) - return -ENXIO; - if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE) - return msg->size; + if (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT) { + /* + * The hardware tried the message seven times per the DP spec + * but it hit a timeout. We ignore defers here because they're + * handled in hardware. + */ + return -ETIMEDOUT; + } - for (i = 0; i < msg->size; i++) { - unsigned int val; - ret = regmap_read(pdata->regmap, SN_AUX_RDATA_REG(i), - &val); + if (val & AUX_IRQ_STATUS_AUX_SHORT) { + ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len); if (ret) return ret; - - WARN_ON(val & ~0xFF); - buf[i] = (u8)(val & 0xFF); + } else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) { + switch (request) { + case DP_AUX_I2C_WRITE: + case DP_AUX_I2C_READ: + msg->reply |= DP_AUX_I2C_REPLY_NACK; + break; + case DP_AUX_NATIVE_READ: + case DP_AUX_NATIVE_WRITE: + msg->reply |= DP_AUX_NATIVE_REPLY_NACK; + break; + } + return 0; } - return msg->size; + if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE || + len == 0) + return len; + + ret = regmap_bulk_read(pdata->regmap, SN_AUX_RDATA_REG(0), buf, len); + if (ret) + return ret; + + return len; } static int ti_sn_bridge_parse_dsi_host(struct ti_sn_bridge *pdata) @@ -1268,6 +1306,7 @@ static int ti_sn_bridge_remove(struct i2c_client *client) if (!pdata) return -EINVAL; + kfree(pdata->edid); ti_sn_debugfs_remove(pdata); of_node_put(pdata->host_node); diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c index 514cbf0eac75..e0e015243a60 100644 --- a/drivers/gpu/drm/bridge/ti-tpd12s015.c +++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c @@ -160,7 +160,7 @@ static int tpd12s015_probe(struct platform_device *pdev) /* Register the IRQ if the HPD GPIO is IRQ-capable. 
*/ tpd->hpd_irq = gpiod_to_irq(tpd->hpd_gpio); - if (tpd->hpd_irq) { + if (tpd->hpd_irq >= 0) { ret = devm_request_threaded_irq(&pdev->dev, tpd->hpd_irq, NULL, tpd12s015_hpd_isr, IRQF_TRIGGER_RISING | diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 9ad74045158e..ddcf5c2c8e6a 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -543,7 +543,7 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state); /** - * __drm_atomic_helper_private_duplicate_state - copy atomic private state + * __drm_atomic_helper_private_obj_duplicate_state - copy atomic private state * @obj: CRTC object * @state: new private object state * diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index ef82009035e6..268bb69c2e2f 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -85,13 +85,15 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, drm_mode_copy(&state->mode, mode); state->enable = true; - DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n", - mode->name, crtc->base.id, crtc->name, state); + drm_dbg_atomic(crtc->dev, + "Set [MODE:%s] for [CRTC:%d:%s] state %p\n", + mode->name, crtc->base.id, crtc->name, state); } else { memset(&state->mode, 0, sizeof(state->mode)); state->enable = false; - DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n", - crtc->base.id, crtc->name, state); + drm_dbg_atomic(crtc->dev, + "Set [NOMODE] for [CRTC:%d:%s] state %p\n", + crtc->base.id, crtc->name, state); } return 0; @@ -128,31 +130,35 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, int ret; if (blob->length != sizeof(struct drm_mode_modeinfo)) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n", - crtc->base.id, crtc->name, - blob->length); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] bad mode blob length: %zu\n", + crtc->base.id, crtc->name, + blob->length); return -EINVAL; } ret = drm_mode_convert_umode(crtc->dev, &state->mode, blob->data); if (ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n", - crtc->base.id, crtc->name, - ret, drm_get_mode_status_name(state->mode.status)); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n", + crtc->base.id, crtc->name, + ret, drm_get_mode_status_name(state->mode.status)); drm_mode_debug_printmodeline(&state->mode); return -EINVAL; } state->mode_blob = drm_property_blob_get(blob); state->enable = true; - DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n", - state->mode.name, crtc->base.id, crtc->name, - state); + drm_dbg_atomic(crtc->dev, + "Set [MODE:%s] for [CRTC:%d:%s] state %p\n", + state->mode.name, crtc->base.id, crtc->name, + state); } else { state->enable = false; - DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n", - crtc->base.id, crtc->name, state); + drm_dbg_atomic(crtc->dev, + "Set [NOMODE] for [CRTC:%d:%s] state %p\n", + crtc->base.id, crtc->name, state); } return 0; @@ -202,12 +208,14 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, } if (crtc) - DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n", - plane->base.id, plane->name, plane_state, - crtc->base.id, crtc->name); + drm_dbg_atomic(plane->dev, + "Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n", + plane->base.id, plane->name, plane_state, + crtc->base.id, crtc->name); else - 
DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n", - plane->base.id, plane->name, plane_state); + drm_dbg_atomic(plane->dev, + "Link [PLANE:%d:%s] state %p to [NOCRTC]\n", + plane->base.id, plane->name, plane_state); return 0; } @@ -230,12 +238,14 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, struct drm_plane *plane = plane_state->plane; if (fb) - DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n", - fb->base.id, plane->base.id, plane->name, - plane_state); + drm_dbg_atomic(plane->dev, + "Set [FB:%d] for [PLANE:%d:%s] state %p\n", + fb->base.id, plane->base.id, plane->name, + plane_state); else - DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n", - plane->base.id, plane->name, plane_state); + drm_dbg_atomic(plane->dev, + "Set [NOFB] for [PLANE:%d:%s] state %p\n", + plane->base.id, plane->name, plane_state); drm_framebuffer_assign(&plane_state->fb, fb); } @@ -324,13 +334,15 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, drm_connector_get(conn_state->connector); conn_state->crtc = crtc; - DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n", - connector->base.id, connector->name, - conn_state, crtc->base.id, crtc->name); + drm_dbg_atomic(connector->dev, + "Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n", + connector->base.id, connector->name, + conn_state, crtc->base.id, crtc->name); } else { - DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n", - connector->base.id, connector->name, - conn_state); + drm_dbg_atomic(connector->dev, + "Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n", + connector->base.id, connector->name, + conn_state); } return 0; @@ -474,9 +486,10 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc, } else if (crtc->funcs->atomic_set_property) { return crtc->funcs->atomic_set_property(crtc, state, property, val); } else { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n", - crtc->base.id, crtc->name, - property->base.id, property->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n", + crtc->base.id, crtc->name, + property->base.id, property->name); return -EINVAL; } @@ -570,8 +583,9 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, state->pixel_blend_mode = val; } else if (property == plane->rotation_property) { if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n", - plane->base.id, plane->name, val); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n", + plane->base.id, plane->name, val); return -EINVAL; } state->rotation = val; @@ -595,9 +609,10 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, return plane->funcs->atomic_set_property(plane, state, property, val); } else { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n", - plane->base.id, plane->name, - property->base.id, property->name); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n", + plane->base.id, plane->name, + property->base.id, property->name); return -EINVAL; } @@ -665,17 +680,20 @@ static int drm_atomic_set_writeback_fb_for_connector( struct drm_framebuffer *fb) { int ret; + struct drm_connector *conn = conn_state->connector; ret = drm_writeback_set_fb(conn_state, fb); if (ret < 0) return ret; if (fb) - DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n", - fb->base.id, conn_state); + drm_dbg_atomic(conn->dev, + "Set [FB:%d] for connector state %p\n", + 
fb->base.id, conn_state); else - DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n", - conn_state); + drm_dbg_atomic(conn->dev, + "Set [NOFB] for connector state %p\n", + conn_state); return 0; } @@ -782,9 +800,10 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector, return connector->funcs->atomic_set_property(connector, state, property, val); } else { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n", - connector->base.id, connector->name, - property->base.id, property->name); + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n", + connector->base.id, connector->name, + property->base.id, property->name); return -EINVAL; } @@ -1282,7 +1301,7 @@ static void complete_signaling(struct drm_device *dev, /* If this fails log error to the user */ if (fence_state[i].out_fence_ptr && put_user(-1, fence_state[i].out_fence_ptr)) - DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n"); + drm_dbg_atomic(dev, "Couldn't clear out_fence_ptr\n"); } kfree(fence_state); @@ -1311,22 +1330,35 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, * though this may be a bit overkill, since legacy userspace * wouldn't know how to call this ioctl) */ - if (!file_priv->atomic) + if (!file_priv->atomic) { + drm_dbg_atomic(dev, + "commit failed: atomic cap not enabled\n"); return -EINVAL; + } - if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS) + if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS) { + drm_dbg_atomic(dev, "commit failed: invalid flag\n"); return -EINVAL; + } - if (arg->reserved) + if (arg->reserved) { + drm_dbg_atomic(dev, "commit failed: reserved field set\n"); return -EINVAL; + } - if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) + if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) { + drm_dbg_atomic(dev, + "commit failed: invalid flag DRM_MODE_PAGE_FLIP_ASYNC\n"); return -EINVAL; + } /* can't test and expect an event at the same time. */ if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) && - (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) + (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) { + drm_dbg_atomic(dev, + "commit failed: page-flip event requested with test-only commit\n"); return -EINVAL; + } state = drm_atomic_state_alloc(dev); if (!state) diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 495f47d23d87..fe573acf1067 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -3,6 +3,7 @@ * Copyright 2018 Noralf Trønnes */ +#include <linux/dma-buf-map.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> @@ -234,7 +235,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer) { struct drm_device *dev = buffer->client->dev; - drm_gem_vunmap(buffer->gem, buffer->vaddr); + drm_gem_vunmap(buffer->gem, &buffer->map); if (buffer->gem) drm_gem_object_put(buffer->gem); @@ -290,24 +291,31 @@ err_delete: /** * drm_client_buffer_vmap - Map DRM client buffer into address space * @buffer: DRM client buffer + * @map_copy: Returns the mapped memory's address * * This function maps a client buffer into kernel address space. If the - * buffer is already mapped, it returns the mapping's address. + * buffer is already mapped, it returns the existing mapping's address. * * Client buffer mappings are not ref'counted. Each call to * drm_client_buffer_vmap() should be followed by a call to * drm_client_buffer_vunmap(); or the client buffer should be mapped * throughout its lifetime. * + * The returned address is a copy of the internal value. 
In contrast to + * other vmap interfaces, you don't need it for the client's vunmap + * function. So you can modify it at will during blit and draw operations. + * * Returns: - * The mapped memory's address + * 0 on success, or a negative errno code otherwise. */ -void *drm_client_buffer_vmap(struct drm_client_buffer *buffer) +int +drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map_copy) { - void *vaddr; + struct dma_buf_map *map = &buffer->map; + int ret; - if (buffer->vaddr) - return buffer->vaddr; + if (dma_buf_map_is_set(map)) + goto out; /* * FIXME: The dependency on GEM here isn't required, we could @@ -317,13 +325,14 @@ void *drm_client_buffer_vmap(struct drm_client_buffer *buffer) * fd_install step out of the driver backend hooks, to make that * final step optional for internal users. */ - vaddr = drm_gem_vmap(buffer->gem); - if (IS_ERR(vaddr)) - return vaddr; + ret = drm_gem_vmap(buffer->gem, map); + if (ret) + return ret; - buffer->vaddr = vaddr; +out: + *map_copy = *map; - return vaddr; + return 0; } EXPORT_SYMBOL(drm_client_buffer_vmap); @@ -337,8 +346,9 @@ EXPORT_SYMBOL(drm_client_buffer_vmap); */ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) { - drm_gem_vunmap(buffer->gem, buffer->vaddr); - buffer->vaddr = NULL; + struct dma_buf_map *map = &buffer->map; + + drm_gem_vunmap(buffer->gem, map); } EXPORT_SYMBOL(drm_client_buffer_vunmap); diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 1913d8b4e16a..98b6ec45ef96 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1432,7 +1432,8 @@ void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame, EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type); /** - * drm_mode_attach_tv_margin_properties - attach TV connector margin properties + * drm_connector_attach_tv_margin_properties - attach TV connector margin + * properties * @connector: DRM connector * * Called by a driver when it needs to attach TV margin props to a connector. 
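The drm_client changes above hand the mapping out as a struct dma_buf_map instead of a raw pointer, so the is_iomem flag travels with the address and callers no longer special-case framebuffers in I/O memory. A minimal sketch of a consumer, assuming a hypothetical fill_scanline() helper that is not part of this patch:

#include <linux/dma-buf-map.h>
#include <drm/drm_client.h>

static int fill_scanline(struct drm_client_buffer *buffer,
                         const void *src, size_t len)
{
        struct dma_buf_map map;
        int ret;

        ret = drm_client_buffer_vmap(buffer, &map);
        if (ret)
                return ret;

        /*
         * dma_buf_map_memcpy_to() resolves to memcpy() or memcpy_toio()
         * depending on map.is_iomem, replacing the old fbdev_use_iomem
         * special casing on the caller's side.
         */
        dma_buf_map_memcpy_to(&map, src, len);

        drm_client_buffer_vunmap(buffer);

        return 0;
}

Because the returned map is a copy of the client's internal value, the caller may advance it with dma_buf_map_incr() during a blit without affecting the later vunmap.
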
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 37ec3b94389c..5bd0934004e3 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -1161,7 +1161,7 @@ drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE], EXPORT_SYMBOL(drm_dp_subconnector_type); /** - * drm_mode_set_dp_subconnector_property - set subconnector for DP connector + * drm_dp_set_subconnector_property - set subconnector for DP connector * @connector: connector to set property on * @status: connector status * @dpcd: DisplayPort configuration data diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 153b6065ba29..0401b2f47500 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3260,7 +3260,7 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, { struct drm_dp_sideband_msg_tx *txmsg; u8 nonce[7]; - int len, ret; + int ret; txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) @@ -3281,7 +3281,7 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, */ txmsg->dst = mgr->mst_primary; - len = build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce); + build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce); drm_dp_queue_down_tx(mgr, txmsg); @@ -3686,10 +3686,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms WARN_ON(mgr->mst_primary); /* get dpcd info */ - ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd); - if (ret < 0) { - drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n", - mgr->aux->name, ret); + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); + if (ret != DP_RECEIVER_CAP_SIZE) { + DRM_DEBUG_KMS("failed to read DPCD\n"); goto out_unlock; } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index cd162d406078..734303802bc3 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -284,7 +284,7 @@ void drm_minor_release(struct drm_minor *minor) * struct clk *pclk; * }; * - * static struct drm_driver driver_drm_driver = { + * static const struct drm_driver driver_drm_driver = { * [...] * }; * @@ -574,7 +574,7 @@ static void drm_dev_init_release(struct drm_device *dev, void *res) } static int drm_dev_init(struct drm_device *dev, - struct drm_driver *driver, + const struct drm_driver *driver, struct device *parent) { int ret; @@ -589,7 +589,11 @@ static int drm_dev_init(struct drm_device *dev, kref_init(&dev->ref); dev->dev = get_device(parent); +#ifdef CONFIG_DRM_LEGACY + dev->driver = (struct drm_driver *)driver; +#else dev->driver = driver; +#endif INIT_LIST_HEAD(&dev->managed.resources); spin_lock_init(&dev->managed.lock); @@ -663,7 +667,7 @@ static void devm_drm_dev_init_release(void *data) static int devm_drm_dev_init(struct device *parent, struct drm_device *dev, - struct drm_driver *driver) + const struct drm_driver *driver) { int ret; @@ -678,7 +682,8 @@ static int devm_drm_dev_init(struct device *parent, return ret; } -void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver, +void *__devm_drm_dev_alloc(struct device *parent, + const struct drm_driver *driver, size_t size, size_t offset) { void *container; @@ -713,7 +718,7 @@ EXPORT_SYMBOL(__devm_drm_dev_alloc); * RETURNS: * Pointer to new DRM device, or ERR_PTR on failure. 
*/ -struct drm_device *drm_dev_alloc(struct drm_driver *driver, +struct drm_device *drm_dev_alloc(const struct drm_driver *driver, struct device *parent) { struct drm_device *dev; @@ -858,7 +863,7 @@ static void remove_compat_control_link(struct drm_device *dev) */ int drm_dev_register(struct drm_device *dev, unsigned long flags) { - struct drm_driver *driver = dev->driver; + const struct drm_driver *driver = dev->driver; int ret; if (!driver->load) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c7363af731b4..74f5a3197214 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3114,6 +3114,8 @@ static int drm_cvt_modes(struct drm_connector *connector, case 0x0c: width = height * 15 / 9; break; + default: + unreachable(); } for (j = 1; j < 5; j++) { diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 92e0db30fdf7..25edf670867c 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -372,24 +372,22 @@ static void drm_fb_helper_resume_worker(struct work_struct *work) } static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper, - struct drm_clip_rect *clip) + struct drm_clip_rect *clip, + struct dma_buf_map *dst) { struct drm_framebuffer *fb = fb_helper->fb; unsigned int cpp = fb->format->cpp[0]; size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * cpp; void *src = fb_helper->fbdev->screen_buffer + offset; - void *dst = fb_helper->buffer->vaddr + offset; size_t len = (clip->x2 - clip->x1) * cpp; unsigned int y; - for (y = clip->y1; y < clip->y2; y++) { - if (!fb_helper->dev->mode_config.fbdev_use_iomem) - memcpy(dst, src, len); - else - memcpy_toio((void __iomem *)dst, src, len); + dma_buf_map_incr(dst, offset); /* go to first pixel within clip rect */ + for (y = clip->y1; y < clip->y2; y++) { + dma_buf_map_memcpy_to(dst, src, len); + dma_buf_map_incr(dst, fb->pitches[0]); src += fb->pitches[0]; - dst += fb->pitches[0]; } } @@ -400,7 +398,8 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) struct drm_clip_rect *clip = &helper->dirty_clip; struct drm_clip_rect clip_copy; unsigned long flags; - void *vaddr; + struct dma_buf_map map; + int ret; spin_lock_irqsave(&helper->dirty_lock, flags); clip_copy = *clip; @@ -413,11 +412,12 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) /* Generic fbdev uses a shadow buffer */ if (helper->buffer) { - vaddr = drm_client_buffer_vmap(helper->buffer); - if (IS_ERR(vaddr)) + ret = drm_client_buffer_vmap(helper->buffer, &map); + if (ret) return; - drm_fb_helper_dirty_blit_real(helper, &clip_copy); + drm_fb_helper_dirty_blit_real(helper, &clip_copy, &map); } + if (helper->fb->funcs->dirty) helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); @@ -2026,6 +2026,199 @@ static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) return -ENODEV; } +static bool drm_fbdev_use_iomem(struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_client_buffer *buffer = fb_helper->buffer; + + return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem; +} + +static ssize_t fb_read_screen_base(struct fb_info *info, char __user *buf, size_t count, + loff_t pos) +{ + const char __iomem *src = info->screen_base + pos; + size_t alloc_size = min_t(size_t, count, PAGE_SIZE); + ssize_t ret = 0; + int err = 0; + char *tmp; + + tmp = kmalloc(alloc_size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + while (count) { + size_t c = min_t(size_t, count, alloc_size); + + 
memcpy_fromio(tmp, src, c); + if (copy_to_user(buf, tmp, c)) { + err = -EFAULT; + break; + } + + src += c; + buf += c; + ret += c; + count -= c; + } + + kfree(tmp); + + return ret ? ret : err; +} + +static ssize_t fb_read_screen_buffer(struct fb_info *info, char __user *buf, size_t count, + loff_t pos) +{ + const char *src = info->screen_buffer + pos; + + if (copy_to_user(buf, src, count)) + return -EFAULT; + + return count; +} + +static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos) +{ + loff_t pos = *ppos; + size_t total_size; + ssize_t ret; + + if (info->screen_size) + total_size = info->screen_size; + else + total_size = info->fix.smem_len; + + if (pos >= total_size) + return 0; + if (count >= total_size) + count = total_size; + if (total_size - count < pos) + count = total_size - pos; + + if (drm_fbdev_use_iomem(info)) + ret = fb_read_screen_base(info, buf, count, pos); + else + ret = fb_read_screen_buffer(info, buf, count, pos); + + if (ret > 0) + *ppos += ret; + + return ret; +} + +static ssize_t fb_write_screen_base(struct fb_info *info, const char __user *buf, size_t count, + loff_t pos) +{ + char __iomem *dst = info->screen_base + pos; + size_t alloc_size = min_t(size_t, count, PAGE_SIZE); + ssize_t ret = 0; + int err = 0; + u8 *tmp; + + tmp = kmalloc(alloc_size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + while (count) { + size_t c = min_t(size_t, count, alloc_size); + + if (copy_from_user(tmp, buf, c)) { + err = -EFAULT; + break; + } + memcpy_toio(dst, tmp, c); + + dst += c; + buf += c; + ret += c; + count -= c; + } + + kfree(tmp); + + return ret ? ret : err; +} + +static ssize_t fb_write_screen_buffer(struct fb_info *info, const char __user *buf, size_t count, + loff_t pos) +{ + char *dst = info->screen_buffer + pos; + + if (copy_from_user(dst, buf, count)) + return -EFAULT; + + return count; +} + +static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) +{ + loff_t pos = *ppos; + size_t total_size; + ssize_t ret; + int err = 0; + + if (info->screen_size) + total_size = info->screen_size; + else + total_size = info->fix.smem_len; + + if (pos > total_size) + return -EFBIG; + if (count > total_size) { + err = -EFBIG; + count = total_size; + } + if (total_size - count < pos) { + if (!err) + err = -ENOSPC; + count = total_size - pos; + } + + /* + * Copy to framebuffer even if we already logged an error. Emulates + * the behavior of the original fbdev implementation. + */ + if (drm_fbdev_use_iomem(info)) + ret = fb_write_screen_base(info, buf, count, pos); + else + ret = fb_write_screen_buffer(info, buf, count, pos); + + if (ret > 0) + *ppos += ret; + + return ret ? 
ret : err; +} + +static void drm_fbdev_fb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ + if (drm_fbdev_use_iomem(info)) + drm_fb_helper_cfb_fillrect(info, rect); + else + drm_fb_helper_sys_fillrect(info, rect); +} + +static void drm_fbdev_fb_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ + if (drm_fbdev_use_iomem(info)) + drm_fb_helper_cfb_copyarea(info, area); + else + drm_fb_helper_sys_copyarea(info, area); +} + +static void drm_fbdev_fb_imageblit(struct fb_info *info, + const struct fb_image *image) +{ + if (drm_fbdev_use_iomem(info)) + drm_fb_helper_cfb_imageblit(info, image); + else + drm_fb_helper_sys_imageblit(info, image); +} + static const struct fb_ops drm_fbdev_fb_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, @@ -2033,11 +2226,11 @@ static const struct fb_ops drm_fbdev_fb_ops = { .fb_release = drm_fbdev_fb_release, .fb_destroy = drm_fbdev_fb_destroy, .fb_mmap = drm_fbdev_fb_mmap, - .fb_read = drm_fb_helper_sys_read, - .fb_write = drm_fb_helper_sys_write, - .fb_fillrect = drm_fb_helper_sys_fillrect, - .fb_copyarea = drm_fb_helper_sys_copyarea, - .fb_imageblit = drm_fb_helper_sys_imageblit, + .fb_read = drm_fbdev_fb_read, + .fb_write = drm_fbdev_fb_write, + .fb_fillrect = drm_fbdev_fb_fillrect, + .fb_copyarea = drm_fbdev_fb_copyarea, + .fb_imageblit = drm_fbdev_fb_imageblit, }; static struct fb_deferred_io drm_fbdev_defio = { @@ -2060,7 +2253,8 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, struct drm_framebuffer *fb; struct fb_info *fbi; u32 format; - void *vaddr; + struct dma_buf_map map; + int ret; drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", sizes->surface_width, sizes->surface_height, @@ -2096,14 +2290,22 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fb_deferred_io_init(fbi); } else { /* buffer is mapped for HW framebuffer */ - vaddr = drm_client_buffer_vmap(fb_helper->buffer); - if (IS_ERR(vaddr)) - return PTR_ERR(vaddr); + ret = drm_client_buffer_vmap(fb_helper->buffer, &map); + if (ret) + return ret; + if (map.is_iomem) + fbi->screen_base = map.vaddr_iomem; + else + fbi->screen_buffer = map.vaddr; - fbi->screen_buffer = vaddr; - /* Shamelessly leak the physical address to user-space */ + /* + * Shamelessly leak the physical address to user-space. As + * page_to_phys() is undefined for I/O memory, warn in this + * case. 
+ */ #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) - if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0) + if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0 && + !drm_WARN_ON_ONCE(dev, map.is_iomem)) fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer)); #endif diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 0ac4566ae3f4..b50380fa80ce 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -258,9 +258,11 @@ void drm_file_free(struct drm_file *file) (long)old_encode_dev(file->minor->kdev->devt), atomic_read(&dev->open_count)); +#ifdef CONFIG_DRM_LEGACY if (drm_core_check_feature(dev, DRIVER_LEGACY) && dev->driver->preclose) dev->driver->preclose(dev, file); +#endif if (drm_core_check_feature(dev, DRIVER_LEGACY)) drm_legacy_lock_release(dev, file->filp); diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 2f5b0c2bb0fe..aca62ed51e82 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -552,7 +552,7 @@ out: } /** - * drm_mode_getfb2 - get extended FB info + * drm_mode_getfb2_ioctl - get extended FB info * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index d586068f5509..92f89cee213e 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -36,6 +36,7 @@ #include <linux/pagemap.h> #include <linux/shmem_fs.h> #include <linux/dma-buf.h> +#include <linux/dma-buf-map.h> #include <linux/mem_encrypt.h> #include <linux/pagevec.h> @@ -866,7 +867,7 @@ err: } /** - * drm_gem_open - implementation of the GEM_OPEN ioctl + * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl * @dev: drm_device * @data: ioctl data * @file_priv: drm file-private structure @@ -911,7 +912,7 @@ err: } /** - * gem_gem_open - initalizes GEM file-private structures at devnode open time + * drm_gem_open - initalizes GEM file-private structures at devnode open time * @dev: drm_device which is being opened by userspace * @file_private: drm file-private structure to set up * @@ -1205,28 +1206,32 @@ void drm_gem_unpin(struct drm_gem_object *obj) obj->funcs->unpin(obj); } -void *drm_gem_vmap(struct drm_gem_object *obj) +int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { - void *vaddr; + int ret; - if (obj->funcs->vmap) - vaddr = obj->funcs->vmap(obj); - else - vaddr = ERR_PTR(-EOPNOTSUPP); + if (!obj->funcs->vmap) + return -EOPNOTSUPP; - if (!vaddr) - vaddr = ERR_PTR(-ENOMEM); + ret = obj->funcs->vmap(obj, map); + if (ret) + return ret; + else if (dma_buf_map_is_null(map)) + return -ENOMEM; - return vaddr; + return 0; } -void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr) +void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) { - if (!vaddr) + if (dma_buf_map_is_null(map)) return; if (obj->funcs->vunmap) - obj->funcs->vunmap(obj, vaddr); + obj->funcs->vunmap(obj, map); + + /* Always set the mapping to NULL. Callers may rely on this. */ + dma_buf_map_clear(map); } /** diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 2165633c9b9e..4d5c1d86b022 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -33,6 +33,14 @@ * display drivers that are unable to map scattered buffers via an IOMMU. 
*/ +static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = { + .free = drm_gem_cma_free_object, + .print_info = drm_gem_cma_print_info, + .get_sg_table = drm_gem_cma_prime_get_sg_table, + .vmap = drm_gem_cma_prime_vmap, + .vm_ops = &drm_gem_cma_vm_ops, +}; + /** * __drm_gem_cma_create - Create a GEM CMA object without allocating memory * @drm: DRM device @@ -58,6 +66,10 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size) gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); if (!gem_obj) return ERR_PTR(-ENOMEM); + + if (!gem_obj->funcs) + gem_obj->funcs = &drm_gem_cma_default_funcs; + cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base); ret = drm_gem_object_init(drm, gem_obj, size); @@ -519,6 +531,8 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap); * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual * address space * @obj: GEM object + * @map: Returns the kernel virtual address of the CMA GEM object's backing + * store. * * This function maps a buffer exported via DRM PRIME into the kernel's * virtual address space. Since the CMA buffers are already mapped into the @@ -527,67 +541,17 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap); * driver's &drm_gem_object_funcs.vmap callback. * * Returns: - * The kernel virtual address of the CMA GEM object's backing store. + * 0 on success, or a negative error code otherwise. */ -void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj) +int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); - return cma_obj->vaddr; -} -EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap); - -/** - * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual - * address space - * @obj: GEM object - * @vaddr: kernel virtual address where the CMA GEM object was mapped - * - * This function removes a buffer exported via DRM PRIME from the kernel's - * virtual address space. This is a no-op because CMA buffers cannot be - * unmapped from kernel space. Drivers using the CMA helpers should set this - * as their &drm_gem_object_funcs.vunmap callback. - */ -void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - /* Nothing to do */ -} -EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap); - -static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = { - .free = drm_gem_cma_free_object, - .print_info = drm_gem_cma_print_info, - .get_sg_table = drm_gem_cma_prime_get_sg_table, - .vmap = drm_gem_cma_prime_vmap, - .vm_ops = &drm_gem_cma_vm_ops, -}; + dma_buf_map_set_vaddr(map, cma_obj->vaddr); -/** - * drm_gem_cma_create_object_default_funcs - Create a CMA GEM object with a - * default function table - * @dev: DRM device - * @size: Size of the object to allocate - * - * This sets the GEM object functions to the default CMA helper functions. - * This function can be used as the &drm_driver.gem_create_object callback. - * - * Returns: - * A pointer to a allocated GEM object or an error pointer on failure. 
- */ -struct drm_gem_object * -drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size) -{ - struct drm_gem_cma_object *cma_obj; - - cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); - if (!cma_obj) - return NULL; - - cma_obj->base.funcs = &drm_gem_cma_default_funcs; - - return &cma_obj->base; + return 0; } -EXPORT_SYMBOL(drm_gem_cma_create_object_default_funcs); +EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap); /** * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 8233bda4692f..499189c48f0b 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -258,19 +258,25 @@ void drm_gem_shmem_unpin(struct drm_gem_object *obj) } EXPORT_SYMBOL(drm_gem_shmem_unpin); -static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) +static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map) { struct drm_gem_object *obj = &shmem->base; - struct dma_buf_map map; int ret = 0; - if (shmem->vmap_use_count++ > 0) - return shmem->vaddr; + if (shmem->vmap_use_count++ > 0) { + dma_buf_map_set_vaddr(map, shmem->vaddr); + return 0; + } if (obj->import_attach) { - ret = dma_buf_vmap(obj->import_attach->dmabuf, &map); - if (!ret) - shmem->vaddr = map.vaddr; + ret = dma_buf_vmap(obj->import_attach->dmabuf, map); + if (!ret) { + if (WARN_ON(map->is_iomem)) { + ret = -EIO; + goto err_put_pages; + } + shmem->vaddr = map->vaddr; + } } else { pgprot_t prot = PAGE_KERNEL; @@ -284,6 +290,8 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) VM_MAP, prot); if (!shmem->vaddr) ret = -ENOMEM; + else + dma_buf_map_set_vaddr(map, shmem->vaddr); } if (ret) { @@ -291,7 +299,7 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) goto err_put_pages; } - return shmem->vaddr; + return 0; err_put_pages: if (!obj->import_attach) @@ -299,12 +307,14 @@ err_put_pages: err_zero_use: shmem->vmap_use_count = 0; - return ERR_PTR(ret); + return ret; } /* * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object * @shmem: shmem GEM object + * @map: Returns the kernel virtual address of the SHMEM GEM object's backing + * store. * * This function makes sure that a contiguous kernel virtual address mapping * exists for the buffer backing the shmem GEM object. @@ -318,26 +328,25 @@ err_zero_use: * Returns: * 0 on success or a negative error code on failure. 
*/ -void *drm_gem_shmem_vmap(struct drm_gem_object *obj) +int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - void *vaddr; int ret; ret = mutex_lock_interruptible(&shmem->vmap_lock); if (ret) - return ERR_PTR(ret); - vaddr = drm_gem_shmem_vmap_locked(shmem); + return ret; + ret = drm_gem_shmem_vmap_locked(shmem, map); mutex_unlock(&shmem->vmap_lock); - return vaddr; + return ret; } EXPORT_SYMBOL(drm_gem_shmem_vmap); -static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) +static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, + struct dma_buf_map *map) { struct drm_gem_object *obj = &shmem->base; - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(shmem->vaddr); if (WARN_ON_ONCE(!shmem->vmap_use_count)) return; @@ -346,7 +355,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) return; if (obj->import_attach) - dma_buf_vunmap(obj->import_attach->dmabuf, &map); + dma_buf_vunmap(obj->import_attach->dmabuf, map); else vunmap(shmem->vaddr); @@ -357,6 +366,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) /* * drm_gem_shmem_vunmap - Unmap a virtual mapping fo a shmem GEM object * @shmem: shmem GEM object + * @map: Kernel virtual address where the SHMEM GEM object was mapped * * This function cleans up a kernel virtual address mapping acquired by * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to @@ -366,12 +376,12 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) * also be called by drivers directly, in which case it will hide the * differences between dma-buf imported and natively allocated objects. */ -void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr) +void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); mutex_lock(&shmem->vmap_lock); - drm_gem_shmem_vunmap_locked(shmem); + drm_gem_shmem_vunmap_locked(shmem, map); mutex_unlock(&shmem->vmap_lock); } EXPORT_SYMBOL(drm_gem_shmem_vunmap); diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index 0e4fb9ba43ad..de28720757af 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -50,6 +50,43 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent, EXPORT_SYMBOL(drm_gem_ttm_print_info); /** + * drm_gem_ttm_vmap() - vmap &ttm_buffer_object + * @gem: GEM object. + * @map: [out] returns the dma-buf mapping. + * + * Maps a GEM object with ttm_bo_vmap(). This function can be used as + * &drm_gem_object_funcs.vmap callback. + * + * Returns: + * 0 on success, or a negative errno code otherwise. + */ +int drm_gem_ttm_vmap(struct drm_gem_object *gem, + struct dma_buf_map *map) +{ + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); + + return ttm_bo_vmap(bo, map); +} +EXPORT_SYMBOL(drm_gem_ttm_vmap); + +/** + * drm_gem_ttm_vunmap() - vunmap &ttm_buffer_object + * @gem: GEM object. + * @map: dma-buf mapping. + * + * Unmaps a GEM object with ttm_bo_vunmap(). This function can be used as + * &drm_gem_object_funcs.vmap callback. + */ +void drm_gem_ttm_vunmap(struct drm_gem_object *gem, + struct dma_buf_map *map) +{ + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); + + ttm_bo_vunmap(bo, map); +} +EXPORT_SYMBOL(drm_gem_ttm_vunmap); + +/** * drm_gem_ttm_mmap() - mmap &ttm_buffer_object * @gem: GEM object. 
* @vma: vm area. diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 16d68c04ea5d..02ca22e90290 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/dma-buf-map.h> #include <linux/module.h> #include <drm/drm_debugfs.h> @@ -112,8 +113,8 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo) * up; only release the GEM object. */ - WARN_ON(gbo->kmap_use_count); - WARN_ON(gbo->kmap.virtual); + WARN_ON(gbo->vmap_use_count); + WARN_ON(dma_buf_map_is_set(&gbo->map)); drm_gem_object_release(&gbo->bo.base); } @@ -378,39 +379,37 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo) } EXPORT_SYMBOL(drm_gem_vram_unpin); -static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, - bool map, bool *is_iomem) +static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, + struct dma_buf_map *map) { int ret; - struct ttm_bo_kmap_obj *kmap = &gbo->kmap; - if (gbo->kmap_use_count > 0) + if (gbo->vmap_use_count > 0) goto out; - if (kmap->virtual || !map) - goto out; - - ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap); + ret = ttm_bo_vmap(&gbo->bo, &gbo->map); if (ret) - return ERR_PTR(ret); + return ret; out: - if (!kmap->virtual) { - if (is_iomem) - *is_iomem = false; - return NULL; /* not mapped; don't increment ref */ - } - ++gbo->kmap_use_count; - if (is_iomem) - return ttm_kmap_obj_virtual(kmap, is_iomem); - return kmap->virtual; + ++gbo->vmap_use_count; + *map = gbo->map; + + return 0; } -static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo) +static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo, + struct dma_buf_map *map) { - if (WARN_ON_ONCE(!gbo->kmap_use_count)) + struct drm_device *dev = gbo->bo.base.dev; + + if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count)) return; - if (--gbo->kmap_use_count > 0) + + if (drm_WARN_ON_ONCE(dev, !dma_buf_map_is_equal(&gbo->map, map))) + return; /* BUG: map not mapped from this BO */ + + if (--gbo->vmap_use_count > 0) return; /* @@ -424,7 +423,9 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo) /** * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address * space - * @gbo: The GEM VRAM object to map + * @gbo: The GEM VRAM object to map + * @map: Returns the kernel virtual address of the VRAM GEM object's backing + * store. * * The vmap function pins a GEM VRAM object to its current location, either * system or video memory, and maps its buffer into kernel address space. @@ -433,48 +434,44 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo) * unmap and unpin the GEM VRAM object. * * Returns: - * The buffer's virtual address on success, or - * an ERR_PTR()-encoded error code otherwise. + * 0 on success, or a negative error code otherwise. 
*/ -void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo) +int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map) { int ret; - void *base; ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); if (ret) - return ERR_PTR(ret); + return ret; ret = drm_gem_vram_pin_locked(gbo, 0); if (ret) goto err_ttm_bo_unreserve; - base = drm_gem_vram_kmap_locked(gbo, true, NULL); - if (IS_ERR(base)) { - ret = PTR_ERR(base); + ret = drm_gem_vram_kmap_locked(gbo, map); + if (ret) goto err_drm_gem_vram_unpin_locked; - } ttm_bo_unreserve(&gbo->bo); - return base; + return 0; err_drm_gem_vram_unpin_locked: drm_gem_vram_unpin_locked(gbo); err_ttm_bo_unreserve: ttm_bo_unreserve(&gbo->bo); - return ERR_PTR(ret); + return ret; } EXPORT_SYMBOL(drm_gem_vram_vmap); /** * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object - * @gbo: The GEM VRAM object to unmap - * @vaddr: The mapping's base address as returned by drm_gem_vram_vmap() + * @gbo: The GEM VRAM object to unmap + * @map: Kernel virtual address where the VRAM GEM object was mapped * * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See * the documentation for drm_gem_vram_vmap() for more information. */ -void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr) +void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map) { int ret; @@ -482,7 +479,7 @@ void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr) if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret)) return; - drm_gem_vram_kunmap_locked(gbo); + drm_gem_vram_kunmap_locked(gbo, map); drm_gem_vram_unpin_locked(gbo); ttm_bo_unreserve(&gbo->bo); @@ -573,15 +570,13 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo, bool evict, struct ttm_resource *new_mem) { - struct ttm_bo_kmap_obj *kmap = &gbo->kmap; + struct ttm_buffer_object *bo = &gbo->bo; + struct drm_device *dev = bo->base.dev; - if (WARN_ON_ONCE(gbo->kmap_use_count)) + if (drm_WARN_ON_ONCE(dev, gbo->vmap_use_count)) return; - if (!kmap->virtual) - return; - ttm_bo_kunmap(kmap); - kmap->virtual = NULL; + ttm_bo_vunmap(bo, &gbo->map); } static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo, @@ -622,7 +617,7 @@ static void drm_gem_vram_object_free(struct drm_gem_object *gem) */ /** - * drm_gem_vram_driver_create_dumb() - \ + * drm_gem_vram_driver_dumb_create() - \ Implements &struct drm_driver.dumb_create * @file: the DRM file * @dev: the DRM device @@ -847,37 +842,33 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem) } /** - * drm_gem_vram_object_vmap() - \ - Implements &struct drm_gem_object_funcs.vmap - * @gem: The GEM object to map + * drm_gem_vram_object_vmap() - + * Implements &struct drm_gem_object_funcs.vmap + * @gem: The GEM object to map + * @map: Returns the kernel virtual address of the VRAM GEM object's backing + * store. * * Returns: - * The buffers virtual address on success, or - * NULL otherwise. + * 0 on success, or a negative error code otherwise. 
*/ -static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem) +static int drm_gem_vram_object_vmap(struct drm_gem_object *gem, struct dma_buf_map *map) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); - void *base; - base = drm_gem_vram_vmap(gbo); - if (IS_ERR(base)) - return NULL; - return base; + return drm_gem_vram_vmap(gbo, map); } /** - * drm_gem_vram_object_vunmap() - \ - Implements &struct drm_gem_object_funcs.vunmap - * @gem: The GEM object to unmap - * @vaddr: The mapping's base address + * drm_gem_vram_object_vunmap() - + * Implements &struct drm_gem_object_funcs.vunmap + * @gem: The GEM object to unmap + * @map: Kernel virtual address where the VRAM GEM object was mapped */ -static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, - void *vaddr) +static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, struct dma_buf_map *map) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); - drm_gem_vram_vunmap(gbo, vaddr); + drm_gem_vram_vunmap(gbo, map); } /* @@ -964,7 +955,8 @@ static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo) static int bo_driver_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) + struct ttm_resource *new_mem, + struct ttm_place *hop) { struct drm_gem_vram_object *gbo; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 2bdac3557765..81d386b5b92a 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -33,6 +33,7 @@ struct dentry; struct dma_buf; +struct dma_buf_map; struct drm_connector; struct drm_crtc; struct drm_framebuffer; @@ -187,8 +188,8 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent, int drm_gem_pin(struct drm_gem_object *obj); void drm_gem_unpin(struct drm_gem_object *obj); -void *drm_gem_vmap(struct drm_gem_object *obj); -void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr); +int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); +void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); /* drm_debugfs.c drm_debugfs_crc.c */ #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index db05f386a709..b26588b52795 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -115,7 +115,7 @@ void drm_mode_object_unregister(struct drm_device *dev, } /** - * drm_lease_required - check types which must be leased to be used + * drm_mode_object_lease_required - check types which must be leased to be used * @type: type of object * * Returns whether the provided type of drm_mode_object must diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 501b4fe55a3d..33fb2f05ce66 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1889,7 +1889,7 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev, EXPORT_SYMBOL(drm_mode_create_from_cmdline_mode); /** - * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo + * drm_mode_convert_to_umode - convert a drm_display_mode into a modeinfo * @out: drm_mode_modeinfo struct to return to the user * @in: drm_display_mode to use * @@ -1941,7 +1941,7 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, } /** - * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode + * drm_mode_convert_umode - convert a modeinfo into a drm_display_mode * @dev: drm device * @out: drm_display_mode to return to the user * @in: 
drm_mode_modeinfo to use diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index a7b61c2d9190..7db55fce35d8 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -667,21 +667,15 @@ EXPORT_SYMBOL(drm_gem_unmap_dma_buf); * * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling. + * The kernel virtual address is returned in map. * - * Returns the kernel virtual address or NULL on failure. + * Returns 0 on success or a negative errno code otherwise. */ int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) { struct drm_gem_object *obj = dma_buf->priv; - void *vaddr; - vaddr = drm_gem_vmap(obj); - if (IS_ERR(vaddr)) - return PTR_ERR(vaddr); - - dma_buf_map_set_vaddr(map, vaddr); - - return 0; + return drm_gem_vmap(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vmap); @@ -697,7 +691,7 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map) { struct drm_gem_object *obj = dma_buf->priv; - drm_gem_vunmap(obj, map->vaddr); + drm_gem_vunmap(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); diff --git a/drivers/gpu/drm/drm_scdc_helper.c b/drivers/gpu/drm/drm_scdc_helper.c index 311e71bbba5b..991b8c86d78d 100644 --- a/drivers/gpu/drm/drm_scdc_helper.c +++ b/drivers/gpu/drm/drm_scdc_helper.c @@ -125,7 +125,7 @@ ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset, EXPORT_SYMBOL(drm_scdc_write); /** - * drm_scdc_check_scrambling_status - what is status of scrambling? + * drm_scdc_get_scrambling_status - what is status of scrambling? * @adapter: I2C adapter for DDC channel * * Reads the scrambler status over SCDC, and checks the diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index f135b79593dd..d30e2f2b8f3c 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -209,9 +209,12 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->get_vblank_counter) return crtc->funcs->get_vblank_counter(crtc); - } else if (dev->driver->get_vblank_counter) { + } +#ifdef CONFIG_DRM_LEGACY + else if (dev->driver->get_vblank_counter) { return dev->driver->get_vblank_counter(dev, pipe); } +#endif return drm_vblank_no_hw_counter(dev, pipe); } @@ -429,9 +432,12 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->disable_vblank) crtc->funcs->disable_vblank(crtc); - } else { + } +#ifdef CONFIG_DRM_LEGACY + else { dev->driver->disable_vblank(dev, pipe); } +#endif } /* @@ -1096,9 +1102,12 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->enable_vblank) return crtc->funcs->enable_vblank(crtc); - } else if (dev->driver->enable_vblank) { + } +#ifdef CONFIG_DRM_LEGACY + else if (dev->driver->enable_vblank) { return dev->driver->enable_vblank(dev, pipe); } +#endif return -EINVAL; } diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 1a6369633789..6d5a03b32238 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -70,9 +70,6 @@ static pgprot_t drm_io_prot(struct drm_local_map *map, { pgprot_t tmp = vm_get_page_prot(vma->vm_flags); - /* We don't want graphics memory to be mapped encrypted */ - tmp = pgprot_decrypted(tmp); - #if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \ defined(__mips__) if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING)) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c 
b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index aa270b79e585..f0a07278ad04 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -480,7 +480,7 @@ static const struct file_operations fops = { .mmap = etnaviv_gem_mmap, }; -static struct drm_driver etnaviv_drm_driver = { +static const struct drm_driver etnaviv_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_RENDER, .open = etnaviv_open, .postclose = etnaviv_postclose, @@ -522,7 +522,6 @@ static int etnaviv_bind(struct device *dev) } drm->dev_private = priv; - dev->dma_parms = &priv->dma_parms; dma_set_max_seg_size(dev, SZ_2G); mutex_init(&priv->gem_lock); @@ -572,8 +571,6 @@ static void etnaviv_unbind(struct device *dev) component_unbind_all(dev, drm); - dev->dma_parms = NULL; - etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc); drm->dev_private = NULL; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index 914f0867ff71..003288ebd896 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -33,7 +33,6 @@ struct etnaviv_file_private { struct etnaviv_drm_private { int num_gpus; - struct device_dma_parameters dma_parms; struct etnaviv_gpu *gpu[ETNA_MAX_PIPES]; gfp_t shm_gfp_mask; @@ -51,8 +50,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma); int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset); struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj); -void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj); -void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); int etnaviv_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 67d9a2b9ea6a..bbd235473645 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -571,7 +571,6 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = { .unpin = etnaviv_gem_prime_unpin, .get_sg_table = etnaviv_gem_prime_get_sg_table, .vmap = etnaviv_gem_prime_vmap, - .vunmap = etnaviv_gem_prime_vunmap, .vm_ops = &vm_ops, }; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 135fbff6fecf..d9bd83203a15 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -22,14 +22,16 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages); } -void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj) +int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { - return etnaviv_gem_vmap(obj); -} + void *vaddr; -void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - /* TODO msm_gem_vunmap() */ + vaddr = etnaviv_gem_vmap(obj); + if (!vaddr) + return -ENOMEM; + dma_buf_map_set_vaddr(map, vaddr); + + return 0; } int etnaviv_gem_prime_mmap(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 15d9fa3879e5..dab1b58006d8 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -124,6 +124,8 
@@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context, { struct etnaviv_gem_object *etnaviv_obj = mapping->object; + lockdep_assert_held(&context->lock); + etnaviv_iommu_unmap(context, mapping->vram_node.start, etnaviv_obj->sgt, etnaviv_obj->base.size); drm_mm_remove_node(&mapping->vram_node); @@ -216,6 +218,8 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context, static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context, struct drm_mm_node *node, size_t size, u64 va) { + lockdep_assert_held(&context->lock); + return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va, va + size, DRM_MM_INSERT_LOWEST); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c index 75f9db8f7bec..bafdfe49c1d8 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c @@ -46,6 +46,33 @@ static u32 perf_reg_read(struct etnaviv_gpu *gpu, return gpu_read(gpu, domain->profile_read); } +static inline void pipe_select(struct etnaviv_gpu *gpu, u32 clock, unsigned pipe) +{ + clock &= ~(VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK); + clock |= VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(pipe); + + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock); +} + +static u32 pipe_perf_reg_read(struct etnaviv_gpu *gpu, + const struct etnaviv_pm_domain *domain, + const struct etnaviv_pm_signal *signal) +{ + u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); + u32 value = 0; + unsigned i; + + for (i = 0; i < gpu->identity.pixel_pipes; i++) { + pipe_select(gpu, clock, i); + value += perf_reg_read(gpu, domain, signal); + } + + /* switch back to pixel pipe 0 to prevent GPU hang */ + pipe_select(gpu, clock, 0); + + return value; +} + static u32 pipe_reg_read(struct etnaviv_gpu *gpu, const struct etnaviv_pm_domain *domain, const struct etnaviv_pm_signal *signal) @@ -55,17 +82,12 @@ static u32 pipe_reg_read(struct etnaviv_gpu *gpu, unsigned i; for (i = 0; i < gpu->identity.pixel_pipes; i++) { - clock &= ~(VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK); - clock |= VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(i); - gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock); - gpu_write(gpu, domain->profile_config, signal->data); - value += gpu_read(gpu, domain->profile_read); + pipe_select(gpu, clock, i); + value += gpu_read(gpu, signal->data); } /* switch back to pixel pipe 0 to prevent GPU hang */ - clock &= ~(VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK); - clock |= VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(0); - gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock); + pipe_select(gpu, clock, 0); return value; } @@ -103,9 +125,19 @@ static const struct etnaviv_pm_domain doms_3d[] = { .name = "HI", .profile_read = VIVS_MC_PROFILE_HI_READ, .profile_config = VIVS_MC_PROFILE_CONFIG2, - .nr_signals = 5, + .nr_signals = 7, .signal = (const struct etnaviv_pm_signal[]) { { + "TOTAL_READ_BYTES8", + VIVS_HI_PROFILE_READ_BYTES8, + &pipe_reg_read, + }, + { + "TOTAL_WRITE_BYTES8", + VIVS_HI_PROFILE_WRITE_BYTES8, + &pipe_reg_read, + }, + { "TOTAL_CYCLES", 0, &hi_total_cycle_read @@ -141,22 +173,22 @@ static const struct etnaviv_pm_domain doms_3d[] = { { "PIXEL_COUNT_KILLED_BY_COLOR_PIPE", VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE, - &pipe_reg_read + &pipe_perf_reg_read }, { "PIXEL_COUNT_KILLED_BY_DEPTH_PIPE", VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE, - &pipe_reg_read + &pipe_perf_reg_read }, { "PIXEL_COUNT_DRAWN_BY_COLOR_PIPE", VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE, - 
&pipe_reg_read + &pipe_perf_reg_read }, { "PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE", VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE, - &pipe_reg_read + &pipe_perf_reg_read } } }, @@ -184,32 +216,32 @@ static const struct etnaviv_pm_domain doms_3d[] = { { "VS_INST_COUNTER", VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER, - &pipe_reg_read + &pipe_perf_reg_read }, { "RENDERED_VERTICE_COUNTER", VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER, - &pipe_reg_read + &pipe_perf_reg_read }, { "VTX_BRANCH_INST_COUNTER", VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER, - &pipe_reg_read + &pipe_perf_reg_read }, { "VTX_TEXLD_INST_COUNTER", VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER, - &pipe_reg_read + &pipe_perf_reg_read }, { "PXL_BRANCH_INST_COUNTER", VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER, - &pipe_reg_read + &pipe_perf_reg_read }, { "PXL_TEXLD_INST_COUNTER", VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER, - &pipe_reg_read + &pipe_perf_reg_read } } }, @@ -237,17 +269,17 @@ static const struct etnaviv_pm_domain doms_3d[] = { { "DEPTH_CLIPPED_COUNTER", VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER, - &pipe_reg_read + &pipe_perf_reg_read }, { "TRIVIAL_REJECTED_COUNTER", VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER, - &pipe_reg_read + &pipe_perf_reg_read }, { "CULLED_COUNTER", VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER, - &pipe_reg_read + &pipe_perf_reg_read } } }, @@ -400,7 +432,7 @@ static const struct etnaviv_pm_domain doms_2d[] = { { "PIXELS_RENDERED_2D", VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D, - &pipe_reg_read + &pipe_perf_reg_read } } } diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index f2d87a7445c7..431c5d32f9a4 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -346,6 +346,7 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win) /** * shadow_protect_win() - disable updating values from shadow registers at vsync * + * @ctx: display and enhancement controller context * @win: window to protect registers for * @protect: 1 to protect (disable updates) */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index fe46680ca208..e60257f1f24b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -113,7 +113,7 @@ static const struct file_operations exynos_drm_driver_fops = { .release = drm_release, }; -static struct drm_driver exynos_drm_driver = { +static const struct drm_driver exynos_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_RENDER, .open = exynos_drm_open, diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 5b9666fc7af1..83ab6b343f51 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -282,7 +282,6 @@ struct exynos_dsi { struct list_head transfer_list; const struct exynos_dsi_driver_data *driver_data; - struct device_node *in_bridge_node; }; #define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host) @@ -1684,17 +1683,16 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi) if (ret < 0) return ret; - dsi->in_bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0); - return 0; } static int exynos_dsi_bind(struct device *dev, struct device *master, void *data) { - struct drm_encoder *encoder = dev_get_drvdata(dev); - struct exynos_dsi *dsi = encoder_to_dsi(encoder); + struct 
exynos_dsi *dsi = dev_get_drvdata(dev); + struct drm_encoder *encoder = &dsi->encoder; struct drm_device *drm_dev = data; + struct device_node *in_bridge_node; struct drm_bridge *in_bridge; int ret; @@ -1706,10 +1704,12 @@ static int exynos_dsi_bind(struct device *dev, struct device *master, if (ret < 0) return ret; - if (dsi->in_bridge_node) { - in_bridge = of_drm_find_bridge(dsi->in_bridge_node); + in_bridge_node = of_graph_get_remote_node(dev->of_node, DSI_PORT_IN, 0); + if (in_bridge_node) { + in_bridge = of_drm_find_bridge(in_bridge_node); if (in_bridge) drm_bridge_attach(encoder, in_bridge, NULL, 0); + of_node_put(in_bridge_node); } return mipi_dsi_host_register(&dsi->dsi_host); @@ -1718,8 +1718,8 @@ static int exynos_dsi_bind(struct device *dev, struct device *master, static void exynos_dsi_unbind(struct device *dev, struct device *master, void *data) { - struct drm_encoder *encoder = dev_get_drvdata(dev); - struct exynos_dsi *dsi = encoder_to_dsi(encoder); + struct exynos_dsi *dsi = dev_get_drvdata(dev); + struct drm_encoder *encoder = &dsi->encoder; exynos_dsi_disable(encoder); @@ -1815,7 +1815,7 @@ static int exynos_dsi_probe(struct platform_device *pdev) if (ret) return ret; - platform_set_drvdata(pdev, &dsi->encoder); + platform_set_drvdata(pdev, dsi); pm_runtime_enable(dev); @@ -1827,17 +1827,12 @@ static int exynos_dsi_probe(struct platform_device *pdev) err_disable_runtime: pm_runtime_disable(dev); - of_node_put(dsi->in_bridge_node); return ret; } static int exynos_dsi_remove(struct platform_device *pdev) { - struct exynos_dsi *dsi = platform_get_drvdata(pdev); - - of_node_put(dsi->in_bridge_node); - pm_runtime_disable(&pdev->dev); component_del(&pdev->dev, &exynos_dsi_component_ops); @@ -1847,8 +1842,7 @@ static int exynos_dsi_remove(struct platform_device *pdev) static int __maybe_unused exynos_dsi_suspend(struct device *dev) { - struct drm_encoder *encoder = dev_get_drvdata(dev); - struct exynos_dsi *dsi = encoder_to_dsi(encoder); + struct exynos_dsi *dsi = dev_get_drvdata(dev); const struct exynos_dsi_driver_data *driver_data = dsi->driver_data; int ret, i; @@ -1878,8 +1872,7 @@ static int __maybe_unused exynos_dsi_suspend(struct device *dev) static int __maybe_unused exynos_dsi_resume(struct device *dev) { - struct drm_encoder *encoder = dev_get_drvdata(dev); - struct exynos_dsi *dsi = encoder_to_dsi(encoder); + struct exynos_dsi *dsi = dev_get_drvdata(dev); const struct exynos_dsi_driver_data *driver_data = dsi->driver_data; int ret, i; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index bb67cad8371f..49a2e0c53918 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -725,6 +725,7 @@ static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win) /** * shadow_protect_win() - disable updating values from shadow registers at vsync * + * @ctx: local driver data * @win: window to protect registers for * @protect: 1 to protect (disable updates) */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 4afbf5109cbf..4396224227d1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -135,8 +135,6 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = { static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = { .free = exynos_drm_gem_free_object, .get_sg_table = exynos_drm_gem_prime_get_sg_table, - .vmap = exynos_drm_gem_prime_vmap, - .vunmap = 
exynos_drm_gem_prime_vunmap, .vm_ops = &exynos_drm_gem_vm_ops, }; @@ -469,16 +467,6 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, return &exynos_gem->base; } -void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj) -{ - return NULL; -} - -void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - /* Nothing to do */ -} - int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 74e926abeff0..a23272fb96fb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -107,8 +107,6 @@ struct drm_gem_object * exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj); -void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 45e9aee8366a..b01f36e76eaf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -118,6 +118,7 @@ struct gsc_context { * struct gsc_driverdata - per device type driver data for init time. * * @limits: picture size limits array + * @num_limits: number of items in the aforementioned array * @clk_names: names of clocks needed by this variant * @num_clocks: the number of clocks needed by this variant */ diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index dc01c188c0e0..39fa5d3b01ef 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -522,6 +522,15 @@ static const struct hdmiphy_config hdmiphy_5420_configs[] = { 0x54, 0x4B, 0x25, 0x03, 0x00, 0x80, 0x01, 0x80, }, }, + { + .pixel_clock = 154000000, + .conf = { + 0x01, 0xD1, 0x20, 0x01, 0x40, 0x30, 0x08, 0xCC, + 0x8C, 0xE8, 0xC1, 0xD8, 0x45, 0xA0, 0xAC, 0x80, + 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x86, + 0x54, 0x3F, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, + }, + }, }; static const struct hdmiphy_config hdmiphy_5433_configs[] = { diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index abbc1ddbf27f..7528e8a2d359 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -134,7 +134,7 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg) DEFINE_DRM_GEM_CMA_FOPS(fsl_dcu_drm_fops); -static struct drm_driver fsl_dcu_drm_driver = { +static const struct drm_driver fsl_dcu_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .load = fsl_dcu_load, .unload = fsl_dcu_unload, @@ -234,7 +234,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct resource *res; void __iomem *base; - struct drm_driver *driver = &fsl_dcu_drm_driver; struct clk *pix_clk_in; char pix_clk_name[32]; const char *pix_clk_in_name; @@ -304,7 +303,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) fsl_dev->tcon = fsl_tcon_init(dev); - drm = drm_dev_alloc(driver, dev); + drm = drm_dev_alloc(&fsl_dcu_drm_driver, dev); if (IS_ERR(drm)) { ret = PTR_ERR(drm); goto unregister_pix_clk; diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c index adc0507545bf..437bbb6af9e6 100644 --- 
a/drivers/gpu/drm/gma500/accel_2d.c +++ b/drivers/gpu/drm/gma500/accel_2d.c @@ -58,295 +58,3 @@ void psb_spank(struct drm_psb_private *dev_priv) (void) PSB_RSGX32(PSB_CR_BIF_CTRL); PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE); } - -/** - * psb_2d_wait_available - wait for FIFO room - * @dev_priv: our DRM device - * @size: size (in dwords) of the command we want to issue - * - * Wait until there is room to load the FIFO with our data. If the - * device is not responding then reset it - */ -static int psb_2d_wait_available(struct drm_psb_private *dev_priv, - unsigned size) -{ - uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF); - unsigned long t = jiffies + HZ; - - while (avail < size) { - avail = PSB_RSGX32(PSB_CR_2D_SOCIF); - if (time_after(jiffies, t)) { - psb_spank(dev_priv); - return -EIO; - } - } - return 0; -} - -/** - * psbfb_2d_submit - submit a 2D command - * @dev_priv: our DRM device - * @cmdbuf: command to issue - * @size: length (in dwords) - * - * Issue one or more 2D commands to the accelerator. This needs to be - * serialized later when we add the GEM interfaces for acceleration - */ -static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf, - unsigned size) -{ - int ret = 0; - int i; - unsigned submit_size; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->lock_2d, flags); - while (size > 0) { - submit_size = (size < 0x60) ? size : 0x60; - size -= submit_size; - ret = psb_2d_wait_available(dev_priv, submit_size); - if (ret) - break; - - submit_size <<= 2; - - for (i = 0; i < submit_size; i += 4) - PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i); - - (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4); - } - spin_unlock_irqrestore(&dev_priv->lock_2d, flags); - return ret; -} - - -/** - * psb_accel_2d_copy_direction - compute blit order - * @xdir: X direction of move - * @ydir: Y direction of move - * - * Compute the correct order settings to ensure that an overlapping blit - * correctly copies all the pixels. - */ -static u32 psb_accel_2d_copy_direction(int xdir, int ydir) -{ - if (xdir < 0) - return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL : - PSB_2D_COPYORDER_TR2BL; - else - return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR : - PSB_2D_COPYORDER_TL2BR; -} - -/** - * psb_accel_2d_copy - accelerated 2D copy - * @dev_priv: our DRM device - * @src_offset in bytes - * @src_stride in bytes - * @src_format psb 2D format defines - * @dst_offset in bytes - * @dst_stride in bytes - * @dst_format psb 2D format defines - * @src_x offset in pixels - * @src_y offset in pixels - * @dst_x offset in pixels - * @dst_y offset in pixels - * @size_x of the copied area - * @size_y of the copied area - * - * Format and issue a 2D accelerated copy command.
- */ -static int psb_accel_2d_copy(struct drm_psb_private *dev_priv, - uint32_t src_offset, uint32_t src_stride, - uint32_t src_format, uint32_t dst_offset, - uint32_t dst_stride, uint32_t dst_format, - uint16_t src_x, uint16_t src_y, - uint16_t dst_x, uint16_t dst_y, - uint16_t size_x, uint16_t size_y) -{ - uint32_t blit_cmd; - uint32_t buffer[10]; - uint32_t *buf; - uint32_t direction; - - buf = buffer; - - direction = - psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y); - - if (direction == PSB_2D_COPYORDER_BR2TL || - direction == PSB_2D_COPYORDER_TR2BL) { - src_x += size_x - 1; - dst_x += size_x - 1; - } - if (direction == PSB_2D_COPYORDER_BR2TL || - direction == PSB_2D_COPYORDER_BL2TR) { - src_y += size_y - 1; - dst_y += size_y - 1; - } - - blit_cmd = - PSB_2D_BLIT_BH | - PSB_2D_ROT_NONE | - PSB_2D_DSTCK_DISABLE | - PSB_2D_SRCCK_DISABLE | - PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction; - - *buf++ = PSB_2D_FENCE_BH; - *buf++ = - PSB_2D_DST_SURF_BH | dst_format | (dst_stride << - PSB_2D_DST_STRIDE_SHIFT); - *buf++ = dst_offset; - *buf++ = - PSB_2D_SRC_SURF_BH | src_format | (src_stride << - PSB_2D_SRC_STRIDE_SHIFT); - *buf++ = src_offset; - *buf++ = - PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) | - (src_y << PSB_2D_SRCOFF_YSTART_SHIFT); - *buf++ = blit_cmd; - *buf++ = - (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y << - PSB_2D_DST_YSTART_SHIFT); - *buf++ = - (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y << - PSB_2D_DST_YSIZE_SHIFT); - *buf++ = PSB_2D_FLUSH_BH; - - return psbfb_2d_submit(dev_priv, buffer, buf - buffer); -} - -/** - * psbfb_copyarea_accel - copyarea acceleration for /dev/fb - * @info: our framebuffer - * @a: copyarea parameters from the framebuffer core - * - * Perform a 2D copy via the accelerator - */ -static void psbfb_copyarea_accel(struct fb_info *info, - const struct fb_copyarea *a) -{ - struct drm_fb_helper *fb_helper = info->par; - struct drm_framebuffer *fb = fb_helper->fb; - struct drm_device *dev; - struct drm_psb_private *dev_priv; - uint32_t offset; - uint32_t stride; - uint32_t src_format; - uint32_t dst_format; - - if (!fb) - return; - - dev = fb->dev; - dev_priv = dev->dev_private; - offset = to_gtt_range(fb->obj[0])->offset; - stride = fb->pitches[0]; - - switch (fb->format->depth) { - case 8: - src_format = PSB_2D_SRC_332RGB; - dst_format = PSB_2D_DST_332RGB; - break; - case 15: - src_format = PSB_2D_SRC_555RGB; - dst_format = PSB_2D_DST_555RGB; - break; - case 16: - src_format = PSB_2D_SRC_565RGB; - dst_format = PSB_2D_DST_565RGB; - break; - case 24: - case 32: - /* this is wrong but since we don't do blending its okay */ - src_format = PSB_2D_SRC_8888ARGB; - dst_format = PSB_2D_DST_8888ARGB; - break; - default: - /* software fallback */ - drm_fb_helper_cfb_copyarea(info, a); - return; - } - - if (!gma_power_begin(dev, false)) { - drm_fb_helper_cfb_copyarea(info, a); - return; - } - psb_accel_2d_copy(dev_priv, - offset, stride, src_format, - offset, stride, dst_format, - a->sx, a->sy, a->dx, a->dy, a->width, a->height); - gma_power_end(dev); -} - -/** - * psbfb_copyarea - 2D copy interface - * @info: our framebuffer - * @region: region to copy - * - * Copy an area of the framebuffer console either by the accelerator - * or directly using the cfb helpers according to the request - */ -void psbfb_copyarea(struct fb_info *info, - const struct fb_copyarea *region) -{ - if (unlikely(info->state != FBINFO_STATE_RUNNING)) - return; - - /* Avoid the 8 pixel erratum */ - if (region->width == 8 || region->height == 8 || - (info->flags & 
FBINFO_HWACCEL_DISABLED)) - return drm_fb_helper_cfb_copyarea(info, region); - - psbfb_copyarea_accel(info, region); -} - -/** - * psbfb_sync - synchronize 2D - * @info: our framebuffer - * - * Wait for the 2D engine to quiesce so that we can do CPU - * access to the framebuffer again - */ -int psbfb_sync(struct fb_info *info) -{ - struct drm_fb_helper *fb_helper = info->par; - struct drm_framebuffer *fb = fb_helper->fb; - struct drm_device *dev = fb->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - unsigned long _end = jiffies + HZ; - int busy = 0; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->lock_2d, flags); - /* - * First idle the 2D engine. - */ - - if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) && - ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0)) - goto out; - - do { - busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY); - cpu_relax(); - } while (busy && !time_after_eq(jiffies, _end)); - - if (busy) - busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY); - if (busy) - goto out; - - do { - busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & - _PSB_C2B_STATUS_BUSY) != 0); - cpu_relax(); - } while (busy && !time_after_eq(jiffies, _end)); - if (busy) - busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & - _PSB_C2B_STATUS_BUSY) != 0); - -out: - spin_unlock_irqrestore(&dev_priv->lock_2d, flags); - return (busy) ? -EBUSY : 0; -} diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index 4d216a0205f2..e75293e4a52f 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c @@ -584,7 +584,6 @@ static int cdv_chip_setup(struct drm_device *dev) const struct psb_ops cdv_chip_ops = { .name = "GMA3600/3650", - .accel_2d = 0, .pipes = 2, .crtcs = 2, .hdmi_mask = (1 << 0) | (1 << 1), diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 5ede24fb44ae..fc4fda1d258b 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -76,27 +76,6 @@ static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green, return 0; } -static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info) -{ - struct drm_fb_helper *fb_helper = info->par; - struct drm_framebuffer *fb = fb_helper->fb; - struct drm_device *dev = fb->dev; - struct gtt_range *gtt = to_gtt_range(fb->obj[0]); - - /* - * We have to poke our nose in here. The core fb code assumes - * panning is part of the hardware that can be invoked before - * the actual fb is mapped. In our case that isn't quite true. 
- */ - if (gtt->npage) { - /* GTT roll shifts in 4K pages, we need to shift the right - number of pages */ - int pages = info->fix.line_length >> 12; - psb_gtt_roll(dev, gtt, var->yoffset * pages); - } - return 0; -} - static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; @@ -165,28 +144,6 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma) return 0; } -static const struct fb_ops psbfb_ops = { - .owner = THIS_MODULE, - DRM_FB_HELPER_DEFAULT_OPS, - .fb_setcolreg = psbfb_setcolreg, - .fb_fillrect = drm_fb_helper_cfb_fillrect, - .fb_copyarea = psbfb_copyarea, - .fb_imageblit = drm_fb_helper_cfb_imageblit, - .fb_mmap = psbfb_mmap, - .fb_sync = psbfb_sync, -}; - -static const struct fb_ops psbfb_roll_ops = { - .owner = THIS_MODULE, - DRM_FB_HELPER_DEFAULT_OPS, - .fb_setcolreg = psbfb_setcolreg, - .fb_fillrect = drm_fb_helper_cfb_fillrect, - .fb_copyarea = drm_fb_helper_cfb_copyarea, - .fb_imageblit = drm_fb_helper_cfb_imageblit, - .fb_pan_display = psbfb_pan, - .fb_mmap = psbfb_mmap, -}; - static const struct fb_ops psbfb_unaccel_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, @@ -312,8 +269,6 @@ static int psbfb_create(struct drm_fb_helper *fb_helper, int ret; struct gtt_range *backing; u32 bpp, depth; - int gtt_roll = 0; - int pitch_lines = 0; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; @@ -324,50 +279,15 @@ static int psbfb_create(struct drm_fb_helper *fb_helper, if (bpp == 24) bpp = 32; - do { - /* - * Acceleration via the GTT requires pitch to be - * power of two aligned. Preferably page but less - * is ok with some fonts - */ - mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines); - - size = mode_cmd.pitches[0] * mode_cmd.height; - size = ALIGN(size, PAGE_SIZE); - - /* Allocate the fb in the GTT with stolen page backing */ - backing = psbfb_alloc(dev, size); - - if (pitch_lines) - pitch_lines *= 2; - else - pitch_lines = 1; - gtt_roll++; - } while (backing == NULL && pitch_lines <= 16); - - /* The final pitch we accepted if we succeeded */ - pitch_lines /= 2; - - if (backing == NULL) { - /* - * We couldn't get the space we wanted, fall back to the - * display engine requirement instead. 
The HW requires - * the pitch to be 64 byte aligned - */ - - gtt_roll = 0; /* Don't use GTT accelerated scrolling */ - pitch_lines = 64; - - mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64); - - size = mode_cmd.pitches[0] * mode_cmd.height; - size = ALIGN(size, PAGE_SIZE); - - /* Allocate the framebuffer in the GTT with stolen page backing */ - backing = psbfb_alloc(dev, size); - if (backing == NULL) - return -ENOMEM; - } + mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(bpp, 8), 64); + + size = mode_cmd.pitches[0] * mode_cmd.height; + size = ALIGN(size, PAGE_SIZE); + + /* Allocate the framebuffer in the GTT with stolen page backing */ + backing = psbfb_alloc(dev, size); + if (backing == NULL) + return -ENOMEM; memset(dev_priv->vram_addr + backing->offset, 0, size); @@ -387,17 +307,11 @@ static int psbfb_create(struct drm_fb_helper *fb_helper, fb_helper->fb = fb; - if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */ - info->fbops = &psbfb_ops; - else if (gtt_roll) { /* GTT rolling seems best */ - info->fbops = &psbfb_roll_ops; - info->flags |= FBINFO_HWACCEL_YPAN; - } else /* Software */ - info->fbops = &psbfb_unaccel_ops; + info->fbops = &psbfb_unaccel_ops; info->fix.smem_start = dev->mode_config.fb_base; info->fix.smem_len = size; - info->fix.ywrapstep = gtt_roll; + info->fix.ywrapstep = 0; info->fix.ypanstep = 0; /* Accessed stolen memory directly */ diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index 8f07de83b6fb..db827e591403 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -32,12 +32,6 @@ static void psb_gem_free_object(struct drm_gem_object *obj) psb_gtt_free_range(obj->dev, gtt); } -int psb_gem_get_aperture(struct drm_device *dev, void *data, - struct drm_file *file) -{ - return -EINVAL; -} - static const struct vm_operations_struct psb_gem_vm_ops = { .fault = psb_gem_fault, .open = drm_gem_vm_open, diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index 9278bcfad1bf..d246b1f70366 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c @@ -96,16 +96,12 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r, } /* Write our page entries into the GTT itself */ - for (i = r->roll; i < r->npage; i++) { - pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), - PSB_MMU_CACHED_MEMORY); - iowrite32(pte, gtt_slot++); - } - for (i = 0; i < r->roll; i++) { + for (i = 0; i < r->npage; i++) { pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), PSB_MMU_CACHED_MEMORY); iowrite32(pte, gtt_slot++); } + /* Make sure all the entries are set before we return */ ioread32(gtt_slot - 1); @@ -141,49 +137,6 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r) } /** - * psb_gtt_roll - set scrolling position - * @dev: our DRM device - * @r: the gtt mapping we are using - * @roll: roll offset - * - * Roll an existing pinned mapping by moving the pages through the GTT. 
- * This allows us to implement hardware scrolling on the consoles without - * a 2D engine - */ -void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll) -{ - u32 __iomem *gtt_slot; - u32 pte; - int i; - - if (roll >= r->npage) { - WARN_ON(1); - return; - } - - r->roll = roll; - - /* Not currently in the GTT - no worry we will write the mapping at - the right position when it gets pinned */ - if (!r->stolen && !r->in_gart) - return; - - gtt_slot = psb_gtt_entry(dev, r); - - for (i = r->roll; i < r->npage; i++) { - pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), - PSB_MMU_CACHED_MEMORY); - iowrite32(pte, gtt_slot++); - } - for (i = 0; i < r->roll; i++) { - pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), - PSB_MMU_CACHED_MEMORY); - iowrite32(pte, gtt_slot++); - } - ioread32(gtt_slot - 1); -} - -/** * psb_gtt_attach_pages - attach and pin GEM pages * @gt: the gtt range * @@ -346,7 +299,6 @@ struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len, gt->resource.name = name; gt->stolen = backed; gt->in_gart = backed; - gt->roll = 0; /* Ensure this is set for non GEM objects */ gt->gem.dev = dev; ret = allocate_resource(dev_priv->gtt_mem, >->resource, diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h index 3cf190295ad3..2bf165849ebe 100644 --- a/drivers/gpu/drm/gma500/gtt.h +++ b/drivers/gpu/drm/gma500/gtt.h @@ -37,7 +37,6 @@ struct gtt_range { bool mmapping; /* Is mmappable */ struct page **pages; /* Backing pages if present */ int npage; /* Number of backing pages */ - int roll; /* Roll applied to the GTT entries */ }; #define to_gtt_range(x) container_of(x, struct gtt_range, gem) @@ -49,7 +48,5 @@ extern void psb_gtt_kref_put(struct gtt_range *gt); extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt); extern int psb_gtt_pin(struct gtt_range *gt); extern void psb_gtt_unpin(struct gtt_range *gt); -extern void psb_gtt_roll(struct drm_device *dev, - struct gtt_range *gt, int roll); extern int psb_gtt_restore(struct drm_device *dev); #endif diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c index be9cf6b1e3b3..b83d59b21de5 100644 --- a/drivers/gpu/drm/gma500/mdfld_device.c +++ b/drivers/gpu/drm/gma500/mdfld_device.c @@ -536,7 +536,6 @@ static int mdfld_chip_setup(struct drm_device *dev) const struct psb_ops mdfld_chip_ops = { .name = "mdfld", - .accel_2d = 0, .pipes = 3, .crtcs = 3, .lvds_mask = (1 << 1), diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c index ade7e2416a66..8754290b0e23 100644 --- a/drivers/gpu/drm/gma500/oaktrail_device.c +++ b/drivers/gpu/drm/gma500/oaktrail_device.c @@ -536,7 +536,6 @@ static void oaktrail_teardown(struct drm_device *dev) const struct psb_ops oaktrail_chip_ops = { .name = "Oaktrail", - .accel_2d = 1, .pipes = 2, .crtcs = 2, .hdmi_mask = (1 << 1), diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c index ece994c4c21a..2d21f8ec595f 100644 --- a/drivers/gpu/drm/gma500/psb_device.c +++ b/drivers/gpu/drm/gma500/psb_device.c @@ -318,7 +318,6 @@ static void psb_chip_teardown(struct drm_device *dev) const struct psb_ops psb_chip_ops = { .name = "Poulsbo", - .accel_2d = 1, .pipes = 2, .crtcs = 2, .hdmi_mask = (1 << 0), diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index b13376a6fb91..cc2d59e8471d 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -34,7 +34,7 @@ #include "psb_intel_reg.h" #include 
"psb_reg.h" -static struct drm_driver driver; +static const struct drm_driver driver; static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); /* @@ -124,7 +124,6 @@ static int psb_do_init(struct drm_device *dev) (stolen_gtt << PAGE_SHIFT) * 1024; spin_lock_init(&dev_priv->irqmask_lock); - spin_lock_init(&dev_priv->lock_2d); PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0); PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1); @@ -491,7 +490,7 @@ static const struct file_operations psb_gem_fops = { .read = drm_read, }; -static struct drm_driver driver = { +static const struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM, .lastclose = drm_fb_helper_lastclose, diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index c71a5a4e912c..5b7f7a312d53 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -576,9 +576,6 @@ struct drm_psb_private { struct drm_fb_helper *fb_helper; - /* 2D acceleration */ - spinlock_t lock_2d; - /* Panel brightness */ int brightness; int brightness_adjusted; @@ -615,7 +612,6 @@ struct drm_psb_private { /* Operations for each board type */ struct psb_ops { const char *name; - unsigned int accel_2d:1; int pipes; /* Number of output pipes */ int crtcs; /* Number of CRTCs */ int sgx_offset; /* Base offset of SGX device */ @@ -696,9 +692,6 @@ extern int psbfb_probed(struct drm_device *dev); extern int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); /* accel_2d.c */ -extern void psbfb_copyarea(struct fb_info *info, - const struct fb_copyarea *region); -extern int psbfb_sync(struct fb_info *info); extern void psb_spank(struct drm_psb_private *dev_priv); /* psb_reset.c */ @@ -735,8 +728,6 @@ extern const struct drm_connector_helper_funcs extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs; /* gem.c */ -extern int psb_gem_get_aperture(struct drm_device *dev, void *data, - struct drm_file *file); extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index fee6fe810e74..d845657fd99c 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -43,7 +43,7 @@ static irqreturn_t hibmc_drm_interrupt(int irq, void *arg) return IRQ_HANDLED; } -static struct drm_driver hibmc_driver = { +static const struct drm_driver hibmc_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &hibmc_fops, .name = "hibmc", diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index d84d41f3e78f..aa6c53f88f7c 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -918,7 +918,7 @@ static const struct drm_mode_config_funcs ade_mode_config_funcs = { DEFINE_DRM_GEM_CMA_FOPS(ade_fops); -static struct drm_driver ade_driver = { +static const struct drm_driver ade_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ade_fops, DRM_GEM_CMA_DRIVER_OPS, diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h index dee8ec2f7f2e..386d137f29e5 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h @@ -40,7 +40,7 @@ struct kirin_drm_data { u32 num_planes; u32 prim_plane; - struct 
drm_driver *driver; + const struct drm_driver *driver; const struct drm_crtc_helper_funcs *crtc_helper_funcs; const struct drm_crtc_funcs *crtc_funcs; const struct drm_plane_helper_funcs *plane_helper_funcs; diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 096652921453..a9439b415603 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1492,11 +1492,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - intel_dsc_get_config(encoder, pipe_config); - /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */ pipe_config->port_clock = intel_dpll_get_freq(i915, - pipe_config->shared_dpll); + pipe_config->shared_dpll, + &pipe_config->dpll_hw_state); pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk; if (intel_dsi->dual_link) diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 86be032bcf96..e00fdc47c0eb 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -133,7 +133,6 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, struct drm_crtc_state *crtc_state; intel_hdcp_atomic_check(conn, old_state, new_state); - intel_psr_atomic_check(conn, old_state, new_state); if (!new_state->crtc) return 0; @@ -270,14 +269,15 @@ void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state) intel_crtc_put_color_blobs(crtc_state); } -void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state) +void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state, + const struct intel_crtc_state *from_crtc_state) { drm_property_replace_blob(&crtc_state->hw.degamma_lut, - crtc_state->uapi.degamma_lut); + from_crtc_state->uapi.degamma_lut); drm_property_replace_blob(&crtc_state->hw.gamma_lut, - crtc_state->uapi.gamma_lut); + from_crtc_state->uapi.gamma_lut); drm_property_replace_blob(&crtc_state->hw.ctm, - crtc_state->uapi.ctm); + from_crtc_state->uapi.ctm); } /** diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h index 285de07011dc..62a3365ed5e6 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.h +++ b/drivers/gpu/drm/i915/display/intel_atomic.h @@ -43,7 +43,8 @@ struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc); void intel_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state); void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state); -void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state); +void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state, + const struct intel_crtc_state *from_crtc_state); struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev); void intel_atomic_state_free(struct drm_atomic_state *state); void intel_atomic_state_clear(struct drm_atomic_state *state); diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 3334ff253600..7e9f84b00859 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -247,11 +247,19 @@ static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state) } void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, - const struct intel_plane_state *from_plane_state) + const 
struct intel_plane_state *from_plane_state, + struct intel_crtc *crtc) { intel_plane_clear_hw_state(plane_state); - plane_state->hw.crtc = from_plane_state->uapi.crtc; + /* + * For the bigjoiner slave uapi.crtc will point at + * the master crtc. So we explicitly assign the right + * slave crtc to hw.crtc. uapi.crtc!=NULL simply indicates + * the plane is logically enabled on the uapi level. + */ + plane_state->hw.crtc = from_plane_state->uapi.crtc ? &crtc->base : NULL; + plane_state->hw.fb = from_plane_state->uapi.fb; if (plane_state->hw.fb) drm_framebuffer_get(plane_state->hw.fb); @@ -263,6 +271,21 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding; plane_state->hw.color_range = from_plane_state->uapi.color_range; plane_state->hw.scaling_filter = from_plane_state->uapi.scaling_filter; + + plane_state->uapi.src = drm_plane_state_src(&from_plane_state->uapi); + plane_state->uapi.dst = drm_plane_state_dest(&from_plane_state->uapi); +} + +void intel_plane_copy_hw_state(struct intel_plane_state *plane_state, + const struct intel_plane_state *from_plane_state) +{ + intel_plane_clear_hw_state(plane_state); + + memcpy(&plane_state->hw, &from_plane_state->hw, + sizeof(plane_state->hw)); + + if (plane_state->hw.fb) + drm_framebuffer_get(plane_state->hw.fb); } void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, @@ -319,15 +342,16 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ old_plane_state, new_plane_state); } -static struct intel_crtc * -get_crtc_from_states(const struct intel_plane_state *old_plane_state, - const struct intel_plane_state *new_plane_state) +static struct intel_plane * +intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id) { - if (new_plane_state->uapi.crtc) - return to_intel_crtc(new_plane_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_plane *plane; - if (old_plane_state->uapi.crtc) - return to_intel_crtc(old_plane_state->uapi.crtc); + for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + if (plane->id == plane_id) + return plane; + } return NULL; } @@ -335,23 +359,37 @@ get_crtc_from_states(const struct intel_plane_state *old_plane_state, int intel_plane_atomic_check(struct intel_atomic_state *state, struct intel_plane *plane) { + struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_plane_state *new_plane_state = intel_atomic_get_new_plane_state(state, plane); const struct intel_plane_state *old_plane_state = intel_atomic_get_old_plane_state(state, plane); - struct intel_crtc *crtc = - get_crtc_from_states(old_plane_state, new_plane_state); - const struct intel_crtc_state *old_crtc_state; - struct intel_crtc_state *new_crtc_state; + const struct intel_plane_state *new_master_plane_state; + struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, plane->pipe); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (new_crtc_state && new_crtc_state->bigjoiner_slave) { + struct intel_plane *master_plane = + intel_crtc_get_plane(new_crtc_state->bigjoiner_linked_crtc, + plane->id); + + new_master_plane_state = + intel_atomic_get_new_plane_state(state, master_plane); + } else { + new_master_plane_state = new_plane_state; + } + + intel_plane_copy_uapi_to_hw_state(new_plane_state, + new_master_plane_state, + crtc); 
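Flowed through the diff, the bigjoiner resolution in the hunk above is hard to follow, so here it is condensed into a hypothetical helper (example_resolve_source_plane_state is not in the patch; the names it uses are, and it is assumed to sit inside intel_atomic_plane.c): a slave pipe has no uapi plane state of its own, so its hw state is derived from the plane with the same id on the master crtc, while hw.crtc is still pointed at the slave's own crtc.

static const struct intel_plane_state *
example_resolve_source_plane_state(struct intel_atomic_state *state,
				   struct intel_plane *plane,
				   const struct intel_crtc_state *new_crtc_state,
				   struct intel_plane_state *new_plane_state)
{
	/* Slave pipes have no uapi state of their own; mirror the master. */
	if (new_crtc_state && new_crtc_state->bigjoiner_slave) {
		struct intel_plane *master_plane =
			intel_crtc_get_plane(new_crtc_state->bigjoiner_linked_crtc,
					     plane->id);

		return intel_atomic_get_new_plane_state(state, master_plane);
	}

	return new_plane_state;	/* normal case: the plane mirrors itself */
}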
- intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state); new_plane_state->uapi.visible = false; - if (!crtc) + if (!new_crtc_state) return 0; - old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); - new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - return intel_plane_atomic_check_with_state(old_crtc_state, new_crtc_state, old_plane_state, @@ -479,6 +517,63 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, } } +int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, + struct intel_crtc_state *crtc_state, + int min_scale, int max_scale, + bool can_position) +{ + struct drm_framebuffer *fb = plane_state->hw.fb; + struct drm_rect *src = &plane_state->uapi.src; + struct drm_rect *dst = &plane_state->uapi.dst; + unsigned int rotation = plane_state->hw.rotation; + struct drm_rect clip = {}; + int hscale, vscale; + + if (!fb) { + plane_state->uapi.visible = false; + return 0; + } + + drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation); + + /* Check scaling */ + hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); + vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); + if (hscale < 0 || vscale < 0) { + DRM_DEBUG_KMS("Invalid scaling of plane\n"); + drm_rect_debug_print("src: ", src, true); + drm_rect_debug_print("dst: ", dst, false); + return -ERANGE; + } + + if (crtc_state->hw.enable) { + clip.x2 = crtc_state->pipe_src_w; + clip.y2 = crtc_state->pipe_src_h; + } + + /* right side of the image is on the slave crtc, adjust dst to match */ + if (crtc_state->bigjoiner_slave) + drm_rect_translate(dst, -crtc_state->pipe_src_w, 0); + + /* + * FIXME: This might need further adjustment for seamless scaling + * with phase information, for the 2p2 and 2p1 scenarios. 
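Driver-side use of the new clipping helper (introduced below as intel_atomic_plane_check_clipping) is a single call in a plane's check path. A sketch for a plane that cannot scale but may be positioned freely; the surrounding function is illustrative, assumed to live inside i915, and the no-scaling constants are the ones drm_plane_helper.h provides at this point in the tree:

#include <drm/drm_plane_helper.h>

static int example_plane_check(struct intel_crtc_state *crtc_state,
			       struct intel_plane_state *plane_state)
{
	int ret;

	/* 1:1 scaling only; the plane may sit anywhere inside the pipe */
	ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
						DRM_PLANE_HELPER_NO_SCALING,
						DRM_PLANE_HELPER_NO_SCALING,
						true);
	if (ret || !plane_state->uapi.visible)
		return ret;

	/* format, stride and tiling checks would follow here */
	return 0;
}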
+ */ + plane_state->uapi.visible = drm_rect_clip_scaled(src, dst, &clip); + + drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation); + + if (!can_position && plane_state->uapi.visible && + !drm_rect_equals(dst, &clip)) { + DRM_DEBUG_KMS("Plane must cover entire CRTC\n"); + drm_rect_debug_print("dst: ", dst, false); + drm_rect_debug_print("clip: ", &clip, false); + return -EINVAL; + } + + return 0; +} + const struct drm_plane_helper_funcs intel_plane_helper_funcs = { .prepare_fb = intel_prepare_plane_fb, .cleanup_fb = intel_cleanup_plane_fb, diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 59dd1fbb02ea..5c78a087ed86 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -24,7 +24,10 @@ unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state, unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, - const struct intel_plane_state *from_plane_state); + const struct intel_plane_state *from_plane_state, + struct intel_crtc *crtc); +void intel_plane_copy_hw_state(struct intel_plane_state *plane_state, + const struct intel_plane_state *from_plane_state); void intel_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); @@ -52,6 +55,10 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, struct intel_plane *plane, bool *need_cdclk_calc); +int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, + struct intel_crtc_state *crtc_state, + int min_scale, int max_scale, + bool can_position); void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 19b16517a502..92940a0c5ef8 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -28,6 +28,7 @@ #include <drm/drm_scdc_helper.h> #include "i915_drv.h" +#include "i915_trace.h" #include "intel_audio.h" #include "intel_combo_phy.h" #include "intel_connector.h" @@ -582,6 +583,34 @@ static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = { { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ }; +static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr[] = { + /* NT mV Trans mV db */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ + { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ + { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ + { 0xA, 0x35, 0x36, 0x00, 0x09 }, /* 200 350 4.9 */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ + { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ + { 0xA, 0x35, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ + { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ + { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ +}; + +static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[] = { + /* NT mV Trans mV db */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 250 1.9 */ + { 0x1, 0x7F, 0x3D, 0x00, 0x02 }, /* 200 300 3.5 */ + { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 200 350 4.9 
*/ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ + { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 300 1.6 */ + { 0xA, 0x35, 0x3A, 0x00, 0x05 }, /* 250 350 2.9 */ + { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ + { 0xA, 0x35, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ +}; + struct icl_mg_phy_ddi_buf_trans { u32 cri_txdeemph_override_11_6; u32 cri_txdeemph_override_5_0; @@ -1163,6 +1192,57 @@ ehl_get_combo_buf_trans(struct intel_encoder *encoder, } static const struct cnl_ddi_buf_trans * +jsl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; +} + +static const struct cnl_ddi_buf_trans * +jsl_get_combo_buf_trans_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); + return icl_combo_phy_ddi_translations_dp_hbr2; +} + +static const struct cnl_ddi_buf_trans * +jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (dev_priv->vbt.edp.low_vswing) { + if (crtc_state->port_clock > 270000) { + *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr2); + return jsl_combo_phy_ddi_translations_edp_hbr2; + } else { + *n_entries = ARRAY_SIZE(jsl_combo_phy_ddi_translations_edp_hbr); + return jsl_combo_phy_ddi_translations_edp_hbr; + } + } + + return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct cnl_ddi_buf_trans * +jsl_get_combo_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return jsl_get_combo_buf_trans_hdmi(encoder, crtc_state, n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); + else + return jsl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); +} + +static const struct cnl_ddi_buf_trans * tgl_get_combo_buf_trans_hdmi(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) @@ -1676,7 +1756,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder, encoder->port); else pipe_config->port_clock = - intel_dpll_get_freq(dev_priv, pipe_config->shared_dpll); + intel_dpll_get_freq(dev_priv, pipe_config->shared_dpll, + &pipe_config->dpll_hw_state); ddi_dotclock_get(pipe_config); } @@ -2216,13 +2297,6 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, intel_phy_is_tc(dev_priv, phy)) intel_display_power_get(dev_priv, intel_ddi_main_link_aux_domain(dig_port)); - - /* - * VDSC power is needed when DSC is enabled - */ - if (crtc_state->dsc.compression_enable) - intel_display_power_get(dev_priv, - intel_dsc_power_domain(crtc_state)); } void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder, @@ -2363,7 +2437,9 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp, else tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries); } else if (INTEL_GEN(dev_priv) == 11) { - if (IS_JSL_EHL(dev_priv)) + if (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE)) + jsl_get_combo_buf_trans(encoder, crtc_state, &n_entries); + else if (IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)) ehl_get_combo_buf_trans(encoder, 
crtc_state, &n_entries); else if (intel_phy_is_combo(dev_priv, phy)) icl_get_combo_buf_trans(encoder, crtc_state, &n_entries); @@ -2544,7 +2620,9 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, if (INTEL_GEN(dev_priv) >= 12) ddi_translations = tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries); - else if (IS_JSL_EHL(dev_priv)) + else if (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE)) + ddi_translations = jsl_get_combo_buf_trans(encoder, crtc_state, &n_entries); + else if (IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)) ddi_translations = ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries); else ddi_translations = icl_get_combo_buf_trans(encoder, crtc_state, &n_entries); @@ -2970,6 +3048,40 @@ static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, return 0; } +static void dg1_map_plls_to_ports(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_shared_dpll *pll = crtc_state->shared_dpll; + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + u32 val; + + /* + * If we fail this, something went very wrong: first 2 PLLs should be + * used by first 2 phys and last 2 PLLs by last phys + */ + if (drm_WARN_ON(&dev_priv->drm, + (pll->info->id < DPLL_ID_DG1_DPLL2 && phy >= PHY_C) || + (pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C))) + return; + + mutex_lock(&dev_priv->dpll.lock); + + val = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)); + drm_WARN_ON(&dev_priv->drm, + (val & DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)) == 0); + + val &= ~DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); + val |= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); + intel_de_write(dev_priv, DG1_DPCLKA_CFGCR0(phy), val); + intel_de_posting_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)); + + val &= ~DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); + intel_de_write(dev_priv, DG1_DPCLKA_CFGCR0(phy), val); + + mutex_unlock(&dev_priv->dpll.lock); +} + static void icl_map_plls_to_ports(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { @@ -3017,6 +3129,19 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder, mutex_unlock(&dev_priv->dpll.lock); } +static void dg1_unmap_plls_to_ports(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + + mutex_lock(&dev_priv->dpll.lock); + + intel_de_rmw(dev_priv, DG1_DPCLKA_CFGCR0(phy), 0, + DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)); + + mutex_unlock(&dev_priv->dpll.lock); +} + static void icl_unmap_plls_to_ports(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -3032,6 +3157,37 @@ static void icl_unmap_plls_to_ports(struct intel_encoder *encoder) mutex_unlock(&dev_priv->dpll.lock); } +static void dg1_sanitize_port_clk_off(struct drm_i915_private *dev_priv, + u32 port_mask, bool ddi_clk_needed) +{ + enum port port; + u32 val; + + for_each_port_masked(port, port_mask) { + enum phy phy = intel_port_to_phy(dev_priv, port); + bool ddi_clk_off; + + val = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)); + ddi_clk_off = val & DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); + + if (ddi_clk_needed == !ddi_clk_off) + continue; + + /* + * Punt on the case now where clock is gated, but it would + * be needed by the port. Something else is really broken then. 
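
dg1_map_plls_to_ports() and dg1_sanitize_port_clk_off() above follow the same read-modify-write discipline on DG1_DPCLKA_CFGCR0: program the PLL select field first, then flip the per-PHY clock-off bit, and at sanitize time gate any clock that is running without a user. A self-contained sketch of that pattern follows; the register layout (a 2-bit select plus one gate bit per PHY) is invented for the example and does not match the real register.

/*
 * Userspace model of the read-modify-write pattern used by
 * dg1_map_plls_to_ports() and dg1_sanitize_port_clk_off(). The register
 * layout is invented for illustration.
 */
#include <stdio.h>

#define CLK_SEL_SHIFT(phy)	((phy) * 3)
#define CLK_SEL_MASK(phy)	(0x3u << CLK_SEL_SHIFT(phy))
#define CLK_OFF(phy)		(0x4u << CLK_SEL_SHIFT(phy))

static unsigned int cfgcr0;	/* pretend firmware left all clocks ungated */

static void map_pll_to_phy(int phy, unsigned int pll_id)
{
	unsigned int val = cfgcr0;

	val &= ~CLK_SEL_MASK(phy);
	val |= pll_id << CLK_SEL_SHIFT(phy);
	cfgcr0 = val;			/* program the mux first... */

	cfgcr0 = val & ~CLK_OFF(phy);	/* ...then ungate the DDI clock */
}

static void sanitize(int phy, int clk_needed)
{
	int gated = cfgcr0 & CLK_OFF(phy);

	if (clk_needed == !gated)
		return;		/* hardware already consistent */

	if (clk_needed)
		return;		/* gated but needed: punt, like the driver */

	printf("PHY %c has an ungated but unused clock, gating it\n",
	       'A' + phy);
	cfgcr0 |= CLK_OFF(phy);
}

int main(void)
{
	map_pll_to_phy(0, 2);	/* PHY A driven by PLL 2 */
	sanitize(1, 0);		/* PHY B has no encoder attached */
	printf("cfgcr0 = 0x%08x\n", cfgcr0);
	return 0;
}

Note the ordering in map_pll_to_phy(): the select field is written while the clock is still gated, mirroring the two intel_de_write() calls in the hunk above.
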
+ */ + if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed)) + continue; + + drm_notice(&dev_priv->drm, + "PHY %c is disabled with an ungated DDI clock, gate it\n", + phy_name(phy)); + val |= DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); + intel_de_write(dev_priv, DG1_DPCLKA_CFGCR0(phy), val); + } +} + static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv, u32 port_mask, bool ddi_clk_needed) { @@ -3114,7 +3270,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) ddi_clk_needed = false; } - icl_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed); + if (IS_DG1(dev_priv)) + dg1_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed); + else + icl_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed); } static void intel_ddi_clk_select(struct intel_encoder *encoder, @@ -3507,7 +3666,8 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, /* 7.l Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); - intel_dsc_enable(encoder, crtc_state); + if (!crtc_state->bigjoiner) + intel_dsc_enable(encoder, crtc_state); } static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -3579,7 +3739,8 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, if (!is_mst) intel_ddi_enable_pipe_clock(encoder, crtc_state); - intel_dsc_enable(encoder, crtc_state); + if (!crtc_state->bigjoiner) + intel_dsc_enable(encoder, crtc_state); } static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -3666,7 +3827,9 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state, drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder); - if (INTEL_GEN(dev_priv) >= 11) + if (IS_DG1(dev_priv)) + dg1_map_plls_to_ports(encoder, crtc_state); + else if (INTEL_GEN(dev_priv) >= 11) icl_map_plls_to_ports(encoder, crtc_state); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); @@ -3828,6 +3991,21 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state, ilk_pfit_disable(old_crtc_state); } + if (old_crtc_state->bigjoiner_linked_crtc) { + struct intel_atomic_state *state = + to_intel_atomic_state(old_crtc_state->uapi.state); + struct intel_crtc *slave = + old_crtc_state->bigjoiner_linked_crtc; + const struct intel_crtc_state *old_slave_crtc_state = + intel_atomic_get_old_crtc_state(state, slave); + + intel_crtc_vblank_off(old_slave_crtc_state); + trace_intel_pipe_disable(slave); + + intel_dsc_disable(old_slave_crtc_state); + skl_scaler_disable(old_slave_crtc_state); + } + /* * When called from DP MST code: * - old_conn_state will be NULL @@ -3848,7 +4026,9 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state, intel_ddi_post_disable_dp(state, encoder, old_crtc_state, old_conn_state); - if (INTEL_GEN(dev_priv) >= 11) + if (IS_DG1(dev_priv)) + dg1_unmap_plls_to_ports(encoder); + else if (INTEL_GEN(dev_priv) >= 11) icl_unmap_plls_to_ports(encoder); if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port) @@ -4044,7 +4224,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state, { drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder); - intel_ddi_enable_transcoder_func(encoder, crtc_state); + if (!crtc_state->bigjoiner_slave) + intel_ddi_enable_transcoder_func(encoder, crtc_state); intel_enable_pipe(crtc_state); @@ -4396,20 +4577,14 @@ static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) crtc_state->sync_mode_slaves_mask); } -void intel_ddi_get_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) 
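
intel_ddi_read_func_ctl(), split out of intel_ddi_get_config() just below, mostly decodes TRANS_DDI_FUNC_CTL bitfields into pipe config values. A standalone sketch of the sync-polarity decode it starts with; the bit positions here are placeholders, not the real register layout.

/*
 * Sketch of the polarity-flag decode done by intel_ddi_read_func_ctl().
 * Bit positions are placeholders, not the TRANS_DDI_FUNC_CTL layout.
 */
#include <stdio.h>

#define FUNC_CTL_PHSYNC		(1u << 16)	/* placeholder */
#define FUNC_CTL_PVSYNC		(1u << 17)	/* placeholder */

#define MODE_FLAG_PHSYNC	(1u << 0)
#define MODE_FLAG_NHSYNC	(1u << 1)
#define MODE_FLAG_PVSYNC	(1u << 2)
#define MODE_FLAG_NVSYNC	(1u << 3)

static unsigned int decode_sync_flags(unsigned int func_ctl)
{
	unsigned int flags = 0;

	/* each hardware polarity bit selects one flag of a +/- pair */
	flags |= (func_ctl & FUNC_CTL_PHSYNC) ? MODE_FLAG_PHSYNC : MODE_FLAG_NHSYNC;
	flags |= (func_ctl & FUNC_CTL_PVSYNC) ? MODE_FLAG_PVSYNC : MODE_FLAG_NVSYNC;

	return flags;
}

int main(void)
{
	unsigned int temp = FUNC_CTL_PHSYNC;	/* +hsync, -vsync */

	printf("flags = 0x%x\n", decode_sync_flags(temp));	/* 0x9 */
	return 0;
}
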
+static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; u32 temp, flags = 0; - /* XXX: DSI transcoder paranoia */ - if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder))) - return; - - intel_dsc_get_config(encoder, pipe_config); - temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (temp & TRANS_DDI_PHSYNC) flags |= DRM_MODE_FLAG_PHSYNC; @@ -4503,6 +4678,30 @@ void intel_ddi_get_config(struct intel_encoder *encoder, default: break; } +} + +void intel_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; + + /* XXX: DSI transcoder paranoia */ + if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder))) + return; + + if (pipe_config->bigjoiner_slave) { + /* read out pipe settings from master */ + enum transcoder save = pipe_config->cpu_transcoder; + + /* Our own transcoder needs to be disabled when reading it in intel_ddi_read_func_ctl() */ + WARN_ON(pipe_config->output_types); + pipe_config->cpu_transcoder = (enum transcoder)pipe_config->bigjoiner_linked_crtc->pipe; + intel_ddi_read_func_ctl(encoder, pipe_config); + pipe_config->cpu_transcoder = save; + } else { + intel_ddi_read_func_ctl(encoder, pipe_config); + } pipe_config->has_audio = intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder); @@ -4528,7 +4727,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder, dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; } - intel_ddi_clock_get(encoder, pipe_config); + if (!pipe_config->bigjoiner_slave) + intel_ddi_clock_get(encoder, pipe_config); if (IS_GEN9_LP(dev_priv)) pipe_config->lane_lat_optim_mask = @@ -5126,6 +5326,9 @@ static enum hpd_pin cnl_hpd_pin(struct drm_i915_private *dev_priv, return HPD_PORT_A + port - PORT_A; } +#define port_tc_name(port) ((port) - PORT_TC1 + '1') +#define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1') + void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) { struct intel_digital_port *dig_port; @@ -5181,9 +5384,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) DRM_MODE_ENCODER_TMDS, "DDI %s%c/PHY %s%c", port >= PORT_TC1 ? "TC" : "", - port >= PORT_TC1 ? port_name(port) : port - PORT_TC1 + '1', + port >= PORT_TC1 ? port_tc_name(port) : port_name(port), tc_port != TC_PORT_NONE ? "TC" : "", - tc_port != TC_PORT_NONE ? phy_name(phy) : tc_port - TC_PORT_1 + '1'); + tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy)); } else if (INTEL_GEN(dev_priv) >= 11) { enum tc_port tc_port = intel_port_to_tc(dev_priv, port); @@ -5193,7 +5396,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) port_name(port), port >= PORT_C ? " (TC)" : "", tc_port != TC_PORT_NONE ? "TC" : "", - tc_port != TC_PORT_NONE ? phy_name(phy) : tc_port - TC_PORT_1 + '1'); + tc_port != TC_PORT_NONE ? 
tc_port_name(tc_port) : phy_name(phy)); } else { drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index cddbda5303ff..ba26545392bc 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3631,6 +3631,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct intel_plane *intel_plane = to_intel_plane(primary); struct intel_plane_state *intel_state = to_intel_plane_state(plane_state); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(intel_crtc->base.state); struct drm_framebuffer *fb; struct i915_vma *vma; @@ -3653,7 +3655,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, if (c == &intel_crtc->base) continue; - if (!to_intel_crtc(c)->active) + if (!to_intel_crtc_state(c->state)->uapi.active) continue; state = to_intel_plane_state(c->primary->state); @@ -3675,6 +3677,11 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, * pretend the BIOS never had it enabled. */ intel_plane_disable_noatomic(intel_crtc, intel_plane); + if (crtc_state->bigjoiner) { + struct intel_crtc *slave = + crtc_state->bigjoiner_linked_crtc; + intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary)); + } return; @@ -3711,7 +3718,8 @@ valid_fb: drm_framebuffer_get(fb); plane_state->crtc = &intel_crtc->base; - intel_plane_copy_uapi_to_hw_state(intel_state, intel_state); + intel_plane_copy_uapi_to_hw_state(intel_state, intel_state, + intel_crtc); intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); @@ -3719,127 +3727,6 @@ valid_fb: &to_intel_frontbuffer(fb)->bits); } -static int skl_max_plane_width(const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) -{ - int cpp = fb->format->cpp[color_plane]; - - switch (fb->modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - /* - * Validated limit is 4k, but has 5k should - * work apart from the following features: - * - Ytile (already limited to 4k) - * - FP16 (already limited to 4k) - * - render compression (already limited to 4k) - * - KVMR sprite and cursor (don't care) - * - horizontal panning (TODO verify this) - * - pipe and plane scaling (TODO verify this) - */ - if (cpp == 8) - return 4096; - else - return 5120; - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - /* FIXME AUX plane? */ - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - if (cpp == 8) - return 2048; - else - return 4096; - default: - MISSING_CASE(fb->modifier); - return 2048; - } -} - -static int glk_max_plane_width(const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) -{ - int cpp = fb->format->cpp[color_plane]; - - switch (fb->modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - if (cpp == 8) - return 4096; - else - return 5120; - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - /* FIXME AUX plane? 
*/ - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - if (cpp == 8) - return 2048; - else - return 5120; - default: - MISSING_CASE(fb->modifier); - return 2048; - } -} - -static int icl_min_plane_width(const struct drm_framebuffer *fb) -{ - /* Wa_14011264657, Wa_14011050563: gen11+ */ - switch (fb->format->format) { - case DRM_FORMAT_C8: - return 18; - case DRM_FORMAT_RGB565: - return 10; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_XVYU2101010: - case DRM_FORMAT_Y212: - case DRM_FORMAT_Y216: - return 6; - case DRM_FORMAT_NV12: - return 20; - case DRM_FORMAT_P010: - case DRM_FORMAT_P012: - case DRM_FORMAT_P016: - return 12; - case DRM_FORMAT_XRGB16161616F: - case DRM_FORMAT_XBGR16161616F: - case DRM_FORMAT_ARGB16161616F: - case DRM_FORMAT_ABGR16161616F: - case DRM_FORMAT_XVYU12_16161616: - case DRM_FORMAT_XVYU16161616: - return 4; - default: - return 1; - } -} - -static int icl_max_plane_width(const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) -{ - return 5120; -} - -static int skl_max_plane_height(void) -{ - return 4096; -} - -static int icl_max_plane_height(void) -{ - return 4320; -} static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, @@ -3897,35 +3784,55 @@ intel_plane_fence_y_offset(const struct intel_plane_state *plane_state) return y; } +static int intel_plane_min_width(struct intel_plane *plane, + const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + if (plane->min_width) + return plane->min_width(fb, color_plane, rotation); + else + return 1; +} + +static int intel_plane_max_width(struct intel_plane *plane, + const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + if (plane->max_width) + return plane->max_width(fb, color_plane, rotation); + else + return INT_MAX; +} + +static int intel_plane_max_height(struct intel_plane *plane, + const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + if (plane->max_height) + return plane->max_height(fb, color_plane, rotation); + else + return INT_MAX; +} + static int skl_check_main_surface(struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; int x = plane_state->uapi.src.x1 >> 16; int y = plane_state->uapi.src.y1 >> 16; int w = drm_rect_width(&plane_state->uapi.src) >> 16; int h = drm_rect_height(&plane_state->uapi.src) >> 16; - int max_width, min_width, max_height; - u32 alignment, offset; + int min_width = intel_plane_min_width(plane, fb, 0, rotation); + int max_width = intel_plane_max_width(plane, fb, 0, rotation); + int max_height = intel_plane_max_height(plane, fb, 0, rotation); int aux_plane = intel_main_to_aux_plane(fb, 0); u32 aux_offset = plane_state->color_plane[aux_plane].offset; - - if (INTEL_GEN(dev_priv) >= 11) { - max_width = icl_max_plane_width(fb, 0, rotation); - min_width = icl_min_plane_width(fb); - } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { - max_width = glk_max_plane_width(fb, 0, rotation); - min_width = 1; - } else { - max_width = 
skl_max_plane_width(fb, 0, rotation); - min_width = 1; - } - - if (INTEL_GEN(dev_priv) >= 11) - max_height = icl_max_plane_height(); - else - max_height = skl_max_plane_height(); + u32 alignment, offset; if (w > max_width || w < min_width || h > max_height) { drm_dbg_kms(&dev_priv->drm, @@ -4008,22 +3915,19 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *i915 = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; int uv_plane = 1; - int max_width = skl_max_plane_width(fb, uv_plane, rotation); - int max_height = 4096; + int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation); + int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation); int x = plane_state->uapi.src.x1 >> 17; int y = plane_state->uapi.src.y1 >> 17; int w = drm_rect_width(&plane_state->uapi.src) >> 17; int h = drm_rect_height(&plane_state->uapi.src) >> 17; u32 offset; - intel_add_fb_offsets(&x, &y, plane_state, uv_plane); - offset = intel_plane_compute_aligned_offset(&x, &y, - plane_state, uv_plane); - /* FIXME not quite sure how/if these apply to the chroma plane */ if (w > max_width || h > max_height) { drm_dbg_kms(&i915->drm, @@ -4032,6 +3936,10 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) return -EINVAL; } + intel_add_fb_offsets(&x, &y, plane_state, uv_plane); + offset = intel_plane_compute_aligned_offset(&x, &y, + plane_state, uv_plane); + if (is_ccs_modifier(fb->modifier)) { int ccs_plane = main_to_ccs_plane(fb, uv_plane); u32 aux_offset = plane_state->color_plane[ccs_plane].offset; @@ -4407,12 +4315,10 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state, if (ret) return ret; - ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, - &crtc_state->uapi, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - i9xx_plane_has_windowing(plane), - true); + ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + i9xx_plane_has_windowing(plane)); if (ret) return ret; @@ -4951,13 +4857,16 @@ static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) intel_has_gpu_reset(&dev_priv->gt)); } -void intel_prepare_reset(struct drm_i915_private *dev_priv) +void intel_display_prepare_reset(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; struct drm_atomic_state *state; int ret; + if (!HAS_DISPLAY(dev_priv)) + return; + /* reset doesn't touch the display */ if (!dev_priv->params.force_reset_modeset_test && !gpu_reset_clobbers_display(dev_priv)) @@ -5011,13 +4920,16 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) state->acquire_ctx = ctx; } -void intel_finish_reset(struct drm_i915_private *dev_priv) +void intel_display_finish_reset(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; struct drm_atomic_state *state; int ret; + if (!HAS_DISPLAY(dev_priv)) + return; + /* reset doesn't touch the display */ if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) return; @@ -6167,18 +6079,16 @@ skl_update_scaler(struct intel_crtc_state 
*crtc_state, bool force_detach, static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state) { - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; + const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; int width, height; if (crtc_state->pch_pfit.enabled) { width = drm_rect_width(&crtc_state->pch_pfit.dst); height = drm_rect_height(&crtc_state->pch_pfit.dst); } else { - width = adjusted_mode->crtc_hdisplay; - height = adjusted_mode->crtc_vdisplay; + width = pipe_mode->crtc_hdisplay; + height = pipe_mode->crtc_vdisplay; } - return skl_update_scaler(crtc_state, !crtc_state->hw.active, SKL_CRTC_INDEX, &crtc_state->scaler_state.scaler_id, @@ -7234,6 +7144,45 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) intel_de_write(dev_priv, reg, val); } +static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, + const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_crtc_state *master_crtc_state; + struct drm_connector_state *conn_state; + struct drm_connector *conn; + struct intel_encoder *encoder = NULL; + int i; + + if (crtc_state->bigjoiner_slave) + master = crtc_state->bigjoiner_linked_crtc; + + master_crtc_state = intel_atomic_get_new_crtc_state(state, master); + + for_each_new_connector_in_state(&state->base, conn, conn_state, i) { + if (conn_state->crtc != &master->base) + continue; + + encoder = to_intel_encoder(conn_state->best_encoder); + break; + } + + if (!crtc_state->bigjoiner_slave) { + /* need to enable VDSC, which we skipped in pre-enable */ + intel_dsc_enable(encoder, crtc_state); + } else { + /* + * Enable sequence steps 1-7 on bigjoiner master + */ + intel_encoders_pre_pll_enable(state, master); + intel_enable_shared_dpll(master_crtc_state); + intel_encoders_pre_enable(state, master); + + /* and DSC on slave */ + intel_dsc_enable(NULL, crtc_state); + } +} + static void hsw_crtc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -7247,34 +7196,37 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; - intel_encoders_pre_pll_enable(state, crtc); - - if (new_crtc_state->shared_dpll) - intel_enable_shared_dpll(new_crtc_state); + if (!new_crtc_state->bigjoiner) { + intel_encoders_pre_pll_enable(state, crtc); - intel_encoders_pre_enable(state, crtc); + if (new_crtc_state->shared_dpll) + intel_enable_shared_dpll(new_crtc_state); - if (!transcoder_is_dsi(cpu_transcoder)) - intel_set_transcoder_timings(new_crtc_state); + intel_encoders_pre_enable(state, crtc); + } else { + icl_ddi_bigjoiner_pre_enable(state, new_crtc_state); + } intel_set_pipe_src_size(new_crtc_state); + if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) + bdw_set_pipemisc(new_crtc_state); - if (cpu_transcoder != TRANSCODER_EDP && - !transcoder_is_dsi(cpu_transcoder)) - intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), - new_crtc_state->pixel_multiplier - 1); + if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) { + intel_set_transcoder_timings(new_crtc_state); - if (new_crtc_state->has_pch_encoder) - intel_cpu_transcoder_set_m_n(new_crtc_state, - &new_crtc_state->fdi_m_n, NULL); + if (cpu_transcoder != TRANSCODER_EDP) + intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), + new_crtc_state->pixel_multiplier - 1); + + if (new_crtc_state->has_pch_encoder) + intel_cpu_transcoder_set_m_n(new_crtc_state, + &new_crtc_state->fdi_m_n, 
NULL); - if (!transcoder_is_dsi(cpu_transcoder)) { hsw_set_frame_start_delay(new_crtc_state); - hsw_set_pipeconf(new_crtc_state); } - if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - bdw_set_pipemisc(new_crtc_state); + if (!transcoder_is_dsi(cpu_transcoder)) + hsw_set_pipeconf(new_crtc_state); crtc->active = true; @@ -7310,6 +7262,11 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, if (INTEL_GEN(dev_priv) >= 11) icl_pipe_mbus_enable(crtc); + if (new_crtc_state->bigjoiner_slave) { + trace_intel_pipe_enable(crtc); + intel_crtc_vblank_on(new_crtc_state); + } + intel_encoders_enable(state, crtc); if (psl_clkgate_wa) { @@ -7439,7 +7396,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) { if (phy == PHY_NONE) return false; - else if (IS_ROCKETLAKE(dev_priv)) + else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) return phy <= PHY_D; else if (IS_JSL_EHL(dev_priv)) return phy <= PHY_C; @@ -7451,7 +7408,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) { - if (IS_ROCKETLAKE(dev_priv)) + if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) return false; else if (INTEL_GEN(dev_priv) >= 12) return phy >= PHY_D && phy <= PHY_I; @@ -7463,7 +7420,7 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) { - if (IS_ROCKETLAKE(i915) && port >= PORT_TC1) + if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1) return PHY_C + port - PORT_TC1; else if (IS_JSL_EHL(i915) && port == PORT_D) return PHY_A; @@ -7604,6 +7561,9 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) if (crtc_state->shared_dpll) mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE); + if (crtc_state->dsc.compression_enable) + mask |= BIT_ULL(intel_dsc_power_domain(crtc_state)); + return mask; } @@ -8192,7 +8152,7 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state) { - u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock; + u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock; unsigned int pipe_w, pipe_h, pfit_w, pfit_h; /* @@ -8222,6 +8182,27 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state) pfit_w * pfit_h); } +static void intel_mode_from_crtc_timings(struct drm_display_mode *mode, + const struct drm_display_mode *timings) +{ + mode->hdisplay = timings->crtc_hdisplay; + mode->htotal = timings->crtc_htotal; + mode->hsync_start = timings->crtc_hsync_start; + mode->hsync_end = timings->crtc_hsync_end; + + mode->vdisplay = timings->crtc_vdisplay; + mode->vtotal = timings->crtc_vtotal; + mode->vsync_start = timings->crtc_vsync_start; + mode->vsync_end = timings->crtc_vsync_end; + + mode->flags = timings->flags; + mode->type = DRM_MODE_TYPE_DRIVER; + + mode->clock = timings->crtc_clock; + + drm_mode_set_name(mode); +} + static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); @@ -8229,19 +8210,75 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) if (HAS_GMCH(dev_priv)) /* FIXME calculate proper pipe pixel rate for GMCH pfit */ crtc_state->pixel_rate = - crtc_state->hw.adjusted_mode.crtc_clock; + crtc_state->hw.pipe_mode.crtc_clock; else crtc_state->pixel_rate = ilk_pipe_pixel_rate(crtc_state); } +static void 
intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state) +{ + struct drm_display_mode *mode = &crtc_state->hw.mode; + struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; + struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + + drm_mode_copy(pipe_mode, adjusted_mode); + + if (crtc_state->bigjoiner) { + /* + * transcoder is programmed to the full mode, + * but pipe timings are half of the transcoder mode + */ + pipe_mode->crtc_hdisplay /= 2; + pipe_mode->crtc_hblank_start /= 2; + pipe_mode->crtc_hblank_end /= 2; + pipe_mode->crtc_hsync_start /= 2; + pipe_mode->crtc_hsync_end /= 2; + pipe_mode->crtc_htotal /= 2; + pipe_mode->crtc_clock /= 2; + } + + intel_mode_from_crtc_timings(pipe_mode, pipe_mode); + intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode); + + intel_crtc_compute_pixel_rate(crtc_state); + + drm_mode_copy(mode, adjusted_mode); + mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner; + mode->vdisplay = crtc_state->pipe_src_h; +} + +static void intel_encoder_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + encoder->get_config(encoder, crtc_state); + + intel_crtc_readout_derived_state(crtc_state); +} + static int intel_crtc_compute_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; + struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode; int clock_limit = dev_priv->max_dotclk_freq; + drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode); + + /* Adjust pipe_mode for bigjoiner, with half the horizontal mode */ + if (pipe_config->bigjoiner) { + pipe_mode->crtc_clock /= 2; + pipe_mode->crtc_hdisplay /= 2; + pipe_mode->crtc_hblank_start /= 2; + pipe_mode->crtc_hblank_end /= 2; + pipe_mode->crtc_hsync_start /= 2; + pipe_mode->crtc_hsync_end /= 2; + pipe_mode->crtc_htotal /= 2; + pipe_config->pipe_src_w /= 2; + } + + intel_mode_from_crtc_timings(pipe_mode, pipe_mode); + if (INTEL_GEN(dev_priv) < 4) { clock_limit = dev_priv->max_cdclk_freq * 9 / 10; @@ -8250,16 +8287,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, * is > 90% of the (display) core speed. */ if (intel_crtc_supports_double_wide(crtc) && - adjusted_mode->crtc_clock > clock_limit) { + pipe_mode->crtc_clock > clock_limit) { clock_limit = dev_priv->max_dotclk_freq; pipe_config->double_wide = true; } } - if (adjusted_mode->crtc_clock > clock_limit) { + if (pipe_mode->crtc_clock > clock_limit) { drm_dbg_kms(&dev_priv->drm, "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", - adjusted_mode->crtc_clock, clock_limit, + pipe_mode->crtc_clock, clock_limit, yesno(pipe_config->double_wide)); return -EINVAL; } @@ -8302,7 +8339,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 
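
The bigjoiner adjustment made in intel_crtc_compute_config() above halves only the horizontal timings and the dotclock, since the two joined pipes split the image side by side while the transcoder runs the full mode. A runnable sketch of that arithmetic on a simplified mode struct; the 8K-ish timings are made up for the example.

/*
 * Runnable sketch of the bigjoiner halving in intel_crtc_compute_config():
 * each joined pipe gets half the horizontal timings and half the dotclock.
 */
#include <stdio.h>

struct mode {
	int crtc_clock;		/* kHz */
	int crtc_hdisplay, crtc_hblank_start, crtc_hblank_end;
	int crtc_hsync_start, crtc_hsync_end, crtc_htotal;
};

static void bigjoiner_halve(struct mode *m)
{
	m->crtc_clock /= 2;
	m->crtc_hdisplay /= 2;
	m->crtc_hblank_start /= 2;
	m->crtc_hblank_end /= 2;
	m->crtc_hsync_start /= 2;
	m->crtc_hsync_end /= 2;
	m->crtc_htotal /= 2;
	/* vertical timings are untouched: the split is horizontal only */
}

int main(void)
{
	struct mode pipe_mode = {
		.crtc_clock = 2376000,
		.crtc_hdisplay = 7680,
		.crtc_hblank_start = 7680,
		.crtc_hblank_end = 7920,
		.crtc_hsync_start = 7760,
		.crtc_hsync_end = 7824,
		.crtc_htotal = 7920,
	};

	bigjoiner_halve(&pipe_mode);

	printf("per-pipe: %d kHz, hdisplay %d, htotal %d\n",
	       pipe_mode.crtc_clock, pipe_mode.crtc_hdisplay,
	       pipe_mode.crtc_htotal);
	return 0;
}
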
*/ if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) && - adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) + pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay) return -EINVAL; intel_crtc_compute_pixel_rate(pipe_config); @@ -9113,30 +9150,6 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc, tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe)); pipe_config->pipe_src_h = (tmp & 0xffff) + 1; pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; - - pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h; - pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w; -} - -void intel_mode_from_pipe_config(struct drm_display_mode *mode, - struct intel_crtc_state *pipe_config) -{ - mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay; - mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal; - mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start; - mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end; - - mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay; - mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal; - mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start; - mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end; - - mode->flags = pipe_config->hw.adjusted_mode.flags; - mode->type = DRM_MODE_TYPE_DRIVER; - - mode->clock = pipe_config->hw.adjusted_mode.crtc_clock; - - drm_mode_set_name(mode); } static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) @@ -10703,6 +10716,7 @@ static void skl_get_initial_plane_config(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) { + struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *plane = to_intel_plane(crtc->base.primary); @@ -10719,6 +10733,12 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, drm_WARN_ON(dev, pipe != crtc->pipe); + if (crtc_state->bigjoiner) { + drm_dbg_kms(&dev_priv->drm, + "Unsupported bigjoiner configuration for initial FB\n"); + return; + } + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) { drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n"); @@ -10927,6 +10947,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { struct intel_shared_dpll *pll; enum intel_dpll_id pll_id; + bool pll_active; pipe_config->has_pch_encoder = true; @@ -10954,8 +10975,9 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, intel_get_shared_dpll_by_id(dev_priv, pll_id); pll = pipe_config->shared_dpll; - drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll, - &pipe_config->dpll_hw_state)); + pll_active = intel_dpll_get_hw_state(dev_priv, pll, + &pipe_config->dpll_hw_state); + drm_WARN_ON(dev, !pll_active); tmp = pipe_config->dpll_hw_state.dpll; pipe_config->pixel_multiplier = @@ -11003,19 +11025,32 @@ static int hsw_crtc_compute_clock(struct intel_crtc *crtc, return 0; } -static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, +static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, struct intel_crtc_state *pipe_config) { + enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT; + enum phy phy = intel_port_to_phy(dev_priv, port); + struct icl_port_dpll *port_dpll; + struct intel_shared_dpll *pll; enum intel_dpll_id id; - u32 temp; + bool pll_active; + u32 clk_sel; - temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & 
DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); - id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); + clk_sel = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)) & DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); + id = DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy); - if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2)) + if (WARN_ON(id > DPLL_ID_DG1_DPLL3)) return; - pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); + pll = intel_get_shared_dpll_by_id(dev_priv, id); + port_dpll = &pipe_config->icl_port_dplls[port_dpll_id]; + + port_dpll->pll = pll; + pll_active = intel_dpll_get_hw_state(dev_priv, pll, + &port_dpll->hw_state); + drm_WARN_ON(&dev_priv->drm, !pll_active); + + icl_set_active_port_dpll(pipe_config, port_dpll_id); } static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, @@ -11023,7 +11058,10 @@ static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, { enum phy phy = intel_port_to_phy(dev_priv, port); enum icl_port_dpll_id port_dpll_id; + struct icl_port_dpll *port_dpll; + struct intel_shared_dpll *pll; enum intel_dpll_id id; + bool pll_active; u32 temp; if (intel_phy_is_combo(dev_priv, phy)) { @@ -11058,17 +11096,46 @@ static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, return; } - pipe_config->icl_port_dplls[port_dpll_id].pll = - intel_get_shared_dpll_by_id(dev_priv, id); + pll = intel_get_shared_dpll_by_id(dev_priv, id); + port_dpll = &pipe_config->icl_port_dplls[port_dpll_id]; + + port_dpll->pll = pll; + pll_active = intel_dpll_get_hw_state(dev_priv, pll, + &port_dpll->hw_state); + drm_WARN_ON(&dev_priv->drm, !pll_active); icl_set_active_port_dpll(pipe_config, port_dpll_id); } +static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, + struct intel_crtc_state *pipe_config) +{ + struct intel_shared_dpll *pll; + enum intel_dpll_id id; + bool pll_active; + u32 temp; + + temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); + id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); + + if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2)) + return; + + pll = intel_get_shared_dpll_by_id(dev_priv, id); + + pipe_config->shared_dpll = pll; + pll_active = intel_dpll_get_hw_state(dev_priv, pll, + &pipe_config->dpll_hw_state); + drm_WARN_ON(&dev_priv->drm, !pll_active); +} + static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, struct intel_crtc_state *pipe_config) { + struct intel_shared_dpll *pll; enum intel_dpll_id id; + bool pll_active; switch (port) { case PORT_A: @@ -11085,13 +11152,20 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, return; } - pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); + pll = intel_get_shared_dpll_by_id(dev_priv, id); + + pipe_config->shared_dpll = pll; + pll_active = intel_dpll_get_hw_state(dev_priv, pll, + &pipe_config->dpll_hw_state); + drm_WARN_ON(&dev_priv->drm, !pll_active); } static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, struct intel_crtc_state *pipe_config) { + struct intel_shared_dpll *pll; enum intel_dpll_id id; + bool pll_active; u32 temp; temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); @@ -11100,14 +11174,21 @@ static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3)) return; - pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); + pll = 
intel_get_shared_dpll_by_id(dev_priv, id); + + pipe_config->shared_dpll = pll; + pll_active = intel_dpll_get_hw_state(dev_priv, pll, + &pipe_config->dpll_hw_state); + drm_WARN_ON(&dev_priv->drm, !pll_active); } static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, struct intel_crtc_state *pipe_config) { + struct intel_shared_dpll *pll; enum intel_dpll_id id; u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port)); + bool pll_active; switch (ddi_pll_sel) { case PORT_CLK_SEL_WRPLL1: @@ -11135,7 +11216,12 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, return; } - pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); + pll = intel_get_shared_dpll_by_id(dev_priv, id); + + pipe_config->shared_dpll = pll; + pll_active = intel_dpll_get_hw_state(dev_priv, pll, + &pipe_config->dpll_hw_state); + drm_WARN_ON(&dev_priv->drm, !pll_active); } static bool hsw_get_transcoder_state(struct intel_crtc *crtc, @@ -11295,7 +11381,6 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; - struct intel_shared_dpll *pll; enum port port; u32 tmp; @@ -11305,30 +11390,27 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc, } else { tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); + if (!(tmp & TRANS_DDI_FUNC_ENABLE)) + return; if (INTEL_GEN(dev_priv) >= 12) port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); else port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); } - if (INTEL_GEN(dev_priv) >= 11) + if (IS_DG1(dev_priv)) + dg1_get_ddi_pll(dev_priv, port, pipe_config); + else if (INTEL_GEN(dev_priv) >= 11) icl_get_ddi_pll(dev_priv, port, pipe_config); else if (IS_CANNONLAKE(dev_priv)) cnl_get_ddi_pll(dev_priv, port, pipe_config); - else if (IS_GEN9_BC(dev_priv)) - skl_get_ddi_pll(dev_priv, port, pipe_config); else if (IS_GEN9_LP(dev_priv)) bxt_get_ddi_pll(dev_priv, port, pipe_config); + else if (IS_GEN9_BC(dev_priv)) + skl_get_ddi_pll(dev_priv, port, pipe_config); else hsw_get_ddi_pll(dev_priv, port, pipe_config); - pll = pipe_config->shared_dpll; - if (pll) { - drm_WARN_ON(&dev_priv->drm, - !pll->info->funcs->get_hw_state(dev_priv, pll, - &pipe_config->dpll_hw_state)); - } - /* * Haswell has only FDI/PCH transcoder A, which is connected to * DDI E. So just check whether this pipe is wired to DDI E and whether @@ -11378,10 +11460,19 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, active = true; } - if (!active) - goto out; + intel_dsc_get_config(pipe_config); + + if (!active) { + /* bigjoiner slave doesn't enable transcoder */ + if (!pipe_config->bigjoiner_slave) + goto out; - if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || + active = true; + pipe_config->pixel_multiplier = 1; + + /* we cannot read out most state, so don't bother... */ + pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE; + } else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || INTEL_GEN(dev_priv) >= 11) { hsw_get_ddi_port_state(crtc, pipe_config); intel_get_transcoder_timings(crtc, pipe_config); @@ -11456,7 +11547,10 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, } } - if (pipe_config->cpu_transcoder != TRANSCODER_EDP && + if (pipe_config->bigjoiner_slave) { + /* Cannot be read out as a slave, set to 0.
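
Every per-platform *_get_ddi_pll() touched in this hunk now repeats one readout idiom: look the PLL up by the ID decoded from the clock-select register, snapshot its hardware state with intel_dpll_get_hw_state(), and warn if it reads back as inactive. A simplified userspace model of that idiom; the types and the PLL table are invented, the real lookup lives in intel_dpll_mgr.c.

/*
 * Simplified model of the readout idiom repeated by the *_get_ddi_pll()
 * functions in this hunk. PLL table and types are invented.
 */
#include <stdio.h>
#include <stdbool.h>

struct dpll_hw_state { unsigned int cfgcr0, cfgcr1; };

struct shared_dpll {
	const char *name;
	bool on;
	struct dpll_hw_state hw;
};

static struct shared_dpll plls[] = {
	{ "DPLL0", true,  { 0x1, 0x2 } },
	{ "DPLL1", false, { 0x0, 0x0 } },
};

/* like intel_dpll_get_hw_state(): snapshot state, report whether it's on */
static bool dpll_get_hw_state(const struct shared_dpll *pll,
			      struct dpll_hw_state *out)
{
	*out = pll->hw;
	return pll->on;
}

int main(void)
{
	unsigned int id = 0;	/* as decoded from a clock-select register */
	const struct shared_dpll *pll = &plls[id];
	struct dpll_hw_state state;
	bool active = dpll_get_hw_state(pll, &state);

	if (!active)		/* the driver drm_WARN_ON()s here */
		fprintf(stderr, "WARN: %s not active\n", pll->name);

	printf("%s: cfgcr0=0x%x cfgcr1=0x%x\n",
	       pll->name, state.cfgcr0, state.cfgcr1);
	return 0;
}
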
*/ + pipe_config->pixel_multiplier = 0; + } else if (pipe_config->cpu_transcoder != TRANSCODER_EDP && !transcoder_is_dsi(pipe_config->cpu_transcoder)) { pipe_config->pixel_multiplier = intel_de_read(dev_priv, @@ -11473,6 +11567,21 @@ out: return active; } +static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + if (!i915->display.get_pipe_config(crtc, crtc_state)) + return false; + + crtc_state->hw.active = true; + + intel_crtc_readout_derived_state(crtc_state); + + return true; +} + static u32 intel_cursor_base(const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = @@ -11578,6 +11687,8 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state, { const struct drm_framebuffer *fb = plane_state->hw.fb; struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_rect src = plane_state->uapi.src; + const struct drm_rect dst = plane_state->uapi.dst; int ret; if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { @@ -11585,17 +11696,16 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state, return -EINVAL; } - ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, - &crtc_state->uapi, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - true, true); + ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true); if (ret) return ret; /* Use the unclipped src/dst rectangles, which we program to hw */ - plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi); - plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi); + plane_state->uapi.src = src; + plane_state->uapi.dst = dst; ret = intel_cursor_check_surface(plane_state); if (ret) @@ -12469,15 +12579,15 @@ intel_encoder_current_mode(struct intel_encoder *encoder) return NULL; } - if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) { + if (!intel_crtc_get_pipe_config(crtc_state)) { kfree(crtc_state); kfree(mode); return NULL; } - encoder->get_config(encoder, crtc_state); + intel_encoder_get_config(encoder, crtc_state); - intel_mode_from_pipe_config(mode, crtc_state); + intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode); kfree(crtc_state); @@ -12663,7 +12773,7 @@ static bool encoders_cloneable(const struct intel_encoder *a, b->cloneable & (1 << a->type)); } -static bool check_single_encoder_cloning(struct drm_atomic_state *state, +static bool check_single_encoder_cloning(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { @@ -12672,7 +12782,7 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state, struct drm_connector_state *connector_state; int i; - for_each_new_connector_in_state(state, connector, connector_state, i) { + for_each_new_connector_in_state(&state->base, connector, connector_state, i) { if (connector_state->crtc != &crtc->base) continue; @@ -12787,7 +12897,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) memcpy(linked_state->color_plane, plane_state->color_plane, sizeof(linked_state->color_plane)); - intel_plane_copy_uapi_to_hw_state(linked_state, plane_state); + intel_plane_copy_hw_state(linked_state, plane_state); linked_state->uapi.src = plane_state->uapi.src; linked_state->uapi.dst = plane_state->uapi.dst; @@ -12821,15 +12931,15 @@ static bool c8_planes_changed(const struct intel_crtc_state 
*new_crtc_state) static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) { - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; + const struct drm_display_mode *pipe_mode = + &crtc_state->hw.pipe_mode; int linetime_wm; if (!crtc_state->hw.enable) return 0; - linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, - adjusted_mode->crtc_clock); + linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, + pipe_mode->crtc_clock); return min(linetime_wm, 0x1ff); } @@ -12837,14 +12947,14 @@ static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, const struct intel_cdclk_state *cdclk_state) { - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; + const struct drm_display_mode *pipe_mode = + &crtc_state->hw.pipe_mode; int linetime_wm; if (!crtc_state->hw.enable) return 0; - linetime_wm = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, + linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, cdclk_state->logical.cdclk); return min(linetime_wm, 0x1ff); @@ -12854,14 +12964,14 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; + const struct drm_display_mode *pipe_mode = + &crtc_state->hw.pipe_mode; int linetime_wm; if (!crtc_state->hw.enable) return 0; - linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8, + linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8, crtc_state->pixel_rate); /* Display WA #1135: BXT:ALL GLK:ALL */ @@ -12912,6 +13022,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, if (mode_changed && crtc_state->hw.enable && dev_priv->display.crtc_compute_clock && + !crtc_state->bigjoiner_slave && !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) { ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state); if (ret) @@ -13038,10 +13149,11 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, case 10 ... 11: bpp = 10 * 3; break; - case 12: + case 12 ... 16: bpp = 12 * 3; break; default: + MISSING_CASE(conn_state->max_bpc); return -EINVAL; } @@ -13218,11 +13330,11 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state) } drm_dbg_kms(&i915->drm, - "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n", + "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n", plane->base.base.id, plane->base.name, fb->base.id, fb->width, fb->height, drm_get_format_name(fb->format->format, &format_name), - yesno(plane_state->uapi.visible)); + fb->modifier, yesno(plane_state->uapi.visible)); drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n", plane_state->hw.rotation, plane_state->scaler_id); if (plane_state->uapi.visible) @@ -13270,6 +13382,10 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, transcoder_name(pipe_config->master_transcoder), pipe_config->sync_mode_slaves_mask); + drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n", + pipe_config->bigjoiner_slave ? "slave" : + pipe_config->bigjoiner ? 
"master" : "no"); + if (pipe_config->has_pch_encoder) intel_dump_m_n_config(pipe_config, "fdi", pipe_config->fdi_lanes, @@ -13317,6 +13433,9 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n"); drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode); intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode); + drm_dbg_kms(&dev_priv->drm, "pipe mode:\n"); + drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode); + intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode); drm_dbg_kms(&dev_priv->drm, "port clock: %d, pipe src size: %dx%d, pixel rate %d\n", pipe_config->port_clock, @@ -13448,24 +13567,41 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state) } static void -intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state) +intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, + struct intel_crtc_state *crtc_state) { - intel_crtc_copy_color_blobs(crtc_state); + const struct intel_crtc_state *from_crtc_state = crtc_state; + + if (crtc_state->bigjoiner_slave) { + from_crtc_state = intel_atomic_get_new_crtc_state(state, + crtc_state->bigjoiner_linked_crtc); + + /* No need to copy state if the master state is unchanged */ + if (!from_crtc_state) + return; + } + + intel_crtc_copy_color_blobs(crtc_state, from_crtc_state); } static void -intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state) +intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state, + struct intel_crtc_state *crtc_state) { crtc_state->hw.enable = crtc_state->uapi.enable; crtc_state->hw.active = crtc_state->uapi.active; crtc_state->hw.mode = crtc_state->uapi.mode; crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode; crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter; - intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state); + + intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state); } static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state) { + if (crtc_state->bigjoiner_slave) + return; + crtc_state->uapi.enable = crtc_state->hw.enable; crtc_state->uapi.active = crtc_state->hw.active; drm_WARN_ON(crtc_state->uapi.crtc->dev, @@ -13484,7 +13620,49 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state } static int -intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state) +copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state, + const struct intel_crtc_state *from_crtc_state) +{ + struct intel_crtc_state *saved_state; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL); + if (!saved_state) + return -ENOMEM; + + saved_state->uapi = crtc_state->uapi; + saved_state->scaler_state = crtc_state->scaler_state; + saved_state->shared_dpll = crtc_state->shared_dpll; + saved_state->dpll_hw_state = crtc_state->dpll_hw_state; + saved_state->crc_enabled = crtc_state->crc_enabled; + + intel_crtc_free_hw_state(crtc_state); + memcpy(crtc_state, saved_state, sizeof(*crtc_state)); + kfree(saved_state); + + /* Re-init hw state */ + memset(&crtc_state->hw, 0, sizeof(saved_state->hw)); + crtc_state->hw.enable = from_crtc_state->hw.enable; + crtc_state->hw.active = from_crtc_state->hw.active; + crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode; + crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode; + + /* Some fixups */ + crtc_state->uapi.mode_changed = 
from_crtc_state->uapi.mode_changed; + crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed; + crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed; + crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0; + crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc); + crtc_state->bigjoiner_slave = true; + crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe; + crtc_state->has_audio = false; + + return 0; +} + +static int +intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, + struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -13516,16 +13694,16 @@ intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state) memcpy(crtc_state, saved_state, sizeof(*crtc_state)); kfree(saved_state); - intel_crtc_copy_uapi_to_hw_state(crtc_state); + intel_crtc_copy_uapi_to_hw_state(state, crtc_state); return 0; } static int -intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) +intel_modeset_pipe_config(struct intel_atomic_state *state, + struct intel_crtc_state *pipe_config) { struct drm_crtc *crtc = pipe_config->uapi.crtc; - struct drm_atomic_state *state = pipe_config->uapi.state; struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); struct drm_connector *connector; struct drm_connector_state *connector_state; @@ -13567,7 +13745,7 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) &pipe_config->pipe_src_w, &pipe_config->pipe_src_h); - for_each_new_connector_in_state(state, connector, connector_state, i) { + for_each_new_connector_in_state(&state->base, connector, connector_state, i) { struct intel_encoder *encoder = to_intel_encoder(connector_state->best_encoder); @@ -13605,7 +13783,7 @@ encoder_retry: * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. 
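
The connector loop here feeds the encoder_retry: logic visible in the hunk context: an encoder's ->compute_config() may adjust the configuration instead of rejecting it outright, and the whole computation reruns until it settles or the retry budget is spent. A minimal sketch of that bounded negotiation loop, with a stand-in config struct and compute callback.

/*
 * Minimal model of the encoder_retry: loop in intel_modeset_pipe_config().
 * The config struct and compute callback are stand-ins.
 */
#include <stdio.h>

struct config { int port_clock; int bpp; };

/* returns nonzero if it changed something and wants a recompute */
static int encoder_compute(struct config *c)
{
	if (c->bpp > 24) {
		c->bpp = 24;	/* adjust rather than reject the mode */
		return 1;
	}
	return 0;
}

int main(void)
{
	struct config cfg = { .port_clock = 270000, .bpp = 30 };
	int retry = 3;

	while (retry--) {
		if (!encoder_compute(&cfg))
			break;	/* settled: no encoder asked for changes */
	}

	if (retry < 0) {
		fprintf(stderr, "couldn't settle the pipe config\n");
		return 1;
	}

	printf("settled: %d kHz, %d bpp\n", cfg.port_clock, cfg.bpp);
	return 0;
}
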
*/ - for_each_new_connector_in_state(state, connector, connector_state, i) { + for_each_new_connector_in_state(&state->base, connector, connector_state, i) { struct intel_encoder *encoder = to_intel_encoder(connector_state->best_encoder); @@ -14065,21 +14243,53 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(output_types); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); - - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); - - PIPE_CONF_CHECK_I(pixel_multiplier); + /* FIXME do the readout properly and get rid of this quirk */ + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end); + + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end); + + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); + + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); + + PIPE_CONF_CHECK_I(pixel_multiplier); + + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_INTERLACE); + + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_PHSYNC); + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_NHSYNC); + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_PVSYNC); + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_NVSYNC); + } + } + PIPE_CONF_CHECK_I(output_format); PIPE_CONF_CHECK_BOOL(has_hdmi_sink); if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || @@ -14089,24 +14299,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(hdmi_scrambling); PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); PIPE_CONF_CHECK_BOOL(has_infoframe); - PIPE_CONF_CHECK_BOOL(fec_enable); + /* FIXME do the readout properly and get rid of this quirk */ + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) + PIPE_CONF_CHECK_BOOL(fec_enable); PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); - 
PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_INTERLACE); - - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_PHSYNC); - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_NHSYNC); - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_PVSYNC); - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_NVSYNC); - } - PIPE_CONF_CHECK_X(gmch_pfit.control); /* pfit ratios are autocomputed by the hw on gen4+ */ if (INTEL_GEN(dev_priv) < 4) @@ -14132,7 +14330,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, } PIPE_CONF_CHECK_I(scaler_state.scaler_id); - PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); + /* FIXME do the readout properly and get rid of this quirk */ + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) + PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); PIPE_CONF_CHECK_X(gamma_mode); if (IS_CHERRYVIEW(dev_priv)) @@ -14153,48 +14353,53 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(double_wide); PIPE_CONF_CHECK_P(shared_dpll); - PIPE_CONF_CHECK_X(dpll_hw_state.dpll); - PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); - PIPE_CONF_CHECK_X(dpll_hw_state.fp0); - PIPE_CONF_CHECK_X(dpll_hw_state.fp1); - PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); - PIPE_CONF_CHECK_X(dpll_hw_state.spll); - PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); - PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); - PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); - PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); - PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); - PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); - PIPE_CONF_CHECK_X(dpll_hw_state.pll0); - PIPE_CONF_CHECK_X(dpll_hw_state.pll1); - PIPE_CONF_CHECK_X(dpll_hw_state.pll2); - PIPE_CONF_CHECK_X(dpll_hw_state.pll3); - PIPE_CONF_CHECK_X(dpll_hw_state.pll6); - PIPE_CONF_CHECK_X(dpll_hw_state.pll8); - PIPE_CONF_CHECK_X(dpll_hw_state.pll9); - PIPE_CONF_CHECK_X(dpll_hw_state.pll10); - PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); - PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); - - PIPE_CONF_CHECK_X(dsi_pll.ctrl); - PIPE_CONF_CHECK_X(dsi_pll.div); - - if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) - PIPE_CONF_CHECK_I(pipe_bpp); - - PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); - PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); - - PIPE_CONF_CHECK_I(min_voltage_level); + + /* FIXME do the readout properly and get rid of this quirk */ + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { + PIPE_CONF_CHECK_X(dpll_hw_state.dpll); + PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); + PIPE_CONF_CHECK_X(dpll_hw_state.fp0); + PIPE_CONF_CHECK_X(dpll_hw_state.fp1); + PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); + PIPE_CONF_CHECK_X(dpll_hw_state.spll); + PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); + PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); + PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); + PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); + PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); + PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); + PIPE_CONF_CHECK_X(dpll_hw_state.pll0); + PIPE_CONF_CHECK_X(dpll_hw_state.pll1); + 
PIPE_CONF_CHECK_X(dpll_hw_state.pll2); + PIPE_CONF_CHECK_X(dpll_hw_state.pll3); + PIPE_CONF_CHECK_X(dpll_hw_state.pll6); + PIPE_CONF_CHECK_X(dpll_hw_state.pll8); + PIPE_CONF_CHECK_X(dpll_hw_state.pll9); + PIPE_CONF_CHECK_X(dpll_hw_state.pll10); + PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); + PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); + PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); + PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); + PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); + PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); + PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); + PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); + PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); + PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); + PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); + + PIPE_CONF_CHECK_X(dsi_pll.ctrl); + PIPE_CONF_CHECK_X(dsi_pll.div); + + if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) + PIPE_CONF_CHECK_I(pipe_bpp); + + PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); + + PIPE_CONF_CHECK_I(min_voltage_level); + } PIPE_CONF_CHECK_X(infoframes.enable); PIPE_CONF_CHECK_X(infoframes.gcp); @@ -14206,6 +14411,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(sync_mode_slaves_mask); PIPE_CONF_CHECK_I(master_transcoder); + PIPE_CONF_CHECK_BOOL(bigjoiner); + PIPE_CONF_CHECK_BOOL(bigjoiner_slave); + PIPE_CONF_CHECK_P(bigjoiner_linked_crtc); PIPE_CONF_CHECK_I(dsc.compression_enable); PIPE_CONF_CHECK_I(dsc.dsc_split); @@ -14477,6 +14685,7 @@ verify_crtc_state(struct intel_crtc *crtc, struct intel_encoder *encoder; struct intel_crtc_state *pipe_config = old_crtc_state; struct drm_atomic_state *state = old_crtc_state->uapi.state; + struct intel_crtc *master = crtc; __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); intel_crtc_free_hw_state(old_crtc_state); @@ -14488,8 +14697,7 @@ verify_crtc_state(struct intel_crtc *crtc, pipe_config->hw.enable = new_crtc_state->hw.enable; - pipe_config->hw.active = - dev_priv->display.get_pipe_config(crtc, pipe_config); + intel_crtc_get_pipe_config(pipe_config); /* we keep both pipes enabled on 830 */ if (IS_I830(dev_priv) && pipe_config->hw.active) @@ -14505,7 +14713,10 @@ verify_crtc_state(struct intel_crtc *crtc, "(expected %i, found %i)\n", new_crtc_state->hw.active, crtc->active); - for_each_encoder_on_crtc(dev, &crtc->base, encoder) { + if (new_crtc_state->bigjoiner_slave) + master = new_crtc_state->bigjoiner_linked_crtc; + + for_each_encoder_on_crtc(dev, &master->base, encoder) { enum pipe pipe; bool active; @@ -14515,16 +14726,14 @@ verify_crtc_state(struct intel_crtc *crtc, encoder->base.base.id, active, new_crtc_state->hw.active); - I915_STATE_WARN(active && crtc->pipe != pipe, + I915_STATE_WARN(active && master->pipe != pipe, "Encoder connected to wrong pipe %c\n", pipe_name(pipe)); if (active) - encoder->get_config(encoder, pipe_config); + intel_encoder_get_config(encoder, pipe_config); } - intel_crtc_compute_pixel_rate(pipe_config); - if (!new_crtc_state->hw.active) return; @@ -14565,7 +14774,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name); - active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state); + active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state); if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { I915_STATE_WARN(!pll->on && pll->active_mask, @@ -14901,6 +15110,44 
@@ static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11); } +static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_crtc *other) +{ + const struct intel_plane_state *plane_state; + struct intel_plane *plane; + u8 plane_ids = 0; + int i; + + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + if (plane->pipe == crtc->pipe) + plane_ids |= BIT(plane->id); + } + + return intel_crtc_add_planes_to_state(state, other, plane_ids); +} + +static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state) +{ + const struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + int ret; + + if (!crtc_state->bigjoiner) + continue; + + ret = intel_crtc_add_bigjoiner_planes(state, crtc, + crtc_state->bigjoiner_linked_crtc); + if (ret) + return ret; + } + + return 0; +} + static int intel_atomic_check_planes(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); @@ -14914,6 +15161,10 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state) if (ret) return ret; + ret = intel_bigjoiner_add_affected_planes(state); + if (ret) + return ret; + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { ret = intel_plane_atomic_check(state, plane); if (ret) { @@ -15052,6 +15303,75 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, return false; } +static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *slave_crtc_state, *master_crtc_state; + struct intel_crtc *slave, *master; + + /* slave being enabled, is the master still claiming this crtc? */ + if (old_crtc_state->bigjoiner_slave) { + slave = crtc; + master = old_crtc_state->bigjoiner_linked_crtc; + master_crtc_state = intel_atomic_get_new_crtc_state(state, master); + if (!master_crtc_state || !needs_modeset(master_crtc_state)) + goto claimed; + } + + if (!new_crtc_state->bigjoiner) + return 0; + + if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) { + DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires " + "CRTC + 1 to be used, doesn't exist\n", + crtc->base.base.id, crtc->base.name); + return -EINVAL; + } + + slave = new_crtc_state->bigjoiner_linked_crtc = + intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1); + slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave); + master = crtc; + if (IS_ERR(slave_crtc_state)) + return PTR_ERR(slave_crtc_state); + + /* master being enabled, slave was already configured?
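+	 *
+	 * (Pairing-rule sketch, illustrative only: the slave is always the
+	 * immediately following pipe, as enforced by the INTEL_NUM_PIPES()
+	 * check above, so the last pipe can never act as a master:
+	 *
+	 *	static bool can_be_bigjoiner_master(enum pipe pipe, int num_pipes)
+	 *	{
+	 *		return pipe + 1 < num_pipes;
+	 *	}
+	 * )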
*/ + if (slave_crtc_state->uapi.enable) + goto claimed; + + DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n", + slave->base.base.id, slave->base.name); + + return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state); + +claimed: + DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but " + "[CRTC:%d:%s] is claiming this CRTC for bigjoiner.\n", + slave->base.base.id, slave->base.name, + master->base.base.id, master->base.name); + return -EINVAL; +} + +static int kill_bigjoiner_slave(struct intel_atomic_state *state, + struct intel_crtc_state *master_crtc_state) +{ + struct intel_crtc_state *slave_crtc_state = + intel_atomic_get_crtc_state(&state->base, + master_crtc_state->bigjoiner_linked_crtc); + + if (IS_ERR(slave_crtc_state)) + return PTR_ERR(slave_crtc_state); + + slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false; + slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false; + slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL; + intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state); + return 0; +} + /** * DOC: asynchronous flip implementation * @@ -15185,6 +15505,27 @@ static int intel_atomic_check_async(struct intel_atomic_state *state) return 0; } +static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state) +{ + const struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + struct intel_crtc_state *linked_crtc_state; + + if (!crtc_state->bigjoiner) + continue; + + linked_crtc_state = intel_atomic_get_crtc_state(&state->base, + crtc_state->bigjoiner_linked_crtc); + if (IS_ERR(linked_crtc_state)) + return PTR_ERR(linked_crtc_state); + } + + return 0; +} + /** * intel_atomic_check - validate state object * @dev: drm device @@ -15210,23 +15551,44 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; + ret = intel_bigjoiner_add_affected_crtcs(state); + if (ret) + goto fail; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!needs_modeset(new_crtc_state)) { /* Light copy */ - intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state); + intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state); continue; } - ret = intel_crtc_prepare_cleared_state(new_crtc_state); + /* Kill old bigjoiner link, we may re-establish afterwards */ + if (old_crtc_state->bigjoiner && !old_crtc_state->bigjoiner_slave) { + ret = kill_bigjoiner_slave(state, new_crtc_state); + if (ret) + goto fail; + } + + if (!new_crtc_state->uapi.enable) { + if (!new_crtc_state->bigjoiner_slave) { + intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state); + any_ms = true; + } + continue; + } + + ret = intel_crtc_prepare_cleared_state(state, new_crtc_state); if (ret) goto fail; - if (!new_crtc_state->hw.enable) - continue; + ret = intel_modeset_pipe_config(state, new_crtc_state); + if (ret) + goto fail; - ret = intel_modeset_pipe_config(new_crtc_state); + ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state, + new_crtc_state); if (ret) goto fail; } @@ -15528,6 +15890,9 @@ static void intel_enable_crtc(struct intel_atomic_state *state, dev_priv->display.crtc_enable(state, crtc); + if (new_crtc_state->bigjoiner_slave) + return; + /* vblanks work again, re-enable pipe CRC.
*/ intel_crtc_enable_pipe_crc(crtc); } @@ -15582,7 +15947,6 @@ static void intel_update_crtc(struct intel_atomic_state *state, intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); } - static void intel_old_crtc_state_disables(struct intel_atomic_state *state, struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state, @@ -15590,9 +15954,22 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state, { struct drm_i915_private *dev_priv = to_i915(state->base.dev); + drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave); + intel_crtc_disable_planes(state, crtc); /* + * We still need special handling for disabling the bigjoiner master + * and slave: the slave has no encoder or PLLs of its own, so we + * don't need to disable those. + */ + if (old_crtc_state->bigjoiner) { + intel_crtc_disable_planes(state, + old_crtc_state->bigjoiner_linked_crtc); + old_crtc_state->bigjoiner_linked_crtc->active = false; + } + + /* + * We need to disable pipe CRC before disabling the pipe, * or we race against vblank off. */ @@ -15620,7 +15997,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) /* Only disable port sync and MST slaves */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!needs_modeset(new_crtc_state)) + if (!needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner) continue; if (!old_crtc_state->hw.active) @@ -15645,10 +16022,18 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!needs_modeset(new_crtc_state) || - (handled & BIT(crtc->pipe))) + (handled & BIT(crtc->pipe)) || + old_crtc_state->bigjoiner_slave) continue; intel_pre_plane_update(state, crtc); + if (old_crtc_state->bigjoiner) { + struct intel_crtc *slave = + old_crtc_state->bigjoiner_linked_crtc; + + intel_pre_plane_update(state, slave); + } + if (old_crtc_state->hw.active) intel_old_crtc_state_disables(state, old_crtc_state, new_crtc_state, crtc); @@ -15746,7 +16131,8 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) continue; if (intel_dp_mst_is_slave_trans(new_crtc_state) || - is_trans_port_sync_master(new_crtc_state)) + is_trans_port_sync_master(new_crtc_state) || + (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave)) continue; modeset_pipes &= ~BIT(pipe); @@ -15756,7 +16142,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) /* * Then we enable all remaining pipes that depend on other - * pipes: MST slaves and port sync masters.
+ * pipes: MST slaves, port sync masters and big joiner masters */ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { enum pipe pipe = crtc->pipe; @@ -16548,9 +16934,11 @@ intel_legacy_cursor_update(struct drm_plane *_plane, /* * When crtc is inactive or there is a modeset pending, * wait for it to complete in the slowpath + * + * FIXME bigjoiner fastpath would be good */ if (!crtc_state->hw.active || needs_modeset(crtc_state) || - crtc_state->update_pipe) + crtc_state->update_pipe || crtc_state->bigjoiner) goto slow; /* @@ -16596,7 +16984,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane, new_plane_state->uapi.crtc_w = crtc_w; new_plane_state->uapi.crtc_h = crtc_h; - intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state); + intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc); ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, old_plane_state, new_plane_state); @@ -17213,7 +17601,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - if (IS_ROCKETLAKE(dev_priv)) { + if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_TC1); @@ -17223,7 +17611,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_TC1); intel_ddi_init(dev_priv, PORT_TC2); - intel_ddi_init(dev_priv, PORT_TC2); + intel_ddi_init(dev_priv, PORT_TC3); intel_ddi_init(dev_priv, PORT_TC4); intel_ddi_init(dev_priv, PORT_TC5); intel_ddi_init(dev_priv, PORT_TC6); @@ -17753,7 +18141,8 @@ intel_mode_valid(struct drm_device *dev, enum drm_mode_status intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, - const struct drm_display_mode *mode) + const struct drm_display_mode *mode, + bool bigjoiner) { int plane_width_max, plane_height_max; @@ -17770,7 +18159,7 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, * too big for that. */ if (INTEL_GEN(dev_priv) >= 11) { - plane_width_max = 5120; + plane_width_max = 5120 << bigjoiner; plane_height_max = 4320; } else { plane_width_max = 5120; @@ -18311,7 +18700,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) for_each_intel_crtc(dev, crtc) { struct intel_initial_plane_config plane_config = {}; - if (!crtc->active) + if (!to_intel_crtc_state(crtc->base.state)->uapi.active) continue; /* @@ -18340,16 +18729,6 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) if (!HAS_GMCH(i915)) sanitize_watermarks(i915); - /* - * Force all active planes to recompute their states. So that on - * mode_setcrtc after probe, all the intel_plane_state variables - * are already calculated and there is no assert_plane warnings - * during bootup. - */ - ret = intel_initial_commit(dev); - if (ret) - drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n"); - return 0; } @@ -18358,11 +18737,21 @@ int intel_modeset_init(struct drm_i915_private *i915) { int ret; - intel_overlay_setup(i915); - if (!HAS_DISPLAY(i915)) return 0; + /* + * Force all active planes to recompute their states. So that on + * mode_setcrtc after probe, all the intel_plane_state variables + * are already calculated and there is no assert_plane warnings + * during bootup.
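+	 *
+	 * (A hedged sketch of the idea behind intel_initial_commit(), using
+	 * only core helpers: duplicate the current state and commit it
+	 * unchanged so the derived plane state is computed once at probe:
+	 *
+	 *	state = drm_atomic_helper_duplicate_state(dev, &ctx);
+	 *	if (!IS_ERR(state))
+	 *		ret = drm_atomic_commit(state);
+	 * )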
+ */ + ret = intel_initial_commit(&i915->drm); + if (ret) + return ret; + + intel_overlay_setup(i915); + ret = intel_fbdev_init(&i915->drm); if (ret) return ret; @@ -18373,8 +18762,6 @@ int intel_modeset_init(struct drm_i915_private *i915) intel_init_ipc(i915); - intel_psr_set_force_mode_changed(i915->psr.dp); - return 0; } @@ -18624,7 +19011,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, /* Adjust the state of the output pipe according to whether we * have active connectors/encoders. */ - if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc)) + if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) && + !crtc_state->bigjoiner_slave) intel_crtc_disable_noatomic(crtc, ctx); if (crtc_state->hw.active || HAS_GMCH(dev_priv)) { @@ -18803,8 +19191,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) intel_crtc_free_hw_state(crtc_state); intel_crtc_state_reset(crtc_state, crtc); - crtc_state->hw.active = crtc_state->hw.enable = - dev_priv->display.get_pipe_config(crtc, crtc_state); + intel_crtc_get_pipe_config(crtc_state); + + crtc_state->hw.enable = crtc_state->hw.active; crtc->base.enabled = crtc_state->hw.enable; crtc->active = crtc_state->hw.active; @@ -18835,9 +19224,19 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc_state = to_intel_crtc_state(crtc->base.state); encoder->base.crtc = &crtc->base; - encoder->get_config(encoder, crtc_state); + intel_encoder_get_config(encoder, crtc_state); if (encoder->sync_state) encoder->sync_state(encoder, crtc_state); + + /* read out to slave crtc as well for bigjoiner */ + if (crtc_state->bigjoiner) { + /* encoder should be linked to the bigjoiner master */ + WARN_ON(crtc_state->bigjoiner_slave); + + crtc = crtc_state->bigjoiner_linked_crtc; + crtc_state = to_intel_crtc_state(crtc->base.state); + intel_encoder_get_config(encoder, crtc_state); + } } else { encoder->base.crtc = NULL; } @@ -18893,16 +19292,10 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_plane *plane; int min_cdclk = 0; - if (crtc_state->hw.active) { - struct drm_display_mode *mode = &crtc_state->hw.mode; - - intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode, - crtc_state); - - *mode = crtc_state->hw.adjusted_mode; - mode->hdisplay = crtc_state->pipe_src_w; - mode->vdisplay = crtc_state->pipe_src_h; + if (crtc_state->bigjoiner_slave) + continue; + if (crtc_state->hw.active) { /* * The initial mode needs to be set in order to keep * the atomic core happy.
It wants a valid mode if the @@ -18914,8 +19307,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) */ crtc_state->inherited = true; - intel_crtc_compute_pixel_rate(crtc_state); - intel_crtc_update_active_timings(crtc_state); intel_crtc_copy_hw_to_uapi_state(crtc_state); @@ -18964,6 +19355,39 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) intel_bw_crtc_update(bw_state, crtc_state); intel_pipe_config_sanity_check(dev_priv, crtc_state); + + /* discard our incomplete slave state, copy it from master */ + if (crtc_state->bigjoiner && crtc_state->hw.active) { + struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc; + struct intel_crtc_state *slave_crtc_state = + to_intel_crtc_state(slave->base.state); + + copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state); + slave->base.mode = crtc->base.mode; + + cdclk_state->min_cdclk[slave->pipe] = min_cdclk; + cdclk_state->min_voltage_level[slave->pipe] = + crtc_state->min_voltage_level; + + for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) { + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + + /* + * FIXME don't have the fb yet, so can't + * use intel_plane_data_rate() :( + */ + if (plane_state->uapi.visible) + crtc_state->data_rate[plane->id] = + 4 * crtc_state->pixel_rate; + else + crtc_state->data_rate[plane->id] = 0; + } + + intel_bw_crtc_update(bw_state, slave_crtc_state); + drm_calc_timestamping_constants(&slave->base, + &slave_crtc_state->hw.adjusted_mode); + } } } diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index be774f216065..5e0d42d82c11 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -513,7 +513,8 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, bool intel_plane_can_remap(const struct intel_plane_state *plane_state); enum drm_mode_status intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, - const struct drm_display_mode *mode); + const struct drm_display_mode *mode, + bool bigjoiner); enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port); bool is_trans_port_sync_mode(const struct intel_crtc_state *state); @@ -590,8 +591,8 @@ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe); int lpt_get_iclkip(struct drm_i915_private *dev_priv); bool intel_fuzzy_clock_check(int clock1, int clock2); -void intel_prepare_reset(struct drm_i915_private *dev_priv); -void intel_finish_reset(struct drm_i915_private *dev_priv); +void intel_display_prepare_reset(struct drm_i915_private *dev_priv); +void intel_display_finish_reset(struct drm_i915_private *dev_priv); void intel_dp_get_m_n(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, @@ -609,8 +610,6 @@ enum intel_display_power_domain intel_aux_power_domain(struct intel_digital_port *dig_port); enum intel_display_power_domain intel_legacy_aux_to_power_domain(enum aux_ch aux_ch); -void intel_mode_from_pipe_config(struct drm_display_mode *mode, - struct intel_crtc_state *pipe_config); void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index cfb4c1474982..ca41e8c00ad7 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ 
b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -755,6 +755,17 @@ static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation) rotation); } +static const char *plane_visibility(const struct intel_plane_state *plane_state) +{ + if (plane_state->uapi.visible) + return "visible"; + + if (plane_state->planar_slave) + return "planar-slave"; + + return "hidden"; +} + static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane) { const struct intel_plane_state *plane_state = @@ -773,12 +784,19 @@ static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane) plane_rotation(rot_str, sizeof(rot_str), plane_state->uapi.rotation); - seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", + seq_printf(m, "\t\tuapi: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", fb ? fb->base.id : 0, fb ? format_name.str : "n/a", + fb ? fb->modifier : 0, fb ? fb->width : 0, fb ? fb->height : 0, + plane_visibility(plane_state), DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str); + + if (plane_state->planar_linked_plane) + seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n", + plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name, + plane_state->planar_slave ? "slave" : "master"); } static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane) @@ -797,9 +815,9 @@ static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane) plane_rotation(rot_str, sizeof(rot_str), plane_state->hw.rotation); - seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", + seq_printf(m, "\t\thw: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", fb->base.id, format_name.str, - fb->width, fb->height, + fb->modifier, fb->width, fb->height, yesno(plane_state->uapi.visible), DRM_RECT_FP_ARG(&plane_state->uapi.src), DRM_RECT_ARG(&plane_state->uapi.dst), @@ -874,6 +892,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) intel_scaler_info(m, crtc); } + if (crtc_state->bigjoiner) + seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n", + crtc_state->bigjoiner_linked_crtc->base.base.id, + crtc_state->bigjoiner_linked_crtc->base.name, + crtc_state->bigjoiner_slave ? "slave" : "master"); + for_each_intel_encoder_mask(&dev_priv->drm, encoder, crtc_state->uapi.encoder_mask) intel_encoder_info(m, crtc, encoder); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 689922480661..fe2d90bba536 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -4492,30 +4492,24 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, int requested_dc; int max_dc; - if (INTEL_GEN(dev_priv) >= 12) { - if (IS_DG1(dev_priv)) - max_dc = 3; - else - max_dc = 4; - /* - * DC9 has a separate HW flow from the rest of the DC states, - * not depending on the DMC firmware. It's needed by system - * suspend/resume, so allow it unconditionally. 
- */ - mask = DC_STATE_EN_DC9; - } else if (IS_GEN(dev_priv, 11)) { + if (IS_DG1(dev_priv)) + max_dc = 3; + else if (INTEL_GEN(dev_priv) >= 12) + max_dc = 4; + else if (INTEL_GEN(dev_priv) >= 10 || IS_GEN9_BC(dev_priv)) max_dc = 2; - mask = DC_STATE_EN_DC9; - } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) { - max_dc = 2; - mask = 0; - } else if (IS_GEN9_LP(dev_priv)) { + else if (IS_GEN9_LP(dev_priv)) max_dc = 1; - mask = DC_STATE_EN_DC9; - } else { + else max_dc = 0; - mask = 0; - } + + /* + * DC9 has a separate HW flow from the rest of the DC states, + * not depending on the DMC firmware. It's needed by system + * suspend/resume, so allow it unconditionally. + */ + mask = IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 11 ? + DC_STATE_EN_DC9 : 0; if (!dev_priv->params.disable_power_well) max_dc = 0; @@ -5858,10 +5852,15 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915) void intel_display_power_suspend_late(struct drm_i915_private *i915) { - if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) + if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) { bxt_enable_dc9(i915); - else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) + /* Tweaked Wa_14010685332:icp,jsp,mcc */ + if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC) + intel_de_rmw(i915, SOUTH_CHICKEN1, + SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); + } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_enable_pc8(i915); + } } void intel_display_power_resume_early(struct drm_i915_private *i915) @@ -5869,6 +5868,10 @@ void intel_display_power_resume_early(struct drm_i915_private *i915) if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) { gen9_sanitize_dc_state(i915); bxt_disable_dc9(i915); + /* Tweaked Wa_14010685332:icp,jsp,mcc */ + if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC) + intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); + } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); } diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index f6f0626649e0..ce82d654d0f2 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -686,6 +686,7 @@ struct skl_wm_level { u8 plane_res_l; bool plane_en; bool ignore_lines; + bool can_sagv; }; struct skl_plane_wm { @@ -737,24 +738,35 @@ struct g4x_wm_state { struct intel_crtc_wm_state { union { + /* + * raw: + * The "raw" watermark values produced by the formula + * given the plane's current state. They do not consider + * how much FIFO is actually allocated for each plane. + * + * optimal: + * The "optimal" watermark values given the current + * state of the planes and the amount of FIFO + * allocated to each, ignoring any previous state + * of the planes. + * + * intermediate: + * The "intermediate" watermark values when transitioning + * between the old and new "optimal" values. Used when + * the watermark registers are single buffered and hence + * their state changes asynchronously with regards to the + * actual plane registers. These are essentially the + * worst case combination of the old and new "optimal" + * watermarks, which are therefore safe to use when the + * plane is in either its old or new state. + */ struct { - /* - * Intermediate watermarks; these can be - * programmed immediately since they satisfy - * both the current configuration we're - * switching away from and the new - * configuration we're switching to. 
- */ struct intel_pipe_wm intermediate; - - /* - * Optimal watermarks, programmed post-vblank - * when this state is committed. - */ struct intel_pipe_wm optimal; } ilk; struct { + struct skl_pipe_wm raw; /* gen9+ only needs 1-step wm programming */ struct skl_pipe_wm optimal; struct skl_ddb_entry ddb; @@ -763,22 +775,15 @@ struct intel_crtc_wm_state { } skl; struct { - /* "raw" watermarks (not inverted) */ - struct g4x_pipe_wm raw[NUM_VLV_WM_LEVELS]; - /* intermediate watermarks (inverted) */ - struct vlv_wm_state intermediate; - /* optimal watermarks (inverted) */ - struct vlv_wm_state optimal; - /* display FIFO split */ + struct g4x_pipe_wm raw[NUM_VLV_WM_LEVELS]; /* not inverted */ + struct vlv_wm_state intermediate; /* inverted */ + struct vlv_wm_state optimal; /* inverted */ struct vlv_fifo_state fifo_state; } vlv; struct { - /* "raw" watermarks */ struct g4x_pipe_wm raw[NUM_G4X_WM_LEVELS]; - /* intermediate watermarks */ struct g4x_wm_state intermediate; - /* optimal watermarks */ struct g4x_wm_state optimal; } g4x; }; @@ -817,15 +822,22 @@ struct intel_crtc_state { * The following members are used to verify the hardware state: * - enable * - active - * - mode / adjusted_mode + * - mode / pipe_mode / adjusted_mode * - color property blobs. * * During initial hw readout, they need to be copied to uapi. + * + * Bigjoiner will allow a transcoder mode that spans 2 pipes; + * Use the pipe_mode for calculations like watermarks, pipe + * scaler, and bandwidth. + * + * Use adjusted_mode for things that need to know the full + * mode on the transcoder, which spans all pipes. */ struct { bool active, enable; struct drm_property_blob *degamma_lut, *gamma_lut, *ctm; - struct drm_display_mode mode, adjusted_mode; + struct drm_display_mode mode, pipe_mode, adjusted_mode; enum drm_scaling_filter scaling_filter; } hw; @@ -838,6 +850,7 @@ struct intel_crtc_state { * accordingly. */ #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ +#define PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE (1<<1) /* bigjoiner slave, partial readout */ unsigned long quirks; unsigned fb_bits; /* framebuffers to flip */ @@ -1019,6 +1032,10 @@ struct intel_crtc_state { u32 data_rate[I915_MAX_PLANES]; + /* FIXME unify with data_rate[] */ + u64 plane_data_rate[I915_MAX_PLANES]; + u64 uv_plane_data_rate[I915_MAX_PLANES]; + /* Gamma mode programmed on the pipe */ u32 gamma_mode; @@ -1063,6 +1080,15 @@ struct intel_crtc_state { /* enable pipe csc? */ bool csc_enable; + /* enable pipe big joiner? */ + bool bigjoiner; + + /* big joiner slave crtc? */ + bool bigjoiner_slave; + + /* linked crtc for bigjoiner, either slave or master */ + struct intel_crtc *bigjoiner_linked_crtc; + /* Display Stream compression state */ struct { bool compression_enable; @@ -1189,6 +1215,15 @@ struct intel_plane { * the intel_plane_state structure and accessed via plane_state. 
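*
* (Usage sketch for the min_width/max_width/max_height hooks added
* right below, illustrative only: callers query per-framebuffer
* limits instead of hardcoding them:
*
*	int min_w = plane->min_width(fb, color_plane, rotation);
*	int max_w = plane->max_width(fb, color_plane, rotation);
*	int max_h = plane->max_height(fb, color_plane, rotation);
* )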
*/ + int (*min_width)(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation); + int (*max_width)(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation); + int (*max_height)(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation); unsigned int (*max_stride)(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index cf09aca7607b..3896d08c4177 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -254,6 +254,17 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes) return max_link_clock * max_lanes; } +bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &intel_dig_port->base; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + return INTEL_GEN(dev_priv) >= 12 || + (INTEL_GEN(dev_priv) == 11 && + encoder->port != PORT_A); +} + static int cnl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); @@ -519,7 +530,8 @@ small_joiner_ram_size_bits(struct drm_i915_private *i915) static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, u32 link_clock, u32 lane_count, - u32 mode_clock, u32 mode_hdisplay) + u32 mode_clock, u32 mode_hdisplay, + bool bigjoiner) { u32 bits_per_pixel, max_bpp_small_joiner_ram; int i; @@ -537,6 +549,10 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / mode_hdisplay; + + if (bigjoiner) + max_bpp_small_joiner_ram *= 2; + drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n", max_bpp_small_joiner_ram); @@ -546,6 +562,15 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, */ bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram); + if (bigjoiner) { + u32 max_bpp_bigjoiner = + i915->max_cdclk_freq * 48 / + intel_dp_mode_to_fec_clock(mode_clock); + + DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner); + bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner); + } + /* Error out if the max bpp is less than smallest allowed valid bpp */ if (bits_per_pixel < valid_dsc_bpp[0]) { drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n", @@ -568,7 +593,8 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, } static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, - int mode_clock, int mode_hdisplay) + int mode_clock, int mode_hdisplay, + bool bigjoiner) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 min_slice_count, i; @@ -595,12 +621,18 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, /* Find the closest match to the valid slice count values */ for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { - if (valid_dsc_slicecount[i] > - drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, - false)) + u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner; + + if (test_slice_count > + drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false)) break; - if (min_slice_count <= valid_dsc_slicecount[i]) - return valid_dsc_slicecount[i]; + + /* big joiner needs small joiner to be enabled */ + if (bigjoiner && test_slice_count < 4) + continue; + + if (min_slice_count <= test_slice_count) + return 
test_slice_count; } drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n", @@ -717,10 +749,14 @@ intel_dp_mode_valid(struct drm_connector *connector, u16 dsc_max_output_bpp = 0; u8 dsc_slice_count = 0; enum drm_mode_status status; + bool dsc = false, bigjoiner = false; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + return MODE_H_ILLEGAL; + if (intel_dp_is_edp(intel_dp) && fixed_mode) { if (mode->hdisplay > fixed_mode->hdisplay) return MODE_PANEL; @@ -731,6 +767,17 @@ intel_dp_mode_valid(struct drm_connector *connector, target_clock = fixed_mode->clock; } + if (mode->clock < 10000) + return MODE_CLOCK_LOW; + + if ((target_clock > max_dotclk || mode->hdisplay > 5120) && + intel_dp_can_bigjoiner(intel_dp)) { + bigjoiner = true; + max_dotclk *= 2; + } + if (target_clock > max_dotclk) + return MODE_CLOCK_HIGH; + max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); @@ -759,30 +806,31 @@ intel_dp_mode_valid(struct drm_connector *connector, max_link_clock, max_lanes, target_clock, - mode->hdisplay) >> 4; + mode->hdisplay, + bigjoiner) >> 4; dsc_slice_count = intel_dp_dsc_get_slice_count(intel_dp, target_clock, - mode->hdisplay); + mode->hdisplay, + bigjoiner); } + + dsc = dsc_max_output_bpp && dsc_slice_count; } - if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) || - target_clock > max_dotclk) + /* big joiner configuration needs DSC */ + if (bigjoiner && !dsc) return MODE_CLOCK_HIGH; - if (mode->clock < 10000) - return MODE_CLOCK_LOW; - - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - return MODE_H_ILLEGAL; + if (mode_rate > max_rate && !dsc) + return MODE_CLOCK_HIGH; status = intel_dp_mode_valid_downstream(intel_connector, mode, target_clock); if (status != MODE_OK) return status; - return intel_mode_valid_max_plane_size(dev_priv, mode); + return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner); } u32 intel_dp_pack_aux(const u8 *src, int src_bytes) @@ -2052,12 +2100,10 @@ static bool intel_dp_supports_fec(struct intel_dp *intel_dp, static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - - if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable) + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable) return false; - return intel_dsc_source_support(encoder, crtc_state) && + return intel_dsc_source_support(crtc_state) && drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); } @@ -2351,11 +2397,13 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_config->port_clock, pipe_config->lane_count, adjusted_mode->crtc_clock, - adjusted_mode->crtc_hdisplay); + adjusted_mode->crtc_hdisplay, + pipe_config->bigjoiner); dsc_dp_slice_count = intel_dp_dsc_get_slice_count(intel_dp, adjusted_mode->crtc_clock, - adjusted_mode->crtc_hdisplay); + adjusted_mode->crtc_hdisplay, + pipe_config->bigjoiner); if (!dsc_max_output_bpp || !dsc_dp_slice_count) { drm_dbg_kms(&dev_priv->drm, "Compressed BPP/Slice Count not supported\n"); @@ -2371,14 +2419,15 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, * is greater than the maximum Cdclock and if slice count is even * then we need to use 2 VDSC instances. 
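*
* (Worked example, figures assumed for illustration: an 8K bigjoiner
* mode carries a dotclock in the GHz range, well above a max cdclk on
* the order of 650 MHz, so the stream must be split across both VDSC
* engines; that is why dsc_split is forced on below and a slice count
* below 2 is rejected.)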
*/ - if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { - if (pipe_config->dsc.slice_count > 1) { - pipe_config->dsc.dsc_split = true; - } else { + if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq || + pipe_config->bigjoiner) { + if (pipe_config->dsc.slice_count < 2) { drm_dbg_kms(&dev_priv->drm, "Cannot split stream to use 2 VDSC instances\n"); return -EINVAL; } + + pipe_config->dsc.dsc_split = true; } ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); @@ -2449,6 +2498,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, intel_dp->common_rates[limits.max_clock], limits.max_bpp, adjusted_mode->crtc_clock); + if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq || + adjusted_mode->crtc_hdisplay > 5120) && + intel_dp_can_bigjoiner(intel_dp)) + pipe_config->bigjoiner = true; + /* * Optimize for slow and wide. This is the place to add alternative * optimization policy. @@ -2457,7 +2511,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, /* enable compression if the mode doesn't fit available BW */ drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); - if (ret || intel_dp->force_dsc_en) { + if (ret || intel_dp->force_dsc_en || pipe_config->bigjoiner) { ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, conn_state, &limits); if (ret < 0) @@ -3778,6 +3832,12 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, return false; } + if (CAN_PSR(i915) && intel_dp_is_edp(intel_dp)) { + drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n"); + crtc_state->uapi.mode_changed = true; + return false; + } + return true; } diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 3f862b4fd34f..b871a09b6901 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -106,6 +106,7 @@ bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); int intel_dp_max_data_rate(int max_link_clock, int max_lanes); +bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp); bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index c8fcec4d0788..0c8684634fca 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -714,7 +714,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, return 0; } - *status = intel_mode_valid_max_plane_size(dev_priv, mode); + *status = intel_mode_valid_max_plane_size(dev_priv, mode, false); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index eaef7a2d041f..f6ad257a260e 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -141,7 +141,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, "asserting DPLL %s with no DPLL\n", onoff(state))) return; - cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state); + cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state); I915_STATE_WARN(cur_state != state, "%s assertion failure (expected %s, current %s)\n", pll->info->name, onoff(state), onoff(cur_state)); @@ -891,11 +891,12 @@ 
hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state, } static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { int refclk; int n, p, r; - u32 wrpll = pll->state.hw_state.wrpll; + u32 wrpll = pll_state->wrpll; switch (wrpll & WRPLL_REF_MASK) { case WRPLL_REF_SPECIAL_HSW: @@ -962,7 +963,8 @@ hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state) } static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { int link_clock = 0; @@ -1002,11 +1004,12 @@ hsw_ddi_spll_get_dpll(struct intel_atomic_state *state, } static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { int link_clock = 0; - switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) { + switch (pll_state->spll & SPLL_FREQ_MASK) { case SPLL_FREQ_810MHz: link_clock = 81000; break; @@ -1577,9 +1580,9 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) } static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { - const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; int ref_clock = i915->dpll.ref_clks.nssc; u32 p0, p1, p2, dco_freq; @@ -1688,12 +1691,12 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) } static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { int link_clock = 0; - switch ((pll->state.hw_state.ctrl1 & - DPLL_CTRL1_LINK_RATE_MASK(0)) >> + switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >> DPLL_CTRL1_LINK_RATE_SHIFT(0)) { case DPLL_CTRL1_LINK_RATE_810: link_clock = 81000; @@ -1771,16 +1774,17 @@ static bool skl_get_dpll(struct intel_atomic_state *state, } static int skl_ddi_pll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { /* * ctrl1 register is already shifted for each pll, just use 0 to get * the internal shift for each field */ - if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) - return skl_ddi_wrpll_get_freq(i915, pll); + if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) + return skl_ddi_wrpll_get_freq(i915, pll, pll_state); else - return skl_ddi_lcpll_get_freq(i915, pll); + return skl_ddi_lcpll_get_freq(i915, pll, pll_state); } static void skl_update_dpll_ref_clks(struct drm_i915_private *i915) @@ -2218,9 +2222,9 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state) } static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { - const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; struct dpll clock; clock.m1 = 2; @@ -2636,20 +2640,23 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) } /* - * Display WA #22010492432: tgl + * Display WA #22010492432: ehl, tgl * Program half of the nominal DCO divider fraction value. 
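*
* (Round-trip illustration, grounded in this patch: the calculation
* side halves the fraction and the readout side doubles it back, so
* the computed frequency is unchanged:
*
*	calc:    dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
*	readout: dco_fraction *= 2;
* )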
*/ static bool -tgl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915) +ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915) { - return IS_TIGERLAKE(i915) && i915->dpll.ref_clks.nssc == 38400; + return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) && + IS_JSL_EHL_REVID(i915, EHL_REVID_B0, REVID_FOREVER)) || + IS_TIGERLAKE(i915)) && + i915->dpll.ref_clks.nssc == 38400; } static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state, int ref_clock) { - const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; u32 dco_fraction; u32 p0, p1, p2, dco_freq; @@ -2696,7 +2703,7 @@ static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> DPLL_CFGCR0_DCO_FRACTION_SHIFT; - if (tgl_combo_pll_div_frac_wa_needed(dev_priv)) + if (ehl_combo_pll_div_frac_wa_needed(dev_priv)) dco_fraction *= 2; dco_freq += (dco_fraction * ref_clock) / 0x8000; @@ -2708,9 +2715,11 @@ static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, } static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { - return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc); + return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state, + i915->dpll.ref_clks.nssc); } static bool @@ -2759,11 +2768,12 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) } static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { int link_clock = 0; - switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) { + switch (pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) { case DPLL_CFGCR0_LINK_RATE_810: link_clock = 81000; break; @@ -2846,12 +2856,13 @@ static bool cnl_get_dpll(struct intel_atomic_state *state, } static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { - if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) - return cnl_ddi_wrpll_get_freq(i915, pll); + if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) + return cnl_ddi_wrpll_get_freq(i915, pll, pll_state); else - return cnl_ddi_lcpll_get_freq(i915, pll); + return cnl_ddi_lcpll_get_freq(i915, pll, pll_state); } static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915) @@ -3036,7 +3047,8 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, } static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { /* * The PLL outputs multiple frequencies at the same time, selection is @@ -3072,9 +3084,10 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state, } static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { - return __cnl_ddi_wrpll_get_freq(i915, pll, + return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state, icl_wrpll_ref_clock(i915)); } @@ -3086,7 +3099,7 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915, memset(pll_state, 0, sizeof(*pll_state)); - if 
(tgl_combo_pll_div_frac_wa_needed(i915)) + if (ehl_combo_pll_div_frac_wa_needed(i915)) dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2); pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) | @@ -3399,9 +3412,9 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, } static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { - const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; u32 m1, m2_int, m2_frac, div1, div2, ref_clock; u64 tmp; @@ -4512,16 +4525,33 @@ void intel_update_active_dpll(struct intel_atomic_state *state, * intel_dpll_get_freq - calculate the DPLL's output frequency * @i915: i915 device * @pll: DPLL for which to calculate the output frequency + * @pll_state: DPLL state from which to calculate the output frequency * - * Return the output frequency corresponding to @pll's current state. + * Return the output frequency corresponding to @pll's passed-in @pll_state. */ int intel_dpll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq)) return 0; - return pll->info->funcs->get_freq(i915, pll); + return pll->info->funcs->get_freq(i915, pll, pll_state); +} + +/** + * intel_dpll_get_hw_state - read out the DPLL's hardware state + * @i915: i915 device + * @pll: DPLL for which to read out the hardware state + * @hw_state: DPLL's hardware state + * + * Read out @pll's hardware state into @hw_state. + */ +bool intel_dpll_get_hw_state(struct drm_i915_private *i915, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + return pll->info->funcs->get_hw_state(i915, pll, hw_state); } static void readout_dpll_hw_state(struct drm_i915_private *i915, @@ -4529,8 +4559,7 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915, { struct intel_crtc *crtc; - pll->on = pll->info->funcs->get_hw_state(i915, pll, - &pll->state.hw_state); + pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state); if (IS_JSL_EHL(i915) && pll->on && pll->info->id == DPLL_ID_EHL_DPLL4) { @@ -4587,7 +4616,7 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915) } /** - * intel_shared_dpll_dump_hw_state - write hw_state to dmesg + * intel_dpll_dump_hw_state - write hw_state to dmesg * @dev_priv: i915 drm device * @hw_state: hw state to be written to the log * diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 205542fb8dc7..2eb7618ef957 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -300,10 +300,11 @@ struct intel_shared_dpll_funcs { * @get_freq: * * Hook for calculating the pll's output frequency based on its - * current state. + * passed-in state.
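+	 *
+	 * (Illustrative call from the readout side, matching the
+	 * intel_dpll_get_freq() signature change in this patch:
+	 *
+	 *	freq = intel_dpll_get_freq(i915, pll, &pll->state.hw_state);
+	 * )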
*/ int (*get_freq)(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll); + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state); }; /** @@ -399,7 +400,11 @@ void intel_update_active_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder); int intel_dpll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll); + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state); +bool intel_dpll_get_hw_state(struct drm_i915_private *i915, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state); void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c index afa4e6817e8c..f453ceb8d149 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.c +++ b/drivers/gpu/drm/i915/display/intel_dsi.c @@ -75,7 +75,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, return MODE_CLOCK_HIGH; } - return intel_mode_valid_max_plane_size(dev_priv, mode); + return intel_mode_valid_max_plane_size(dev_priv, mode, false); } struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index f90838bc74fb..82674a8853c6 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2274,7 +2274,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector, if (status != MODE_OK) return status; - return intel_mode_valid_max_plane_size(dev_priv, mode); + return intel_mode_valid_max_plane_size(dev_priv, mode, false); } bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 1576c3722d0b..b3631b722de3 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -1024,8 +1024,6 @@ void intel_psr_enable(struct intel_dp *intel_dp, if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp) return; - dev_priv->psr.force_mode_changed = false; - if (!crtc_state->has_psr) return; @@ -1334,8 +1332,6 @@ void intel_psr_update(struct intel_dp *intel_dp, if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp) return; - dev_priv->psr.force_mode_changed = false; - mutex_lock(&dev_priv->psr.lock); enable = crtc_state->has_psr; @@ -1869,40 +1865,3 @@ bool intel_psr_enabled(struct intel_dp *intel_dp) return ret; } - -void intel_psr_atomic_check(struct drm_connector *connector, - struct drm_connector_state *old_state, - struct drm_connector_state *new_state) -{ - struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_connector *intel_connector; - struct intel_digital_port *dig_port; - struct drm_crtc_state *crtc_state; - - if (!CAN_PSR(dev_priv) || !new_state->crtc || - !dev_priv->psr.force_mode_changed) - return; - - intel_connector = to_intel_connector(connector); - dig_port = enc_to_dig_port(to_intel_encoder(new_state->best_encoder)); - if (dev_priv->psr.dp != &dig_port->dp) - return; - - crtc_state = drm_atomic_get_new_crtc_state(new_state->state, - new_state->crtc); - crtc_state->mode_changed = true; -} - -void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp) -{ - struct 
drm_i915_private *dev_priv; - - if (!intel_dp) - return; - - dev_priv = dp_to_i915(intel_dp); - if (!CAN_PSR(dev_priv) || intel_dp != dev_priv->psr.dp) - return; - - dev_priv->psr.force_mode_changed = true; -} diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 3eca9dcec3c0..0a517978e8af 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -43,10 +43,6 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp); int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, u32 *out_value); bool intel_psr_enabled(struct intel_dp *intel_dp); -void intel_psr_atomic_check(struct drm_connector *connector, - struct drm_connector_state *old_state, - struct drm_connector_state *new_state); -void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp); int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index b6deeb338477..019a2d6d807a 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -408,6 +408,134 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state, return DIV_ROUND_UP(pixel_rate * num, den); } +static int skl_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + int cpp = fb->format->cpp[color_plane]; + + switch (fb->modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + /* + * Validated limit is 4k, but has 5k should + * work apart from the following features: + * - Ytile (already limited to 4k) + * - FP16 (already limited to 4k) + * - render compression (already limited to 4k) + * - KVMR sprite and cursor (don't care) + * - horizontal panning (TODO verify this) + * - pipe and plane scaling (TODO verify this) + */ + if (cpp == 8) + return 4096; + else + return 5120; + case I915_FORMAT_MOD_Y_TILED_CCS: + case I915_FORMAT_MOD_Yf_TILED_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + /* FIXME AUX plane? */ + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + if (cpp == 8) + return 2048; + else + return 4096; + default: + MISSING_CASE(fb->modifier); + return 2048; + } +} + +static int glk_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + int cpp = fb->format->cpp[color_plane]; + + switch (fb->modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + if (cpp == 8) + return 4096; + else + return 5120; + case I915_FORMAT_MOD_Y_TILED_CCS: + case I915_FORMAT_MOD_Yf_TILED_CCS: + /* FIXME AUX plane? 
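 * (the CCS modifiers deliberately fall through to the Y-tile limits
 * below until the AUX-plane question is settled. As a rough sketch of
 * the glk/cnl policy that results, where cpp is the color plane's
 * bytes per pixel:
 *
 *	max_w = (cpp == 8) ? 2048 : 5120;
 *
 * i.e. only 64bpp formats keep the tighter 2k cap here.)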
*/ + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + if (cpp == 8) + return 2048; + else + return 5120; + default: + MISSING_CASE(fb->modifier); + return 2048; + } +} + +static int icl_plane_min_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + /* Wa_14011264657, Wa_14011050563: gen11+ */ + switch (fb->format->format) { + case DRM_FORMAT_C8: + return 18; + case DRM_FORMAT_RGB565: + return 10; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + return 6; + case DRM_FORMAT_NV12: + return 20; + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + return 12; + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: + return 4; + default: + return 1; + } +} + +static int icl_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + return 5120; +} + +static int skl_plane_max_height(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + return 4096; +} + +static int icl_plane_max_height(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + return 4320; +} + static unsigned int skl_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, @@ -2059,10 +2187,8 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state, } } - ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, - &crtc_state->uapi, - min_scale, max_scale, - true, true); + ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, + min_scale, max_scale, true); if (ret) return ret; @@ -2117,11 +2243,10 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state, if (ret) return ret; - ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, - &crtc_state->uapi, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - true, true); + ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true); if (ret) return ret; @@ -2328,10 +2453,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, max_scale = skl_plane_max_scale(dev_priv, fb); } - ret = drm_atomic_helper_check_plane_state(&plane_state->uapi, - &crtc_state->uapi, - min_scale, max_scale, - true, true); + ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, + min_scale, max_scale, true); if (ret) return ret; @@ -3133,6 +3256,18 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; } + if (INTEL_GEN(dev_priv) >= 11) { + plane->min_width = icl_plane_min_width; + plane->max_width = icl_plane_max_width; + plane->max_height = icl_plane_max_height; + } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + plane->max_width = glk_plane_max_width; + plane->max_height = skl_plane_max_height; + } else { + plane->max_width = skl_plane_max_width; + plane->max_height = skl_plane_max_height; + } + plane->max_stride = skl_plane_max_stride; plane->update_plane = skl_update_plane; plane->disable_plane = skl_disable_plane; diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c 
b/drivers/gpu/drm/i915/display/intel_vdsc.c index c5735c365659..e2716a67b281 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -332,11 +332,10 @@ static const struct rc_parameters *get_rc_params(u16 compressed_bpp, return &rc_parameters[row_index][column_index]; } -bool intel_dsc_source_support(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state) { const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; @@ -490,11 +489,10 @@ intel_dsc_power_domain(const struct intel_crtc_state *crtc_state) return POWER_DOMAIN_TRANSCODER_VDSC_PW2; } -static void intel_dsc_pps_configure(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; enum pipe pipe = crtc->pipe; u32 pps_val = 0; @@ -503,6 +501,9 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder, u8 num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1; int i = 0; + if (crtc_state->bigjoiner) + num_vdsc_instances *= 2; + /* Populate PICTURE_PARAMETER_SET_0 registers */ pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor << DSC_VER_MIN_SHIFT | @@ -973,55 +974,6 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder, } } -void intel_dsc_get_config(struct intel_encoder *encoder, - struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum pipe pipe = crtc->pipe; - enum intel_display_power_domain power_domain; - intel_wakeref_t wakeref; - u32 dss_ctl1, dss_ctl2, val; - - if (!intel_dsc_source_support(encoder, crtc_state)) - return; - - power_domain = intel_dsc_power_domain(crtc_state); - - wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); - if (!wakeref) - return; - - if (!is_pipe_dsc(crtc_state)) { - dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1); - dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2); - } else { - dss_ctl1 = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); - dss_ctl2 = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL2(pipe)); - } - - crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE; - if (!crtc_state->dsc.compression_enable) - goto out; - - crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) && - (dss_ctl1 & JOINER_ENABLE); - - /* FIXME: add more state readout as needed */ - - /* PPS1 */ - if (!is_pipe_dsc(crtc_state)) - val = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1); - else - val = intel_de_read(dev_priv, - ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe)); - vdsc_cfg->bits_per_pixel = val; - crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4; -out: - intel_display_power_put(dev_priv, power_domain, wakeref); -} - static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { @@ -1060,77 +1012,126 
@@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder, sizeof(dp_dsc_pps_sdp)); } +static i915_reg_t dss_ctl1_reg(const struct intel_crtc_state *crtc_state) +{ + enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; + + if (crtc_state->cpu_transcoder == TRANSCODER_EDP) + return DSS_CTL1; + + return ICL_PIPE_DSS_CTL1(pipe); +} + +static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state) +{ + enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; + + if (crtc_state->cpu_transcoder == TRANSCODER_EDP) + return DSS_CTL2; + + return ICL_PIPE_DSS_CTL2(pipe); +} + void intel_dsc_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum pipe pipe = crtc->pipe; - i915_reg_t dss_ctl1_reg, dss_ctl2_reg; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dss_ctl1_val = 0; u32 dss_ctl2_val = 0; if (!crtc_state->dsc.compression_enable) return; - /* Enable Power wells for VDSC/joining */ - intel_display_power_get(dev_priv, - intel_dsc_power_domain(crtc_state)); - - intel_dsc_pps_configure(encoder, crtc_state); + intel_dsc_pps_configure(crtc_state); - if (encoder->type == INTEL_OUTPUT_DSI) - intel_dsc_dsi_pps_write(encoder, crtc_state); - else - intel_dsc_dp_pps_write(encoder, crtc_state); - - if (!is_pipe_dsc(crtc_state)) { - dss_ctl1_reg = DSS_CTL1; - dss_ctl2_reg = DSS_CTL2; - } else { - dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe); - dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe); + if (!crtc_state->bigjoiner_slave) { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) + intel_dsc_dsi_pps_write(encoder, crtc_state); + else + intel_dsc_dp_pps_write(encoder, crtc_state); } + dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE; if (crtc_state->dsc.dsc_split) { dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE; dss_ctl1_val |= JOINER_ENABLE; } - intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1_val); - intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2_val); + if (crtc_state->bigjoiner) { + dss_ctl1_val |= BIG_JOINER_ENABLE; + if (!crtc_state->bigjoiner_slave) + dss_ctl1_val |= MASTER_BIG_JOINER_ENABLE; + } + intel_de_write(dev_priv, dss_ctl1_reg(crtc_state), dss_ctl1_val); + intel_de_write(dev_priv, dss_ctl2_reg(crtc_state), dss_ctl2_val); } void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - i915_reg_t dss_ctl1_reg, dss_ctl2_reg; - u32 dss_ctl1_val = 0, dss_ctl2_val = 0; if (!old_crtc_state->dsc.compression_enable) return; - if (!is_pipe_dsc(old_crtc_state)) { - dss_ctl1_reg = DSS_CTL1; - dss_ctl2_reg = DSS_CTL2; - } else { - dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe); - dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe); + intel_de_write(dev_priv, dss_ctl1_reg(old_crtc_state), 0); + intel_de_write(dev_priv, dss_ctl2_reg(old_crtc_state), 0); +} + +void intel_dsc_get_config(struct intel_crtc_state *crtc_state) +{ + struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; + u32 dss_ctl1, dss_ctl2, val; + + if (!intel_dsc_source_support(crtc_state)) + return; + + power_domain = intel_dsc_power_domain(crtc_state); + + wakeref = 
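/*
 * Readout follows the usual "look only if the power well is already up"
 * pattern: intel_display_power_get_if_enabled() returns a zero wakeref
 * instead of powering the domain on, so a disabled DSC block is simply
 * reported as compression_enable == false without waking the hardware.
 * The shape of the pattern, register reads elided:
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
 *	if (!wakeref)
 *		return;
 *	...
 *	intel_display_power_put(dev_priv, power_domain, wakeref);
 */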
intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) + return; + + dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc_state)); + dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc_state)); + + crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE; + if (!crtc_state->dsc.compression_enable) + goto out; + + crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) && + (dss_ctl1 & JOINER_ENABLE); + + if (dss_ctl1 & BIG_JOINER_ENABLE) { + crtc_state->bigjoiner = true; + + if (!(dss_ctl1 & MASTER_BIG_JOINER_ENABLE)) { + crtc_state->bigjoiner_slave = true; + if (!WARN_ON(crtc->pipe == PIPE_A)) + crtc_state->bigjoiner_linked_crtc = + intel_get_crtc_for_pipe(dev_priv, crtc->pipe - 1); + } else { + if (!WARN_ON(INTEL_NUM_PIPES(dev_priv) == crtc->pipe + 1)) + crtc_state->bigjoiner_linked_crtc = + intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1); + } } - dss_ctl1_val = intel_de_read(dev_priv, dss_ctl1_reg); - if (dss_ctl1_val & JOINER_ENABLE) - dss_ctl1_val &= ~JOINER_ENABLE; - intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1_val); - - dss_ctl2_val = intel_de_read(dev_priv, dss_ctl2_reg); - if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE || - dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE) - dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE | - RIGHT_BRANCH_VDSC_ENABLE); - intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2_val); - - /* Disable Power wells for VDSC/joining */ - intel_display_power_put_unchecked(dev_priv, - intel_dsc_power_domain(old_crtc_state)); + + /* FIXME: add more state readout as needed */ + + /* PPS1 */ + if (!is_pipe_dsc(crtc_state)) + val = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1); + else + val = intel_de_read(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe)); + vdsc_cfg->bits_per_pixel = val; + crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4; +out: + intel_display_power_put(dev_priv, power_domain, wakeref); } diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h index e56a3254c214..65d301c23580 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc.h @@ -11,15 +11,13 @@ struct intel_encoder; struct intel_crtc_state; -bool intel_dsc_source_support(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state); +bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state); void intel_dsc_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_dsc_disable(const struct intel_crtc_state *crtc_state); int intel_dsc_compute_params(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config); -void intel_dsc_get_config(struct intel_encoder *encoder, - struct intel_crtc_state *crtc_state); +void intel_dsc_get_config(struct intel_crtc_state *crtc_state); enum intel_display_power_domain intel_dsc_power_domain(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c index 272cf3ea68d5..44821d94544f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c @@ -202,12 +202,6 @@ retry: if (unlikely(err)) goto out_request; - if (w->ce->engine->emit_init_breadcrumb) { - err = w->ce->engine->emit_init_breadcrumb(rq); - if (unlikely(err)) - goto out_request; - } - /* * w->dma is already exported via (vma|obj)->resv we need only * keep track of the GPU activity within this vma/request, and @@ -217,9 +211,15 @@ retry: if (err) 
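/*
 * Note the reordering in this hunk: emit_init_breadcrumb() now runs only
 * after the dma/activity bookkeeping above has succeeded, apparently so
 * that nothing is emitted into the ring for a request that then fails
 * setup, and both hooks are reached through rq->engine. The payload then
 * has the usual shape:
 *
 *	[init breadcrumb] [bb_start -> batch] [final breadcrumb]
 *
 * with earlier failures handled purely by i915_request_set_error_once().
 */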
goto out_request; - err = w->ce->engine->emit_bb_start(rq, - batch->node.start, batch->node.size, - 0); + if (rq->engine->emit_init_breadcrumb) { + err = rq->engine->emit_init_breadcrumb(rq); + if (unlikely(err)) + goto out_request; + } + + err = rq->engine->emit_bb_start(rq, + batch->node.start, batch->node.size, + 0); out_request: if (unlikely(err)) { i915_request_set_error_once(rq, err); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 3389ac972d16..00d24000b5e8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -82,6 +82,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, obj->mm.madv = I915_MADV_WILLNEED; INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); mutex_init(&obj->mm.get_page.lock); + INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN); + mutex_init(&obj->mm.get_dma_page.lock); if (IS_ENABLED(CONFIG_LOCKDEP) && i915_gem_object_is_shrinkable(obj)) i915_gem_shrinker_taints_mutex(to_i915(obj->base.dev), diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index eaf3d4147be0..be14486f63a7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -272,8 +272,26 @@ int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, unsigned int tiling, unsigned int stride); struct scatterlist * +__i915_gem_object_get_sg(struct drm_i915_gem_object *obj, + struct i915_gem_object_page_iter *iter, + unsigned int n, + unsigned int *offset); + +static inline struct scatterlist * i915_gem_object_get_sg(struct drm_i915_gem_object *obj, - unsigned int n, unsigned int *offset); + unsigned int n, + unsigned int *offset) +{ + return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset); +} + +static inline struct scatterlist * +i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, + unsigned int n, + unsigned int *offset) +{ + return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset); +} struct page * i915_gem_object_get_page(struct drm_i915_gem_object *obj, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index b5c15557cc87..e2d9b7e1e152 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -56,6 +56,8 @@ struct drm_i915_gem_object_ops { void (*truncate)(struct drm_i915_gem_object *obj); void (*writeback)(struct drm_i915_gem_object *obj); + int (*pread)(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_pread *arg); int (*pwrite)(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pwrite *arg); @@ -80,6 +82,14 @@ struct i915_mmap_offset { struct rb_node offset; }; +struct i915_gem_object_page_iter { + struct scatterlist *sg_pos; + unsigned int sg_idx; /* in pages, but 32bit eek! */ + + struct radix_tree_root radix; + struct mutex lock; /* protects this cache */ +}; + struct drm_i915_gem_object { struct drm_gem_object base; @@ -246,13 +256,8 @@ struct drm_i915_gem_object { I915_SELFTEST_DECLARE(unsigned int page_mask); - struct i915_gem_object_page_iter { - struct scatterlist *sg_pos; - unsigned int sg_idx; /* in pages, but 32bit eek! 
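 * -- the struct body is unchanged; it merely moves out to the named
 * i915_gem_object_page_iter above so the object can carry two caches:
 * ->mm.get_page walks CPU pages, ->mm.get_dma_page walks DMA segments.
 * Lookups pick the iterator explicitly, per the new inlines in
 * i915_gem_object.h:
 *
 *	sg = __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, &offset);
 *
 * with each iterator's radix tree caching scatterlist positions so that
 * repeated random lookups stay cheap.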
*/ - - struct radix_tree_root radix; - struct mutex lock; /* protects this cache */ - } get_page; + struct i915_gem_object_page_iter get_page; + struct i915_gem_object_page_iter get_dma_page; /** * Element within i915->mm.unbound_list or i915->mm.bound_list, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index f60ca6dc911f..e2c7b2a7895f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -33,6 +33,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, obj->mm.get_page.sg_pos = pages->sgl; obj->mm.get_page.sg_idx = 0; + obj->mm.get_dma_page.sg_pos = pages->sgl; + obj->mm.get_dma_page.sg_idx = 0; obj->mm.pages = pages; @@ -155,6 +157,8 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) rcu_read_lock(); radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) radix_tree_delete(&obj->mm.get_page.radix, iter.index); + radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0) + radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index); rcu_read_unlock(); } @@ -438,11 +442,12 @@ void __i915_gem_object_release_map(struct drm_i915_gem_object *obj) } struct scatterlist * -i915_gem_object_get_sg(struct drm_i915_gem_object *obj, - unsigned int n, - unsigned int *offset) +__i915_gem_object_get_sg(struct drm_i915_gem_object *obj, + struct i915_gem_object_page_iter *iter, + unsigned int n, + unsigned int *offset) { - struct i915_gem_object_page_iter *iter = &obj->mm.get_page; + const bool dma = iter == &obj->mm.get_dma_page; struct scatterlist *sg; unsigned int idx, count; @@ -471,7 +476,7 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj, sg = iter->sg_pos; idx = iter->sg_idx; - count = __sg_page_count(sg); + count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg); while (idx + count <= n) { void *entry; @@ -499,7 +504,7 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj, idx += count; sg = ____sg_next(sg); - count = __sg_page_count(sg); + count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg); } scan: @@ -517,7 +522,7 @@ scan: while (idx + count <= n) { idx += count; sg = ____sg_next(sg); - count = __sg_page_count(sg); + count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg); } *offset = n - idx; @@ -584,7 +589,7 @@ i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, struct scatterlist *sg; unsigned int offset; - sg = i915_gem_object_get_sg(obj, n, &offset); + sg = i915_gem_object_get_sg_dma(obj, n, &offset); if (len) *len = sg_dma_len(sg) - (offset << PAGE_SHIFT); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 28147aab47b9..3a4dfe2ef1da 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -134,6 +134,58 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, vaddr, dma); } +static int +phys_pwrite(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_pwrite *args) +{ + void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; + char __user *user_data = u64_to_user_ptr(args->data_ptr); + int err; + + err = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_ALL, + MAX_SCHEDULE_TIMEOUT); + if (err) + return err; + + /* + * We manually control the domain here and pretend that it + * remains coherent i.e. in the GTT domain, like shmem_pwrite. 
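 * The copy is therefore bracketed by explicit cache maintenance rather
 * than a domain transition: invalidate the frontbuffer, copy the user
 * data, then clflush the written range and flush the chipset so the GPU
 * observes it -- in outline:
 *
 *	copy_from_user(vaddr, user_data, args->size);
 *	drm_clflush_virt_range(vaddr, args->size);
 *	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);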
+ */ + i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); + + if (copy_from_user(vaddr, user_data, args->size)) + return -EFAULT; + + drm_clflush_virt_range(vaddr, args->size); + intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); + + i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); + return 0; +} + +static int +phys_pread(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_pread *args) +{ + void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; + char __user *user_data = u64_to_user_ptr(args->data_ptr); + int err; + + err = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); + if (err) + return err; + + drm_clflush_virt_range(vaddr, args->size); + if (copy_to_user(user_data, vaddr, args->size)) + return -EFAULT; + + return 0; +} + static void phys_release(struct drm_i915_gem_object *obj) { fput(obj->base.filp); @@ -144,6 +196,9 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { .get_pages = i915_gem_object_get_pages_phys, .put_pages = i915_gem_object_put_pages_phys, + .pread = phys_pread, + .pwrite = phys_pwrite, + .release = phys_release, }; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 84b2707d8b17..29bffc6afcc1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -497,6 +497,43 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915) return 0; } +static void dbg_poison(struct i915_ggtt *ggtt, + dma_addr_t addr, resource_size_t size, + u8 x) +{ +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) + if (!drm_mm_node_allocated(&ggtt->error_capture)) + return; + + if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND) + return; /* beware stop_machine() inversion */ + + GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); + + mutex_lock(&ggtt->error_mutex); + while (size) { + void __iomem *s; + + ggtt->vm.insert_page(&ggtt->vm, addr, + ggtt->error_capture.start, + I915_CACHE_NONE, 0); + mb(); + + s = io_mapping_map_wc(&ggtt->iomap, + ggtt->error_capture.start, + PAGE_SIZE); + memset_io(s, x, PAGE_SIZE); + io_mapping_unmap(s); + + addr += PAGE_SIZE; + size -= PAGE_SIZE; + } + mb(); + ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE); + mutex_unlock(&ggtt->error_mutex); +#endif +} + static struct sg_table * i915_pages_create_for_stolen(struct drm_device *dev, resource_size_t offset, resource_size_t size) @@ -540,6 +577,11 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) if (IS_ERR(pages)) return PTR_ERR(pages); + dbg_poison(&to_i915(obj->base.dev)->ggtt, + sg_dma_address(pages->sgl), + sg_dma_len(pages->sgl), + POISON_INUSE); + __i915_gem_object_set_pages(obj, pages, obj->stolen->size); return 0; @@ -549,6 +591,12 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, struct sg_table *pages) { /* Should only be called from i915_gem_object_release_stolen() */ + + dbg_poison(&to_i915(obj->base.dev)->ggtt, + sg_dma_address(pages->sgl), + sg_dma_len(pages->sgl), + POISON_FREE); + sg_free_table(pages); kfree(pages); } diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index c30adc05fa98..680bd9442eb0 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -131,17 +131,17 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); do { - GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE); + GEM_BUG_ON(sg_dma_len(iter.sg) 
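/*
 * This hunk (and its gen8 twin below) sizes scatterlist segments with
 * sg_dma_len() instead of sg->length: after dma_map_sg() the DMA segment
 * layout may differ from the CPU page layout, and a mapped list may carry
 * trailing zero-length DMA entries. Every iter.dma walker in the series
 * therefore adopts the same step:
 *
 *	iter.sg = __sg_next(iter.sg);
 *	if (!iter.sg || sg_dma_len(iter.sg) == 0)
 *		break;
 *	iter.dma = sg_dma_address(iter.sg);
 *	iter.max = iter.dma + sg_dma_len(iter.sg);
 */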
< I915_GTT_PAGE_SIZE); vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); iter.dma += I915_GTT_PAGE_SIZE; if (iter.dma == iter.max) { iter.sg = __sg_next(iter.sg); - if (!iter.sg) + if (!iter.sg || sg_dma_len(iter.sg) == 0) break; iter.dma = sg_dma_address(iter.sg); - iter.max = iter.dma + iter.sg->length; + iter.max = iter.dma + sg_dma_len(iter.sg); } if (++act_pte == GEN6_PTES) { diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 38c7069b7749..a37c968ef8f7 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -372,19 +372,19 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2)); vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); do { - GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE); + GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE); vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma; iter->dma += I915_GTT_PAGE_SIZE; if (iter->dma >= iter->max) { iter->sg = __sg_next(iter->sg); - if (!iter->sg) { + if (!iter->sg || sg_dma_len(iter->sg) == 0) { idx = 0; break; } iter->dma = sg_dma_address(iter->sg); - iter->max = iter->dma + iter->sg->length; + iter->max = iter->dma + sg_dma_len(iter->sg); } if (gen8_pd_index(++idx, 0) == 0) { @@ -413,8 +413,8 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, u32 flags) { const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); + unsigned int rem = sg_dma_len(iter->sg); u64 start = vma->node.start; - dma_addr_t rem = iter->sg->length; GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm)); @@ -456,7 +456,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, } do { - GEM_BUG_ON(iter->sg->length < page_size); + GEM_BUG_ON(sg_dma_len(iter->sg) < page_size); vaddr[index++] = encode | iter->dma; start += page_size; @@ -467,7 +467,10 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, if (!iter->sg) break; - rem = iter->sg->length; + rem = sg_dma_len(iter->sg); + if (!rem) + break; + iter->dma = sg_dma_address(iter->sg); iter->max = iter->dma + rem; @@ -525,7 +528,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, } vma->page_sizes.gtt |= page_size; - } while (iter->sg); + } while (iter->sg && sg_dma_len(iter->sg)); } static void gen8_ppgtt_insert(struct i915_address_space *vm, diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 5bfb5f7ed02c..0b31670343f5 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -305,8 +305,9 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) engine->i915 = i915; engine->gt = gt; engine->uncore = gt->uncore; - engine->hw_id = engine->guc_id = info->hw_id; engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases); + engine->hw_id = info->hw_id; + engine->guc_id = MAKE_GUC_ID(info->class, info->instance); engine->class = info->class; engine->instance = info->instance; @@ -371,7 +372,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine) * instances. 
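 * The fix below compares like with like: vdbox_sfc_access is a mask of
 * VCS instance numbers, while engine->mask is a mask of global engine
 * ids, so the old "& engine->mask" test only worked where the two
 * numberings happened to coincide. Hence:
 *
 *	if (gt->info.vdbox_sfc_access & BIT(engine->instance))
 *		engine->uabi_capabilities |=
 *			I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;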
*/ if ((INTEL_GEN(i915) >= 11 && - engine->gt->info.vdbox_sfc_access & engine->mask) || + (engine->gt->info.vdbox_sfc_access & + BIT(engine->instance))) || (INTEL_GEN(i915) >= 9 && engine->instance == 0)) engine->uabi_capabilities |= I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; @@ -1599,6 +1601,41 @@ static unsigned long list_count(struct list_head *list) return count; } +static unsigned long read_ul(void *p, size_t x) +{ + return *(unsigned long *)(p + x); +} + +static void print_properties(struct intel_engine_cs *engine, + struct drm_printer *m) +{ + static const struct pmap { + size_t offset; + const char *name; + } props[] = { +#define P(x) { \ + .offset = offsetof(typeof(engine->props), x), \ + .name = #x \ +} + P(heartbeat_interval_ms), + P(max_busywait_duration_ns), + P(preempt_timeout_ms), + P(stop_timeout_ms), + P(timeslice_duration_ms), + + {}, +#undef P + }; + const struct pmap *p; + + drm_printf(m, "\tProperties:\n"); + for (p = props; p->name; p++) + drm_printf(m, "\t\t%s: %lu [default %lu]\n", + p->name, + read_ul(&engine->props, p->offset), + read_ul(&engine->defaults, p->offset)); +} + void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m, const char *header, ...) @@ -1641,6 +1678,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, drm_printf(m, "\tReset count: %d (global %d)\n", i915_reset_engine_count(error, engine), i915_reset_count(error)); + print_properties(engine, m); drm_printf(m, "\tRequests:\n"); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index 5067d0524d4b..9060385cd69e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -41,6 +41,8 @@ static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq) { engine->wakeref_serial = READ_ONCE(engine->serial) + 1; i915_request_add_active_barriers(rq); + if (!engine->heartbeat.systole && intel_engine_has_heartbeat(engine)) + engine->heartbeat.systole = i915_request_get(rq); } static void show_heartbeat(const struct i915_request *rq, @@ -144,8 +146,6 @@ static void heartbeat(struct work_struct *wrk) goto unlock; idle_pulse(engine, rq); - if (engine->i915->params.enable_hangcheck) - engine->heartbeat.systole = i915_request_get(rq); __i915_request_commit(rq); __i915_request_queue(rq, &attr); @@ -153,7 +153,7 @@ static void heartbeat(struct work_struct *wrk) unlock: mutex_unlock(&ce->timeline->mutex); out: - if (!next_heartbeat(engine)) + if (!engine->i915->params.enable_hangcheck || !next_heartbeat(engine)) i915_request_put(fetch_and_zero(&engine->heartbeat.systole)); intel_engine_pm_put(engine); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index f7b2e07e2229..499b09cb4acf 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -17,6 +17,25 @@ #include "intel_ring.h" #include "shmem_utils.h" +static void dbg_poison_ce(struct intel_context *ce) +{ + if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + return; + + if (ce->state) { + struct drm_i915_gem_object *obj = ce->state->obj; + int type = i915_coherent_map_type(ce->engine->i915); + void *map; + + map = i915_gem_object_pin_map(obj, type); + if (!IS_ERR(map)) { + memset(map, CONTEXT_REDZONE, obj->base.size); + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + } + } +} + static int __engine_unpark(struct intel_wakeref *wf) { struct intel_engine_cs *engine = @@ -32,20 +51,14 @@ 
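/*
 * __engine_unpark() below gains two things: it waits for any in-flight
 * use of the kernel context to drain before touching the image (a
 * context still visible to intel_context_inflight() may yet be written
 * back by the hardware), and the CONFIG_DRM_I915_DEBUG_GEM poisoning
 * moves into the dbg_poison_ce() helper above, leaving the unpark path:
 *
 *	while (unlikely(intel_context_inflight(ce)))
 *		intel_engine_flush_submission(engine);
 *	dbg_poison_ce(ce);
 *	ce->ops->reset(ce);
 */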
static int __engine_unpark(struct intel_wakeref *wf) if (ce) { GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags)); + /* Flush all pending HW writes before we touch the context */ + while (unlikely(intel_context_inflight(ce))) + intel_engine_flush_submission(engine); + /* First poison the image to verify we never fully trust it */ - if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) { - struct drm_i915_gem_object *obj = ce->state->obj; - int type = i915_coherent_map_type(engine->i915); - void *map; - - map = i915_gem_object_pin_map(obj, type); - if (!IS_ERR(map)) { - memset(map, CONTEXT_REDZONE, obj->base.size); - i915_gem_object_flush_map(obj); - i915_gem_object_unpin_map(obj); - } - } + dbg_poison_ce(ce); + /* Scrub the context image after our loss of control */ ce->ops->reset(ce); } diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 188a5f70177d..cf94525be2c1 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -1383,7 +1383,7 @@ intel_partial_pages(const struct i915_ggtt_view *view, if (ret) goto err_sg_alloc; - iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset); + iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset); GEM_BUG_ON(!iter); sg = st->sgl; @@ -1391,7 +1391,7 @@ intel_partial_pages(const struct i915_ggtt_view *view, do { unsigned int len; - len = min(iter->length - (offset << PAGE_SHIFT), + len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), count << PAGE_SHIFT); sg_set_page(sg, NULL, len, 0); sg_dma_address(sg) = diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 39b428c5049c..44f1d51e5ae5 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -614,6 +614,8 @@ void intel_gt_driver_remove(struct intel_gt *gt) void intel_gt_driver_unregister(struct intel_gt *gt) { + intel_wakeref_t wakeref; + intel_rps_driver_unregister(>->rps); /* @@ -622,16 +624,15 @@ void intel_gt_driver_unregister(struct intel_gt *gt) * resources. */ intel_gt_set_wedged(gt); + + /* Scrub all HW state upon release */ + with_intel_runtime_pm(gt->uncore->rpm, wakeref) + __intel_gt_reset(gt, ALL_ENGINES); } void intel_gt_driver_release(struct intel_gt *gt) { struct i915_address_space *vm; - intel_wakeref_t wakeref; - - /* Scrub all HW state upon release */ - with_intel_runtime_pm(gt->uncore->rpm, wakeref) - __intel_gt_reset(gt, ALL_ENGINES); vm = fetch_and_zero(>->vm); if (vm) /* FIXME being called twice on error paths :( */ diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 3f1114b58b01..7bfe9072be9a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -324,7 +324,7 @@ static void cnl_setup_private_ppat(struct intel_uncore *uncore) GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); intel_uncore_write(uncore, GEN10_PAT_INDEX(2), - GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); + GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE); intel_uncore_write(uncore, GEN10_PAT_INDEX(3), GEN8_PPAT_UC); @@ -349,17 +349,23 @@ static void cnl_setup_private_ppat(struct intel_uncore *uncore) */ static void bdw_setup_private_ppat(struct intel_uncore *uncore) { + struct drm_i915_private *i915 = uncore->i915; u64 pat; pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? 
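 * -- and PAT index 2, the scanout entry, is no longer part of this
 * fixed block: as the hunk below shows, gen9+ now programs it as
 * write-back with the eLLC override (mirroring the cnl change above),
 * while Broadwell keeps the old write-through encoding:
 *
 *	if (INTEL_GEN(i915) >= 9)
 *		pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
 *	else
 *		pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);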
*/ - GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); + /* for scanout with eLLC */ + if (INTEL_GEN(i915) >= 9) + pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE); + else + pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); } diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index c13c650ced22..8a33940a71f3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -580,7 +580,7 @@ static inline struct sgt_dma { struct scatterlist *sg = vma->pages->sgl; dma_addr_t addr = sg_dma_address(sg); - return (struct sgt_dma){ sg, addr, addr + sg->length }; + return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) }; } #endif diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index f82c6dd1de18..8a51c1c3a091 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1215,7 +1215,8 @@ static void intel_engine_context_out(struct intel_engine_cs *engine) static void execlists_check_context(const struct intel_context *ce, - const struct intel_engine_cs *engine) + const struct intel_engine_cs *engine, + const char *when) { const struct intel_ring *ring = ce->ring; u32 *regs = ce->lrc_reg_state; @@ -1250,7 +1251,7 @@ execlists_check_context(const struct intel_context *ce, valid = false; } - WARN_ONCE(!valid, "Invalid lrc state found before submission\n"); + WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when); } static void restore_default_state(struct intel_context *ce, @@ -1346,7 +1347,7 @@ __execlists_schedule_in(struct i915_request *rq) reset_active(rq, engine); if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) - execlists_check_context(ce, engine); + execlists_check_context(ce, engine, "before"); if (ce->tag) { /* Use a fixed tag for OA and friends */ @@ -1417,6 +1418,9 @@ __execlists_schedule_out(struct i915_request *rq, * refrain from doing non-trivial work here. */ + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + execlists_check_context(ce, engine, "after"); + /* * If we have just completed this context, the engine may now be * idle and we want to re-enter powersaving. 
@@ -2495,25 +2499,11 @@ invalidate_csb_entries(const u64 *first, const u64 *last) * bits 47-57: sw context id of the lrc the GT switched away from * bits 58-63: sw counter of the lrc the GT switched away from */ -static inline bool gen12_csb_parse(const u64 *csb) +static inline bool gen12_csb_parse(const u64 csb) { - bool ctx_away_valid; - bool new_queue; - u64 entry; - - /* HSD#22011248461 */ - entry = READ_ONCE(*csb); - if (unlikely(entry == -1)) { - preempt_disable(); - if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 50)) - GEM_WARN_ON("50us CSB timeout"); - preempt_enable(); - } - WRITE_ONCE(*(u64 *)csb, -1); - - ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(entry)); - new_queue = - lower_32_bits(entry) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE; + bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb)); + bool new_queue = + lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE; /* * The context switch detail is not guaranteed to be 5 when a preemption @@ -2523,7 +2513,7 @@ static inline bool gen12_csb_parse(const u64 *csb) * would require some extra handling, but we don't support that. */ if (!ctx_away_valid || new_queue) { - GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(entry))); + GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb))); return true; } @@ -2532,19 +2522,79 @@ static inline bool gen12_csb_parse(const u64 *csb) * context switch on an unsuccessful wait instruction since we always * use polling mode. */ - GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(entry))); + GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb))); return false; } -static inline bool gen8_csb_parse(const u64 *csb) +static inline bool gen8_csb_parse(const u64 csb) +{ + return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); +} + +static noinline u64 +wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb) { - return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); + u64 entry; + + /* + * Reading from the HWSP has one particular advantage: we can detect + * a stale entry. Since the write into HWSP is broken, we have no reason + * to trust the HW at all, the mmio entry may equally be unordered, so + * we prefer the path that is self-checking and as a last resort, + * return the mmio value. + * + * tgl,dg1:HSDES#22011327657 + */ + preempt_disable(); + if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) { + int idx = csb - engine->execlists.csb_status; + int status; + + status = GEN8_EXECLISTS_STATUS_BUF; + if (idx >= 6) { + status = GEN11_EXECLISTS_STATUS_BUF2; + idx -= 6; + } + status += sizeof(u64) * idx; + + entry = intel_uncore_read64(engine->uncore, + _MMIO(engine->mmio_base + status)); + } + preempt_enable(); + + return entry; +} + +static inline u64 +csb_read(const struct intel_engine_cs *engine, u64 * const csb) +{ + u64 entry = READ_ONCE(*csb); + + /* + * Unfortunately, the GPU does not always serialise its write + * of the CSB entries before its write of the CSB pointer, at least + * from the perspective of the CPU, using what is known as a Global + * Observation Point. We may read a new CSB tail pointer, but then + * read the stale CSB entries, causing us to misinterpret the + * context-switch events, and eventually declare the GPU hung. + * + * icl:HSDES#1806554093 + * tgl:HSDES#22011248461 + */ + if (unlikely(entry == -1)) + entry = wa_csb_read(engine, csb); + + /* Consume this entry so that we can spot its future reuse. 
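 * (writing ~0 back is what makes the workaround self-checking: a later
 * read of -1 can only mean "not yet written by the GPU", which is the
 * condition wa_csb_read() spins on briefly before falling back to the
 * GEN8/GEN11 EXECLISTS_STATUS_BUF mmio registers as a last resort).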
*/ + WRITE_ONCE(*csb, -1); + + /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */ + return entry; } static void process_csb(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - const u64 * const buf = execlists->csb_status; + u64 * const buf = execlists->csb_status; const u8 num_entries = execlists->csb_size; u8 head, tail; @@ -2602,6 +2652,7 @@ static void process_csb(struct intel_engine_cs *engine) rmb(); do { bool promote; + u64 csb; if (++head == num_entries) head = 0; @@ -2624,15 +2675,14 @@ static void process_csb(struct intel_engine_cs *engine) * status notifier. */ + csb = csb_read(engine, buf + head); ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n", - head, - upper_32_bits(buf[head]), - lower_32_bits(buf[head])); + head, upper_32_bits(csb), lower_32_bits(csb)); if (INTEL_GEN(engine->i915) >= 12) - promote = gen12_csb_parse(buf + head); + promote = gen12_csb_parse(csb); else - promote = gen8_csb_parse(buf + head); + promote = gen8_csb_parse(csb); if (promote) { struct i915_request * const *old = execlists->active; @@ -2987,6 +3037,8 @@ static struct execlists_capture *capture_regs(struct intel_engine_cs *engine) if (!cap->error->gt->engine) goto err_gt; + cap->error->gt->engine->hung = true; + return cap; err_gt: @@ -4047,6 +4099,8 @@ static void reset_csb_pointers(struct intel_engine_cs *engine) static void execlists_sanitize(struct intel_engine_cs *engine) { + GEM_BUG_ON(execlists_active(&engine->execlists)); + /* * Poison residual state on resume, in case the suspend didn't! * @@ -4376,6 +4430,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine) /* Mark all executing requests as skipped. */ list_for_each_entry(rq, &engine->active.requests, sched.link) mark_eio(rq); + intel_engine_signal_breadcrumbs(engine); /* Flush the queued requests to the timeline list (for retiring). 
*/ while ((rb = rb_first_cached(&execlists->queue))) { @@ -5922,18 +5977,6 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, return 0; } -struct intel_engine_cs * -intel_virtual_engine_get_sibling(struct intel_engine_cs *engine, - unsigned int sibling) -{ - struct virtual_engine *ve = to_virtual_engine(engine); - - if (sibling >= ve->num_siblings) - return NULL; - - return ve->siblings[sibling]; -} - void intel_execlists_show_requests(struct intel_engine_cs *engine, struct drm_printer *m, void (*show_request)(struct drm_printer *m, diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h index 91fd8e452d9b..c2d287f25497 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc.h @@ -121,10 +121,6 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, const struct intel_engine_cs *master, const struct intel_engine_cs *sibling); -struct intel_engine_cs * -intel_virtual_engine_get_sibling(struct intel_engine_cs *engine, - unsigned int sibling); - bool intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h index 93cb6c460508..1b51f7b9a5c3 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h @@ -49,4 +49,7 @@ #define GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x1A #define GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0xD +#define GEN8_EXECLISTS_STATUS_BUF 0x370 +#define GEN11_EXECLISTS_STATUS_BUF2 0x3c0 + #endif /* _INTEL_LRC_REG_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 39179a3eee98..254873e1646e 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -124,7 +124,7 @@ struct drm_i915_mocs_table { LE_1_UC | LE_TC_2_LLC_ELLC, \ L3_1_UC), \ MOCS_ENTRY(I915_MOCS_PTE, \ - LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \ + LE_0_PAGETABLE | LE_TC_0_PAGETABLE | LE_LRUM(3), \ L3_3_WB) static const struct drm_i915_mocs_entry skl_mocs_table[] = { @@ -243,8 +243,9 @@ static const struct drm_i915_mocs_entry tgl_mocs_table[] = { * only, __init_mocs_table() take care to program unused index with * this entry. 
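 * -- and with this change the entry used for unprogrammed indices is a
 * PTE passthrough like icl's: LeCC behaviour taken from the page table
 * (LE_0_PAGETABLE | LE_TC_0_PAGETABLE), here with L3 uncached, so a
 * stale or reserved MOCS index inherits the PTE's caching instead of a
 * fixed write-back policy.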
*/ - MOCS_ENTRY(1, LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), - L3_3_WB), + MOCS_ENTRY(I915_MOCS_PTE, + LE_0_PAGETABLE | LE_TC_0_PAGETABLE, + L3_1_UC), GEN11_MOCS_ENTRIES, /* Implicitly enable L1 - HDC:L1 + L3 + LLC */ @@ -280,7 +281,7 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = { L3_1_UC), /* Base - L3 + LeCC:PAT (Deprecated) */ MOCS_ENTRY(I915_MOCS_PTE, - LE_0_PAGETABLE | LE_TC_1_LLC, + LE_0_PAGETABLE | LE_TC_0_PAGETABLE, L3_3_WB), GEN11_MOCS_ENTRIES diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index ab675d35030d..d7b8e4457fc2 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -56,9 +56,12 @@ static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) static void gen11_rc6_enable(struct intel_rc6 *rc6) { - struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_gt *gt = rc6_to_gt(rc6); + struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; enum intel_engine_id id; + u32 pg_enable; + int i; /* 2b: Program RC6 thresholds.*/ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); @@ -102,10 +105,19 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) GEN6_RC_CTL_RC6_ENABLE | GEN6_RC_CTL_EI_MODE(1); - set(uncore, GEN9_PG_ENABLE, - GEN9_RENDER_PG_ENABLE | - GEN9_MEDIA_PG_ENABLE | - GEN11_MEDIA_SAMPLER_PG_ENABLE); + pg_enable = + GEN9_RENDER_PG_ENABLE | + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE; + + if (INTEL_GEN(gt->i915) >= 12) { + for (i = 0; i < I915_MAX_VCS; i++) + if (HAS_ENGINE(gt, _VCS(i))) + pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) | + VDN_MFX_POWERGATE_ENABLE(i)); + } + + set(uncore, GEN9_PG_ENABLE, pg_enable); } static void gen9_rc6_enable(struct intel_rc6 *rc6) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index ac36b67fb46b..3654c955e6be 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -19,6 +19,7 @@ #include "intel_engine_pm.h" #include "intel_gt.h" #include "intel_gt_pm.h" +#include "intel_gt_requests.h" #include "intel_reset.h" #include "uc/intel_guc.h" @@ -1190,14 +1191,14 @@ static void intel_gt_reset_global(struct intel_gt *gt, /* Use a watchdog to ensure that our reset completes */ intel_wedge_on_timeout(&w, gt, 5 * HZ) { - intel_prepare_reset(gt->i915); + intel_display_prepare_reset(gt->i915); /* Flush everyone using a resource about to be clobbered */ synchronize_srcu_expedited(>->reset.backoff_srcu); intel_gt_reset(gt, engine_mask, reason); - intel_finish_reset(gt->i915); + intel_display_finish_reset(gt->i915); } if (!test_bit(I915_WEDGED, >->reset.flags)) @@ -1250,7 +1251,7 @@ void intel_gt_handle_error(struct intel_gt *gt, engine_mask &= gt->info.engine_mask; if (flags & I915_ERROR_CAPTURE) { - i915_capture_error_state(gt->i915); + i915_capture_error_state(gt, engine_mask); intel_gt_clear_error_registers(gt, engine_mask); } @@ -1370,6 +1371,7 @@ void intel_gt_set_wedged_on_fini(struct intel_gt *gt) { intel_gt_set_wedged(gt); set_bit(I915_WEDGED_ON_FINI, >->reset.flags); + intel_gt_retire_requests(gt); /* cleanup any wedged requests */ } void intel_gt_init_reset(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 16b48e72c369..a41b43f445b8 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -444,6 +444,7 @@ static void reset_cancel(struct intel_engine_cs *engine) 
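/*
 * The same one-line addition recurs in the execlists and mock-engine
 * reset_cancel() paths in this series: once every request has been
 * force-completed with -EIO no further completion interrupt is coming,
 * so the breadcrumbs are kicked by hand to wake any waiters:
 *
 *	list_for_each_entry(request, &engine->active.requests, sched.link)
 *		i915_request_mark_complete(request);
 *	intel_engine_signal_breadcrumbs(engine);
 */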
i915_request_set_error_once(request, -EIO); i915_request_mark_complete(request); } + intel_engine_signal_breadcrumbs(engine); /* Remaining _unready_ requests will be nop'ed when submitted */ diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 466ec671b379..0d88f17799ff 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -1973,7 +1973,7 @@ static struct drm_i915_private *mchdev_get(void) rcu_read_lock(); i915 = rcu_dereference(ips_mchdev); - if (!kref_get_unless_zero(&i915->drm.ref)) + if (i915 && !kref_get_unless_zero(&i915->drm.ref)) i915 = NULL; rcu_read_unlock(); diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index dfd1cfb8a7ec..2f830017c51d 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -245,18 +245,39 @@ static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled) GEM_BUG_ON(stalled); } +static void mark_eio(struct i915_request *rq) +{ + if (i915_request_completed(rq)) + return; + + GEM_BUG_ON(i915_request_signaled(rq)); + + i915_request_set_error_once(rq, -EIO); + i915_request_mark_complete(rq); +} + static void mock_reset_cancel(struct intel_engine_cs *engine) { - struct i915_request *request; + struct mock_engine *mock = + container_of(engine, typeof(*mock), base); + struct i915_request *rq; unsigned long flags; + del_timer_sync(&mock->hw_delay); + spin_lock_irqsave(&engine->active.lock, flags); /* Mark all submitted requests as skipped. */ - list_for_each_entry(request, &engine->active.requests, sched.link) { - i915_request_set_error_once(request, -EIO); - i915_request_mark_complete(request); + list_for_each_entry(rq, &engine->active.requests, sched.link) + mark_eio(rq); + intel_engine_signal_breadcrumbs(engine); + + /* Cancel and submit all pending requests. */ + list_for_each_entry(rq, &mock->hw_queue, mock.link) { + mark_eio(rq); + __i915_request_submit(rq); } + INIT_LIST_HEAD(&mock->hw_queue); spin_unlock_irqrestore(&engine->active.lock, flags); } diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c index e73854dd2fe0..b88aa35ad75b 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -215,16 +215,17 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine) goto err_pm; for (i = 0; i < ARRAY_SIZE(times); i++) { - /* Manufacture a tick */ do { - while (READ_ONCE(engine->heartbeat.systole)) - flush_delayed_work(&engine->heartbeat.work); + /* Manufacture a tick */ + intel_engine_park_heartbeat(engine); + GEM_BUG_ON(engine->heartbeat.systole); + engine->serial++; /* pretend we are not idle! */ + intel_engine_unpark_heartbeat(engine); - engine->serial++; /* quick, pretend we are not idle! 
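 * -- the rewritten loop above replaces this flush-until-idle dance with
 * an explicit park/unpark of the heartbeat, which is deterministic and
 * lets a GEM_BUG_ON prove the systole really was released:
 *
 *	intel_engine_park_heartbeat(engine);
 *	GEM_BUG_ON(engine->heartbeat.systole);
 *	engine->serial++;
 *	intel_engine_unpark_heartbeat(engine);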
*/ flush_delayed_work(&engine->heartbeat.work); if (!delayed_work_pending(&engine->heartbeat.work)) { - pr_err("%s: heartbeat did not start\n", - engine->name); + pr_err("%s: heartbeat %d did not start\n", + engine->name, i); err = -EINVAL; goto err_pm; } diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 3540ba9bd459..aa5675ecb5cc 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -219,7 +219,7 @@ int live_rps_clock_interval(void *arg) struct igt_spinner spin; int err = 0; - if (!intel_rps_is_enabled(rps)) + if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6) return 0; if (igt_spinner_init(&spin, gt)) @@ -1028,7 +1028,7 @@ int live_rps_interrupt(void *arg) * First, let's check whether or not we are receiving interrupts. */ - if (!intel_rps_has_interrupts(rps)) + if (!intel_rps_has_interrupts(rps) || INTEL_GEN(gt->i915) < 6) return 0; intel_gt_pm_get(gt); @@ -1133,7 +1133,7 @@ int live_rps_power(void *arg) * that theory. */ - if (!intel_rps_is_enabled(rps)) + if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6) return 0; if (!librapl_energy_uJ()) @@ -1237,7 +1237,7 @@ int live_rps_dynamic(void *arg) * moving parts into dynamic reclocking based on load. */ - if (!intel_rps_is_enabled(rps)) + if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6) return 0; if (igt_spinner_init(&spin, gt)) diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index 19c2cb166e7c..2edf2b15885f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -17,8 +17,9 @@ #include "../selftests/i915_random.h" #include "../i915_selftest.h" -#include "../selftests/igt_flush_test.h" -#include "../selftests/mock_gem_device.h" +#include "selftests/igt_flush_test.h" +#include "selftests/lib_sw_fence.h" +#include "selftests/mock_gem_device.h" #include "selftests/mock_timeline.h" static struct page *hwsp_page(struct intel_timeline *tl) @@ -755,6 +756,378 @@ out_free: return err; } +static int emit_read_hwsp(struct i915_request *rq, + u32 seqno, u32 hwsp, + u32 *addr) +{ + const u32 gpr = i915_mmio_reg_offset(GEN8_RING_CS_GPR(rq->engine->mmio_base, 0)); + u32 *cs; + + cs = intel_ring_begin(rq, 12); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = *addr; + *cs++ = 0; + *cs++ = seqno; + *addr += 4; + + *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = gpr; + *cs++ = hwsp; + *cs++ = 0; + + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = gpr; + *cs++ = *addr; + *cs++ = 0; + *addr += 4; + + intel_ring_advance(rq, cs); + + return 0; +} + +struct hwsp_watcher { + struct i915_vma *vma; + struct i915_request *rq; + u32 addr; + u32 *map; +}; + +static bool cmp_lt(u32 a, u32 b) +{ + return a < b; +} + +static bool cmp_gte(u32 a, u32 b) +{ + return a >= b; +} + +static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = i915_gem_object_create_internal(gt->i915, SZ_2M); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + w->map = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(w->map)) { + i915_gem_object_put(obj); + return PTR_ERR(w->map); + } + + vma = i915_gem_object_ggtt_pin_ww(obj, NULL, NULL, 0, 0, 0); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return PTR_ERR(vma); + } + + w->vma = vma; + w->addr = i915_ggtt_offset(vma); + return 0; +} + +static int 
create_watcher(struct hwsp_watcher *w, + struct intel_engine_cs *engine, + int ringsz) +{ + struct intel_context *ce; + struct intel_timeline *tl; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + ce->ring = __intel_context_ring_size(ringsz); + w->rq = intel_context_create_request(ce); + intel_context_put(ce); + if (IS_ERR(w->rq)) + return PTR_ERR(w->rq); + + w->addr = i915_ggtt_offset(w->vma); + tl = w->rq->context->timeline; + + /* some light mutex juggling required; think co-routines */ + lockdep_unpin_lock(&tl->mutex, w->rq->cookie); + mutex_unlock(&tl->mutex); + + return 0; +} + +static int check_watcher(struct hwsp_watcher *w, const char *name, + bool (*op)(u32 hwsp, u32 seqno)) +{ + struct i915_request *rq = fetch_and_zero(&w->rq); + struct intel_timeline *tl = rq->context->timeline; + u32 offset, end; + int err; + + GEM_BUG_ON(w->addr - i915_ggtt_offset(w->vma) > w->vma->size); + + i915_request_get(rq); + mutex_lock(&tl->mutex); + rq->cookie = lockdep_pin_lock(&tl->mutex); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ) < 0) { + err = -ETIME; + goto out; + } + + err = 0; + offset = 0; + end = (w->addr - i915_ggtt_offset(w->vma)) / sizeof(*w->map); + while (offset < end) { + if (!op(w->map[offset + 1], w->map[offset])) { + pr_err("Watcher '%s' found HWSP value %x for seqno %x\n", + name, w->map[offset + 1], w->map[offset]); + err = -EINVAL; + } + + offset += 2; + } + +out: + i915_request_put(rq); + return err; +} + +static void cleanup_watcher(struct hwsp_watcher *w) +{ + if (w->rq) { + struct intel_timeline *tl = w->rq->context->timeline; + + mutex_lock(&tl->mutex); + w->rq->cookie = lockdep_pin_lock(&tl->mutex); + + i915_request_add(w->rq); + } + + i915_vma_unpin_and_release(&w->vma, I915_VMA_RELEASE_MAP); +} + +static bool retire_requests(struct intel_timeline *tl) +{ + struct i915_request *rq, *rn; + + mutex_lock(&tl->mutex); + list_for_each_entry_safe(rq, rn, &tl->requests, link) + if (!i915_request_retire(rq)) + break; + mutex_unlock(&tl->mutex); + + return !i915_active_fence_isset(&tl->last_request); +} + +static struct i915_request *wrap_timeline(struct i915_request *rq) +{ + struct intel_context *ce = rq->context; + struct intel_timeline *tl = ce->timeline; + u32 seqno = rq->fence.seqno; + + while (tl->seqno >= seqno) { /* Cause a wrap */ + i915_request_put(rq); + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return rq; + + i915_request_get(rq); + i915_request_add(rq); + } + + i915_request_put(rq); + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return rq; + + i915_request_get(rq); + i915_request_add(rq); + + return rq; +} + +static int live_hwsp_read(void *arg) +{ + struct intel_gt *gt = arg; + struct hwsp_watcher watcher[2] = {}; + struct intel_engine_cs *engine; + struct intel_timeline *tl; + enum intel_engine_id id; + int err = 0; + int i; + + /* + * If we take a reference to the HWSP for reading on the GPU, that + * read may be arbitrarily delayed (either by foreign fence or + * priority saturation) and a wrap can happen within 30 minutes. + * When the GPU read is finally submitted it should be correct, + * even across multiple wraps. 
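For reference while reading this selftest: i915 orders 32-bit seqnos with signed modular arithmetic, so the ordering survives the wraps being manufactured here. A minimal standalone sketch of that idiom (mirroring i915_seqno_passed(); illustrative, not code added by this patch):

#include <assert.h>
#include <stdint.h>

/* Wrap-safe while the two values stay within 2^31 of each other,
 * which the forced wraps in live_hwsp_read() remain inside. */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(0x00000002u, 0xfffffff8u)); /* 2 is "after" the wrap */
	assert(!seqno_passed(0xfffffff8u, 0x00000002u));
	return 0;
}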
+ */ + + if (INTEL_GEN(gt->i915) < 8) /* CS convenience [SRM/LRM] */ + return 0; + + tl = intel_timeline_create(gt); + if (IS_ERR(tl)) + return PTR_ERR(tl); + + if (!tl->hwsp_cacheline) + goto out_free; + + for (i = 0; i < ARRAY_SIZE(watcher); i++) { + err = setup_watcher(&watcher[i], gt); + if (err) + goto out; + } + + for_each_engine(engine, gt, id) { + struct intel_context *ce; + unsigned long count = 0; + IGT_TIMEOUT(end_time); + + /* Create a request we can use for remote reading of the HWSP */ + err = create_watcher(&watcher[1], engine, SZ_512K); + if (err) + goto out; + + do { + struct i915_sw_fence *submit; + struct i915_request *rq; + u32 hwsp; + + submit = heap_fence_create(GFP_KERNEL); + if (!submit) { + err = -ENOMEM; + goto out; + } + + err = create_watcher(&watcher[0], engine, SZ_4K); + if (err) + goto out; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out; + } + + /* Skip to the end, saving 30 minutes of nops */ + tl->seqno = -10u + 2 * (count & 3); + WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno); + ce->timeline = intel_timeline_get(tl); + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + intel_context_put(ce); + goto out; + } + + err = i915_sw_fence_await_dma_fence(&rq->submit, + &watcher[0].rq->fence, 0, + GFP_KERNEL); + if (err < 0) { + i915_request_add(rq); + intel_context_put(ce); + goto out; + } + + mutex_lock(&watcher[0].rq->context->timeline->mutex); + err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp); + if (err == 0) + err = emit_read_hwsp(watcher[0].rq, /* before */ + rq->fence.seqno, hwsp, + &watcher[0].addr); + mutex_unlock(&watcher[0].rq->context->timeline->mutex); + if (err) { + i915_request_add(rq); + intel_context_put(ce); + goto out; + } + + mutex_lock(&watcher[1].rq->context->timeline->mutex); + err = intel_timeline_read_hwsp(rq, watcher[1].rq, &hwsp); + if (err == 0) + err = emit_read_hwsp(watcher[1].rq, /* after */ + rq->fence.seqno, hwsp, + &watcher[1].addr); + mutex_unlock(&watcher[1].rq->context->timeline->mutex); + if (err) { + i915_request_add(rq); + intel_context_put(ce); + goto out; + } + + i915_request_get(rq); + i915_request_add(rq); + + rq = wrap_timeline(rq); + intel_context_put(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + err = i915_sw_fence_await_dma_fence(&watcher[1].rq->submit, + &rq->fence, 0, + GFP_KERNEL); + if (err < 0) { + i915_request_put(rq); + goto out; + } + + err = check_watcher(&watcher[0], "before", cmp_lt); + i915_sw_fence_commit(submit); + heap_fence_put(submit); + if (err) { + i915_request_put(rq); + goto out; + } + count++; + + if (8 * watcher[1].rq->ring->emit > + 3 * watcher[1].rq->ring->size) { + i915_request_put(rq); + break; + } + + /* Flush the timeline before manually wrapping again */ + if (i915_request_wait(rq, + I915_WAIT_INTERRUPTIBLE, + HZ) < 0) { + err = -ETIME; + i915_request_put(rq); + goto out; + } + + retire_requests(tl); + i915_request_put(rq); + } while (!__igt_timeout(end_time, NULL)); + WRITE_ONCE(*(u32 *)tl->hwsp_seqno, 0xdeadbeef); + + pr_info("%s: simulated %lu wraps\n", engine->name, count); + err = check_watcher(&watcher[1], "after", cmp_gte); + if (err) + goto out; + } + +out: + for (i = 0; i < ARRAY_SIZE(watcher); i++) + cleanup_watcher(&watcher[i]); + + if (igt_flush_test(gt->i915)) + err = -EIO; + +out_free: + intel_timeline_put(tl); + return err; +} + static int live_hwsp_rollover_kernel(void *arg) { struct intel_gt *gt = arg; @@ -998,6 +1371,7 @@ int intel_timeline_live_selftests(struct 
drm_i915_private *i915) SUBTEST(live_hwsp_engine), SUBTEST(live_hwsp_alternate), SUBTEST(live_hwsp_wrap), + SUBTEST(live_hwsp_read), SUBTEST(live_hwsp_rollover_kernel), SUBTEST(live_hwsp_rollover_user), }; diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c index 535cc1169e54..967031056202 100644 --- a/drivers/gpu/drm/i915/gt/sysfs_engines.c +++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c @@ -79,14 +79,12 @@ static ssize_t repr_trim(char *buf, ssize_t len) static ssize_t __caps_show(struct intel_engine_cs *engine, - u32 caps, char *buf, bool show_unknown) + unsigned long caps, char *buf, bool show_unknown) { const char * const *repr; int count, n; ssize_t len; - BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities)); - switch (engine->class) { case VIDEO_DECODE_CLASS: repr = vcs_caps; @@ -103,12 +101,10 @@ __caps_show(struct intel_engine_cs *engine, count = 0; break; } - GEM_BUG_ON(count > BITS_PER_TYPE(typeof(caps))); + GEM_BUG_ON(count > BITS_PER_LONG); len = 0; - for_each_set_bit(n, - (unsigned long *)&caps, - show_unknown ? BITS_PER_TYPE(typeof(caps)) : count) { + for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) { if (n >= count || !repr[n]) { if (GEM_WARN_ON(show_unknown)) len += snprintf(buf + len, PAGE_SIZE - len, diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index e4aaa5f29796..2a343a977987 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -213,23 +213,6 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc) return flags; } -static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc) -{ - u32 flags = 0; - - if (intel_guc_submission_is_used(guc)) { - u32 ctxnum, base; - - base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool); - ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16; - - base >>= PAGE_SHIFT; - flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) | - (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT); - } - return flags; -} - static u32 guc_ctl_log_params_flags(struct intel_guc *guc) { u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT; @@ -291,7 +274,6 @@ static void guc_init_params(struct intel_guc *guc) BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32)); - params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc); params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc); params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index d44061033f23..5212ff844292 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -10,11 +10,52 @@ /* * The Additional Data Struct (ADS) has pointers for different buffers used by - * the GuC. One single gem object contains the ADS struct itself (guc_ads), the - * scheduling policies (guc_policies), a structure describing a collection of - * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save - * its internal state for sleep. + * the GuC. One single gem object contains the ADS struct itself (guc_ads) and + * all the extra buffers indirectly linked via the ADS struct's entries. 
+ * + * Layout of the ADS blob allocated for the GuC: + * + * +---------------------------------------+ <== base + * | guc_ads | + * +---------------------------------------+ + * | guc_policies | + * +---------------------------------------+ + * | guc_gt_system_info | + * +---------------------------------------+ + * | guc_clients_info | + * +---------------------------------------+ + * | guc_ct_pool_entry[size] | + * +---------------------------------------+ + * | padding | + * +---------------------------------------+ <== 4K aligned + * | private data | + * +---------------------------------------+ + * | padding | + * +---------------------------------------+ <== 4K aligned */ +struct __guc_ads_blob { + struct guc_ads ads; + struct guc_policies policies; + struct guc_gt_system_info system_info; + struct guc_clients_info clients_info; + struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE]; +} __packed; + +static u32 guc_ads_private_data_size(struct intel_guc *guc) +{ + return PAGE_ALIGN(guc->fw.private_data_size); +} + +static u32 guc_ads_private_data_offset(struct intel_guc *guc) +{ + return PAGE_ALIGN(sizeof(struct __guc_ads_blob)); +} + +static u32 guc_ads_blob_size(struct intel_guc *guc) +{ + return guc_ads_private_data_offset(guc) + + guc_ads_private_data_size(guc); +} static void guc_policy_init(struct guc_policy *policy) { @@ -48,26 +89,37 @@ static void guc_ct_pool_entries_init(struct guc_ct_pool_entry *pool, u32 num) memset(pool, 0, num * sizeof(*pool)); } +static void guc_mapping_table_init(struct intel_gt *gt, + struct guc_gt_system_info *system_info) +{ + unsigned int i, j; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* Table must be set to invalid values for entries not used */ + for (i = 0; i < GUC_MAX_ENGINE_CLASSES; ++i) + for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; ++j) + system_info->mapping_table[i][j] = + GUC_MAX_INSTANCES_PER_CLASS; + + for_each_engine(engine, gt, id) { + u8 guc_class = engine->class; + + system_info->mapping_table[guc_class][engine->instance] = + engine->instance; + } +} + /* * The first 80 dwords of the register state context, containing the * execlists and ppgtt registers. 
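To make guc_mapping_table_init() above concrete: every slot is first poisoned with the invalid marker (GUC_MAX_INSTANCES_PER_CLASS), then each present engine writes its own instance number at [class][instance]. A standalone illustration for a hypothetical GT where only vcs0 and vcs2 are fused in; the class value 1 is an assumption for the demo:

#include <assert.h>

#define GUC_MAX_ENGINE_CLASSES		16
#define GUC_MAX_INSTANCES_PER_CLASS	32
#define VIDEO_DECODE_CLASS		1	/* assumed for the demo */

int main(void)
{
	unsigned char table[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
	int i, j;

	/* invalid-fill pass, as in guc_mapping_table_init() */
	for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++)
		for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; j++)
			table[i][j] = GUC_MAX_INSTANCES_PER_CLASS;

	/* identity entries for the engines that exist (vcs0, vcs2) */
	table[VIDEO_DECODE_CLASS][0] = 0;
	table[VIDEO_DECODE_CLASS][2] = 2;

	assert(table[VIDEO_DECODE_CLASS][1] == GUC_MAX_INSTANCES_PER_CLASS);
	assert(table[VIDEO_DECODE_CLASS][2] == 2);
	return 0;
}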
*/ #define LR_HW_CONTEXT_SIZE (80 * sizeof(u32)) -/* The ads obj includes the struct itself and buffers passed to GuC */ -struct __guc_ads_blob { - struct guc_ads ads; - struct guc_policies policies; - struct guc_mmio_reg_state reg_state; - struct guc_gt_system_info system_info; - struct guc_clients_info clients_info; - struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE]; - u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE]; -} __packed; - static void __guc_ads_init(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = gt->i915; struct __guc_ads_blob *blob = guc->ads_blob; const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; u32 base; @@ -99,13 +151,25 @@ static void __guc_ads_init(struct intel_guc *guc) } /* System info */ - blob->system_info.slice_enabled = hweight8(gt->info.sseu.slice_mask); - blob->system_info.rcs_enabled = 1; - blob->system_info.bcs_enabled = 1; + blob->system_info.engine_enabled_masks[RENDER_CLASS] = 1; + blob->system_info.engine_enabled_masks[COPY_ENGINE_CLASS] = 1; + blob->system_info.engine_enabled_masks[VIDEO_DECODE_CLASS] = VDBOX_MASK(gt); + blob->system_info.engine_enabled_masks[VIDEO_ENHANCEMENT_CLASS] = VEBOX_MASK(gt); + + blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED] = + hweight8(gt->info.sseu.slice_mask); + blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK] = + gt->info.vdbox_sfc_access; + + if (INTEL_GEN(i915) >= 12 && !IS_DGFX(i915)) { + u32 distdbreg = intel_uncore_read(gt->uncore, + GEN12_DIST_DBS_POPULATED); + blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI] = + ((distdbreg >> GEN12_DOORBELLS_PER_SQIDI_SHIFT) & + GEN12_DOORBELLS_PER_SQIDI) + 1; + } - blob->system_info.vdbox_enable_mask = VDBOX_MASK(gt); - blob->system_info.vebox_enable_mask = VEBOX_MASK(gt); - blob->system_info.vdbox_sfc_support_mask = gt->info.vdbox_sfc_access; + guc_mapping_table_init(guc_to_gt(guc), &blob->system_info); base = intel_guc_ggtt_offset(guc, guc->ads_vma); @@ -118,11 +182,12 @@ static void __guc_ads_init(struct intel_guc *guc) /* ADS */ blob->ads.scheduler_policies = base + ptr_offset(blob, policies); - blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer); - blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state); blob->ads.gt_system_info = base + ptr_offset(blob, system_info); blob->ads.clients_info = base + ptr_offset(blob, clients_info); + /* Private Data */ + blob->ads.private_data = base + guc_ads_private_data_offset(guc); + i915_gem_object_flush_map(guc->ads_vma->obj); } @@ -135,14 +200,15 @@ static void __guc_ads_init(struct intel_guc *guc) */ int intel_guc_ads_create(struct intel_guc *guc) { - const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob)); + u32 size; int ret; GEM_BUG_ON(guc->ads_vma); + size = guc_ads_blob_size(guc); + ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma, (void **)&guc->ads_blob); - if (ret) return ret; @@ -154,6 +220,19 @@ int intel_guc_ads_create(struct intel_guc *guc) void intel_guc_ads_destroy(struct intel_guc *guc) { i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP); + guc->ads_blob = NULL; +} + +static void guc_ads_private_data_reset(struct intel_guc *guc) +{ + u32 size; + + size = guc_ads_private_data_size(guc); + if (!size) + return; + + memset((void *)guc->ads_blob + guc_ads_private_data_offset(guc), 0, + size); } /** @@ -168,5 +247,8 @@ void intel_guc_ads_reset(struct intel_guc *guc) { if (!guc->ads_vma) return; 
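One detail of the GEN12 path in __guc_ads_init() above: bits [23:16] of GEN12_DIST_DBS_POPULATED apparently encode (doorbells per SQIDI - 1), hence the + 1 when filling generic_gt_sysinfo. A standalone check of that decode, using a made-up register value:

#include <assert.h>

#define GEN12_DOORBELLS_PER_SQIDI_SHIFT	16
#define GEN12_DOORBELLS_PER_SQIDI	(0xff)

/* same arithmetic as the __guc_ads_init() hunk above */
static unsigned int doorbells_per_sqidi(unsigned int distdbreg)
{
	return ((distdbreg >> GEN12_DOORBELLS_PER_SQIDI_SHIFT) &
		GEN12_DOORBELLS_PER_SQIDI) + 1;
}

int main(void)
{
	assert(doorbells_per_sqidi(0x0003ffff) == 4); /* field 0x03 -> 4 doorbells */
	return 0;
}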
+ __guc_ads_init(guc); + + guc_ads_private_data_reset(guc); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index 11742fca0e9e..fa9e048cc65f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -210,6 +210,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct) GEM_BUG_ON(ct->enabled); i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP); + memset(ct, 0, sizeof(*ct)); } /** diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index d4a87f4c9421..f9d0907ea1a5 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -76,6 +76,7 @@ static inline bool guc_ready(struct intel_uncore *uncore, u32 *status) static int guc_wait_ucode(struct intel_uncore *uncore) { + struct drm_device *drm = &uncore->i915->drm; u32 status; int ret; @@ -90,15 +91,27 @@ static int guc_wait_ucode(struct intel_uncore *uncore) ret = wait_for(guc_ready(uncore, &status), 100); DRM_DEBUG_DRIVER("GuC status %#x\n", status); - if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) { - DRM_ERROR("GuC firmware signature verification failed\n"); - ret = -ENOEXEC; - } - - if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) { - DRM_ERROR("GuC firmware exception. EIP: %#x\n", - intel_uncore_read(uncore, SOFT_SCRATCH(13))); - ret = -ENXIO; + if (ret) { + drm_err(drm, "GuC load failed: status = 0x%08X\n", status); + drm_err(drm, "GuC load failed: status: Reset = %d, " + "BootROM = 0x%02X, UKernel = 0x%02X, " + "MIA = 0x%02X, Auth = 0x%02X\n", + REG_FIELD_GET(GS_MIA_IN_RESET, status), + REG_FIELD_GET(GS_BOOTROM_MASK, status), + REG_FIELD_GET(GS_UKERNEL_MASK, status), + REG_FIELD_GET(GS_MIA_MASK, status), + REG_FIELD_GET(GS_AUTH_STATUS_MASK, status)); + + if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) { + drm_err(drm, "GuC firmware signature verification failed\n"); + ret = -ENOEXEC; + } + + if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) { + drm_err(drm, "GuC firmware exception. 
EIP: %#x\n", + intel_uncore_read(uncore, SOFT_SCRATCH(13))); + ret = -ENXIO; + } } return ret; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index a6b733c146c9..79c560d9c0b6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -26,8 +26,8 @@ #define GUC_VIDEO_ENGINE2 4 #define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) -#define GUC_MAX_ENGINE_CLASSES 5 -#define GUC_MAX_INSTANCES_PER_CLASS 16 +#define GUC_MAX_ENGINE_CLASSES 16 +#define GUC_MAX_INSTANCES_PER_CLASS 32 #define GUC_DOORBELL_INVALID 256 @@ -62,12 +62,7 @@ #define GUC_STAGE_DESC_ATTR_PCH BIT(6) #define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7) -/* New GuC control data */ -#define GUC_CTL_CTXINFO 0 -#define GUC_CTL_CTXNUM_IN16_SHIFT 0 -#define GUC_CTL_BASE_ADDR_SHIFT 12 - -#define GUC_CTL_LOG_PARAMS 1 +#define GUC_CTL_LOG_PARAMS 0 #define GUC_LOG_VALID (1 << 0) #define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1) #define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3) @@ -79,11 +74,11 @@ #define GUC_LOG_ISR_MASK (0x7 << GUC_LOG_ISR_SHIFT) #define GUC_LOG_BUF_ADDR_SHIFT 12 -#define GUC_CTL_WA 2 -#define GUC_CTL_FEATURE 3 +#define GUC_CTL_WA 1 +#define GUC_CTL_FEATURE 2 #define GUC_CTL_DISABLE_SCHEDULER (1 << 14) -#define GUC_CTL_DEBUG 4 +#define GUC_CTL_DEBUG 3 #define GUC_LOG_VERBOSITY_SHIFT 0 #define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT) #define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT) @@ -97,12 +92,37 @@ #define GUC_LOG_DISABLED (1 << 6) #define GUC_PROFILE_ENABLED (1 << 7) -#define GUC_CTL_ADS 5 +#define GUC_CTL_ADS 4 #define GUC_ADS_ADDR_SHIFT 1 #define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT) #define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */ +/* Generic GT SysInfo data types */ +#define GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED 0 +#define GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK 1 +#define GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI 2 +#define GUC_GENERIC_GT_SYSINFO_MAX 16 + +/* + * The class goes in bits [0..2] of the GuC ID, the instance in bits [3..6]. + * Bit 7 can be used for operations that apply to all engine classes&instances. + */ +#define GUC_ENGINE_CLASS_SHIFT 0 +#define GUC_ENGINE_CLASS_MASK (0x7 << GUC_ENGINE_CLASS_SHIFT) +#define GUC_ENGINE_INSTANCE_SHIFT 3 +#define GUC_ENGINE_INSTANCE_MASK (0xf << GUC_ENGINE_INSTANCE_SHIFT) +#define GUC_ENGINE_ALL_INSTANCES BIT(7) + +#define MAKE_GUC_ID(class, instance) \ + (((class) << GUC_ENGINE_CLASS_SHIFT) | \ + ((instance) << GUC_ENGINE_INSTANCE_SHIFT)) + +#define GUC_ID_TO_ENGINE_CLASS(guc_id) \ + (((guc_id) & GUC_ENGINE_CLASS_MASK) >> GUC_ENGINE_CLASS_SHIFT) +#define GUC_ID_TO_ENGINE_INSTANCE(guc_id) \ + (((guc_id) & GUC_ENGINE_INSTANCE_MASK) >> GUC_ENGINE_INSTANCE_SHIFT) + /* Work item for submitting workloads into work queue of GuC. 
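The GuC ID packing introduced in the fwif header above is easy to sanity-check in isolation. The four macros are copied from the hunk; the round trip confirms class and instance occupy disjoint bit ranges:

#include <assert.h>

#define GUC_ENGINE_CLASS_SHIFT		0
#define GUC_ENGINE_CLASS_MASK		(0x7 << GUC_ENGINE_CLASS_SHIFT)
#define GUC_ENGINE_INSTANCE_SHIFT	3
#define GUC_ENGINE_INSTANCE_MASK	(0xf << GUC_ENGINE_INSTANCE_SHIFT)

#define MAKE_GUC_ID(class, instance) \
	(((class) << GUC_ENGINE_CLASS_SHIFT) | \
	 ((instance) << GUC_ENGINE_INSTANCE_SHIFT))

#define GUC_ID_TO_ENGINE_CLASS(guc_id) \
	(((guc_id) & GUC_ENGINE_CLASS_MASK) >> GUC_ENGINE_CLASS_SHIFT)
#define GUC_ID_TO_ENGINE_INSTANCE(guc_id) \
	(((guc_id) & GUC_ENGINE_INSTANCE_MASK) >> GUC_ENGINE_INSTANCE_SHIFT)

int main(void)
{
	unsigned int id = MAKE_GUC_ID(1, 2);	/* class 1, instance 2 */

	assert(id == 0x11);			/* 0b1_0001 */
	assert(GUC_ID_TO_ENGINE_CLASS(id) == 1);
	assert(GUC_ID_TO_ENGINE_INSTANCE(id) == 2);
	return 0;
}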
*/ struct guc_wq_item { u32 header; @@ -336,11 +356,6 @@ struct guc_policies { } __packed; /* GuC MMIO reg state struct */ - - -#define GUC_REGSET_MAX_REGISTERS 64 -#define GUC_S3_SAVE_SPACE_PAGES 10 - struct guc_mmio_reg { u32 offset; u32 value; @@ -348,28 +363,18 @@ struct guc_mmio_reg { #define GUC_REGSET_MASKED (1 << 0) } __packed; -struct guc_mmio_regset { - struct guc_mmio_reg registers[GUC_REGSET_MAX_REGISTERS]; - u32 values_valid; - u32 number_of_registers; -} __packed; - /* GuC register sets */ -struct guc_mmio_reg_state { - struct guc_mmio_regset engine_reg[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; - u32 reserved[98]; +struct guc_mmio_reg_set { + u32 address; + u16 count; + u16 reserved; } __packed; /* HW info */ struct guc_gt_system_info { - u32 slice_enabled; - u32 rcs_enabled; - u32 reserved0; - u32 bcs_enabled; - u32 vdbox_enable_mask; - u32 vdbox_sfc_support_mask; - u32 vebox_enable_mask; - u32 reserved[9]; + u8 mapping_table[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; + u32 engine_enabled_masks[GUC_MAX_ENGINE_CLASSES]; + u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX]; } __packed; /* Clients info */ @@ -390,15 +395,16 @@ struct guc_clients_info { /* GuC Additional Data Struct */ struct guc_ads { - u32 reg_state_addr; - u32 reg_state_buffer; + struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; + u32 reserved0; u32 scheduler_policies; u32 gt_system_info; u32 clients_info; u32 control_data; u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES]; u32 eng_state_size[GUC_MAX_ENGINE_CLASSES]; - u32 reserved[16]; + u32 private_data; + u32 reserved[15]; } __packed; /* GuC logging structures */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h index 1949346e714e..b37fc2ffaef2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h @@ -118,6 +118,11 @@ struct guc_doorbell_info { #define GEN8_DRB_VALID (1<<0) #define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4) +#define GEN12_DIST_DBS_POPULATED _MMIO(0xd08) +#define GEN12_DOORBELLS_PER_SQIDI_SHIFT 16 +#define GEN12_DOORBELLS_PER_SQIDI (0xff) +#define GEN12_SQIDIS_DOORBELL_EXIST (0xffff) + #define DE_GUCRMR _MMIO(0x44054) #define GUC_BCS_RCS_IER _MMIO(0xC550) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index d6f55f70889d..4e6070e95fe9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -231,13 +231,15 @@ static int guc_enable_communication(struct intel_guc *guc) intel_guc_ct_event_handler(&guc->ct); spin_unlock_irq(&i915->irq_lock); - DRM_INFO("GuC communication enabled\n"); + drm_dbg(&i915->drm, "GuC communication enabled\n"); return 0; } static void guc_disable_communication(struct intel_guc *guc) { + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + /* * Events generated during or after CT disable are logged by GuC * via mmio.
Make sure the register is clear before disabling CT since @@ -257,7 +259,7 @@ static void guc_disable_communication(struct intel_guc *guc) */ guc_get_mmio_msg(guc); - DRM_INFO("GuC communication disabled\n"); + drm_dbg(&i915->drm, "GuC communication disabled\n"); } static void __uc_fetch_firmwares(struct intel_uc *uc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 037bcaf3c8b5..180c23e2e25e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -44,24 +44,20 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, * List of required GuC and HuC binaries per-platform. * Must be ordered based on platform + revid, from newer to older. * - * TGL 35.2 is interface-compatible with 33.0 for previous Gens. The deltas - * between 33.0 and 35.2 are only related to new additions to support new Gen12 - * features. - * * Note that RKL uses the same firmware as TGL. */ #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ - fw_def(ROCKETLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 5, 0)) \ - fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 5, 0)) \ - fw_def(JASPERLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \ - fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \ - fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \ - fw_def(COMETLAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \ - fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \ - fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 4, 0, 0)) \ - fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \ - fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 2, 0, 0)) \ - fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 2, 0, 0)) + fw_def(ROCKETLAKE, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \ + fw_def(TIGERLAKE, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl, 7, 5, 0)) \ + fw_def(JASPERLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \ + fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \ + fw_def(ICELAKE, 0, guc_def(icl, 49, 0, 1), huc_def(icl, 9, 0, 0)) \ + fw_def(COMETLAKE, 5, guc_def(cml, 49, 0, 1), huc_def(cml, 4, 0, 0)) \ + fw_def(COFFEELAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \ + fw_def(GEMINILAKE, 0, guc_def(glk, 49, 0, 1), huc_def(glk, 4, 0, 0)) \ + fw_def(KABYLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \ + fw_def(BROXTON, 0, guc_def(bxt, 49, 0, 1), huc_def(bxt, 2, 0, 0)) \ + fw_def(SKYLAKE, 0, guc_def(skl, 49, 0, 1), huc_def(skl, 2, 0, 0)) #define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \ "i915/" \ @@ -372,6 +368,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) } } + if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) + uc_fw->private_data_size = css->private_data_size; + obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size); if (IS_ERR(obj)) { err = PTR_ERR(obj); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index 23d3a423ac0f..99bb1fe1af66 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -88,6 +88,8 @@ struct intel_uc_fw { u32 rsa_size; u32 ucode_size; + + u32 private_data_size; }; #ifdef CONFIG_DRM_I915_DEBUG_GUC diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h index 029214cdedd5..e41ffc7a7fbc 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h +++ 
b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h @@ -69,7 +69,11 @@ struct uc_css_header { #define CSS_SW_VERSION_UC_MAJOR (0xFF << 16) #define CSS_SW_VERSION_UC_MINOR (0xFF << 8) #define CSS_SW_VERSION_UC_PATCH (0xFF << 0) - u32 reserved[14]; + u32 reserved0[13]; + union { + u32 private_data_size; /* only applies to GuC */ + u32 reserved1; + }; u32 header_info; } __packed; static_assert(sizeof(struct uc_css_header) == 128); diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 5b5c71a0b4af..5c1fcac260d3 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -173,22 +173,162 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) int pipe; if (IS_BROXTON(dev_priv)) { + enum transcoder trans; + enum port port; + + /* Clear PIPE, DDI, PHY, HPD before setting new */ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | GEN8_DE_PORT_HOTPLUG(HPD_PORT_C)); + for_each_pipe(dev_priv, pipe) { + vgpu_vreg_t(vgpu, PIPECONF(pipe)) &= + ~(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE); + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; + vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; + } + + for (trans = TRANSCODER_A; trans <= TRANSCODER_EDP; trans++) { + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(trans)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | + TRANS_DDI_PORT_MASK | TRANS_DDI_FUNC_ENABLE); + } + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | + TRANS_DDI_PORT_MASK); + + for (port = PORT_A; port <= PORT_C; port++) { + vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) &= + ~BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) |= + (BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK); + + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port)) &= + ~(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | + PORT_PLL_ENABLE); + + vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) &= + ~(DDI_INIT_DISPLAY_DETECTED | + DDI_BUF_CTL_ENABLE); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE; + } + + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1)); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= + ~PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= + ~PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= ~BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= ~BIT(30); + + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIC_DETECTED; + + /* + * Only 1 PIPE enabled in current vGPU display and PIPE_A is + * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A, + * TRANSCODER_A can be enabled. PORT_x depends on the input of + * setup_virtual_dp_monitor. + */ + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= I965_PIPECONF_ACTIVE; + + /* + * Golden M/N are calculated based on: + * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID), + * DP link clk 1620 MHz and non-constant_n. + * TODO: calculate DP link symbol clk and stream clk m/n. 
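The golden data M/N values that follow can be cross-checked against the parameters the comment states, using the usual DP relation data M/N = (pixel clock * bpp) : (link clock * lanes * 8). A standalone check (that relation is the assumption here; clocks in kHz):

#include <stdio.h>

int main(void)
{
	const double pixel_khz = 154000;	/* from the virtual EDID */
	const double link_khz = 162000;		/* 1.62 GHz DP link / 10 */
	const int bpp = 24, lanes = 4;

	double ratio = (pixel_khz * bpp) / (link_khz * lanes * 8);

	printf("computed %.5f vs golden 0x5b425e/0x800000 = %.5f\n",
	       ratio, (double)0x5b425e / 0x800000);
	/* prints 0.71296 vs 0.71297; the values agree to ~1e-5, consistent
	 * with rounding during M/N reduction. The 63 << TU_SIZE_SHIFT written
	 * alongside is the transfer-unit field, stored as (TU size - 1),
	 * i.e. a 64-symbol TU per i915's TU_SIZE() convention. */
	return 0;
}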
+ */ + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; + vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; + vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; + vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000; + + /* Enable per-DDI/PORT vreg */ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(1); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |= + PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) |= + BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |= + BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &= + ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK); + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_A)) |= + (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | + PORT_PLL_ENABLE); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= + (DDI_BUF_CTL_ENABLE | DDI_INIT_DISPLAY_DETECTED); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) &= + ~DDI_BUF_IS_IDLE; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + TRANS_DDI_FUNC_ENABLE); vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= + PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |= + BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |= + BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &= + ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK); + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_B)) |= + (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | + PORT_PLL_ENABLE); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= + DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= + ~DDI_BUF_IS_IDLE; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (PORT_B << TRANS_DDI_PORT_SHIFT) | + TRANS_DDI_FUNC_ENABLE); vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= + PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |= + BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= + BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &= + ~(BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK); + vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_C)) |= + (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE | + PORT_PLL_REF_SEL | PORT_PLL_LOCK | + PORT_PLL_ENABLE); + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= + DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= + ~DDI_BUF_IS_IDLE; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (PORT_C << TRANS_DDI_PORT_SHIFT) | + TRANS_DDI_FUNC_ENABLE); vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); } @@ -520,6 +660,45 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected) vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= PORTD_HOTPLUG_STATUS_MASK; intel_vgpu_trigger_virtual_event(vgpu,
DP_D_HOTPLUG); + } else if (IS_BROXTON(i915)) { + if (connected) { + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); + } + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= + SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); + } + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= + SFUSE_STRAP_DDIC_DETECTED; + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); + } + } else { + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= + ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); + } + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= + ~SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= + ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); + } + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= + ~SFUSE_STRAP_DDIC_DETECTED; + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= + ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); + } + } + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= + PORTB_HOTPLUG_STATUS_MASK; + intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG); } } diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index a3a4305eda01..897c007ea96a 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -636,9 +636,18 @@ static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + unsigned long offset = index; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); + if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) { + offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT); + mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64; + } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) { + offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT); + mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64; + } + pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu); } @@ -1944,6 +1953,21 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) return ERR_PTR(-ENOMEM); } + mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64)); + if (!mm->ggtt_mm.host_ggtt_aperture) { + vfree(mm->ggtt_mm.virtual_ggtt); + vgpu_free_mm(mm); + return ERR_PTR(-ENOMEM); + } + + mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64)); + if (!mm->ggtt_mm.host_ggtt_hidden) { + vfree(mm->ggtt_mm.host_ggtt_aperture); + vfree(mm->ggtt_mm.virtual_ggtt); + vgpu_free_mm(mm); + return ERR_PTR(-ENOMEM); + } + return mm; } @@ -1971,6 +1995,8 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) invalidate_ppgtt_mm(mm); } else { vfree(mm->ggtt_mm.virtual_ggtt); + vfree(mm->ggtt_mm.host_ggtt_aperture); + vfree(mm->ggtt_mm.host_ggtt_hidden); } vgpu_free_mm(mm); @@ -2852,3 +2878,41 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) intel_vgpu_destroy_all_ppgtt_mm(vgpu); intel_vgpu_reset_ggtt(vgpu, true); } + +/** + * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries + * @gvt: intel gvt device + * + * This function is called at driver resume stage to restore + * GGTT entries of every vGPU. 
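For the PM save/restore plumbing above (ggtt_set_host_entry) and the restore loop that follows: the shadow arrays are indexed by GGTT page index relative to the region base, so a PTE's slot is simply its gmadr page offset into the aperture or hidden range. A small sketch of that indexing (addresses hypothetical; PAGE_SHIFT of 12 assumed, as on x86):

#include <assert.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long aperture_base = 0x10000000;	/* hypothetical GMADR base */
	unsigned long gmadr = 0x10003000;		/* address being mapped */
	unsigned long index = gmadr >> PAGE_SHIFT;	/* GGTT entry index */
	unsigned long slot = index - (aperture_base >> PAGE_SHIFT);

	/* host_ggtt_aperture[3] shadows this PTE; on resume it is written
	 * back only if _PAGE_PRESENT is set, as intel_gvt_restore_ggtt() does */
	assert(slot == 3);
	return 0;
}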
+ * + */ +void intel_gvt_restore_ggtt(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + struct intel_vgpu_mm *mm; + int id; + gen8_pte_t pte; + u32 idx, num_low, num_hi, offset; + + /* Restore dirty host ggtt for all vGPUs */ + idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { + mm = vgpu->gtt.ggtt_mm; + + num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; + offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; + for (idx = 0; idx < num_low; idx++) { + pte = mm->ggtt_mm.host_ggtt_aperture[idx]; + if (pte & _PAGE_PRESENT) + write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); + } + + num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; + offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; + for (idx = 0; idx < num_hi; idx++) { + pte = mm->ggtt_mm.host_ggtt_hidden[idx]; + if (pte & _PAGE_PRESENT) + write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); + } + } +} diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 52d0d88abd86..b0e173f2d990 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -164,6 +164,9 @@ struct intel_vgpu_mm { } ppgtt_mm; struct { void *virtual_ggtt; + /* Save/restore for PM */ + u64 *host_ggtt_aperture; + u64 *host_ggtt_hidden; struct list_head partial_pte_list; } ggtt_mm; }; @@ -280,5 +283,6 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes); void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu); +void intel_gvt_restore_ggtt(struct intel_gvt *gvt); #endif /* _GVT_GTT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index c7c561237883..d1d8ee4a5f16 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -312,7 +312,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915) gvt_dbg_core("init gvt device\n"); - idr_init(&gvt->vgpu_idr); + idr_init_base(&gvt->vgpu_idr, 1); spin_lock_init(&gvt->scheduler.mmio_context_lock); mutex_init(&gvt->lock); mutex_init(&gvt->sched_lock); @@ -406,7 +406,16 @@ out_clean_idr: } int -intel_gvt_register_hypervisor(struct intel_gvt_mpt *m) +intel_gvt_pm_resume(struct intel_gvt *gvt) +{ + intel_gvt_restore_fence(gvt); + intel_gvt_restore_mmio(gvt); + intel_gvt_restore_ggtt(gvt); + return 0; +} + +int +intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m) { int ret; void *gvt; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 9831361f181e..cf3578e3f4dd 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -56,7 +56,7 @@ struct intel_gvt_host { struct device *dev; bool initialized; int hypervisor_type; - struct intel_gvt_mpt *mpt; + const struct intel_gvt_mpt *mpt; }; extern struct intel_gvt_host intel_gvt_host; @@ -255,7 +255,9 @@ struct intel_gvt_mmio { #define F_CMD_ACCESS (1 << 3) /* This reg has been accessed by a VM */ #define F_ACCESSED (1 << 4) -/* This reg has been accessed through GPU commands */ +/* This reg requires save & restore during host PM suspend/resume */ +#define F_PM_SAVE (1 << 5) +/* This reg could be accessed by unaligned address */ #define F_UNALIGN (1 << 6) /* This reg is in GVT's mmio save-restore list and in hardware * logical context image @@ -685,6 +687,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_init(struct intel_gvt *gvt); void intel_gvt_debugfs_clean(struct intel_gvt *gvt); +int intel_gvt_pm_resume(struct intel_gvt *gvt); #include "trace.h" #include "mpt.h" diff --git a/drivers/gpu/drm/i915/gvt/handlers.c
b/drivers/gpu/drm/i915/gvt/handlers.c index ce93079cf933..aa7e75cb3e6a 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -3120,9 +3120,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS, - NULL, gen9_trtte_write); - MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); + MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE, + NULL, gen9_trtte_write); + MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE, + NULL, gen9_trtt_chicken_write); MMIO_D(_MMIO(0x46430), D_SKL_PLUS); @@ -3671,3 +3672,40 @@ default_rw: intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) : intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes); } + +void intel_gvt_restore_fence(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + int i, id; + + idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { + mmio_hw_access_pre(gvt->gt); + for (i = 0; i < vgpu_fence_sz(vgpu); i++) + intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i))); + mmio_hw_access_post(gvt->gt); + } +} + +static inline int mmio_pm_restore_handler(struct intel_gvt *gvt, + u32 offset, void *data) +{ + struct intel_vgpu *vgpu = data; + struct drm_i915_private *dev_priv = gvt->gt->i915; + + if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE) + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); + + return 0; +} + +void intel_gvt_restore_mmio(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + int id; + + idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { + mmio_hw_access_pre(gvt->gt); + intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu); + mmio_hw_access_post(gvt->gt); + } +} diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index ad8a9df49f29..d830b6c65284 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -2099,7 +2099,7 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) return ret; } -static struct intel_gvt_mpt kvmgt_mpt = { +static const struct intel_gvt_mpt kvmgt_mpt = { .type = INTEL_GVT_HYPERVISOR_KVM, .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index b6811f6a230d..24210b1eaec5 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -280,6 +280,11 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK; + vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |= + SKL_FUSE_DOWNLOAD_STATUS | + SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | + SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | + SKL_FUSE_PG_DIST_STATUS(SKL_PG2); } } else { #define GVT_GEN8_MMIO_RESET_OFFSET (0x44200) diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index cc4812648bf4..9e862dc73579 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -104,4 +104,8 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); + +void intel_gvt_restore_fence(struct intel_gvt *gvt); +void intel_gvt_restore_mmio(struct intel_gvt *gvt); + #endif diff --git a/drivers/gpu/drm/i915/gvt/mpt.h 
b/drivers/gpu/drm/i915/gvt/mpt.h index 9ad224df9c68..6f92cde71971 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -392,7 +392,7 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn( return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn); } -int intel_gvt_register_hypervisor(struct intel_gvt_mpt *); +int intel_gvt_register_hypervisor(const struct intel_gvt_mpt *); void intel_gvt_unregister_hypervisor(void); #endif /* _GVT_MPT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index f6d7e33c7099..1c8e63f84134 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -393,7 +393,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, mutex_init(&vgpu->dmabuf_lock); INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head); INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL); - idr_init(&vgpu->object_idr); + idr_init_base(&vgpu->object_idr, 1); intel_vgpu_init_cfg_space(vgpu, param->primary); vgpu->d3_entered = false; diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index e88970256e8e..93265951fdbb 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1452,43 +1452,42 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, * space. Parsing should be faster in some cases this way. */ batch_end = cmd + batch_length / sizeof(*batch_end); - do { - u32 length; - - if (*cmd == MI_BATCH_BUFFER_END) - break; - - desc = find_cmd(engine, *cmd, desc, &default_desc); - if (!desc) { - DRM_DEBUG("CMD: Unrecognized command: 0x%08X\n", *cmd); - ret = -EINVAL; - break; - } + while (*cmd != MI_BATCH_BUFFER_END) { + u32 length = 1; + + if (*cmd != MI_NOOP) { /* MI_NOOP == 0 */ + desc = find_cmd(engine, *cmd, desc, &default_desc); + if (!desc) { + DRM_DEBUG("CMD: Unrecognized command: 0x%08X\n", *cmd); + ret = -EINVAL; + break; + } - if (desc->flags & CMD_DESC_FIXED) - length = desc->length.fixed; - else - length = (*cmd & desc->length.mask) + LENGTH_BIAS; + if (desc->flags & CMD_DESC_FIXED) + length = desc->length.fixed; + else + length = (*cmd & desc->length.mask) + LENGTH_BIAS; - if ((batch_end - cmd) < length) { - DRM_DEBUG("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n", - *cmd, - length, - batch_end - cmd); - ret = -EINVAL; - break; - } + if ((batch_end - cmd) < length) { + DRM_DEBUG("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n", + *cmd, + length, + batch_end - cmd); + ret = -EINVAL; + break; + } - if (!check_cmd(engine, desc, cmd, length)) { - ret = -EACCES; - break; - } + if (!check_cmd(engine, desc, cmd, length)) { + ret = -EACCES; + break; + } - if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) { - ret = check_bbstart(cmd, offset, length, batch_length, - batch_addr, shadow_addr, - jump_whitelist); - break; + if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) { + ret = check_bbstart(cmd, offset, length, batch_length, + batch_addr, shadow_addr, + jump_whitelist); + break; + } } if (!IS_ERR_OR_NULL(jump_whitelist)) @@ -1501,7 +1500,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, ret = -EINVAL; break; } - } while (1); + } if (trampoline) { /* diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 200f6b86f864..77e76b665098 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -725,7 +725,7 @@ static int i915_gpu_info_open(struct inode *inode, struct file 
*file) gpu = NULL; with_intel_runtime_pm(&i915->runtime_pm, wakeref) - gpu = i915_gpu_coredump(i915); + gpu = i915_gpu_coredump(&i915->gt, ALL_ENGINES); if (IS_ERR(gpu)) return PTR_ERR(gpu); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index d6e25212d5c0..320856b665a1 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -87,7 +87,7 @@ #include "intel_sideband.h" #include "vlv_suspend.h" -static struct drm_driver driver; +static const struct drm_driver driver; static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) { @@ -1271,6 +1271,8 @@ static int i915_drm_resume(struct drm_device *dev) intel_power_domains_enable(dev_priv); + intel_gvt_resume(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return 0; @@ -1759,7 +1761,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW), }; -static struct drm_driver driver = { +static const struct drm_driver driver = { /* Don't use MTRRs here; the Xserver or userspace app should * deal with them for Intel hardware. */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d7765b31fbef..15be8debae54 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -508,7 +508,6 @@ struct i915_psr { bool dc3co_enabled; u32 dc3co_exit_delay; struct delayed_work dc3co_work; - bool force_mode_changed; struct drm_dp_vsc_sdp vsc; }; @@ -1560,6 +1559,7 @@ extern const struct i915_rev_steppings kbl_revids[]; (IS_ICELAKE(p) && IS_REVID(p, since, until)) #define EHL_REVID_A0 0x0 +#define EHL_REVID_B0 0x1 #define IS_JSL_EHL_REVID(p, since, until) \ (IS_JSL_EHL(p) && IS_REVID(p, since, until)) @@ -1641,8 +1641,7 @@ tgl_revids_get(struct drm_i915_private *dev_priv) #define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop) #define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb) #define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6) -#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \ - IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv)) +#define HAS_WT(dev_priv) HAS_EDRAM(dev_priv) #define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index bb0c12975f38..58276694c848 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -180,30 +180,6 @@ try_again: } static int -i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file) -{ - void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; - char __user *user_data = u64_to_user_ptr(args->data_ptr); - - /* - * We manually control the domain here and pretend that it - * remains coherent i.e. in the GTT domain, like shmem_pwrite. 
- */ - i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); - - if (copy_from_user(vaddr, user_data, args->size)) - return -EFAULT; - - drm_clflush_virt_range(vaddr, args->size); - intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); - - i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); - return 0; -} - -static int i915_gem_create(struct drm_file *file, struct intel_memory_region *mr, u64 *size_p, @@ -527,6 +503,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, trace_i915_gem_object_pread(obj, args->offset, args->size); + ret = -ENODEV; + if (obj->ops->pread) + ret = obj->ops->pread(obj, args); + if (ret != -ENODEV) + goto out; + ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); @@ -866,8 +848,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (ret == -EFAULT || ret == -ENOSPC) { if (i915_gem_object_has_struct_page(obj)) ret = i915_gem_shmem_pwrite(obj, args); - else - ret = i915_gem_phys_pwrite(obj, args, file); } i915_gem_object_unpin_pages(obj); diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 6501939929d5..e1a66c8245b8 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -238,7 +238,7 @@ found: } /** - * i915_gem_evict_for_vma - Evict vmas to make room for binding a new one + * i915_gem_evict_for_node - Evict vmas to make room for binding a new one * @vm: address space to evict from * @target: range (and color) to evict for * @flags: additional flags to control the eviction algorithm diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index cf6e47adfde6..d8cac4c5881f 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -570,6 +570,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, ee->vm_info.pp_dir_base); } } + err_printf(m, " hung: %u\n", ee->hung); err_printf(m, " engine reset count: %u\n", ee->reset_count); for (n = 0; n < ee->num_ports; n++) { @@ -1026,6 +1027,7 @@ i915_vma_coredump_create(const struct intel_gt *gt, dma_addr_t dma; for_each_sgt_daddr(dma, iter, vma->pages) { + mutex_lock(&ggtt->error_mutex); ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); mb(); @@ -1035,6 +1037,10 @@ i915_vma_coredump_create(const struct intel_gt *gt, (void __force *)s, dst, true); io_mapping_unmap(s); + + mb(); + ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); + mutex_unlock(&ggtt->error_mutex); if (ret) break; } @@ -1451,6 +1457,7 @@ capture_engine(struct intel_engine_cs *engine, static void gt_record_engines(struct intel_gt_coredump *gt, + intel_engine_mask_t engine_mask, struct i915_vma_compress *compress) { struct intel_engine_cs *engine; @@ -1466,6 +1473,8 @@ gt_record_engines(struct intel_gt_coredump *gt, if (!ee) continue; + ee->hung = engine->mask & engine_mask; + gt->simulated |= ee->simulated; if (ee->simulated) { kfree(ee); @@ -1505,25 +1514,6 @@ gt_record_uc(struct intel_gt_coredump *gt, return error_uc; } -static void gt_capture_prepare(struct intel_gt_coredump *gt) -{ - struct i915_ggtt *ggtt = gt->_gt->ggtt; - - mutex_lock(&ggtt->error_mutex); -} - -static void gt_capture_finish(struct intel_gt_coredump *gt) -{ - struct i915_ggtt *ggtt = gt->_gt->ggtt; - - if (drm_mm_node_allocated(&ggtt->error_capture)) - ggtt->vm.clear_range(&ggtt->vm, - ggtt->error_capture.start, - PAGE_SIZE); - - mutex_unlock(&ggtt->error_mutex); -} - /* Capture all registers which don't fit into another category. 
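The pread hook added above uses an optional-callback convention worth spelling out: -ENODEV from, or the absence of, the backend hook means "fall through to the generic path", while any other return value, success or error, is final. A generic standalone sketch of the pattern (names here are illustrative, not the i915 API):

#include <errno.h>

struct pread_args;

struct obj_ops {
	/* optional; NULL (or returning -ENODEV) selects the generic path */
	int (*pread)(void *obj, struct pread_args *args);
};

static int generic_pread(void *obj, struct pread_args *args)
{
	return 0;	/* stand-in for the page-by-page copy */
}

static int do_pread(const struct obj_ops *ops, void *obj,
		    struct pread_args *args)
{
	int ret = -ENODEV;

	if (ops->pread)
		ret = ops->pread(obj, args);
	if (ret != -ENODEV)
		return ret;	/* the backend handled it, success or error */

	return generic_pread(obj, args);
}

int main(void)
{
	const struct obj_ops ops = { 0 };	/* no backend hook */

	return do_pread(&ops, 0, 0);		/* takes the generic path */
}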
*/ static void gt_record_regs(struct intel_gt_coredump *gt) { @@ -1669,24 +1659,25 @@ static u32 generate_ecode(const struct intel_engine_coredump *ee) static const char *error_msg(struct i915_gpu_coredump *error) { struct intel_engine_coredump *first = NULL; + unsigned int hung_classes = 0; struct intel_gt_coredump *gt; - intel_engine_mask_t engines; int len; - engines = 0; for (gt = error->gt; gt; gt = gt->next) { struct intel_engine_coredump *cs; - if (gt->engine && !first) - first = gt->engine; - - for (cs = gt->engine; cs; cs = cs->next) - engines |= cs->engine->mask; + for (cs = gt->engine; cs; cs = cs->next) { + if (cs->hung) { + hung_classes |= BIT(cs->engine->uabi_class); + if (!first) + first = cs; + } + } } len = scnprintf(error->error_msg, sizeof(error->error_msg), "GPU HANG: ecode %d:%x:%08x", - INTEL_GEN(error->i915), engines, + INTEL_GEN(error->i915), hung_classes, generate_ecode(first)); if (first && first->context.pid) { /* Just show the first executing process, more is confusing */ @@ -1782,8 +1773,6 @@ i915_vma_capture_prepare(struct intel_gt_coredump *gt) return NULL; } - gt_capture_prepare(gt); - return compress; } @@ -1793,14 +1782,14 @@ void i915_vma_capture_finish(struct intel_gt_coredump *gt, if (!compress) return; - gt_capture_finish(gt); - compress_fini(compress); kfree(compress); } -struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915) +struct i915_gpu_coredump * +i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) { + struct drm_i915_private *i915 = gt->i915; struct i915_gpu_coredump *error; /* Check if GPU capture has been disabled */ @@ -1812,7 +1801,7 @@ struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915) if (!error) return ERR_PTR(-ENOMEM); - error->gt = intel_gt_coredump_alloc(&i915->gt, ALLOW_FAIL); + error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL); if (error->gt) { struct i915_vma_compress *compress; @@ -1824,7 +1813,7 @@ struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915) } gt_record_info(error->gt); - gt_record_engines(error->gt, compress); + gt_record_engines(error->gt, engine_mask, compress); if (INTEL_INFO(i915)->has_gt_uc) error->gt->uc = gt_record_uc(error->gt, compress); @@ -1871,20 +1860,23 @@ void i915_error_state_store(struct i915_gpu_coredump *error) /** * i915_capture_error_state - capture an error record for later analysis - * @i915: i915 device + * @gt: intel_gt which originated the hang + * @engine_mask: hung engines + * * * Should be called when an error is detected (either a hang or an error * interrupt) to capture error state from the time of the error. Fills * out a structure which becomes available in debugfs for user level tools * to pick up. 
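Caller-side, the reworked capture below takes the intel_gt plus a mask of the engines believed hung; gt_record_engines() latches that into ee->hung, and the hang message's ecode now encodes hung uabi classes rather than a raw engine mask. A toy model of that accumulation (class values follow the drm uapi convention; the gen number is made up):

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int render_class = 0;	/* I915_ENGINE_CLASS_RENDER */
	unsigned int video_class = 2;	/* I915_ENGINE_CLASS_VIDEO */
	unsigned int hung_classes = 0;

	/* error_msg() accumulates only engines with ee->hung set */
	hung_classes |= BIT(render_class);
	hung_classes |= BIT(video_class);

	printf("GPU HANG: ecode 12:%x:...\n", hung_classes); /* ecode 12:5:... */
	return 0;
}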
*/ -void i915_capture_error_state(struct drm_i915_private *i915) +void i915_capture_error_state(struct intel_gt *gt, + intel_engine_mask_t engine_mask) { struct i915_gpu_coredump *error; - error = i915_gpu_coredump(i915); + error = i915_gpu_coredump(gt, engine_mask); if (IS_ERR(error)) { - cmpxchg(&i915->gpu_error.first_error, NULL, error); + cmpxchg(&gt->i915->gpu_error.first_error, NULL, error); return; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 0220b0992808..16bc42de4b84 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -59,6 +59,7 @@ struct i915_request_coredump { struct intel_engine_coredump { const struct intel_engine_cs *engine; + bool hung; bool simulated; u32 reset_count; @@ -218,8 +219,10 @@ struct drm_i915_error_state_buf { __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); -struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915); -void i915_capture_error_state(struct drm_i915_private *i915); +struct i915_gpu_coredump *i915_gpu_coredump(struct intel_gt *gt, + intel_engine_mask_t engine_mask); +void i915_capture_error_state(struct intel_gt *gt, + intel_engine_mask_t engine_mask); struct i915_gpu_coredump * i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp); @@ -271,7 +274,8 @@ void i915_disable_error_state(struct drm_i915_private *i915, int err); #else -static inline void i915_capture_error_state(struct drm_i915_private *i915) +static inline void +i915_capture_error_state(struct intel_gt *gt, intel_engine_mask_t engine_mask) { } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e0eb32bd9607..dc6febc63f1c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3058,8 +3058,10 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) GEN3_IRQ_RESET(uncore, SDE); - /* Wa_14010685332:icl,jsl,ehl,tgl,rkl */ - if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) { + /* Wa_14010685332:cnp/cmp,tgp,adp */ + if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP || + (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && + INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) { intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); intel_uncore_rmw(uncore, SOUTH_CHICKEN1, @@ -4204,10 +4206,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) struct drm_device *dev = &dev_priv->drm; int i; - intel_hpd_init_pins(dev_priv); - - intel_hpd_init_work(dev_priv); - INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work); for (i = 0; i < MAX_L3_SLICES; ++i) dev_priv->l3_parity.remap_info[i] = NULL; @@ -4216,6 +4214,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11) dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16; + if (!HAS_DISPLAY(dev_priv)) + return; + + intel_hpd_init_pins(dev_priv); + + intel_hpd_init_work(dev_priv); + dev->vblank_disable_immediate = true; /* Most platforms treat the display irq block as an always-on @@ -4237,21 +4242,18 @@ */ dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); - if (HAS_GMCH(dev_priv)) { - if (I915_HAS_HOTPLUG(dev_priv)) - dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - } else { - if (HAS_PCH_DG1(dev_priv)) - dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup; - else if (INTEL_GEN(dev_priv) >= 11) - dev_priv->display.hpd_irq_setup =
gen11_hpd_irq_setup; - else if (IS_GEN9_LP(dev_priv)) - dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; - else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) - dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; - else - dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; - } + if (HAS_PCH_DG1(dev_priv)) + dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup; + else if (INTEL_GEN(dev_priv) >= 11) + dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; + else if (IS_GEN9_LP(dev_priv)) + dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) + dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; + else if (HAS_GMCH(dev_priv) && I915_HAS_HOTPLUG(dev_priv)) + dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; + else + dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; } /** diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 1fe390727d80..11fe790b1969 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -1151,9 +1151,13 @@ static int __init i915_init(void) return 0; } + i915_pmu_init(); + err = pci_register_driver(&i915_pci_driver); - if (err) + if (err) { + i915_pmu_exit(); return err; + } i915_perf_sysctl_register(); return 0; @@ -1167,6 +1171,7 @@ static void __exit i915_exit(void) i915_perf_sysctl_unregister(); pci_unregister_driver(&i915_pci_driver); i915_globals_exit(); + i915_pmu_exit(); } module_init(i915_init); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index e94976976571..3b12c8ff7182 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -625,7 +625,8 @@ static int append_oa_sample(struct i915_perf_stream *stream, } /** - * Copies all buffered OA reports into userspace read() buffer. + * gen8_append_oa_reports - Copies all buffered OA reports into + * userspace read() buffer. * @stream: An i915-perf stream opened for OA metrics * @buf: destination buffer given by userspace * @count: the number of bytes userspace wants to read @@ -909,15 +910,21 @@ static int gen8_oa_read(struct i915_perf_stream *stream, DRM_I915_PERF_RECORD_OA_REPORT_LOST); if (ret) return ret; - intel_uncore_write(uncore, oastatus_reg, - oastatus & ~GEN8_OASTATUS_REPORT_LOST); + + intel_uncore_rmw(uncore, oastatus_reg, + GEN8_OASTATUS_COUNTER_OVERFLOW | + GEN8_OASTATUS_REPORT_LOST, + IS_GEN_RANGE(uncore->i915, 8, 10) ? + (GEN8_OASTATUS_HEAD_POINTER_WRAP | + GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0); } return gen8_append_oa_reports(stream, buf, count, offset); } /** - * Copies all buffered OA reports into userspace read() buffer. + * gen7_append_oa_reports - Copies all buffered OA reports into + * userspace read() buffer. 
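/*
 * Illustrative sketch, not from the patch: intel_uncore_rmw(uncore, reg,
 * clear, set) is a read-modify-write over an MMIO register, roughly as
 * below. The gen8_oa_read() hunk above relies on it to ack
 * COUNTER_OVERFLOW and REPORT_LOST in GEN8_OASTATUS while, on Gen8-10
 * only, also passing the HEAD/TAIL_POINTER_WRAP bits as the set mask.
 */
static inline void example_uncore_rmw(struct intel_uncore *uncore,
                                      i915_reg_t reg, u32 clear, u32 set)
{
        u32 old, val;

        old = intel_uncore_read(uncore, reg);
        val = (old & ~clear) | set;     /* drop 'clear' bits, force 'set' bits */
        intel_uncore_write(uncore, reg, val);
}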
* @stream: An i915-perf stream opened for OA metrics * @buf: destination buffer given by userspace * @count: the number of bytes userspace wants to read @@ -3227,7 +3234,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream, } /** - * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs + * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs * @stream: An i915 perf stream * @cmd: the ioctl request * @arg: the ioctl data diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 69c0fa20eba1..cd786ad12be7 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -30,6 +30,7 @@ #define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS) static cpumask_t i915_pmu_cpumask; +static unsigned int i915_pmu_target_cpu = -1; static u8 engine_config_sample(u64 config) { @@ -445,6 +446,8 @@ static void i915_pmu_event_destroy(struct perf_event *event) container_of(event->pmu, typeof(*i915), pmu.base); drm_WARN_ON(&i915->drm, event->parent); + + drm_dev_put(&i915->drm); } static int @@ -510,8 +513,12 @@ static int i915_pmu_event_init(struct perf_event *event) { struct drm_i915_private *i915 = container_of(event->pmu, typeof(*i915), pmu.base); + struct i915_pmu *pmu = &i915->pmu; int ret; + if (pmu->closed) + return -ENODEV; + if (event->attr.type != event->pmu->type) return -ENOENT; @@ -536,8 +543,10 @@ static int i915_pmu_event_init(struct perf_event *event) if (ret) return ret; - if (!event->parent) + if (!event->parent) { + drm_dev_get(&i915->drm); event->destroy = i915_pmu_event_destroy; + } return 0; } @@ -594,9 +603,16 @@ static u64 __i915_pmu_event_read(struct perf_event *event) static void i915_pmu_event_read(struct perf_event *event) { + struct drm_i915_private *i915 = + container_of(event->pmu, typeof(*i915), pmu.base); struct hw_perf_event *hwc = &event->hw; + struct i915_pmu *pmu = &i915->pmu; u64 prev, new; + if (pmu->closed) { + event->hw.state = PERF_HES_STOPPED; + return; + } again: prev = local64_read(&hwc->prev_count); new = __i915_pmu_event_read(event); @@ -724,6 +740,13 @@ static void i915_pmu_disable(struct perf_event *event) static void i915_pmu_event_start(struct perf_event *event, int flags) { + struct drm_i915_private *i915 = + container_of(event->pmu, typeof(*i915), pmu.base); + struct i915_pmu *pmu = &i915->pmu; + + if (pmu->closed) + return; + i915_pmu_enable(event); event->hw.state = 0; } @@ -738,6 +761,13 @@ static void i915_pmu_event_stop(struct perf_event *event, int flags) static int i915_pmu_event_add(struct perf_event *event, int flags) { + struct drm_i915_private *i915 = + container_of(event->pmu, typeof(*i915), pmu.base); + struct i915_pmu *pmu = &i915->pmu; + + if (pmu->closed) + return -ENODEV; + if (flags & PERF_EF_START) i915_pmu_event_start(event, flags); @@ -1020,25 +1050,39 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node) static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node) { struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node); - unsigned int target; + unsigned int target = i915_pmu_target_cpu; GEM_BUG_ON(!pmu->base.event_init); + /* + * Unregistering an instance generates a CPU offline event which we must + * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask. 
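/*
 * Illustrative sketch, not the exact i915 code: the pmu->closed flag being
 * added above follows a common teardown pattern -- every perf callback
 * bails out once the owning device is unregistering, and the unregister
 * path publishes the flag and then uses synchronize_rcu() to wait for
 * callbacks already in flight (they are all atomic, non-sleeping) before
 * tearing the PMU down. event_to_pmu() is a hypothetical helper.
 */
static int example_event_add(struct perf_event *event, int flags)
{
        struct i915_pmu *pmu = event_to_pmu(event);

        if (pmu->closed)
                return -ENODEV;

        /* ... normal add path ... */
        return 0;
}

static void example_pmu_unregister(struct i915_pmu *pmu)
{
        pmu->closed = true;
        synchronize_rcu();      /* wait out callbacks that saw !closed */

        /* ... now safe to cancel timers and unregister the PMU ... */
}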
+ */ + if (pmu->closed) + return 0; + if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) { target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu); + /* Migrate events if there is a valid target */ if (target < nr_cpu_ids) { cpumask_set_cpu(target, &i915_pmu_cpumask); - perf_pmu_migrate_context(&pmu->base, cpu, target); + i915_pmu_target_cpu = target; } } + if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) { + perf_pmu_migrate_context(&pmu->base, cpu, target); + pmu->cpuhp.cpu = target; + } + return 0; } -static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu) +static enum cpuhp_state cpuhp_slot = CPUHP_INVALID; + +void i915_pmu_init(void) { - enum cpuhp_state slot; int ret; ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, @@ -1046,27 +1090,29 @@ static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu) i915_pmu_cpu_online, i915_pmu_cpu_offline); if (ret < 0) - return ret; + pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n", + ret); + else + cpuhp_slot = ret; +} - slot = ret; - ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node); - if (ret) { - cpuhp_remove_multi_state(slot); - return ret; - } +void i915_pmu_exit(void) +{ + if (cpuhp_slot != CPUHP_INVALID) + cpuhp_remove_multi_state(cpuhp_slot); +} - pmu->cpuhp.slot = slot; - return 0; +static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu) +{ + if (cpuhp_slot == CPUHP_INVALID) + return -EINVAL; + + return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node); } static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu) { - struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); - - drm_WARN_ON(&i915->drm, pmu->cpuhp.slot == CPUHP_INVALID); - drm_WARN_ON(&i915->drm, cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node)); - cpuhp_remove_multi_state(pmu->cpuhp.slot); - pmu->cpuhp.slot = CPUHP_INVALID; + cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node); } static bool is_igp(struct drm_i915_private *i915) @@ -1100,7 +1146,7 @@ void i915_pmu_register(struct drm_i915_private *i915) spin_lock_init(&pmu->lock); hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pmu->timer.function = i915_sample; - pmu->cpuhp.slot = CPUHP_INVALID; + pmu->cpuhp.cpu = -1; if (!is_igp(i915)) { pmu->name = kasprintf(GFP_KERNEL, @@ -1167,7 +1213,13 @@ void i915_pmu_unregister(struct drm_i915_private *i915) if (!pmu->base.event_init) return; - drm_WARN_ON(&i915->drm, pmu->enable); + /* + * "Disconnect" the PMU callbacks - since all are atomic synchronize_rcu + * ensures all currently executing ones will have exited before we + * proceed with unregistration. + */ + pmu->closed = true; + synchronize_rcu(); hrtimer_cancel(&pmu->timer); diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 941f0c14037c..a24885ab415c 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -43,13 +43,17 @@ struct i915_pmu { */ struct { struct hlist_node node; - enum cpuhp_state slot; + unsigned int cpu; } cpuhp; /** * @base: PMU base. */ struct pmu base; /** + * @closed: i915 is unregistering. + */ + bool closed; + /** * @name: Name as registered with perf core. 
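/*
 * Illustrative sketch, condensed from the hunks above: the dynamic cpuhp
 * state is now claimed once at module load and shared by every device;
 * each device only adds/removes its instance node. The callback name
 * string below is a placeholder, not necessarily what i915 registers.
 */
static enum cpuhp_state example_slot = CPUHP_INVALID;

void example_pmu_init(void)                     /* module_init time */
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                      "gpu/example-pmu:online",
                                      example_cpu_online,
                                      example_cpu_offline);
        if (ret < 0)
                pr_notice("Failed to setup cpuhp state (%d)\n", ret);
        else
                example_slot = ret;
}

void example_pmu_exit(void)                     /* module_exit time */
{
        if (example_slot != CPUHP_INVALID)
                cpuhp_remove_multi_state(example_slot);
}

static int example_register_cpuhp(struct i915_pmu *pmu) /* per device */
{
        if (example_slot == CPUHP_INVALID)
                return -EINVAL;
        return cpuhp_state_add_instance(example_slot, &pmu->cpuhp.node);
}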
*/ const char *name; @@ -122,11 +126,15 @@ struct i915_pmu { }; #ifdef CONFIG_PERF_EVENTS +void i915_pmu_init(void); +void i915_pmu_exit(void); void i915_pmu_register(struct drm_i915_private *i915); void i915_pmu_unregister(struct drm_i915_private *i915); void i915_pmu_gt_parked(struct drm_i915_private *i915); void i915_pmu_gt_unparked(struct drm_i915_private *i915); #else +static inline void i915_pmu_init(void) {} +static inline void i915_pmu_exit(void) {} static inline void i915_pmu_register(struct drm_i915_private *i915) {} static inline void i915_pmu_unregister(struct drm_i915_private *i915) {} static inline void i915_pmu_gt_parked(struct drm_i915_private *i915) {} diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index bb0656875697..5375b219cc3b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -230,12 +230,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b) #define _PORT(port, a, b) _PICK_EVEN(port, a, b) #define _PLL(pll, a, b) _PICK_EVEN(pll, a, b) +#define _PHY(phy, a, b) _PICK_EVEN(phy, a, b) #define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b)) #define _MMIO_PLANE(plane, a, b) _MMIO(_PLANE(plane, a, b)) #define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b)) #define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b)) #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) +#define _MMIO_PHY(phy, a, b) _MMIO(_PHY(phy, a, b)) #define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__) @@ -677,6 +679,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */ #define GEN8_OASTATUS _MMIO(0x2b08) +#define GEN8_OASTATUS_TAIL_POINTER_WRAP (1 << 17) +#define GEN8_OASTATUS_HEAD_POINTER_WRAP (1 << 16) #define GEN8_OASTATUS_OVERRUN_STATUS (1 << 3) #define GEN8_OASTATUS_COUNTER_OVERFLOW (1 << 2) #define GEN8_OASTATUS_OABUFFER_OVERFLOW (1 << 1) @@ -8994,10 +8998,6 @@ enum { #define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0) #define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1) -#define POWERGATE_ENABLE _MMIO(0xa210) -#define VDN_HCP_POWERGATE_ENABLE(n) BIT(((n) * 2) + 3) -#define VDN_MFX_POWERGATE_ENABLE(n) BIT(((n) * 2) + 4) - #define GTFIFODBG _MMIO(0x120000) #define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20) #define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13) @@ -9137,9 +9137,11 @@ enum { #define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4) #define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8) #define GEN9_PG_ENABLE _MMIO(0xA210) -#define GEN9_RENDER_PG_ENABLE REG_BIT(0) -#define GEN9_MEDIA_PG_ENABLE REG_BIT(1) -#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2) +#define GEN9_RENDER_PG_ENABLE REG_BIT(0) +#define GEN9_MEDIA_PG_ENABLE REG_BIT(1) +#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2) +#define VDN_HCP_POWERGATE_ENABLE(n) REG_BIT(3 + 2 * (n)) +#define VDN_MFX_POWERGATE_ENABLE(n) REG_BIT(4 + 2 * (n)) #define GEN8_PUSHBUS_CONTROL _MMIO(0xA248) #define GEN8_PUSHBUS_ENABLE _MMIO(0xA250) #define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C) @@ -10300,6 +10302,7 @@ enum skl_power_gate { #define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port)) #define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port)) +/* ICL Clocks */ #define ICL_DPCLKA_CFGCR0 _MMIO(0x164280) #define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24)) #define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10) @@ -10315,6 +10318,27 @@ enum skl_power_gate { #define 
RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) \ ((pll) << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) +/* + * DG1 Clocks + * The first register controls phys A and B, while the second register + * controls phys C and D. The bits in the two registers are the + * same, but refer to different phys + */ +#define _DG1_DPCLKA_CFGCR0 0x164280 +#define _DG1_DPCLKA1_CFGCR0 0x16C280 +#define _DG1_DPCLKA_PHY_IDX(phy) ((phy) % 2) +#define _DG1_DPCLKA_PLL_IDX(pll) ((pll) % 2) +#define _DG1_PHY_DPLL_MAP(phy) ((phy) >= PHY_C ? DPLL_ID_DG1_DPLL2 : DPLL_ID_DG1_DPLL0) +#define DG1_DPCLKA_CFGCR0(phy) _MMIO_PHY((phy) / 2, \ + _DG1_DPCLKA_CFGCR0, \ + _DG1_DPCLKA1_CFGCR0) +#define DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT(_DG1_DPCLKA_PHY_IDX(phy) + 10) +#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) (_DG1_DPCLKA_PHY_IDX(phy) * 2) +#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) (_DG1_DPCLKA_PLL_IDX(pll) << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) +#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (0x3 << DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) +#define DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy) \ + (((clk_sel) >> DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) + _DG1_PHY_DPLL_MAP(phy)) + /* CNL PLL */ #define DPLL0_ENABLE 0x46010 #define DPLL1_ENABLE 0x46014 diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index 883dd8d09d6b..9cb26a224034 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -27,13 +27,17 @@ static __always_inline struct sgt_iter { } __sgt_iter(struct scatterlist *sgl, bool dma) { struct sgt_iter s = { .sgp = sgl }; - if (s.sgp) { + if (dma && s.sgp && sg_dma_len(s.sgp) == 0) { + s.sgp = NULL; + } else if (s.sgp) { s.max = s.curr = s.sgp->offset; - s.max += s.sgp->length; - if (dma) + if (dma) { s.dma = sg_dma_address(s.sgp); - else + s.max += sg_dma_len(s.sgp); + } else { s.pfn = page_to_pfn(sg_page(s.sgp)); + s.max += s.sgp->length; + } } return s; @@ -44,6 +48,11 @@ static inline int __sg_page_count(const struct scatterlist *sg) { return sg->length >> PAGE_SHIFT; } +static inline int __sg_dma_page_count(const struct scatterlist *sg) +{ + return sg_dma_len(sg) >> PAGE_SHIFT; +} + static inline struct scatterlist *____sg_next(struct scatterlist *sg) { ++sg; diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 99fe8aef1c67..4e70c1a9ef2e 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -24,6 +24,7 @@ #include "i915_drv.h" #include "i915_vgpu.h" #include "intel_gvt.h" +#include "gvt/gvt.h" /** * DOC: Intel GVT-g host support @@ -147,3 +148,17 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) intel_gvt_clean_device(dev_priv); } + +/** + * intel_gvt_resume - GVT resume routine wrapper + * + * @dev_priv: drm i915 private * + * + * This function is called at the i915 driver resume stage to restore required + * HW status for GVT so that vGPU can continue running after resume.
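/*
 * Illustrative, self-contained model of the DG1 DPCLKA arithmetic added
 * above: phy / 2 selects which of the two CFGCR0 registers a phy lives in
 * (A/B vs. C/D) and phy % 2 selects the lane within it, so the bit layout
 * is identical across both registers. Compiles as plain userspace C.
 */
#include <assert.h>
#include <stdio.h>

enum { PHY_A, PHY_B, PHY_C, PHY_D };

static unsigned int dg1_dpclka_cfgcr0(unsigned int phy)
{
        return (phy / 2) ? 0x16C280 : 0x164280; /* _DG1_DPCLKA{1,}_CFGCR0 */
}

static unsigned int dg1_ddi_clk_off_bit(unsigned int phy)
{
        return (phy % 2) + 10;                  /* DDI_CLK_OFF bit index */
}

int main(void)
{
        /* PHY_A and PHY_C use different registers but the same bit index */
        assert(dg1_dpclka_cfgcr0(PHY_A) != dg1_dpclka_cfgcr0(PHY_C));
        assert(dg1_ddi_clk_off_bit(PHY_A) == dg1_ddi_clk_off_bit(PHY_C));
        printf("PHY_D: reg 0x%x, clk-off bit %u\n",
               dg1_dpclka_cfgcr0(PHY_D), dg1_ddi_clk_off_bit(PHY_D));
        return 0;
}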
+ */ +void intel_gvt_resume(struct drm_i915_private *dev_priv) +{ + if (intel_gvt_active(dev_priv)) + intel_gvt_pm_resume(dev_priv->gvt); +} diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h index 502fad8a8652..d7d3fb6186fd 100644 --- a/drivers/gpu/drm/i915/intel_gvt.h +++ b/drivers/gpu/drm/i915/intel_gvt.h @@ -33,6 +33,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv); void intel_gvt_clean_device(struct drm_i915_private *dev_priv); int intel_gvt_init_host(void); void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv); +void intel_gvt_resume(struct drm_i915_private *dev_priv); #else static inline int intel_gvt_init(struct drm_i915_private *dev_priv) { @@ -46,6 +47,10 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) { } + +static inline void intel_gvt_resume(struct drm_i915_private *dev_priv) +{ +} #endif #endif /* _INTEL_GVT_H_ */ diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index 180e1078ef7c..b326993a1026 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -114,7 +114,7 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem, n_pages -= BIT(order); block->private = mem; - list_add(&block->link, blocks); + list_add_tail(&block->link, blocks); if (!n_pages) break; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f54375b11964..a20b5051f18c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -33,6 +33,7 @@ #include <drm/drm_plane_helper.h> #include "display/intel_atomic.h" +#include "display/intel_atomic_plane.h" #include "display/intel_bw.h" #include "display/intel_display_types.h" #include "display/intel_fbc.h" @@ -899,12 +900,12 @@ static void pnv_update_wm(struct intel_crtc *unused_crtc) crtc = single_enabled_crtc(dev_priv); if (crtc) { - const struct drm_display_mode *adjusted_mode = - &crtc->config->hw.adjusted_mode; + const struct drm_display_mode *pipe_mode = + &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp = fb->format->cpp[0]; - int clock = adjusted_mode->crtc_clock; + int clock = pipe_mode->crtc_clock; /* Display SR */ wm = intel_calculate_wm(clock, &pnv_display_wm, @@ -1135,8 +1136,8 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; + const struct drm_display_mode *pipe_mode = + &crtc_state->hw.pipe_mode; unsigned int latency = dev_priv->wm.pri_latency[level] * 10; unsigned int clock, htotal, cpp, width, wm; @@ -1163,8 +1164,8 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, level != G4X_WM_LEVEL_NORMAL) cpp = max(cpp, 4u); - clock = adjusted_mode->crtc_clock; - htotal = adjusted_mode->crtc_htotal; + clock = pipe_mode->crtc_clock; + htotal = pipe_mode->crtc_htotal; width = drm_rect_width(&plane_state->uapi.dst); @@ -1660,8 +1661,8 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_display_mode *adjusted_mode = - 
&crtc_state->hw.adjusted_mode; + const struct drm_display_mode *pipe_mode = + &crtc_state->hw.pipe_mode; unsigned int clock, htotal, cpp, width, wm; if (dev_priv->wm.pri_latency[level] == 0) @@ -1671,8 +1672,8 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, return 0; cpp = plane_state->hw.fb->format->cpp[0]; - clock = adjusted_mode->crtc_clock; - htotal = adjusted_mode->crtc_htotal; + clock = pipe_mode->crtc_clock; + htotal = pipe_mode->crtc_htotal; width = crtc_state->pipe_src_w; if (plane->id == PLANE_CURSOR) { @@ -2261,12 +2262,12 @@ static void i965_update_wm(struct intel_crtc *unused_crtc) if (crtc) { /* self-refresh has much higher latency */ static const int sr_latency_ns = 12000; - const struct drm_display_mode *adjusted_mode = - &crtc->config->hw.adjusted_mode; + const struct drm_display_mode *pipe_mode = + &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; - int clock = adjusted_mode->crtc_clock; - int htotal = adjusted_mode->crtc_htotal; + int clock = pipe_mode->crtc_clock; + int htotal = pipe_mode->crtc_htotal; int hdisplay = crtc->config->pipe_src_w; int cpp = fb->format->cpp[0]; int entries; @@ -2345,8 +2346,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A); crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A); if (intel_crtc_active(crtc)) { - const struct drm_display_mode *adjusted_mode = - &crtc->config->hw.adjusted_mode; + const struct drm_display_mode *pipe_mode = + &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp; @@ -2356,7 +2357,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) else cpp = fb->format->cpp[0]; - planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, + planea_wm = intel_calculate_wm(pipe_mode->crtc_clock, wm_info, fifo_size, cpp, pessimal_latency_ns); enabled = crtc; @@ -2372,8 +2373,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B); crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B); if (intel_crtc_active(crtc)) { - const struct drm_display_mode *adjusted_mode = - &crtc->config->hw.adjusted_mode; + const struct drm_display_mode *pipe_mode = + &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp; @@ -2383,7 +2384,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) else cpp = fb->format->cpp[0]; - planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock, + planeb_wm = intel_calculate_wm(pipe_mode->crtc_clock, wm_info, fifo_size, cpp, pessimal_latency_ns); if (enabled == NULL) @@ -2421,12 +2422,12 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) if (HAS_FW_BLC(dev_priv) && enabled) { /* self-refresh has much higher latency */ static const int sr_latency_ns = 6000; - const struct drm_display_mode *adjusted_mode = - &enabled->config->hw.adjusted_mode; + const struct drm_display_mode *pipe_mode = + &enabled->config->hw.pipe_mode; const struct drm_framebuffer *fb = enabled->base.primary->state->fb; - int clock = adjusted_mode->crtc_clock; - int htotal = adjusted_mode->crtc_htotal; + int clock = pipe_mode->crtc_clock; + int htotal = pipe_mode->crtc_htotal; int hdisplay = enabled->config->pipe_src_w; int cpp; int entries; @@ -2474,7 +2475,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc) { struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); struct intel_crtc *crtc; - 
const struct drm_display_mode *adjusted_mode; + const struct drm_display_mode *pipe_mode; u32 fwater_lo; int planea_wm; @@ -2482,8 +2483,8 @@ static void i845_update_wm(struct intel_crtc *unused_crtc) if (crtc == NULL) return; - adjusted_mode = &crtc->config->hw.adjusted_mode; - planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, + pipe_mode = &crtc->config->hw.pipe_mode; + planea_wm = intel_calculate_wm(pipe_mode->crtc_clock, &i845_wm_info, dev_priv->display.get_fifo_size(dev_priv, PLANE_A), 4, pessimal_latency_ns); @@ -2573,7 +2574,7 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, return method1; method2 = ilk_wm_method2(crtc_state->pixel_rate, - crtc_state->hw.adjusted_mode.crtc_htotal, + crtc_state->hw.pipe_mode.crtc_htotal, drm_rect_width(&plane_state->uapi.dst), cpp, mem_value); @@ -2601,7 +2602,7 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); method2 = ilk_wm_method2(crtc_state->pixel_rate, - crtc_state->hw.adjusted_mode.crtc_htotal, + crtc_state->hw.pipe_mode.crtc_htotal, drm_rect_width(&plane_state->uapi.dst), cpp, mem_value); return min(method1, method2); @@ -2626,7 +2627,7 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, cpp = plane_state->hw.fb->format->cpp[0]; return ilk_wm_method2(crtc_state->pixel_rate, - crtc_state->hw.adjusted_mode.crtc_htotal, + crtc_state->hw.pipe_mode.crtc_htotal, drm_rect_width(&plane_state->uapi.dst), cpp, mem_value); } @@ -3873,9 +3874,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_plane *plane; - const struct intel_plane_state *plane_state; - int level, latency; + enum plane_id plane_id; if (!intel_has_sagv(dev_priv)) return false; @@ -3883,12 +3882,13 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) if (!crtc_state->hw.active) return true; - if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) + if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE) return false; - intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { + for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane->id]; + &crtc_state->wm.skl.optimal.planes[plane_id]; + int level; /* Skip this plane if it's not enabled */ if (!wm->wm[0].plane_en) @@ -3899,19 +3899,12 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) !wm->wm[level].plane_en; --level) { } - latency = dev_priv->wm.skl_latency[level]; - - if (skl_needs_memory_bw_wa(dev_priv) && - plane_state->uapi.fb->modifier == - I915_FORMAT_MOD_X_TILED) - latency += 15; - /* * If any of the planes on this pipe don't enable wm levels that * incur memory latencies higher than sagv_block_time_us we * can't enable SAGV. 
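/*
 * Illustrative sketch of the reworked check, condensed from the hunks
 * around this point: skl_crtc_can_enable_sagv() no longer recomputes the
 * latency (including the X-tiled +15us workaround bump) per plane; each
 * watermark level now carries a precomputed can_sagv flag, so the check
 * just walks down to the highest enabled level. Types abbreviated;
 * max_level stands in for ilk_wm_max_level().
 */
static bool example_can_enable_sagv(const struct skl_pipe_wm *pipe_wm,
                                    int num_planes, int max_level)
{
        int i, level;

        for (i = 0; i < num_planes; i++) {
                const struct skl_plane_wm *wm = &pipe_wm->planes[i];

                if (!wm->wm[0].plane_en)        /* plane not enabled at all */
                        continue;

                /* Highest enabled wm level; level 0 is known to be enabled */
                for (level = max_level; !wm->wm[level].plane_en; --level)
                        ;

                if (!wm->wm[level].can_sagv)
                        return false;
        }
        return true;
}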
*/ - if (latency < dev_priv->sagv_block_time_us) + if (!wm->wm[level].can_sagv) return false; } @@ -4174,8 +4167,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, */ total_slice_mask = dbuf_slice_mask; for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; + const struct drm_display_mode *pipe_mode = + &crtc_state->hw.pipe_mode; enum pipe pipe = crtc->pipe; int hdisplay, vdisplay; u32 pipe_dbuf_slice_mask; @@ -4205,7 +4198,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, if (dbuf_slice_mask != pipe_dbuf_slice_mask) continue; - drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay); + drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay); total_width_in_range += hdisplay; @@ -4704,50 +4697,63 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, } static u64 -skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, - u64 *plane_data_rate, - u64 *uv_plane_data_rate) +skl_get_total_relative_data_rate(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_plane *plane; + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *plane_state; + struct intel_plane *plane; u64 total_data_rate = 0; + enum plane_id plane_id; + int i; /* Calculate and cache data rate for each plane */ - intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { - enum plane_id plane_id = plane->id; - u64 rate; + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + if (plane->pipe != crtc->pipe) + continue; + + plane_id = plane->id; /* packed/y */ - rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); - plane_data_rate[plane_id] = rate; - total_data_rate += rate; + crtc_state->plane_data_rate[plane_id] = + skl_plane_relative_data_rate(crtc_state, plane_state, 0); /* uv-plane */ - rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1); - uv_plane_data_rate[plane_id] = rate; - total_data_rate += rate; + crtc_state->uv_plane_data_rate[plane_id] = + skl_plane_relative_data_rate(crtc_state, plane_state, 1); + } + + for_each_plane_id_on_crtc(crtc, plane_id) { + total_data_rate += crtc_state->plane_data_rate[plane_id]; + total_data_rate += crtc_state->uv_plane_data_rate[plane_id]; } return total_data_rate; } static u64 -icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, - u64 *plane_data_rate) +icl_get_total_relative_data_rate(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_plane *plane; + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *plane_state; + struct intel_plane *plane; u64 total_data_rate = 0; + enum plane_id plane_id; + int i; /* Calculate and cache data rate for each plane */ - intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { - enum plane_id plane_id = plane->id; - u64 rate; + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + if (plane->pipe != crtc->pipe) + continue; + + plane_id = plane->id; if (!plane_state->planar_linked_plane) { - rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); - plane_data_rate[plane_id] = rate; - total_data_rate += rate; + crtc_state->plane_data_rate[plane_id] = + skl_plane_relative_data_rate(crtc_state, plane_state, 0); } else { enum plane_id y_plane_id; @@ -4762,17 +4768,18 @@ 
icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, continue; /* Y plane rate is calculated on the slave */ - rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); y_plane_id = plane_state->planar_linked_plane->id; - plane_data_rate[y_plane_id] = rate; - total_data_rate += rate; + crtc_state->plane_data_rate[y_plane_id] = + skl_plane_relative_data_rate(crtc_state, plane_state, 0); - rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1); - plane_data_rate[plane_id] = rate; - total_data_rate += rate; + crtc_state->plane_data_rate[plane_id] = + skl_plane_relative_data_rate(crtc_state, plane_state, 1); } } + for_each_plane_id_on_crtc(crtc, plane_id) + total_data_rate += crtc_state->plane_data_rate[plane_id]; + return total_data_rate; } @@ -4791,9 +4798,11 @@ skl_plane_wm_level(const struct intel_crtc_state *crtc_state, } static int -skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state) +skl_allocate_pipe_ddb(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb; u16 alloc_size, start = 0; @@ -4802,8 +4811,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state) u64 total_data_rate; enum plane_id plane_id; int num_active; - u64 plane_data_rate[I915_MAX_PLANES] = {}; - u64 uv_plane_data_rate[I915_MAX_PLANES] = {}; u32 blocks; int level; int ret; @@ -4843,13 +4850,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state) if (INTEL_GEN(dev_priv) >= 11) total_data_rate = - icl_get_total_relative_data_rate(crtc_state, - plane_data_rate); + icl_get_total_relative_data_rate(state, crtc); else total_data_rate = - skl_get_total_relative_data_rate(crtc_state, - plane_data_rate, - uv_plane_data_rate); + skl_get_total_relative_data_rate(state, crtc); ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate, @@ -4930,7 +4934,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state) if (total_data_rate == 0) break; - rate = plane_data_rate[plane_id]; + rate = crtc_state->plane_data_rate[plane_id]; extra = min_t(u16, alloc_size, DIV64_U64_ROUND_UP(alloc_size * rate, total_data_rate)); @@ -4941,7 +4945,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state) if (total_data_rate == 0) break; - rate = uv_plane_data_rate[plane_id]; + rate = crtc_state->uv_plane_data_rate[plane_id]; extra = min_t(u16, alloc_size, DIV64_U64_ROUND_UP(alloc_size * rate, total_data_rate)); @@ -5093,36 +5097,12 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state) if (drm_WARN_ON(&dev_priv->drm, pixel_rate == 0)) return u32_to_fixed16(0); - crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal; + crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal; linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate); return linetime_us; } -static u32 -skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - u64 adjusted_pixel_rate; - uint_fixed_16_16_t downscale_amount; - - /* Shouldn't reach here on disabled planes... 
*/ - if (drm_WARN_ON(&dev_priv->drm, - !intel_wm_plane_visible(crtc_state, plane_state))) - return 0; - - /* - * Adjusted plane pixel rate is just the pipe's adjusted pixel rate - * with additional adjustments for plane-specific scaling. - */ - adjusted_pixel_rate = crtc_state->pixel_rate; - downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state); - - return mul_round_up_u32_fixed16(adjusted_pixel_rate, - downscale_amount); -} - static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, int width, const struct drm_format_info *format, @@ -5235,7 +5215,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, return skl_compute_wm_params(crtc_state, width, fb->format, fb->modifier, plane_state->hw.rotation, - skl_adjusted_plane_pixel_rate(crtc_state, plane_state), + intel_plane_pixel_rate(crtc_state, plane_state), wp, color_plane); } @@ -5282,14 +5262,14 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate, wp->cpp, latency, wp->dbuf_block_size); method2 = skl_wm_method2(wp->plane_pixel_rate, - crtc_state->hw.adjusted_mode.crtc_htotal, + crtc_state->hw.pipe_mode.crtc_htotal, latency, wp->plane_blocks_per_line); if (wp->y_tiled) { selected_result = max_fixed16(method2, wp->y_tile_minimum); } else { - if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal / + if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal / wp->dbuf_block_size < 1) && (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { selected_result = method2; @@ -5373,6 +5353,9 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */ result->min_ddb_alloc = max(min_ddb_alloc, res_blocks) + 1; result->plane_en = true; + + if (INTEL_GEN(dev_priv) < 12) + result->can_sagv = latency >= dev_priv->sagv_block_time_us; } static void @@ -5478,7 +5461,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; struct skl_wm_params wm_params; int ret; @@ -5501,7 +5484,7 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, enum plane_id plane_id) { - struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; struct skl_wm_params wm_params; int ret; @@ -5522,10 +5505,13 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - const struct drm_framebuffer *fb = plane_state->hw.fb; enum plane_id plane_id = plane->id; + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; + const struct drm_framebuffer *fb = plane_state->hw.fb; int ret; + memset(wm, 0, sizeof(*wm)); + if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; @@ -5547,10 +5533,14 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - enum plane_id plane_id = 
to_intel_plane(plane_state->uapi.plane)->id; + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum plane_id plane_id = plane->id; + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; int ret; + memset(wm, 0, sizeof(*wm)); + /* Watermarks calculated in master */ if (plane_state->planar_slave) return 0; @@ -5583,22 +5573,24 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, return 0; } -static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state) +static int skl_build_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; - struct intel_plane *plane; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *plane_state; - int ret; - - /* - * We'll only calculate watermarks for planes that are actually - * enabled, so make sure all other planes are set as disabled. - */ - memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes)); + struct intel_plane *plane; + int ret, i; - intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, - crtc_state) { + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + /* + * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc + * instead but we don't populate that correctly for NV12 Y + * planes so for now hack this. + */ + if (plane->pipe != crtc->pipe) + continue; if (INTEL_GEN(dev_priv) >= 11) ret = icl_build_plane_wm(crtc_state, plane_state); @@ -5608,6 +5600,8 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state) return ret; } + crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw; + return 0; } @@ -5794,7 +5788,7 @@ skl_compute_ddb(struct intel_atomic_state *state) for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - ret = skl_allocate_pipe_ddb(new_crtc_state); + ret = skl_allocate_pipe_ddb(state, crtc); if (ret) return ret; @@ -6092,7 +6086,6 @@ skl_compute_wm(struct intel_atomic_state *state) { struct intel_crtc *crtc; struct intel_crtc_state *new_crtc_state; - struct intel_crtc_state *old_crtc_state; int ret, i; ret = skl_ddb_add_affected_pipes(state); @@ -6104,9 +6097,8 @@ skl_compute_wm(struct intel_atomic_state *state) * Note that skl_ddb_add_affected_pipes may have added more CRTC's that * weren't otherwise being modified if pipe allocations had to change. */ - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - ret = skl_build_pipe_wm(new_crtc_state); + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + ret = skl_build_pipe_wm(state, crtc); if (ret) return ret; } @@ -6124,8 +6116,7 @@ skl_compute_wm(struct intel_atomic_state *state) * based on how much ddb is available. Now we can actually * check if the final watermarks changed. 
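/*
 * Illustrative sketch, condensed from the skl_build_*_wm() hunks above:
 * watermarks are now computed into the crtc_state->wm.skl.raw staging copy
 * (each plane clears its own slot first, so planes that get skipped stay
 * disabled), and only a fully built result is copied to ->optimal in one
 * assignment. Types and iteration are simplified placeholders.
 */
static int example_build_plane_wm(struct intel_crtc_state *crtc_state,
                                  enum plane_id plane_id, bool visible)
{
        struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];

        memset(wm, 0, sizeof(*wm));     /* invisible planes stay all-zero */
        if (!visible)
                return 0;

        /* ... compute wm->wm[level] for each watermark level ... */
        return 0;
}

static int example_build_pipe_wm(struct intel_crtc_state *crtc_state)
{
        /* ... example_build_plane_wm() for every plane on this pipe ... */

        crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;   /* commit */
        return 0;
}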
*/ - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { ret = skl_wm_add_affected_planes(state, crtc); if (ret) return ret; @@ -6271,6 +6262,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) crtc_state = to_intel_crtc_state(crtc->base.state); skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); + crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal; } if (dev_priv->active_pipes) { @@ -7111,22 +7103,8 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) 0, CNL_DELAY_PMRSP); } -static void gen12_init_clock_gating(struct drm_i915_private *i915) -{ - unsigned int i; - - /* This is not a WA. Enable VD HCP & MFX_ENC powergate */ - for (i = 0; i < I915_MAX_VCS; i++) - if (HAS_ENGINE(&i915->gt, _VCS(i))) - intel_uncore_rmw(&i915->uncore, POWERGATE_ENABLE, 0, - VDN_HCP_POWERGATE_ENABLE(i) | - VDN_MFX_POWERGATE_ENABLE(i)); -} - static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) { - gen12_init_clock_gating(dev_priv); - /* Wa_1409120013:tgl */ I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); @@ -7143,8 +7121,6 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) static void dg1_init_clock_gating(struct drm_i915_private *dev_priv) { - gen12_init_clock_gating(dev_priv); - /* Wa_1409836686:dg1[a0] */ if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0)) I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) | diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 9220c9d1a4b7..e946bd2087d8 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -81,7 +81,7 @@ out: i915_params_free(&i915->params); } -static struct drm_driver mock_driver = { +static const struct drm_driver mock_driver = { .name = "mock", .driver_features = DRIVER_GEM, .release = mock_device_release, diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c index b72e5cef7e40..b549ce5e7607 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-kms.c +++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c @@ -26,7 +26,7 @@ static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static struct drm_driver dcss_kms_driver = { +static const struct drm_driver dcss_kms_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, DRM_GEM_CMA_DRIVER_OPS, .fops = &dcss_cma_fops, diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 9bf5ad6d18a2..d1a9841adeed 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -145,7 +145,7 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = { /* none so far */ }; -static struct drm_driver imx_drm_driver = { +static const struct drm_driver imx_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, DRM_GEM_CMA_DRIVER_OPS, .ioctls = imx_drm_ioctls, diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index b9c156e13156..368bfef8b340 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -305,11 +305,13 @@ ingenic_drm_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode static void ingenic_drm_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state 
*crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct ingenic_drm *priv = drm_crtc_get_priv(crtc); u32 ctrl = 0; if (priv->soc_info->has_osd && - drm_atomic_crtc_needs_modeset(crtc->state)) { + drm_atomic_crtc_needs_modeset(crtc_state)) { /* * If IPU plane is enabled, enable IPU as source for the F1 * plane; otherwise use regular DMA. @@ -326,7 +328,8 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct ingenic_drm *priv = drm_crtc_get_priv(crtc); - struct drm_crtc_state *crtc_state = crtc->state; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_pending_vblank_event *event = crtc_state->event; if (drm_atomic_crtc_needs_modeset(crtc_state)) { @@ -716,7 +719,7 @@ static void ingenic_drm_disable_vblank(struct drm_crtc *crtc) DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops); -static struct drm_driver ingenic_drm_driver_data = { +static const struct drm_driver ingenic_drm_driver_data = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .name = "ingenic-drm", .desc = "DRM module for Ingenic SoCs", diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c index fc8c6e970ee3..e52777ef85fd 100644 --- a/drivers/gpu/drm/ingenic/ingenic-ipu.c +++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c @@ -516,7 +516,7 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { - unsigned int num_w, denom_w, num_h, denom_h, xres, yres; + unsigned int num_w, denom_w, num_h, denom_h, xres, yres, max_w, max_h; struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane); struct drm_crtc *crtc = state->crtc ?: plane->state->crtc; struct drm_crtc_state *crtc_state; @@ -558,19 +558,26 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, xres = state->src_w >> 16; yres = state->src_h >> 16; - /* Adjust the coefficients until we find a valid configuration */ - for (denom_w = xres, num_w = state->crtc_w; - num_w <= crtc_state->mode.hdisplay; num_w++) + /* + * Increase the scaled image's theoretical width/height until we find a + * configuration that has valid scaling coefficients, up to 102% of the + * screen's resolution. This makes sure that we can scale from almost + * every resolution possible at the cost of a very small distortion. + * The CRTC_W / CRTC_H are not modified.
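/*
 * Illustrative, self-contained model of the search described above: grow
 * the theoretical output size until the scale fraction reduces to
 * coefficients the IPU can encode, allowing up to 2% overshoot of the
 * display resolution. The coefficient limits below are assumptions for
 * the example, not the hardware's real ones; reduce_fraction() mirrors
 * the driver helper's shape (modifies its arguments only on success).
 */
#include <stdbool.h>

static unsigned int gcd_u32(unsigned int a, unsigned int b)
{
        while (b) {
                unsigned int t = a % b;

                a = b;
                b = t;
        }
        return a;
}

static bool reduce_fraction(unsigned int *num, unsigned int *denom)
{
        unsigned int d = gcd_u32(*num, *denom);

        if (*num / d > 15 || *denom / d > 31)   /* assumed encodable range */
                return false;
        *num /= d;
        *denom /= d;
        return true;
}

/* Returns 0 and the reduced numerator on success, -1 if nothing fits. */
static int pick_scale(unsigned int src, unsigned int dst,
                      unsigned int display, unsigned int *num_out)
{
        unsigned int max = display * 102 / 100; /* 2% distortion budget */
        unsigned int num, denom;

        for (num = dst, denom = src; num <= max; num++)
                if (reduce_fraction(&num, &denom))
                        break;          /* found programmable coefficients */
        if (num > max)
                return -1;
        *num_out = num;
        return 0;
}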
+ */ + max_w = crtc_state->mode.hdisplay * 102 / 100; + max_h = crtc_state->mode.vdisplay * 102 / 100; + + for (denom_w = xres, num_w = state->crtc_w; num_w <= max_w; num_w++) if (!reduce_fraction(&num_w, &denom_w)) break; - if (num_w > crtc_state->mode.hdisplay) + if (num_w > max_w) return -EINVAL; - for (denom_h = yres, num_h = state->crtc_h; - num_h <= crtc_state->mode.vdisplay; num_h++) + for (denom_h = yres, num_h = state->crtc_h; num_h <= max_h; num_h++) if (!reduce_fraction(&num_h, &denom_h)) break; - if (num_h > crtc_state->mode.vdisplay) + if (num_h > max_h) return -EINVAL; ipu->num_w = num_w; diff --git a/drivers/gpu/drm/kmb/Kconfig b/drivers/gpu/drm/kmb/Kconfig new file mode 100644 index 000000000000..bc4cb5e1cd8a --- /dev/null +++ b/drivers/gpu/drm/kmb/Kconfig @@ -0,0 +1,13 @@ +config DRM_KMB_DISPLAY + tristate "Intel Keembay Display" + depends on DRM + depends on ARCH_KEEMBAY || COMPILE_TEST + select DRM_KMS_HELPER + select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER + select DRM_MIPI_DSI + help + Choose this option if you have Intel's KeemBay SOC which integrates + an ARM Cortex A53 CPU with an Intel Movidius VPU. + + If M is selected the module will be called kmb-drm. diff --git a/drivers/gpu/drm/kmb/Makefile b/drivers/gpu/drm/kmb/Makefile new file mode 100644 index 000000000000..527d737a0539 --- /dev/null +++ b/drivers/gpu/drm/kmb/Makefile @@ -0,0 +1,2 @@ +kmb-drm-y := kmb_crtc.o kmb_drv.o kmb_plane.o kmb_dsi.o +obj-$(CONFIG_DRM_KMB_DISPLAY) += kmb-drm.o diff --git a/drivers/gpu/drm/kmb/kmb_crtc.c b/drivers/gpu/drm/kmb/kmb_crtc.c new file mode 100644 index 000000000000..44327bc629ca --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_crtc.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright © 2018-2020 Intel Corporation + */ + +#include <linux/clk.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_print.h> +#include <drm/drm_vblank.h> +#include <drm/drm_modeset_helper_vtables.h> + +#include "kmb_drv.h" +#include "kmb_dsi.h" +#include "kmb_plane.h" +#include "kmb_regs.h" + +struct kmb_crtc_timing { + u32 vfront_porch; + u32 vback_porch; + u32 vsync_len; + u32 hfront_porch; + u32 hback_porch; + u32 hsync_len; +}; + +static int kmb_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct kmb_drm_private *kmb = to_kmb(dev); + + /* Clear interrupt */ + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP); + /* Set which interval to generate vertical interrupt */ + kmb_write_lcd(kmb, LCD_VSTATUS_COMPARE, + LCD_VSTATUS_COMPARE_VSYNC); + /* Enable vertical interrupt */ + kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE, + LCD_INT_VERT_COMP); + return 0; +} + +static void kmb_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct kmb_drm_private *kmb = to_kmb(dev); + + /* Clear interrupt */ + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP); + /* Disable vertical interrupt */ + kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, + LCD_INT_VERT_COMP); +} + +static const struct drm_crtc_funcs kmb_crtc_funcs = { + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = kmb_crtc_enable_vblank, + .disable_vblank = kmb_crtc_disable_vblank, +}; + +static void 
kmb_crtc_set_mode(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_display_mode *m = &crtc->state->adjusted_mode; + struct kmb_crtc_timing vm; + struct kmb_drm_private *kmb = to_kmb(dev); + unsigned int val = 0; + + /* Initialize mipi */ + kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz); + drm_info(dev, + "vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n", + m->crtc_vsync_start - m->crtc_vdisplay, + m->crtc_vtotal - m->crtc_vsync_end, + m->crtc_vsync_end - m->crtc_vsync_start, + m->crtc_hsync_start - m->crtc_hdisplay, + m->crtc_htotal - m->crtc_hsync_end, + m->crtc_hsync_end - m->crtc_hsync_start); + val = kmb_read_lcd(kmb, LCD_INT_ENABLE); + kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, val); + kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, ~0x0); + vm.vfront_porch = 2; + vm.vback_porch = 2; + vm.vsync_len = 8; + vm.hfront_porch = 0; + vm.hback_porch = 0; + vm.hsync_len = 28; + + drm_dbg(dev, "%s : %d active height= %d vbp=%d vfp=%d vsync-w=%d h-active=%d h-bp=%d h-fp=%d hsync-l=%d", + __func__, __LINE__, + m->crtc_vdisplay, vm.vback_porch, vm.vfront_porch, + vm.vsync_len, m->crtc_hdisplay, vm.hback_porch, + vm.hfront_porch, vm.hsync_len); + kmb_write_lcd(kmb, LCD_V_ACTIVEHEIGHT, + m->crtc_vdisplay - 1); + kmb_write_lcd(kmb, LCD_V_BACKPORCH, vm.vback_porch); + kmb_write_lcd(kmb, LCD_V_FRONTPORCH, vm.vfront_porch); + kmb_write_lcd(kmb, LCD_VSYNC_WIDTH, vm.vsync_len - 1); + kmb_write_lcd(kmb, LCD_H_ACTIVEWIDTH, + m->crtc_hdisplay - 1); + kmb_write_lcd(kmb, LCD_H_BACKPORCH, vm.hback_porch); + kmb_write_lcd(kmb, LCD_H_FRONTPORCH, vm.hfront_porch); + kmb_write_lcd(kmb, LCD_HSYNC_WIDTH, vm.hsync_len - 1); + /* This is hardcoded as 0 in the Myriadx code */ + kmb_write_lcd(kmb, LCD_VSYNC_START, 0); + kmb_write_lcd(kmb, LCD_VSYNC_END, 0); + /* Background color */ + kmb_write_lcd(kmb, LCD_BG_COLOUR_LS, 0x4); + if (m->flags == DRM_MODE_FLAG_INTERLACE) { + kmb_write_lcd(kmb, + LCD_VSYNC_WIDTH_EVEN, vm.vsync_len - 1); + kmb_write_lcd(kmb, + LCD_V_BACKPORCH_EVEN, vm.vback_porch); + kmb_write_lcd(kmb, + LCD_V_FRONTPORCH_EVEN, vm.vfront_porch); + kmb_write_lcd(kmb, LCD_V_ACTIVEHEIGHT_EVEN, + m->crtc_vdisplay - 1); + /* This is hardcoded as 10 in the Myriadx code */ + kmb_write_lcd(kmb, LCD_VSYNC_START_EVEN, 10); + kmb_write_lcd(kmb, LCD_VSYNC_END_EVEN, 10); + } + kmb_write_lcd(kmb, LCD_TIMING_GEN_TRIG, 1); + kmb_set_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_ENABLE); + kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE, val); +} + +static void kmb_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc); + + clk_prepare_enable(kmb->kmb_clk.clk_lcd); + kmb_crtc_set_mode(crtc); + drm_crtc_vblank_on(crtc); +} + +static void kmb_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc); + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc); + + /* due to hw limitations, planes need to be off when crtc is off */ + drm_atomic_helper_disable_planes_on_crtc(old_state, false); + + drm_crtc_vblank_off(crtc); + clk_disable_unprepare(kmb->kmb_clk.clk_lcd); +} + +static void kmb_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct kmb_drm_private *kmb = to_kmb(dev); + + kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, + LCD_INT_VERT_COMP); +} + +static void kmb_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev =
crtc->dev; + struct kmb_drm_private *kmb = to_kmb(dev); + + kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE, + LCD_INT_VERT_COMP); + + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + else + drm_crtc_send_vblank_event(crtc, crtc->state->event); + } + crtc->state->event = NULL; + spin_unlock_irq(&crtc->dev->event_lock); +} + +static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = { + .atomic_begin = kmb_crtc_atomic_begin, + .atomic_enable = kmb_crtc_atomic_enable, + .atomic_disable = kmb_crtc_atomic_disable, + .atomic_flush = kmb_crtc_atomic_flush, +}; + +int kmb_setup_crtc(struct drm_device *drm) +{ + struct kmb_drm_private *kmb = to_kmb(drm); + struct kmb_plane *primary; + int ret; + + primary = kmb_plane_init(drm); + if (IS_ERR(primary)) + return PTR_ERR(primary); + + ret = drm_crtc_init_with_planes(drm, &kmb->crtc, &primary->base_plane, + NULL, &kmb_crtc_funcs, NULL); + if (ret) { + kmb_plane_destroy(&primary->base_plane); + return ret; + } + + drm_crtc_helper_add(&kmb->crtc, &kmb_crtc_helper_funcs); + return 0; +} diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c new file mode 100644 index 000000000000..a31a840ce634 --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright © 2018-2020 Intel Corporation + */ + +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/of_graph.h> +#include <linux/of_platform.h> +#include <linux/of_reserved_mem.h> +#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_irq.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "kmb_drv.h" +#include "kmb_dsi.h" +#include "kmb_regs.h" + +static int kmb_display_clk_enable(struct kmb_drm_private *kmb) +{ + int ret = 0; + + ret = clk_prepare_enable(kmb->kmb_clk.clk_lcd); + if (ret) { + drm_err(&kmb->drm, "Failed to enable LCD clock: %d\n", ret); + return ret; + } + DRM_INFO("SUCCESS : enabled LCD clocks\n"); + return 0; +} + +static int kmb_initialize_clocks(struct kmb_drm_private *kmb, struct device *dev) +{ + int ret = 0; + struct regmap *msscam; + + kmb->kmb_clk.clk_lcd = devm_clk_get(dev, "clk_lcd"); + if (IS_ERR(kmb->kmb_clk.clk_lcd)) { + drm_err(&kmb->drm, "clk_get() failed clk_lcd\n"); + return PTR_ERR(kmb->kmb_clk.clk_lcd); + } + + kmb->kmb_clk.clk_pll0 = devm_clk_get(dev, "clk_pll0"); + if (IS_ERR(kmb->kmb_clk.clk_pll0)) { + drm_err(&kmb->drm, "clk_get() failed clk_pll0 "); + return PTR_ERR(kmb->kmb_clk.clk_pll0); + } + kmb->sys_clk_mhz = clk_get_rate(kmb->kmb_clk.clk_pll0) / 1000000; + drm_info(&kmb->drm, "system clk = %d MHz", kmb->sys_clk_mhz); + + ret = kmb_dsi_clk_init(kmb->kmb_dsi); + + /* Set LCD clock to 200 MHz */ + clk_set_rate(kmb->kmb_clk.clk_lcd, KMB_LCD_DEFAULT_CLK); + if (clk_get_rate(kmb->kmb_clk.clk_lcd) != KMB_LCD_DEFAULT_CLK) { + drm_err(&kmb->drm, "failed to set clk_lcd to %d\n", + KMB_LCD_DEFAULT_CLK); + return -1; + } + drm_dbg(&kmb->drm, "clk_lcd = %ld\n", clk_get_rate(kmb->kmb_clk.clk_lcd)); + + ret = kmb_display_clk_enable(kmb); + if (ret) + return ret; + + msscam = syscon_regmap_lookup_by_compatible("intel,keembay-msscam"); + if (IS_ERR(msscam)) { + drm_err(&kmb->drm, "failed to get msscam syscon"); +
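/*
 * Illustrative sketch of the set-and-verify pattern used in
 * kmb_initialize_clocks() above: clk_set_rate() may legally round to the
 * closest rate the clock tree supports, so the driver reads the rate back
 * and refuses to continue on a mismatch. Placeholder name and error code.
 */
static int example_set_exact_rate(struct clk *clk, unsigned long hz)
{
        int ret;

        ret = clk_set_rate(clk, hz);
        if (ret)
                return ret;
        if (clk_get_rate(clk) != hz)
                return -EINVAL; /* rate was rounded; treat as failure */
        return 0;
}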
return -1; + } + + /* Enable MSS_CAM_CLK_CTRL for MIPI TX and LCD */ + regmap_update_bits(msscam, MSS_CAM_CLK_CTRL, 0x1fff, 0x1fff); + regmap_update_bits(msscam, MSS_CAM_RSTN_CTRL, 0xffffffff, 0xffffffff); + return 0; +} + +static void kmb_display_clk_disable(struct kmb_drm_private *kmb) +{ + clk_disable_unprepare(kmb->kmb_clk.clk_lcd); +} + +static void __iomem *kmb_map_mmio(struct drm_device *drm, + struct platform_device *pdev, + char *name) +{ + struct resource *res; + void __iomem *mem; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + if (!res) { + drm_err(drm, "failed to get resource for %s", name); + return ERR_PTR(-ENOMEM); + } + mem = devm_ioremap_resource(drm->dev, res); + if (IS_ERR(mem)) + drm_err(drm, "failed to ioremap %s registers", name); + return mem; +} + +static int kmb_hw_init(struct drm_device *drm, unsigned long flags) +{ + struct kmb_drm_private *kmb = to_kmb(drm); + struct platform_device *pdev = to_platform_device(drm->dev); + int irq_lcd; + int ret = 0; + + /* Map LCD MMIO registers */ + kmb->lcd_mmio = kmb_map_mmio(drm, pdev, "lcd"); + if (IS_ERR(kmb->lcd_mmio)) { + drm_err(&kmb->drm, "failed to map LCD registers\n"); + return -ENOMEM; + } + + /* Map MIPI MMIO registers */ + ret = kmb_dsi_map_mmio(kmb->kmb_dsi); + if (ret) + return ret; + + /* Enable display clocks */ + kmb_initialize_clocks(kmb, &pdev->dev); + + /* Register irqs here - section 17.3 in databook + * lists LCD at 79 and 82 for MIPI under MSS CPU - + * firmware has redirected 79 to A53 IRQ 33 + */ + + /* Allocate LCD interrupt resources */ + irq_lcd = platform_get_irq(pdev, 0); + if (irq_lcd < 0) { + drm_err(&kmb->drm, "irq_lcd not found"); + goto setup_fail; + } + + /* Get the optional framebuffer memory resource */ + ret = of_reserved_mem_device_init(drm->dev); + if (ret && ret != -ENODEV) + return ret; + + spin_lock_init(&kmb->irq_lock); + + kmb->irq_lcd = irq_lcd; + + return 0; + + setup_fail: + of_reserved_mem_device_release(drm->dev); + + return ret; +} + +static const struct drm_mode_config_funcs kmb_mode_config_funcs = { + .fb_create = drm_gem_fb_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static int kmb_setup_mode_config(struct drm_device *drm) +{ + int ret; + struct kmb_drm_private *kmb = to_kmb(drm); + + ret = drmm_mode_config_init(drm); + if (ret) + return ret; + drm->mode_config.min_width = KMB_MIN_WIDTH; + drm->mode_config.min_height = KMB_MIN_HEIGHT; + drm->mode_config.max_width = KMB_MAX_WIDTH; + drm->mode_config.max_height = KMB_MAX_HEIGHT; + drm->mode_config.funcs = &kmb_mode_config_funcs; + + ret = kmb_setup_crtc(drm); + if (ret < 0) { + drm_err(drm, "failed to create crtc\n"); + return ret; + } + ret = kmb_dsi_encoder_init(drm, kmb->kmb_dsi); + /* Set the CRTC's port so that the encoder component can find it */ + kmb->crtc.port = of_graph_get_port_by_id(drm->dev->of_node, 0); + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (ret < 0) { + drm_err(drm, "failed to initialize vblank\n"); + pm_runtime_disable(drm->dev); + return ret; + } + + drm_mode_config_reset(drm); + return 0; +} + +static irqreturn_t handle_lcd_irq(struct drm_device *dev) +{ + unsigned long status, val, val1; + int plane_id, dma0_state, dma1_state; + struct kmb_drm_private *kmb = to_kmb(dev); + + status = kmb_read_lcd(kmb, LCD_INT_STATUS); + + spin_lock(&kmb->irq_lock); + if (status & LCD_INT_EOF) { + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_EOF); + + /* When disabling/enabling LCD layers, the change takes effect + * 
immediately and does not wait for EOF (end of frame). + * When kmb_plane_atomic_disable is called, mark the plane as + * disabled but actually disable the plane when EOF irq is + * being handled. + */ + for (plane_id = LAYER_0; + plane_id < KMB_MAX_PLANES; plane_id++) { + if (kmb->plane_status[plane_id].disable) { + kmb_clr_bitmask_lcd(kmb, + LCD_LAYERn_DMA_CFG + (plane_id), + LCD_DMA_LAYER_ENABLE); + + kmb_clr_bitmask_lcd(kmb, LCD_CONTROL, + kmb->plane_status[plane_id].ctrl); + + kmb->plane_status[plane_id].disable = false; + } + } + if (kmb->kmb_under_flow) { + /* DMA Recovery after underflow */ + dma0_state = (kmb->layer_no == 0) ? + LCD_VIDEO0_DMA0_STATE : LCD_VIDEO1_DMA0_STATE; + dma1_state = (kmb->layer_no == 0) ? + LCD_VIDEO0_DMA1_STATE : LCD_VIDEO1_DMA1_STATE; + + do { + kmb_write_lcd(kmb, LCD_FIFO_FLUSH, 1); + val = kmb_read_lcd(kmb, dma0_state) + & LCD_DMA_STATE_ACTIVE; + val1 = kmb_read_lcd(kmb, dma1_state) + & LCD_DMA_STATE_ACTIVE; + } while ((val || val1)); + /* disable dma */ + kmb_clr_bitmask_lcd(kmb, + LCD_LAYERn_DMA_CFG(kmb->layer_no), + LCD_DMA_LAYER_ENABLE); + kmb_write_lcd(kmb, LCD_FIFO_FLUSH, 1); + kmb->kmb_flush_done = 1; + kmb->kmb_under_flow = 0; + } + } + + if (status & LCD_INT_LINE_CMP) { + /* clear line compare interrupt */ + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_LINE_CMP); + } + + if (status & LCD_INT_VERT_COMP) { + /* Read VSTATUS */ + val = kmb_read_lcd(kmb, LCD_VSTATUS); + val = (val & LCD_VSTATUS_VERTICAL_STATUS_MASK); + switch (val) { + case LCD_VSTATUS_COMPARE_VSYNC: + /* Clear vertical compare interrupt */ + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP); + if (kmb->kmb_flush_done) { + kmb_set_bitmask_lcd(kmb, + LCD_LAYERn_DMA_CFG + (kmb->layer_no), + LCD_DMA_LAYER_ENABLE); + kmb->kmb_flush_done = 0; + } + drm_crtc_handle_vblank(&kmb->crtc); + break; + case LCD_VSTATUS_COMPARE_BACKPORCH: + case LCD_VSTATUS_COMPARE_ACTIVE: + case LCD_VSTATUS_COMPARE_FRONT_PORCH: + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP); + break; + } + } + if (status & LCD_INT_DMA_ERR) { + val = + (status & LCD_INT_DMA_ERR & + kmb_read_lcd(kmb, LCD_INT_ENABLE)); + /* LAYER0 - VL0 */ + if (val & (LAYER0_DMA_FIFO_UNDERFLOW | + LAYER0_DMA_CB_FIFO_UNDERFLOW | + LAYER0_DMA_CR_FIFO_UNDERFLOW)) { + kmb->kmb_under_flow++; + drm_info(&kmb->drm, + "!LAYER0:VL0 DMA UNDERFLOW val = 0x%lx,under_flow=%d", + val, kmb->kmb_under_flow); + /* disable underflow interrupt */ + kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, + LAYER0_DMA_FIFO_UNDERFLOW | + LAYER0_DMA_CB_FIFO_UNDERFLOW | + LAYER0_DMA_CR_FIFO_UNDERFLOW); + kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, + LAYER0_DMA_CB_FIFO_UNDERFLOW | + LAYER0_DMA_FIFO_UNDERFLOW | + LAYER0_DMA_CR_FIFO_UNDERFLOW); + /* disable auto restart mode */ + kmb_clr_bitmask_lcd(kmb, LCD_LAYERn_DMA_CFG(0), + LCD_DMA_LAYER_CONT_PING_PONG_UPDATE); + + kmb->layer_no = 0; + } + + if (val & LAYER0_DMA_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER0:VL0 DMA OVERFLOW val = 0x%lx", val); + if (val & LAYER0_DMA_CB_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER0:VL0 DMA CB OVERFLOW val = 0x%lx", val); + if (val & LAYER0_DMA_CR_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER0:VL0 DMA CR OVERFLOW val = 0x%lx", val); + + /* LAYER1 - VL1 */ + if (val & (LAYER1_DMA_FIFO_UNDERFLOW | + LAYER1_DMA_CB_FIFO_UNDERFLOW | + LAYER1_DMA_CR_FIFO_UNDERFLOW)) { + kmb->kmb_under_flow++; + drm_info(&kmb->drm, + "!LAYER1:VL1 DMA UNDERFLOW val = 0x%lx, under_flow=%d", + val, kmb->kmb_under_flow); + /* disable underflow interrupt */ + kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, + LAYER1_DMA_FIFO_UNDERFLOW 
| + LAYER1_DMA_CB_FIFO_UNDERFLOW | + LAYER1_DMA_CR_FIFO_UNDERFLOW); + kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, + LAYER1_DMA_CB_FIFO_UNDERFLOW | + LAYER1_DMA_FIFO_UNDERFLOW | + LAYER1_DMA_CR_FIFO_UNDERFLOW); + /* disable auto restart mode */ + kmb_clr_bitmask_lcd(kmb, LCD_LAYERn_DMA_CFG(1), + LCD_DMA_LAYER_CONT_PING_PONG_UPDATE); + kmb->layer_no = 1; + } + + /* LAYER1 - VL1 */ + if (val & LAYER1_DMA_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER1:VL1 DMA OVERFLOW val = 0x%lx", val); + if (val & LAYER1_DMA_CB_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER1:VL1 DMA CB OVERFLOW val = 0x%lx", val); + if (val & LAYER1_DMA_CR_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER1:VL1 DMA CR OVERFLOW val = 0x%lx", val); + + /* LAYER2 - GL0 */ + if (val & LAYER2_DMA_FIFO_UNDERFLOW) + drm_dbg(&kmb->drm, + "LAYER2:GL0 DMA UNDERFLOW val = 0x%lx", val); + if (val & LAYER2_DMA_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER2:GL0 DMA OVERFLOW val = 0x%lx", val); + + /* LAYER3 - GL1 */ + if (val & LAYER3_DMA_FIFO_UNDERFLOW) + drm_dbg(&kmb->drm, + "LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val); + if (val & LAYER3_DMA_FIFO_OVERFLOW) + drm_dbg(&kmb->drm, + "LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val); + } + + spin_unlock(&kmb->irq_lock); + + if (status & LCD_INT_LAYER) { + /* Clear layer interrupts */ + kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_LAYER); + } + + /* Clear all interrupts */ + kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, 1); + return IRQ_HANDLED; +} + +/* IRQ handler */ +static irqreturn_t kmb_isr(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *)arg; + + handle_lcd_irq(dev); + return IRQ_HANDLED; +} + +static void kmb_irq_reset(struct drm_device *drm) +{ + kmb_write_lcd(to_kmb(drm), LCD_INT_CLEAR, 0xFFFF); + kmb_write_lcd(to_kmb(drm), LCD_INT_ENABLE, 0); +} + +DEFINE_DRM_GEM_CMA_FOPS(fops); + +static struct drm_driver kmb_driver = { + .driver_features = DRIVER_GEM | + DRIVER_MODESET | DRIVER_ATOMIC, + .irq_handler = kmb_isr, + .irq_preinstall = kmb_irq_reset, + .irq_uninstall = kmb_irq_reset, + /* GEM Operations */ + .fops = &fops, + DRM_GEM_CMA_DRIVER_OPS_VMAP, + .name = "kmb-drm", + .desc = "KEEMBAY DISPLAY DRIVER", + .date = "20201008", + .major = 1, + .minor = 0, +}; + +static int kmb_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct drm_device *drm = dev_get_drvdata(dev); + struct kmb_drm_private *kmb = to_kmb(drm); + + drm_dev_unregister(drm); + drm_kms_helper_poll_fini(drm); + of_node_put(kmb->crtc.port); + kmb->crtc.port = NULL; + pm_runtime_get_sync(drm->dev); + drm_irq_uninstall(drm); + pm_runtime_put_sync(drm->dev); + pm_runtime_disable(drm->dev); + + of_reserved_mem_device_release(drm->dev); + + /* Release clks */ + kmb_display_clk_disable(kmb); + + dev_set_drvdata(dev, NULL); + + /* Unregister DSI host */ + kmb_dsi_host_unregister(kmb->kmb_dsi); + drm_atomic_helper_shutdown(drm); + return 0; +} + +static int kmb_probe(struct platform_device *pdev) +{ + struct device *dev = get_device(&pdev->dev); + struct kmb_drm_private *kmb; + int ret = 0; + struct device_node *dsi_in; + struct device_node *dsi_node; + struct platform_device *dsi_pdev; + + /* The bridge (ADV 7535) will return -EPROBE_DEFER until it + * has a mipi_dsi_host to register its device to. So, we + * first register the DSI host during probe time, and then return + * -EPROBE_DEFER until the bridge is loaded. Probe will be called again + * and then the rest of the driver initialization can proceed + * afterwards and the bridge can be successfully attached. 
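+ * Note (editor): the deferral is detected in kmb_dsi_host_bridge_init() + * below, whose of_drm_find_bridge() lookup returns NULL until the + * ADV7535 driver has bound.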
+ */ + dsi_in = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0); + if (!dsi_in) { + DRM_ERROR("Failed to get dsi_in node info from DT"); + return -EINVAL; + } + dsi_node = of_graph_get_remote_port_parent(dsi_in); + if (!dsi_node) { + of_node_put(dsi_in); + DRM_ERROR("Failed to get dsi node from DT\n"); + return -EINVAL; + } + + dsi_pdev = of_find_device_by_node(dsi_node); + if (!dsi_pdev) { + of_node_put(dsi_in); + of_node_put(dsi_node); + DRM_ERROR("Failed to get dsi platform device\n"); + return -EINVAL; + } + + of_node_put(dsi_in); + of_node_put(dsi_node); + ret = kmb_dsi_host_bridge_init(get_device(&dsi_pdev->dev)); + + if (ret == -EPROBE_DEFER) { + return -EPROBE_DEFER; + } else if (ret) { + DRM_ERROR("probe failed to initialize DSI host bridge\n"); + return ret; + } + + /* Create DRM device */ + kmb = devm_drm_dev_alloc(dev, &kmb_driver, + struct kmb_drm_private, drm); + if (IS_ERR(kmb)) + return PTR_ERR(kmb); + + dev_set_drvdata(dev, &kmb->drm); + + /* Initialize MIPI DSI */ + kmb->kmb_dsi = kmb_dsi_init(dsi_pdev); + if (IS_ERR(kmb->kmb_dsi)) { + drm_err(&kmb->drm, "failed to initialize DSI\n"); + ret = PTR_ERR(kmb->kmb_dsi); + goto err_free1; + } + + kmb->kmb_dsi->dev = &dsi_pdev->dev; + kmb->kmb_dsi->pdev = dsi_pdev; + ret = kmb_hw_init(&kmb->drm, 0); + if (ret) + goto err_free1; + + ret = kmb_setup_mode_config(&kmb->drm); + if (ret) + goto err_free; + + ret = drm_irq_install(&kmb->drm, kmb->irq_lcd); + if (ret < 0) { + drm_err(&kmb->drm, "failed to install IRQ handler\n"); + goto err_irq; + } + + drm_kms_helper_poll_init(&kmb->drm); + + /* Register graphics device with the kernel */ + ret = drm_dev_register(&kmb->drm, 0); + if (ret) + goto err_register; + + return 0; + + err_register: + drm_kms_helper_poll_fini(&kmb->drm); + err_irq: + pm_runtime_disable(kmb->drm.dev); + err_free: + drm_crtc_cleanup(&kmb->crtc); + drm_mode_config_cleanup(&kmb->drm); + err_free1: + dev_set_drvdata(dev, NULL); + kmb_dsi_host_unregister(kmb->kmb_dsi); + + return ret; +} + +static const struct of_device_id kmb_of_match[] = { + {.compatible = "intel,keembay-display"}, + {}, +}; + +MODULE_DEVICE_TABLE(of, kmb_of_match); + +static int __maybe_unused kmb_pm_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct kmb_drm_private *kmb = drm ? to_kmb(drm) : NULL; + + drm_kms_helper_poll_disable(drm); + + kmb->state = drm_atomic_helper_suspend(drm); + if (IS_ERR(kmb->state)) { + drm_kms_helper_poll_enable(drm); + return PTR_ERR(kmb->state); + } + + return 0; +} + +static int __maybe_unused kmb_pm_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct kmb_drm_private *kmb = drm ? 
to_kmb(drm) : NULL; + + if (!kmb) + return 0; + + drm_atomic_helper_resume(drm, kmb->state); + drm_kms_helper_poll_enable(drm); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(kmb_pm_ops, kmb_pm_suspend, kmb_pm_resume); + +static struct platform_driver kmb_platform_driver = { + .probe = kmb_probe, + .remove = kmb_remove, + .driver = { + .name = "kmb-drm", + .pm = &kmb_pm_ops, + .of_match_table = kmb_of_match, + }, +}; + +module_platform_driver(kmb_platform_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Keembay Display driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h new file mode 100644 index 000000000000..02e806712a64 --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_drv.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright © 2018-2020 Intel Corporation + */ + +#ifndef __KMB_DRV_H__ +#define __KMB_DRV_H__ + +#include <drm/drm_device.h> + +#include "kmb_plane.h" +#include "kmb_regs.h" + +#define KMB_MAX_WIDTH 1920 /*Max width in pixels */ +#define KMB_MAX_HEIGHT 1080 /*Max height in pixels */ +#define KMB_MIN_WIDTH 1920 /*Max width in pixels */ +#define KMB_MIN_HEIGHT 1080 /*Max height in pixels */ +#define KMB_LCD_DEFAULT_CLK 200000000 +#define KMB_SYS_CLK_MHZ 500 + +#define ICAM_MMIO 0x3b100000 +#define ICAM_LCD_OFFSET 0x1080 +#define ICAM_MMIO_SIZE 0x2000 + +struct kmb_dsi; + +struct kmb_clock { + struct clk *clk_lcd; + struct clk *clk_pll0; +}; + +struct kmb_drm_private { + struct drm_device drm; + struct kmb_dsi *kmb_dsi; + void __iomem *lcd_mmio; + struct kmb_clock kmb_clk; + struct drm_crtc crtc; + struct kmb_plane *plane; + struct drm_atomic_state *state; + spinlock_t irq_lock; + int irq_lcd; + int sys_clk_mhz; + struct layer_status plane_status[KMB_MAX_PLANES]; + int kmb_under_flow; + int kmb_flush_done; + int layer_no; +}; + +static inline struct kmb_drm_private *to_kmb(const struct drm_device *dev) +{ + return container_of(dev, struct kmb_drm_private, drm); +} + +static inline struct kmb_drm_private *crtc_to_kmb_priv(const struct drm_crtc *x) +{ + return container_of(x, struct kmb_drm_private, crtc); +} + +static inline void kmb_write_lcd(struct kmb_drm_private *dev_p, + unsigned int reg, u32 value) +{ + writel(value, (dev_p->lcd_mmio + reg)); +} + +static inline u32 kmb_read_lcd(struct kmb_drm_private *dev_p, unsigned int reg) +{ + return readl(dev_p->lcd_mmio + reg); +} + +static inline void kmb_set_bitmask_lcd(struct kmb_drm_private *dev_p, + unsigned int reg, u32 mask) +{ + u32 reg_val = kmb_read_lcd(dev_p, reg); + + kmb_write_lcd(dev_p, reg, (reg_val | mask)); +} + +static inline void kmb_clr_bitmask_lcd(struct kmb_drm_private *dev_p, + unsigned int reg, u32 mask) +{ + u32 reg_val = kmb_read_lcd(dev_p, reg); + + kmb_write_lcd(dev_p, reg, (reg_val & (~mask))); +} + +int kmb_setup_crtc(struct drm_device *dev); +void kmb_set_scanout(struct kmb_drm_private *lcd); +#endif /* __KMB_DRV_H__ */ diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c new file mode 100644 index 000000000000..4b5d82af84b3 --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_dsi.c @@ -0,0 +1,1561 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright © 2019-2020 Intel Corporation + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/of_graph.h> +#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include 
<drm/drm_mipi_dsi.h> +#include <drm/drm_simple_kms_helper.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> + +#include "kmb_dsi.h" +#include "kmb_regs.h" + +static struct mipi_dsi_host *dsi_host; +static struct mipi_dsi_device *dsi_device; +static struct drm_bridge *adv_bridge; + +/* Default setting is 1080p, 4 lanes */ +#define IMG_HEIGHT_LINES 1080 +#define IMG_WIDTH_PX 1920 +#define MIPI_TX_ACTIVE_LANES 4 + +static struct mipi_tx_frame_section_cfg mipi_tx_frame0_sect_cfg = { + .width_pixels = IMG_WIDTH_PX, + .height_lines = IMG_HEIGHT_LINES, + .data_type = DSI_LP_DT_PPS_RGB888_24B, + .data_mode = MIPI_DATA_MODE1, + .dma_packed = 0 +}; + +static struct mipi_tx_frame_cfg mipitx_frame0_cfg = { + .sections[0] = &mipi_tx_frame0_sect_cfg, + .sections[1] = NULL, + .sections[2] = NULL, + .sections[3] = NULL, + .vsync_width = 5, + .v_backporch = 36, + .v_frontporch = 4, + .hsync_width = 44, + .h_backporch = 148, + .h_frontporch = 88 +}; + +static const struct mipi_tx_dsi_cfg mipitx_dsi_cfg = { + .hfp_blank_en = 0, + .eotp_en = 0, + .lpm_last_vfp_line = 0, + .lpm_first_vsa_line = 0, + .sync_pulse_eventn = DSI_VIDEO_MODE_NO_BURST_EVENT, + .hfp_blanking = SEND_BLANK_PACKET, + .hbp_blanking = SEND_BLANK_PACKET, + .hsa_blanking = SEND_BLANK_PACKET, + .v_blanking = SEND_BLANK_PACKET, +}; + +static struct mipi_ctrl_cfg mipi_tx_init_cfg = { + .active_lanes = MIPI_TX_ACTIVE_LANES, + .lane_rate_mbps = MIPI_TX_LANE_DATA_RATE_MBPS, + .ref_clk_khz = MIPI_TX_REF_CLK_KHZ, + .cfg_clk_khz = MIPI_TX_CFG_CLK_KHZ, + .tx_ctrl_cfg = { + .frames[0] = &mipitx_frame0_cfg, + .frames[1] = NULL, + .frames[2] = NULL, + .frames[3] = NULL, + .tx_dsi_cfg = &mipitx_dsi_cfg, + .line_sync_pkt_en = 0, + .line_counter_active = 0, + .frame_counter_active = 0, + .tx_always_use_hact = 1, + .tx_hact_wait_stop = 1, + } +}; + +struct mipi_hs_freq_range_cfg { + u16 default_bit_rate_mbps; + u8 hsfreqrange_code; +}; + +struct vco_params { + u32 freq; + u32 range; + u32 divider; +}; + +static const struct vco_params vco_table[] = { + {52, 0x3f, 8}, + {80, 0x39, 8}, + {105, 0x2f, 4}, + {160, 0x29, 4}, + {210, 0x1f, 2}, + {320, 0x19, 2}, + {420, 0x0f, 1}, + {630, 0x09, 1}, + {1100, 0x03, 1}, + {0xffff, 0x01, 1}, +}; + +static const struct mipi_hs_freq_range_cfg +mipi_hs_freq_range[MIPI_DPHY_DEFAULT_BIT_RATES] = { + {.default_bit_rate_mbps = 80, .hsfreqrange_code = 0x00}, + {.default_bit_rate_mbps = 90, .hsfreqrange_code = 0x10}, + {.default_bit_rate_mbps = 100, .hsfreqrange_code = 0x20}, + {.default_bit_rate_mbps = 110, .hsfreqrange_code = 0x30}, + {.default_bit_rate_mbps = 120, .hsfreqrange_code = 0x01}, + {.default_bit_rate_mbps = 130, .hsfreqrange_code = 0x11}, + {.default_bit_rate_mbps = 140, .hsfreqrange_code = 0x21}, + {.default_bit_rate_mbps = 150, .hsfreqrange_code = 0x31}, + {.default_bit_rate_mbps = 160, .hsfreqrange_code = 0x02}, + {.default_bit_rate_mbps = 170, .hsfreqrange_code = 0x12}, + {.default_bit_rate_mbps = 180, .hsfreqrange_code = 0x22}, + {.default_bit_rate_mbps = 190, .hsfreqrange_code = 0x32}, + {.default_bit_rate_mbps = 205, .hsfreqrange_code = 0x03}, + {.default_bit_rate_mbps = 220, .hsfreqrange_code = 0x13}, + {.default_bit_rate_mbps = 235, .hsfreqrange_code = 0x23}, + {.default_bit_rate_mbps = 250, .hsfreqrange_code = 0x33}, + {.default_bit_rate_mbps = 275, .hsfreqrange_code = 0x04}, + {.default_bit_rate_mbps = 300, .hsfreqrange_code = 0x14}, + {.default_bit_rate_mbps = 325, .hsfreqrange_code = 0x25}, + {.default_bit_rate_mbps = 350, .hsfreqrange_code = 0x35}, + {.default_bit_rate_mbps = 400, 
.hsfreqrange_code = 0x05}, + {.default_bit_rate_mbps = 450, .hsfreqrange_code = 0x16}, + {.default_bit_rate_mbps = 500, .hsfreqrange_code = 0x26}, + {.default_bit_rate_mbps = 550, .hsfreqrange_code = 0x37}, + {.default_bit_rate_mbps = 600, .hsfreqrange_code = 0x07}, + {.default_bit_rate_mbps = 650, .hsfreqrange_code = 0x18}, + {.default_bit_rate_mbps = 700, .hsfreqrange_code = 0x28}, + {.default_bit_rate_mbps = 750, .hsfreqrange_code = 0x39}, + {.default_bit_rate_mbps = 800, .hsfreqrange_code = 0x09}, + {.default_bit_rate_mbps = 850, .hsfreqrange_code = 0x19}, + {.default_bit_rate_mbps = 900, .hsfreqrange_code = 0x29}, + {.default_bit_rate_mbps = 1000, .hsfreqrange_code = 0x0A}, + {.default_bit_rate_mbps = 1050, .hsfreqrange_code = 0x1A}, + {.default_bit_rate_mbps = 1100, .hsfreqrange_code = 0x2A}, + {.default_bit_rate_mbps = 1150, .hsfreqrange_code = 0x3B}, + {.default_bit_rate_mbps = 1200, .hsfreqrange_code = 0x0B}, + {.default_bit_rate_mbps = 1250, .hsfreqrange_code = 0x1B}, + {.default_bit_rate_mbps = 1300, .hsfreqrange_code = 0x2B}, + {.default_bit_rate_mbps = 1350, .hsfreqrange_code = 0x3C}, + {.default_bit_rate_mbps = 1400, .hsfreqrange_code = 0x0C}, + {.default_bit_rate_mbps = 1450, .hsfreqrange_code = 0x1C}, + {.default_bit_rate_mbps = 1500, .hsfreqrange_code = 0x2C}, + {.default_bit_rate_mbps = 1550, .hsfreqrange_code = 0x3D}, + {.default_bit_rate_mbps = 1600, .hsfreqrange_code = 0x0D}, + {.default_bit_rate_mbps = 1650, .hsfreqrange_code = 0x1D}, + {.default_bit_rate_mbps = 1700, .hsfreqrange_code = 0x2E}, + {.default_bit_rate_mbps = 1750, .hsfreqrange_code = 0x3E}, + {.default_bit_rate_mbps = 1800, .hsfreqrange_code = 0x0E}, + {.default_bit_rate_mbps = 1850, .hsfreqrange_code = 0x1E}, + {.default_bit_rate_mbps = 1900, .hsfreqrange_code = 0x2F}, + {.default_bit_rate_mbps = 1950, .hsfreqrange_code = 0x3F}, + {.default_bit_rate_mbps = 2000, .hsfreqrange_code = 0x0F}, + {.default_bit_rate_mbps = 2050, .hsfreqrange_code = 0x40}, + {.default_bit_rate_mbps = 2100, .hsfreqrange_code = 0x41}, + {.default_bit_rate_mbps = 2150, .hsfreqrange_code = 0x42}, + {.default_bit_rate_mbps = 2200, .hsfreqrange_code = 0x43}, + {.default_bit_rate_mbps = 2250, .hsfreqrange_code = 0x44}, + {.default_bit_rate_mbps = 2300, .hsfreqrange_code = 0x45}, + {.default_bit_rate_mbps = 2350, .hsfreqrange_code = 0x46}, + {.default_bit_rate_mbps = 2400, .hsfreqrange_code = 0x47}, + {.default_bit_rate_mbps = 2450, .hsfreqrange_code = 0x48}, + {.default_bit_rate_mbps = 2500, .hsfreqrange_code = 0x49} +}; + +static void kmb_dsi_clk_disable(struct kmb_dsi *kmb_dsi) +{ + clk_disable_unprepare(kmb_dsi->clk_mipi); + clk_disable_unprepare(kmb_dsi->clk_mipi_ecfg); + clk_disable_unprepare(kmb_dsi->clk_mipi_cfg); +} + +void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi) +{ + kmb_dsi_clk_disable(kmb_dsi); + mipi_dsi_host_unregister(kmb_dsi->host); +} + +/* + * This DSI can only be paired with bridges that do config through i2c + * which is ADV 7535 in the KMB EVM + */ +static ssize_t kmb_dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + return 0; +} + +static int kmb_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *dev) +{ + return 0; +} + +static int kmb_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *dev) +{ + return 0; +} + +static const struct mipi_dsi_host_ops kmb_dsi_host_ops = { + .attach = kmb_dsi_host_attach, + .detach = kmb_dsi_host_detach, + .transfer = kmb_dsi_host_transfer, +}; + +int kmb_dsi_host_bridge_init(struct device *dev) +{ + 
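/* Editor's note: dsi_host and dsi_device are module-static singletons, so when probe is retried after -EPROBE_DEFER this function reuses the host registered on the first pass instead of registering a second one. */ + 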
struct device_node *encoder_node, *dsi_out; + + /* Create and register MIPI DSI host */ + if (!dsi_host) { + dsi_host = kzalloc(sizeof(*dsi_host), GFP_KERNEL); + if (!dsi_host) + return -ENOMEM; + + dsi_host->ops = &kmb_dsi_host_ops; + + if (!dsi_device) { + dsi_device = kzalloc(sizeof(*dsi_device), GFP_KERNEL); + if (!dsi_device) { + kfree(dsi_host); + return -ENOMEM; + } + } + + dsi_host->dev = dev; + mipi_dsi_host_register(dsi_host); + } + + /* Find ADV7535 node and initialize it */ + dsi_out = of_graph_get_endpoint_by_regs(dev->of_node, 0, 1); + if (!dsi_out) { + DRM_ERROR("Failed to get dsi_out node info from DT\n"); + return -EINVAL; + } + encoder_node = of_graph_get_remote_port_parent(dsi_out); + if (!encoder_node) { + of_node_put(dsi_out); + DRM_ERROR("Failed to get bridge info from DT\n"); + return -EINVAL; + } + /* Locate drm bridge from the hdmi encoder DT node */ + adv_bridge = of_drm_find_bridge(encoder_node); + of_node_put(dsi_out); + of_node_put(encoder_node); + if (!adv_bridge) { + DRM_DEBUG("Wait for external bridge driver DT\n"); + return -EPROBE_DEFER; + } + + return 0; +} + +static u32 mipi_get_datatype_params(u32 data_type, u32 data_mode, + struct mipi_data_type_params *params) +{ + struct mipi_data_type_params data_type_param; + + switch (data_type) { + case DSI_LP_DT_PPS_YCBCR420_12B: + data_type_param.size_constraint_pixels = 2; + data_type_param.size_constraint_bytes = 3; + switch (data_mode) { + /* Case 0 not supported according to MDK */ + case 1: + case 2: + case 3: + data_type_param.pixels_per_pclk = 2; + data_type_param.bits_per_pclk = 24; + break; + default: + DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode); + return -EINVAL; + }; + break; + case DSI_LP_DT_PPS_YCBCR422_16B: + data_type_param.size_constraint_pixels = 2; + data_type_param.size_constraint_bytes = 4; + switch (data_mode) { + /* Case 0 and 1 not supported according + * to MDK + */ + case 2: + data_type_param.pixels_per_pclk = 1; + data_type_param.bits_per_pclk = 16; + break; + case 3: + data_type_param.pixels_per_pclk = 2; + data_type_param.bits_per_pclk = 32; + break; + default: + DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode); + return -EINVAL; + }; + break; + case DSI_LP_DT_LPPS_YCBCR422_20B: + case DSI_LP_DT_PPS_YCBCR422_24B: + data_type_param.size_constraint_pixels = 2; + data_type_param.size_constraint_bytes = 6; + switch (data_mode) { + /* Case 0 not supported according to MDK */ + case 1: + case 2: + case 3: + data_type_param.pixels_per_pclk = 1; + data_type_param.bits_per_pclk = 24; + break; + default: + DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode); + return -EINVAL; + }; + break; + case DSI_LP_DT_PPS_RGB565_16B: + data_type_param.size_constraint_pixels = 1; + data_type_param.size_constraint_bytes = 2; + switch (data_mode) { + case 0: + case 1: + data_type_param.pixels_per_pclk = 1; + data_type_param.bits_per_pclk = 16; + break; + case 2: + case 3: + data_type_param.pixels_per_pclk = 2; + data_type_param.bits_per_pclk = 32; + break; + default: + DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode); + return -EINVAL; + }; + break; + case DSI_LP_DT_PPS_RGB666_18B: + data_type_param.size_constraint_pixels = 4; + data_type_param.size_constraint_bytes = 9; + data_type_param.bits_per_pclk = 18; + data_type_param.pixels_per_pclk = 1; + break; + case DSI_LP_DT_LPPS_RGB666_18B: + case DSI_LP_DT_PPS_RGB888_24B: + data_type_param.size_constraint_pixels = 1; + data_type_param.size_constraint_bytes = 3; + data_type_param.bits_per_pclk = 24; + data_type_param.pixels_per_pclk = 1; + 
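/* RGB888 packs 1 pixel in 3 bytes, so any line width is valid and one pixel (24 bits) moves per PCLK cycle. */ + 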
break; + case DSI_LP_DT_PPS_RGB101010_30B: + data_type_param.size_constraint_pixels = 4; + data_type_param.size_constraint_bytes = 15; + data_type_param.bits_per_pclk = 30; + data_type_param.pixels_per_pclk = 1; + break; + default: + DRM_ERROR("DSI: Invalid data_type %d\n", data_type); + return -EINVAL; + }; + + *params = data_type_param; + return 0; +} + +static u32 compute_wc(u32 width_px, u8 size_constr_p, u8 size_constr_b) +{ + /* Calculate the word count for each long packet */ + return (((width_px / size_constr_p) * size_constr_b) & 0xffff); +} + +static u32 compute_unpacked_bytes(u32 wc, u8 bits_per_pclk) +{ + /* Number of PCLK cycles needed to transfer a line + * with each PCLK cycle, 4 Bytes are sent through the PPL module + */ + return ((wc * 8) / bits_per_pclk) * 4; +} + +static u32 mipi_tx_fg_section_cfg_regs(struct kmb_dsi *kmb_dsi, + u8 frame_id, u8 section, + u32 height_lines, u32 unpacked_bytes, + struct mipi_tx_frame_sect_phcfg *ph_cfg) +{ + u32 cfg = 0; + u32 ctrl_no = MIPI_CTRL6; + u32 reg_adr; + + /* Frame section packet header */ + /* Word count bits [15:0] */ + cfg = (ph_cfg->wc & MIPI_TX_SECT_WC_MASK) << 0; + + /* Data type (bits [21:16]) */ + cfg |= ((ph_cfg->data_type & MIPI_TX_SECT_DT_MASK) + << MIPI_TX_SECT_DT_SHIFT); + + /* Virtual channel (bits [23:22]) */ + cfg |= ((ph_cfg->vchannel & MIPI_TX_SECT_VC_MASK) + << MIPI_TX_SECT_VC_SHIFT); + + /* Data mode (bits [24:25]) */ + cfg |= ((ph_cfg->data_mode & MIPI_TX_SECT_DM_MASK) + << MIPI_TX_SECT_DM_SHIFT); + if (ph_cfg->dma_packed) + cfg |= MIPI_TX_SECT_DMA_PACKED; + + dev_dbg(kmb_dsi->dev, + "ctrl=%d frame_id=%d section=%d cfg=%x packed=%d\n", + ctrl_no, frame_id, section, cfg, ph_cfg->dma_packed); + kmb_write_mipi(kmb_dsi, + (MIPI_TXm_HS_FGn_SECTo_PH(ctrl_no, frame_id, section)), + cfg); + + /* Unpacked bytes */ + + /* There are 4 frame generators and each fg has 4 sections + * There are 2 registers for unpacked bytes (# bytes each + * section occupies in memory) + * REG_UNPACKED_BYTES0: [15:0]-BYTES0, [31:16]-BYTES1 + * REG_UNPACKED_BYTES1: [15:0]-BYTES2, [31:16]-BYTES3 + */ + reg_adr = + MIPI_TXm_HS_FGn_SECT_UNPACKED_BYTES0(ctrl_no, + frame_id) + (section / 2) * 4; + kmb_write_bits_mipi(kmb_dsi, reg_adr, (section % 2) * 16, 16, + unpacked_bytes); + dev_dbg(kmb_dsi->dev, + "unpacked_bytes = %d, wordcount = %d\n", unpacked_bytes, + ph_cfg->wc); + + /* Line config */ + reg_adr = MIPI_TXm_HS_FGn_SECTo_LINE_CFG(ctrl_no, frame_id, section); + kmb_write_mipi(kmb_dsi, reg_adr, height_lines); + return 0; +} + +static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi, + u8 frame_id, u8 section, + struct mipi_tx_frame_section_cfg *frame_scfg, + u32 *bits_per_pclk, u32 *wc) +{ + u32 ret = 0; + u32 unpacked_bytes; + struct mipi_data_type_params data_type_parameters; + struct mipi_tx_frame_sect_phcfg ph_cfg; + + ret = mipi_get_datatype_params(frame_scfg->data_type, + frame_scfg->data_mode, + &data_type_parameters); + if (ret) + return ret; + + /* Packet width has to be a multiple of the minimum packet width + * (in pixels) set for each data type + */ + if (frame_scfg->width_pixels % + data_type_parameters.size_constraint_pixels != 0) + return -EINVAL; + + *wc = compute_wc(frame_scfg->width_pixels, + data_type_parameters.size_constraint_pixels, + data_type_parameters.size_constraint_bytes); + unpacked_bytes = compute_unpacked_bytes(*wc, + data_type_parameters.bits_per_pclk); + ph_cfg.wc = *wc; + ph_cfg.data_mode = frame_scfg->data_mode; + ph_cfg.data_type = frame_scfg->data_type; + ph_cfg.dma_packed = frame_scfg->dma_packed; + 
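/* Worked example (editor's note): for the default 1920-pixel RGB888 section the constraints are 1 pixel / 3 bytes, giving wc = (1920 / 1) * 3 = 5760 bytes and unpacked_bytes = ((5760 * 8) / 24) * 4 = 7680. */ + 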
ph_cfg.vchannel = frame_id; + + mipi_tx_fg_section_cfg_regs(kmb_dsi, frame_id, section, + frame_scfg->height_lines, + unpacked_bytes, &ph_cfg); + + /* Caller needs bits_per_pclk for additional calculations */ + *bits_per_pclk = data_type_parameters.bits_per_pclk; + + return 0; +} + +static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen, + struct mipi_tx_frame_timing_cfg *fg_cfg) +{ + u32 sysclk; + u32 ppl_llp_ratio; + u32 ctrl_no = MIPI_CTRL6, reg_adr, val, offset; + + /* 500 MHz system clock minus 50 to account for the difference in + * MIPI clock speed in RTL tests + */ + sysclk = kmb_dsi->sys_clk_mhz - 50; + + /* PPL-Pixel Packing Layer, LLP-Low Level Protocol + * Frame generator timing parameters are clocked on the system clock, + * whereas the equivalent parameters in the LLP blocks are clocked + * on LLP Tx clock from the D-PHY - BYTE clock + */ + + /* Multiply by 1000 to maintain precision */ + ppl_llp_ratio = ((fg_cfg->bpp / 8) * sysclk * 1000) / + ((fg_cfg->lane_rate_mbps / 8) * fg_cfg->active_lanes); + + dev_dbg(kmb_dsi->dev, "ppl_llp_ratio=%d\n", ppl_llp_ratio); + dev_dbg(kmb_dsi->dev, "bpp=%d sysclk=%d lane-rate=%d active-lanes=%d\n", + fg_cfg->bpp, sysclk, fg_cfg->lane_rate_mbps, + fg_cfg->active_lanes); + + /* Frame generator number of lines */ + reg_adr = MIPI_TXm_HS_FGn_NUM_LINES(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, fg_cfg->v_active); + + /* vsync width + * There are 2 registers for vsync width (VSA in lines for + * channels 0-3) + * REG_VSYNC_WIDTH0: [15:0]-VSA for channel0, [31:16]-VSA for channel1 + * REG_VSYNC_WIDTH1: [15:0]-VSA for channel2, [31:16]-VSA for channel3 + */ + offset = (frame_gen % 2) * 16; + reg_adr = MIPI_TXm_HS_VSYNC_WIDTHn(ctrl_no, frame_gen / 2); + kmb_write_bits_mipi(kmb_dsi, reg_adr, offset, 16, fg_cfg->vsync_width); + + /* vertical backporch (vbp) */ + reg_adr = MIPI_TXm_HS_V_BACKPORCHESn(ctrl_no, frame_gen / 2); + kmb_write_bits_mipi(kmb_dsi, reg_adr, offset, 16, fg_cfg->v_backporch); + + /* vertical frontporch (vfp) */ + reg_adr = MIPI_TXm_HS_V_FRONTPORCHESn(ctrl_no, frame_gen / 2); + kmb_write_bits_mipi(kmb_dsi, reg_adr, offset, 16, fg_cfg->v_frontporch); + + /* vertical active (vactive) */ + reg_adr = MIPI_TXm_HS_V_ACTIVEn(ctrl_no, frame_gen / 2); + kmb_write_bits_mipi(kmb_dsi, reg_adr, offset, 16, fg_cfg->v_active); + + /* hsync width */ + reg_adr = MIPI_TXm_HS_HSYNC_WIDTHn(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, + (fg_cfg->hsync_width * ppl_llp_ratio) / 1000); + + /* horizontal backporch (hbp) */ + reg_adr = MIPI_TXm_HS_H_BACKPORCHn(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, + (fg_cfg->h_backporch * ppl_llp_ratio) / 1000); + + /* horizontal frontporch (hfp) */ + reg_adr = MIPI_TXm_HS_H_FRONTPORCHn(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, + (fg_cfg->h_frontporch * ppl_llp_ratio) / 1000); + + /* horizontal active (ha) */ + reg_adr = MIPI_TXm_HS_H_ACTIVEn(ctrl_no, frame_gen); + + /* convert h_active which is wc in bytes to cycles */ + val = (fg_cfg->h_active * sysclk * 1000) / + ((fg_cfg->lane_rate_mbps / 8) * fg_cfg->active_lanes); + val /= 1000; + kmb_write_mipi(kmb_dsi, reg_adr, val); + + /* llp hsync width */ + reg_adr = MIPI_TXm_HS_LLP_HSYNC_WIDTHn(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, fg_cfg->hsync_width * (fg_cfg->bpp / 8)); + + /* llp h backporch */ + reg_adr = MIPI_TXm_HS_LLP_H_BACKPORCHn(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, fg_cfg->h_backporch * (fg_cfg->bpp / 8)); + + /* llp h frontporch */ + reg_adr = 
MIPI_TXm_HS_LLP_H_FRONTPORCHn(ctrl_no, frame_gen); + kmb_write_mipi(kmb_dsi, reg_adr, + fg_cfg->h_frontporch * (fg_cfg->bpp / 8)); +} + +static void mipi_tx_fg_cfg(struct kmb_dsi *kmb_dsi, u8 frame_gen, + u8 active_lanes, u32 bpp, u32 wc, + u32 lane_rate_mbps, struct mipi_tx_frame_cfg *fg_cfg) +{ + u32 i, fg_num_lines = 0; + struct mipi_tx_frame_timing_cfg fg_t_cfg; + + /* Calculate the total frame generator number of + * lines based on it's active sections + */ + for (i = 0; i < MIPI_TX_FRAME_GEN_SECTIONS; i++) { + if (fg_cfg->sections[i]) + fg_num_lines += fg_cfg->sections[i]->height_lines; + } + + fg_t_cfg.bpp = bpp; + fg_t_cfg.lane_rate_mbps = lane_rate_mbps; + fg_t_cfg.hsync_width = fg_cfg->hsync_width; + fg_t_cfg.h_backporch = fg_cfg->h_backporch; + fg_t_cfg.h_frontporch = fg_cfg->h_frontporch; + fg_t_cfg.h_active = wc; + fg_t_cfg.vsync_width = fg_cfg->vsync_width; + fg_t_cfg.v_backporch = fg_cfg->v_backporch; + fg_t_cfg.v_frontporch = fg_cfg->v_frontporch; + fg_t_cfg.v_active = fg_num_lines; + fg_t_cfg.active_lanes = active_lanes; + + /* Apply frame generator timing setting */ + mipi_tx_fg_cfg_regs(kmb_dsi, frame_gen, &fg_t_cfg); +} + +static void mipi_tx_multichannel_fifo_cfg(struct kmb_dsi *kmb_dsi, + u8 active_lanes, u8 vchannel_id) +{ + u32 fifo_size, fifo_rthreshold; + u32 ctrl_no = MIPI_CTRL6; + + /* Clear all mc fifo channel sizes and thresholds */ + kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_CTRL_EN, 0); + kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_CHAN_ALLOC0, 0); + kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_CHAN_ALLOC1, 0); + kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_RTHRESHOLD0, 0); + kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_RTHRESHOLD1, 0); + + fifo_size = ((active_lanes > MIPI_D_LANES_PER_DPHY) ? + MIPI_CTRL_4LANE_MAX_MC_FIFO_LOC : + MIPI_CTRL_2LANE_MAX_MC_FIFO_LOC) - 1; + + /* MC fifo size for virtual channels 0-3 + * REG_MC_FIFO_CHAN_ALLOC0: [8:0]-channel0, [24:16]-channel1 + * REG_MC_FIFO_CHAN_ALLOC1: [8:0]-2, [24:16]-channel3 + */ + SET_MC_FIFO_CHAN_ALLOC(kmb_dsi, ctrl_no, vchannel_id, fifo_size); + + /* Set threshold to half the fifo size, actual size=size*16 */ + fifo_rthreshold = ((fifo_size) * 8) & BIT_MASK_16; + SET_MC_FIFO_RTHRESHOLD(kmb_dsi, ctrl_no, vchannel_id, fifo_rthreshold); + + /* Enable the MC FIFO channel corresponding to the Virtual Channel */ + kmb_set_bit_mipi(kmb_dsi, MIPI_TXm_HS_MC_FIFO_CTRL_EN(ctrl_no), + vchannel_id); +} + +static void mipi_tx_ctrl_cfg(struct kmb_dsi *kmb_dsi, u8 fg_id, + struct mipi_ctrl_cfg *ctrl_cfg) +{ + u32 sync_cfg = 0, ctrl = 0, fg_en; + u32 ctrl_no = MIPI_CTRL6; + + /* MIPI_TX_HS_SYNC_CFG */ + if (ctrl_cfg->tx_ctrl_cfg.line_sync_pkt_en) + sync_cfg |= LINE_SYNC_PKT_ENABLE; + if (ctrl_cfg->tx_ctrl_cfg.frame_counter_active) + sync_cfg |= FRAME_COUNTER_ACTIVE; + if (ctrl_cfg->tx_ctrl_cfg.line_counter_active) + sync_cfg |= LINE_COUNTER_ACTIVE; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->v_blanking) + sync_cfg |= DSI_V_BLANKING; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->hsa_blanking) + sync_cfg |= DSI_HSA_BLANKING; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->hbp_blanking) + sync_cfg |= DSI_HBP_BLANKING; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->hfp_blanking) + sync_cfg |= DSI_HFP_BLANKING; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->sync_pulse_eventn) + sync_cfg |= DSI_SYNC_PULSE_EVENTN; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->lpm_first_vsa_line) + sync_cfg |= DSI_LPM_FIRST_VSA_LINE; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->lpm_last_vfp_line) + sync_cfg |= DSI_LPM_LAST_VFP_LINE; + + /* Enable frame generator */ + fg_en = 1 << fg_id; + 
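/* Editor's note: the enable/HACT fields below are per-generator bitmasks, so fg_en is the one-hot bit selecting which of the four frame generators these settings apply to. */ + 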
sync_cfg |= FRAME_GEN_EN(fg_en); + + if (ctrl_cfg->tx_ctrl_cfg.tx_always_use_hact) + sync_cfg |= ALWAYS_USE_HACT(fg_en); + if (ctrl_cfg->tx_ctrl_cfg.tx_hact_wait_stop) + sync_cfg |= HACT_WAIT_STOP(fg_en); + + dev_dbg(kmb_dsi->dev, "sync_cfg=%d fg_en=%d\n", sync_cfg, fg_en); + + /* MIPI_TX_HS_CTRL */ + + /* type:DSI, source:LCD */ + ctrl = HS_CTRL_EN | TX_SOURCE; + ctrl |= LCD_VC(fg_id); + ctrl |= ACTIVE_LANES(ctrl_cfg->active_lanes - 1); + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->eotp_en) + ctrl |= DSI_EOTP_EN; + if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->hfp_blank_en) + ctrl |= DSI_CMD_HFP_EN; + + /*67 ns stop time */ + ctrl |= HSEXIT_CNT(0x43); + + kmb_write_mipi(kmb_dsi, MIPI_TXm_HS_SYNC_CFG(ctrl_no), sync_cfg); + kmb_write_mipi(kmb_dsi, MIPI_TXm_HS_CTRL(ctrl_no), ctrl); +} + +static u32 mipi_tx_init_cntrl(struct kmb_dsi *kmb_dsi, + struct mipi_ctrl_cfg *ctrl_cfg) +{ + u32 ret = 0; + u8 active_vchannels = 0; + u8 frame_id, sect; + u32 bits_per_pclk = 0; + u32 word_count = 0; + struct mipi_tx_frame_cfg *frame; + + /* This is the order to initialize MIPI TX: + * 1. set frame section parameters + * 2. set frame specific parameters + * 3. connect lcd to mipi + * 4. multi channel fifo cfg + * 5. set mipitxcctrlcfg + */ + + for (frame_id = 0; frame_id < 4; frame_id++) { + frame = ctrl_cfg->tx_ctrl_cfg.frames[frame_id]; + + /* Find valid frame, assume only one valid frame */ + if (!frame) + continue; + + /* Frame Section configuration */ + /* TODO - assume there is only one valid section in a frame, + * so bits_per_pclk and word_count are only set once + */ + for (sect = 0; sect < MIPI_CTRL_VIRTUAL_CHANNELS; sect++) { + if (!frame->sections[sect]) + continue; + + ret = mipi_tx_fg_section_cfg(kmb_dsi, frame_id, sect, + frame->sections[sect], + &bits_per_pclk, + &word_count); + if (ret) + return ret; + } + + /* Set frame specific parameters */ + mipi_tx_fg_cfg(kmb_dsi, frame_id, ctrl_cfg->active_lanes, + bits_per_pclk, word_count, + ctrl_cfg->lane_rate_mbps, frame); + + active_vchannels++; + + /* Stop iterating as only one virtual channel + * shall be used for LCD connection + */ + break; + } + + if (active_vchannels == 0) + return -EINVAL; + /* Multi-Channel FIFO Configuration */ + mipi_tx_multichannel_fifo_cfg(kmb_dsi, ctrl_cfg->active_lanes, frame_id); + + /* Frame Generator Enable */ + mipi_tx_ctrl_cfg(kmb_dsi, frame_id, ctrl_cfg); + + return ret; +} + +static void test_mode_send(struct kmb_dsi *kmb_dsi, u32 dphy_no, + u32 test_code, u32 test_data) +{ + /* Steps to send test code: + * - set testclk HIGH + * - set testdin with test code + * - set testen HIGH + * - set testclk LOW + * - set testen LOW + */ + + /* Set testclk high */ + SET_DPHY_TEST_CTRL1_CLK(kmb_dsi, dphy_no); + + /* Set testdin */ + SET_TEST_DIN0_3(kmb_dsi, dphy_no, test_code); + + /* Set testen high */ + SET_DPHY_TEST_CTRL1_EN(kmb_dsi, dphy_no); + + /* Set testclk low */ + CLR_DPHY_TEST_CTRL1_CLK(kmb_dsi, dphy_no); + + /* Set testen low */ + CLR_DPHY_TEST_CTRL1_EN(kmb_dsi, dphy_no); + + if (test_code) { + /* Steps to send test data: + * - set testen LOW + * - set testclk LOW + * - set testdin with data + * - set testclk HIGH + */ + + /* Set testen low */ + CLR_DPHY_TEST_CTRL1_EN(kmb_dsi, dphy_no); + + /* Set testclk low */ + CLR_DPHY_TEST_CTRL1_CLK(kmb_dsi, dphy_no); + + /* Set data in testdin */ + kmb_write_mipi(kmb_dsi, + DPHY_TEST_DIN0_3 + ((dphy_no / 0x4) * 0x4), + test_data << ((dphy_no % 4) * 8)); + + /* Set testclk high */ + SET_DPHY_TEST_CTRL1_CLK(kmb_dsi, dphy_no); + } +} + +static inline void + 
set_test_mode_src_osc_freq_target_low_bits(struct kmb_dsi *kmb_dsi, + u32 dphy_no, + u32 freq) +{ + /* Typical rise/fall time=166, refer Table 1207 databook, + * sr_osc_freq_target[7:0] + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_SLEW_RATE_DDL_CYCLES, + (freq & 0x7f)); +} + +static inline void + set_test_mode_src_osc_freq_target_hi_bits(struct kmb_dsi *kmb_dsi, + u32 dphy_no, + u32 freq) +{ + u32 data; + + /* Flag this as high nibble */ + data = ((freq >> 6) & 0x1f) | (1 << 7); + + /* Typical rise/fall time=166, refer Table 1207 databook, + * sr_osc_freq_target[11:7] + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_SLEW_RATE_DDL_CYCLES, data); +} + +static void mipi_tx_get_vco_params(struct vco_params *vco) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vco_table); i++) { + if (vco->freq < vco_table[i].freq) { + *vco = vco_table[i]; + return; + } + } + + WARN_ONCE(1, "Invalid vco freq = %u for PLL setup\n", vco->freq); +} + +static void mipi_tx_pll_setup(struct kmb_dsi *kmb_dsi, u32 dphy_no, + u32 ref_clk_mhz, u32 target_freq_mhz) +{ + u32 best_n = 0, best_m = 0; + u32 n = 0, m = 0, div = 0, delta, freq = 0, t_freq; + u32 best_freq_delta = 3000; + + /* pll_ref_clk: - valid range: 2~64 MHz; Typically 24 MHz + * Fvco: - valid range: 320~1250 MHz (Gen3 D-PHY) + * Fout: - valid range: 40~1250 MHz (Gen3 D-PHY) + * n: - valid range [0 15] + * N: - N = n + 1 + * -valid range: [1 16] + * -conditions: - (pll_ref_clk / N) >= 2 MHz + * -(pll_ref_clk / N) <= 8 MHz + * m: valid range [62 623] + * M: - M = m + 2 + * -valid range [64 625] + * -Fvco = (M/N) * pll_ref_clk + */ + struct vco_params vco_p = { + .range = 0, + .divider = 1, + }; + + vco_p.freq = target_freq_mhz; + mipi_tx_get_vco_params(&vco_p); + + /* Search pll n parameter */ + for (n = PLL_N_MIN; n <= PLL_N_MAX; n++) { + /* Calculate the pll input frequency division ratio + * multiply by 1000 for precision - + * no floating point, add n for rounding + */ + div = ((ref_clk_mhz * 1000) + n) / (n + 1); + + /* Found a valid n parameter */ + if ((div < 2000 || div > 8000)) + continue; + + /* Search pll m parameter */ + for (m = PLL_M_MIN; m <= PLL_M_MAX; m++) { + /* Calculate the Fvco(DPHY PLL output frequency) + * using the current n,m params + */ + freq = div * (m + 2); + freq /= 1000; + + /* Trim the potential pll freq to max supported */ + if (freq > PLL_FVCO_MAX) + continue; + + delta = abs(freq - target_freq_mhz); + + /* Select the best (closest to target pll freq) + * n,m parameters so far + */ + if (delta < best_freq_delta) { + best_n = n; + best_m = m; + best_freq_delta = delta; + } + } + } + + /* Program vco_cntrl parameter + * PLL_VCO_Control[5:0] = pll_vco_cntrl_ovr, + * PLL_VCO_Control[6] = pll_vco_cntrl_ovr_en + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_VCO_CTRL, (vco_p.range + | (1 << 6))); + + /* Program m, n pll parameters */ + dev_dbg(kmb_dsi->dev, "m = %d n = %d\n", best_m, best_n); + + /* PLL_Input_Divider_Ratio[3:0] = pll_n_ovr */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_INPUT_DIVIDER, + (best_n & 0x0f)); + + /* m - low nibble PLL_Loop_Divider_Ratio[4:0] + * pll_m_ovr[4:0] + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_FEEDBACK_DIVIDER, + (best_m & 0x1f)); + + /* m - high nibble PLL_Loop_Divider_Ratio[4:0] + * pll_m_ovr[9:5] + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_FEEDBACK_DIVIDER, + ((best_m >> 5) & 0x1f) | PLL_FEEDBACK_DIVIDER_HIGH); + + /* Enable overwrite of n,m parameters :pll_n_ovr_en, pll_m_ovr_en */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_OUTPUT_CLK_SEL, + 
(PLL_N_OVR_EN | PLL_M_OVR_EN)); + + /* Program Charge-Pump parameters */ + + /* pll_prop_cntrl-fixed values for prop_cntrl from DPHY doc */ + t_freq = target_freq_mhz * vco_p.divider; + test_mode_send(kmb_dsi, dphy_no, + TEST_CODE_PLL_PROPORTIONAL_CHARGE_PUMP_CTRL, + ((t_freq > 1150) ? 0x0C : 0x0B)); + + /* pll_int_cntrl-fixed value for int_cntrl from DPHY doc */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_INTEGRAL_CHARGE_PUMP_CTRL, + 0x00); + + /* pll_gmp_cntrl-fixed value for gmp_cntrl from DPHY doci */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_GMP_CTRL, 0x10); + + /* pll_cpbias_cntrl-fixed value for cpbias_cntrl from DPHY doc */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_CHARGE_PUMP_BIAS, 0x10); + + /* pll_th1 -Lock Detector Phase error threshold, + * document gives fixed value + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_PHASE_ERR_CTRL, 0x02); + + /* PLL Lock Configuration */ + + /* pll_th2 - Lock Filter length, document gives fixed value */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_LOCK_FILTER, 0x60); + + /* pll_th3- PLL Unlocking filter, document gives fixed value */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_UNLOCK_FILTER, 0x03); + + /* pll_lock_sel-PLL Lock Detector Selection, + * document gives fixed value + */ + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_LOCK_DETECTOR, 0x02); +} + +static void set_slewrate_gt_1500(struct kmb_dsi *kmb_dsi, u32 dphy_no) +{ + u32 test_code = 0, test_data = 0; + /* Bypass slew rate calibration algorithm + * bits[1:0} srcal_en_ovr_en, srcal_en_ovr + */ + test_code = TEST_CODE_SLEW_RATE_OVERRIDE_CTRL; + test_data = 0x02; + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Disable slew rate calibration */ + test_code = TEST_CODE_SLEW_RATE_DDL_LOOP_CTRL; + test_data = 0x00; + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); +} + +static void set_slewrate_gt_1000(struct kmb_dsi *kmb_dsi, u32 dphy_no) +{ + u32 test_code = 0, test_data = 0; + + /* BitRate: > 1 Gbps && <= 1.5 Gbps: - slew rate control ON + * typical rise/fall times: 166 ps + */ + + /* Do not bypass slew rate calibration algorithm + * bits[1:0}=srcal_en_ovr_en, srcal_en_ovr, bit[6]=sr_range + */ + test_code = TEST_CODE_SLEW_RATE_OVERRIDE_CTRL; + test_data = (0x03 | (1 << 6)); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Enable slew rate calibration */ + test_code = TEST_CODE_SLEW_RATE_DDL_LOOP_CTRL; + test_data = 0x01; + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Set sr_osc_freq_target[6:0] low nibble + * typical rise/fall time=166, refer Table 1207 databook + */ + test_code = TEST_CODE_SLEW_RATE_DDL_CYCLES; + test_data = (0x72f & 0x7f); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Set sr_osc_freq_target[11:7] high nibble + * Typical rise/fall time=166, refer Table 1207 databook + */ + test_code = TEST_CODE_SLEW_RATE_DDL_CYCLES; + test_data = ((0x72f >> 6) & 0x1f) | (1 << 7); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); +} + +static void set_slewrate_lt_1000(struct kmb_dsi *kmb_dsi, u32 dphy_no) +{ + u32 test_code = 0, test_data = 0; + + /* lane_rate_mbps <= 1000 Mbps + * BitRate: <= 1 Gbps: + * - slew rate control ON + * - typical rise/fall times: 225 ps + */ + + /* Do not bypass slew rate calibration algorithm */ + test_code = TEST_CODE_SLEW_RATE_OVERRIDE_CTRL; + test_data = (0x03 | (1 << 6)); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Enable slew rate calibration */ + test_code = TEST_CODE_SLEW_RATE_DDL_LOOP_CTRL; + test_data = 0x01; 
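+ /* Editor's note: sr_osc_freq_target is wider than one 8-bit test-data write, so the two TEST_CODE_SLEW_RATE_DDL_CYCLES writes below send the low bits first (0x523 here, 0x72f in the >1 Gbps case) and then the upper bits with bit 7 set to flag the high half. */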
+ test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Typical rise/fall time=255, refer Table 1207 databook */ + test_code = TEST_CODE_SLEW_RATE_DDL_CYCLES; + test_data = (0x523 & 0x7f); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Set sr_osc_freq_target[11:7] high nibble */ + test_code = TEST_CODE_SLEW_RATE_DDL_CYCLES; + test_data = ((0x523 >> 6) & 0x1f) | (1 << 7); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); +} + +static void setup_pll(struct kmb_dsi *kmb_dsi, u32 dphy_no, + struct mipi_ctrl_cfg *cfg) +{ + u32 test_code = 0, test_data = 0; + + /* Set PLL regulator in bypass */ + test_code = TEST_CODE_PLL_ANALOG_PROG; + test_data = 0x01; + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* PLL Parameters Setup */ + mipi_tx_pll_setup(kmb_dsi, dphy_no, cfg->ref_clk_khz / 1000, + cfg->lane_rate_mbps / 2); + + /* Set clksel */ + kmb_write_bits_mipi(kmb_dsi, DPHY_INIT_CTRL1, PLL_CLKSEL_0, 2, 0x01); + + /* Set pll_shadow_control */ + kmb_set_bit_mipi(kmb_dsi, DPHY_INIT_CTRL1, PLL_SHADOW_CTRL); +} + +static void set_lane_data_rate(struct kmb_dsi *kmb_dsi, u32 dphy_no, + struct mipi_ctrl_cfg *cfg) +{ + u32 i, test_code = 0, test_data = 0; + + for (i = 0; i < MIPI_DPHY_DEFAULT_BIT_RATES; i++) { + if (mipi_hs_freq_range[i].default_bit_rate_mbps < + cfg->lane_rate_mbps) + continue; + + /* Send the test code and data */ + /* bit[6:0] = hsfreqrange_ovr bit[7] = hsfreqrange_ovr_en */ + test_code = TEST_CODE_HS_FREQ_RANGE_CFG; + test_data = (mipi_hs_freq_range[i].hsfreqrange_code & 0x7f) | + (1 << 7); + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + break; + } +} + +static void dphy_init_sequence(struct kmb_dsi *kmb_dsi, + struct mipi_ctrl_cfg *cfg, u32 dphy_no, + int active_lanes, enum dphy_mode mode) +{ + u32 test_code = 0, test_data = 0, val; + + /* Set D-PHY in shutdown mode */ + /* Assert RSTZ signal */ + CLR_DPHY_INIT_CTRL0(kmb_dsi, dphy_no, RESETZ); + + /* Assert SHUTDOWNZ signal */ + CLR_DPHY_INIT_CTRL0(kmb_dsi, dphy_no, SHUTDOWNZ); + val = kmb_read_mipi(kmb_dsi, DPHY_INIT_CTRL0); + + /* Init D-PHY_n + * Pulse testclear signal to make sure the d-phy configuration + * starts from a clean base + */ + CLR_DPHY_TEST_CTRL0(kmb_dsi, dphy_no); + ndelay(15); + SET_DPHY_TEST_CTRL0(kmb_dsi, dphy_no); + ndelay(15); + CLR_DPHY_TEST_CTRL0(kmb_dsi, dphy_no); + ndelay(15); + + /* Set mastermacro bit - Master or slave mode */ + test_code = TEST_CODE_MULTIPLE_PHY_CTRL; + + /* DPHY has its own clock lane enabled (master) */ + if (mode == MIPI_DPHY_MASTER) + test_data = 0x01; + else + test_data = 0x00; + + /* Send the test code and data */ + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + /* Set the lane data rate */ + set_lane_data_rate(kmb_dsi, dphy_no, cfg); + + /* High-Speed Tx Slew Rate Calibration + * BitRate: > 1.5 Gbps && <= 2.5 Gbps: slew rate control OFF + */ + if (cfg->lane_rate_mbps > 1500) + set_slewrate_gt_1500(kmb_dsi, dphy_no); + else if (cfg->lane_rate_mbps > 1000) + set_slewrate_gt_1000(kmb_dsi, dphy_no); + else + set_slewrate_lt_1000(kmb_dsi, dphy_no); + + /* Set cfgclkfreqrange */ + val = (((cfg->cfg_clk_khz / 1000) - 17) * 4) & 0x3f; + SET_DPHY_FREQ_CTRL0_3(kmb_dsi, dphy_no, val); + + /* Enable config clk for the corresponding d-phy */ + kmb_set_bit_mipi(kmb_dsi, DPHY_CFG_CLK_EN, dphy_no); + + /* PLL setup */ + if (mode == MIPI_DPHY_MASTER) + setup_pll(kmb_dsi, dphy_no, cfg); + + /* Send NORMAL OPERATION test code */ + test_code = 0x0; + test_data = 0x0; + test_mode_send(kmb_dsi, dphy_no, test_code, test_data); + + 
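/* Editor's note: with the default 24000 kHz cfg clock, the cfgclkfreqrange value programmed above evaluates to ((24 - 17) * 4) = 28 (0x1c). */ + + 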
/* Configure BASEDIR for data lanes + * NOTE: basedir only applies to LANE_0 of each D-PHY. + * The other lanes keep their direction based on the D-PHY type, + * either Rx or Tx. + * bits[5:0] - BaseDir: 1 = Rx + * bits[9:6] - BaseDir: 0 = Tx + */ + kmb_write_bits_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0, 9, 0x03f); + ndelay(15); + + /* Enable CLOCK LANE + * Clock lane should be enabled regardless of the direction + * set for the D-PHY (Rx/Tx) + */ + kmb_set_bit_mipi(kmb_dsi, DPHY_INIT_CTRL2, 12 + dphy_no); + + /* Enable DATA LANES */ + kmb_write_bits_mipi(kmb_dsi, DPHY_ENABLE, dphy_no * 2, 2, + ((1 << active_lanes) - 1)); + + ndelay(15); + + /* Take D-PHY out of shutdown mode */ + /* Deassert SHUTDOWNZ signal */ + SET_DPHY_INIT_CTRL0(kmb_dsi, dphy_no, SHUTDOWNZ); + ndelay(15); + + /* Deassert RSTZ signal */ + SET_DPHY_INIT_CTRL0(kmb_dsi, dphy_no, RESETZ); +} + +static void dphy_wait_fsm(struct kmb_dsi *kmb_dsi, u32 dphy_no, + enum dphy_tx_fsm fsm_state) +{ + enum dphy_tx_fsm val = DPHY_TX_POWERDWN; + int i = 0; + int status = 1; + + do { + test_mode_send(kmb_dsi, dphy_no, TEST_CODE_FSM_CONTROL, 0x80); + + val = GET_TEST_DOUT4_7(kmb_dsi, dphy_no); + i++; + if (i > TIMEOUT) { + status = 0; + break; + } + } while (val != fsm_state); + + dev_dbg(kmb_dsi->dev, "%s: dphy %d val = %x", __func__, dphy_no, val); + dev_dbg(kmb_dsi->dev, "* DPHY %d WAIT_FSM %s *", + dphy_no, status ? "SUCCESS" : "FAILED"); +} + +static void wait_init_done(struct kmb_dsi *kmb_dsi, u32 dphy_no, + u32 active_lanes) +{ + u32 stopstatedata = 0; + u32 data_lanes = (1 << active_lanes) - 1; + int i = 0; + int status = 1; + + do { + stopstatedata = GET_STOPSTATE_DATA(kmb_dsi, dphy_no) + & data_lanes; + + /* TODO-need to add a time out and return failure */ + i++; + + if (i > TIMEOUT) { + status = 0; + dev_dbg(kmb_dsi->dev, + "! WAIT_INIT_DONE: TIMING OUT!(err_stat=%d)", + kmb_read_mipi(kmb_dsi, MIPI_DPHY_ERR_STAT6_7)); + break; + } + } while (stopstatedata != data_lanes); + + dev_dbg(kmb_dsi->dev, "* DPHY %d INIT - %s *", + dphy_no, status ? "SUCCESS" : "FAILED"); +} + +static void wait_pll_lock(struct kmb_dsi *kmb_dsi, u32 dphy_no) +{ + int i = 0; + int status = 1; + + do { + /* TODO-need to add a time out and return failure */ + i++; + if (i > TIMEOUT) { + status = 0; + dev_dbg(kmb_dsi->dev, "%s: timing out", __func__); + break; + } + } while (!GET_PLL_LOCK(kmb_dsi, dphy_no)); + + dev_dbg(kmb_dsi->dev, "* PLL Locked for DPHY %d - %s *", + dphy_no, status ? "SUCCESS" : "FAILED"); +} + +static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi, + struct mipi_ctrl_cfg *cfg) +{ + u32 dphy_no = MIPI_DPHY6; + + /* Multiple D-PHYs needed */ + if (cfg->active_lanes > MIPI_DPHY_D_LANES) { + /* + *Initialization for Tx aggregation mode is done according to + *a. start init PHY1 + *b. poll for PHY1 FSM state LOCK + * b1. reg addr 0x03[3:0] - state_main[3:0] == 5 (LOCK) + *c. poll for PHY1 calibrations done : + * c1. termination calibration lower section: addr 0x22[5] + * - rescal_done + * c2. slewrate calibration (if data rate < = 1500 Mbps): + * addr 0xA7[3:2] - srcal_done, sr_finished + *d. start init PHY0 + *e. poll for PHY0 stopstate + *f. 
poll for PHY1 stopstate + */ + /* PHY #N+1 ('slave') */ + + dphy_init_sequence(kmb_dsi, cfg, dphy_no + 1, + (cfg->active_lanes - MIPI_DPHY_D_LANES), + MIPI_DPHY_SLAVE); + dphy_wait_fsm(kmb_dsi, dphy_no + 1, DPHY_TX_LOCK); + + /* PHY #N master */ + dphy_init_sequence(kmb_dsi, cfg, dphy_no, MIPI_DPHY_D_LANES, + MIPI_DPHY_MASTER); + + /* Wait for DPHY init to complete */ + wait_init_done(kmb_dsi, dphy_no, MIPI_DPHY_D_LANES); + wait_init_done(kmb_dsi, dphy_no + 1, + cfg->active_lanes - MIPI_DPHY_D_LANES); + wait_pll_lock(kmb_dsi, dphy_no); + wait_pll_lock(kmb_dsi, dphy_no + 1); + dphy_wait_fsm(kmb_dsi, dphy_no, DPHY_TX_IDLE); + } else { /* Single DPHY */ + dphy_init_sequence(kmb_dsi, cfg, dphy_no, cfg->active_lanes, + MIPI_DPHY_MASTER); + dphy_wait_fsm(kmb_dsi, dphy_no, DPHY_TX_IDLE); + wait_init_done(kmb_dsi, dphy_no, cfg->active_lanes); + wait_pll_lock(kmb_dsi, dphy_no); + } + + return 0; +} + +static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi) +{ + struct regmap *msscam; + + msscam = syscon_regmap_lookup_by_compatible("intel,keembay-msscam"); + if (IS_ERR(msscam)) { + dev_dbg(kmb_dsi->dev, "failed to get msscam syscon"); + return; + } + + /* DISABLE MIPI->CIF CONNECTION */ + regmap_write(msscam, MSS_MIPI_CIF_CFG, 0); + + /* ENABLE LCD->MIPI CONNECTION */ + regmap_write(msscam, MSS_LCD_MIPI_CFG, 1); + /* DISABLE LCD->CIF LOOPBACK */ + regmap_write(msscam, MSS_LOOPBACK_CFG, 1); +} + +int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode, + int sys_clk_mhz) +{ + u64 data_rate; + + kmb_dsi->sys_clk_mhz = sys_clk_mhz; + mipi_tx_init_cfg.active_lanes = MIPI_TX_ACTIVE_LANES; + + mipi_tx_frame0_sect_cfg.width_pixels = mode->crtc_hdisplay; + mipi_tx_frame0_sect_cfg.height_lines = mode->crtc_vdisplay; + mipitx_frame0_cfg.vsync_width = + mode->crtc_vsync_end - mode->crtc_vsync_start; + mipitx_frame0_cfg.v_backporch = + mode->crtc_vtotal - mode->crtc_vsync_end; + mipitx_frame0_cfg.v_frontporch = + mode->crtc_vsync_start - mode->crtc_vdisplay; + mipitx_frame0_cfg.hsync_width = + mode->crtc_hsync_end - mode->crtc_hsync_start; + mipitx_frame0_cfg.h_backporch = + mode->crtc_htotal - mode->crtc_hsync_end; + mipitx_frame0_cfg.h_frontporch = + mode->crtc_hsync_start - mode->crtc_hdisplay; + + /* Lane rate = (vtotal * htotal * fps * bpp) / active_lanes, + * divided by 1000000 to convert to Mbps + */ + data_rate = ((((u32)mode->crtc_vtotal * (u32)mode->crtc_htotal) * + (u32)(drm_mode_vrefresh(mode)) * + MIPI_TX_BPP) / mipi_tx_init_cfg.active_lanes) / 1000000; + + dev_dbg(kmb_dsi->dev, "data_rate=%u active_lanes=%d\n", + (u32)data_rate, mipi_tx_init_cfg.active_lanes); + + /* When the lane rate is < 800 Mbps, modeset fails with 4 lanes, + * so switch to 2 lanes + */ + if (data_rate < 800) { + mipi_tx_init_cfg.active_lanes = 2; + mipi_tx_init_cfg.lane_rate_mbps = data_rate * 2; + } else { + mipi_tx_init_cfg.lane_rate_mbps = data_rate; + } + + kmb_write_mipi(kmb_dsi, DPHY_ENABLE, 0); + kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL0, 0); + kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL1, 0); + kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0); + + /* Initialize MIPI controller */ + mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg); + + /* D-PHY initialization */ + mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg); + + connect_lcd_to_mipi(kmb_dsi); + dev_info(kmb_dsi->dev, "mipi hw initialized"); + + return 0; +} + +struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev) +{ + struct kmb_dsi *kmb_dsi; + struct device *dev = get_device(&pdev->dev); + + kmb_dsi = devm_kzalloc(dev, sizeof(*kmb_dsi), GFP_KERNEL); + if (!kmb_dsi) { + dev_err(dev, "failed to 
allocate kmb_dsi\n"); + return ERR_PTR(-ENOMEM); + } + + kmb_dsi->host = dsi_host; + kmb_dsi->host->ops = &kmb_dsi_host_ops; + + dsi_device->host = kmb_dsi->host; + kmb_dsi->device = dsi_device; + + return kmb_dsi; +} + +int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi) +{ + struct drm_encoder *encoder; + struct drm_connector *connector; + int ret = 0; + + encoder = &kmb_dsi->base; + encoder->possible_crtcs = 1; + encoder->possible_clones = 0; + + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DSI); + if (ret) { + dev_err(kmb_dsi->dev, "Failed to init encoder %d\n", ret); + return ret; + } + + /* Link drm_bridge to encoder */ + ret = drm_bridge_attach(encoder, adv_bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) { + DRM_ERROR("failed to attach bridge to MIPI\n"); + drm_encoder_cleanup(encoder); + return ret; + } + drm_info(dev, "Bridge attached: SUCCESS"); + connector = drm_bridge_connector_init(dev, encoder); + if (IS_ERR(connector)) { + DRM_ERROR("Unable to create bridge connector"); + drm_encoder_cleanup(encoder); + return PTR_ERR(connector); + } + drm_connector_attach_encoder(connector, encoder); + return 0; +} + +int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi) +{ + struct resource *res; + struct device *dev = kmb_dsi->dev; + + res = platform_get_resource_byname(kmb_dsi->pdev, IORESOURCE_MEM, + "mipi"); + if (!res) { + dev_err(dev, "failed to get resource for mipi"); + return -ENOMEM; + } + kmb_dsi->mipi_mmio = devm_ioremap_resource(dev, res); + if (IS_ERR(kmb_dsi->mipi_mmio)) { + dev_err(dev, "failed to ioremap mipi registers"); + return PTR_ERR(kmb_dsi->mipi_mmio); + } + return 0; +} + +static int kmb_dsi_clk_enable(struct kmb_dsi *kmb_dsi) +{ + int ret; + struct device *dev = kmb_dsi->dev; + + ret = clk_prepare_enable(kmb_dsi->clk_mipi); + if (ret) { + dev_err(dev, "Failed to enable MIPI clock: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(kmb_dsi->clk_mipi_ecfg); + if (ret) { + dev_err(dev, "Failed to enable MIPI_ECFG clock: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(kmb_dsi->clk_mipi_cfg); + if (ret) { + dev_err(dev, "Failed to enable MIPI_CFG clock: %d\n", ret); + return ret; + } + + dev_info(dev, "SUCCESS: enabled MIPI clocks\n"); + return 0; +} + +int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi) +{ + struct device *dev = kmb_dsi->dev; + unsigned long clk; + + kmb_dsi->clk_mipi = devm_clk_get(dev, "clk_mipi"); + if (IS_ERR(kmb_dsi->clk_mipi)) { + dev_err(dev, "devm_clk_get() failed clk_mipi\n"); + return PTR_ERR(kmb_dsi->clk_mipi); + } + + kmb_dsi->clk_mipi_ecfg = devm_clk_get(dev, "clk_mipi_ecfg"); + if (IS_ERR(kmb_dsi->clk_mipi_ecfg)) { + dev_err(dev, "devm_clk_get() failed clk_mipi_ecfg\n"); + return PTR_ERR(kmb_dsi->clk_mipi_ecfg); + } + + kmb_dsi->clk_mipi_cfg = devm_clk_get(dev, "clk_mipi_cfg"); + if (IS_ERR(kmb_dsi->clk_mipi_cfg)) { + dev_err(dev, "devm_clk_get() failed clk_mipi_cfg\n"); + return PTR_ERR(kmb_dsi->clk_mipi_cfg); + } + /* Set MIPI clock to 24 MHz */ + clk_set_rate(kmb_dsi->clk_mipi, KMB_MIPI_DEFAULT_CLK); + if (clk_get_rate(kmb_dsi->clk_mipi) != KMB_MIPI_DEFAULT_CLK) { + dev_err(dev, "failed to set clk_mipi to %d\n", + KMB_MIPI_DEFAULT_CLK); + return -EINVAL; + } + dev_dbg(dev, "clk_mipi = %ld\n", clk_get_rate(kmb_dsi->clk_mipi)); + + clk = clk_get_rate(kmb_dsi->clk_mipi_ecfg); + if (clk != KMB_MIPI_DEFAULT_CFG_CLK) { + /* Set MIPI_ECFG clock to 24 MHz */ + clk_set_rate(kmb_dsi->clk_mipi_ecfg, KMB_MIPI_DEFAULT_CFG_CLK); + clk = clk_get_rate(kmb_dsi->clk_mipi_ecfg); + if (clk != 
KMB_MIPI_DEFAULT_CFG_CLK) { + dev_err(dev, "failed to set clk_mipi_ecfg to %d\n", + KMB_MIPI_DEFAULT_CFG_CLK); + return -EINVAL; + } + } + + clk = clk_get_rate(kmb_dsi->clk_mipi_cfg); + if (clk != KMB_MIPI_DEFAULT_CFG_CLK) { + /* Set MIPI_CFG clock to 24 MHz */ + clk_set_rate(kmb_dsi->clk_mipi_cfg, KMB_MIPI_DEFAULT_CFG_CLK); + clk = clk_get_rate(kmb_dsi->clk_mipi_cfg); + if (clk != KMB_MIPI_DEFAULT_CFG_CLK) { + dev_err(dev, "failed to set clk_mipi_cfg to %d\n", + KMB_MIPI_DEFAULT_CFG_CLK); + return -EINVAL; + } + } + + return kmb_dsi_clk_enable(kmb_dsi); +} diff --git a/drivers/gpu/drm/kmb/kmb_dsi.h b/drivers/gpu/drm/kmb/kmb_dsi.h new file mode 100644 index 000000000000..66b7c500d9bc --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_dsi.h @@ -0,0 +1,387 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright © 2019-2020 Intel Corporation + */ + +#ifndef __KMB_DSI_H__ +#define __KMB_DSI_H__ + +#include <drm/drm_encoder.h> +#include <drm/drm_mipi_dsi.h> + +/* MIPI TX CFG */ +#define MIPI_TX_LANE_DATA_RATE_MBPS 891 +#define MIPI_TX_REF_CLK_KHZ 24000 +#define MIPI_TX_CFG_CLK_KHZ 24000 +#define MIPI_TX_BPP 24 + +/* DPHY Tx test codes */ +#define TEST_CODE_FSM_CONTROL 0x03 +#define TEST_CODE_MULTIPLE_PHY_CTRL 0x0C +#define TEST_CODE_PLL_PROPORTIONAL_CHARGE_PUMP_CTRL 0x0E +#define TEST_CODE_PLL_INTEGRAL_CHARGE_PUMP_CTRL 0x0F +#define TEST_CODE_PLL_VCO_CTRL 0x12 +#define TEST_CODE_PLL_GMP_CTRL 0x13 +#define TEST_CODE_PLL_PHASE_ERR_CTRL 0x14 +#define TEST_CODE_PLL_LOCK_FILTER 0x15 +#define TEST_CODE_PLL_UNLOCK_FILTER 0x16 +#define TEST_CODE_PLL_INPUT_DIVIDER 0x17 +#define TEST_CODE_PLL_FEEDBACK_DIVIDER 0x18 +#define PLL_FEEDBACK_DIVIDER_HIGH BIT(7) +#define TEST_CODE_PLL_OUTPUT_CLK_SEL 0x19 +#define PLL_N_OVR_EN BIT(4) +#define PLL_M_OVR_EN BIT(5) +#define TEST_CODE_VOD_LEVEL 0x24 +#define TEST_CODE_PLL_CHARGE_PUMP_BIAS 0x1C +#define TEST_CODE_PLL_LOCK_DETECTOR 0x1D +#define TEST_CODE_HS_FREQ_RANGE_CFG 0x44 +#define TEST_CODE_PLL_ANALOG_PROG 0x1F +#define TEST_CODE_SLEW_RATE_OVERRIDE_CTRL 0xA0 +#define TEST_CODE_SLEW_RATE_DDL_LOOP_CTRL 0xA3 +#define TEST_CODE_SLEW_RATE_DDL_CYCLES 0xA4 + +/* DPHY params */ +#define PLL_N_MIN 0 +#define PLL_N_MAX 15 +#define PLL_M_MIN 62 +#define PLL_M_MAX 623 +#define PLL_FVCO_MAX 1250 + +#define TIMEOUT 600 + +#define MIPI_TX_FRAME_GEN 4 +#define MIPI_TX_FRAME_GEN_SECTIONS 4 +#define MIPI_CTRL_VIRTUAL_CHANNELS 4 +#define MIPI_D_LANES_PER_DPHY 2 +#define MIPI_CTRL_2LANE_MAX_MC_FIFO_LOC 255 +#define MIPI_CTRL_4LANE_MAX_MC_FIFO_LOC 511 +/* 2 Data Lanes per D-PHY */ +#define MIPI_DPHY_D_LANES 2 +#define MIPI_DPHY_DEFAULT_BIT_RATES 63 + +#define KMB_MIPI_DEFAULT_CLK 24000000 +#define KMB_MIPI_DEFAULT_CFG_CLK 24000000 + +#define to_kmb_dsi(x) container_of(x, struct kmb_dsi, base) + +struct kmb_dsi { + struct drm_encoder base; + struct device *dev; + struct platform_device *pdev; + struct mipi_dsi_host *host; + struct mipi_dsi_device *device; + struct drm_bridge *adv_bridge; + void __iomem *mipi_mmio; + struct clk *clk_mipi; + struct clk *clk_mipi_ecfg; + struct clk *clk_mipi_cfg; + int sys_clk_mhz; +}; + +enum mipi_ctrl_num { + MIPI_CTRL0 = 0, + MIPI_CTRL1, + MIPI_CTRL2, + MIPI_CTRL3, + MIPI_CTRL4, + MIPI_CTRL5, + MIPI_CTRL6, + MIPI_CTRL7, + MIPI_CTRL8, + MIPI_CTRL9, + MIPI_CTRL_NA +}; + +enum mipi_dphy_num { + MIPI_DPHY0 = 0, + MIPI_DPHY1, + MIPI_DPHY2, + MIPI_DPHY3, + MIPI_DPHY4, + MIPI_DPHY5, + MIPI_DPHY6, + MIPI_DPHY7, + MIPI_DPHY8, + MIPI_DPHY9, + MIPI_DPHY_NA +}; + +enum mipi_dir { + MIPI_RX, + MIPI_TX +}; + +enum mipi_ctrl_type { + MIPI_DSI, + MIPI_CSI +}; + 
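+/* NOTE (inferred from mipi_tx_init_dphy() in kmb_dsi.c and GET_PLL_LOCK() in + * kmb_regs.h, not from hw documentation): each D-PHY in mipi_dphy_num drives + * MIPI_DPHY_D_LANES (2) data lanes, so a 4-lane DSI Tx link aggregates two + * D-PHYs in master/slave mode. The Tx path uses MIPI_DPHY6 as the master + * and, when more than two lanes are active, MIPI_DPHY7 as the slave, which + * appears to be why GET_PLL_LOCK() counts its lock bits from MIPI_DPHY6. + */ + 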
+enum mipi_data_if { + MIPI_IF_DMA, + MIPI_IF_PARALLEL +}; + +enum mipi_data_mode { + MIPI_DATA_MODE0, + MIPI_DATA_MODE1, + MIPI_DATA_MODE2, + MIPI_DATA_MODE3 +}; + +enum mipi_dsi_video_mode { + DSI_VIDEO_MODE_NO_BURST_PULSE, + DSI_VIDEO_MODE_NO_BURST_EVENT, + DSI_VIDEO_MODE_BURST +}; + +enum mipi_dsi_blanking_mode { + TRANSITION_TO_LOW_POWER, + SEND_BLANK_PACKET +}; + +enum mipi_dsi_eotp { + DSI_EOTP_DISABLED, + DSI_EOTP_ENABLES +}; + +enum mipi_dsi_data_type { + DSI_SP_DT_RESERVED_00 = 0x00, + DSI_SP_DT_VSYNC_START = 0x01, + DSI_SP_DT_COLOR_MODE_OFF = 0x02, + DSI_SP_DT_GENERIC_SHORT_WR = 0x03, + DSI_SP_DT_GENERIC_RD = 0x04, + DSI_SP_DT_DCS_SHORT_WR = 0x05, + DSI_SP_DT_DCS_RD = 0x06, + DSI_SP_DT_EOTP = 0x08, + DSI_LP_DT_NULL = 0x09, + DSI_LP_DT_RESERVED_0A = 0x0a, + DSI_LP_DT_RESERVED_0B = 0x0b, + DSI_LP_DT_LPPS_YCBCR422_20B = 0x0c, + DSI_LP_DT_PPS_RGB101010_30B = 0x0d, + DSI_LP_DT_PPS_RGB565_16B = 0x0e, + DSI_LP_DT_RESERVED_0F = 0x0f, + + DSI_SP_DT_RESERVED_10 = 0x10, + DSI_SP_DT_VSYNC_END = 0x11, + DSI_SP_DT_COLOR_MODE_ON = 0x12, + DSI_SP_DT_GENERIC_SHORT_WR_1PAR = 0x13, + DSI_SP_DT_GENERIC_RD_1PAR = 0x14, + DSI_SP_DT_DCS_SHORT_WR_1PAR = 0x15, + DSI_SP_DT_RESERVED_16 = 0x16, + DSI_SP_DT_RESERVED_17 = 0x17, + DSI_SP_DT_RESERVED_18 = 0x18, + DSI_LP_DT_BLANK = 0x19, + DSI_LP_DT_RESERVED_1A = 0x1a, + DSI_LP_DT_RESERVED_1B = 0x1b, + DSI_LP_DT_PPS_YCBCR422_24B = 0x1c, + DSI_LP_DT_PPS_RGB121212_36B = 0x1d, + DSI_LP_DT_PPS_RGB666_18B = 0x1e, + DSI_LP_DT_RESERVED_1F = 0x1f, + + DSI_SP_DT_RESERVED_20 = 0x20, + DSI_SP_DT_HSYNC_START = 0x21, + DSI_SP_DT_SHUT_DOWN_PERIPH_CMD = 0x22, + DSI_SP_DT_GENERIC_SHORT_WR_2PAR = 0x23, + DSI_SP_DT_GENERIC_RD_2PAR = 0x24, + DSI_SP_DT_RESERVED_25 = 0x25, + DSI_SP_DT_RESERVED_26 = 0x26, + DSI_SP_DT_RESERVED_27 = 0x27, + DSI_SP_DT_RESERVED_28 = 0x28, + DSI_LP_DT_GENERIC_LONG_WR = 0x29, + DSI_LP_DT_RESERVED_2A = 0x2a, + DSI_LP_DT_RESERVED_2B = 0x2b, + DSI_LP_DT_PPS_YCBCR422_16B = 0x2c, + DSI_LP_DT_RESERVED_2D = 0x2d, + DSI_LP_DT_LPPS_RGB666_18B = 0x2e, + DSI_LP_DT_RESERVED_2F = 0x2f, + + DSI_SP_DT_RESERVED_30 = 0x30, + DSI_SP_DT_HSYNC_END = 0x31, + DSI_SP_DT_TURN_ON_PERIPH_CMD = 0x32, + DSI_SP_DT_RESERVED_33 = 0x33, + DSI_SP_DT_RESERVED_34 = 0x34, + DSI_SP_DT_RESERVED_35 = 0x35, + DSI_SP_DT_RESERVED_36 = 0x36, + DSI_SP_DT_SET_MAX_RETURN_PKT_SIZE = 0x37, + DSI_SP_DT_RESERVED_38 = 0x38, + DSI_LP_DT_DSC_LONG_WR = 0x39, + DSI_LP_DT_RESERVED_3A = 0x3a, + DSI_LP_DT_RESERVED_3B = 0x3b, + DSI_LP_DT_RESERVED_3C = 0x3c, + DSI_LP_DT_PPS_YCBCR420_12B = 0x3d, + DSI_LP_DT_PPS_RGB888_24B = 0x3e, + DSI_LP_DT_RESERVED_3F = 0x3f +}; + +enum mipi_tx_hs_tp_sel { + MIPI_TX_HS_TP_WHOLE_FRAME_COLOR0 = 0, + MIPI_TX_HS_TP_WHOLE_FRAME_COLOR1, + MIPI_TX_HS_TP_V_STRIPES, + MIPI_TX_HS_TP_H_STRIPES, +}; + +enum dphy_mode { + MIPI_DPHY_SLAVE = 0, + MIPI_DPHY_MASTER +}; + +enum dphy_tx_fsm { + DPHY_TX_POWERDWN = 0, + DPHY_TX_BGPON, + DPHY_TX_TERMCAL, + DPHY_TX_TERMCALUP, + DPHY_TX_OFFSETCAL, + DPHY_TX_LOCK, + DPHY_TX_SRCAL, + DPHY_TX_IDLE, + DPHY_TX_ULP, + DPHY_TX_LANESTART, + DPHY_TX_CLKALIGN, + DPHY_TX_DDLTUNNING, + DPHY_TX_ULP_FORCE_PLL, + DPHY_TX_LOCK_LOSS +}; + +struct mipi_data_type_params { + u8 size_constraint_pixels; + u8 size_constraint_bytes; + u8 pixels_per_pclk; + u8 bits_per_pclk; +}; + +struct mipi_tx_dsi_cfg { + u8 hfp_blank_en; /* Horizontal front porch blanking enable */ + u8 eotp_en; /* End of transmission packet enable */ + /* Last vertical front porch blanking mode */ + u8 lpm_last_vfp_line; + /* First vertical sync active blanking mode */ + u8 lpm_first_vsa_line; + u8 
sync_pulse_eventn; /* Sync type */ + u8 hfp_blanking; /* Horizontal front porch blanking mode */ + u8 hbp_blanking; /* Horizontal back porch blanking mode */ + u8 hsa_blanking; /* Horizontal sync active blanking mode */ + u8 v_blanking; /* Vertical timing blanking mode */ +}; + +struct mipi_tx_frame_section_cfg { + u32 dma_v_stride; + u16 dma_v_scale_cfg; + u16 width_pixels; + u16 height_lines; + u8 dma_packed; + u8 bpp; + u8 bpp_unpacked; + u8 dma_h_stride; + u8 data_type; + u8 data_mode; + u8 dma_flip_rotate_sel; +}; + +struct mipi_tx_frame_timing_cfg { + u32 bpp; + u32 lane_rate_mbps; + u32 hsync_width; + u32 h_backporch; + u32 h_frontporch; + u32 h_active; + u16 vsync_width; + u16 v_backporch; + u16 v_frontporch; + u16 v_active; + u8 active_lanes; +}; + +struct mipi_tx_frame_sect_phcfg { + u32 wc; + enum mipi_data_mode data_mode; + enum mipi_dsi_data_type data_type; + u8 vchannel; + u8 dma_packed; +}; + +struct mipi_tx_frame_cfg { + struct mipi_tx_frame_section_cfg *sections[MIPI_TX_FRAME_GEN_SECTIONS]; + u32 hsync_width; /* in pixels */ + u32 h_backporch; /* in pixels */ + u32 h_frontporch; /* in pixels */ + u16 vsync_width; /* in lines */ + u16 v_backporch; /* in lines */ + u16 v_frontporch; /* in lines */ +}; + +struct mipi_tx_ctrl_cfg { + struct mipi_tx_frame_cfg *frames[MIPI_TX_FRAME_GEN]; + const struct mipi_tx_dsi_cfg *tx_dsi_cfg; + u8 line_sync_pkt_en; + u8 line_counter_active; + u8 frame_counter_active; + u8 tx_hsclkkidle_cnt; + u8 tx_hsexit_cnt; + u8 tx_crc_en; + u8 tx_hact_wait_stop; + u8 tx_always_use_hact; + u8 tx_wait_trig; + u8 tx_wait_all_sect; +}; + +/* configuration structure for MIPI control */ +struct mipi_ctrl_cfg { + u8 active_lanes; /* # of active lanes per controller (2 or 4) */ + u32 lane_rate_mbps; /* Mbps */ + u32 ref_clk_khz; + u32 cfg_clk_khz; + struct mipi_tx_ctrl_cfg tx_ctrl_cfg; +}; + +static inline void kmb_write_mipi(struct kmb_dsi *kmb_dsi, + unsigned int reg, u32 value) +{ + writel(value, (kmb_dsi->mipi_mmio + reg)); +} + +static inline u32 kmb_read_mipi(struct kmb_dsi *kmb_dsi, unsigned int reg) +{ + return readl(kmb_dsi->mipi_mmio + reg); +} + +static inline void kmb_write_bits_mipi(struct kmb_dsi *kmb_dsi, + unsigned int reg, u32 offset, + u32 num_bits, u32 value) +{ + u32 reg_val = kmb_read_mipi(kmb_dsi, reg); + u32 mask = (1 << num_bits) - 1; + + value &= mask; + mask <<= offset; + reg_val &= (~mask); + reg_val |= (value << offset); + kmb_write_mipi(kmb_dsi, reg, reg_val); +} + +static inline void kmb_set_bit_mipi(struct kmb_dsi *kmb_dsi, + unsigned int reg, u32 offset) +{ + u32 reg_val = kmb_read_mipi(kmb_dsi, reg); + + kmb_write_mipi(kmb_dsi, reg, reg_val | (1 << offset)); +} + +static inline void kmb_clr_bit_mipi(struct kmb_dsi *kmb_dsi, + unsigned int reg, u32 offset) +{ + u32 reg_val = kmb_read_mipi(kmb_dsi, reg); + + kmb_write_mipi(kmb_dsi, reg, reg_val & (~(1 << offset))); } + +int kmb_dsi_host_bridge_init(struct device *dev); +struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev); +void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi); +int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode, + int sys_clk_mhz); +int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi); +int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi); +int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi); +#endif /* __KMB_DSI_H__ */ diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c new file mode 100644 index 000000000000..8448d1edb553 --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_plane.c @@ -0,0 +1,522 @@ +// 
SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright © 2018-2020 Intel Corporation + */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_managed.h> +#include <drm/drm_plane_helper.h> + +#include "kmb_drv.h" +#include "kmb_plane.h" +#include "kmb_regs.h" + +const u32 layer_irqs[] = { + LCD_INT_VL0, + LCD_INT_VL1, + LCD_INT_GL0, + LCD_INT_GL1 +}; + +/* Conversion (YUV->RGB) matrix from Myriad X */ +static const u32 csc_coef_lcd[] = { + 1024, 0, 1436, + 1024, -352, -731, + 1024, 1814, 0, + -179, 125, -226 +}; + +/* Graphics layer (layers 2 & 3) formats, only packed formats are supported */ +static const u32 kmb_formats_g[] = { + DRM_FORMAT_RGB332, + DRM_FORMAT_XRGB4444, DRM_FORMAT_XBGR4444, + DRM_FORMAT_ARGB4444, DRM_FORMAT_ABGR4444, + DRM_FORMAT_XRGB1555, DRM_FORMAT_XBGR1555, + DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, + DRM_FORMAT_RGB888, DRM_FORMAT_BGR888, + DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, +}; + +/* Video layer (0 & 1) formats, packed and planar formats are supported */ +static const u32 kmb_formats_v[] = { + /* packed formats */ + DRM_FORMAT_RGB332, + DRM_FORMAT_XRGB4444, DRM_FORMAT_XBGR4444, + DRM_FORMAT_ARGB4444, DRM_FORMAT_ABGR4444, + DRM_FORMAT_XRGB1555, DRM_FORMAT_XBGR1555, + DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, + DRM_FORMAT_RGB888, DRM_FORMAT_BGR888, + DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, + /* planar formats */ + DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, + DRM_FORMAT_YUV422, DRM_FORMAT_YVU422, + DRM_FORMAT_YUV444, DRM_FORMAT_YVU444, + DRM_FORMAT_NV12, DRM_FORMAT_NV21, +}; + +static int check_pixel_format(struct drm_plane *plane, u32 format) +{ + int i; + + for (i = 0; i < plane->format_count; i++) { + if (plane->format_types[i] == format) + return 0; + } + return -EINVAL; +} + +static int kmb_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_framebuffer *fb; + int ret; + struct drm_crtc_state *crtc_state; + bool can_position; + + fb = state->fb; + if (!fb || !state->crtc) + return 0; + + ret = check_pixel_format(plane, fb->format->format); + if (ret) + return ret; + + if (state->crtc_w > KMB_MAX_WIDTH || state->crtc_h > KMB_MAX_HEIGHT) + return -EINVAL; + if (state->crtc_w < KMB_MIN_WIDTH || state->crtc_h < KMB_MIN_HEIGHT) + return -EINVAL; + can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY); + crtc_state = + drm_atomic_get_existing_crtc_state(state->state, state->crtc); + return drm_atomic_helper_check_plane_state(state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + can_position, true); +} + +static void kmb_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct kmb_plane *kmb_plane = to_kmb_plane(plane); + int plane_id = kmb_plane->id; + struct kmb_drm_private *kmb; + + kmb = to_kmb(plane->dev); + + switch (plane_id) { + case LAYER_0: + kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL1_ENABLE; + break; + case LAYER_1: + kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL2_ENABLE; + break; + case LAYER_2: + kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL1_ENABLE; + break; + case LAYER_3: + kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL2_ENABLE; + break; + } + 
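+ /* NOTE: the layer is not actually switched off here; only the + * layer's LCD_CONTROL enable bit is recorded in plane_status, and + * the register update is presumably deferred (e.g. to the IRQ/EOF + * path) so the layer is not disabled in the middle of a frame. + */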
+ kmb->plane_status[plane_id].disable = true; +} + +static unsigned int get_pixel_format(u32 format) +{ + unsigned int val = 0; + + switch (format) { + /* planar formats */ + case DRM_FORMAT_YUV444: + val = LCD_LAYER_FORMAT_YCBCR444PLAN | LCD_LAYER_PLANAR_STORAGE; + break; + case DRM_FORMAT_YVU444: + val = LCD_LAYER_FORMAT_YCBCR444PLAN | LCD_LAYER_PLANAR_STORAGE + | LCD_LAYER_CRCB_ORDER; + break; + case DRM_FORMAT_YUV422: + val = LCD_LAYER_FORMAT_YCBCR422PLAN | LCD_LAYER_PLANAR_STORAGE; + break; + case DRM_FORMAT_YVU422: + val = LCD_LAYER_FORMAT_YCBCR422PLAN | LCD_LAYER_PLANAR_STORAGE + | LCD_LAYER_CRCB_ORDER; + break; + case DRM_FORMAT_YUV420: + val = LCD_LAYER_FORMAT_YCBCR420PLAN | LCD_LAYER_PLANAR_STORAGE; + break; + case DRM_FORMAT_YVU420: + val = LCD_LAYER_FORMAT_YCBCR420PLAN | LCD_LAYER_PLANAR_STORAGE + | LCD_LAYER_CRCB_ORDER; + break; + case DRM_FORMAT_NV12: + val = LCD_LAYER_FORMAT_NV12 | LCD_LAYER_PLANAR_STORAGE; + break; + case DRM_FORMAT_NV21: + val = LCD_LAYER_FORMAT_NV12 | LCD_LAYER_PLANAR_STORAGE + | LCD_LAYER_CRCB_ORDER; + break; + /* packed formats */ + /* looks like the hw requires B & G to be swapped when RGB */ + case DRM_FORMAT_RGB332: + val = LCD_LAYER_FORMAT_RGB332 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_XBGR4444: + val = LCD_LAYER_FORMAT_RGBX4444; + break; + case DRM_FORMAT_ARGB4444: + val = LCD_LAYER_FORMAT_RGBA4444 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_ABGR4444: + val = LCD_LAYER_FORMAT_RGBA4444; + break; + case DRM_FORMAT_XRGB1555: + val = LCD_LAYER_FORMAT_XRGB1555 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_XBGR1555: + val = LCD_LAYER_FORMAT_XRGB1555; + break; + case DRM_FORMAT_ARGB1555: + val = LCD_LAYER_FORMAT_RGBA1555 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_ABGR1555: + val = LCD_LAYER_FORMAT_RGBA1555; + break; + case DRM_FORMAT_RGB565: + val = LCD_LAYER_FORMAT_RGB565 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_BGR565: + val = LCD_LAYER_FORMAT_RGB565; + break; + case DRM_FORMAT_RGB888: + val = LCD_LAYER_FORMAT_RGB888 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_BGR888: + val = LCD_LAYER_FORMAT_RGB888; + break; + case DRM_FORMAT_XRGB8888: + val = LCD_LAYER_FORMAT_RGBX8888 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_XBGR8888: + val = LCD_LAYER_FORMAT_RGBX8888; + break; + case DRM_FORMAT_ARGB8888: + val = LCD_LAYER_FORMAT_RGBA8888 | LCD_LAYER_BGR_ORDER; + break; + case DRM_FORMAT_ABGR8888: + val = LCD_LAYER_FORMAT_RGBA8888; + break; + } + DRM_INFO_ONCE("%s : %d format=0x%x val=0x%x\n", + __func__, __LINE__, format, val); + return val; +} + +static unsigned int get_bits_per_pixel(const struct drm_format_info *format) +{ + u32 bpp = 0; + unsigned int val = 0; + + if (format->num_planes > 1) { + val = LCD_LAYER_8BPP; + return val; + } + + bpp = 8 * format->cpp[0]; + + switch (bpp) { + case 8: + val = LCD_LAYER_8BPP; + break; + case 16: + val = LCD_LAYER_16BPP; + break; + case 24: + val = LCD_LAYER_24BPP; + break; + case 32: + val = LCD_LAYER_32BPP; + break; + } + + DRM_DEBUG("bpp=%d val=0x%x\n", bpp, val); + return val; +} + +static void config_csc(struct kmb_drm_private *kmb, int plane_id) +{ + /* YUV to RGB conversion using the fixed matrix csc_coef_lcd */ + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF11(plane_id), csc_coef_lcd[0]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF12(plane_id), csc_coef_lcd[1]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF13(plane_id), csc_coef_lcd[2]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF21(plane_id), csc_coef_lcd[3]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF22(plane_id), csc_coef_lcd[4]); + 
kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF23(plane_id), csc_coef_lcd[5]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF31(plane_id), csc_coef_lcd[6]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF32(plane_id), csc_coef_lcd[7]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF33(plane_id), csc_coef_lcd[8]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF1(plane_id), csc_coef_lcd[9]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF2(plane_id), csc_coef_lcd[10]); + kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF3(plane_id), csc_coef_lcd[11]); +} + +static void kmb_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_framebuffer *fb; + struct kmb_drm_private *kmb; + unsigned int width; + unsigned int height; + unsigned int dma_len; + struct kmb_plane *kmb_plane; + unsigned int dma_cfg; + unsigned int ctrl = 0, val = 0, out_format = 0; + unsigned int src_w, src_h, crtc_x, crtc_y; + unsigned char plane_id; + int num_planes; + static dma_addr_t addr[MAX_SUB_PLANES]; + + if (!plane || !plane->state || !state) + return; + + fb = plane->state->fb; + if (!fb) + return; + num_planes = fb->format->num_planes; + kmb_plane = to_kmb_plane(plane); + plane_id = kmb_plane->id; + + kmb = to_kmb(plane->dev); + + spin_lock_irq(&kmb->irq_lock); + if (kmb->kmb_under_flow || kmb->kmb_flush_done) { + spin_unlock_irq(&kmb->irq_lock); + drm_dbg(&kmb->drm, "plane_update: underflow, returning"); + return; + } + spin_unlock_irq(&kmb->irq_lock); + + src_w = plane->state->src_w >> 16; + src_h = plane->state->src_h >> 16; + crtc_x = plane->state->crtc_x; + crtc_y = plane->state->crtc_y; + + drm_dbg(&kmb->drm, + "src_w=%d src_h=%d, fb->format->format=0x%x fb->flags=0x%x\n", + src_w, src_h, fb->format->format, fb->flags); + + width = fb->width; + height = fb->height; + dma_len = (width * height * fb->format->cpp[0]); + drm_dbg(&kmb->drm, "dma_len=%d", dma_len); + kmb_write_lcd(kmb, LCD_LAYERn_DMA_LEN(plane_id), dma_len); + kmb_write_lcd(kmb, LCD_LAYERn_DMA_LEN_SHADOW(plane_id), dma_len); + kmb_write_lcd(kmb, LCD_LAYERn_DMA_LINE_VSTRIDE(plane_id), + fb->pitches[0]); + kmb_write_lcd(kmb, LCD_LAYERn_DMA_LINE_WIDTH(plane_id), + (width * fb->format->cpp[0])); + + addr[Y_PLANE] = drm_fb_cma_get_gem_addr(fb, plane->state, 0); + kmb_write_lcd(kmb, LCD_LAYERn_DMA_START_ADDR(plane_id), + addr[Y_PLANE] + fb->offsets[0]); + val = get_pixel_format(fb->format->format); + val |= get_bits_per_pixel(fb->format); + /* Program Cb/Cr for planar formats */ + if (num_planes > 1) { + kmb_write_lcd(kmb, LCD_LAYERn_DMA_CB_LINE_VSTRIDE(plane_id), + width * fb->format->cpp[0]); + kmb_write_lcd(kmb, LCD_LAYERn_DMA_CB_LINE_WIDTH(plane_id), + (width * fb->format->cpp[0])); + + addr[U_PLANE] = drm_fb_cma_get_gem_addr(fb, plane->state, + U_PLANE); + /* check if Cb/Cr are swapped */ + if (num_planes == 3 && (val & LCD_LAYER_CRCB_ORDER)) + kmb_write_lcd(kmb, + LCD_LAYERn_DMA_START_CR_ADR(plane_id), + addr[U_PLANE]); + else + kmb_write_lcd(kmb, + LCD_LAYERn_DMA_START_CB_ADR(plane_id), + addr[U_PLANE]); + + if (num_planes == 3) { + kmb_write_lcd(kmb, + LCD_LAYERn_DMA_CR_LINE_VSTRIDE(plane_id), + (width * fb->format->cpp[0])); + + kmb_write_lcd(kmb, + LCD_LAYERn_DMA_CR_LINE_WIDTH(plane_id), + (width * fb->format->cpp[0])); + + addr[V_PLANE] = drm_fb_cma_get_gem_addr(fb, + plane->state, + V_PLANE); + + /* check if Cb/Cr are swapped */ + if (val & LCD_LAYER_CRCB_ORDER) + kmb_write_lcd(kmb, + LCD_LAYERn_DMA_START_CB_ADR(plane_id), + addr[V_PLANE]); + else + kmb_write_lcd(kmb, + LCD_LAYERn_DMA_START_CR_ADR(plane_id), + addr[V_PLANE]); + } + } + + 
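+ /* Layer geometry: the LCD_LAYERn_WIDTH/HEIGHT registers appear to + * hold size - 1 (the hw seems to count from 0), hence the + * src_w - 1 / src_h - 1 values programmed below. + */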
kmb_write_lcd(kmb, LCD_LAYERn_WIDTH(plane_id), src_w - 1); + kmb_write_lcd(kmb, LCD_LAYERn_HEIGHT(plane_id), src_h - 1); + kmb_write_lcd(kmb, LCD_LAYERn_COL_START(plane_id), crtc_x); + kmb_write_lcd(kmb, LCD_LAYERn_ROW_START(plane_id), crtc_y); + + val |= LCD_LAYER_FIFO_100; + + if (val & LCD_LAYER_PLANAR_STORAGE) { + val |= LCD_LAYER_CSC_EN; + + /* Enable CSC if input is planar and output is RGB */ + config_csc(kmb, plane_id); + } + + kmb_write_lcd(kmb, LCD_LAYERn_CFG(plane_id), val); + + switch (plane_id) { + case LAYER_0: + ctrl = LCD_CTRL_VL1_ENABLE; + break; + case LAYER_1: + ctrl = LCD_CTRL_VL2_ENABLE; + break; + case LAYER_2: + ctrl = LCD_CTRL_GL1_ENABLE; + break; + case LAYER_3: + ctrl = LCD_CTRL_GL2_ENABLE; + break; + } + + ctrl |= LCD_CTRL_PROGRESSIVE | LCD_CTRL_TIM_GEN_ENABLE + | LCD_CTRL_CONTINUOUS | LCD_CTRL_OUTPUT_ENABLED; + + /* The LCD is connected to MIPI on KMB, + * therefore this bit is required for DSI Tx + */ + ctrl |= LCD_CTRL_VHSYNC_IDLE_LVL; + + kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl); + + /* FIXME: no doc on how to set the output format; these values are + * taken from the Myriad X tests + */ + out_format |= LCD_OUTF_FORMAT_RGB888; + + /* Leave RGB order, conversion mode and clip mode at their defaults */ + /* Do not interleave RGB channels, for MIPI Tx compatibility */ + out_format |= LCD_OUTF_MIPI_RGB_MODE; + kmb_write_lcd(kmb, LCD_OUT_FORMAT_CFG, out_format); + + dma_cfg = LCD_DMA_LAYER_ENABLE | LCD_DMA_LAYER_VSTRIDE_EN | + LCD_DMA_LAYER_CONT_UPDATE | LCD_DMA_LAYER_AXI_BURST_16; + + /* Enable DMA */ + kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg); + drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg, + kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id))); + + kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, LCD_INT_EOF | + LCD_INT_DMA_ERR); + kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE, LCD_INT_EOF | + LCD_INT_DMA_ERR); +} + +static const struct drm_plane_helper_funcs kmb_plane_helper_funcs = { + .atomic_check = kmb_plane_atomic_check, + .atomic_update = kmb_plane_atomic_update, + .atomic_disable = kmb_plane_atomic_disable +}; + +void kmb_plane_destroy(struct drm_plane *plane) +{ + struct kmb_plane *kmb_plane = to_kmb_plane(plane); + + drm_plane_cleanup(plane); + kfree(kmb_plane); +} + +static const struct drm_plane_funcs kmb_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = kmb_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +struct kmb_plane *kmb_plane_init(struct drm_device *drm) +{ + struct kmb_drm_private *kmb = to_kmb(drm); + struct kmb_plane *plane = NULL; + struct kmb_plane *primary = NULL; + int i = 0; + int ret = 0; + enum drm_plane_type plane_type; + const u32 *plane_formats; + int num_plane_formats; + + for (i = 0; i < KMB_MAX_PLANES; i++) { + plane = drmm_kzalloc(drm, sizeof(*plane), GFP_KERNEL); + + if (!plane) { + drm_err(drm, "Failed to allocate plane\n"); + return ERR_PTR(-ENOMEM); + } + + plane_type = (i == 0) ? 
DRM_PLANE_TYPE_PRIMARY : + DRM_PLANE_TYPE_OVERLAY; + if (i < 2) { + plane_formats = kmb_formats_v; + num_plane_formats = ARRAY_SIZE(kmb_formats_v); + } else { + plane_formats = kmb_formats_g; + num_plane_formats = ARRAY_SIZE(kmb_formats_g); + } + + ret = drm_universal_plane_init(drm, &plane->base_plane, + POSSIBLE_CRTCS, &kmb_plane_funcs, + plane_formats, num_plane_formats, + NULL, plane_type, "plane %d", i); + if (ret < 0) { + drm_err(drm, "drm_universal_plane_init failed (ret=%d)", + ret); + goto cleanup; + } + drm_dbg(drm, "%s : %d i=%d type=%d", + __func__, __LINE__, + i, plane_type); + drm_plane_helper_add(&plane->base_plane, + &kmb_plane_helper_funcs); + if (plane_type == DRM_PLANE_TYPE_PRIMARY) { + primary = plane; + kmb->plane = plane; + } + drm_dbg(drm, "%s : %d primary=%p\n", __func__, __LINE__, + &primary->base_plane); + plane->id = i; + } + + return primary; +cleanup: + drmm_kfree(drm, plane); + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/kmb/kmb_plane.h b/drivers/gpu/drm/kmb/kmb_plane.h new file mode 100644 index 000000000000..486490f7a3ec --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_plane.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright © 2018-2020 Intel Corporation + */ + +#ifndef __KMB_PLANE_H__ +#define __KMB_PLANE_H__ + +#include <drm/drm_fourcc.h> +#include <drm/drm_plane.h> + +#define LCD_INT_VL0_ERR ((LAYER0_DMA_FIFO_UNDERFLOW) | \ + (LAYER0_DMA_FIFO_OVERFLOW) | \ + (LAYER0_DMA_CB_FIFO_OVERFLOW) | \ + (LAYER0_DMA_CB_FIFO_UNDERFLOW) | \ + (LAYER0_DMA_CR_FIFO_OVERFLOW) | \ + (LAYER0_DMA_CR_FIFO_UNDERFLOW)) + +#define LCD_INT_VL1_ERR ((LAYER1_DMA_FIFO_UNDERFLOW) | \ + (LAYER1_DMA_FIFO_OVERFLOW) | \ + (LAYER1_DMA_CB_FIFO_OVERFLOW) | \ + (LAYER1_DMA_CB_FIFO_UNDERFLOW) | \ + (LAYER1_DMA_CR_FIFO_OVERFLOW) | \ + (LAYER1_DMA_CR_FIFO_UNDERFLOW)) + +#define LCD_INT_GL0_ERR (LAYER2_DMA_FIFO_OVERFLOW | LAYER2_DMA_FIFO_UNDERFLOW) +#define LCD_INT_GL1_ERR (LAYER3_DMA_FIFO_OVERFLOW | LAYER3_DMA_FIFO_UNDERFLOW) +#define LCD_INT_VL0 (LAYER0_DMA_DONE | LAYER0_DMA_IDLE | LCD_INT_VL0_ERR) +#define LCD_INT_VL1 (LAYER1_DMA_DONE | LAYER1_DMA_IDLE | LCD_INT_VL1_ERR) +#define LCD_INT_GL0 (LAYER2_DMA_DONE | LAYER2_DMA_IDLE | LCD_INT_GL0_ERR) +#define LCD_INT_GL1 (LAYER3_DMA_DONE | LAYER3_DMA_IDLE | LCD_INT_GL1_ERR) +#define LCD_INT_DMA_ERR (LCD_INT_VL0_ERR | LCD_INT_VL1_ERR \ + | LCD_INT_GL0_ERR | LCD_INT_GL1_ERR) + +#define POSSIBLE_CRTCS 1 +#define to_kmb_plane(x) container_of(x, struct kmb_plane, base_plane) + +enum layer_id { + LAYER_0, + LAYER_1, + LAYER_2, + LAYER_3, + /* KMB_MAX_PLANES */ +}; + +#define KMB_MAX_PLANES 1 + +enum sub_plane_id { + Y_PLANE, + U_PLANE, + V_PLANE, + MAX_SUB_PLANES, +}; + +struct kmb_plane { + struct drm_plane base_plane; + unsigned char id; +}; + +struct layer_status { + bool disable; + u32 ctrl; +}; + +struct kmb_plane *kmb_plane_init(struct drm_device *drm); +void kmb_plane_destroy(struct drm_plane *plane); +#endif /* __KMB_PLANE_H__ */ diff --git a/drivers/gpu/drm/kmb/kmb_regs.h b/drivers/gpu/drm/kmb/kmb_regs.h new file mode 100644 index 000000000000..48150569f702 --- /dev/null +++ b/drivers/gpu/drm/kmb/kmb_regs.h @@ -0,0 +1,725 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright © 2018-2020 Intel Corporation + */ + +#ifndef __KMB_REGS_H__ +#define __KMB_REGS_H__ + +/*************************************************************************** + * LCD controller control register defines + ***************************************************************************/ +#define LCD_CONTROL (0x4 * 0x000) +#define 
LCD_CTRL_PROGRESSIVE (0 << 0) +#define LCD_CTRL_INTERLACED BIT(0) +#define LCD_CTRL_ENABLE BIT(1) +#define LCD_CTRL_VL1_ENABLE BIT(2) +#define LCD_CTRL_VL2_ENABLE BIT(3) +#define LCD_CTRL_GL1_ENABLE BIT(4) +#define LCD_CTRL_GL2_ENABLE BIT(5) +#define LCD_CTRL_ALPHA_BLEND_VL1 (0 << 6) +#define LCD_CTRL_ALPHA_BLEND_VL2 BIT(6) +#define LCD_CTRL_ALPHA_BLEND_GL1 (2 << 6) +#define LCD_CTRL_ALPHA_BLEND_GL2 (3 << 6) +#define LCD_CTRL_ALPHA_TOP_VL1 (0 << 8) +#define LCD_CTRL_ALPHA_TOP_VL2 BIT(8) +#define LCD_CTRL_ALPHA_TOP_GL1 (2 << 8) +#define LCD_CTRL_ALPHA_TOP_GL2 (3 << 8) +#define LCD_CTRL_ALPHA_MIDDLE_VL1 (0 << 10) +#define LCD_CTRL_ALPHA_MIDDLE_VL2 BIT(10) +#define LCD_CTRL_ALPHA_MIDDLE_GL1 (2 << 10) +#define LCD_CTRL_ALPHA_MIDDLE_GL2 (3 << 10) +#define LCD_CTRL_ALPHA_BOTTOM_VL1 (0 << 12) +#define LCD_CTRL_ALPHA_BOTTOM_VL2 BIT(12) +#define LCD_CTRL_ALPHA_BOTTOM_GL1 (2 << 12) +#define LCD_CTRL_ALPHA_BOTTOM_GL2 (3 << 12) +#define LCD_CTRL_TIM_GEN_ENABLE BIT(14) +#define LCD_CTRL_CONTINUOUS (0 << 15) +#define LCD_CTRL_ONE_SHOT BIT(15) +#define LCD_CTRL_PWM0_EN BIT(16) +#define LCD_CTRL_PWM1_EN BIT(17) +#define LCD_CTRL_PWM2_EN BIT(18) +#define LCD_CTRL_OUTPUT_DISABLED (0 << 19) +#define LCD_CTRL_OUTPUT_ENABLED BIT(19) +#define LCD_CTRL_BPORCH_ENABLE BIT(21) +#define LCD_CTRL_FPORCH_ENABLE BIT(22) +#define LCD_CTRL_PIPELINE_DMA BIT(28) +#define LCD_CTRL_VHSYNC_IDLE_LVL BIT(31) + +/* interrupts */ +#define LCD_INT_STATUS (0x4 * 0x001) +#define LCD_INT_EOF BIT(0) +#define LCD_INT_LINE_CMP BIT(1) +#define LCD_INT_VERT_COMP BIT(2) +#define LAYER0_DMA_DONE BIT(3) +#define LAYER0_DMA_IDLE BIT(4) +#define LAYER0_DMA_FIFO_OVERFLOW BIT(5) +#define LAYER0_DMA_FIFO_UNDERFLOW BIT(6) +#define LAYER0_DMA_CB_FIFO_OVERFLOW BIT(7) +#define LAYER0_DMA_CB_FIFO_UNDERFLOW BIT(8) +#define LAYER0_DMA_CR_FIFO_OVERFLOW BIT(9) +#define LAYER0_DMA_CR_FIFO_UNDERFLOW BIT(10) +#define LAYER1_DMA_DONE BIT(11) +#define LAYER1_DMA_IDLE BIT(12) +#define LAYER1_DMA_FIFO_OVERFLOW BIT(13) +#define LAYER1_DMA_FIFO_UNDERFLOW BIT(14) +#define LAYER1_DMA_CB_FIFO_OVERFLOW BIT(15) +#define LAYER1_DMA_CB_FIFO_UNDERFLOW BIT(16) +#define LAYER1_DMA_CR_FIFO_OVERFLOW BIT(17) +#define LAYER1_DMA_CR_FIFO_UNDERFLOW BIT(18) +#define LAYER2_DMA_DONE BIT(19) +#define LAYER2_DMA_IDLE BIT(20) +#define LAYER2_DMA_FIFO_OVERFLOW BIT(21) +#define LAYER2_DMA_FIFO_UNDERFLOW BIT(22) +#define LAYER3_DMA_DONE BIT(23) +#define LAYER3_DMA_IDLE BIT(24) +#define LAYER3_DMA_FIFO_OVERFLOW BIT(25) +#define LAYER3_DMA_FIFO_UNDERFLOW BIT(26) +#define LCD_INT_LAYER (0x07fffff8) +#define LCD_INT_ENABLE (0x4 * 0x002) +#define LCD_INT_CLEAR (0x4 * 0x003) +#define LCD_LINE_COUNT (0x4 * 0x004) +#define LCD_LINE_COMPARE (0x4 * 0x005) +#define LCD_VSTATUS (0x4 * 0x006) + +/* LCD_VSTATUS_COMPARE: vertical interval in which to generate the vertical + * interval interrupt + */ +/* BITS 13 and 14 */ +#define LCD_VSTATUS_COMPARE (0x4 * 0x007) +#define LCD_VSTATUS_VERTICAL_STATUS_MASK (3 << 13) +#define LCD_VSTATUS_COMPARE_VSYNC (0 << 13) +#define LCD_VSTATUS_COMPARE_BACKPORCH BIT(13) +#define LCD_VSTATUS_COMPARE_ACTIVE (2 << 13) +#define LCD_VSTATUS_COMPARE_FRONT_PORCH (3 << 13) + +#define LCD_SCREEN_WIDTH (0x4 * 0x008) +#define LCD_SCREEN_HEIGHT (0x4 * 0x009) +#define LCD_FIELD_INT_CFG (0x4 * 0x00a) +#define LCD_FIFO_FLUSH (0x4 * 0x00b) +#define LCD_BG_COLOUR_LS (0x4 * 0x00c) +#define LCD_BG_COLOUR_MS (0x4 * 0x00d) +#define LCD_RAM_CFG (0x4 * 0x00e) + +/**************************************************************************** + * LCD controller Layer config register + 
***************************************************************************/ +#define LCD_LAYER0_CFG (0x4 * 0x100) +#define LCD_LAYERn_CFG(N) (LCD_LAYER0_CFG + (0x400 * (N))) +#define LCD_LAYER_SCALE_H BIT(1) +#define LCD_LAYER_SCALE_V BIT(2) +#define LCD_LAYER_SCALE_H_V (LCD_LAYER_SCALE_H | \ + LCD_LAYER_SCALE_V) +#define LCD_LAYER_CSC_EN BIT(3) +#define LCD_LAYER_ALPHA_STATIC BIT(4) +#define LCD_LAYER_ALPHA_EMBED BIT(5) +#define LCD_LAYER_ALPHA_COMBI (LCD_LAYER_ALPHA_STATIC | \ + LCD_LAYER_ALPHA_EMBED) +/* RGB multiplied with alpha */ +#define LCD_LAYER_ALPHA_PREMULT BIT(6) +#define LCD_LAYER_INVERT_COL BIT(7) +#define LCD_LAYER_TRANSPARENT_EN BIT(8) +#define LCD_LAYER_FORMAT_YCBCR444PLAN (0 << 9) +#define LCD_LAYER_FORMAT_YCBCR422PLAN BIT(9) +#define LCD_LAYER_FORMAT_YCBCR420PLAN (2 << 9) +#define LCD_LAYER_FORMAT_RGB888PLAN (3 << 9) +#define LCD_LAYER_FORMAT_YCBCR444LIN (4 << 9) +#define LCD_LAYER_FORMAT_YCBCR422LIN (5 << 9) +#define LCD_LAYER_FORMAT_RGB888 (6 << 9) +#define LCD_LAYER_FORMAT_RGBA8888 (7 << 9) +#define LCD_LAYER_FORMAT_RGBX8888 (8 << 9) +#define LCD_LAYER_FORMAT_RGB565 (9 << 9) +#define LCD_LAYER_FORMAT_RGBA1555 (0xa << 9) +#define LCD_LAYER_FORMAT_XRGB1555 (0xb << 9) +#define LCD_LAYER_FORMAT_RGB444 (0xc << 9) +#define LCD_LAYER_FORMAT_RGBA4444 (0xd << 9) +#define LCD_LAYER_FORMAT_RGBX4444 (0xe << 9) +#define LCD_LAYER_FORMAT_RGB332 (0xf << 9) +#define LCD_LAYER_FORMAT_RGBA3328 (0x10 << 9) +#define LCD_LAYER_FORMAT_RGBX3328 (0x11 << 9) +#define LCD_LAYER_FORMAT_CLUT (0x12 << 9) +#define LCD_LAYER_FORMAT_NV12 (0x1c << 9) +#define LCD_LAYER_PLANAR_STORAGE BIT(14) +#define LCD_LAYER_8BPP (0 << 15) +#define LCD_LAYER_16BPP BIT(15) +#define LCD_LAYER_24BPP (2 << 15) +#define LCD_LAYER_32BPP (3 << 15) +#define LCD_LAYER_Y_ORDER BIT(17) +#define LCD_LAYER_CRCB_ORDER BIT(18) +#define LCD_LAYER_BGR_ORDER BIT(19) +#define LCD_LAYER_LUT_2ENT (0 << 20) +#define LCD_LAYER_LUT_4ENT BIT(20) +#define LCD_LAYER_LUT_16ENT (2 << 20) +#define LCD_LAYER_NO_FLIP (0 << 22) +#define LCD_LAYER_FLIP_V BIT(22) +#define LCD_LAYER_FLIP_H (2 << 22) +#define LCD_LAYER_ROT_R90 (3 << 22) +#define LCD_LAYER_ROT_L90 (4 << 22) +#define LCD_LAYER_ROT_180 (5 << 22) +#define LCD_LAYER_FIFO_00 (0 << 25) +#define LCD_LAYER_FIFO_25 BIT(25) +#define LCD_LAYER_FIFO_50 (2 << 25) +#define LCD_LAYER_FIFO_100 (3 << 25) +#define LCD_LAYER_INTERLEAVE_DIS (0 << 27) +#define LCD_LAYER_INTERLEAVE_V BIT(27) +#define LCD_LAYER_INTERLEAVE_H (2 << 27) +#define LCD_LAYER_INTERLEAVE_CH (3 << 27) +#define LCD_LAYER_INTERLEAVE_V_SUB (4 << 27) +#define LCD_LAYER_INTERLEAVE_H_SUB (5 << 27) +#define LCD_LAYER_INTERLEAVE_CH_SUB (6 << 27) +#define LCD_LAYER_INTER_POS_EVEN (0 << 30) +#define LCD_LAYER_INTER_POS_ODD BIT(30) + +#define LCD_LAYER0_COL_START (0x4 * 0x101) +#define LCD_LAYERn_COL_START(N) (LCD_LAYER0_COL_START + (0x400 * (N))) +#define LCD_LAYER0_ROW_START (0x4 * 0x102) +#define LCD_LAYERn_ROW_START(N) (LCD_LAYER0_ROW_START + (0x400 * (N))) +#define LCD_LAYER0_WIDTH (0x4 * 0x103) +#define LCD_LAYERn_WIDTH(N) (LCD_LAYER0_WIDTH + (0x400 * (N))) +#define LCD_LAYER0_HEIGHT (0x4 * 0x104) +#define LCD_LAYERn_HEIGHT(N) (LCD_LAYER0_HEIGHT + (0x400 * (N))) +#define LCD_LAYER0_SCALE_CFG (0x4 * 0x105) +#define LCD_LAYERn_SCALE_CFG(N) (LCD_LAYER0_SCALE_CFG + (0x400 * (N))) +#define LCD_LAYER0_ALPHA (0x4 * 0x106) +#define LCD_LAYERn_ALPHA(N) (LCD_LAYER0_ALPHA + (0x400 * (N))) +#define LCD_LAYER0_INV_COLOUR_LS (0x4 * 0x107) +#define LCD_LAYERn_INV_COLOUR_LS(N) (LCD_LAYER0_INV_COLOUR_LS + \ + (0x400 * (N))) +#define 
LCD_LAYER0_INV_COLOUR_MS (0x4 * 0x108) +#define LCD_LAYERn_INV_COLOUR_MS(N) (LCD_LAYER0_INV_COLOUR_MS + \ + (0x400 * (N))) +#define LCD_LAYER0_TRANS_COLOUR_LS (0x4 * 0x109) +#define LCD_LAYERn_TRANS_COLOUR_LS(N) (LCD_LAYER0_TRANS_COLOUR_LS + \ + (0x400 * (N))) +#define LCD_LAYER0_TRANS_COLOUR_MS (0x4 * 0x10a) +#define LCD_LAYERn_TRANS_COLOUR_MS(N) (LCD_LAYER0_TRANS_COLOUR_MS + \ + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF11 (0x4 * 0x10b) +#define LCD_LAYERn_CSC_COEFF11(N) (LCD_LAYER0_CSC_COEFF11 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF12 (0x4 * 0x10c) +#define LCD_LAYERn_CSC_COEFF12(N) (LCD_LAYER0_CSC_COEFF12 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF13 (0x4 * 0x10d) +#define LCD_LAYERn_CSC_COEFF13(N) (LCD_LAYER0_CSC_COEFF13 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF21 (0x4 * 0x10e) +#define LCD_LAYERn_CSC_COEFF21(N) (LCD_LAYER0_CSC_COEFF21 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF22 (0x4 * 0x10f) +#define LCD_LAYERn_CSC_COEFF22(N) (LCD_LAYER0_CSC_COEFF22 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF23 (0x4 * 0x110) +#define LCD_LAYERn_CSC_COEFF23(N) (LCD_LAYER0_CSC_COEFF23 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF31 (0x4 * 0x111) +#define LCD_LAYERn_CSC_COEFF31(N) (LCD_LAYER0_CSC_COEFF31 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF32 (0x4 * 0x112) +#define LCD_LAYERn_CSC_COEFF32(N) (LCD_LAYER0_CSC_COEFF32 + (0x400 * (N))) +#define LCD_LAYER0_CSC_COEFF33 (0x4 * 0x113) +#define LCD_LAYERn_CSC_COEFF33(N) (LCD_LAYER0_CSC_COEFF33 + (0x400 * (N))) +#define LCD_LAYER0_CSC_OFF1 (0x4 * 0x114) +#define LCD_LAYERn_CSC_OFF1(N) (LCD_LAYER0_CSC_OFF1 + (0x400 * (N))) +#define LCD_LAYER0_CSC_OFF2 (0x4 * 0x115) +#define LCD_LAYERn_CSC_OFF2(N) (LCD_LAYER0_CSC_OFF2 + (0x400 * (N))) +#define LCD_LAYER0_CSC_OFF3 (0x4 * 0x116) +#define LCD_LAYERn_CSC_OFF3(N) (LCD_LAYER0_CSC_OFF3 + (0x400 * (N))) + +/* LCD controller Layer DMA config register */ +#define LCD_LAYER0_DMA_CFG (0x4 * 0x117) +#define LCD_LAYERn_DMA_CFG(N) (LCD_LAYER0_DMA_CFG + \ + (0x400 * (N))) +#define LCD_DMA_LAYER_ENABLE BIT(0) +#define LCD_DMA_LAYER_STATUS BIT(1) +#define LCD_DMA_LAYER_AUTO_UPDATE BIT(2) +#define LCD_DMA_LAYER_CONT_UPDATE BIT(3) +#define LCD_DMA_LAYER_CONT_PING_PONG_UPDATE (LCD_DMA_LAYER_AUTO_UPDATE \ + | LCD_DMA_LAYER_CONT_UPDATE) +#define LCD_DMA_LAYER_FIFO_ADR_MODE BIT(4) +#define LCD_DMA_LAYER_AXI_BURST_1 BIT(5) +#define LCD_DMA_LAYER_AXI_BURST_2 (2 << 5) +#define LCD_DMA_LAYER_AXI_BURST_3 (3 << 5) +#define LCD_DMA_LAYER_AXI_BURST_4 (4 << 5) +#define LCD_DMA_LAYER_AXI_BURST_5 (5 << 5) +#define LCD_DMA_LAYER_AXI_BURST_6 (6 << 5) +#define LCD_DMA_LAYER_AXI_BURST_7 (7 << 5) +#define LCD_DMA_LAYER_AXI_BURST_8 (8 << 5) +#define LCD_DMA_LAYER_AXI_BURST_9 (9 << 5) +#define LCD_DMA_LAYER_AXI_BURST_10 (0xa << 5) +#define LCD_DMA_LAYER_AXI_BURST_11 (0xb << 5) +#define LCD_DMA_LAYER_AXI_BURST_12 (0xc << 5) +#define LCD_DMA_LAYER_AXI_BURST_13 (0xd << 5) +#define LCD_DMA_LAYER_AXI_BURST_14 (0xe << 5) +#define LCD_DMA_LAYER_AXI_BURST_15 (0xf << 5) +#define LCD_DMA_LAYER_AXI_BURST_16 (0x10 << 5) +#define LCD_DMA_LAYER_VSTRIDE_EN BIT(10) + +#define LCD_LAYER0_DMA_START_ADR (0x4 * 0x118) +#define LCD_LAYERn_DMA_START_ADDR(N) (LCD_LAYER0_DMA_START_ADR \ + + (0x400 * (N))) +#define LCD_LAYER0_DMA_START_SHADOW (0x4 * 0x119) +#define LCD_LAYERn_DMA_START_SHADOW(N) (LCD_LAYER0_DMA_START_SHADOW \ + + (0x400 * (N))) +#define LCD_LAYER0_DMA_LEN (0x4 * 0x11a) +#define LCD_LAYERn_DMA_LEN(N) (LCD_LAYER0_DMA_LEN + \ + (0x400 * (N))) +#define LCD_LAYER0_DMA_LEN_SHADOW (0x4 * 0x11b) +#define LCD_LAYERn_DMA_LEN_SHADOW(N) 
(LCD_LAYER0_DMA_LEN_SHADOW + \ + (0x400 * (N))) +#define LCD_LAYER0_DMA_STATUS (0x4 * 0x11c) +#define LCD_LAYERn_DMA_STATUS(N) (LCD_LAYER0_DMA_STATUS + \ + (0x400 * (N))) +#define LCD_LAYER0_DMA_LINE_WIDTH (0x4 * 0x11d) +#define LCD_LAYERn_DMA_LINE_WIDTH(N) (LCD_LAYER0_DMA_LINE_WIDTH + \ + (0x400 * (N))) +#define LCD_LAYER0_DMA_LINE_VSTRIDE (0x4 * 0x11e) +#define LCD_LAYERn_DMA_LINE_VSTRIDE(N) (LCD_LAYER0_DMA_LINE_VSTRIDE +\ + (0x400 * (N))) +#define LCD_LAYER0_DMA_FIFO_STATUS (0x4 * 0x11f) +#define LCD_LAYERn_DMA_FIFO_STATUS(N) (LCD_LAYER0_DMA_FIFO_STATUS + \ + (0x400 * (N))) +#define LCD_LAYER0_CFG2 (0x4 * 0x120) +#define LCD_LAYERn_CFG2(N) (LCD_LAYER0_CFG2 + (0x400 * (N))) +#define LCD_LAYER0_DMA_START_CB_ADR (0x4 * 0x700) +#define LCD_LAYERn_DMA_START_CB_ADR(N) (LCD_LAYER0_DMA_START_CB_ADR + \ + (0x20 * (N))) +#define LCD_LAYER0_DMA_START_CB_SHADOW (0x4 * 0x701) +#define LCD_LAYERn_DMA_START_CB_SHADOW(N) (LCD_LAYER0_DMA_START_CB_SHADOW\ + + (0x20 * (N))) +#define LCD_LAYER0_DMA_CB_LINE_WIDTH (0x4 * 0x702) +#define LCD_LAYERn_DMA_CB_LINE_WIDTH(N) (LCD_LAYER0_DMA_CB_LINE_WIDTH +\ + (0x20 * (N))) +#define LCD_LAYER0_DMA_CB_LINE_VSTRIDE (0x4 * 0x703) +#define LCD_LAYERn_DMA_CB_LINE_VSTRIDE(N) (LCD_LAYER0_DMA_CB_LINE_VSTRIDE\ + + (0x20 * (N))) +#define LCD_LAYER0_DMA_START_CR_ADR (0x4 * 0x704) +#define LCD_LAYERn_DMA_START_CR_ADR(N) (LCD_LAYER0_DMA_START_CR_ADR + \ + (0x20 * (N))) +#define LCD_LAYER0_DMA_START_CR_SHADOW (0x4 * 0x705) +#define LCD_LAYERn_DMA_START_CR_SHADOW(N) \ + (LCD_LAYER0_DMA_START_CR_SHADOW\ + + (0x20 * (N))) +#define LCD_LAYER0_DMA_CR_LINE_WIDTH (0x4 * 0x706) +#define LCD_LAYERn_DMA_CR_LINE_WIDTH(N) (LCD_LAYER0_DMA_CR_LINE_WIDTH +\ + (0x20 * (N))) +#define LCD_LAYER0_DMA_CR_LINE_VSTRIDE (0x4 * 0x707) +#define LCD_LAYERn_DMA_CR_LINE_VSTRIDE(N) (LCD_LAYER0_DMA_CR_LINE_VSTRIDE\ + + (0x20 * (N))) +#define LCD_LAYER1_DMA_START_CB_ADR (0x4 * 0x708) +#define LCD_LAYER1_DMA_START_CB_SHADOW (0x4 * 0x709) +#define LCD_LAYER1_DMA_CB_LINE_WIDTH (0x4 * 0x70a) +#define LCD_LAYER1_DMA_CB_LINE_VSTRIDE (0x4 * 0x70b) +#define LCD_LAYER1_DMA_START_CR_ADR (0x4 * 0x70c) +#define LCD_LAYER1_DMA_START_CR_SHADOW (0x4 * 0x70d) +#define LCD_LAYER1_DMA_CR_LINE_WIDTH (0x4 * 0x70e) +#define LCD_LAYER1_DMA_CR_LINE_VSTRIDE (0x4 * 0x70f) + +/**************************************************************************** + * LCD controller output format register defines + ***************************************************************************/ +#define LCD_OUT_FORMAT_CFG (0x4 * 0x800) +#define LCD_OUTF_FORMAT_RGB121212 (0x00) +#define LCD_OUTF_FORMAT_RGB101010 (0x01) +#define LCD_OUTF_FORMAT_RGB888 (0x02) +#define LCD_OUTF_FORMAT_RGB666 (0x03) +#define LCD_OUTF_FORMAT_RGB565 (0x04) +#define LCD_OUTF_FORMAT_RGB444 (0x05) +#define LCD_OUTF_FORMAT_MRGB121212 (0x10) +#define LCD_OUTF_FORMAT_MRGB101010 (0x11) +#define LCD_OUTF_FORMAT_MRGB888 (0x12) +#define LCD_OUTF_FORMAT_MRGB666 (0x13) +#define LCD_OUTF_FORMAT_MRGB565 (0x14) +#define LCD_OUTF_FORMAT_YCBCR420_8B_LEGACY (0x08) +#define LCD_OUTF_FORMAT_YCBCR420_8B_DCI (0x09) +#define LCD_OUTF_FORMAT_YCBCR420_8B (0x0A) +#define LCD_OUTF_FORMAT_YCBCR420_10B (0x0B) +#define LCD_OUTF_FORMAT_YCBCR420_12B (0x0C) +#define LCD_OUTF_FORMAT_YCBCR422_8B (0x0D) +#define LCD_OUTF_FORMAT_YCBCR422_10B (0x0E) +#define LCD_OUTF_FORMAT_YCBCR444 (0x0F) +#define LCD_OUTF_FORMAT_MYCBCR420_8B_LEGACY (0x18) +#define LCD_OUTF_FORMAT_MYCBCR420_8B_DCI (0x19) +#define LCD_OUTF_FORMAT_MYCBCR420_8B (0x1A) +#define LCD_OUTF_FORMAT_MYCBCR420_10B (0x1B) +#define 
LCD_OUTF_FORMAT_MYCBCR420_12B (0x1C) +#define LCD_OUTF_FORMAT_MYCBCR422_8B (0x1D) +#define LCD_OUTF_FORMAT_MYCBCR422_10B (0x1E) +#define LCD_OUTF_FORMAT_MYCBCR444 (0x1F) +#define LCD_OUTF_BGR_ORDER BIT(5) +#define LCD_OUTF_Y_ORDER BIT(6) +#define LCD_OUTF_CRCB_ORDER BIT(7) +#define LCD_OUTF_SYNC_MODE BIT(11) +#define LCD_OUTF_RGB_CONV_MODE BIT(14) +#define LCD_OUTF_MIPI_RGB_MODE BIT(18) + +#define LCD_HSYNC_WIDTH (0x4 * 0x801) +#define LCD_H_BACKPORCH (0x4 * 0x802) +#define LCD_H_ACTIVEWIDTH (0x4 * 0x803) +#define LCD_H_FRONTPORCH (0x4 * 0x804) +#define LCD_VSYNC_WIDTH (0x4 * 0x805) +#define LCD_V_BACKPORCH (0x4 * 0x806) +#define LCD_V_ACTIVEHEIGHT (0x4 * 0x807) +#define LCD_V_FRONTPORCH (0x4 * 0x808) +#define LCD_VSYNC_START (0x4 * 0x809) +#define LCD_VSYNC_END (0x4 * 0x80a) +#define LCD_V_BACKPORCH_EVEN (0x4 * 0x80b) +#define LCD_VSYNC_WIDTH_EVEN (0x4 * 0x80c) +#define LCD_V_ACTIVEHEIGHT_EVEN (0x4 * 0x80d) +#define LCD_V_FRONTPORCH_EVEN (0x4 * 0x80e) +#define LCD_VSYNC_START_EVEN (0x4 * 0x80f) +#define LCD_VSYNC_END_EVEN (0x4 * 0x810) +#define LCD_TIMING_GEN_TRIG (0x4 * 0x811) +#define LCD_PWM0_CTRL (0x4 * 0x812) +#define LCD_PWM0_RPT_LEADIN (0x4 * 0x813) +#define LCD_PWM0_HIGH_LOW (0x4 * 0x814) +#define LCD_PWM1_CTRL (0x4 * 0x815) +#define LCD_PWM1_RPT_LEADIN (0x4 * 0x816) +#define LCD_PWM1_HIGH_LOW (0x4 * 0x817) +#define LCD_PWM2_CTRL (0x4 * 0x818) +#define LCD_PWM2_RPT_LEADIN (0x4 * 0x819) +#define LCD_PWM2_HIGH_LOW (0x4 * 0x81a) +#define LCD_VIDEO0_DMA0_BYTES (0x4 * 0xb00) +#define LCD_VIDEO0_DMA0_STATE (0x4 * 0xb01) +#define LCD_DMA_STATE_ACTIVE BIT(3) +#define LCD_VIDEO0_DMA1_BYTES (0x4 * 0xb02) +#define LCD_VIDEO0_DMA1_STATE (0x4 * 0xb03) +#define LCD_VIDEO0_DMA2_BYTES (0x4 * 0xb04) +#define LCD_VIDEO0_DMA2_STATE (0x4 * 0xb05) +#define LCD_VIDEO1_DMA0_BYTES (0x4 * 0xb06) +#define LCD_VIDEO1_DMA0_STATE (0x4 * 0xb07) +#define LCD_VIDEO1_DMA1_BYTES (0x4 * 0xb08) +#define LCD_VIDEO1_DMA1_STATE (0x4 * 0xb09) +#define LCD_VIDEO1_DMA2_BYTES (0x4 * 0xb0a) +#define LCD_VIDEO1_DMA2_STATE (0x4 * 0xb0b) +#define LCD_GRAPHIC0_DMA_BYTES (0x4 * 0xb0c) +#define LCD_GRAPHIC0_DMA_STATE (0x4 * 0xb0d) +#define LCD_GRAPHIC1_DMA_BYTES (0x4 * 0xb0e) +#define LCD_GRAPHIC1_DMA_STATE (0x4 * 0xb0f) + +/*************************************************************************** + * MIPI controller control register defines + *************************************************************************/ +#define MIPI0_HS_BASE_ADDR (MIPI_BASE_ADDR + 0x400) +#define HS_OFFSET(M) (((M) + 1) * 0x400) + +#define MIPI_TX_HS_CTRL (0x0) +#define MIPI_TXm_HS_CTRL(M) (MIPI_TX_HS_CTRL + HS_OFFSET(M)) +#define HS_CTRL_EN BIT(0) +/* 1:CSI 0:DSI */ +#define HS_CTRL_CSIDSIN BIT(2) +/* 1:LCD, 0:DMA */ +#define TX_SOURCE BIT(3) +#define ACTIVE_LANES(n) ((n) << 4) +#define LCD_VC(ch) ((ch) << 8) +#define DSI_EOTP_EN BIT(11) +#define DSI_CMD_HFP_EN BIT(12) +#define CRC_EN BIT(14) +#define HSEXIT_CNT(n) ((n) << 16) +#define HSCLKIDLE_CNT BIT(24) +#define MIPI_TX_HS_SYNC_CFG (0x8) +#define MIPI_TXm_HS_SYNC_CFG(M) (MIPI_TX_HS_SYNC_CFG \ + + HS_OFFSET(M)) +#define LINE_SYNC_PKT_ENABLE BIT(0) +#define FRAME_COUNTER_ACTIVE BIT(1) +#define LINE_COUNTER_ACTIVE BIT(2) +#define DSI_V_BLANKING BIT(4) +#define DSI_HSA_BLANKING BIT(5) +#define DSI_HBP_BLANKING BIT(6) +#define DSI_HFP_BLANKING BIT(7) +#define DSI_SYNC_PULSE_EVENTN BIT(8) +#define DSI_LPM_FIRST_VSA_LINE BIT(9) +#define DSI_LPM_LAST_VFP_LINE BIT(10) +#define WAIT_ALL_SECT BIT(11) +#define WAIT_TRIG_POS BIT(15) +#define ALWAYS_USE_HACT(f) ((f) << 19) +#define FRAME_GEN_EN(f) ((f) << 
23) +#define HACT_WAIT_STOP(f) ((f) << 28) +#define MIPI_TX0_HS_FG0_SECT0_PH (0x40) +#define MIPI_TXm_HS_FGn_SECTo_PH(M, N, O) (MIPI_TX0_HS_FG0_SECT0_PH + \ + HS_OFFSET(M) + (0x2C * (N)) \ + + (8 * (O))) +#define MIPI_TX_SECT_WC_MASK (0xffff) +#define MIPI_TX_SECT_VC_MASK (3) +#define MIPI_TX_SECT_VC_SHIFT (22) +#define MIPI_TX_SECT_DT_MASK (0x3f) +#define MIPI_TX_SECT_DT_SHIFT (16) +#define MIPI_TX_SECT_DM_MASK (3) +#define MIPI_TX_SECT_DM_SHIFT (24) +#define MIPI_TX_SECT_DMA_PACKED BIT(26) +#define MIPI_TX_HS_FG0_SECT_UNPACKED_BYTES0 (0x60) +#define MIPI_TX_HS_FG0_SECT_UNPACKED_BYTES1 (0x64) +#define MIPI_TXm_HS_FGn_SECT_UNPACKED_BYTES0(M, N) \ + (MIPI_TX_HS_FG0_SECT_UNPACKED_BYTES0 \ + + HS_OFFSET(M) + (0x2C * (N))) +#define MIPI_TX_HS_FG0_SECT0_LINE_CFG (0x44) +#define MIPI_TXm_HS_FGn_SECTo_LINE_CFG(M, N, O) \ + (MIPI_TX_HS_FG0_SECT0_LINE_CFG + HS_OFFSET(M) \ + + (0x2C * (N)) + (8 * (O))) + +#define MIPI_TX_HS_FG0_NUM_LINES (0x68) +#define MIPI_TXm_HS_FGn_NUM_LINES(M, N) \ + (MIPI_TX_HS_FG0_NUM_LINES + HS_OFFSET(M) \ + + (0x2C * (N))) +#define MIPI_TX_HS_VSYNC_WIDTHS0 (0x104) +#define MIPI_TXm_HS_VSYNC_WIDTHn(M, N) \ + (MIPI_TX_HS_VSYNC_WIDTHS0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_V_BACKPORCHES0 (0x16c) +#define MIPI_TXm_HS_V_BACKPORCHESn(M, N) \ + (MIPI_TX_HS_V_BACKPORCHES0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_V_FRONTPORCHES0 (0x174) +#define MIPI_TXm_HS_V_FRONTPORCHESn(M, N) \ + (MIPI_TX_HS_V_FRONTPORCHES0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_V_ACTIVE0 (0x17c) +#define MIPI_TXm_HS_V_ACTIVEn(M, N) \ + (MIPI_TX_HS_V_ACTIVE0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_HSYNC_WIDTH0 (0x10c) +#define MIPI_TXm_HS_HSYNC_WIDTHn(M, N) \ + (MIPI_TX_HS_HSYNC_WIDTH0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_H_BACKPORCH0 (0x11c) +#define MIPI_TXm_HS_H_BACKPORCHn(M, N) \ + (MIPI_TX_HS_H_BACKPORCH0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_H_FRONTPORCH0 (0x12c) +#define MIPI_TXm_HS_H_FRONTPORCHn(M, N) \ + (MIPI_TX_HS_H_FRONTPORCH0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_H_ACTIVE0 (0x184) +#define MIPI_TXm_HS_H_ACTIVEn(M, N) \ + (MIPI_TX_HS_H_ACTIVE0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_LLP_HSYNC_WIDTH0 (0x13c) +#define MIPI_TXm_HS_LLP_HSYNC_WIDTHn(M, N) \ + (MIPI_TX_HS_LLP_HSYNC_WIDTH0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_LLP_H_BACKPORCH0 (0x14c) +#define MIPI_TXm_HS_LLP_H_BACKPORCHn(M, N) \ + (MIPI_TX_HS_LLP_H_BACKPORCH0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define MIPI_TX_HS_LLP_H_FRONTPORCH0 (0x15c) +#define MIPI_TXm_HS_LLP_H_FRONTPORCHn(M, N) \ + (MIPI_TX_HS_LLP_H_FRONTPORCH0 + HS_OFFSET(M) \ + + (0x4 * (N))) + +#define MIPI_TX_HS_MC_FIFO_CTRL_EN (0x194) +#define MIPI_TXm_HS_MC_FIFO_CTRL_EN(M) \ + (MIPI_TX_HS_MC_FIFO_CTRL_EN + HS_OFFSET(M)) + +#define MIPI_TX_HS_MC_FIFO_CHAN_ALLOC0 (0x198) +#define MIPI_TX_HS_MC_FIFO_CHAN_ALLOC1 (0x19c) +#define MIPI_TXm_HS_MC_FIFO_CHAN_ALLOCn(M, N) \ + (MIPI_TX_HS_MC_FIFO_CHAN_ALLOC0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define SET_MC_FIFO_CHAN_ALLOC(dev, ctrl, vc, sz) \ + kmb_write_bits_mipi(dev, \ + MIPI_TXm_HS_MC_FIFO_CHAN_ALLOCn(ctrl, \ + (vc) / 2), ((vc) % 2) * 16, 16, sz) +#define MIPI_TX_HS_MC_FIFO_RTHRESHOLD0 (0x1a0) +#define MIPI_TX_HS_MC_FIFO_RTHRESHOLD1 (0x1a4) +#define MIPI_TXm_HS_MC_FIFO_RTHRESHOLDn(M, N) \ + (MIPI_TX_HS_MC_FIFO_RTHRESHOLD0 + HS_OFFSET(M) \ + + (0x4 * (N))) +#define SET_MC_FIFO_RTHRESHOLD(dev, ctrl, vc, th) \ + kmb_write_bits_mipi(dev, MIPI_TXm_HS_MC_FIFO_RTHRESHOLDn(ctrl, \ + (vc) / 2), ((vc) % 2) * 16, 
16, th) +#define MIPI_TX_HS_DMA_CFG (0x1a8) +#define MIPI_TX_HS_DMA_START_ADR_CHAN0 (0x1ac) +#define MIPI_TX_HS_DMA_LEN_CHAN0 (0x1b4) + +/* MIPI IRQ */ +#define MIPI_CTRL_IRQ_STATUS0 (0x00) +#define MIPI_DPHY_ERR_IRQ 1 +#define MIPI_DPHY_ERR_MASK 0x7FE /* bits 1-10 */ +#define MIPI_HS_IRQ 13 +/* bits 13-22 */ +#define MIPI_HS_IRQ_MASK 0x7FE000 +#define MIPI_LP_EVENT_IRQ 25 +#define MIPI_GET_IRQ_STAT0(dev) kmb_read_mipi(dev, \ + MIPI_CTRL_IRQ_STATUS0) +#define MIPI_CTRL_IRQ_STATUS1 (0x04) +#define MIPI_HS_RX_EVENT_IRQ 0 +#define MIPI_GET_IRQ_STAT1(dev) kmb_read_mipi(dev, \ + MIPI_CTRL_IRQ_STATUS1) +#define MIPI_CTRL_IRQ_ENABLE0 (0x08) +#define SET_MIPI_CTRL_IRQ_ENABLE0(dev, M, N) kmb_set_bit_mipi(dev, \ + MIPI_CTRL_IRQ_ENABLE0, \ + (M) + (N)) +#define MIPI_GET_IRQ_ENABLED0(dev) kmb_read_mipi(dev, \ + MIPI_CTRL_IRQ_ENABLE0) +#define MIPI_CTRL_IRQ_ENABLE1 (0x0c) +#define MIPI_GET_IRQ_ENABLED1(dev) kmb_read_mipi(dev, \ + MIPI_CTRL_IRQ_ENABLE1) +#define MIPI_CTRL_IRQ_CLEAR0 (0x010) +#define SET_MIPI_CTRL_IRQ_CLEAR0(dev, M, N) \ + kmb_set_bit_mipi(dev, MIPI_CTRL_IRQ_CLEAR0, (M) + (N)) +#define MIPI_CTRL_IRQ_CLEAR1 (0x014) +#define SET_MIPI_CTRL_IRQ_CLEAR1(dev, M, N) \ + kmb_set_bit_mipi(dev, MIPI_CTRL_IRQ_CLEAR1, (M) + (N)) +#define MIPI_CTRL_DIG_LOOPBACK (0x018) +#define MIPI_TX_HS_IRQ_STATUS (0x01c) +#define MIPI_TX_HS_IRQ_STATUSm(M) (MIPI_TX_HS_IRQ_STATUS + \ + HS_OFFSET(M)) +#define GET_MIPI_TX_HS_IRQ_STATUS(dev, M) kmb_read_mipi(dev, \ + MIPI_TX_HS_IRQ_STATUSm(M)) +#define MIPI_TX_HS_IRQ_LINE_COMPARE BIT(1) +#define MIPI_TX_HS_IRQ_FRAME_DONE_0 BIT(2) +#define MIPI_TX_HS_IRQ_FRAME_DONE_1 BIT(3) +#define MIPI_TX_HS_IRQ_FRAME_DONE_2 BIT(4) +#define MIPI_TX_HS_IRQ_FRAME_DONE_3 BIT(5) +#define MIPI_TX_HS_IRQ_DMA_DONE_0 BIT(6) +#define MIPI_TX_HS_IRQ_DMA_IDLE_0 BIT(7) +#define MIPI_TX_HS_IRQ_DMA_DONE_1 BIT(8) +#define MIPI_TX_HS_IRQ_DMA_IDLE_1 BIT(9) +#define MIPI_TX_HS_IRQ_DMA_DONE_2 BIT(10) +#define MIPI_TX_HS_IRQ_DMA_IDLE_2 BIT(11) +#define MIPI_TX_HS_IRQ_DMA_DONE_3 BIT(12) +#define MIPI_TX_HS_IRQ_DMA_IDLE_3 BIT(13) +#define MIPI_TX_HS_IRQ_MC_FIFO_UNDERFLOW BIT(14) +#define MIPI_TX_HS_IRQ_MC_FIFO_OVERFLOW BIT(15) +#define MIPI_TX_HS_IRQ_LLP_FIFO_EMPTY BIT(16) +#define MIPI_TX_HS_IRQ_LLP_REQUEST_QUEUE_FULL BIT(17) +#define MIPI_TX_HS_IRQ_LLP_REQUEST_QUEUE_ERROR BIT(18) +#define MIPI_TX_HS_IRQ_LLP_WORD_COUNT_ERROR BIT(20) +#define MIPI_TX_HS_IRQ_FRAME_DONE \ + (MIPI_TX_HS_IRQ_FRAME_DONE_0 | \ + MIPI_TX_HS_IRQ_FRAME_DONE_1 | \ + MIPI_TX_HS_IRQ_FRAME_DONE_2 | \ + MIPI_TX_HS_IRQ_FRAME_DONE_3) + +#define MIPI_TX_HS_IRQ_DMA_DONE \ + (MIPI_TX_HS_IRQ_DMA_DONE_0 | \ + MIPI_TX_HS_IRQ_DMA_DONE_1 | \ + MIPI_TX_HS_IRQ_DMA_DONE_2 | \ + MIPI_TX_HS_IRQ_DMA_DONE_3) + +#define MIPI_TX_HS_IRQ_DMA_IDLE \ + (MIPI_TX_HS_IRQ_DMA_IDLE_0 | \ + MIPI_TX_HS_IRQ_DMA_IDLE_1 | \ + MIPI_TX_HS_IRQ_DMA_IDLE_2 | \ + MIPI_TX_HS_IRQ_DMA_IDLE_3) + +#define MIPI_TX_HS_IRQ_ERROR \ + (MIPI_TX_HS_IRQ_MC_FIFO_UNDERFLOW | \ + MIPI_TX_HS_IRQ_MC_FIFO_OVERFLOW | \ + MIPI_TX_HS_IRQ_LLP_FIFO_EMPTY | \ + MIPI_TX_HS_IRQ_LLP_REQUEST_QUEUE_FULL | \ + MIPI_TX_HS_IRQ_LLP_REQUEST_QUEUE_ERROR | \ + MIPI_TX_HS_IRQ_LLP_WORD_COUNT_ERROR) + +#define MIPI_TX_HS_IRQ_ALL \ + (MIPI_TX_HS_IRQ_FRAME_DONE | \ + MIPI_TX_HS_IRQ_DMA_DONE | \ + MIPI_TX_HS_IRQ_DMA_IDLE | \ + MIPI_TX_HS_IRQ_LINE_COMPARE | \ + MIPI_TX_HS_IRQ_ERROR) + +#define MIPI_TX_HS_IRQ_ENABLE (0x020) +#define GET_HS_IRQ_ENABLE(dev, M) kmb_read_mipi(dev, \ + MIPI_TX_HS_IRQ_ENABLE \ + + HS_OFFSET(M)) +#define MIPI_TX_HS_IRQ_CLEAR (0x024) + +/* MIPI Test Pattern Generation */ +#define 
MIPI_TX_HS_TEST_PAT_CTRL (0x230) +#define MIPI_TXm_HS_TEST_PAT_CTRL(M) \ + (MIPI_TX_HS_TEST_PAT_CTRL + HS_OFFSET(M)) +#define TP_EN_VCm(M) (1 << ((M) * 0x04)) +#define TP_SEL_VCm(M, N) \ + ((N) << (((M) * 0x04) + 1)) +#define TP_STRIPE_WIDTH(M) ((M) << 16) +#define MIPI_TX_HS_TEST_PAT_COLOR0 (0x234) +#define MIPI_TXm_HS_TEST_PAT_COLOR0(M) \ + (MIPI_TX_HS_TEST_PAT_COLOR0 + HS_OFFSET(M)) +#define MIPI_TX_HS_TEST_PAT_COLOR1 (0x238) +#define MIPI_TXm_HS_TEST_PAT_COLOR1(M) \ + (MIPI_TX_HS_TEST_PAT_COLOR1 + HS_OFFSET(M)) + +/* D-PHY regs */ +#define DPHY_ENABLE (0x100) +#define DPHY_INIT_CTRL0 (0x104) +#define SHUTDOWNZ 0 +#define RESETZ 12 +#define DPHY_INIT_CTRL1 (0x108) +#define PLL_CLKSEL_0 18 +#define PLL_SHADOW_CTRL 16 +#define DPHY_INIT_CTRL2 (0x10c) +#define SET_DPHY_INIT_CTRL0(dev, dphy, offset) \ + kmb_set_bit_mipi(dev, DPHY_INIT_CTRL0, \ + ((dphy) + (offset))) +#define CLR_DPHY_INIT_CTRL0(dev, dphy, offset) \ + kmb_clr_bit_mipi(dev, DPHY_INIT_CTRL0, \ + ((dphy) + (offset))) +#define DPHY_PLL_OBS0 (0x110) +#define DPHY_PLL_OBS1 (0x114) +#define DPHY_PLL_OBS2 (0x118) +#define DPHY_FREQ_CTRL0_3 (0x11c) +#define DPHY_FREQ_CTRL4_7 (0x120) +#define SET_DPHY_FREQ_CTRL0_3(dev, dphy, val) \ + kmb_write_bits_mipi(dev, DPHY_FREQ_CTRL0_3 \ + + (((dphy) / 4) * 4), ((dphy) % 4) * 8, 6, val) + +#define DPHY_FORCE_CTRL0 (0x128) +#define DPHY_FORCE_CTRL1 (0x12C) +#define MIPI_DPHY_STAT0_3 (0x134) +#define MIPI_DPHY_STAT4_7 (0x138) +#define GET_STOPSTATE_DATA(dev, dphy) \ + (((kmb_read_mipi(dev, MIPI_DPHY_STAT0_3 + \ + ((dphy) / 4) * 4)) >> \ + ((((dphy) % 4) * 8) + 4)) & 0x03) + +#define MIPI_DPHY_ERR_STAT6_7 (0x14C) + +#define DPHY_TEST_CTRL0 (0x154) +#define SET_DPHY_TEST_CTRL0(dev, dphy) \ + kmb_set_bit_mipi(dev, DPHY_TEST_CTRL0, (dphy)) +#define CLR_DPHY_TEST_CTRL0(dev, dphy) \ + kmb_clr_bit_mipi(dev, DPHY_TEST_CTRL0, \ + (dphy)) +#define DPHY_TEST_CTRL1 (0x158) +#define SET_DPHY_TEST_CTRL1_CLK(dev, dphy) \ + kmb_set_bit_mipi(dev, DPHY_TEST_CTRL1, (dphy)) +#define CLR_DPHY_TEST_CTRL1_CLK(dev, dphy) \ + kmb_clr_bit_mipi(dev, DPHY_TEST_CTRL1, (dphy)) +#define SET_DPHY_TEST_CTRL1_EN(dev, dphy) \ + kmb_set_bit_mipi(dev, DPHY_TEST_CTRL1, ((dphy) + 12)) +#define CLR_DPHY_TEST_CTRL1_EN(dev, dphy) \ + kmb_clr_bit_mipi(dev, DPHY_TEST_CTRL1, ((dphy) + 12)) +#define DPHY_TEST_DIN0_3 (0x15c) +#define SET_TEST_DIN0_3(dev, dphy, val) \ + kmb_write_mipi(dev, DPHY_TEST_DIN0_3 + \ + 4, ((val) << (((dphy) % 4) * 8))) +#define DPHY_TEST_DOUT0_3 (0x168) +#define GET_TEST_DOUT0_3(dev, dphy) \ + (kmb_read_mipi(dev, DPHY_TEST_DOUT0_3) \ + >> (((dphy) % 4) * 8) & 0xff) +#define DPHY_TEST_DOUT4_7 (0x16C) +#define GET_TEST_DOUT4_7(dev, dphy) \ + (kmb_read_mipi(dev, DPHY_TEST_DOUT4_7) \ + >> (((dphy) % 4) * 8) & 0xff) +#define DPHY_TEST_DOUT8_9 (0x170) +#define DPHY_TEST_DIN4_7 (0x160) +#define DPHY_TEST_DIN8_9 (0x164) +#define DPHY_PLL_LOCK (0x188) +#define GET_PLL_LOCK(dev, dphy) \ + (kmb_read_mipi(dev, DPHY_PLL_LOCK) \ + & (1 << ((dphy) - MIPI_DPHY6))) +#define DPHY_CFG_CLK_EN (0x18c) + +#define MSS_MIPI_CIF_CFG (0x00) +#define MSS_LCD_MIPI_CFG (0x04) +#define MSS_CAM_CLK_CTRL (0x10) +#define MSS_LOOPBACK_CFG (0x0C) +#define LCD BIT(1) +#define MIPI_COMMON BIT(2) +#define MIPI_TX0 BIT(9) +#define MSS_CAM_RSTN_CTRL (0x14) +#define MSS_CAM_RSTN_SET (0x20) +#define MSS_CAM_RSTN_CLR (0x24) + +#define MSSCPU_CPR_CLK_EN (0x0) +#define MSSCPU_CPR_RST_EN (0x10) +#define BIT_MASK_16 (0xffff) +/* icam lcd qos */ +#define LCD_QOS_PRIORITY (0x8) +#define LCD_QOS_MODE (0xC) +#define LCD_QOS_BW (0x10) 
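+ +/* Addressing conventions used throughout this header (as observed from the + * definitions above, not taken from hw documentation): LCD register offsets + * are word indices scaled by 0x4; per-layer LCD registers stride by 0x400 + * per layer N (0x20 for the Cb/Cr DMA block); per-controller MIPI HS + * registers stride by HS_OFFSET(M) = ((M) + 1) * 0x400; and the shared + * D-PHY status/test registers pack one byte per D-PHY, indexed by + * ((dphy) % 4) within each 32-bit register. + */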
+#endif /* __KMB_REGS_H__ */ diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c index bbe02817721b..da7099d20bd5 100644 --- a/drivers/gpu/drm/lima/lima_devfreq.c +++ b/drivers/gpu/drm/lima/lima_devfreq.c @@ -35,18 +35,13 @@ static int lima_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) { struct dev_pm_opp *opp; - int err; opp = devfreq_recommended_opp(dev, freq, flags); if (IS_ERR(opp)) return PTR_ERR(opp); dev_pm_opp_put(opp); - err = dev_pm_opp_set_rate(dev, *freq); - if (err) - return err; - - return 0; + return dev_pm_opp_set_rate(dev, *freq); } static void lima_devfreq_reset(struct lima_devfreq *devfreq) @@ -105,10 +100,7 @@ void lima_devfreq_fini(struct lima_device *ldev) devfreq->devfreq = NULL; } - if (devfreq->opp_of_table_added) { - dev_pm_opp_of_remove_table(ldev->dev); - devfreq->opp_of_table_added = false; - } + dev_pm_opp_of_remove_table(ldev->dev); if (devfreq->regulators_opp_table) { dev_pm_opp_put_regulators(devfreq->regulators_opp_table); @@ -162,7 +154,6 @@ int lima_devfreq_init(struct lima_device *ldev) ret = dev_pm_opp_of_add_table(dev); if (ret) goto err_fini; - ldevfreq->opp_of_table_added = true; lima_devfreq_reset(ldevfreq); diff --git a/drivers/gpu/drm/lima/lima_devfreq.h b/drivers/gpu/drm/lima/lima_devfreq.h index 5eed2975a375..2d9b3008ce77 100644 --- a/drivers/gpu/drm/lima/lima_devfreq.h +++ b/drivers/gpu/drm/lima/lima_devfreq.h @@ -18,7 +18,6 @@ struct lima_devfreq { struct opp_table *clkname_opp_table; struct opp_table *regulators_opp_table; struct thermal_cooling_device *cooling; - bool opp_of_table_added; ktime_t busy_time; ktime_t idle_time; diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index ab460121fd52..7b8d7178d09a 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -255,13 +255,13 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = { DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops); -/** +/* * Changelog: * * - 1.1.0 - add heap buffer support */ -static struct drm_driver lima_drm_driver = { +static const struct drm_driver lima_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, .open = lima_drm_driver_open, .postclose = lima_drm_driver_postclose, diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 11223fe348df..832e5280a6ed 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -182,14 +182,14 @@ static int lima_gem_pin(struct drm_gem_object *obj) return drm_gem_shmem_pin(obj); } -static void *lima_gem_vmap(struct drm_gem_object *obj) +static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct lima_bo *bo = to_lima_bo(obj); if (bo->heap_size) - return ERR_PTR(-EINVAL); + return -EINVAL; - return drm_gem_shmem_vmap(obj); + return drm_gem_shmem_vmap(obj, map); } static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index dc6df9e9a40d..63b4c5643f9c 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ +#include <linux/dma-buf-map.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -223,7 +224,6 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job) struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); struct 
lima_device *ldev = pipe->ldev; struct lima_fence *fence; - struct dma_fence *ret; int i, err; /* after GPU reset */ @@ -245,7 +245,7 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job) /* for caller usage of the fence, otherwise irq handler * may consume the fence before caller use it */ - ret = dma_fence_get(task->fence); + dma_fence_get(task->fence); pipe->current_task = task; @@ -303,6 +303,8 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) struct lima_dump_chunk_buffer *buffer_chunk; u32 size, task_size, mem_size; int i; + struct dma_buf_map map; + int ret; mutex_lock(&dev->error_task_list_lock); @@ -388,15 +390,15 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) } else { buffer_chunk->size = lima_bo_size(bo); - data = drm_gem_shmem_vmap(&bo->base.base); - if (IS_ERR_OR_NULL(data)) { + ret = drm_gem_shmem_vmap(&bo->base.base, &map); + if (ret) { kvfree(et); goto out; } - memcpy(buffer_chunk + 1, data, buffer_chunk->size); + memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size); - drm_gem_shmem_vunmap(&bo->base.base, data); + drm_gem_shmem_vunmap(&bo->base.base, &map); } buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size; diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index f9b5f450a9cb..870626e04ec0 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -178,7 +178,7 @@ static int mcde_modeset_init(struct drm_device *drm) DEFINE_DRM_GEM_CMA_FOPS(drm_fops); -static struct drm_driver mcde_drm_driver = { +static const struct drm_driver mcde_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .lastclose = drm_fb_helper_lastclose, diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index 65cd03a4be29..2976d21e9a34 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -13,6 +13,7 @@ config DRM_MEDIATEK select DRM_PANEL select MEMORY select MTK_SMI + select PHY_MTK_MIPI_DSI select VIDEOMODE_HELPERS help Choose this option if you have a Mediatek SoCs. 
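A recurring change in this series — visible in the lima hunks above and in the mediatek and mgag200 hunks below — is the tree-wide conversion of GEM vmap interfaces: drm_gem_shmem_vmap() and the driver vmap callbacks now fill a caller-provided struct dma_buf_map and return an errno, instead of returning a raw pointer (or ERR_PTR/NULL). A minimal caller-side sketch of the new convention, assuming only what the hunks in this patch show; the helper name and the dst/len arguments are invented for illustration:

#include <linux/dma-buf-map.h>
#include <linux/string.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>

/*
 * Illustrative only (not part of this patch): copy a shmem BO's
 * contents out, using the dma_buf_map-based interface.
 */
static int example_read_bo(struct drm_gem_object *obj, void *dst, size_t len)
{
	struct dma_buf_map map;
	int ret;

	/* Old style: vaddr = drm_gem_shmem_vmap(obj); IS_ERR_OR_NULL(vaddr) check. */
	ret = drm_gem_shmem_vmap(obj, &map);
	if (ret)
		return ret;

	/* The mapping address now lives in the map, not in a return value. */
	memcpy(dst, map.vaddr, len);

	/* Old style: drm_gem_shmem_vunmap(obj, vaddr). New: pass the map back. */
	drm_gem_shmem_vunmap(obj, &map);

	return 0;
}

The same pattern appears further down in lima_sched_build_error_task_list(), mtk_drm_gem_prime_vmap() and mgag200_handle_damage(): callers that previously tested the returned pointer now test the int return value and read map.vaddr.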
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile index 77b0fd86063d..a892edec5563 100644 --- a/drivers/gpu/drm/mediatek/Makefile +++ b/drivers/gpu/drm/mediatek/Makefile @@ -10,9 +10,6 @@ mediatek-drm-y := mtk_disp_color.o \ mtk_drm_gem.o \ mtk_drm_plane.o \ mtk_dsi.o \ - mtk_mipi_tx.o \ - mtk_mt8173_mipi_tx.o \ - mtk_mt8183_mipi_tx.o \ mtk_dpi.o obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c index 3ae9c810845b..6048cbc9f0ec 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_color.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c @@ -16,6 +16,7 @@ #define DISP_COLOR_CFG_MAIN 0x0400 #define DISP_COLOR_START_MT2701 0x0f00 +#define DISP_COLOR_START_MT8167 0x0400 #define DISP_COLOR_START_MT8173 0x0c00 #define DISP_COLOR_START(comp) ((comp)->data->color_offset) #define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50) @@ -30,8 +31,9 @@ struct mtk_disp_color_data { /** * struct mtk_disp_color - DISP_COLOR driver structure - * @ddp_comp - structure containing type enum and hardware resources - * @crtc - associated crtc to report irq events to + * @ddp_comp: structure containing type enum and hardware resources + * @crtc: associated crtc to report irq events to + * @data: platform colour driver data */ struct mtk_disp_color { struct mtk_ddp_comp ddp_comp; @@ -148,6 +150,10 @@ static const struct mtk_disp_color_data mt2701_color_driver_data = { .color_offset = DISP_COLOR_START_MT2701, }; +static const struct mtk_disp_color_data mt8167_color_driver_data = { + .color_offset = DISP_COLOR_START_MT8167, +}; + static const struct mtk_disp_color_data mt8173_color_driver_data = { .color_offset = DISP_COLOR_START_MT8173, }; @@ -155,6 +161,8 @@ static const struct mtk_disp_color_data mt8173_color_driver_data = { static const struct of_device_id mtk_disp_color_driver_dt_match[] = { { .compatible = "mediatek,mt2701-disp-color", .data = &mt2701_color_driver_data}, + { .compatible = "mediatek,mt8167-disp-color", + .data = &mt8167_color_driver_data}, { .compatible = "mediatek,mt8173-disp-color", .data = &mt8173_color_driver_data}, {}, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 28651bc579bc..74ef6fc0528b 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -65,8 +65,9 @@ struct mtk_disp_ovl_data { /** * struct mtk_disp_ovl - DISP_OVL driver structure - * @ddp_comp - structure containing type enum and hardware resources - * @crtc - associated crtc to report vblank events to + * @ddp_comp: structure containing type enum and hardware resources + * @crtc: associated crtc to report vblank events to + * @data: platform data */ struct mtk_disp_ovl { struct mtk_ddp_comp ddp_comp; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index e04319fedf46..d46b8ae1d080 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -56,8 +56,9 @@ struct mtk_disp_rdma_data { /** * struct mtk_disp_rdma - DISP_RDMA driver structure - * @ddp_comp - structure containing type enum and hardware resources - * @crtc - associated crtc to report irq events to + * @ddp_comp: structure containing type enum and hardware resources + * @crtc: associated crtc to report irq events to + * @data: local driver data */ struct mtk_disp_rdma { struct mtk_ddp_comp ddp_comp; diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c 
b/drivers/gpu/drm/mediatek/mtk_dpi.c index cf11c4850b40..52f11a63a330 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -522,15 +522,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, return 0; } -static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = { - .destroy = mtk_dpi_encoder_destroy, -}; - static int mtk_dpi_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 23f5c10b0c67..bfe994230543 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -11,6 +11,7 @@ #include <asm/barrier.h> #include <soc/mediatek/smi.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> @@ -23,7 +24,7 @@ #include "mtk_drm_gem.h" #include "mtk_drm_plane.h" -/** +/* * struct mtk_drm_crtc - MediaTek specific crtc structure. * @base: crtc object. * @enabled: records whether crtc_enable succeeded @@ -33,6 +34,8 @@ * @mutex: handle to one of the ten disp_mutex streams * @ddp_comp_nr: number of components in ddp_comp * @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc + * + * TODO: Needs update: this header is missing a bunch of member descriptions. */ struct mtk_drm_crtc { struct drm_crtc base; @@ -577,17 +580,19 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) { - struct mtk_crtc_state *crtc_state = to_mtk_crtc_state(crtc->state); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state); struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - if (mtk_crtc->event && crtc_state->base.event) + if (mtk_crtc->event && mtk_crtc_state->base.event) DRM_ERROR("new event while there is still a pending event\n"); - if (crtc_state->base.event) { - crtc_state->base.event->pipe = drm_crtc_index(crtc); + if (mtk_crtc_state->base.event) { + mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc); WARN_ON(drm_crtc_vblank_get(crtc) != 0); - mtk_crtc->event = crtc_state->base.event; - crtc_state->base.event = NULL; + mtk_crtc->event = mtk_crtc_state->base.event; + mtk_crtc_state->base.event = NULL; } } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 014c1bbe1df2..1f99db6b1a42 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c @@ -25,6 +25,19 @@ #define INT_MUTEX BIT(1) +#define MT8167_MUTEX_MOD_DISP_PWM 1 +#define MT8167_MUTEX_MOD_DISP_OVL0 6 +#define MT8167_MUTEX_MOD_DISP_OVL1 7 +#define MT8167_MUTEX_MOD_DISP_RDMA0 8 +#define MT8167_MUTEX_MOD_DISP_RDMA1 9 +#define MT8167_MUTEX_MOD_DISP_WDMA0 10 +#define MT8167_MUTEX_MOD_DISP_CCORR 11 +#define MT8167_MUTEX_MOD_DISP_COLOR 12 +#define MT8167_MUTEX_MOD_DISP_AAL 13 +#define MT8167_MUTEX_MOD_DISP_GAMMA 14 +#define MT8167_MUTEX_MOD_DISP_DITHER 15 +#define MT8167_MUTEX_MOD_DISP_UFOE 16 + #define MT8173_MUTEX_MOD_DISP_OVL0 11 #define MT8173_MUTEX_MOD_DISP_OVL1 12 #define MT8173_MUTEX_MOD_DISP_RDMA0 13 @@ -73,6 +86,8 @@ #define MUTEX_SOF_DPI1 4 #define MUTEX_SOF_DSI2 5 #define MUTEX_SOF_DSI3 6 +#define MT8167_MUTEX_SOF_DPI0 2 +#define MT8167_MUTEX_SOF_DPI1 3 
struct mtk_disp_mutex { @@ -135,6 +150,21 @@ static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA1] = MT2712_MUTEX_MOD_DISP_WDMA1, }; +static const unsigned int mt8167_mutex_mod[DDP_COMPONENT_ID_MAX] = { + [DDP_COMPONENT_AAL0] = MT8167_MUTEX_MOD_DISP_AAL, + [DDP_COMPONENT_CCORR] = MT8167_MUTEX_MOD_DISP_CCORR, + [DDP_COMPONENT_COLOR0] = MT8167_MUTEX_MOD_DISP_COLOR, + [DDP_COMPONENT_DITHER] = MT8167_MUTEX_MOD_DISP_DITHER, + [DDP_COMPONENT_GAMMA] = MT8167_MUTEX_MOD_DISP_GAMMA, + [DDP_COMPONENT_OVL0] = MT8167_MUTEX_MOD_DISP_OVL0, + [DDP_COMPONENT_OVL1] = MT8167_MUTEX_MOD_DISP_OVL1, + [DDP_COMPONENT_PWM0] = MT8167_MUTEX_MOD_DISP_PWM, + [DDP_COMPONENT_RDMA0] = MT8167_MUTEX_MOD_DISP_RDMA0, + [DDP_COMPONENT_RDMA1] = MT8167_MUTEX_MOD_DISP_RDMA1, + [DDP_COMPONENT_UFOE] = MT8167_MUTEX_MOD_DISP_UFOE, + [DDP_COMPONENT_WDMA0] = MT8167_MUTEX_MOD_DISP_WDMA0, +}; + static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT8173_MUTEX_MOD_DISP_AAL, [DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0, @@ -163,6 +193,13 @@ static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = { [DDP_MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3, }; +static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = { + [DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, + [DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0, + [DDP_MUTEX_SOF_DPI0] = MT8167_MUTEX_SOF_DPI0, + [DDP_MUTEX_SOF_DPI1] = MT8167_MUTEX_SOF_DPI1, +}; + static const struct mtk_ddp_data mt2701_ddp_driver_data = { .mutex_mod = mt2701_mutex_mod, .mutex_sof = mt2712_mutex_sof, @@ -177,6 +214,14 @@ static const struct mtk_ddp_data mt2712_ddp_driver_data = { .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0, }; +static const struct mtk_ddp_data mt8167_ddp_driver_data = { + .mutex_mod = mt8167_mutex_mod, + .mutex_sof = mt8167_mutex_sof, + .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0, + .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0, + .no_clk = true, +}; + static const struct mtk_ddp_data mt8173_ddp_driver_data = { .mutex_mod = mt8173_mutex_mod, .mutex_sof = mt2712_mutex_sof, @@ -400,6 +445,8 @@ static const struct of_device_id ddp_driver_dt_match[] = { .data = &mt2701_ddp_driver_data}, { .compatible = "mediatek,mt2712-disp-mutex", .data = &mt2712_ddp_driver_data}, + { .compatible = "mediatek,mt8167-disp-mutex", + .data = &mt8167_ddp_driver_data}, { .compatible = "mediatek,mt8173-disp-mutex", .data = &mt8173_ddp_driver_data}, {}, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 8eba44be3a8a..3064eac1a750 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -359,7 +359,7 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = { static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = { [MTK_DISP_OVL] = "ovl", - [MTK_DISP_OVL_2L] = "ovl_2l", + [MTK_DISP_OVL_2L] = "ovl-2l", [MTK_DISP_RDMA] = "rdma", [MTK_DISP_WDMA] = "wdma", [MTK_DISP_COLOR] = "color", diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 7f3398a7c2b0..2f717df28a77 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -241,21 +241,10 @@ static int mtk_drm_kms_init(struct drm_device *drm) * Configure the DMA segment size to make sure we get contiguous IOVA * when importing PRIME buffers. 
*/ - if (!dma_dev->dma_parms) { - private->dma_parms_allocated = true; - dma_dev->dma_parms = - devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms), - GFP_KERNEL); - } - if (!dma_dev->dma_parms) { - ret = -ENOMEM; - goto put_dma_dev; - } - - ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32)); + ret = dma_set_max_seg_size(dma_dev, UINT_MAX); if (ret) { dev_err(dma_dev, "Failed to set DMA segment size\n"); - goto err_unset_dma_parms; + goto err_component_unbind; } /* @@ -266,18 +255,13 @@ static int mtk_drm_kms_init(struct drm_device *drm) drm->irq_enabled = true; ret = drm_vblank_init(drm, MAX_CRTC); if (ret < 0) - goto err_unset_dma_parms; + goto err_component_unbind; drm_kms_helper_poll_init(drm); drm_mode_config_reset(drm); return 0; -err_unset_dma_parms: - if (private->dma_parms_allocated) - dma_dev->dma_parms = NULL; -put_dma_dev: - put_device(private->dma_dev); err_component_unbind: component_unbind_all(drm->dev, drm); put_mutex_dev: @@ -287,14 +271,9 @@ put_mutex_dev: static void mtk_drm_kms_deinit(struct drm_device *drm) { - struct mtk_drm_private *private = drm->dev_private; - drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm); - if (private->dma_parms_allocated) - private->dma_dev->dma_parms = NULL; - component_unbind_all(drm->dev, drm); } @@ -313,15 +292,15 @@ static const struct file_operations mtk_drm_fops = { * We need to override this because the device used to import the memory is * not dev->dev, as drm_gem_prime_import() expects. */ -struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) +static struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) { struct mtk_drm_private *private = dev->dev_private; return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev); } -static struct drm_driver mtk_drm_driver = { +static const struct drm_driver mtk_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .dumb_create = mtk_drm_gem_dumb_create, @@ -631,7 +610,6 @@ static struct platform_driver * const mtk_drm_drivers[] = { &mtk_disp_rdma_driver, &mtk_dpi_driver, &mtk_drm_platform_driver, - &mtk_mipi_tx_driver, &mtk_dsi_driver, }; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index b5be63e53176..5d771cf0bf25 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -44,8 +44,6 @@ struct mtk_drm_private { struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX]; const struct mtk_mmsys_driver_data *data; struct drm_atomic_state *suspend_state; - - bool dma_parms_allocated; }; extern struct platform_driver mtk_ddp_driver; @@ -54,6 +52,5 @@ extern struct platform_driver mtk_disp_ovl_driver; extern struct platform_driver mtk_disp_rdma_driver; extern struct platform_driver mtk_dpi_driver; extern struct platform_driver mtk_dsi_driver; -extern struct platform_driver mtk_mipi_tx_driver; #endif /* MTK_DRM_DRV_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index cdd1a6e61564..28a2ee1336ef 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -240,23 +240,25 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, return &mtk_gem->base; } -void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) +int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); - struct sg_table 
*sgt; + struct sg_table *sgt = NULL; unsigned int npages; if (mtk_gem->kvaddr) - return mtk_gem->kvaddr; + goto out; sgt = mtk_gem_prime_get_sg_table(obj); if (IS_ERR(sgt)) - return NULL; + return PTR_ERR(sgt); npages = obj->size >> PAGE_SHIFT; mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL); - if (!mtk_gem->pages) - goto out; + if (!mtk_gem->pages) { + kfree(sgt); + return -ENOMEM; + } drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages); @@ -265,13 +267,15 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) out: kfree(sgt); + dma_buf_map_set_vaddr(map, mtk_gem->kvaddr); - return mtk_gem->kvaddr; + return 0; } -void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + void *vaddr = map->vaddr; if (!mtk_gem->pages) return; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h index ff9f976d9807..6da5ccb4b933 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h @@ -45,7 +45,7 @@ int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); -void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj); -void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); +void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); #endif diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 97a1ff529a1d..8ee55f9e2954 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -875,19 +875,8 @@ static void mtk_hdmi_video_set_display_mode(struct mtk_hdmi *hdmi, mtk_hdmi_hw_msic_setting(hdmi, mode); } -static int mtk_hdmi_aud_enable_packet(struct mtk_hdmi *hdmi, bool enable) -{ - mtk_hdmi_hw_send_aud_packet(hdmi, enable); - return 0; -} -static int mtk_hdmi_aud_on_off_hw_ncts(struct mtk_hdmi *hdmi, bool on) -{ - mtk_hdmi_hw_ncts_enable(hdmi, on); - return 0; -} - -static int mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi) +static void mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi) { enum hdmi_aud_channel_type chan_type; u8 chan_count; @@ -917,8 +906,6 @@ static int mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi) chan_count = mtk_hdmi_aud_get_chnl_count(chan_type); mtk_hdmi_hw_aud_set_i2s_chan_num(hdmi, chan_type, chan_count); mtk_hdmi_hw_aud_set_input_type(hdmi, hdmi->aud_param.aud_input_type); - - return 0; } static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi, @@ -926,7 +913,7 @@ static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi, { unsigned int sample_rate = hdmi->aud_param.codec_params.sample_rate; - mtk_hdmi_aud_on_off_hw_ncts(hdmi, false); + mtk_hdmi_hw_ncts_enable(hdmi, false); mtk_hdmi_hw_aud_src_disable(hdmi); mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_ACLK_INV); @@ -964,7 +951,7 @@ static int mtk_hdmi_aud_output_config(struct mtk_hdmi *hdmi, struct drm_display_mode *display_mode) { mtk_hdmi_hw_aud_mute(hdmi); - mtk_hdmi_aud_enable_packet(hdmi, false); + mtk_hdmi_hw_send_aud_packet(hdmi, false); mtk_hdmi_aud_set_input(hdmi); mtk_hdmi_aud_set_src(hdmi, display_mode); @@ -973,8 +960,8 @@ static int mtk_hdmi_aud_output_config(struct mtk_hdmi 
*hdmi, usleep_range(50, 100); - mtk_hdmi_aud_on_off_hw_ncts(hdmi, true); - mtk_hdmi_aud_enable_packet(hdmi, true); + mtk_hdmi_hw_ncts_enable(hdmi, true); + mtk_hdmi_hw_send_aud_packet(hdmi, true); mtk_hdmi_hw_aud_unmute(hdmi); return 0; } @@ -1102,13 +1089,13 @@ static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi) static void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi) { - mtk_hdmi_aud_enable_packet(hdmi, true); + mtk_hdmi_hw_send_aud_packet(hdmi, true); hdmi->audio_enable = true; } static void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi) { - mtk_hdmi_aud_enable_packet(hdmi, false); + mtk_hdmi_hw_send_aud_packet(hdmi, false); hdmi->audio_enable = false; } diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c deleted file mode 100644 index 8cee2591e728..000000000000 --- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c +++ /dev/null @@ -1,245 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2015 MediaTek Inc. - */ - -#include "mtk_mipi_tx.h" - -inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw) -{ - return container_of(hw, struct mtk_mipi_tx, pll_hw); -} - -void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, - u32 bits) -{ - u32 temp = readl(mipi_tx->regs + offset); - - writel(temp & ~bits, mipi_tx->regs + offset); -} - -void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, - u32 bits) -{ - u32 temp = readl(mipi_tx->regs + offset); - - writel(temp | bits, mipi_tx->regs + offset); -} - -void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, - u32 mask, u32 data) -{ - u32 temp = readl(mipi_tx->regs + offset); - - writel((temp & ~mask) | (data & mask), mipi_tx->regs + offset); -} - -int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); - - dev_dbg(mipi_tx->dev, "set rate: %lu Hz\n", rate); - - mipi_tx->data_rate = rate; - - return 0; -} - -unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); - - return mipi_tx->data_rate; -} - -static int mtk_mipi_tx_power_on(struct phy *phy) -{ - struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); - int ret; - - /* Power up core and enable PLL */ - ret = clk_prepare_enable(mipi_tx->pll); - if (ret < 0) - return ret; - - /* Enable DSI Lane LDO outputs, disable pad tie low */ - mipi_tx->driver_data->mipi_tx_enable_signal(phy); - return 0; -} - -static int mtk_mipi_tx_power_off(struct phy *phy) -{ - struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); - - /* Enable pad tie low, disable DSI Lane LDO outputs */ - mipi_tx->driver_data->mipi_tx_disable_signal(phy); - - /* Disable PLL and power down core */ - clk_disable_unprepare(mipi_tx->pll); - - return 0; -} - -static const struct phy_ops mtk_mipi_tx_ops = { - .power_on = mtk_mipi_tx_power_on, - .power_off = mtk_mipi_tx_power_off, - .owner = THIS_MODULE, -}; - -static void mtk_mipi_tx_get_calibration_datal(struct mtk_mipi_tx *mipi_tx) -{ - struct nvmem_cell *cell; - size_t len; - u32 *buf; - - cell = nvmem_cell_get(mipi_tx->dev, "calibration-data"); - if (IS_ERR(cell)) { - dev_info(mipi_tx->dev, "can't get nvmem_cell_get, ignore it\n"); - return; - } - buf = (u32 *)nvmem_cell_read(cell, &len); - nvmem_cell_put(cell); - - if (IS_ERR(buf)) { - dev_info(mipi_tx->dev, "can't get data, ignore it\n"); - return; - } - - if (len < 3 * sizeof(u32)) { - dev_info(mipi_tx->dev, "invalid calibration 
data\n"); - kfree(buf); - return; - } - - mipi_tx->rt_code[0] = ((buf[0] >> 6 & 0x1f) << 5) | - (buf[0] >> 11 & 0x1f); - mipi_tx->rt_code[1] = ((buf[1] >> 27 & 0x1f) << 5) | - (buf[0] >> 1 & 0x1f); - mipi_tx->rt_code[2] = ((buf[1] >> 17 & 0x1f) << 5) | - (buf[1] >> 22 & 0x1f); - mipi_tx->rt_code[3] = ((buf[1] >> 7 & 0x1f) << 5) | - (buf[1] >> 12 & 0x1f); - mipi_tx->rt_code[4] = ((buf[2] >> 27 & 0x1f) << 5) | - (buf[1] >> 2 & 0x1f); - kfree(buf); -} - -static int mtk_mipi_tx_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct mtk_mipi_tx *mipi_tx; - struct resource *mem; - const char *ref_clk_name; - struct clk *ref_clk; - struct clk_init_data clk_init = { - .num_parents = 1, - .parent_names = (const char * const *)&ref_clk_name, - .flags = CLK_SET_RATE_GATE, - }; - struct phy *phy; - struct phy_provider *phy_provider; - int ret; - - mipi_tx = devm_kzalloc(dev, sizeof(*mipi_tx), GFP_KERNEL); - if (!mipi_tx) - return -ENOMEM; - - mipi_tx->driver_data = of_device_get_match_data(dev); - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mipi_tx->regs = devm_ioremap_resource(dev, mem); - if (IS_ERR(mipi_tx->regs)) { - ret = PTR_ERR(mipi_tx->regs); - dev_err(dev, "Failed to get memory resource: %d\n", ret); - return ret; - } - - ref_clk = devm_clk_get(dev, NULL); - if (IS_ERR(ref_clk)) { - ret = PTR_ERR(ref_clk); - dev_err(dev, "Failed to get reference clock: %d\n", ret); - return ret; - } - - ret = of_property_read_u32(dev->of_node, "drive-strength-microamp", - &mipi_tx->mipitx_drive); - /* If can't get the "mipi_tx->mipitx_drive", set it default 0x8 */ - if (ret < 0) - mipi_tx->mipitx_drive = 4600; - - /* check the mipitx_drive valid */ - if (mipi_tx->mipitx_drive > 6000 || mipi_tx->mipitx_drive < 3000) { - dev_warn(dev, "drive-strength-microamp is invalid %d, not in 3000 ~ 6000\n", - mipi_tx->mipitx_drive); - mipi_tx->mipitx_drive = clamp_val(mipi_tx->mipitx_drive, 3000, - 6000); - } - - ref_clk_name = __clk_get_name(ref_clk); - - ret = of_property_read_string(dev->of_node, "clock-output-names", - &clk_init.name); - if (ret < 0) { - dev_err(dev, "Failed to read clock-output-names: %d\n", ret); - return ret; - } - - clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops; - - mipi_tx->pll_hw.init = &clk_init; - mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw); - if (IS_ERR(mipi_tx->pll)) { - ret = PTR_ERR(mipi_tx->pll); - dev_err(dev, "Failed to register PLL: %d\n", ret); - return ret; - } - - phy = devm_phy_create(dev, NULL, &mtk_mipi_tx_ops); - if (IS_ERR(phy)) { - ret = PTR_ERR(phy); - dev_err(dev, "Failed to create MIPI D-PHY: %d\n", ret); - return ret; - } - phy_set_drvdata(phy, mipi_tx); - - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (IS_ERR(phy_provider)) { - ret = PTR_ERR(phy_provider); - return ret; - } - - mipi_tx->dev = dev; - - mtk_mipi_tx_get_calibration_datal(mipi_tx); - - return of_clk_add_provider(dev->of_node, of_clk_src_simple_get, - mipi_tx->pll); -} - -static int mtk_mipi_tx_remove(struct platform_device *pdev) -{ - of_clk_del_provider(pdev->dev.of_node); - return 0; -} - -static const struct of_device_id mtk_mipi_tx_match[] = { - { .compatible = "mediatek,mt2701-mipi-tx", - .data = &mt2701_mipitx_data }, - { .compatible = "mediatek,mt8173-mipi-tx", - .data = &mt8173_mipitx_data }, - { .compatible = "mediatek,mt8183-mipi-tx", - .data = &mt8183_mipitx_data }, - { }, -}; - -struct platform_driver mtk_mipi_tx_driver = { - .probe = mtk_mipi_tx_probe, - .remove = mtk_mipi_tx_remove, - .driver = { - .name 
= "mediatek-mipi-tx", - .of_match_table = mtk_mipi_tx_match, - }, -}; - diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h deleted file mode 100644 index c76f07c3fdeb..000000000000 --- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h +++ /dev/null @@ -1,53 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) 2019 MediaTek Inc. - * Author: Jitao Shi <jitao.shi@mediatek.com> - */ - -#ifndef _MTK_MIPI_TX_H -#define _MTK_MIPI_TX_H - -#include <linux/clk.h> -#include <linux/clk-provider.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/nvmem-consumer.h> -#include <linux/of_device.h> -#include <linux/platform_device.h> -#include <linux/phy/phy.h> -#include <linux/slab.h> - -struct mtk_mipitx_data { - const u32 mppll_preserve; - const struct clk_ops *mipi_tx_clk_ops; - void (*mipi_tx_enable_signal)(struct phy *phy); - void (*mipi_tx_disable_signal)(struct phy *phy); -}; - -struct mtk_mipi_tx { - struct device *dev; - void __iomem *regs; - u32 data_rate; - u32 mipitx_drive; - u32 rt_code[5]; - const struct mtk_mipitx_data *driver_data; - struct clk_hw pll_hw; - struct clk *pll; -}; - -struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw); -void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits); -void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits); -void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 mask, - u32 data); -int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate); -unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate); - -extern const struct mtk_mipitx_data mt2701_mipitx_data; -extern const struct mtk_mipitx_data mt8173_mipitx_data; -extern const struct mtk_mipitx_data mt8183_mipitx_data; - -#endif diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c deleted file mode 100644 index f18db14d8b63..000000000000 --- a/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c +++ /dev/null @@ -1,288 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2019 MediaTek Inc. 
- * Author: jitao.shi <jitao.shi@mediatek.com> - */ - -#include "mtk_mipi_tx.h" - -#define MIPITX_DSI_CON 0x00 -#define RG_DSI_LDOCORE_EN BIT(0) -#define RG_DSI_CKG_LDOOUT_EN BIT(1) -#define RG_DSI_BCLK_SEL (3 << 2) -#define RG_DSI_LD_IDX_SEL (7 << 4) -#define RG_DSI_PHYCLK_SEL (2 << 8) -#define RG_DSI_DSICLK_FREQ_SEL BIT(10) -#define RG_DSI_LPTX_CLMP_EN BIT(11) - -#define MIPITX_DSI_CLOCK_LANE 0x04 -#define MIPITX_DSI_DATA_LANE0 0x08 -#define MIPITX_DSI_DATA_LANE1 0x0c -#define MIPITX_DSI_DATA_LANE2 0x10 -#define MIPITX_DSI_DATA_LANE3 0x14 -#define RG_DSI_LNTx_LDOOUT_EN BIT(0) -#define RG_DSI_LNTx_CKLANE_EN BIT(1) -#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2) -#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3) -#define RG_DSI_LNTx_LPTX_IMINUS BIT(4) -#define RG_DSI_LNTx_LPCD_IPLUS BIT(5) -#define RG_DSI_LNTx_LPCD_IMINUS BIT(6) -#define RG_DSI_LNTx_RT_CODE (0xf << 8) - -#define MIPITX_DSI_TOP_CON 0x40 -#define RG_DSI_LNT_INTR_EN BIT(0) -#define RG_DSI_LNT_HS_BIAS_EN BIT(1) -#define RG_DSI_LNT_IMP_CAL_EN BIT(2) -#define RG_DSI_LNT_TESTMODE_EN BIT(3) -#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4) -#define RG_DSI_LNT_AIO_SEL (7 << 8) -#define RG_DSI_PAD_TIE_LOW_EN BIT(11) -#define RG_DSI_DEBUG_INPUT_EN BIT(12) -#define RG_DSI_PRESERVE (7 << 13) - -#define MIPITX_DSI_BG_CON 0x44 -#define RG_DSI_BG_CORE_EN BIT(0) -#define RG_DSI_BG_CKEN BIT(1) -#define RG_DSI_BG_DIV (0x3 << 2) -#define RG_DSI_BG_FAST_CHARGE BIT(4) -#define RG_DSI_VOUT_MSK (0x3ffff << 5) -#define RG_DSI_V12_SEL (7 << 5) -#define RG_DSI_V10_SEL (7 << 8) -#define RG_DSI_V072_SEL (7 << 11) -#define RG_DSI_V04_SEL (7 << 14) -#define RG_DSI_V032_SEL (7 << 17) -#define RG_DSI_V02_SEL (7 << 20) -#define RG_DSI_BG_R1_TRIM (0xf << 24) -#define RG_DSI_BG_R2_TRIM (0xf << 28) - -#define MIPITX_DSI_PLL_CON0 0x50 -#define RG_DSI_MPPLL_PLL_EN BIT(0) -#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1) -#define RG_DSI_MPPLL_PREDIV (3 << 1) -#define RG_DSI_MPPLL_TXDIV0 (3 << 3) -#define RG_DSI_MPPLL_TXDIV1 (3 << 5) -#define RG_DSI_MPPLL_POSDIV (7 << 7) -#define RG_DSI_MPPLL_MONVC_EN BIT(10) -#define RG_DSI_MPPLL_MONREF_EN BIT(11) -#define RG_DSI_MPPLL_VOD_EN BIT(12) - -#define MIPITX_DSI_PLL_CON1 0x54 -#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0) -#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1) -#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2) -#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16) - -#define MIPITX_DSI_PLL_CON2 0x58 - -#define MIPITX_DSI_PLL_TOP 0x64 -#define RG_DSI_MPPLL_PRESERVE (0xff << 8) - -#define MIPITX_DSI_PLL_PWR 0x68 -#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0) -#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1) -#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8) - -#define MIPITX_DSI_SW_CTRL 0x80 -#define SW_CTRL_EN BIT(0) - -#define MIPITX_DSI_SW_CTRL_CON0 0x84 -#define SW_LNTC_LPTX_PRE_OE BIT(0) -#define SW_LNTC_LPTX_OE BIT(1) -#define SW_LNTC_LPTX_P BIT(2) -#define SW_LNTC_LPTX_N BIT(3) -#define SW_LNTC_HSTX_PRE_OE BIT(4) -#define SW_LNTC_HSTX_OE BIT(5) -#define SW_LNTC_HSTX_ZEROCLK BIT(6) -#define SW_LNT0_LPTX_PRE_OE BIT(7) -#define SW_LNT0_LPTX_OE BIT(8) -#define SW_LNT0_LPTX_P BIT(9) -#define SW_LNT0_LPTX_N BIT(10) -#define SW_LNT0_HSTX_PRE_OE BIT(11) -#define SW_LNT0_HSTX_OE BIT(12) -#define SW_LNT0_LPRX_EN BIT(13) -#define SW_LNT1_LPTX_PRE_OE BIT(14) -#define SW_LNT1_LPTX_OE BIT(15) -#define SW_LNT1_LPTX_P BIT(16) -#define SW_LNT1_LPTX_N BIT(17) -#define SW_LNT1_HSTX_PRE_OE BIT(18) -#define SW_LNT1_HSTX_OE BIT(19) -#define SW_LNT2_LPTX_PRE_OE BIT(20) -#define SW_LNT2_LPTX_OE BIT(21) -#define SW_LNT2_LPTX_P BIT(22) -#define SW_LNT2_LPTX_N BIT(23) -#define SW_LNT2_HSTX_PRE_OE BIT(24) -#define 
SW_LNT2_HSTX_OE BIT(25) - -static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw) -{ - struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); - u8 txdiv, txdiv0, txdiv1; - u64 pcw; - - dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate); - - if (mipi_tx->data_rate >= 500000000) { - txdiv = 1; - txdiv0 = 0; - txdiv1 = 0; - } else if (mipi_tx->data_rate >= 250000000) { - txdiv = 2; - txdiv0 = 1; - txdiv1 = 0; - } else if (mipi_tx->data_rate >= 125000000) { - txdiv = 4; - txdiv0 = 2; - txdiv1 = 0; - } else if (mipi_tx->data_rate > 62000000) { - txdiv = 8; - txdiv0 = 2; - txdiv1 = 1; - } else if (mipi_tx->data_rate >= 50000000) { - txdiv = 16; - txdiv0 = 2; - txdiv1 = 2; - } else { - return -EINVAL; - } - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON, - RG_DSI_VOUT_MSK | - RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN, - (4 << 20) | (4 << 17) | (4 << 14) | - (4 << 11) | (4 << 8) | (4 << 5) | - RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN); - - usleep_range(30, 100); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON, - RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN, - (8 << 4) | RG_DSI_LNT_HS_BIAS_EN); - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON, - RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR, - RG_DSI_MPPLL_SDM_PWR_ON | - RG_DSI_MPPLL_SDM_ISO_EN, - RG_DSI_MPPLL_SDM_PWR_ON); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, - RG_DSI_MPPLL_PLL_EN); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0, - RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 | - RG_DSI_MPPLL_PREDIV, - (txdiv0 << 3) | (txdiv1 << 5)); - - /* - * PLL PCW config - * PCW bit 24~30 = integer part of pcw - * PCW bit 0~23 = fractional part of pcw - * pcw = data_Rate*4*txdiv/(Ref_clk*2); - * Post DIV =4, so need data_Rate*4 - * Ref_clk is 26MHz - */ - pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24, - 26000000); - writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2); - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1, - RG_DSI_MPPLL_SDM_FRA_EN); - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN); - - usleep_range(20, 100); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1, - RG_DSI_MPPLL_SDM_SSC_EN); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP, - RG_DSI_MPPLL_PRESERVE, - mipi_tx->driver_data->mppll_preserve); - - return 0; -} - -static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw) -{ - struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); - - dev_dbg(mipi_tx->dev, "unprepare\n"); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, - RG_DSI_MPPLL_PLL_EN); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP, - RG_DSI_MPPLL_PRESERVE, 0); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR, - RG_DSI_MPPLL_SDM_ISO_EN | - RG_DSI_MPPLL_SDM_PWR_ON, - RG_DSI_MPPLL_SDM_ISO_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON, - RG_DSI_LNT_HS_BIAS_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON, - RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON, - RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, - RG_DSI_MPPLL_DIV_MSK); -} - -static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) -{ - return clamp_val(rate, 50000000, 1250000000); -} - -static const struct clk_ops mtk_mipi_tx_pll_ops = { - .prepare = mtk_mipi_tx_pll_prepare, - .unprepare = mtk_mipi_tx_pll_unprepare, - .round_rate = mtk_mipi_tx_pll_round_rate, - .set_rate = 
mtk_mipi_tx_pll_set_rate, - .recalc_rate = mtk_mipi_tx_pll_recalc_rate, -}; - -static void mtk_mipi_tx_power_on_signal(struct phy *phy) -{ - struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); - u32 reg; - - for (reg = MIPITX_DSI_CLOCK_LANE; - reg <= MIPITX_DSI_DATA_LANE3; reg += 4) - mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON, - RG_DSI_PAD_TIE_LOW_EN); -} - -static void mtk_mipi_tx_power_off_signal(struct phy *phy) -{ - struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); - u32 reg; - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON, - RG_DSI_PAD_TIE_LOW_EN); - - for (reg = MIPITX_DSI_CLOCK_LANE; - reg <= MIPITX_DSI_DATA_LANE3; reg += 4) - mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN); -} - -const struct mtk_mipitx_data mt2701_mipitx_data = { - .mppll_preserve = (3 << 8), - .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops, - .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal, - .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal, -}; - -const struct mtk_mipitx_data mt8173_mipitx_data = { - .mppll_preserve = (0 << 8), - .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops, - .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal, - .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal, -}; diff --git a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c deleted file mode 100644 index 9f3e55aeebb2..000000000000 --- a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c +++ /dev/null @@ -1,177 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2019 MediaTek Inc. - * Author: jitao.shi <jitao.shi@mediatek.com> - */ - -#include "mtk_mipi_tx.h" - -#define MIPITX_LANE_CON 0x000c -#define RG_DSI_CPHY_T1DRV_EN BIT(0) -#define RG_DSI_ANA_CK_SEL BIT(1) -#define RG_DSI_PHY_CK_SEL BIT(2) -#define RG_DSI_CPHY_EN BIT(3) -#define RG_DSI_PHYCK_INV_EN BIT(4) -#define RG_DSI_PWR04_EN BIT(5) -#define RG_DSI_BG_LPF_EN BIT(6) -#define RG_DSI_BG_CORE_EN BIT(7) -#define RG_DSI_PAD_TIEL_SEL BIT(8) - -#define MIPITX_VOLTAGE_SEL 0x0010 -#define RG_DSI_HSTX_LDO_REF_SEL (0xf << 6) - -#define MIPITX_PLL_PWR 0x0028 -#define MIPITX_PLL_CON0 0x002c -#define MIPITX_PLL_CON1 0x0030 -#define MIPITX_PLL_CON2 0x0034 -#define MIPITX_PLL_CON3 0x0038 -#define MIPITX_PLL_CON4 0x003c -#define RG_DSI_PLL_IBIAS (3 << 10) - -#define MIPITX_D2P_RTCODE 0x0100 -#define MIPITX_D2_SW_CTL_EN 0x0144 -#define MIPITX_D0_SW_CTL_EN 0x0244 -#define MIPITX_CK_CKMODE_EN 0x0328 -#define DSI_CK_CKMODE_EN BIT(0) -#define MIPITX_CK_SW_CTL_EN 0x0344 -#define MIPITX_D1_SW_CTL_EN 0x0444 -#define MIPITX_D3_SW_CTL_EN 0x0544 -#define DSI_SW_CTL_EN BIT(0) -#define AD_DSI_PLL_SDM_PWR_ON BIT(0) -#define AD_DSI_PLL_SDM_ISO_EN BIT(1) - -#define RG_DSI_PLL_EN BIT(4) -#define RG_DSI_PLL_POSDIV (0x7 << 8) - -static int mtk_mipi_tx_pll_enable(struct clk_hw *hw) -{ - struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); - unsigned int txdiv, txdiv0; - u64 pcw; - - dev_dbg(mipi_tx->dev, "enable: %u bps\n", mipi_tx->data_rate); - - if (mipi_tx->data_rate >= 2000000000) { - txdiv = 1; - txdiv0 = 0; - } else if (mipi_tx->data_rate >= 1000000000) { - txdiv = 2; - txdiv0 = 1; - } else if (mipi_tx->data_rate >= 500000000) { - txdiv = 4; - txdiv0 = 2; - } else if (mipi_tx->data_rate > 250000000) { - txdiv = 8; - txdiv0 = 3; - } else if (mipi_tx->data_rate >= 125000000) { - txdiv = 16; - txdiv0 = 4; - } else { - return -EINVAL; - } - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS); - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, 
AD_DSI_PLL_SDM_PWR_ON); - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN); - udelay(1); - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN); - pcw = div_u64(((u64)mipi_tx->data_rate * txdiv) << 24, 26000000); - writel(pcw, mipi_tx->regs + MIPITX_PLL_CON0); - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV, - txdiv0 << 8); - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN); - - return 0; -} - -static void mtk_mipi_tx_pll_disable(struct clk_hw *hw) -{ - struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN); - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN); - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON); -} - -static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) -{ - return clamp_val(rate, 50000000, 1600000000); -} - -static const struct clk_ops mtk_mipi_tx_pll_ops = { - .enable = mtk_mipi_tx_pll_enable, - .disable = mtk_mipi_tx_pll_disable, - .round_rate = mtk_mipi_tx_pll_round_rate, - .set_rate = mtk_mipi_tx_pll_set_rate, - .recalc_rate = mtk_mipi_tx_pll_recalc_rate, -}; - -static void mtk_mipi_tx_config_calibration_data(struct mtk_mipi_tx *mipi_tx) -{ - int i, j; - - for (i = 0; i < 5; i++) { - if ((mipi_tx->rt_code[i] & 0x1f) == 0) - mipi_tx->rt_code[i] |= 0x10; - - if ((mipi_tx->rt_code[i] >> 5 & 0x1f) == 0) - mipi_tx->rt_code[i] |= 0x10 << 5; - - for (j = 0; j < 10; j++) - mtk_mipi_tx_update_bits(mipi_tx, - MIPITX_D2P_RTCODE * (i + 1) + j * 4, - 1, mipi_tx->rt_code[i] >> j & 1); - } -} - -static void mtk_mipi_tx_power_on_signal(struct phy *phy) -{ - struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); - - /* BG_LPF_EN / BG_CORE_EN */ - writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN, - mipi_tx->regs + MIPITX_LANE_CON); - usleep_range(30, 100); - writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN, - mipi_tx->regs + MIPITX_LANE_CON); - - /* Switch OFF each Lane */ - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN); - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN); - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN); - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN); - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_VOLTAGE_SEL, - RG_DSI_HSTX_LDO_REF_SEL, - (mipi_tx->mipitx_drive - 3000) / 200 << 6); - - mtk_mipi_tx_config_calibration_data(mipi_tx); - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN); -} - -static void mtk_mipi_tx_power_off_signal(struct phy *phy) -{ - struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); - - /* Switch ON each Lane */ - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN); - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN); - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN); - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN); - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN); - - writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN, - mipi_tx->regs + MIPITX_LANE_CON); - writel(RG_DSI_PAD_TIEL_SEL, mipi_tx->regs + MIPITX_LANE_CON); -} - -const struct mtk_mipitx_data mt8183_mipitx_data = { - .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops, - .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal, - .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal, -}; diff --git a/drivers/gpu/drm/meson/meson_drv.c 
b/drivers/gpu/drm/meson/meson_drv.c index 8b9c8dd788c4..42c5d3246cfc 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -90,7 +90,7 @@ static int meson_dumb_create(struct drm_file *file, struct drm_device *dev, DEFINE_DRM_GEM_CMA_FOPS(fops); -static struct drm_driver meson_driver = { +static const struct drm_driver meson_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, /* IRQ */ @@ -389,15 +389,17 @@ static void meson_drv_unbind(struct device *dev) meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2); } + drm_dev_unregister(drm); + drm_kms_helper_poll_fini(drm); + drm_atomic_helper_shutdown(drm); + component_unbind_all(dev, drm); + drm_irq_uninstall(drm); + drm_dev_put(drm); + if (priv->afbcd.ops) { priv->afbcd.ops->reset(priv); meson_rdma_free(priv); } - - drm_dev_unregister(drm); - drm_irq_uninstall(drm); - drm_kms_helper_poll_fini(drm); - drm_dev_put(drm); } static const struct component_master_ops meson_drv_master_ops = { diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 29a8ff41595d..7f8eea494147 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -941,6 +941,11 @@ static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi) } +static void meson_disable_regulator(void *data) +{ + regulator_disable(data); +} + static int meson_dw_hdmi_bind(struct device *dev, struct device *master, void *data) { @@ -989,6 +994,10 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, ret = regulator_enable(meson_dw_hdmi->hdmi_supply); if (ret) return ret; + ret = devm_add_action_or_reset(dev, meson_disable_regulator, + meson_dw_hdmi->hdmi_supply); + if (ret) + return ret; } meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev, @@ -1064,8 +1073,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, DRM_DEBUG_DRIVER("encoder initialized\n"); - meson_dw_hdmi_init(meson_dw_hdmi); - /* Bridge / Connector */ dw_plat_data->priv_data = meson_dw_hdmi; @@ -1088,6 +1095,8 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, if (IS_ERR(meson_dw_hdmi->hdmi)) return PTR_ERR(meson_dw_hdmi->hdmi); + meson_dw_hdmi_init(meson_dw_hdmi); + next_bridge = of_drm_find_bridge(pdev->dev.of_node); if (next_bridge) drm_bridge_attach(encoder, next_bridge, diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c index 0eb86943a358..2a82119eb58e 100644 --- a/drivers/gpu/drm/meson/meson_vclk.c +++ b/drivers/gpu/drm/meson/meson_vclk.c @@ -131,7 +131,7 @@ enum { VID_PLL_DIV_15, }; -void meson_vid_pll_set(struct meson_drm *priv, unsigned int div) +static void meson_vid_pll_set(struct meson_drm *priv, unsigned int div) { unsigned int shift_val = 0; unsigned int shift_sel = 0; @@ -487,9 +487,9 @@ static inline unsigned int pll_od_to_reg(unsigned int od) return 0; } -void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m, - unsigned int frac, unsigned int od1, - unsigned int od2, unsigned int od3) +static void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m, + unsigned int frac, unsigned int od1, + unsigned int od2, unsigned int od3) { unsigned int val; diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c index f93c725b6f02..5e2236ec189f 100644 --- a/drivers/gpu/drm/meson/meson_venc.c +++ b/drivers/gpu/drm/meson/meson_venc.c @@ -890,8 +890,8 @@ bool meson_venc_hdmi_supported_vic(int vic) } 
EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_vic); -void meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode, - union meson_hdmi_venc_mode *dmt_mode) +static void meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode, + union meson_hdmi_venc_mode *dmt_mode) { memset(dmt_mode, 0, sizeof(*dmt_mode)); diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c index 85c74364ce24..1cb7d120d18f 100644 --- a/drivers/gpu/drm/mga/mga_dma.c +++ b/drivers/gpu/drm/mga/mga_dma.c @@ -25,7 +25,7 @@ * DEALINGS IN THE SOFTWARE. */ -/** +/* * \file mga_dma.c * DMA support for MGA G200 / G400. * @@ -435,7 +435,7 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags) } #if IS_ENABLED(CONFIG_AGP) -/** +/* * Bootstrap the driver for AGP DMA. * * \todo @@ -610,7 +610,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev, } #endif -/** +/* * Bootstrap the driver for PCI DMA. * * \todo @@ -1143,7 +1143,7 @@ int mga_dma_buffers(struct drm_device *dev, void *data, return ret; } -/** +/* * Called just before the module is unloaded. */ void mga_driver_unload(struct drm_device *dev) @@ -1152,7 +1152,7 @@ void mga_driver_unload(struct drm_device *dev) dev->dev_private = NULL; } -/** +/* * Called when the last opener of the device is closed. */ void mga_driver_lastclose(struct drm_device *dev) diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c index 77a0b006f066..0dec4062e5a2 100644 --- a/drivers/gpu/drm/mga/mga_state.c +++ b/drivers/gpu/drm/mga/mga_state.c @@ -942,7 +942,6 @@ static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *fi struct drm_device_dma *dma = dev->dma; drm_mga_private_t *dev_priv = dev->dev_private; struct drm_buf *buf; - drm_mga_buf_priv_t *buf_priv; drm_mga_iload_t *iload = data; DRM_DEBUG("\n"); @@ -959,7 +958,6 @@ static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *fi return -EINVAL; buf = dma->buflist[iload->idx]; - buf_priv = buf->dev_private; if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) { mga_freelist_put(dev, buf); diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 771b26aeee19..0f07f259503d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -28,7 +28,7 @@ module_param_named(modeset, mgag200_modeset, int, 0400); DEFINE_DRM_GEM_FOPS(mgag200_driver_fops); -static struct drm_driver mgag200_driver = { +static const struct drm_driver mgag200_driver = { .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, .fops = &mgag200_driver_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 38672f9e5c4f..1dfc42170059 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -9,6 +9,7 @@ */ #include <linux/delay.h> +#include <linux/dma-buf-map.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_state_helper.h> @@ -794,21 +795,16 @@ static int mgag200_crtc_set_plls(struct mga_device *mdev, long clock) case G200_SE_A: case G200_SE_B: return mga_g200se_set_plls(mdev, clock); - break; case G200_WB: case G200_EW3: return mga_g200wb_set_plls(mdev, clock); - break; case G200_EV: return mga_g200ev_set_plls(mdev, clock); - break; case G200_EH: case G200_EH3: return mga_g200eh_set_plls(mdev, clock); - break; case G200_ER: return mga_g200er_set_plls(mdev, clock); - break; } misc = RREG8(MGA_MISC_IN); @@ -1556,15 +1552,18 @@ 
mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb, struct drm_rect *clip) { struct drm_device *dev = &mdev->base; + struct dma_buf_map map; void *vmap; + int ret; - vmap = drm_gem_shmem_vmap(fb->obj[0]); - if (drm_WARN_ON(dev, !vmap)) + ret = drm_gem_shmem_vmap(fb->obj[0], &map); + if (drm_WARN_ON(dev, ret)) return; /* BUG: SHMEM BO should always be vmapped */ + vmap = map.vaddr; /* TODO: Use mapping abstraction properly */ drm_fb_memcpy_dstclip(mdev->vram, vmap, fb, clip); - drm_gem_shmem_vunmap(fb->obj[0], vmap); + drm_gem_shmem_vunmap(fb->obj[0], &map); /* Always scanout image at VRAM offset 0 */ mgag200_set_startadd(mdev, (u32)0); diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index e5816b498494..dabb4a1ccdcf 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -4,8 +4,8 @@ config DRM_MSM tristate "MSM DRM" depends on DRM depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST) + depends on IOMMU_SUPPORT depends on OF && COMMON_CLK - depends on MMU depends on QCOM_OCMEM || QCOM_OCMEM=n select IOMMU_IO_PGTABLE select QCOM_MDT_LOADER if ARCH_QCOM diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 340682cd0f32..3cc906121fb3 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -67,6 +67,7 @@ msm-y := \ disp/dpu1/dpu_hw_pingpong.o \ disp/dpu1/dpu_hw_sspp.o \ disp/dpu1/dpu_hw_dspp.o \ + disp/dpu1/dpu_hw_merge3d.o \ disp/dpu1/dpu_hw_top.o \ disp/dpu1/dpu_hw_util.o \ disp/dpu1/dpu_hw_vbif.o \ diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index f29c77d9cd42..93da6683a866 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -519,6 +519,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) struct msm_gpu *gpu; struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = priv->gpu_pdev; + struct icc_path *ocmem_icc_path; + struct icc_path *icc_path; int ret; if (!pdev) { @@ -566,13 +568,28 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) goto fail; } + icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem"); + ret = IS_ERR(icc_path); + if (ret) + goto fail; + + ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem"); + ret = IS_ERR(ocmem_icc_path); + if (ret) { + /* allow -ENODATA, ocmem icc is optional */ + if (ret != -ENODATA) + goto fail; + ocmem_icc_path = NULL; + } + + /* * Set the ICC path to maximum speed for now by multiplying the fastest * frequency by the bus width (8). We'll want to scale this later on to * improve battery life. 
*/ - icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); - icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); + icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); + icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); return gpu; diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 2b93b33b05e4..c0be3a0f36b2 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -648,6 +648,8 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) struct msm_gpu *gpu; struct msm_drm_private *priv = dev->dev_private; struct platform_device *pdev = priv->gpu_pdev; + struct icc_path *ocmem_icc_path; + struct icc_path *icc_path; int ret; if (!pdev) { @@ -694,13 +696,27 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) goto fail; } + icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem"); + ret = IS_ERR(icc_path); + if (ret) + goto fail; + + ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem"); + ret = IS_ERR(ocmem_icc_path); + if (ret) { + /* allow -ENODATA, ocmem icc is optional */ + if (ret != -ENODATA) + goto fail; + ocmem_icc_path = NULL; + } + /* * Set the ICC path to maximum speed for now by multiplying the fastest * frequency by the bus width (8). We'll want to scale this later on to * improve battery life. */ - icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); - icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); + icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); + icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); return gpu; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index d6804a802355..a5af223eaf50 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -36,7 +36,7 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring))); } - spin_lock_irqsave(&ring->lock, flags); + spin_lock_irqsave(&ring->preempt_lock, flags); /* Copy the shadow to the actual register */ ring->cur = ring->next; @@ -44,7 +44,7 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, /* Make sure to wrap wptr if we need to */ wptr = get_wptr(ring); - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock_irqrestore(&ring->preempt_lock, flags); /* Make sure everything is posted before making a decision */ mb(); @@ -426,7 +426,7 @@ static int a5xx_preempt_start(struct msm_gpu *gpu) static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu, struct drm_gem_object *obj) { - u32 *buf = msm_gem_get_vaddr_active(obj); + u32 *buf = msm_gem_get_vaddr(obj); if (IS_ERR(buf)) return; @@ -755,12 +755,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); - /* Disable preemption if WHERE_AM_I isn't available */ - if (!a5xx_gpu->has_whereami && gpu->nr_rings > 1) { - a5xx_preempt_fini(gpu); - gpu->nr_rings = 1; - } else { - /* Create a privileged buffer for the RPTR shadow */ + /* Create a privileged buffer for the RPTR shadow */ + if (a5xx_gpu->has_whereami) { if (!a5xx_gpu->shadow_bo) { a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, sizeof(u32) * gpu->nr_rings, @@ -774,6 +770,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0])); + } else if (gpu->nr_rings > 1) { + /* Disable preemption if WHERE_AM_I isn't 
available */ + a5xx_preempt_fini(gpu); + gpu->nr_rings = 1; } a5xx_preempt_hw_init(gpu); @@ -1056,7 +1056,6 @@ static void a5xx_gpmu_err_irq(struct msm_gpu *gpu) static void a5xx_fault_detect_irq(struct msm_gpu *gpu) { struct drm_device *dev = gpu->dev; - struct msm_drm_private *priv = dev->dev_private; struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", @@ -1072,7 +1071,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu) /* Turn off the hangcheck timer to keep it from bothering us */ del_timer(&gpu->hangcheck_timer); - queue_work(priv->wq, &gpu->recover_work); + kthread_queue_work(gpu->worker, &gpu->recover_work); } #define RBBM_ERROR_MASK \ @@ -1207,7 +1206,9 @@ static int a5xx_pm_resume(struct msm_gpu *gpu) static int a5xx_pm_suspend(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); u32 mask = 0xf; + int i, ret; /* A510 has 3 XIN ports in VBIF */ if (adreno_is_a510(adreno_gpu)) @@ -1227,7 +1228,15 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000); gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000); - return msm_gpu_pm_suspend(gpu); + ret = msm_gpu_pm_suspend(gpu); + if (ret) + return ret; + + if (a5xx_gpu->has_whereami) + for (i = 0; i < gpu->nr_rings; i++) + a5xx_gpu->shadow[i] = 0; + + return 0; } static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index 7e04509c4e1f..42eaef7ad7c7 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -45,9 +45,9 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) if (!ring) return; - spin_lock_irqsave(&ring->lock, flags); + spin_lock_irqsave(&ring->preempt_lock, flags); wptr = get_wptr(ring); - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock_irqrestore(&ring->preempt_lock, flags); gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); } @@ -62,9 +62,9 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) bool empty; struct msm_ringbuffer *ring = gpu->rb[i]; - spin_lock_irqsave(&ring->lock, flags); + spin_lock_irqsave(&ring->preempt_lock, flags); empty = (get_wptr(ring) == ring->memptrs->rptr); - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock_irqrestore(&ring->preempt_lock, flags); if (!empty) return ring; @@ -78,13 +78,12 @@ static void a5xx_preempt_timer(struct timer_list *t) struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer); struct msm_gpu *gpu = &a5xx_gpu->base.base; struct drm_device *dev = gpu->dev; - struct msm_drm_private *priv = dev->dev_private; if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED)) return; DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name); - queue_work(priv->wq, &gpu->recover_work); + kthread_queue_work(gpu->worker, &gpu->recover_work); } /* Try to trigger a preemption switch */ @@ -132,9 +131,9 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu) } /* Make sure the wptr doesn't update while we're in motion */ - spin_lock_irqsave(&ring->lock, flags); + spin_lock_irqsave(&ring->preempt_lock, flags); a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring); - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock_irqrestore(&ring->preempt_lock, flags); /* Set the address of the 
incoming preemption record */ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO, @@ -162,7 +161,6 @@ void a5xx_preempt_irq(struct msm_gpu *gpu) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); struct drm_device *dev = gpu->dev; - struct msm_drm_private *priv = dev->dev_private; if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING)) return; @@ -181,7 +179,7 @@ void a5xx_preempt_irq(struct msm_gpu *gpu) set_preempt_state(a5xx_gpu, PREEMPT_FAULTED); DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n", gpu->name); - queue_work(priv->wq, &gpu->recover_work); + kthread_queue_work(gpu->worker, &gpu->recover_work); return; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 491fee410daf..e6703ae98760 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -19,8 +19,6 @@ static void a6xx_gmu_fault(struct a6xx_gmu *gmu) struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct msm_gpu *gpu = &adreno_gpu->base; - struct drm_device *dev = gpu->dev; - struct msm_drm_private *priv = dev->dev_private; /* FIXME: add a banner here */ gmu->hung = true; @@ -29,7 +27,7 @@ static void a6xx_gmu_fault(struct a6xx_gmu *gmu) del_timer(&gpu->hangcheck_timer); /* Queue the GPU handler because we need to treat this as a recovery */ - queue_work(priv->wq, &gpu->recover_work); + kthread_queue_work(gpu->worker, &gpu->recover_work); } static irqreturn_t a6xx_gmu_irq(int irq, void *data) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 948f3656c20c..130661898546 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -8,7 +8,9 @@ #include "a6xx_gpu.h" #include "a6xx_gmu.xml.h" +#include <linux/bitfield.h> #include <linux/devfreq.h> +#include <linux/soc/qcom/llcc-qcom.h> #define GPU_PAS_ID 13 @@ -30,7 +32,7 @@ static inline bool _a6xx_check_idle(struct msm_gpu *gpu) A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT); } -bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { /* wait for CP to drain ringbuffer: */ if (!adreno_idle(gpu, ring)) @@ -65,7 +67,7 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring))); } - spin_lock_irqsave(&ring->lock, flags); + spin_lock_irqsave(&ring->preempt_lock, flags); /* Copy the shadow to the actual register */ ring->cur = ring->next; @@ -73,7 +75,7 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) /* Make sure to wrap wptr if we need to */ wptr = get_wptr(ring); - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock_irqrestore(&ring->preempt_lock, flags); /* Make sure everything is posted before making a decision */ mb(); @@ -522,7 +524,7 @@ static int a6xx_cp_init(struct msm_gpu *gpu) static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu, struct drm_gem_object *obj) { - u32 *buf = msm_gem_get_vaddr_active(obj); + u32 *buf = msm_gem_get_vaddr(obj); if (IS_ERR(buf)) return; @@ -965,8 +967,6 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); - struct drm_device *dev = gpu->dev; - struct msm_drm_private *priv = dev->dev_private; struct msm_ringbuffer *ring = 
gpu->funcs->active_ring(gpu); /* @@ -989,7 +989,7 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu) /* Turn off the hangcheck timer to keep it from bothering us */ del_timer(&gpu->hangcheck_timer); - queue_work(priv->wq, &gpu->recover_work); + kthread_queue_work(gpu->worker, &gpu->recover_work); } static irqreturn_t a6xx_irq(struct msm_gpu *gpu) @@ -1022,6 +1022,105 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu) return IRQ_HANDLED; } +static void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u32 or) +{ + return msm_rmw(a6xx_gpu->llc_mmio + (reg << 2), mask, or); +} + +static void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value) +{ + return msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2)); +} + +static void a6xx_llc_deactivate(struct a6xx_gpu *a6xx_gpu) +{ + llcc_slice_deactivate(a6xx_gpu->llc_slice); + llcc_slice_deactivate(a6xx_gpu->htw_llc_slice); +} + +static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu) +{ + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + u32 cntl1_regval = 0; + + if (IS_ERR(a6xx_gpu->llc_mmio)) + return; + + if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { + u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); + + gpu_scid &= 0x1f; + cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) | + (gpu_scid << 15) | (gpu_scid << 20); + } + + /* + * For targets with a MMU500, activate the slice but don't program the + * register. The XBL will take care of that. + */ + if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) { + if (!a6xx_gpu->have_mmu500) { + u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice); + + gpuhtw_scid &= 0x1f; + cntl1_regval |= FIELD_PREP(GENMASK(29, 25), gpuhtw_scid); + } + } + + if (cntl1_regval) { + /* + * Program the slice IDs for the various GPU blocks and GPU MMU + * pagetables + */ + if (a6xx_gpu->have_mmu500) + gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), + cntl1_regval); + else { + a6xx_llc_write(a6xx_gpu, + REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval); + + /* + * Program cacheability overrides to not allocate cache + * lines on a write miss + */ + a6xx_llc_rmw(a6xx_gpu, + REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03); + } + } +} + +static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu) +{ + llcc_slice_putd(a6xx_gpu->llc_slice); + llcc_slice_putd(a6xx_gpu->htw_llc_slice); +} + +static void a6xx_llc_slices_init(struct platform_device *pdev, + struct a6xx_gpu *a6xx_gpu) +{ + struct device_node *phandle; + + a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx"); + if (IS_ERR(a6xx_gpu->llc_mmio)) + return; + + /* + * There is a different programming path for targets with an mmu500 + * attached, so detect if that is the case + */ + phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0); + a6xx_gpu->have_mmu500 = (phandle && + of_device_is_compatible(phandle, "arm,mmu-500")); + of_node_put(phandle); + + a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU); + a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW); + + if (IS_ERR(a6xx_gpu->llc_slice) && IS_ERR(a6xx_gpu->htw_llc_slice)) + a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL); +} + static int a6xx_pm_resume(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -1038,6 +1137,8 @@ static int a6xx_pm_resume(struct msm_gpu *gpu) msm_gpu_resume_devfreq(gpu); + a6xx_llc_activate(a6xx_gpu); + return 0; } @@ -1045,12 +1146,23 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct 
a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int i, ret; trace_msm_gpu_suspend(0); + a6xx_llc_deactivate(a6xx_gpu); + devfreq_suspend_device(gpu->devfreq.devfreq); - return a6xx_gmu_stop(a6xx_gpu); + ret = a6xx_gmu_stop(a6xx_gpu); + if (ret) + return ret; + + if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) + for (i = 0; i < gpu->nr_rings; i++) + a6xx_gpu->shadow[i] = 0; + + return 0; } static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) @@ -1091,6 +1203,8 @@ static void a6xx_destroy(struct msm_gpu *gpu) drm_gem_object_put(a6xx_gpu->shadow_bo); } + a6xx_llc_slices_destroy(a6xx_gpu); + a6xx_gmu_remove(a6xx_gpu); adreno_gpu_cleanup(adreno_gpu); @@ -1209,6 +1323,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) if (info && info->revn == 650) adreno_gpu->base.hw_apriv = true; + a6xx_llc_slices_init(pdev, a6xx_gpu); + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) { a6xx_destroy(&(a6xx_gpu->base.base)); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 3eeebf6a754b..e793d329e77b 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -28,6 +28,11 @@ struct a6xx_gpu { uint32_t *shadow; bool has_whereami; + + void __iomem *llc_mmio; + void *llc_slice; + void *htw_llc_slice; + bool have_mmu500; }; #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index e9ede19193b0..c1699b4f9a89 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -80,7 +80,7 @@ struct a6xx_state_memobj { unsigned long long data[]; }; -void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize) +static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize) { struct a6xx_state_memobj *obj = kzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL); @@ -92,7 +92,7 @@ void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize) return &obj->data; } -void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src, +static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src, size_t size) { void *dst = state_kcalloc(a6xx_state, 1, size); @@ -944,7 +944,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu) return &a6xx_state->base; } -void a6xx_gpu_state_destroy(struct kref *kref) +static void a6xx_gpu_state_destroy(struct kref *kref) { struct a6xx_state_memobj *obj, *tmp; struct msm_gpu_state *state = container_of(kref, diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 58e03b20e1c7..87c8b033ad1a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -475,6 +475,11 @@ static int adreno_remove(struct platform_device *pdev) return 0; } +static void adreno_shutdown(struct platform_device *pdev) +{ + pm_runtime_force_suspend(&pdev->dev); +} + static const struct of_device_id dt_match[] = { { .compatible = "qcom,adreno" }, { .compatible = "qcom,adreno-3xx" }, @@ -509,6 +514,7 @@ static const struct dev_pm_ops adreno_pm_ops = { static struct platform_driver adreno_driver = { .probe = adreno_probe, .remove = adreno_remove, + .shutdown = adreno_shutdown, .driver = { .name = "adreno", .of_match_table = dt_match, diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 458b5b26d3c2..6cf9975e951e 100644 --- 
a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -16,6 +16,7 @@ #include <linux/soc/qcom/mdt_loader.h> #include <soc/qcom/ocmem.h> #include "adreno_gpu.h" +#include "a6xx_gpu.h" #include "msm_gem.h" #include "msm_mmu.h" @@ -189,6 +190,9 @@ struct msm_gem_address_space * adreno_iommu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct io_pgtable_domain_attr pgtbl_cfg; struct iommu_domain *iommu; struct msm_mmu *mmu; struct msm_gem_address_space *aspace; @@ -198,7 +202,20 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu, if (!iommu) return NULL; + /* + * This allows GPU to set the bus attributes required to use system + * cache on behalf of the iommu page table walker. + */ + if (!IS_ERR(a6xx_gpu->htw_llc_slice)) { + pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA; + iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg); + } + mmu = msm_iommu_new(&pdev->dev, iommu); + if (IS_ERR(mmu)) { + iommu_domain_free(iommu); + return ERR_CAST(mmu); + } /* * Use the aperture start or SZ_16M, whichever is greater. This will @@ -899,7 +916,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct adreno_platform_config *config = dev->platform_data; struct msm_gpu_config adreno_gpu_config = { 0 }; struct msm_gpu *gpu = &adreno_gpu->base; - int ret; adreno_gpu->funcs = funcs; adreno_gpu->info = adreno_info(config->rev); @@ -918,37 +934,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, pm_runtime_use_autosuspend(dev); pm_runtime_enable(dev); - ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base, + return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base, adreno_gpu->info->name, &adreno_gpu_config); - if (ret) - return ret; - - /* - * The legacy case, before "interconnect-names", only has a - * single interconnect path which is equivalent to "gfx-mem" - */ - if (!of_find_property(dev->of_node, "interconnect-names", NULL)) { - gpu->icc_path = of_icc_get(dev, NULL); - } else { - gpu->icc_path = of_icc_get(dev, "gfx-mem"); - gpu->ocmem_icc_path = of_icc_get(dev, "ocmem"); - } - - if (IS_ERR(gpu->icc_path)) { - ret = PTR_ERR(gpu->icc_path); - gpu->icc_path = NULL; - return ret; - } - - if (IS_ERR(gpu->ocmem_icc_path)) { - ret = PTR_ERR(gpu->ocmem_icc_path); - gpu->ocmem_icc_path = NULL; - /* allow -ENODATA, ocmem icc is optional */ - if (ret != -ENODATA) - return ret; - } - - return 0; } void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c index 393858ef8a83..b6b3bbab0333 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c @@ -22,6 +22,7 @@ * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting + * @DPU_PERF_MODE_MAX: maximum value, used for error checking */ enum dpu_perf_mode { DPU_PERF_MODE_NORMAL, @@ -31,9 +32,9 @@ enum dpu_perf_mode { }; /** - * @_dpu_core_perf_calc_bw() - to calculate BW per crtc - * @kms - pointer to the dpu_kms - * @crtc - pointer to a crtc + * _dpu_core_perf_calc_bw() - to calculate BW per crtc + * @kms: pointer to the dpu_kms + * @crtc: pointer to a crtc * Return: returns aggregated BW for all planes in crtc. 
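 *
 * (Editor's aside, hedged) The kernel-doc hunks in this series are all
 * the same class of fix: scripts/kernel-doc expects "@param: description",
 * so the older "@param - description" and missing-"@" spellings are
 * normalized, which keeps "make W=1" builds free of kernel-doc warnings.
 *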
*/ static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms, @@ -63,9 +64,9 @@ static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms, /** * _dpu_core_perf_calc_clk() - to calculate clock per crtc - * @kms - pointer to the dpu_kms - * @crtc - pointer to a crtc - * @state - pointer to a crtc state + * @kms: pointer to the dpu_kms + * @crtc: pointer to a crtc + * @state: pointer to a crtc state * Return: returns max clk for all planes in crtc. */ static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms, @@ -110,14 +111,11 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms, struct drm_crtc_state *state, struct dpu_core_perf_params *perf) { - struct dpu_crtc_state *dpu_cstate; - if (!kms || !kms->catalog || !crtc || !state || !perf) { DPU_ERROR("invalid parameters\n"); return; } - dpu_cstate = to_dpu_crtc_state(state); memset(perf, 0, sizeof(struct dpu_core_perf_params)); if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) { @@ -219,9 +217,6 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms, int i, ret = 0; u64 avg_bw; - if (!kms->num_paths) - return -EINVAL; - drm_for_each_crtc(tmp_crtc, crtc->dev) { if (tmp_crtc->enabled && curr_client_type == @@ -239,6 +234,9 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms, } } + if (!kms->num_paths) + return 0; + avg_bw = perf.bw_ctl; do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/ @@ -249,8 +247,8 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms, } /** - * @dpu_core_perf_crtc_release_bw() - request zero bandwidth - * @crtc - pointer to a crtc + * dpu_core_perf_crtc_release_bw() - request zero bandwidth + * @crtc: pointer to a crtc * * Function checks a state variable for the crtc, if all pending commit * requests are done, meaning no more bandwidth is needed, release diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index e55be2922c2f..56eb22554197 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -845,7 +845,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, } mode = &crtc_state->adjusted_mode; - DPU_DEBUG("%s: check", dpu_crtc->name); + DPU_DEBUG("%s: check\n", dpu_crtc->name); /* force a full mode set if active state changed */ if (crtc_state->active_changed) @@ -953,7 +953,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, } pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0; - DPU_DEBUG("%s: zpos %d", dpu_crtc->name, z_pos); + DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos); } for (i = 0; i < multirect_count; i++) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index f7f5c258b553..288e95ee8e1d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -132,9 +132,10 @@ enum dpu_enc_rc_states { * @phys_encs: Container of physical encoders managed. * @cur_master: Pointer to the current master in this mode. Optimization * Only valid after enable. Cleared as disable. - * @hw_pp Handle to the pingpong blocks used for the display. No. + * @cur_slave: As above but for the slave encoder. + * @hw_pp: Handle to the pingpong blocks used for the display. No. * pingpong blocks can be different than num_phys_encs. 
- * @intfs_swapped Whether or not the phys_enc interfaces have been swapped + * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped * for partial update right-only cases, such as pingpong * split where virtual pingpong does not generate IRQs * @crtc: Pointer to the currently assigned crtc. Normally you @@ -973,12 +974,11 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, struct drm_crtc *drm_crtc; struct dpu_crtc_state *cstate; struct dpu_global_state *global_state; - struct msm_display_topology topology; struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL }; - int num_lm, num_ctl, num_pp, num_dspp; + int num_lm, num_ctl, num_pp; int i, j; if (!drm_enc) { @@ -1020,8 +1020,6 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, if (drm_crtc->state->encoder_mask & drm_encoder_mask(drm_enc)) break; - topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode); - /* Query resource that have been reserved in atomic check step. */ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp, @@ -1030,7 +1028,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); - num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp, ARRAY_SIZE(hw_dspp)); @@ -1096,7 +1094,6 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = NULL; - struct msm_drm_private *priv; int i; if (!drm_enc || !drm_enc->dev) { @@ -1104,8 +1101,6 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) return; } - priv = drm_enc->dev->dev_private; - dpu_enc = to_dpu_encoder_virt(drm_enc); if (!dpu_enc || !dpu_enc->cur_master) { DPU_ERROR("invalid dpu encoder/master\n"); @@ -1207,7 +1202,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) { struct dpu_encoder_virt *dpu_enc = NULL; struct msm_drm_private *priv; - struct dpu_kms *dpu_kms; int i = 0; if (!drm_enc) { @@ -1225,7 +1219,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) dpu_enc->enabled = false; priv = drm_enc->dev->dev_private; - dpu_kms = to_dpu_kms(priv->kms); trace_dpu_enc_disable(DRMID(drm_enc)); @@ -1444,9 +1437,9 @@ static void dpu_encoder_off_work(struct work_struct *work) /** * _dpu_encoder_trigger_flush - trigger flush for a physical encoder - * drm_enc: Pointer to drm encoder structure - * phys: Pointer to physical encoder structure - * extra_flush_bits: Additional bit mask to include in flush trigger + * @drm_enc: Pointer to drm encoder structure + * @phys: Pointer to physical encoder structure + * @extra_flush_bits: Additional bit mask to include in flush trigger */ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phys, uint32_t extra_flush_bits) @@ -1483,7 +1476,7 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc, /** * _dpu_encoder_trigger_start - trigger start for a physical encoder - * phys: Pointer to physical encoder structure + * @phys: 
Pointer to physical encoder structure */ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys) { @@ -1566,7 +1559,7 @@ static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc) * encoder rather than the individual physical ones in order to handle * use cases that require visibility into multiple physical encoders at * a time. - * dpu_enc: Pointer to virtual encoder structure + * @dpu_enc: Pointer to virtual encoder structure */ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 8493d68ad841..5a056c1191df 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -437,7 +437,6 @@ static void dpu_encoder_phys_cmd_enable_helper( struct dpu_encoder_phys *phys_enc) { struct dpu_hw_ctl *ctl; - u32 flush_mask = 0; if (!phys_enc->hw_pp) { DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL); @@ -452,8 +451,7 @@ static void dpu_encoder_phys_cmd_enable_helper( return; ctl = phys_enc->hw_ctl; - ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx); - ctl->ops.update_pending_flush(ctl, flush_mask); + ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx); } static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index 805e059b50b7..9a69fad832cd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -5,6 +5,7 @@ #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ #include "dpu_encoder_phys.h" #include "dpu_hw_interrupts.h" +#include "dpu_hw_merge3d.h" #include "dpu_core_irq.h" #include "dpu_formats.h" #include "dpu_trace.h" @@ -282,6 +283,8 @@ static void dpu_encoder_phys_vid_setup_timing_engine( intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID; intf_cfg.stream_sel = 0; /* Don't care value for video mode */ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); + if (phys_enc->hw_pp->merge_3d) + intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->id; spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf, @@ -295,6 +298,12 @@ static void dpu_encoder_phys_vid_setup_timing_engine( true, phys_enc->hw_pp->idx); + if (phys_enc->hw_pp->merge_3d) { + struct dpu_hw_merge_3d *merge_3d = to_dpu_hw_merge_3d(phys_enc->hw_pp->merge_3d); + + merge_3d->ops.setup_3d_mode(merge_3d, intf_cfg.mode_3d); + } + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); programmable_fetch_config(phys_enc, &timing_params); @@ -429,8 +438,6 @@ end: static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) { struct dpu_hw_ctl *ctl; - u32 flush_mask = 0; - u32 intf_flush_mask = 0; ctl = phys_enc->hw_ctl; @@ -452,20 +459,14 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) !dpu_encoder_phys_vid_is_master(phys_enc)) goto skip_flush; - ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx); - ctl->ops.update_pending_flush(ctl, flush_mask); - - if (ctl->ops.get_bitmask_active_intf) - ctl->ops.get_bitmask_active_intf(ctl, &intf_flush_mask, - phys_enc->hw_intf->idx); - - if (ctl->ops.update_pending_intf_flush) - ctl->ops.update_pending_intf_flush(ctl, intf_flush_mask); + ctl->ops.update_pending_flush_intf(ctl, 
phys_enc->hw_intf->idx); + if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d) + ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->id); skip_flush: DPU_DEBUG_VIDENC(phys_enc, - "update pending flush ctl %d flush_mask 0%x intf_mask 0x%x\n", - ctl->idx - CTL_0, flush_mask, intf_flush_mask); + "update pending flush ctl %d intf %d\n", + ctl->idx - CTL_0, phys_enc->hw_intf->idx); /* ctl_flush & timing engine enable will be triggered by framework */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c index a05282dede91..21ff8f9e5dfd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c @@ -22,7 +22,7 @@ #define DPU_MAX_IMG_WIDTH 0x3FFF #define DPU_MAX_IMG_HEIGHT 0x3FFF -/** +/* * DPU supported format packing, bpp, and other format * information. * DPU currently only supports interleaved RGB formats diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c index ca26666d2af9..819b26e660b9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c @@ -19,6 +19,7 @@ static LIST_HEAD(dpu_hw_blk_list); /** * dpu_hw_blk_init - initialize hw block object + * @hw_blk: pointer to hw block object * @type: hw block type - enum dpu_hw_blk_type * @id: instance id of the hw block * @ops: Pointer to block operations @@ -114,7 +115,6 @@ error_start: /** * dpu_hw_blk_put - put hw_blk to free pool if decremented refcount is zero * @hw_blk: hw block to be freed - * @free_blk: function to be called when reference count goes to zero */ void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 60b304b72b7c..90393fe9e59c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -8,7 +8,6 @@ #include <linux/platform_device.h> #include "dpu_hw_mdss.h" #include "dpu_hw_catalog.h" -#include "dpu_hw_catalog_format.h" #include "dpu_kms.h" #define VIG_MASK \ @@ -41,6 +40,8 @@ #define PINGPONG_SDM845_SPLIT_MASK \ (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2)) +#define MERGE_3D_SM8150_MASK (0) + #define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC) #define INTF_SDM845_MASK (0) @@ -60,6 +61,79 @@ #define STRCAT(X, Y) (X Y) +static const uint32_t plane_formats[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, +}; + +static const uint32_t plane_formats_yuv[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + 
DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, + DRM_FORMAT_NV16, + DRM_FORMAT_NV61, + DRM_FORMAT_VYUY, + DRM_FORMAT_UYVY, + DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_YUV420, + DRM_FORMAT_YVU420, +}; + /************************************************************* * DPU sub blocks config *************************************************************/ @@ -111,7 +185,6 @@ static const struct dpu_caps sm8150_dpu_caps = { static const struct dpu_caps sm8250_dpu_caps = { .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .max_mixer_blendstages = 0xb, - .max_linewidth = 4096, .qseed_type = DPU_SSPP_SCALER_QSEED3, /* TODO: qseed3 lite */ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */ .ubwc_version = DPU_HW_UBWC_VER_40, @@ -433,9 +506,9 @@ static const struct dpu_lm_cfg sc7180_lm[] = { static const struct dpu_lm_cfg sm8150_lm[] = { LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK, - &sdm845_lm_sblk, PINGPONG_0, LM_1, 0), + &sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0), LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK, - &sdm845_lm_sblk, PINGPONG_1, LM_0, 0), + &sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1), LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK, &sdm845_lm_sblk, PINGPONG_2, LM_3, 0), LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK, @@ -454,16 +527,28 @@ static const struct dpu_dspp_sub_blks sc7180_dspp_sblk = { .len = 0x90, .version = 0x10000}, }; -#define DSPP_BLK(_name, _id, _base) \ +static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = { + .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700, + .len = 0x90, .version = 0x40000}, +}; + +#define DSPP_BLK(_name, _id, _base, _sblk) \ {\ .name = _name, .id = _id, \ .base = _base, .len = 0x1800, \ .features = DSPP_SC7180_MASK, \ - .sblk = &sc7180_dspp_sblk \ + .sblk = _sblk \ } static const struct dpu_dspp_cfg sc7180_dspp[] = { - DSPP_BLK("dspp_0", DSPP_0, 0x54000), + DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sc7180_dspp_sblk), +}; + +static const struct dpu_dspp_cfg sm8150_dspp[] = { + DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sm8150_dspp_sblk), + DSPP_BLK("dspp_1", DSPP_1, 0x56000, &sm8150_dspp_sblk), + DSPP_BLK("dspp_2", DSPP_2, 0x58000, &sm8150_dspp_sblk), + DSPP_BLK("dspp_3", DSPP_3, 0x5a000, &sm8150_dspp_sblk), }; /************************************************************* @@ -481,40 +566,59 @@ static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = { .len = 0x20, .version = 0x10000}, }; -#define PP_BLK_TE(_name, _id, _base) \ +#define PP_BLK_TE(_name, _id, _base, _merge_3d) \ {\ .name = _name, .id = _id, \ .base = _base, .len = 0xd4, \ .features = PINGPONG_SDM845_SPLIT_MASK, \ + .merge_3d = _merge_3d, \ .sblk = &sdm845_pp_sblk_te \ } -#define PP_BLK(_name, _id, _base) \ +#define PP_BLK(_name, _id, _base, _merge_3d) \ {\ .name = _name, .id = _id, \ .base = _base, .len = 0xd4, \ .features = PINGPONG_SDM845_MASK, \ + .merge_3d = _merge_3d, \ .sblk = &sdm845_pp_sblk \ } static const struct dpu_pingpong_cfg sdm845_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800), - PP_BLK("pingpong_2", PINGPONG_2, 0x71000), - PP_BLK("pingpong_3", PINGPONG_3, 0x71800), + PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0), + PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0), + PP_BLK("pingpong_2", PINGPONG_2, 0x71000, 0), + PP_BLK("pingpong_3", PINGPONG_3, 0x71800, 0), }; static struct 
dpu_pingpong_cfg sc7180_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800), + PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0), + PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0), }; static const struct dpu_pingpong_cfg sm8150_pp[] = { - PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000), - PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800), - PP_BLK("pingpong_2", PINGPONG_2, 0x71000), - PP_BLK("pingpong_3", PINGPONG_3, 0x71800), - PP_BLK("pingpong_4", PINGPONG_4, 0x72000), - PP_BLK("pingpong_5", PINGPONG_5, 0x72800), + PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0), + PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0), + PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1), + PP_BLK("pingpong_3", PINGPONG_3, 0x71800, MERGE_3D_1), + PP_BLK("pingpong_4", PINGPONG_4, 0x72000, MERGE_3D_2), + PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2), +}; + +/************************************************************* + * MERGE_3D sub blocks config + *************************************************************/ +#define MERGE_3D_BLK(_name, _id, _base) \ + {\ + .name = _name, .id = _id, \ + .base = _base, .len = 0x100, \ + .features = MERGE_3D_SM8150_MASK, \ + .sblk = NULL \ + } + +static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = { + MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x83000), + MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x83100), + MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x83200), }; /************************************************************* @@ -836,8 +940,12 @@ static void sm8150_cfg_init(struct dpu_mdss_cfg *dpu_cfg) .sspp = sdm845_sspp, .mixer_count = ARRAY_SIZE(sm8150_lm), .mixer = sm8150_lm, + .dspp_count = ARRAY_SIZE(sm8150_dspp), + .dspp = sm8150_dspp, .pingpong_count = ARRAY_SIZE(sm8150_pp), .pingpong = sm8150_pp, + .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d), + .merge_3d = sm8150_merge_3d, .intf_count = ARRAY_SIZE(sm8150_intf), .intf = sm8150_intf, .vbif_count = ARRAY_SIZE(sdm845_vbif), @@ -866,8 +974,12 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg) .sspp = sdm845_sspp, .mixer_count = ARRAY_SIZE(sm8150_lm), .mixer = sm8150_lm, + .dspp_count = ARRAY_SIZE(sm8150_dspp), + .dspp = sm8150_dspp, .pingpong_count = ARRAY_SIZE(sm8150_pp), .pingpong = sm8150_pp, + .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d), + .merge_3d = sm8150_merge_3d, .intf_count = ARRAY_SIZE(sm8150_intf), .intf = sm8150_intf, .vbif_count = ARRAY_SIZE(sdm845_vbif), diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index 3544af1a45c5..eaef99db2d2f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -524,10 +524,24 @@ struct dpu_dspp_cfg { */ struct dpu_pingpong_cfg { DPU_HW_BLK_INFO; + u32 merge_3d; const struct dpu_pingpong_sub_blks *sblk; }; /** + * struct dpu_merge_3d_cfg - information of DSPP blocks + * @id enum identifying this block + * @base register offset of this block + * @features bit mask identifying sub-blocks/features + * supported by this block + * @sblk sub-blocks information + */ +struct dpu_merge_3d_cfg { + DPU_HW_BLK_INFO; + const struct dpu_merge_3d_sub_blks *sblk; +}; + +/** * struct dpu_intf_cfg - information of timing engine blocks * @id enum identifying this block * @base register offset of this block @@ -724,6 +738,9 @@ struct dpu_mdss_cfg { u32 pingpong_count; const struct dpu_pingpong_cfg *pingpong; + u32 merge_3d_count; + const struct dpu_merge_3d_cfg *merge_3d; + u32 intf_count; const struct 
dpu_intf_cfg *intf; @@ -767,6 +784,7 @@ struct dpu_mdss_hw_cfg_handler { #define BLK_INTF(s) ((s)->intf) #define BLK_AD(s) ((s)->ad) #define BLK_DSPP(s) ((s)->dspp) +#define BLK_MERGE3d(s) ((s)->merge_3d) /** * dpu_hw_catalog_init - dpu hardware catalog init API retrieves diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h deleted file mode 100644 index 3766f0fd0bf0..000000000000 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h +++ /dev/null @@ -1,88 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - */ - -#include "dpu_hw_mdss.h" - -static const uint32_t qcom_compressed_supported_formats[] = { - DRM_FORMAT_ABGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_BGR565, - - DRM_FORMAT_NV12, -}; - -static const uint32_t plane_formats[] = { - DRM_FORMAT_ARGB8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_RGBA8888, - DRM_FORMAT_BGRA8888, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_RGBX8888, - DRM_FORMAT_BGRX8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_RGB888, - DRM_FORMAT_BGR888, - DRM_FORMAT_RGB565, - DRM_FORMAT_BGR565, - DRM_FORMAT_ARGB1555, - DRM_FORMAT_ABGR1555, - DRM_FORMAT_RGBA5551, - DRM_FORMAT_BGRA5551, - DRM_FORMAT_XRGB1555, - DRM_FORMAT_XBGR1555, - DRM_FORMAT_RGBX5551, - DRM_FORMAT_BGRX5551, - DRM_FORMAT_ARGB4444, - DRM_FORMAT_ABGR4444, - DRM_FORMAT_RGBA4444, - DRM_FORMAT_BGRA4444, - DRM_FORMAT_XRGB4444, - DRM_FORMAT_XBGR4444, - DRM_FORMAT_RGBX4444, - DRM_FORMAT_BGRX4444, -}; - -static const uint32_t plane_formats_yuv[] = { - DRM_FORMAT_ARGB8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_RGBA8888, - DRM_FORMAT_BGRX8888, - DRM_FORMAT_BGRA8888, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_RGBX8888, - DRM_FORMAT_RGB888, - DRM_FORMAT_BGR888, - DRM_FORMAT_RGB565, - DRM_FORMAT_BGR565, - DRM_FORMAT_ARGB1555, - DRM_FORMAT_ABGR1555, - DRM_FORMAT_RGBA5551, - DRM_FORMAT_BGRA5551, - DRM_FORMAT_XRGB1555, - DRM_FORMAT_XBGR1555, - DRM_FORMAT_RGBX5551, - DRM_FORMAT_BGRX5551, - DRM_FORMAT_ARGB4444, - DRM_FORMAT_ABGR4444, - DRM_FORMAT_RGBA4444, - DRM_FORMAT_BGRA4444, - DRM_FORMAT_XRGB4444, - DRM_FORMAT_XBGR4444, - DRM_FORMAT_RGBX4444, - DRM_FORMAT_BGRX4444, - - DRM_FORMAT_NV12, - DRM_FORMAT_NV21, - DRM_FORMAT_NV16, - DRM_FORMAT_NV61, - DRM_FORMAT_VYUY, - DRM_FORMAT_UYVY, - DRM_FORMAT_YUYV, - DRM_FORMAT_YVYU, - DRM_FORMAT_YUV420, - DRM_FORMAT_YVU420, -}; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index 758c355b4fd8..8981cfa9dbc3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -22,7 +22,9 @@ #define CTL_PREPARE 0x0d0 #define CTL_SW_RESET 0x030 #define CTL_LAYER_EXTN_OFFSET 0x40 +#define CTL_MERGE_3D_ACTIVE 0x0E4 #define CTL_INTF_ACTIVE 0x0F4 +#define CTL_MERGE_3D_FLUSH 0x100 #define CTL_INTF_FLUSH 0x110 #define CTL_INTF_MASTER 0x134 @@ -30,6 +32,7 @@ #define CTL_FLUSH_MASK_CTL BIT(17) #define DPU_REG_RESET_TIMEOUT_US 2000 +#define MERGE_3D_IDX 23 #define INTF_IDX 31 static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl, @@ -104,12 +107,6 @@ static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx, ctx->pending_flush_mask |= flushbits; } -static inline void dpu_hw_ctl_update_pending_intf_flush(struct dpu_hw_ctl *ctx, - u32 flushbits) -{ - ctx->pending_intf_flush_mask |= flushbits; -} - static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx) { return ctx->pending_flush_mask; 
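A condensed, hedged sketch of the two-level flush bookkeeping the CTL hunks here introduce; the context struct is pared down for illustration, but the indices and the BIT() math mirror the patch:

#include <linux/bits.h>
#include <linux/types.h>

#define MERGE_3D_IDX	23	/* summary bit gating CTL_MERGE_3D_FLUSH */
#define INTF_IDX	31	/* summary bit gating CTL_INTF_FLUSH */

struct ctl_masks {			/* illustrative subset of dpu_hw_ctl */
	u32 pending_flush_mask;		/* written to CTL_FLUSH */
	u32 pending_intf_flush_mask;	/* written to CTL_INTF_FLUSH */
	u32 pending_merge_3d_flush_mask; /* written to CTL_MERGE_3D_FLUSH */
};

/* v1 producers set a per-block bit plus the summary bit ... */
static void flush_intf_v1(struct ctl_masks *c, int intf, int intf_0)
{
	c->pending_intf_flush_mask |= BIT(intf - intf_0);
	c->pending_flush_mask |= BIT(INTF_IDX);
}

/* ... so the v1 trigger (next hunk) writes each sub-register only when
 * its summary bit is pending, and writes CTL_FLUSH itself last. */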
@@ -118,6 +115,9 @@ static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx) static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx) { + if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX)) + DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH, + ctx->pending_merge_3d_flush_mask); if (ctx->pending_flush_mask & BIT(INTF_IDX)) DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH, ctx->pending_intf_flush_mask); @@ -220,40 +220,39 @@ static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx, return flushbits; } -static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx, - u32 *flushbits, enum dpu_intf intf) +static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx, + enum dpu_intf intf) { switch (intf) { case INTF_0: - *flushbits |= BIT(31); + ctx->pending_flush_mask |= BIT(31); break; case INTF_1: - *flushbits |= BIT(30); + ctx->pending_flush_mask |= BIT(30); break; case INTF_2: - *flushbits |= BIT(29); + ctx->pending_flush_mask |= BIT(29); break; case INTF_3: - *flushbits |= BIT(28); + ctx->pending_flush_mask |= BIT(28); break; default: - return -EINVAL; + break; } - return 0; } -static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx, - u32 *flushbits, enum dpu_intf intf) +static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx, + enum dpu_intf intf) { - *flushbits |= BIT(31); - return 0; + ctx->pending_intf_flush_mask |= BIT(intf - INTF_0); + ctx->pending_flush_mask |= BIT(INTF_IDX); } -static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx, - u32 *flushbits, enum dpu_intf intf) +static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx, + enum dpu_merge_3d merge_3d) { - *flushbits |= BIT(intf - INTF_0); - return 0; + ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0); + ctx->pending_flush_mask |= BIT(MERGE_3D_IDX); } static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx, @@ -497,6 +496,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx, DPU_REG_WRITE(c, CTL_TOP, mode_sel); DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active); + DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0)); } static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx, @@ -535,15 +535,15 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops, if (cap & BIT(DPU_CTL_ACTIVE_CFG)) { ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1; ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1; - ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf_v1; - ops->get_bitmask_active_intf = - dpu_hw_ctl_active_get_bitmask_intf; - ops->update_pending_intf_flush = - dpu_hw_ctl_update_pending_intf_flush; + ops->update_pending_flush_intf = + dpu_hw_ctl_update_pending_flush_intf_v1; + ops->update_pending_flush_merge_3d = + dpu_hw_ctl_update_pending_flush_merge_3d_v1; } else { ops->trigger_flush = dpu_hw_ctl_trigger_flush; ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg; - ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf; + ops->update_pending_flush_intf = + dpu_hw_ctl_update_pending_flush_intf; } ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush; ops->update_pending_flush = dpu_hw_ctl_update_pending_flush; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h index ec579b470a80..e93a42ab60b1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h @@ -37,12 +37,14 @@ struct dpu_hw_stage_cfg { * struct dpu_hw_intf_cfg :Describes how the DPU writes data to output interface * @intf : Interface id * @mode_3d: 3d mux 
configuration + * @merge_3d: 3d merge block used * @intf_mode_sel: Interface mode, cmd / vid * @stream_sel: Stream selection for multi-stream interfaces */ struct dpu_hw_intf_cfg { enum dpu_intf intf; enum dpu_3d_blend_mode mode_3d; + enum dpu_merge_3d merge_3d; enum dpu_ctl_mode_sel intf_mode_sel; int stream_sel; }; @@ -91,13 +93,22 @@ struct dpu_hw_ctl_ops { u32 flushbits); /** - * OR in the given flushbits to the cached pending_intf_flush_mask + * OR in the given flushbits to the cached pending_(intf_)flush_mask * No effect on hardware * @ctx : ctl path ctx pointer - * @flushbits : module flushmask + * @blk : interface block index */ - void (*update_pending_intf_flush)(struct dpu_hw_ctl *ctx, - u32 flushbits); + void (*update_pending_flush_intf)(struct dpu_hw_ctl *ctx, + enum dpu_intf blk); + + /** + * OR in the given flushbits to the cached pending_(merge_3d_)flush_mask + * No effect on hardware + * @ctx : ctl path ctx pointer + * @blk : interface block index + */ + void (*update_pending_flush_merge_3d)(struct dpu_hw_ctl *ctx, + enum dpu_merge_3d blk); /** * Write the value of the pending_flush_mask to hardware @@ -143,23 +154,6 @@ struct dpu_hw_ctl_ops { enum dpu_dspp blk); /** - * Query the value of the intf flush mask - * No effect on hardware - * @ctx : ctl path ctx pointer - */ - int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx, - u32 *flushbits, - enum dpu_intf blk); - - /** - * Query the value of the intf active flush mask - * No effect on hardware - * @ctx : ctl path ctx pointer - */ - int (*get_bitmask_active_intf)(struct dpu_hw_ctl *ctx, - u32 *flushbits, enum dpu_intf blk); - - /** * Set all blend stages to disabled * @ctx : ctl path ctx pointer */ @@ -198,6 +192,7 @@ struct dpu_hw_ctl { const struct dpu_lm_cfg *mixer_hw_caps; u32 pending_flush_mask; u32 pending_intf_flush_mask; + u32 pending_merge_3d_flush_mask; /* ops */ struct dpu_hw_ctl_ops ops; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c index a7a24539921f..e42f901a7de5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c @@ -57,8 +57,7 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx, static void _setup_dspp_ops(struct dpu_hw_dspp *c, unsigned long features) { - if (test_bit(DPU_DSPP_PCC, &features) && - IS_SC7180_TARGET(c->hw.hwversion)) + if (test_bit(DPU_DSPP_PCC, &features)) c->ops.setup_pcc = dpu_setup_dspp_pcc; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index d84a84f7fe1a..5c521de71567 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -189,8 +189,8 @@ struct dpu_irq_type { u32 reg_idx; }; -/** - * List of DPU interrupt registers +/* + * struct dpu_intr_reg - List of DPU interrupt registers */ static const struct dpu_intr_reg dpu_intr_set[] = { { @@ -245,9 +245,10 @@ static const struct dpu_intr_reg dpu_intr_set[] = { } }; -/** - * IRQ mapping table - use for lookup an irq_idx in this table that have - * a matching interface type and instance index. +/* + * struct dpu_irq_type - IRQ mapping table use for lookup an irq_idx in this + * table that have a matching interface type and + * instance index. 
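 *
 * (Editor's sketch, hedged) the lookup this comment describes is a
 * linear scan, roughly:
 *	for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++)
 *		if (type == dpu_irq_map[i].intr_type &&
 *		    instance_idx == dpu_irq_map[i].instance_idx)
 *			return i;	/* this is the irq_idx */
 *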
*/ static const struct dpu_irq_type dpu_irq_map[] = { /* BEGIN MAP_RANGE: 0-31, INTR */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c index 4b8baf71423f..6ac0b5a0e057 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c @@ -48,7 +48,7 @@ static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer, /** * _stage_offset(): returns the relative offset of the blend registers * for the stage to be setup - * @c: mixer ctx contains the mixer to be programmed + * @ctx: mixer ctx contains the mixer to be programmed * @stage: stage index to setup */ static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h index 979fd2c60aa0..09a3fb3e89f5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h @@ -96,6 +96,7 @@ enum dpu_hw_blk_type { DPU_HW_BLK_INTF, DPU_HW_BLK_WB, DPU_HW_BLK_DSPP, + DPU_HW_BLK_MERGE_3D, DPU_HW_BLK_MAX, }; @@ -186,6 +187,13 @@ enum dpu_pingpong { PINGPONG_MAX }; +enum dpu_merge_3d { + MERGE_3D_0 = 1, + MERGE_3D_1, + MERGE_3D_2, + MERGE_3D_MAX +}; + enum dpu_intf { INTF_0 = 1, INTF_1, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c new file mode 100644 index 000000000000..720813e5a8ae --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#include <linux/iopoll.h> + +#include "dpu_hw_mdss.h" +#include "dpu_hwio.h" +#include "dpu_hw_catalog.h" +#include "dpu_hw_merge3d.h" +#include "dpu_kms.h" +#include "dpu_trace.h" + +#define MERGE_3D_MUX 0x000 +#define MERGE_3D_MODE 0x004 + +static const struct dpu_merge_3d_cfg *_merge_3d_offset(enum dpu_merge_3d idx, + const struct dpu_mdss_cfg *m, + void __iomem *addr, + struct dpu_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->merge_3d_count; i++) { + if (idx == m->merge_3d[i].id) { + b->base_off = addr; + b->blk_off = m->merge_3d[i].base; + b->length = m->merge_3d[i].len; + b->hwversion = m->hwversion; + b->log_mask = DPU_DBG_MASK_PINGPONG; + return &m->merge_3d[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +static void dpu_hw_merge_3d_setup_3d_mode(struct dpu_hw_merge_3d *merge_3d, + enum dpu_3d_blend_mode mode_3d) +{ + struct dpu_hw_blk_reg_map *c; + u32 data; + + + c = &merge_3d->hw; + if (mode_3d == BLEND_3D_NONE) { + DPU_REG_WRITE(c, MERGE_3D_MODE, 0); + DPU_REG_WRITE(c, MERGE_3D_MUX, 0); + } else { + data = BIT(0) | ((mode_3d - 1) << 1); + DPU_REG_WRITE(c, MERGE_3D_MODE, data); + } +} + +static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c, + unsigned long features) +{ + c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode; +}; + +static struct dpu_hw_blk_ops dpu_hw_ops; + +struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(enum dpu_merge_3d idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m) +{ + struct dpu_hw_merge_3d *c; + const struct dpu_merge_3d_cfg *cfg; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _merge_3d_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + c->idx = idx; + c->caps = cfg; + _setup_merge_3d_ops(c, c->caps->features); + + dpu_hw_blk_init(&c->base, DPU_HW_BLK_MERGE_3D, idx, &dpu_hw_ops); + + return c; +} + +void 
dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *hw) +{ + if (hw) + dpu_hw_blk_destroy(&hw->base); + kfree(hw); +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h new file mode 100644 index 000000000000..870bdb14613e --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DPU_HW_MERGE3D_H +#define _DPU_HW_MERGE3D_H + +#include "dpu_hw_catalog.h" +#include "dpu_hw_mdss.h" +#include "dpu_hw_util.h" +#include "dpu_hw_blk.h" + +struct dpu_hw_merge_3d; + +/** + * + * struct dpu_hw_merge_3d_ops : Interface to the merge_3d Hw driver functions + * Assumption is these functions will be called after clocks are enabled + * @setup_3d_mode : enable 3D merge + */ +struct dpu_hw_merge_3d_ops { + void (*setup_3d_mode)(struct dpu_hw_merge_3d *merge_3d, + enum dpu_3d_blend_mode mode_3d); + +}; + +struct dpu_hw_merge_3d { + struct dpu_hw_blk base; + struct dpu_hw_blk_reg_map hw; + + /* merge_3d */ + enum dpu_merge_3d idx; + const struct dpu_merge_3d_cfg *caps; + + /* ops */ + struct dpu_hw_merge_3d_ops ops; +}; + +/** + * to_dpu_hw_merge_3d - convert base object dpu_hw_base to container + * @hw: Pointer to base hardware block + * return: Pointer to hardware block container + */ +static inline struct dpu_hw_merge_3d *to_dpu_hw_merge_3d(struct dpu_hw_blk *hw) +{ + return container_of(hw, struct dpu_hw_merge_3d, base); +} + +/** + * dpu_hw_merge_3d_init - initializes the merge_3d driver for the passed + * merge_3d idx. + * @idx: Pingpong index for which driver object is required + * @addr: Mapped register io address of MDP + * @m: Pointer to mdss catalog data + * Returns: Error code or allocated dpu_hw_merge_3d context + */ +struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(enum dpu_merge_3d idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m); + +/** + * dpu_hw_merge_3d_destroy - destroys merge_3d driver context + * should be called to free the context + * @pp: Pointer to PP driver context returned by dpu_hw_merge_3d_init + */ +void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *pp); + +#endif /*_DPU_HW_MERGE3D_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h index 065996b3ece9..6902b9b95c8e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h @@ -119,6 +119,7 @@ struct dpu_hw_pingpong { /* pingpong */ enum dpu_pingpong idx; const struct dpu_pingpong_cfg *caps; + struct dpu_hw_blk *merge_3d; /* ops */ struct dpu_hw_pingpong_ops ops; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c index c940b69435e1..2c2ca5335aa8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -231,7 +231,7 @@ static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx, DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode); } -/** +/* * Setup source pixel format, flip, */ static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx, @@ -437,7 +437,7 @@ static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx) return dpu_hw_get_scaler3_ver(&ctx->hw, idx); } -/** +/* * dpu_hw_sspp_setup_rects() */ static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 
d93c44f6996d..374b0e8471e6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -718,6 +718,8 @@ static void dpu_kms_destroy(struct msm_kms *kms) dpu_kms = to_dpu_kms(kms); _dpu_kms_hw_destroy(dpu_kms); + + msm_kms_destroy(&dpu_kms->base); } static void _dpu_kms_set_encoder_mode(struct msm_kms *kms, @@ -1091,12 +1093,9 @@ static int dpu_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(dpu_kms->opp_table); /* OPP table is optional */ ret = dev_pm_opp_of_add_table(dev); - if (!ret) { - dpu_kms->has_opp_table = true; - } else if (ret != -ENODEV) { + if (ret && ret != -ENODEV) { dev_err(dev, "invalid OPP table in device tree\n"); - dev_pm_opp_put_clkname(dpu_kms->opp_table); - return ret; + goto put_clkname; } mp = &dpu_kms->mp; @@ -1108,7 +1107,11 @@ static int dpu_bind(struct device *dev, struct device *master, void *data) platform_set_drvdata(pdev, dpu_kms); - msm_kms_init(&dpu_kms->base, &kms_funcs); + ret = msm_kms_init(&dpu_kms->base, &kms_funcs); + if (ret) { + DPU_ERROR("failed to init kms, ret=%d\n", ret); + goto err; + } dpu_kms->dev = ddev; dpu_kms->pdev = pdev; @@ -1118,8 +1121,8 @@ static int dpu_bind(struct device *dev, struct device *master, void *data) priv->kms = &dpu_kms->base; return ret; err: - if (dpu_kms->has_opp_table) - dev_pm_opp_of_remove_table(dev); + dev_pm_opp_of_remove_table(dev); +put_clkname: dev_pm_opp_put_clkname(dpu_kms->opp_table); return ret; } @@ -1137,8 +1140,7 @@ static void dpu_unbind(struct device *dev, struct device *master, void *data) if (dpu_kms->rpm_enabled) pm_runtime_disable(&pdev->dev); - if (dpu_kms->has_opp_table) - dev_pm_opp_of_remove_table(dev); + dev_pm_opp_of_remove_table(dev); dev_pm_opp_put_clkname(dpu_kms->opp_table); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 1c0e4c0c9ffb..d6717d6672f7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -131,7 +131,6 @@ struct dpu_kms { bool rpm_enabled; struct opp_table *opp_table; - bool has_opp_table; struct dss_module_power mp; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 7ea90d25a3b6..bc0231a50132 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -19,7 +19,6 @@ #include "dpu_kms.h" #include "dpu_formats.h" #include "dpu_hw_sspp.h" -#include "dpu_hw_catalog_format.h" #include "dpu_trace.h" #include "dpu_crtc.h" #include "dpu_vbif.h" @@ -63,6 +62,16 @@ enum { #define DEFAULT_REFRESH_RATE 60 +static const uint32_t qcom_compressed_supported_formats[] = { + DRM_FORMAT_ABGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_BGR565, + + DRM_FORMAT_NV12, +}; + /** * enum dpu_plane_qos - Different qos configurations for each pipe * @@ -133,7 +142,8 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane) /** * _dpu_plane_calc_bw - calculate bandwidth required for a plane - * @Plane: Pointer to drm plane. + * @plane: Pointer to drm plane. + * @fb: Pointer to framebuffer associated with the given plane * Result: Updates calculated bandwidth in the plane state. 
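 *
 * (Editor's aside, hedged) the vfp term added below gives the prefill
 * divisor a fallback chain: use (vbp + vpw) when that window alone
 * exceeds the hw_latency_lines budget, use the full blanking
 * (vbp + vpw + vfp) when even that is smaller than the budget, and use
 * hw_latency_lines itself in between.
 *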
* BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest) * Prefill BW Equation: line src bytes * line_time @@ -151,7 +161,7 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane, u64 plane_bw; u32 hw_latency_lines; u64 scale_factor; - int vbp, vpw; + int vbp, vpw, vfp; pstate = to_dpu_plane_state(plane->state); mode = &plane->state->crtc->mode; @@ -164,6 +174,7 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane, fps = drm_mode_vrefresh(mode); vbp = mode->vtotal - mode->vsync_end; vpw = mode->vsync_end - mode->vsync_start; + vfp = mode->vsync_start - mode->vdisplay; hw_latency_lines = dpu_kms->catalog->perf.min_prefill_lines; scale_factor = src_height > dst_height ? mult_frac(src_height, 1, dst_height) : 1; @@ -176,14 +187,20 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane, src_width * hw_latency_lines * fps * fmt->bpp * scale_factor * mode->vtotal; - do_div(plane_prefill_bw, (vbp+vpw)); + if ((vbp+vpw) > hw_latency_lines) + do_div(plane_prefill_bw, (vbp+vpw)); + else if ((vbp+vpw+vfp) < hw_latency_lines) + do_div(plane_prefill_bw, (vbp+vpw+vfp)); + else + do_div(plane_prefill_bw, hw_latency_lines); + pstate->plane_fetch_bw = max(plane_bw, plane_prefill_bw); } /** * _dpu_plane_calc_clk - calculate clock required for a plane - * @Plane: Pointer to drm plane. + * @plane: Pointer to drm plane. * Result: Updates calculated clock in the plane state. * Clock equation: dst_w * v_total * fps * (src_h / dst_h) */ @@ -215,7 +232,7 @@ static void _dpu_plane_calc_clk(struct drm_plane *plane) * _dpu_plane_calc_fill_level - calculate fill level of the given source format * @plane: Pointer to drm plane * @fmt: Pointer to source buffer format - * @src_wdith: width of source buffer + * @src_width: width of source buffer * Return: fill level corresponding to the source buffer/format or 0 if error */ static int _dpu_plane_calc_fill_level(struct drm_plane *plane, @@ -937,6 +954,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, { int ret = 0, min_scale; struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_plane_state *pstate = to_dpu_plane_state(state); const struct drm_crtc_state *crtc_state = NULL; const struct dpu_format *fmt; struct drm_rect src, dst, fb_rect = { 0 }; @@ -1009,6 +1027,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, return -E2BIG; } + pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state); + return 0; } @@ -1046,6 +1066,7 @@ void dpu_plane_flush(struct drm_plane *plane) /** * dpu_plane_set_error: enable/disable error condition * @plane: pointer to drm_plane structure + * @error: error value to set */ void dpu_plane_set_error(struct drm_plane *plane, bool error) { @@ -1066,6 +1087,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) struct dpu_plane_state *pstate = to_dpu_plane_state(state); struct drm_crtc *crtc = state->crtc; struct drm_framebuffer *fb = state->fb; + bool is_rt_pipe, update_qos_remap; const struct dpu_format *fmt = to_dpu_format(msm_framebuffer_format(fb)); @@ -1075,7 +1097,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) pstate->pending = true; - pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT); + is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT); _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL); DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT @@ -1181,7 +1203,16 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) _dpu_plane_set_ot_limit(plane, crtc); } - 
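
/* Worked example for the prefill divisor selection added to
 * _dpu_plane_calc_bw() above (numbers assumed, not from the patch):
 * with hw_latency_lines = 24 and a mode where vbp = 16, vpw = 4 and
 * vfp = 6, vbp + vpw = 20 is not greater than 24, and
 * vbp + vpw + vfp = 26 is not less than 24, so the fallback divisor
 * hw_latency_lines is used. Only when back porch plus sync pulse alone
 * exceed the latency window is (vbp + vpw) the divisor, and only when
 * the entire vertical blanking is shorter than the window is
 * (vbp + vpw + vfp) used. */
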
_dpu_plane_set_qos_remap(plane); + update_qos_remap = (is_rt_pipe != pdpu->is_rt_pipe) || + pstate->needs_qos_remap; + + if (update_qos_remap) { + if (is_rt_pipe != pdpu->is_rt_pipe) + pdpu->is_rt_pipe = is_rt_pipe; + else if (pstate->needs_qos_remap) + pstate->needs_qos_remap = false; + _dpu_plane_set_qos_remap(plane); + } _dpu_plane_calc_bw(plane, fb); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h index ca83b8753d59..13a983fa8213 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h @@ -19,6 +19,7 @@ * @base: base drm plane state object * @aspace: pointer to address space for input/output buffers * @stage: assigned by crtc blender + * @needs_qos_remap: qos remap settings need to be updated * @multirect_index: index of the rectangle of SSPP * @multirect_mode: parallel or time multiplex multirect mode * @pending: whether the current update is still pending @@ -32,6 +33,7 @@ struct dpu_plane_state { struct drm_plane_state base; struct msm_gem_address_space *aspace; enum dpu_stage stage; + bool needs_qos_remap; uint32_t multirect_index; uint32_t multirect_mode; bool pending; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 9b2b5044e8e0..fd2d104f0a91 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -10,6 +10,7 @@ #include "dpu_hw_pingpong.h" #include "dpu_hw_intf.h" #include "dpu_hw_dspp.h" +#include "dpu_hw_merge3d.h" #include "dpu_encoder.h" #include "dpu_trace.h" @@ -42,6 +43,14 @@ int dpu_rm_destroy(struct dpu_rm *rm) dpu_hw_pingpong_destroy(hw); } } + for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) { + struct dpu_hw_merge_3d *hw; + + if (rm->merge_3d_blks[i]) { + hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]); + dpu_hw_merge_3d_destroy(hw); + } + } for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) { struct dpu_hw_mixer *hw; @@ -119,6 +128,24 @@ int dpu_rm_init(struct dpu_rm *rm, } } + for (i = 0; i < cat->merge_3d_count; i++) { + struct dpu_hw_merge_3d *hw; + const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i]; + + if (merge_3d->id < MERGE_3D_0 || merge_3d->id >= MERGE_3D_MAX) { + DPU_ERROR("skip merge_3d %d with invalid id\n", merge_3d->id); + continue; + } + hw = dpu_hw_merge_3d_init(merge_3d->id, mmio, cat); + if (IS_ERR_OR_NULL(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed merge_3d object creation: err %d\n", + rc); + goto fail; + } + rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base; + } + for (i = 0; i < cat->pingpong_count; i++) { struct dpu_hw_pingpong *hw; const struct dpu_pingpong_cfg *pp = &cat->pingpong[i]; @@ -134,6 +161,8 @@ int dpu_rm_init(struct dpu_rm *rm, rc); goto fail; } + if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX) + hw->merge_3d = rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]; rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base; } @@ -210,7 +239,7 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) * @rm: dpu resource manager handle * @primary_idx: index of primary mixer in rm->mixer_blks[] * @peer_idx: index of other mixer in rm->mixer_blks[] - * @Return: true if rm->mixer_blks[peer_idx] is a peer of + * Return: true if rm->mixer_blks[peer_idx] is a peer of * rm->mixer_blks[primary_idx] */ static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx, @@ -235,6 +264,7 @@ static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx, * proposed use case requirements, incl. 
hardwired dependent blocks like * pingpong * @rm: dpu resource manager handle + * @global_state: resources shared across multiple kms objects * @enc_id: encoder id requesting for allocation * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks * if lm, and all other hardwired blocks connected to the lm (pp) is @@ -245,7 +275,7 @@ static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx, * mixer in rm->dspp_blks[]. * @reqs: input parameter, rm requirements for HW blocks needed in the * datapath. - * @Return: true if lm matches all requirements, false otherwise + * Return: true if lm matches all requirements, false otherwise */ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, struct dpu_global_state *global_state, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h index 08726bb1063a..1f12c8d5b8aa 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h @@ -29,6 +29,7 @@ struct dpu_rm { struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0]; struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0]; struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0]; + struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0]; uint32_t lm_max_width; }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c index 5e8c3f3e6625..7e08f40e7e6f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -140,7 +140,7 @@ exit: /** * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters - * @vbif: Pointer to hardware vbif driver + * @dpu_kms: DPU handler * @params: Pointer to usecase parameters * * Note this function would block waiting for bus halt. diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c index 34e3186e236d..169f9de4a12a 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -11,6 +11,7 @@ #include <drm/drm_vblank.h> #include "mdp4_kms.h" +#include "msm_gem.h" struct mdp4_crtc { struct drm_crtc base; diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index dbf8d429223e..3d729270bde1 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -175,6 +175,8 @@ static void mdp4_destroy(struct msm_kms *kms) if (mdp4_kms->rpm_enabled) pm_runtime_disable(dev); + mdp_kms_destroy(&mdp4_kms->base); + kfree(mdp4_kms); } @@ -427,7 +429,11 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) goto fail; } - mdp_kms_init(&mdp4_kms->base, &kms_funcs); + ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs); + if (ret) { + DRM_DEV_ERROR(dev->dev, "failed to init kms\n"); + goto fail; + } kms = &mdp4_kms->base.base; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 4a53d7b42e9c..0c8f9f88301f 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -16,6 +16,7 @@ #include <drm/drm_vblank.h> #include "mdp5_kms.h" +#include "msm_gem.h" #define CURSOR_WIDTH 64 #define CURSOR_HEIGHT 64 @@ -577,9 +578,9 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, mdp5_crtc->enabled = true; } -int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc, - struct drm_crtc_state *new_crtc_state, - bool need_right_mixer) +static int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state, + bool 
need_right_mixer) { struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(new_crtc_state); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c index 030279d7b64b..81b0c7cf954e 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c @@ -216,7 +216,9 @@ static void send_start_signal(struct mdp5_ctl *ctl) /** * mdp5_ctl_set_encoder_state() - set the encoder state * - * @enable: true, when encoder is ready for data streaming; false, otherwise. + * @ctl: the CTL instance + * @pipeline: the encoder's INTF + MIXER configuration + * @enabled: true, when encoder is ready for data streaming; false, otherwise. * * Note: * This encoder state is needed to trigger START signal (data path kickoff). @@ -510,6 +512,13 @@ static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, /** * mdp5_ctl_commit() - Register Flush * + * @ctl: the CTL instance + * @pipeline: the encoder's INTF + MIXER configuration + * @flush_mask: bitmask of display controller hw blocks to flush + * @start: if true, immediately update flush registers and set START + * bit, otherwise accumulate flush_mask bits until we are + * ready to START + * * The flush register is used to indicate several registers are all * programmed, and are safe to update to the back copy of the double * buffered registers. diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index e193865ce9a2..15aed45022bc 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -232,6 +232,8 @@ static void mdp5_kms_destroy(struct msm_kms *kms) aspace->mmu->funcs->detach(aspace->mmu); msm_gem_address_space_put(aspace); } + + mdp_kms_destroy(&mdp5_kms->base); } #ifdef CONFIG_DEBUG_FS @@ -294,7 +296,7 @@ static const struct mdp_kms_funcs kms_funcs = { .set_irqmask = mdp5_set_irqmask, }; -int mdp5_disable(struct mdp5_kms *mdp5_kms) +static int mdp5_disable(struct mdp5_kms *mdp5_kms) { DBG(""); @@ -314,7 +316,7 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms) return 0; } -int mdp5_enable(struct mdp5_kms *mdp5_kms) +static int mdp5_enable(struct mdp5_kms *mdp5_kms) { DBG(""); @@ -592,11 +594,14 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) return NULL; mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - - mdp_kms_init(&mdp5_kms->base, &kms_funcs); - pdev = mdp5_kms->pdev; + ret = mdp_kms_init(&mdp5_kms->base, &kms_funcs); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "failed to init kms\n"); + goto fail; + } + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); if (irq < 0) { ret = irq; diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h index 1535c5618491..b0286d5d5130 100644 --- a/drivers/gpu/drm/msm/disp/mdp_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp_kms.h @@ -36,12 +36,17 @@ struct mdp_kms { }; #define to_mdp_kms(x) container_of(x, struct mdp_kms, base) -static inline void mdp_kms_init(struct mdp_kms *mdp_kms, +static inline int mdp_kms_init(struct mdp_kms *mdp_kms, const struct mdp_kms_funcs *funcs) { mdp_kms->funcs = funcs; INIT_LIST_HEAD(&mdp_kms->irq_list); - msm_kms_init(&mdp_kms->base, &funcs->base); + return msm_kms_init(&mdp_kms->base, &funcs->base); +} + +static inline void mdp_kms_destroy(struct mdp_kms *mdp_kms) +{ + msm_kms_destroy(&mdp_kms->base); } /* diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index b15b4ce4ba35..44f0c57798d0 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ 
b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -5,7 +5,6 @@ #define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ -#include <linux/rational.h> #include <linux/delay.h> #include <linux/iopoll.h> #include <linux/phy/phy.h> @@ -572,6 +571,19 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog) dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); } +u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 status; + + status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); + status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT; + status &= DP_DP_HPD_STATE_STATUS_BITS_MASK; + + return status; +} + u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog) { struct dp_catalog_private *catalog = container_of(dp_catalog, diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h index 4b7666f1fe6f..176a9020a520 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.h +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -97,6 +97,7 @@ void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable); void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, u32 intr_mask, bool en); void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog); +u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog); u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog); void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog); int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level, diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index 2e3e1917351f..e3462f5d96d7 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -10,6 +10,7 @@ #include <linux/delay.h> #include <linux/phy/phy.h> #include <linux/phy/phy-dp.h> +#include <linux/pm_opp.h> #include <drm/drm_fixed.h> #include <drm/drm_dp_helper.h> #include <drm/drm_print.h> @@ -76,6 +77,8 @@ struct dp_ctrl_private { struct dp_parser *parser; struct dp_catalog *catalog; + struct opp_table *opp_table; + struct completion idle_comp; struct completion video_comp; }; @@ -611,7 +614,7 @@ static void _tu_valid_boundary_calc(struct tu_algo_data *tu) static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in, struct dp_vc_tu_mapping_table *tu_table) { - struct tu_algo_data tu; + struct tu_algo_data *tu; int compare_result_1, compare_result_2; u64 temp = 0; s64 temp_fp = 0, temp1_fp = 0, temp2_fp = 0; @@ -626,298 +629,300 @@ static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in, uint EXTRA_PIXCLK_CYCLE_DELAY = 4; uint HBLANK_MARGIN = 4; - memset(&tu, 0, sizeof(tu)); + tu = kzalloc(sizeof(*tu), GFP_KERNEL); + if (!tu) + return; - dp_panel_update_tu_timings(in, &tu); + dp_panel_update_tu_timings(in, tu); - tu.err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */ + tu->err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */ temp1_fp = drm_fixp_from_fraction(4, 1); - temp2_fp = drm_fixp_mul(temp1_fp, tu.lclk_fp); - temp_fp = drm_fixp_div(temp2_fp, tu.pclk_fp); - tu.extra_buffer_margin = drm_fixp2int_ceil(temp_fp); + temp2_fp = drm_fixp_mul(temp1_fp, tu->lclk_fp); + temp_fp = drm_fixp_div(temp2_fp, tu->pclk_fp); + tu->extra_buffer_margin = drm_fixp2int_ceil(temp_fp); - temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); - temp2_fp = drm_fixp_mul(tu.pclk_fp, temp1_fp); - temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = drm_fixp_mul(tu->pclk_fp, temp1_fp); + 
temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1); temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); - tu.ratio_fp = drm_fixp_div(temp2_fp, tu.lclk_fp); - - tu.original_ratio_fp = tu.ratio_fp; - tu.boundary_moderation_en = false; - tu.upper_boundary_count = 0; - tu.lower_boundary_count = 0; - tu.i_upper_boundary_count = 0; - tu.i_lower_boundary_count = 0; - tu.valid_lower_boundary_link = 0; - tu.even_distribution_BF = 0; - tu.even_distribution_legacy = 0; - tu.even_distribution = 0; - tu.delay_start_time_fp = 0; - - tu.err_fp = drm_fixp_from_fraction(1000, 1); - tu.n_err_fp = 0; - tu.n_n_err_fp = 0; - - tu.ratio = drm_fixp2int(tu.ratio_fp); - temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); - div64_u64_rem(tu.lwidth_fp, temp1_fp, &temp2_fp); + tu->ratio_fp = drm_fixp_div(temp2_fp, tu->lclk_fp); + + tu->original_ratio_fp = tu->ratio_fp; + tu->boundary_moderation_en = false; + tu->upper_boundary_count = 0; + tu->lower_boundary_count = 0; + tu->i_upper_boundary_count = 0; + tu->i_lower_boundary_count = 0; + tu->valid_lower_boundary_link = 0; + tu->even_distribution_BF = 0; + tu->even_distribution_legacy = 0; + tu->even_distribution = 0; + tu->delay_start_time_fp = 0; + + tu->err_fp = drm_fixp_from_fraction(1000, 1); + tu->n_err_fp = 0; + tu->n_n_err_fp = 0; + + tu->ratio = drm_fixp2int(tu->ratio_fp); + temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1); + div64_u64_rem(tu->lwidth_fp, temp1_fp, &temp2_fp); if (temp2_fp != 0 && - !tu.ratio && tu.dsc_en == 0) { - tu.ratio_fp = drm_fixp_mul(tu.ratio_fp, RATIO_SCALE_fp); - tu.ratio = drm_fixp2int(tu.ratio_fp); - if (tu.ratio) - tu.ratio_fp = drm_fixp_from_fraction(1, 1); + !tu->ratio && tu->dsc_en == 0) { + tu->ratio_fp = drm_fixp_mul(tu->ratio_fp, RATIO_SCALE_fp); + tu->ratio = drm_fixp2int(tu->ratio_fp); + if (tu->ratio) + tu->ratio_fp = drm_fixp_from_fraction(1, 1); } - if (tu.ratio > 1) - tu.ratio = 1; + if (tu->ratio > 1) + tu->ratio = 1; - if (tu.ratio == 1) + if (tu->ratio == 1) goto tu_size_calc; - compare_result_1 = _tu_param_compare(tu.ratio_fp, const_p49_fp); + compare_result_1 = _tu_param_compare(tu->ratio_fp, const_p49_fp); if (!compare_result_1 || compare_result_1 == 1) compare_result_1 = 1; else compare_result_1 = 0; - compare_result_2 = _tu_param_compare(tu.ratio_fp, const_p56_fp); + compare_result_2 = _tu_param_compare(tu->ratio_fp, const_p56_fp); if (!compare_result_2 || compare_result_2 == 2) compare_result_2 = 1; else compare_result_2 = 0; - if (tu.dsc_en && compare_result_1 && compare_result_2) { + if (tu->dsc_en && compare_result_1 && compare_result_2) { HBLANK_MARGIN += 4; DRM_DEBUG_DP("Info: increase HBLANK_MARGIN to %d\n", HBLANK_MARGIN); } tu_size_calc: - for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) { - temp1_fp = drm_fixp_from_fraction(tu.tu_size, 1); - temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + for (tu->tu_size = 32; tu->tu_size <= 64; tu->tu_size++) { + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); temp = drm_fixp2int_ceil(temp2_fp); temp1_fp = drm_fixp_from_fraction(temp, 1); - tu.n_err_fp = temp1_fp - temp2_fp; + tu->n_err_fp = temp1_fp - temp2_fp; - if (tu.n_err_fp < tu.err_fp) { - tu.err_fp = tu.n_err_fp; - tu.tu_size_desired = tu.tu_size; + if (tu->n_err_fp < tu->err_fp) { + tu->err_fp = tu->n_err_fp; + tu->tu_size_desired = tu->tu_size; } } - tu.tu_size_minus1 = tu.tu_size_desired - 1; + tu->tu_size_minus1 = tu->tu_size_desired - 1; - temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); - temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); - 
tu.valid_boundary_link = drm_fixp2int_ceil(temp2_fp); + temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + tu->valid_boundary_link = drm_fixp2int_ceil(temp2_fp); - temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); - temp2_fp = tu.lwidth_fp; + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = tu->lwidth_fp; temp2_fp = drm_fixp_mul(temp2_fp, temp1_fp); - temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1); + temp1_fp = drm_fixp_from_fraction(tu->valid_boundary_link, 1); temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); - tu.n_tus = drm_fixp2int(temp2_fp); + tu->n_tus = drm_fixp2int(temp2_fp); if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000) - tu.n_tus += 1; + tu->n_tus += 1; - tu.even_distribution_legacy = tu.n_tus % tu.nlanes == 0 ? 1 : 0; + tu->even_distribution_legacy = tu->n_tus % tu->nlanes == 0 ? 1 : 0; DRM_DEBUG_DP("Info: n_sym = %d, num_of_tus = %d\n", - tu.valid_boundary_link, tu.n_tus); + tu->valid_boundary_link, tu->n_tus); - temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); - temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp); - temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1); + temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->valid_boundary_link, 1); temp2_fp = temp1_fp - temp2_fp; - temp1_fp = drm_fixp_from_fraction(tu.n_tus + 1, 1); + temp1_fp = drm_fixp_from_fraction(tu->n_tus + 1, 1); temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); temp = drm_fixp2int(temp2_fp); if (temp && temp2_fp) - tu.extra_bytes = drm_fixp2int_ceil(temp2_fp); + tu->extra_bytes = drm_fixp2int_ceil(temp2_fp); else - tu.extra_bytes = 0; + tu->extra_bytes = 0; - temp1_fp = drm_fixp_from_fraction(tu.extra_bytes, 1); - temp2_fp = drm_fixp_from_fraction(8, tu.bpp); + temp1_fp = drm_fixp_from_fraction(tu->extra_bytes, 1); + temp2_fp = drm_fixp_from_fraction(8, tu->bpp); temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp); if (temp && temp1_fp) - tu.extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp); + tu->extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp); else - tu.extra_pclk_cycles = drm_fixp2int(temp1_fp); + tu->extra_pclk_cycles = drm_fixp2int(temp1_fp); - temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp); - temp2_fp = drm_fixp_from_fraction(tu.extra_pclk_cycles, 1); + temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp); + temp2_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles, 1); temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); if (temp1_fp) - tu.extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp); + tu->extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp); else - tu.extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp); + tu->extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp); - tu.filler_size = tu.tu_size_desired - tu.valid_boundary_link; + tu->filler_size = tu->tu_size_desired - tu->valid_boundary_link; - temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); - tu.ratio_by_tu_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1); + tu->ratio_by_tu_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); - tu.delay_start_link = tu.extra_pclk_cycles_in_link_clk + - tu.filler_size + tu.extra_buffer_margin; + tu->delay_start_link = tu->extra_pclk_cycles_in_link_clk + + tu->filler_size + tu->extra_buffer_margin; - tu.resulting_valid_fp = - drm_fixp_from_fraction(tu.valid_boundary_link, 1); + tu->resulting_valid_fp = + drm_fixp_from_fraction(tu->valid_boundary_link, 1); - 
temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); - temp2_fp = drm_fixp_div(tu.resulting_valid_fp, temp1_fp); - tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp; + temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1); + temp2_fp = drm_fixp_div(tu->resulting_valid_fp, temp1_fp); + tu->TU_ratio_err_fp = temp2_fp - tu->original_ratio_fp; temp1_fp = drm_fixp_from_fraction(HBLANK_MARGIN, 1); - temp1_fp = tu.hbp_relative_to_pclk_fp - temp1_fp; - tu.hbp_time_fp = drm_fixp_div(temp1_fp, tu.pclk_fp); + temp1_fp = tu->hbp_relative_to_pclk_fp - temp1_fp; + tu->hbp_time_fp = drm_fixp_div(temp1_fp, tu->pclk_fp); - temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1); - tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp); + temp1_fp = drm_fixp_from_fraction(tu->delay_start_link, 1); + tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp); - compare_result_1 = _tu_param_compare(tu.hbp_time_fp, - tu.delay_start_time_fp); + compare_result_1 = _tu_param_compare(tu->hbp_time_fp, + tu->delay_start_time_fp); if (compare_result_1 == 2) /* if (hbp_time_fp < delay_start_time_fp) */ - tu.min_hblank_violated = 1; + tu->min_hblank_violated = 1; - tu.hactive_time_fp = drm_fixp_div(tu.lwidth_fp, tu.pclk_fp); + tu->hactive_time_fp = drm_fixp_div(tu->lwidth_fp, tu->pclk_fp); - compare_result_2 = _tu_param_compare(tu.hactive_time_fp, - tu.delay_start_time_fp); + compare_result_2 = _tu_param_compare(tu->hactive_time_fp, + tu->delay_start_time_fp); if (compare_result_2 == 2) - tu.min_hblank_violated = 1; + tu->min_hblank_violated = 1; - tu.delay_start_time_fp = 0; + tu->delay_start_time_fp = 0; /* brute force */ - tu.delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY; - tu.diff_abs_fp = tu.resulting_valid_fp - tu.ratio_by_tu_fp; + tu->delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY; + tu->diff_abs_fp = tu->resulting_valid_fp - tu->ratio_by_tu_fp; - temp = drm_fixp2int(tu.diff_abs_fp); - if (!temp && tu.diff_abs_fp <= 0xffff) - tu.diff_abs_fp = 0; + temp = drm_fixp2int(tu->diff_abs_fp); + if (!temp && tu->diff_abs_fp <= 0xffff) + tu->diff_abs_fp = 0; /* if(diff_abs < 0) diff_abs *= -1 */ - if (tu.diff_abs_fp < 0) - tu.diff_abs_fp = drm_fixp_mul(tu.diff_abs_fp, -1); + if (tu->diff_abs_fp < 0) + tu->diff_abs_fp = drm_fixp_mul(tu->diff_abs_fp, -1); - tu.boundary_mod_lower_err = 0; - if ((tu.diff_abs_fp != 0 && - ((tu.diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) || - (tu.even_distribution_legacy == 0) || + tu->boundary_mod_lower_err = 0; + if ((tu->diff_abs_fp != 0 && + ((tu->diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) || + (tu->even_distribution_legacy == 0) || (DP_BRUTE_FORCE == 1))) || - (tu.min_hblank_violated == 1)) { + (tu->min_hblank_violated == 1)) { do { - tu.err_fp = drm_fixp_from_fraction(1000, 1); + tu->err_fp = drm_fixp_from_fraction(1000, 1); - temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp); + temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp); temp2_fp = drm_fixp_from_fraction( - tu.delay_start_link_extra_pixclk, 1); + tu->delay_start_link_extra_pixclk, 1); temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); if (temp1_fp) - tu.extra_buffer_margin = + tu->extra_buffer_margin = drm_fixp2int_ceil(temp1_fp); else - tu.extra_buffer_margin = 0; + tu->extra_buffer_margin = 0; - temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); - temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp1_fp = drm_fixp_mul(tu->lwidth_fp, temp1_fp); if (temp1_fp) - tu.n_symbols = drm_fixp2int_ceil(temp1_fp); + tu->n_symbols = drm_fixp2int_ceil(temp1_fp); else - 
tu.n_symbols = 0; - - for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) { - for (tu.i_upper_boundary_count = 1; - tu.i_upper_boundary_count <= 15; - tu.i_upper_boundary_count++) { - for (tu.i_lower_boundary_count = 1; - tu.i_lower_boundary_count <= 15; - tu.i_lower_boundary_count++) { - _tu_valid_boundary_calc(&tu); + tu->n_symbols = 0; + + for (tu->tu_size = 32; tu->tu_size <= 64; tu->tu_size++) { + for (tu->i_upper_boundary_count = 1; + tu->i_upper_boundary_count <= 15; + tu->i_upper_boundary_count++) { + for (tu->i_lower_boundary_count = 1; + tu->i_lower_boundary_count <= 15; + tu->i_lower_boundary_count++) { + _tu_valid_boundary_calc(tu); } } } - tu.delay_start_link_extra_pixclk--; - } while (tu.boundary_moderation_en != true && - tu.boundary_mod_lower_err == 1 && - tu.delay_start_link_extra_pixclk != 0); + tu->delay_start_link_extra_pixclk--; + } while (tu->boundary_moderation_en != true && + tu->boundary_mod_lower_err == 1 && + tu->delay_start_link_extra_pixclk != 0); - if (tu.boundary_moderation_en == true) { + if (tu->boundary_moderation_en == true) { temp1_fp = drm_fixp_from_fraction( - (tu.upper_boundary_count * - tu.valid_boundary_link + - tu.lower_boundary_count * - (tu.valid_boundary_link - 1)), 1); + (tu->upper_boundary_count * + tu->valid_boundary_link + + tu->lower_boundary_count * + (tu->valid_boundary_link - 1)), 1); temp2_fp = drm_fixp_from_fraction( - (tu.upper_boundary_count + - tu.lower_boundary_count), 1); - tu.resulting_valid_fp = + (tu->upper_boundary_count + + tu->lower_boundary_count), 1); + tu->resulting_valid_fp = drm_fixp_div(temp1_fp, temp2_fp); temp1_fp = drm_fixp_from_fraction( - tu.tu_size_desired, 1); - tu.ratio_by_tu_fp = - drm_fixp_mul(tu.original_ratio_fp, temp1_fp); + tu->tu_size_desired, 1); + tu->ratio_by_tu_fp = + drm_fixp_mul(tu->original_ratio_fp, temp1_fp); - tu.valid_lower_boundary_link = - tu.valid_boundary_link - 1; + tu->valid_lower_boundary_link = + tu->valid_boundary_link - 1; - temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); - temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp1_fp = drm_fixp_mul(tu->lwidth_fp, temp1_fp); temp2_fp = drm_fixp_div(temp1_fp, - tu.resulting_valid_fp); - tu.n_tus = drm_fixp2int(temp2_fp); + tu->resulting_valid_fp); + tu->n_tus = drm_fixp2int(temp2_fp); - tu.tu_size_minus1 = tu.tu_size_desired - 1; - tu.even_distribution_BF = 1; + tu->tu_size_minus1 = tu->tu_size_desired - 1; + tu->even_distribution_BF = 1; temp1_fp = - drm_fixp_from_fraction(tu.tu_size_desired, 1); + drm_fixp_from_fraction(tu->tu_size_desired, 1); temp2_fp = - drm_fixp_div(tu.resulting_valid_fp, temp1_fp); - tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp; + drm_fixp_div(tu->resulting_valid_fp, temp1_fp); + tu->TU_ratio_err_fp = temp2_fp - tu->original_ratio_fp; } } - temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu.lwidth_fp); + temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu->lwidth_fp); if (temp2_fp) temp = drm_fixp2int_ceil(temp2_fp); else temp = 0; - temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); - temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp); - temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); temp2_fp = drm_fixp_div(temp1_fp, temp2_fp); temp1_fp = drm_fixp_from_fraction(temp, 1); temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); temp = drm_fixp2int(temp2_fp); - if (tu.async_en) - tu.delay_start_link += (int)temp; + if 
(tu->async_en) + tu->delay_start_link += (int)temp; - temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1); - tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp); + temp1_fp = drm_fixp_from_fraction(tu->delay_start_link, 1); + tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp); /* OUTPUTS */ - tu_table->valid_boundary_link = tu.valid_boundary_link; - tu_table->delay_start_link = tu.delay_start_link; - tu_table->boundary_moderation_en = tu.boundary_moderation_en; - tu_table->valid_lower_boundary_link = tu.valid_lower_boundary_link; - tu_table->upper_boundary_count = tu.upper_boundary_count; - tu_table->lower_boundary_count = tu.lower_boundary_count; - tu_table->tu_size_minus1 = tu.tu_size_minus1; + tu_table->valid_boundary_link = tu->valid_boundary_link; + tu_table->delay_start_link = tu->delay_start_link; + tu_table->boundary_moderation_en = tu->boundary_moderation_en; + tu_table->valid_lower_boundary_link = tu->valid_lower_boundary_link; + tu_table->upper_boundary_count = tu->upper_boundary_count; + tu_table->lower_boundary_count = tu->lower_boundary_count; + tu_table->tu_size_minus1 = tu->tu_size_minus1; DRM_DEBUG_DP("TU: valid_boundary_link: %d\n", tu_table->valid_boundary_link); @@ -932,6 +937,8 @@ tu_size_calc: DRM_DEBUG_DP("TU: lower_boundary_count: %d\n", tu_table->lower_boundary_count); DRM_DEBUG_DP("TU: tu_size_minus1: %d\n", tu_table->tu_size_minus1); + + kfree(tu); } static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl, @@ -1061,23 +1068,15 @@ static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl, static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl, u8 *link_status) { - int len = 0; - u32 const offset = DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS; - u32 link_status_read_max_retries = 100; - - while (--link_status_read_max_retries) { - len = drm_dp_dpcd_read_link_status(ctrl->aux, - link_status); - if (len != DP_LINK_STATUS_SIZE) { - DRM_ERROR("DP link status read failed, err: %d\n", len); - return len; - } + int ret = 0, len; - if (!(link_status[offset] & DP_LINK_STATUS_UPDATED)) - return 0; + len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status); + if (len != DP_LINK_STATUS_SIZE) { + DRM_ERROR("DP link status read failed, err: %d\n", len); + ret = -EINVAL; } - return -ETIMEDOUT; + return ret; } static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, @@ -1400,6 +1399,8 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip) void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl) { struct dp_ctrl_private *ctrl; + struct dp_io *dp_io; + struct phy *phy; if (!dp_ctrl) { DRM_ERROR("Invalid input data\n"); @@ -1407,8 +1408,11 @@ void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl) } ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + dp_io = &ctrl->parser->io; + phy = dp_io->phy; dp_catalog_ctrl_enable_irq(ctrl->catalog, false); + phy_exit(phy); DRM_DEBUG_DP("Host deinitialized successfully\n"); } @@ -1463,6 +1467,30 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl) return ret; } +static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl) +{ + struct dp_io *dp_io; + struct phy *phy; + int ret; + + dp_io = &ctrl->parser->io; + phy = dp_io->phy; + + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + + dp_catalog_ctrl_reset(ctrl->catalog); + + ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false); + if (ret) { + DRM_ERROR("Failed to disable link clocks. 
ret=%d\n", ret); + } + + phy_power_off(phy); + phy_exit(phy); + + return 0; +} + static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl) { int ret = 0; @@ -1643,11 +1671,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) if (rc) return rc; - ctrl->link->phy_params.p_level = 0; - ctrl->link->phy_params.v_level = 0; - - while (--link_train_max_retries && - !atomic_read(&ctrl->dp_ctrl.aborted)) { + while (--link_train_max_retries) { rc = dp_ctrl_reinitialize_mainlink(ctrl); if (rc) { DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", @@ -1662,6 +1686,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) break; } else if (training_step == DP_TRAINING_1) { /* link train_1 failed */ + if (!dp_catalog_link_is_connected(ctrl->catalog)) { + break; + } + rc = dp_ctrl_link_rate_down_shift(ctrl); if (rc < 0) { /* already in RBR = 1.6G */ if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) { @@ -1681,6 +1709,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) } } else if (training_step == DP_TRAINING_2) { /* link train_2 failed, lower lane rate */ + if (!dp_catalog_link_is_connected(ctrl->catalog)) { + break; + } + rc = dp_ctrl_link_lane_down_shift(ctrl); if (rc < 0) { /* end with failure */ @@ -1701,6 +1733,11 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) */ if (rc == 0) /* link train successfully */ dp_ctrl_push_idle(dp_ctrl); + else { + /* link training failed */ + dp_ctrl_deinitialize_mainlink(ctrl); + rc = -ECONNRESET; + } return rc; } @@ -1836,6 +1873,7 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, struct dp_parser *parser) { struct dp_ctrl_private *ctrl; + int ret; if (!dev || !panel || !aux || !link || !catalog) { @@ -1849,6 +1887,21 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, return ERR_PTR(-ENOMEM); } + ctrl->opp_table = dev_pm_opp_set_clkname(dev, "ctrl_link"); + if (IS_ERR(ctrl->opp_table)) { + dev_err(dev, "invalid DP OPP table in device tree\n"); + /* caller do PTR_ERR(ctrl->opp_table) */ + return (struct dp_ctrl *)ctrl->opp_table; + } + + /* OPP table is optional */ + ret = dev_pm_opp_of_add_table(dev); + if (ret) { + dev_err(dev, "failed to add DP OPP table\n"); + dev_pm_opp_put_clkname(ctrl->opp_table); + ctrl->opp_table = NULL; + } + init_completion(&ctrl->idle_comp); init_completion(&ctrl->video_comp); @@ -1866,4 +1919,13 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, void dp_ctrl_put(struct dp_ctrl *dp_ctrl) { + struct dp_ctrl_private *ctrl; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + if (ctrl->opp_table) { + dev_pm_opp_of_remove_table(ctrl->dev); + dev_pm_opp_put_clkname(ctrl->opp_table); + ctrl->opp_table = NULL; + } } diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index e175aa3fd3a9..6e971d552911 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -45,7 +45,7 @@ enum { ST_CONNECT_PENDING, ST_CONNECTED, ST_DISCONNECT_PENDING, - ST_SUSPEND_PENDING, + ST_DISPLAY_OFF, ST_SUSPENDED, }; @@ -102,20 +102,20 @@ struct dp_display_private { struct dp_display_mode dp_mode; struct msm_dp dp_display; + bool encoder_mode_set; + /* wait for audio signaling */ struct completion audio_comp; /* event related only access by event thread */ struct mutex event_mutex; wait_queue_head_t event_q; - atomic_t hpd_state; + u32 hpd_state; u32 event_pndx; u32 event_gndx; struct dp_event event_list[DP_EVENT_Q_MAX]; spinlock_t event_lock; - struct completion resume_comp; - struct dp_audio *audio; }; @@ -281,13 +281,24 @@ static 
void dp_display_send_hpd_event(struct msm_dp *dp_display) drm_helper_hpd_irq_event(connector->dev); } -static int dp_display_send_hpd_notification(struct dp_display_private *dp, - bool hpd) + +static void dp_display_set_encoder_mode(struct dp_display_private *dp) { - static bool encoder_mode_set; struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private; struct msm_kms *kms = priv->kms; + if (!dp->encoder_mode_set && dp->dp_display.encoder && + kms->funcs->set_encoder_mode) { + kms->funcs->set_encoder_mode(kms, + dp->dp_display.encoder, false); + + dp->encoder_mode_set = true; + } +} + +static int dp_display_send_hpd_notification(struct dp_display_private *dp, + bool hpd) +{ if ((hpd && dp->dp_display.is_connected) || (!hpd && !dp->dp_display.is_connected)) { DRM_DEBUG_DP("HPD already %s\n", (hpd ? "on" : "off")); @@ -300,15 +311,6 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp, dp->dp_display.is_connected = hpd; - if (dp->dp_display.is_connected && dp->dp_display.encoder - && !encoder_mode_set - && kms->funcs->set_encoder_mode) { - kms->funcs->set_encoder_mode(kms, - dp->dp_display.encoder, false); - DRM_DEBUG_DP("set_encoder_mode() Completed\n"); - encoder_mode_set = true; - } - dp_display_send_hpd_event(&dp->dp_display); return 0; @@ -335,6 +337,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp) dp->dp_display.max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ; dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes; + dp_link_reset_phy_params_vx_px(dp->link); rc = dp_ctrl_on_link(dp->ctrl); if (rc) { DRM_ERROR("failed to complete DP link training\n"); @@ -343,7 +346,6 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp) dp_add_event(dp, EV_USER_NOTIFICATION, true, 0); - end: return rc; } @@ -360,12 +362,28 @@ static void dp_display_host_init(struct dp_display_private *dp) if (dp->usbpd->orientation == ORIENTATION_CC2) flip = true; + dp_display_set_encoder_mode(dp); + dp_power_init(dp->power, flip); dp_ctrl_host_init(dp->ctrl, flip); dp_aux_init(dp->aux); dp->core_initialized = true; } +static void dp_display_host_deinit(struct dp_display_private *dp) +{ + if (!dp->core_initialized) { + DRM_DEBUG_DP("DP core not initialized\n"); + return; + } + + dp_ctrl_host_deinit(dp->ctrl); + dp_aux_deinit(dp->aux); + dp_power_deinit(dp->power); + + dp->core_initialized = false; +} + static int dp_display_usbpd_configure_cb(struct device *dev) { int rc = 0; @@ -429,25 +447,42 @@ static void dp_display_handle_video_request(struct dp_display_private *dp) } } -static int dp_display_handle_irq_hpd(struct dp_display_private *dp) +static int dp_display_handle_port_status_changed(struct dp_display_private *dp) { - u32 sink_request; - - sink_request = dp->link->sink_request; + int rc = 0; - if (sink_request & DS_PORT_STATUS_CHANGED) { - dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); - if (dp_display_is_sink_count_zero(dp)) { - DRM_DEBUG_DP("sink count is zero, nothing to do\n"); - return 0; + if (dp_display_is_sink_count_zero(dp)) { + DRM_DEBUG_DP("sink count is zero, nothing to do\n"); + if (dp->hpd_state != ST_DISCONNECTED) { + dp->hpd_state = ST_DISCONNECT_PENDING; + dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); } + } else { + if (dp->hpd_state == ST_DISCONNECTED) { + dp->hpd_state = ST_CONNECT_PENDING; + rc = dp_display_process_hpd_high(dp); + if (rc) + dp->hpd_state = ST_DISCONNECTED; + } + } + + return rc; +} - return dp_display_process_hpd_high(dp);
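
/* A compact restatement of dp_display_handle_port_status_changed()
 * above; this is my reading of the new state flow, not wording from
 * the patch:
 *
 *   sink_count == 0, state != ST_DISCONNECTED:
 *           state = ST_DISCONNECT_PENDING, notify userspace of unplug
 *   sink_count != 0, state == ST_DISCONNECTED:
 *           state = ST_CONNECT_PENDING, retrain via
 *           dp_display_process_hpd_high(), falling back to
 *           ST_DISCONNECTED on failure
 *   otherwise:
 *           state unchanged
 */
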
+static int dp_display_handle_irq_hpd(struct dp_display_private *dp) +{ + u32 sink_request = dp->link->sink_request; + + if (dp->hpd_state == ST_DISCONNECTED) { + if (sink_request & DP_LINK_STATUS_UPDATED) { + DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n"); + return -EINVAL; + } } dp_ctrl_handle_sink_request(dp->ctrl); - if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) + if (sink_request & DP_TEST_LINK_VIDEO_PATTERN) dp_display_handle_video_request(dp); return 0; @@ -456,7 +491,9 @@ static int dp_display_usbpd_attention_cb(struct device *dev) { int rc = 0; + u32 sink_request; struct dp_display_private *dp; + struct dp_usbpd *hpd; if (!dev) { DRM_ERROR("invalid dev\n"); @@ -470,10 +507,17 @@ static int dp_display_usbpd_attention_cb(struct device *dev) return -ENODEV; } + hpd = dp->usbpd; + /* check for any test request issued by sink */ rc = dp_link_process_request(dp->link); - if (!rc) - dp_display_handle_irq_hpd(dp); + if (!rc) { + sink_request = dp->link->sink_request; + if (sink_request & DS_PORT_STATUS_CHANGED) + rc = dp_display_handle_port_status_changed(dp); + else + rc = dp_display_handle_irq_hpd(dp); + } return rc; } @@ -490,8 +534,8 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) mutex_lock(&dp->event_mutex); - state = atomic_read(&dp->hpd_state); - if (state == ST_SUSPEND_PENDING) { + state = dp->hpd_state; + if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) { mutex_unlock(&dp->event_mutex); return 0; } @@ -508,21 +552,23 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) return 0; } - if (state == ST_SUSPENDED) - tout = DP_TIMEOUT_NONE; - - atomic_set(&dp->hpd_state, ST_CONNECT_PENDING); + dp->hpd_state = ST_CONNECT_PENDING; hpd->hpd_high = 1; ret = dp_display_usbpd_configure_cb(&dp->pdev->dev); - if (ret) { /* link train failed */ + if (ret) { /* link train failed */ hpd->hpd_high = 0; - atomic_set(&dp->hpd_state, ST_DISCONNECTED); - } + dp->hpd_state = ST_DISCONNECTED; + + if (ret == -ECONNRESET) { /* cable unplugged */ + dp->core_initialized = false; + } - /* start sanity checking */ - dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout); + } else { + /* start sentinel checking in case of missing uevent */ + dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout); + } mutex_unlock(&dp->event_mutex); @@ -539,10 +585,10 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data) mutex_lock(&dp->event_mutex); - state = atomic_read(&dp->hpd_state); + state = dp->hpd_state; if (state == ST_CONNECT_PENDING) { dp_display_enable(dp, 0); - atomic_set(&dp->hpd_state, ST_CONNECTED); + dp->hpd_state = ST_CONNECTED; } mutex_unlock(&dp->event_mutex); @@ -553,7 +599,14 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data) static void dp_display_handle_plugged_change(struct msm_dp *dp_display, bool plugged) { - if (dp_display->plugged_cb && dp_display->codec_dev) + struct dp_display_private *dp; + + dp = container_of(dp_display, + struct dp_display_private, dp_display); + + /* notify audio subsystem only if sink supports audio */ + if (dp_display->plugged_cb && dp_display->codec_dev && + dp->audio_supported) dp_display->plugged_cb(dp_display->codec_dev, plugged); } @@ -567,12 +620,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) mutex_lock(&dp->event_mutex); - state = atomic_read(&dp->hpd_state); - if (state == ST_SUSPEND_PENDING) { - mutex_unlock(&dp->event_mutex); - return 0; - } - + state = dp->hpd_state; if (state == ST_DISCONNECT_PENDING || state == 
ST_DISCONNECTED) { mutex_unlock(&dp->event_mutex); return 0; @@ -585,7 +633,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) return 0; } - atomic_set(&dp->hpd_state, ST_DISCONNECT_PENDING); + dp->hpd_state = ST_DISCONNECT_PENDING; /* disable HPD plug interrupt until disconnect is done */ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK @@ -599,7 +647,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) */ dp_display_usbpd_disconnect_cb(&dp->pdev->dev); - /* start sanity checking */ + /* start sentinel checking in case of missing uevent */ dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND); /* signal the disconnect event early to ensure proper teardown */ @@ -620,10 +668,10 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data mutex_lock(&dp->event_mutex); - state = atomic_read(&dp->hpd_state); + state = dp->hpd_state; if (state == ST_DISCONNECT_PENDING) { dp_display_disable(dp, 0); - atomic_set(&dp->hpd_state, ST_DISCONNECTED); + dp->hpd_state = ST_DISCONNECTED; } mutex_unlock(&dp->event_mutex); @@ -634,17 +682,21 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) { u32 state; + int ret; mutex_lock(&dp->event_mutex); /* irq_hpd can happen at either connected or disconnected state */ - state = atomic_read(&dp->hpd_state); - if (state == ST_SUSPEND_PENDING) { + state = dp->hpd_state; + if (state == ST_DISPLAY_OFF) { mutex_unlock(&dp->event_mutex); return 0; } - dp_display_usbpd_attention_cb(&dp->pdev->dev); + ret = dp_display_usbpd_attention_cb(&dp->pdev->dev); + if (ret == -ECONNRESET) { /* cable unplugged */ + dp->core_initialized = false; + } mutex_unlock(&dp->event_mutex); @@ -698,7 +750,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) goto error; } - dp->power = dp_power_get(dp->parser); + dp->power = dp_power_get(dev, dp->parser); if (IS_ERR(dp->power)) { rc = PTR_ERR(dp->power); DRM_ERROR("failed to initialize power, rc = %d\n", rc); @@ -798,8 +850,6 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data) if (!rc) dp_display->power_on = true; - /* complete resume_comp regardless it is armed or not */ - complete(&dp->resume_comp); return rc; } @@ -829,7 +879,7 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data) dp_display = g_dp_display; if (!dp_display->power_on) - return -EINVAL; + return 0; /* wait only if audio was enabled */ if (dp_display->audio_enabled) { @@ -1074,7 +1124,7 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) } if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) { - /* delete connect pending event first */ + /* stop sentinel connect pending checking */ dp_del_event(dp, EV_CONNECT_PENDING_TIMEOUT); dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0); } @@ -1151,9 +1201,6 @@ static int dp_display_probe(struct platform_device *pdev) } mutex_init(&dp->event_mutex); - - init_completion(&dp->resume_comp); - g_dp_display = &dp->dp_display; /* Store DP audio handle inside DP display */ @@ -1189,20 +1236,54 @@ static int dp_display_remove(struct platform_device *pdev) static int dp_pm_resume(struct device *dev) { + struct platform_device *pdev = to_platform_device(dev); + struct msm_dp *dp_display = platform_get_drvdata(pdev); + struct dp_display_private *dp; + u32 status; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->event_mutex); + + /* start from 
disconnected state */ + dp->hpd_state = ST_DISCONNECTED; + + /* turn on dp ctrl/phy */ + dp_display_host_init(dp); + + dp_catalog_ctrl_hpd_config(dp->catalog); + + status = dp_catalog_link_is_connected(dp->catalog); + + if (status) + dp->dp_display.is_connected = true; + else + dp->dp_display.is_connected = false; + + mutex_unlock(&dp->event_mutex); + return 0; } static int dp_pm_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); - struct dp_display_private *dp = platform_get_drvdata(pdev); + struct msm_dp *dp_display = platform_get_drvdata(pdev); + struct dp_display_private *dp; - if (!dp) { - DRM_ERROR("DP driver bind failed. Invalid driver data\n"); - return -EINVAL; - } + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->event_mutex); + + if (dp->core_initialized == true) + dp_display_host_deinit(dp); + + dp->hpd_state = ST_SUSPENDED; + + /* host_init will be called at pm_resume */ + dp->core_initialized = false; - atomic_set(&dp->hpd_state, ST_SUSPENDED); + mutex_unlock(&dp->event_mutex); return 0; } @@ -1317,19 +1398,6 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, return 0; } -static int dp_display_wait4resume_done(struct dp_display_private *dp) -{ - int ret = 0; - - reinit_completion(&dp->resume_comp); - if (!wait_for_completion_timeout(&dp->resume_comp, - WAIT_FOR_RESUME_TIMEOUT_JIFFIES)) { - DRM_ERROR("wait4resume_done timedout\n"); - ret = -ETIMEDOUT; - } - return ret; -} - int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) { int rc = 0; @@ -1344,6 +1412,9 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) mutex_lock(&dp_display->event_mutex); + /* stop sentinel checking */ + dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT); + rc = dp_display_set_mode(dp, &dp_display->dp_mode); if (rc) { DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc); @@ -1358,15 +1429,10 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) return rc; } - state = atomic_read(&dp_display->hpd_state); - if (state == ST_SUSPENDED) { - /* start link training */ - dp_add_event(dp_display, EV_HPD_PLUG_INT, 0, 0); - mutex_unlock(&dp_display->event_mutex); + state = dp_display->hpd_state; - /* wait until dp interface is up */ - goto resume_done; - } + if (state == ST_DISPLAY_OFF) + dp_display_host_init(dp_display); dp_display_enable(dp_display, 0); @@ -1377,21 +1443,16 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder) dp_display_unprepare(dp); } - dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT); - - if (state == ST_SUSPEND_PENDING) + /* manual kick off plug event to train link */ + if (state == ST_DISPLAY_OFF) dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0); /* completed connection */ - atomic_set(&dp_display->hpd_state, ST_CONNECTED); + dp_display->hpd_state = ST_CONNECTED; mutex_unlock(&dp_display->event_mutex); return rc; - -resume_done: - dp_display_wait4resume_done(dp_display); - return rc; } int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder) @@ -1415,20 +1476,21 @@ int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder) mutex_lock(&dp_display->event_mutex); + /* stop sentinel checking */ + dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT); + dp_display_disable(dp_display, 0); rc = dp_display_unprepare(dp); if (rc) DRM_ERROR("DP display unprepare failed, rc=%d\n", rc); - dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT); - - state = 
atomic_read(&dp_display->hpd_state); + state = dp_display->hpd_state; if (state == ST_DISCONNECT_PENDING) { /* completed disconnection */ - atomic_set(&dp_display->hpd_state, ST_DISCONNECTED); + dp_display->hpd_state = ST_DISCONNECTED; } else { - atomic_set(&dp_display->hpd_state, ST_SUSPEND_PENDING); + dp_display->hpd_state = ST_DISPLAY_OFF; } mutex_unlock(&dp_display->event_mutex); diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index c811da515fb3..be986da78c4a 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -773,7 +773,8 @@ static int dp_link_process_link_training_request(struct dp_link_private *link) link->request.test_lane_count); link->dp_link.link_params.num_lanes = link->request.test_lane_count; - link->dp_link.link_params.rate = link->request.test_link_rate; + link->dp_link.link_params.rate = + drm_dp_bw_code_to_link_rate(link->request.test_link_rate); return 0; } @@ -869,6 +870,9 @@ static int dp_link_parse_vx_px(struct dp_link_private *link) drm_dp_get_adjust_request_voltage(link->link_status, 0); link->dp_link.phy_params.p_level = drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0); + + link->dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT; + DRM_DEBUG_DP("Requested: v_level = 0x%x, p_level = 0x%x\n", link->dp_link.phy_params.v_level, link->dp_link.phy_params.p_level); @@ -911,7 +915,8 @@ static int dp_link_process_phy_test_pattern_request( link->request.test_lane_count); link->dp_link.link_params.num_lanes = link->request.test_lane_count; - link->dp_link.link_params.rate = link->request.test_link_rate; + link->dp_link.link_params.rate = + drm_dp_bw_code_to_link_rate(link->request.test_link_rate); ret = dp_link_parse_vx_px(link); @@ -939,22 +944,20 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) */ static int dp_link_process_link_status_update(struct dp_link_private *link) { - if (!(get_link_status(link->link_status, - DP_LANE_ALIGN_STATUS_UPDATED) & - DP_LINK_STATUS_UPDATED) || - (drm_dp_clock_recovery_ok(link->link_status, - link->dp_link.link_params.num_lanes) && - drm_dp_channel_eq_ok(link->link_status, - link->dp_link.link_params.num_lanes))) - return -EINVAL; + bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status, + link->dp_link.link_params.num_lanes); - DRM_DEBUG_DP("channel_eq_done = %d, clock_recovery_done = %d\n", - drm_dp_clock_recovery_ok(link->link_status, - link->dp_link.link_params.num_lanes), - drm_dp_clock_recovery_ok(link->link_status, - link->dp_link.link_params.num_lanes)); + bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status, + link->dp_link.link_params.num_lanes); - return 0; + DRM_DEBUG_DP("channel_eq_done = %d, clock_recovery_done = %d\n", + channel_eq_done, clock_recovery_done); + + if (channel_eq_done && clock_recovery_done) + return -EINVAL; + + + return 0; } /** @@ -1156,6 +1159,12 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) return 0; } +void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link) +{ + dp_link->phy_params.v_level = 0; + dp_link->phy_params.p_level = 0; +} + u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) { u32 tbd; diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h index 49811b6221e5..9dd4dd926530 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.h +++ b/drivers/gpu/drm/msm/dp/dp_link.h @@ -135,6 +135,7 @@ static inline u32 dp_link_bit_depth_to_bpc(u32 tbd) } } +void dp_link_reset_phy_params_vx_px(struct dp_link 
*dp_link); u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp); int dp_link_process_request(struct dp_link *dp_link); int dp_link_get_colorimetry_config(struct dp_link *dp_link); diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 18cec4fc5e0b..97dca3e378b7 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -196,6 +196,11 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel, &panel->aux->ddc); if (!dp_panel->edid) { DRM_ERROR("panel edid read failed\n"); + /* check edid read fail is due to unplug */ + if (!dp_catalog_link_is_connected(panel->catalog)) { + rc = -ETIMEDOUT; + goto end; + } /* fail safe edid */ mutex_lock(&connector->dev->mode_config.mutex); diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c index 17c1fc6a2d44..9c4ea00a5f2a 100644 --- a/drivers/gpu/drm/msm/dp/dp_power.c +++ b/drivers/gpu/drm/msm/dp/dp_power.c @@ -8,12 +8,14 @@ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/regulator/consumer.h> +#include <linux/pm_opp.h> #include "dp_power.h" #include "msm_drv.h" struct dp_power_private { struct dp_parser *parser; struct platform_device *pdev; + struct device *dev; struct clk *link_clk_src; struct clk *pixel_provider; struct clk *link_provider; @@ -148,18 +150,51 @@ static int dp_power_clk_deinit(struct dp_power_private *power) return 0; } +static int dp_power_clk_set_link_rate(struct dp_power_private *power, + struct dss_clk *clk_arry, int num_clk, int enable) +{ + u32 rate; + int i, rc = 0; + + for (i = 0; i < num_clk; i++) { + if (clk_arry[i].clk) { + if (clk_arry[i].type == DSS_CLK_PCLK) { + if (enable) + rate = clk_arry[i].rate; + else + rate = 0; + + rc = dev_pm_opp_set_rate(power->dev, rate); + if (rc) + break; + } + + } + } + return rc; +} + static int dp_power_clk_set_rate(struct dp_power_private *power, enum dp_pm_type module, bool enable) { int rc = 0; struct dss_module_power *mp = &power->parser->mp[module]; - if (enable) { - rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk); + if (module == DP_CTRL_PM) { + rc = dp_power_clk_set_link_rate(power, mp->clk_config, mp->num_clk, enable); if (rc) { - DRM_ERROR("failed to set clks rate.\n"); + DRM_ERROR("failed to set link clks rate\n"); return rc; } + } else { + + if (enable) { + rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk); + if (rc) { + DRM_ERROR("failed to set clks rate\n"); + return rc; + } + } } rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable); @@ -349,7 +384,7 @@ int dp_power_deinit(struct dp_power *dp_power) return 0; } -struct dp_power *dp_power_get(struct dp_parser *parser) +struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser) { struct dp_power_private *power; struct dp_power *dp_power; @@ -365,6 +400,7 @@ struct dp_power *dp_power_get(struct dp_parser *parser) power->parser = parser; power->pdev = parser->pdev; + power->dev = dev; dp_power = &power->dp_power; diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h index 76743d755833..7d0327bbc0d5 100644 --- a/drivers/gpu/drm/msm/dp/dp_power.h +++ b/drivers/gpu/drm/msm/dp/dp_power.h @@ -102,6 +102,6 @@ void dp_power_client_deinit(struct dp_power *power); * methods to be called by the client to configure the power related * modueles. 
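 * (A sketch of the link-clock path added in dp_power.c above; my reading,
 * assuming the "ctrl_link" OPP clkname that dp_ctrl_get() registers: for
 * DP_CTRL_PM clocks the rate is now routed through
 * dev_pm_opp_set_rate(power->dev, enable ? rate : 0) rather than a plain
 * clk_set_rate(), so the OPP framework can apply the matching
 * performance-state vote together with the clock rate.)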
*/ -struct dp_power *dp_power_get(struct dp_parser *parser); +struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser); #endif /* _DP_POWER_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h index 43042ff90a19..268602803d9a 100644 --- a/drivers/gpu/drm/msm/dp/dp_reg.h +++ b/drivers/gpu/drm/msm/dp/dp_reg.h @@ -32,6 +32,8 @@ #define DP_DP_IRQ_HPD_INT_ACK (0x00000002) #define DP_DP_HPD_REPLUG_INT_ACK (0x00000004) #define DP_DP_HPD_UNPLUG_INT_ACK (0x00000008) +#define DP_DP_HPD_STATE_STATUS_BITS_MASK (0x0000000F) +#define DP_DP_HPD_STATE_STATUS_BITS_SHIFT (0x1C) #define REG_DP_DP_HPD_INT_MASK (0x0000000C) #define DP_DP_HPD_PLUG_INT_MASK (0x00000001) diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index b17ac6c27554..ab281cba0f08 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -26,6 +26,7 @@ #include "sfpb.xml.h" #include "dsi_cfg.h" #include "msm_kms.h" +#include "msm_gem.h" #define DSI_RESET_TOGGLE_DELAY_MS 20 @@ -113,7 +114,6 @@ struct msm_dsi_host { struct clk *byte_intf_clk; struct opp_table *opp_table; - bool has_opp_table; u32 byte_clk_rate; u32 pixel_clk_rate; @@ -1657,7 +1657,7 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host, return ret; } -static struct mipi_dsi_host_ops dsi_host_ops = { +static const struct mipi_dsi_host_ops dsi_host_ops = { .attach = dsi_host_attach, .detach = dsi_host_detach, .transfer = dsi_host_transfer, @@ -1891,9 +1891,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) return PTR_ERR(msm_host->opp_table); /* OPP table is optional */ ret = dev_pm_opp_of_add_table(&pdev->dev); - if (!ret) { - msm_host->has_opp_table = true; - } else if (ret != -ENODEV) { + if (ret && ret != -ENODEV) { dev_err(&pdev->dev, "invalid OPP table in device tree\n"); dev_pm_opp_put_clkname(msm_host->opp_table); return ret; @@ -1934,8 +1932,7 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host) mutex_destroy(&msm_host->cmd_mutex); mutex_destroy(&msm_host->dev_mutex); - if (msm_host->has_opp_table) - dev_pm_opp_of_remove_table(&msm_host->pdev->dev); + dev_pm_opp_of_remove_table(&msm_host->pdev->dev); dev_pm_opp_put_clkname(msm_host->opp_table); pm_runtime_disable(&msm_host->pdev->dev); } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c index 47403d4f2d28..d1b92d4dc197 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c @@ -192,6 +192,28 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy) { + void __iomem *base = phy->base; + u32 data; + + DBG(""); + + if (dsi_phy_hw_v3_0_is_pll_on(phy)) + pr_warn("Turning OFF PHY while PLL is on\n"); + + dsi_phy_hw_v3_0_config_lpcdrx(phy, false); + data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0); + + /* disable all lanes */ + data &= ~0x1F; + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0); + + /* Turn off all PHY blocks */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x00); + /* make sure phy is turned off */ + wmb(); + + DBG("DSI%d PHY disabled", phy->id); } static int dsi_10nm_phy_init(struct msm_dsi_phy *phy) diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 255b5f5ab2ce..79c034ae075d 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ 
b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -200,7 +200,28 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy) { - /* TODO */ + void __iomem *base = phy->base; + u32 data; + + DBG(""); + + if (dsi_phy_hw_v4_0_is_pll_on(phy)) + pr_warn("Turning OFF PHY while PLL is on\n"); + + dsi_phy_hw_v4_0_config_lpcdrx(phy, false); + data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_CTRL_0); + + /* disable all lanes */ + data &= ~0x1F; + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data); + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0); + + /* Turn off all PHY blocks */ + dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x00); + /* make sure phy is turned off */ + wmb(); + + DBG("DSI%d PHY disabled", phy->id); } static int dsi_7nm_phy_init(struct msm_dsi_phy *phy) diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c index 6ac04fc303f5..e4e9bf04b736 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c @@ -559,6 +559,7 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll) struct pll_10nm_cached_state *cached = &pll_10nm->cached_state; void __iomem *phy_base = pll_10nm->phy_cmn_mmio; u32 val; + int ret; val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE); val &= ~0x3; @@ -573,6 +574,13 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll) val |= cached->pll_mux; pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val); + ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate); + if (ret) { + DRM_DEV_ERROR(&pll_10nm->pdev->dev, + "restore vco rate failed. ret=%d\n", ret); + return ret; + } + DBG("DSI PLL%d", pll_10nm->id); return 0; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c index 6dffd7f4a99b..37a1f996a588 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c @@ -447,7 +447,10 @@ static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll) cached_state->postdiv1 = pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG); cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG); - cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw); + if (dsi_pll_28nm_clk_is_enabled(&pll->clk_hw)) + cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw); + else + cached_state->vco_rate = 0; } static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll) diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c index de0dfb815125..93bf142e4a4e 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c @@ -585,6 +585,7 @@ static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll) struct pll_7nm_cached_state *cached = &pll_7nm->cached_state; void __iomem *phy_base = pll_7nm->phy_cmn_mmio; u32 val; + int ret; val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); val &= ~0x3; @@ -599,6 +600,13 @@ static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll) val |= cached->pll_mux; pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val); + ret = dsi_pll_7nm_vco_set_rate(&pll->clk_hw, pll_7nm->vco_current_rate, pll_7nm->vco_ref_clk_rate); + if (ret) { + DRM_DEV_ERROR(&pll_7nm->pdev->dev, + "restore vco rate failed. 
ret=%d\n", ret); + return ret; + } + DBG("DSI PLL%d", pll_7nm->id); return 0; diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 561bfa48841c..6a326761dc4a 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -55,16 +55,32 @@ static void vblank_put(struct msm_kms *kms, unsigned crtc_mask) } } +static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask) +{ + struct drm_crtc *crtc; + + for_each_crtc_mask(kms->dev, crtc, crtc_mask) + mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]); +} + +static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask) +{ + struct drm_crtc *crtc; + + for_each_crtc_mask_reverse(kms->dev, crtc, crtc_mask) + mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]); +} + static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) { unsigned crtc_mask = BIT(crtc_idx); trace_msm_atomic_async_commit_start(crtc_mask); - mutex_lock(&kms->commit_lock); + lock_crtcs(kms, crtc_mask); if (!(kms->pending_crtc_mask & crtc_mask)) { - mutex_unlock(&kms->commit_lock); + unlock_crtcs(kms, crtc_mask); goto out; } @@ -79,7 +95,6 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) */ trace_msm_atomic_flush_commit(crtc_mask); kms->funcs->flush_commit(kms, crtc_mask); - mutex_unlock(&kms->commit_lock); /* * Wait for flush to complete: @@ -90,9 +105,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) vblank_put(kms, crtc_mask); - mutex_lock(&kms->commit_lock); kms->funcs->complete_commit(kms, crtc_mask); - mutex_unlock(&kms->commit_lock); + unlock_crtcs(kms, crtc_mask); kms->funcs->disable_commit(kms); out: @@ -103,14 +117,13 @@ static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t) { struct msm_pending_timer *timer = container_of(t, struct msm_pending_timer, timer); - struct msm_drm_private *priv = timer->kms->dev->dev_private; - queue_work(priv->wq, &timer->work); + kthread_queue_work(timer->worker, &timer->work); return HRTIMER_NORESTART; } -static void msm_atomic_pending_work(struct work_struct *work) +static void msm_atomic_pending_work(struct kthread_work *work) { struct msm_pending_timer *timer = container_of(work, struct msm_pending_timer, work); @@ -118,14 +131,30 @@ static void msm_atomic_pending_work(struct work_struct *work) msm_atomic_async_commit(timer->kms, timer->crtc_idx); } -void msm_atomic_init_pending_timer(struct msm_pending_timer *timer, +int msm_atomic_init_pending_timer(struct msm_pending_timer *timer, struct msm_kms *kms, int crtc_idx) { timer->kms = kms; timer->crtc_idx = crtc_idx; hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); timer->timer.function = msm_atomic_pending_timer; - INIT_WORK(&timer->work, msm_atomic_pending_work); + + timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx); + if (IS_ERR(timer->worker)) { + int ret = PTR_ERR(timer->worker); + timer->worker = NULL; + return ret; + } + sched_set_fifo(timer->worker->task); + kthread_init_work(&timer->work, msm_atomic_pending_work); + + return 0; +} + +void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer) +{ + if (timer->worker) + kthread_destroy_worker(timer->worker); } static bool can_do_async(struct drm_atomic_state *state, @@ -189,12 +218,11 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) * Ensure any previous (potentially async) commit has * completed: */ + lock_crtcs(kms, crtc_mask); trace_msm_atomic_wait_flush_start(crtc_mask); kms->funcs->wait_flush(kms, crtc_mask); 
trace_msm_atomic_wait_flush_finish(crtc_mask); - mutex_lock(&kms->commit_lock); - /* * Now that there is no in-progress flush, prepare the * current update: @@ -232,8 +260,7 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) } kms->funcs->disable_commit(kms); - mutex_unlock(&kms->commit_lock); - + unlock_crtcs(kms, crtc_mask); /* * At this point, from drm core's perspective, we * are done with the atomic update, so we can just @@ -260,8 +287,7 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) */ trace_msm_atomic_flush_commit(crtc_mask); kms->funcs->flush_commit(kms, crtc_mask); - mutex_unlock(&kms->commit_lock); - + unlock_crtcs(kms, crtc_mask); /* * Wait for flush to complete: */ @@ -271,9 +297,9 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) vblank_put(kms, crtc_mask); - mutex_lock(&kms->commit_lock); + lock_crtcs(kms, crtc_mask); kms->funcs->complete_commit(kms, crtc_mask); - mutex_unlock(&kms->commit_lock); + unlock_crtcs(kms, crtc_mask); kms->funcs->disable_commit(kms); drm_atomic_helper_commit_hw_done(state); diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index ee2e270f464c..85ad0babc326 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -112,6 +112,11 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m) { struct msm_drm_private *priv = dev->dev_private; struct msm_gpu *gpu = priv->gpu; + int ret; + + ret = mutex_lock_interruptible(&priv->mm_lock); + if (ret) + return ret; if (gpu) { seq_printf(m, "Active Objects (%s):\n", gpu->name); @@ -119,7 +124,10 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m) } seq_printf(m, "Inactive Objects:\n"); - msm_gem_describe_objects(&priv->inactive_list, m); + msm_gem_describe_objects(&priv->inactive_dontneed, m); + msm_gem_describe_objects(&priv->inactive_willneed, m); + + mutex_unlock(&priv->mm_lock); return 0; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index aa4509766d64..535a0263ceeb 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -7,6 +7,7 @@ #include <linux/dma-mapping.h> #include <linux/kthread.h> +#include <linux/sched/mm.h> #include <linux/uaccess.h> #include <uapi/linux/sched/types.h> @@ -120,8 +121,8 @@ struct clk *msm_clk_get(struct platform_device *pdev, const char *name) return clk; } -void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name, - const char *dbgname, bool quiet) +static void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name, + const char *dbgname, bool quiet) { struct resource *res; unsigned long size; @@ -180,6 +181,14 @@ u32 msm_readl(const void __iomem *addr) return val; } +void msm_rmw(void __iomem *addr, u32 mask, u32 or) +{ + u32 val = msm_readl(addr); + + val &= ~mask; + msm_writel(val | or, addr); +} + struct msm_vblank_work { struct work_struct work; int crtc_id; @@ -393,7 +402,7 @@ static int msm_init_vram(struct drm_device *dev) return ret; } -static int msm_drm_init(struct device *dev, struct drm_driver *drv) +static int msm_drm_init(struct device *dev, const struct drm_driver *drv) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *ddev; @@ -437,10 +446,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) priv->wq = alloc_ordered_workqueue("msm", 0); - INIT_WORK(&priv->free_work, msm_gem_free_work); - init_llist_head(&priv->free_list); + INIT_LIST_HEAD(&priv->inactive_willneed); + 
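The fs_reclaim_acquire()/might_lock()/fs_reclaim_release() sequence a few lines below (made available by the new <linux/sched/mm.h> include) is a lockdep priming idiom: at init time it records that mm_lock can be taken inside the reclaim path, as the shrinker does, so any GFP_KERNEL allocation made while holding mm_lock is flagged as a potential deadlock immediately, not only when reclaim happens to recurse during testing. The idiom in generic form (a sketch, not code from this patch):

	static void prime_lockdep_reclaim_ordering(struct mutex *lock)
	{
		/* pretend to enter direct reclaim ... */
		fs_reclaim_acquire(GFP_KERNEL);
		/* ... and record that 'lock' may be taken there */
		might_lock(lock);
		fs_reclaim_release(GFP_KERNEL);
	}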
INIT_LIST_HEAD(&priv->inactive_dontneed); + mutex_init(&priv->mm_lock); - INIT_LIST_HEAD(&priv->inactive_list); + /* Teach lockdep about lock ordering wrt. shrinker: */ + fs_reclaim_acquire(GFP_KERNEL); + might_lock(&priv->mm_lock); + fs_reclaim_release(GFP_KERNEL); drm_mode_config_init(ddev); @@ -908,14 +921,9 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data, return -EINVAL; } - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - obj = drm_gem_object_lookup(file, args->handle); if (!obj) { - ret = -ENOENT; - goto unlock; + return -ENOENT; } ret = msm_gem_madvise(obj, args->madv); @@ -924,10 +932,8 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data, ret = 0; } - drm_gem_object_put_locked(obj); + drm_gem_object_put(obj); -unlock: - mutex_unlock(&dev->struct_mutex); return ret; } @@ -984,7 +990,7 @@ static const struct file_operations fops = { .mmap = msm_gem_mmap, }; -static struct drm_driver msm_driver = { +static const struct drm_driver msm_driver = { .driver_features = DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC | diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index c45789f36e48..591c47a654e8 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -174,12 +174,21 @@ struct msm_drm_private { struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */ struct msm_perf_state *perf; - /* list of GEM objects: */ - struct list_head inactive_list; - - /* worker for delayed free of objects: */ - struct work_struct free_work; - struct llist_head free_list; + /* + * Lists of inactive GEM objects. Every bo is either in one of the + * inactive lists (depending on whether or not it is shrinkable) or + * gpu->active_list (for the gpu it is active on[1]) + * + * These lists are protected by mm_lock. If struct_mutex is involved, it + * should be aquired prior to mm_lock. One should *not* hold mm_lock in + * get_pages()/vmap()/etc paths, as they can trigger the shrinker. 
+ * + * [1] if someone ever added support for the old 2d cores, there could be + * more than one gpu object + */ + struct list_head inactive_willneed; /* inactive + !shrinkable */ + struct list_head inactive_dontneed; /* inactive + shrinkable */ + struct mutex mm_lock; struct workqueue_struct *wq; @@ -228,8 +237,9 @@ struct msm_pending_timer; int msm_atomic_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state); -void msm_atomic_init_pending_timer(struct msm_pending_timer *timer, +int msm_atomic_init_pending_timer(struct msm_pending_timer *timer, struct msm_kms *kms, int crtc_idx); +void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer); void msm_atomic_commit_tail(struct drm_atomic_state *state); struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev); void msm_atomic_state_clear(struct drm_atomic_state *state); @@ -266,73 +276,20 @@ void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu); bool msm_use_mmu(struct drm_device *dev); -void msm_gem_submit_free(struct msm_gem_submit *submit); int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file); void msm_gem_shrinker_init(struct drm_device *dev); void msm_gem_shrinker_cleanup(struct drm_device *dev); -int msm_gem_mmap_obj(struct drm_gem_object *obj, - struct vm_area_struct *vma); -int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); -uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); -int msm_gem_get_iova(struct drm_gem_object *obj, - struct msm_gem_address_space *aspace, uint64_t *iova); -int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, - struct msm_gem_address_space *aspace, uint64_t *iova, - u64 range_start, u64 range_end); -int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, - struct msm_gem_address_space *aspace, uint64_t *iova); -uint64_t msm_gem_iova(struct drm_gem_object *obj, - struct msm_gem_address_space *aspace); -void msm_gem_unpin_iova(struct drm_gem_object *obj, - struct msm_gem_address_space *aspace); -struct page **msm_gem_get_pages(struct drm_gem_object *obj); -void msm_gem_put_pages(struct drm_gem_object *obj); -int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, - struct drm_mode_create_dumb *args); -int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, - uint32_t handle, uint64_t *offset); struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); -void *msm_gem_prime_vmap(struct drm_gem_object *obj); -void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); +void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int msm_gem_prime_pin(struct drm_gem_object *obj); void msm_gem_prime_unpin(struct drm_gem_object *obj); -void *msm_gem_get_vaddr(struct drm_gem_object *obj); -void *msm_gem_get_vaddr_active(struct drm_gem_object *obj); -void msm_gem_put_vaddr(struct drm_gem_object *obj); -int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); -int msm_gem_sync_object(struct drm_gem_object *obj, - struct msm_fence_context *fctx, bool exclusive); -void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu); -void msm_gem_active_put(struct drm_gem_object *obj); -int msm_gem_cpu_prep(struct drm_gem_object 
*obj, uint32_t op, ktime_t *timeout); -int msm_gem_cpu_fini(struct drm_gem_object *obj); -void msm_gem_free_object(struct drm_gem_object *obj); -int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, - uint32_t size, uint32_t flags, uint32_t *handle, char *name); -struct drm_gem_object *msm_gem_new(struct drm_device *dev, - uint32_t size, uint32_t flags); -struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev, - uint32_t size, uint32_t flags); -void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, - uint32_t flags, struct msm_gem_address_space *aspace, - struct drm_gem_object **bo, uint64_t *iova); -void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size, - uint32_t flags, struct msm_gem_address_space *aspace, - struct drm_gem_object **bo, uint64_t *iova); -void msm_gem_kernel_put(struct drm_gem_object *bo, - struct msm_gem_address_space *aspace, bool locked); -struct drm_gem_object *msm_gem_import(struct drm_device *dev, - struct dma_buf *dmabuf, struct sg_table *sgt); -void msm_gem_free_work(struct work_struct *work); - -__printf(2, 3) -void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...); int msm_framebuffer_prepare(struct drm_framebuffer *fb, struct msm_gem_address_space *aspace); @@ -422,6 +379,11 @@ static inline int msm_dp_display_disable(struct msm_dp *dp, { return -EINVAL; } +static inline int msm_dp_display_pre_disable(struct msm_dp *dp, + struct drm_encoder *encoder) +{ + return -EINVAL; +} static inline void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder, struct drm_display_mode *mode, @@ -446,8 +408,6 @@ void __init msm_dpu_register(void); void __exit msm_dpu_unregister(void); #ifdef CONFIG_DEBUG_FS -void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); -void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); int msm_debugfs_late_init(struct drm_device *dev); int msm_rd_debugfs_init(struct drm_minor *minor); @@ -477,6 +437,7 @@ void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name, const char *dbgname); void msm_writel(u32 data, void __iomem *addr); u32 msm_readl(const void __iomem *addr); +void msm_rmw(void __iomem *addr, u32 mask, u32 or); struct msm_gpu_submitqueue; int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx); diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 47235f8c5922..678dba1725a6 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -9,6 +9,7 @@ #include <drm/drm_fourcc.h> #include "msm_drv.h" +#include "msm_gem.h" #include "msm_kms.h" extern int msm_gem_mmap_obj(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 2e1bce7c0b19..82cbaf337b50 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -18,8 +18,7 @@ #include "msm_gpu.h" #include "msm_mmu.h" -static void msm_gem_vunmap_locked(struct drm_gem_object *obj); - +static void update_inactive(struct msm_gem_object *msm_obj); static dma_addr_t physaddr(struct drm_gem_object *obj) { @@ -177,15 +176,15 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj) struct msm_gem_object *msm_obj = to_msm_bo(obj); struct page **p; - mutex_lock(&msm_obj->lock); + msm_gem_lock(obj); if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { - mutex_unlock(&msm_obj->lock); + msm_gem_unlock(obj); return 
ERR_PTR(-EBUSY); } p = get_pages(obj); - mutex_unlock(&msm_obj->lock); + msm_gem_unlock(obj); return p; } @@ -251,14 +250,14 @@ static vm_fault_t msm_gem_fault(struct vm_fault *vmf) * vm_ops.open/drm_gem_mmap_obj and close get and put * a reference on obj. So, we dont need to hold one here. */ - err = mutex_lock_interruptible(&msm_obj->lock); + err = msm_gem_lock_interruptible(obj); if (err) { ret = VM_FAULT_NOPAGE; goto out; } if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { - mutex_unlock(&msm_obj->lock); + msm_gem_unlock(obj); return VM_FAULT_SIGBUS; } @@ -279,7 +278,7 @@ static vm_fault_t msm_gem_fault(struct vm_fault *vmf) ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); out_unlock: - mutex_unlock(&msm_obj->lock); + msm_gem_unlock(obj); out: return ret; } @@ -288,10 +287,9 @@ out: static uint64_t mmap_offset(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - struct msm_gem_object *msm_obj = to_msm_bo(obj); int ret; - WARN_ON(!mutex_is_locked(&msm_obj->lock)); + WARN_ON(!msm_gem_is_locked(obj)); /* Make it mmapable */ ret = drm_gem_create_mmap_offset(obj); @@ -307,11 +305,10 @@ static uint64_t mmap_offset(struct drm_gem_object *obj) uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) { uint64_t offset; - struct msm_gem_object *msm_obj = to_msm_bo(obj); - mutex_lock(&msm_obj->lock); + msm_gem_lock(obj); offset = mmap_offset(obj); - mutex_unlock(&msm_obj->lock); + msm_gem_unlock(obj); return offset; } @@ -321,7 +318,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj, struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_vma *vma; - WARN_ON(!mutex_is_locked(&msm_obj->lock)); + WARN_ON(!msm_gem_is_locked(obj)); vma = kzalloc(sizeof(*vma), GFP_KERNEL); if (!vma) @@ -340,7 +337,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_vma *vma; - WARN_ON(!mutex_is_locked(&msm_obj->lock)); + WARN_ON(!msm_gem_is_locked(obj)); list_for_each_entry(vma, &msm_obj->vmas, list) { if (vma->aspace == aspace) @@ -359,33 +356,45 @@ static void del_vma(struct msm_gem_vma *vma) kfree(vma); } -/* Called with msm_obj->lock locked */ +/* Called with msm_obj locked */ static void -put_iova(struct drm_gem_object *obj) +put_iova_spaces(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - struct msm_gem_vma *vma, *tmp; + struct msm_gem_vma *vma; - WARN_ON(!mutex_is_locked(&msm_obj->lock)); + WARN_ON(!msm_gem_is_locked(obj)); - list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { + list_for_each_entry(vma, &msm_obj->vmas, list) { if (vma->aspace) { msm_gem_purge_vma(vma->aspace, vma); msm_gem_close_vma(vma->aspace, vma); } + } +} + +/* Called with msm_obj locked */ +static void +put_iova_vmas(struct drm_gem_object *obj) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct msm_gem_vma *vma, *tmp; + + WARN_ON(!msm_gem_is_locked(obj)); + + list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { del_vma(vma); } } -static int msm_gem_get_iova_locked(struct drm_gem_object *obj, +static int get_iova_locked(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova, u64 range_start, u64 range_end) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_vma *vma; int ret = 0; - WARN_ON(!mutex_is_locked(&msm_obj->lock)); + WARN_ON(!msm_gem_is_locked(obj)); vma = lookup_vma(obj, aspace); @@ -420,7 +429,7 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj, if (msm_obj->flags & 
MSM_BO_MAP_PRIV) prot |= IOMMU_PRIV; - WARN_ON(!mutex_is_locked(&msm_obj->lock)); + WARN_ON(!msm_gem_is_locked(obj)); if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) return -EBUSY; @@ -437,21 +446,16 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj, msm_obj->sgt, obj->size >> PAGE_SHIFT); } -/* - * get iova and pin it. Should have a matching put - * limits iova to specified range (in pages) - */ -int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, +static int get_and_pin_iova_range_locked(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova, u64 range_start, u64 range_end) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); u64 local; int ret; - mutex_lock(&msm_obj->lock); + WARN_ON(!msm_gem_is_locked(obj)); - ret = msm_gem_get_iova_locked(obj, aspace, &local, + ret = get_iova_locked(obj, aspace, &local, range_start, range_end); if (!ret) @@ -460,10 +464,32 @@ int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, if (!ret) *iova = local; - mutex_unlock(&msm_obj->lock); return ret; } +/* + * get iova and pin it. Should have a matching put + * limits iova to specified range (in pages) + */ +int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace, uint64_t *iova, + u64 range_start, u64 range_end) +{ + int ret; + + msm_gem_lock(obj); + ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end); + msm_gem_unlock(obj); + + return ret; +} + +int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace, uint64_t *iova) +{ + return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX); +} + /* get iova and pin it. Should have a matching put */ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova) @@ -478,12 +504,11 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, int msm_gem_get_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); int ret; - mutex_lock(&msm_obj->lock); - ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX); - mutex_unlock(&msm_obj->lock); + msm_gem_lock(obj); + ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX); + msm_gem_unlock(obj); return ret; } @@ -494,35 +519,43 @@ int msm_gem_get_iova(struct drm_gem_object *obj, uint64_t msm_gem_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_vma *vma; - mutex_lock(&msm_obj->lock); + msm_gem_lock(obj); vma = lookup_vma(obj, aspace); - mutex_unlock(&msm_obj->lock); + msm_gem_unlock(obj); WARN_ON(!vma); return vma ? vma->iova : 0; } /* - * Unpin a iova by updating the reference counts. The memory isn't actually - * purged until something else (shrinker, mm_notifier, destroy, etc) decides - * to get rid of it + * Locked variant of msm_gem_unpin_iova() */ -void msm_gem_unpin_iova(struct drm_gem_object *obj, +void msm_gem_unpin_iova_locked(struct drm_gem_object *obj, struct msm_gem_address_space *aspace) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_vma *vma; - mutex_lock(&msm_obj->lock); + WARN_ON(!msm_gem_is_locked(obj)); + vma = lookup_vma(obj, aspace); if (!WARN_ON(!vma)) msm_gem_unmap_vma(aspace, vma); +} - mutex_unlock(&msm_obj->lock); +/* + * Unpin a iova by updating the reference counts. 
The memory isn't actually + * purged until something else (shrinker, mm_notifier, destroy, etc) decides + * to get rid of it + */ +void msm_gem_unpin_iova(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace) +{ + msm_gem_lock(obj); + msm_gem_unpin_iova_locked(obj, aspace); + msm_gem_unlock(obj); } int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, @@ -560,23 +593,22 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) struct msm_gem_object *msm_obj = to_msm_bo(obj); int ret = 0; + WARN_ON(!msm_gem_is_locked(obj)); + if (obj->import_attach) return ERR_PTR(-ENODEV); - mutex_lock(&msm_obj->lock); - if (WARN_ON(msm_obj->madv > madv)) { DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n", msm_obj->madv, madv); - mutex_unlock(&msm_obj->lock); return ERR_PTR(-EBUSY); } /* increment vmap_count *before* vmap() call, so shrinker can - * check vmap_count (is_vunmapable()) outside of msm_obj->lock. + * check vmap_count (is_vunmapable()) outside of msm_obj lock. * This guarantees that we won't try to msm_gem_vunmap() this * same object from within the vmap() call (while we already - * hold msm_obj->lock) + * hold msm_obj lock) */ msm_obj->vmap_count++; @@ -594,20 +626,29 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) } } - mutex_unlock(&msm_obj->lock); return msm_obj->vaddr; fail: msm_obj->vmap_count--; - mutex_unlock(&msm_obj->lock); return ERR_PTR(ret); } -void *msm_gem_get_vaddr(struct drm_gem_object *obj) +void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj) { return get_vaddr(obj, MSM_MADV_WILLNEED); } +void *msm_gem_get_vaddr(struct drm_gem_object *obj) +{ + void *ret; + + msm_gem_lock(obj); + ret = msm_gem_get_vaddr_locked(obj); + msm_gem_unlock(obj); + + return ret; +} + /* * Don't use this! 
It is for the very special case of dumping * submits from GPU hangs or faults, were the bo may already @@ -619,14 +660,21 @@ void *msm_gem_get_vaddr_active(struct drm_gem_object *obj) return get_vaddr(obj, __MSM_MADV_PURGED); } -void msm_gem_put_vaddr(struct drm_gem_object *obj) +void msm_gem_put_vaddr_locked(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - mutex_lock(&msm_obj->lock); + WARN_ON(!msm_gem_is_locked(obj)); WARN_ON(msm_obj->vmap_count < 1); + msm_obj->vmap_count--; - mutex_unlock(&msm_obj->lock); +} + +void msm_gem_put_vaddr(struct drm_gem_object *obj) +{ + msm_gem_lock(obj); + msm_gem_put_vaddr_locked(obj); + msm_gem_unlock(obj); } /* Update madvise status, returns true if not purged, else @@ -636,37 +684,40 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - mutex_lock(&msm_obj->lock); - - WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); + msm_gem_lock(obj); if (msm_obj->madv != __MSM_MADV_PURGED) msm_obj->madv = madv; madv = msm_obj->madv; - mutex_unlock(&msm_obj->lock); + /* If the obj is inactive, we might need to move it + * between inactive lists + */ + if (msm_obj->active_count == 0) + update_inactive(msm_obj); + + msm_gem_unlock(obj); return (madv != __MSM_MADV_PURGED); } -void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass) +void msm_gem_purge(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct msm_gem_object *msm_obj = to_msm_bo(obj); - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); WARN_ON(!is_purgeable(msm_obj)); WARN_ON(obj->import_attach); - mutex_lock_nested(&msm_obj->lock, subclass); + put_iova_spaces(obj); - put_iova(obj); - - msm_gem_vunmap_locked(obj); + msm_gem_vunmap(obj); put_pages(obj); + put_iova_vmas(obj); + msm_obj->madv = __MSM_MADV_PURGED; drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); @@ -681,15 +732,13 @@ void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass) invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1); - - mutex_unlock(&msm_obj->lock); } -static void msm_gem_vunmap_locked(struct drm_gem_object *obj) +void msm_gem_vunmap(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - WARN_ON(!mutex_is_locked(&msm_obj->lock)); + WARN_ON(!msm_gem_is_locked(obj)); if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj))) return; @@ -698,15 +747,6 @@ static void msm_gem_vunmap_locked(struct drm_gem_object *obj) msm_obj->vaddr = NULL; } -void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass) -{ - struct msm_gem_object *msm_obj = to_msm_bo(obj); - - mutex_lock_nested(&msm_obj->lock, subclass); - msm_gem_vunmap_locked(obj); - mutex_unlock(&msm_obj->lock); -} - /* must be called before _move_to_active().. 
*/ int msm_gem_sync_object(struct drm_gem_object *obj, struct msm_fence_context *fctx, bool exclusive) @@ -745,30 +785,48 @@ int msm_gem_sync_object(struct drm_gem_object *obj, void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); + struct msm_drm_private *priv = obj->dev->dev_private; + + might_sleep(); + WARN_ON(!msm_gem_is_locked(obj)); WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); - if (!atomic_fetch_inc(&msm_obj->active_count)) { - msm_obj->gpu = gpu; + if (msm_obj->active_count++ == 0) { + mutex_lock(&priv->mm_lock); list_del_init(&msm_obj->mm_list); list_add_tail(&msm_obj->mm_list, &gpu->active_list); + mutex_unlock(&priv->mm_lock); } } void msm_gem_active_put(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - struct msm_drm_private *priv = obj->dev->dev_private; - WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); + might_sleep(); + WARN_ON(!msm_gem_is_locked(obj)); - if (!atomic_dec_return(&msm_obj->active_count)) { - msm_obj->gpu = NULL; - list_del_init(&msm_obj->mm_list); - list_add_tail(&msm_obj->mm_list, &priv->inactive_list); + if (--msm_obj->active_count == 0) { + update_inactive(msm_obj); } } +static void update_inactive(struct msm_gem_object *msm_obj) +{ + struct msm_drm_private *priv = msm_obj->base.dev->dev_private; + + mutex_lock(&priv->mm_lock); + WARN_ON(msm_obj->active_count != 0); + + list_del_init(&msm_obj->mm_list); + if (msm_obj->madv == MSM_MADV_WILLNEED) + list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed); + else + list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed); + + mutex_unlock(&priv->mm_lock); +} + int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) { bool write = !!(op & MSM_PREP_WRITE); @@ -815,7 +873,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) uint64_t off = drm_vma_node_start(&obj->vma_node); const char *madv; - mutex_lock(&msm_obj->lock); + msm_gem_lock(obj); switch (msm_obj->madv) { case __MSM_MADV_PURGED: @@ -883,7 +941,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) describe_fence(fence, "Exclusive", m); rcu_read_unlock(); - mutex_unlock(&msm_obj->lock); + msm_gem_unlock(obj); } void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) @@ -912,25 +970,16 @@ void msm_gem_free_object(struct drm_gem_object *obj) struct drm_device *dev = obj->dev; struct msm_drm_private *priv = dev->dev_private; - if (llist_add(&msm_obj->freed, &priv->free_list)) - queue_work(priv->wq, &priv->free_work); -} - -static void free_object(struct msm_gem_object *msm_obj) -{ - struct drm_gem_object *obj = &msm_obj->base; - struct drm_device *dev = obj->dev; + mutex_lock(&priv->mm_lock); + list_del(&msm_obj->mm_list); + mutex_unlock(&priv->mm_lock); - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + msm_gem_lock(obj); /* object should not be on active list: */ WARN_ON(is_active(msm_obj)); - list_del(&msm_obj->mm_list); - - mutex_lock(&msm_obj->lock); - - put_iova(obj); + put_iova_spaces(obj); if (obj->import_attach) { WARN_ON(msm_obj->vaddr); @@ -941,41 +990,25 @@ static void free_object(struct msm_gem_object *msm_obj) if (msm_obj->pages) kvfree(msm_obj->pages); + /* dma_buf_detach() grabs resv lock, so we need to unlock + * prior to drm_prime_gem_destroy + */ + msm_gem_unlock(obj); + drm_prime_gem_destroy(obj, msm_obj->sgt); } else { - msm_gem_vunmap_locked(obj); + msm_gem_vunmap(obj); put_pages(obj); + 
msm_gem_unlock(obj); } + put_iova_vmas(obj); + drm_gem_object_release(obj); - mutex_unlock(&msm_obj->lock); kfree(msm_obj); } -void msm_gem_free_work(struct work_struct *work) -{ - struct msm_drm_private *priv = - container_of(work, struct msm_drm_private, free_work); - struct drm_device *dev = priv->dev; - struct llist_node *freed; - struct msm_gem_object *msm_obj, *next; - - while ((freed = llist_del_all(&priv->free_list))) { - - mutex_lock(&dev->struct_mutex); - - llist_for_each_entry_safe(msm_obj, next, - freed, freed) - free_object(msm_obj); - - mutex_unlock(&dev->struct_mutex); - - if (need_resched()) - break; - } -} - /* convenience method to construct a GEM buffer object, and userspace handle */ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, uint32_t size, uint32_t flags, uint32_t *handle, @@ -1037,8 +1070,6 @@ static int msm_gem_new_impl(struct drm_device *dev, if (!msm_obj) return -ENOMEM; - mutex_init(&msm_obj->lock); - msm_obj->flags = flags; msm_obj->madv = MSM_MADV_WILLNEED; @@ -1086,10 +1117,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, struct msm_gem_vma *vma; struct page **pages; - mutex_lock(&msm_obj->lock); + msm_gem_lock(obj); vma = add_vma(obj, NULL); - mutex_unlock(&msm_obj->lock); + msm_gem_unlock(obj); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto fail; @@ -1119,19 +1150,19 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER); } - if (struct_mutex_locked) { - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - list_add_tail(&msm_obj->mm_list, &priv->inactive_list); - } else { - mutex_lock(&dev->struct_mutex); - list_add_tail(&msm_obj->mm_list, &priv->inactive_list); - mutex_unlock(&dev->struct_mutex); - } + mutex_lock(&priv->mm_lock); + /* Initially obj is idle, obj->madv == WILLNEED: */ + list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed); + mutex_unlock(&priv->mm_lock); return obj; fail: - drm_gem_object_put(obj); + if (struct_mutex_locked) { + drm_gem_object_put_locked(obj); + } else { + drm_gem_object_put(obj); + } return ERR_PTR(ret); } @@ -1173,26 +1204,26 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, npages = size / PAGE_SIZE; msm_obj = to_msm_bo(obj); - mutex_lock(&msm_obj->lock); + msm_gem_lock(obj); msm_obj->sgt = sgt; msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); if (!msm_obj->pages) { - mutex_unlock(&msm_obj->lock); + msm_gem_unlock(obj); ret = -ENOMEM; goto fail; } ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); if (ret) { - mutex_unlock(&msm_obj->lock); + msm_gem_unlock(obj); goto fail; } - mutex_unlock(&msm_obj->lock); + msm_gem_unlock(obj); - mutex_lock(&dev->struct_mutex); - list_add_tail(&msm_obj->mm_list, &priv->inactive_list); - mutex_unlock(&dev->struct_mutex); + mutex_lock(&priv->mm_lock); + list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed); + mutex_unlock(&priv->mm_lock); return obj; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index a1bf741b9b89..b3a0a880cbab 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -64,7 +64,6 @@ struct msm_gem_object { * */ struct list_head mm_list; - struct msm_gpu *gpu; /* non-null if active */ /* Transiently in the process of submit ioctl, objects associated * with the submit are on submit->bo_list.. this only lasts for @@ -85,50 +84,124 @@ struct msm_gem_object { * an IOMMU. Also used for stolen/splashscreen buffer. 
*/ struct drm_mm_node *vram_node; - struct mutex lock; /* Protects resources associated with bo */ char name[32]; /* Identifier to print for the debugfs files */ - atomic_t active_count; + int active_count; }; #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) +int msm_gem_mmap_obj(struct drm_gem_object *obj, + struct vm_area_struct *vma); +int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); +uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); +int msm_gem_get_iova(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace, uint64_t *iova); +int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace, uint64_t *iova, + u64 range_start, u64 range_end); +int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace, uint64_t *iova); +int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace, uint64_t *iova); +uint64_t msm_gem_iova(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace); +void msm_gem_unpin_iova_locked(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace); +void msm_gem_unpin_iova(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace); +struct page **msm_gem_get_pages(struct drm_gem_object *obj); +void msm_gem_put_pages(struct drm_gem_object *obj); +int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); +int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, + uint32_t handle, uint64_t *offset); +void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj); +void *msm_gem_get_vaddr(struct drm_gem_object *obj); +void *msm_gem_get_vaddr_active(struct drm_gem_object *obj); +void msm_gem_put_vaddr_locked(struct drm_gem_object *obj); +void msm_gem_put_vaddr(struct drm_gem_object *obj); +int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); +int msm_gem_sync_object(struct drm_gem_object *obj, + struct msm_fence_context *fctx, bool exclusive); +void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu); +void msm_gem_active_put(struct drm_gem_object *obj); +int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); +int msm_gem_cpu_fini(struct drm_gem_object *obj); +void msm_gem_free_object(struct drm_gem_object *obj); +int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, + uint32_t size, uint32_t flags, uint32_t *handle, char *name); +struct drm_gem_object *msm_gem_new(struct drm_device *dev, + uint32_t size, uint32_t flags); +struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev, + uint32_t size, uint32_t flags); +void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova); +void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova); +void msm_gem_kernel_put(struct drm_gem_object *bo, + struct msm_gem_address_space *aspace, bool locked); +struct drm_gem_object *msm_gem_import(struct drm_device *dev, + struct dma_buf *dmabuf, struct sg_table *sgt); +__printf(2, 3) +void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...); +#ifdef CONFIG_DEBUG_FS +void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); +void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); +#endif + 
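The inline helpers that follow replace the old per-object msm_obj->lock mutex (dropped from the struct above) with the GEM object's dma_resv lock, so the shrinker, submit path, and prime import all serialize on a single per-object lock. Because dma_resv wraps a ww_mutex, the submit path can still lock many objects deadlock-free under its ww_acquire ticket, while everything else uses these plain lock/trylock wrappers. A hedged sketch of the caller pattern the _locked/unlocked split produces (the example function is invented):

	static int example_pin(struct drm_gem_object *obj,
			struct msm_gem_address_space *aspace, uint64_t *iova)
	{
		int ret;

		msm_gem_lock(obj);	/* dma_resv_lock(obj->resv, NULL) */
		ret = msm_gem_get_and_pin_iova_locked(obj, aspace, iova);
		msm_gem_unlock(obj);	/* dma_resv_unlock(obj->resv) */

		return ret;
	}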
+static inline void +msm_gem_lock(struct drm_gem_object *obj) +{ + dma_resv_lock(obj->resv, NULL); +} + +static inline bool __must_check +msm_gem_trylock(struct drm_gem_object *obj) +{ + return dma_resv_trylock(obj->resv); +} + +static inline int +msm_gem_lock_interruptible(struct drm_gem_object *obj) +{ + return dma_resv_lock_interruptible(obj->resv, NULL); +} + +static inline void +msm_gem_unlock(struct drm_gem_object *obj) +{ + dma_resv_unlock(obj->resv); +} + +static inline bool +msm_gem_is_locked(struct drm_gem_object *obj) +{ + return dma_resv_is_locked(obj->resv); +} + static inline bool is_active(struct msm_gem_object *msm_obj) { - return atomic_read(&msm_obj->active_count); + WARN_ON(!msm_gem_is_locked(&msm_obj->base)); + return msm_obj->active_count; } static inline bool is_purgeable(struct msm_gem_object *msm_obj) { - WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex)); return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt && !msm_obj->base.dma_buf && !msm_obj->base.import_attach; } static inline bool is_vunmapable(struct msm_gem_object *msm_obj) { + WARN_ON(!msm_gem_is_locked(&msm_obj->base)); return (msm_obj->vmap_count == 0) && msm_obj->vaddr; } -/* The shrinker can be triggered while we hold objA->lock, and need - * to grab objB->lock to purge it. Lockdep just sees these as a single - * class of lock, so we use subclasses to teach it the difference. - * - * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and - * OBJ_LOCK_SHRINKER is used by shrinker. - * - * It is *essential* that we never go down paths that could trigger the - * shrinker for a purgable object. This is ensured by checking that - * msm_obj->madv == MSM_MADV_WILLNEED. - */ -enum msm_gem_lock { - OBJ_LOCK_NORMAL, - OBJ_LOCK_SHRINKER, -}; - -void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass); -void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass); -void msm_gem_free_work(struct work_struct *work); +void msm_gem_purge(struct drm_gem_object *obj); +void msm_gem_vunmap(struct drm_gem_object *obj); /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, * associated with the cmdstream submission for synchronization (and @@ -136,6 +209,7 @@ void msm_gem_free_work(struct work_struct *work); * lasts for the duration of the submit-ioctl. 
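One side note: the unchanged comment above still says a submit "lasts for the duration of the submit-ioctl", but that becomes stale with the struct kref added just below; the ioctl now unconditionally drops its reference on exit, and the retire path holds its own, presumably taken when the job is queued to the GPU (outside this excerpt). The lifetime pattern in outline (sketch):

	/* queue path: keep a reference for the retire worker */
	msm_gem_submit_get(submit);

	/* ioctl exit path (see msm_ioctl_gem_submit further down): */
	msm_gem_submit_put(submit);	/* last put calls __msm_gem_submit_destroy() */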
*/ struct msm_gem_submit { + struct kref ref; struct drm_device *dev; struct msm_gpu *gpu; struct msm_gem_address_space *aspace; @@ -157,7 +231,10 @@ struct msm_gem_submit { uint32_t type; uint32_t size; /* in dwords */ uint64_t iova; + uint32_t offset;/* in dwords */ uint32_t idx; /* cmdstream buffer idx in bos[] */ + uint32_t nr_relocs; + struct drm_msm_gem_submit_reloc *relocs; } *cmd; /* array of size nr_cmds */ struct { uint32_t flags; @@ -169,6 +246,18 @@ struct msm_gem_submit { } bos[]; }; +void __msm_gem_submit_destroy(struct kref *kref); + +static inline void msm_gem_submit_get(struct msm_gem_submit *submit) +{ + kref_get(&submit->ref); +} + +static inline void msm_gem_submit_put(struct msm_gem_submit *submit) +{ + kref_put(&submit->ref, __msm_gem_submit_destroy); +} + /* helper to determine of a buffer in submit should be dumped, used for both * devcoredump and debugfs cmdstream dumping: */ diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 515ef80816a0..9880348a4dc7 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -22,12 +22,19 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages); } -void *msm_gem_prime_vmap(struct drm_gem_object *obj) +int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { - return msm_gem_get_vaddr(obj); + void *vaddr; + + vaddr = msm_gem_get_vaddr(obj); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + dma_buf_map_set_vaddr(map, vaddr); + + return 0; } -void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) { msm_gem_put_vaddr(obj); } diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 482576d7a39a..9d5248be746f 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -6,58 +6,28 @@ #include "msm_drv.h" #include "msm_gem.h" +#include "msm_gpu.h" #include "msm_gpu_trace.h" -static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock) -{ - /* NOTE: we are *closer* to being able to get rid of - * mutex_trylock_recursive().. the msm_gem code itself does - * not need struct_mutex, although codepaths that can trigger - * shrinker are still called in code-paths that hold the - * struct_mutex. - * - * Also, msm_obj->madv is protected by struct_mutex. - * - * The next step is probably split out a seperate lock for - * protecting inactive_list, so that shrinker does not need - * struct_mutex. 
- */ - switch (mutex_trylock_recursive(&dev->struct_mutex)) { - case MUTEX_TRYLOCK_FAILED: - return false; - - case MUTEX_TRYLOCK_SUCCESS: - *unlock = true; - return true; - - case MUTEX_TRYLOCK_RECURSIVE: - *unlock = false; - return true; - } - - BUG(); -} - static unsigned long msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) { struct msm_drm_private *priv = container_of(shrinker, struct msm_drm_private, shrinker); - struct drm_device *dev = priv->dev; struct msm_gem_object *msm_obj; unsigned long count = 0; - bool unlock; - if (!msm_gem_shrinker_lock(dev, &unlock)) - return 0; + mutex_lock(&priv->mm_lock); - list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) { + list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) { + if (!msm_gem_trylock(&msm_obj->base)) + continue; if (is_purgeable(msm_obj)) count += msm_obj->base.size >> PAGE_SHIFT; + msm_gem_unlock(&msm_obj->base); } - if (unlock) - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&priv->mm_lock); return count; } @@ -67,25 +37,24 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) { struct msm_drm_private *priv = container_of(shrinker, struct msm_drm_private, shrinker); - struct drm_device *dev = priv->dev; struct msm_gem_object *msm_obj; unsigned long freed = 0; - bool unlock; - if (!msm_gem_shrinker_lock(dev, &unlock)) - return SHRINK_STOP; + mutex_lock(&priv->mm_lock); - list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) { + list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) { if (freed >= sc->nr_to_scan) break; + if (!msm_gem_trylock(&msm_obj->base)) + continue; if (is_purgeable(msm_obj)) { - msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER); + msm_gem_purge(&msm_obj->base); freed += msm_obj->base.size >> PAGE_SHIFT; } + msm_gem_unlock(&msm_obj->base); } - if (unlock) - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&priv->mm_lock); if (freed > 0) trace_msm_gem_purge(freed << PAGE_SHIFT); @@ -93,33 +62,57 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) return freed; } +/* since we don't know any better, lets bail after a few + * and if necessary the shrinker will be invoked again. + * Seems better than unmapping *everything* + */ +static const int vmap_shrink_limit = 15; + +static unsigned +vmap_shrink(struct list_head *mm_list) +{ + struct msm_gem_object *msm_obj; + unsigned unmapped = 0; + + list_for_each_entry(msm_obj, mm_list, mm_list) { + if (!msm_gem_trylock(&msm_obj->base)) + continue; + if (is_vunmapable(msm_obj)) { + msm_gem_vunmap(&msm_obj->base); + unmapped++; + } + msm_gem_unlock(&msm_obj->base); + + if (unmapped >= vmap_shrink_limit) + break; + } + + return unmapped; +} + static int msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) { struct msm_drm_private *priv = container_of(nb, struct msm_drm_private, vmap_notifier); - struct drm_device *dev = priv->dev; - struct msm_gem_object *msm_obj; - unsigned unmapped = 0; - bool unlock; + struct list_head *mm_lists[] = { + &priv->inactive_dontneed, + &priv->inactive_willneed, + priv->gpu ?
&priv->gpu->active_list : NULL, + NULL, + }; + unsigned idx, unmapped = 0; - if (!msm_gem_shrinker_lock(dev, &unlock)) - return NOTIFY_DONE; + mutex_lock(&priv->mm_lock); - list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) { - if (is_vunmapable(msm_obj)) { - msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER); - /* since we don't know any better, lets bail after a few - * and if necessary the shrinker will be invoked again. - * Seems better than unmapping *everything* - */ - if (++unmapped >= 15) - break; - } + for (idx = 0; mm_lists[idx]; idx++) { + unmapped += vmap_shrink(mm_lists[idx]); + + if (unmapped >= vmap_shrink_limit) + break; } - if (unlock) - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&priv->mm_lock); *(unsigned long *)ptr += unmapped; @@ -131,7 +124,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) /** * msm_gem_shrinker_init - Initialize msm shrinker - * @dev_priv: msm device + * @dev: drm device * * This function registers and sets up the msm shrinker. */ @@ -149,7 +142,7 @@ void msm_gem_shrinker_init(struct drm_device *dev) /** * msm_gem_shrinker_cleanup - Clean up msm shrinker - * @dev_priv: msm device + * @dev: drm device * * This function unregisters the msm shrinker. */ diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index aa5c60a7132d..d04c349d8112 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -42,6 +42,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, if (!submit) return NULL; + kref_init(&submit->ref); submit->dev = dev; submit->aspace = queue->ctx->aspace; submit->gpu = gpu; @@ -60,13 +61,19 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, return submit; } -void msm_gem_submit_free(struct msm_gem_submit *submit) +void __msm_gem_submit_destroy(struct kref *kref) { + struct msm_gem_submit *submit = + container_of(kref, struct msm_gem_submit, ref); + unsigned i; + dma_fence_put(submit->fence); - list_del(&submit->node); put_pid(submit->pid); msm_submitqueue_put(submit->queue); + for (i = 0; i < submit->nr_cmds; i++) + kfree(submit->cmd[i].relocs); + kfree(submit); } @@ -150,13 +157,73 @@ out: return ret; } +static int submit_lookup_cmds(struct msm_gem_submit *submit, + struct drm_msm_gem_submit *args, struct drm_file *file) +{ + unsigned i, sz; + int ret = 0; + + for (i = 0; i < args->nr_cmds; i++) { + struct drm_msm_gem_submit_cmd submit_cmd; + void __user *userptr = + u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); + + ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd)); + if (ret) { + ret = -EFAULT; + goto out; + } + + /* validate input from userspace: */ + switch (submit_cmd.type) { + case MSM_SUBMIT_CMD_BUF: + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + break; + default: + DRM_ERROR("invalid type: %08x\n", submit_cmd.type); + return -EINVAL; + } + + if (submit_cmd.size % 4) { + DRM_ERROR("non-aligned cmdstream buffer size: %u\n", + submit_cmd.size); + ret = -EINVAL; + goto out; + } + + submit->cmd[i].type = submit_cmd.type; + submit->cmd[i].size = submit_cmd.size / 4; + submit->cmd[i].offset = submit_cmd.submit_offset / 4; + submit->cmd[i].idx = submit_cmd.submit_idx; + submit->cmd[i].nr_relocs = submit_cmd.nr_relocs; + + sz = array_size(submit_cmd.nr_relocs, + sizeof(struct drm_msm_gem_submit_reloc)); + /* check for overflow: */ + if (sz == SIZE_MAX) { + ret = -ENOMEM; + goto out; + } + submit->cmd[i].relocs = kmalloc(sz, 
GFP_KERNEL); + if (!submit->cmd[i].relocs) { + ret = -ENOMEM; + goto out; + } + ret = copy_from_user(submit->cmd[i].relocs, userptr, sz); + if (ret) { + ret = -EFAULT; + goto out; + } + } + +out: + return ret; +} + static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i, bool backoff) { struct msm_gem_object *msm_obj = submit->bos[i].obj; if (submit->bos[i].flags & BO_PINNED) - msm_gem_unpin_iova(&msm_obj->base, submit->aspace); + msm_gem_unpin_iova_locked(&msm_obj->base, submit->aspace); if (submit->bos[i].flags & BO_LOCKED) dma_resv_unlock(msm_obj->base.resv); @@ -259,7 +326,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit) uint64_t iova; /* if locking succeeded, pin bo: */ - ret = msm_gem_get_and_pin_iova(&msm_obj->base, + ret = msm_gem_get_and_pin_iova_locked(&msm_obj->base, submit->aspace, &iova); if (ret) @@ -301,7 +368,7 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, /* process the reloc's and patch up the cmdstream as needed: */ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj, - uint32_t offset, uint32_t nr_relocs, uint64_t relocs) + uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs) { uint32_t i, last_offset = 0; uint32_t *ptr; @@ -318,7 +385,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob /* For now, just map the entire thing. Eventually we probably * to do it page-by-page, w/ kmap() if not vmap()d.. */ - ptr = msm_gem_get_vaddr(&obj->base); + ptr = msm_gem_get_vaddr_locked(&obj->base); if (IS_ERR(ptr)) { ret = PTR_ERR(ptr); @@ -327,18 +394,11 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob for (i = 0; i < nr_relocs; i++) { - struct drm_msm_gem_submit_reloc submit_reloc; - void __user *userptr = - u64_to_user_ptr(relocs + (i * sizeof(submit_reloc))); + struct drm_msm_gem_submit_reloc submit_reloc = relocs[i]; uint32_t off; uint64_t iova; bool valid; - if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) { - ret = -EFAULT; - goto out; - } - if (submit_reloc.submit_offset % 4) { DRM_ERROR("non-aligned reloc offset: %u\n", submit_reloc.submit_offset); @@ -376,7 +436,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob } out: - msm_gem_put_vaddr(&obj->base); + msm_gem_put_vaddr_locked(&obj->base); return ret; } @@ -692,7 +752,20 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ret = submit_lookup_objects(submit, args, file); if (ret) - goto out; + goto out_pre_pm; + + ret = submit_lookup_cmds(submit, args, file); + if (ret) + goto out_pre_pm; + + /* + * Thanks to dev_pm_opp opp_table_lock interactions with mm->mmap_sem + * in the resume path, we need to rpm get before we lock objs. + * Which unfortunately might involve powering up the GPU sooner than + * is necessary. But at least in the explicit fencing case, we will + * have already done all the fence waiting.
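+ * + * (Hence the locking order below: pm_runtime_get_sync() first, then + * ww_acquire_init() and the dma_resv locks on the objects, with the + * matching pm_runtime_put() in the "out" exit path.)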
+ */ + pm_runtime_get_sync(&gpu->pdev->dev); /* copy_*_user while holding a ww ticket upsets lockdep */ ww_acquire_init(&submit->ticket, &reservation_ww_class); @@ -710,60 +783,29 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, goto out; for (i = 0; i < args->nr_cmds; i++) { - struct drm_msm_gem_submit_cmd submit_cmd; - void __user *userptr = - u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); struct msm_gem_object *msm_obj; uint64_t iova; - ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd)); - if (ret) { - ret = -EFAULT; - goto out; - } - - /* validate input from userspace: */ - switch (submit_cmd.type) { - case MSM_SUBMIT_CMD_BUF: - case MSM_SUBMIT_CMD_IB_TARGET_BUF: - case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - break; - default: - DRM_ERROR("invalid type: %08x\n", submit_cmd.type); - ret = -EINVAL; - goto out; - } - - ret = submit_bo(submit, submit_cmd.submit_idx, + ret = submit_bo(submit, submit->cmd[i].idx, &msm_obj, &iova, NULL); if (ret) goto out; - if (submit_cmd.size % 4) { - DRM_ERROR("non-aligned cmdstream buffer size: %u\n", - submit_cmd.size); + if (!submit->cmd[i].size || + ((submit->cmd[i].size + submit->cmd[i].offset) > + msm_obj->base.size / 4)) { + DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4); ret = -EINVAL; goto out; } - if (!submit_cmd.size || - ((submit_cmd.size + submit_cmd.submit_offset) > - msm_obj->base.size)) { - DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size); - ret = -EINVAL; - goto out; - } - - submit->cmd[i].type = submit_cmd.type; - submit->cmd[i].size = submit_cmd.size / 4; - submit->cmd[i].iova = iova + submit_cmd.submit_offset; - submit->cmd[i].idx = submit_cmd.submit_idx; + submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4); if (submit->valid) continue; - ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset, - submit_cmd.nr_relocs, submit_cmd.relocs); + ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4, + submit->cmd[i].nr_relocs, submit->cmd[i].relocs); if (ret) goto out; } @@ -800,11 +842,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, out: + pm_runtime_put(&gpu->pdev->dev); +out_pre_pm: submit_cleanup(submit); if (has_ww_ticket) ww_acquire_fini(&submit->ticket); - if (ret) - msm_gem_submit_free(submit); + msm_gem_submit_put(submit); out_unlock: if (ret && (out_fence_fd >= 0)) put_unused_fd(out_fence_fd); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 55d16489d0f3..ab7c167b0623 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -14,6 +14,7 @@ #include <generated/utsrelease.h> #include <linux/string_helpers.h> #include <linux/devfreq.h> +#include <linux/devfreq_cooling.h> #include <linux/devcoredump.h> #include <linux/sched/task.h> @@ -107,9 +108,18 @@ static void msm_devfreq_init(struct msm_gpu *gpu) if (IS_ERR(gpu->devfreq.devfreq)) { DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); gpu->devfreq.devfreq = NULL; + return; } devfreq_suspend_device(gpu->devfreq.devfreq); + + gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, + gpu->devfreq.devfreq); + if (IS_ERR(gpu->cooling)) { + DRM_DEV_ERROR(&gpu->pdev->dev, + "Couldn't register GPU cooling device\n"); + gpu->cooling = NULL; + } } static int enable_pwrrail(struct msm_gpu *gpu) @@ -177,15 +187,12 @@ static int disable_clk(struct msm_gpu *gpu) static int enable_axi(struct msm_gpu *gpu) { - if (gpu->ebi1_clk) - clk_prepare_enable(gpu->ebi1_clk); - return 0; + return clk_prepare_enable(gpu->ebi1_clk); 
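+ /* Note: clk_prepare_enable() and clk_disable_unprepare() accept a NULL + * clk and are no-ops in that case, which is why the explicit ebi1_clk + * checks could be dropped here and in disable_axi(). + */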
} static int disable_axi(struct msm_gpu *gpu) { - if (gpu->ebi1_clk) - clk_disable_unprepare(gpu->ebi1_clk); + clk_disable_unprepare(gpu->ebi1_clk); return 0; } @@ -265,6 +272,22 @@ int msm_gpu_hw_init(struct msm_gpu *gpu) return ret; } +static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring, + uint32_t fence) +{ + struct msm_gem_submit *submit; + + spin_lock(&ring->submit_lock); + list_for_each_entry(submit, &ring->submits, node) { + if (submit->seqno > fence) + break; + + msm_update_fence(submit->ring->fctx, + submit->fence->seqno); + } + spin_unlock(&ring->submit_lock); +} + #ifdef CONFIG_DEV_COREDUMP static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset, size_t count, void *data, size_t datalen) @@ -326,7 +349,9 @@ static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state, if (!state_bo->data) goto out; + msm_gem_lock(&obj->base); ptr = msm_gem_get_vaddr_active(&obj->base); + msm_gem_unlock(&obj->base); if (IS_ERR(ptr)) { kvfree(state_bo->data); state_bo->data = NULL; @@ -411,37 +436,26 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, * Hangcheck detection for locked gpu: */ -static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring, - uint32_t fence) -{ - struct msm_gem_submit *submit; - - list_for_each_entry(submit, &ring->submits, node) { - if (submit->seqno > fence) - break; - - msm_update_fence(submit->ring->fctx, - submit->fence->seqno); - } -} - static struct msm_gem_submit * find_submit(struct msm_ringbuffer *ring, uint32_t fence) { struct msm_gem_submit *submit; - WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex)); - - list_for_each_entry(submit, &ring->submits, node) - if (submit->seqno == fence) + spin_lock(&ring->submit_lock); + list_for_each_entry(submit, &ring->submits, node) { + if (submit->seqno == fence) { + spin_unlock(&ring->submit_lock); return submit; + } + } + spin_unlock(&ring->submit_lock); return NULL; } static void retire_submits(struct msm_gpu *gpu); -static void recover_worker(struct work_struct *work) +static void recover_worker(struct kthread_work *work) { struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); struct drm_device *dev = gpu->dev; @@ -470,14 +484,22 @@ static void recover_worker(struct work_struct *work) put_task_struct(task); } + /* msm_rd_dump_submit() needs bo locked to dump: */ + for (i = 0; i < submit->nr_bos; i++) + msm_gem_lock(&submit->bos[i].obj->base); + if (comm && cmd) { DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n", gpu->name, comm, cmd); msm_rd_dump_submit(priv->hangrd, submit, "offending task: %s (%s)", comm, cmd); - } else + } else { msm_rd_dump_submit(priv->hangrd, submit, NULL); + } + + for (i = 0; i < submit->nr_bos; i++) + msm_gem_unlock(&submit->bos[i].obj->base); } /* Record the crash state */ @@ -523,8 +545,10 @@ static void recover_worker(struct work_struct *work) for (i = 0; i < gpu->nr_rings; i++) { struct msm_ringbuffer *ring = gpu->rb[i]; + spin_lock(&ring->submit_lock); list_for_each_entry(submit, &ring->submits, node) gpu->funcs->submit(gpu, submit); + spin_unlock(&ring->submit_lock); } } @@ -535,7 +559,6 @@ static void recover_worker(struct work_struct *work) static void hangcheck_timer_reset(struct msm_gpu *gpu) { - DBG("%s", gpu->name); mod_timer(&gpu->hangcheck_timer, round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES)); } @@ -544,7 +567,6 @@ static void hangcheck_handler(struct timer_list *t) { struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer); struct drm_device *dev = gpu->dev; - struct 
msm_drm_private *priv = dev->dev_private; struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); uint32_t fence = ring->memptrs->fence; @@ -561,7 +583,7 @@ static void hangcheck_handler(struct timer_list *t) DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n", gpu->name, ring->seqno); - queue_work(priv->wq, &gpu->recover_work); + kthread_queue_work(gpu->worker, &gpu->recover_work); } /* if still more pending work, reset the hangcheck timer: */ @@ -569,7 +591,7 @@ static void hangcheck_handler(struct timer_list *t) hangcheck_timer_reset(gpu); /* workaround for missing irq: */ - queue_work(priv->wq, &gpu->retire_work); + kthread_queue_work(gpu->worker, &gpu->retire_work); } /* @@ -697,56 +719,70 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, stats->alwayson_start, stats->alwayson_end); for (i = 0; i < submit->nr_bos; i++) { - struct msm_gem_object *msm_obj = submit->bos[i].obj; + struct drm_gem_object *obj = &submit->bos[i].obj->base; - msm_gem_active_put(&msm_obj->base); - msm_gem_unpin_iova(&msm_obj->base, submit->aspace); - drm_gem_object_put_locked(&msm_obj->base); + msm_gem_lock(obj); + msm_gem_active_put(obj); + msm_gem_unpin_iova_locked(obj, submit->aspace); + msm_gem_unlock(obj); + drm_gem_object_put(obj); } pm_runtime_mark_last_busy(&gpu->pdev->dev); pm_runtime_put_autosuspend(&gpu->pdev->dev); - msm_gem_submit_free(submit); + + spin_lock(&ring->submit_lock); + list_del(&submit->node); + spin_unlock(&ring->submit_lock); + + msm_gem_submit_put(submit); } static void retire_submits(struct msm_gpu *gpu) { - struct drm_device *dev = gpu->dev; - struct msm_gem_submit *submit, *tmp; int i; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - /* Retire the commits starting with highest priority */ for (i = 0; i < gpu->nr_rings; i++) { struct msm_ringbuffer *ring = gpu->rb[i]; - list_for_each_entry_safe(submit, tmp, &ring->submits, node) { - if (dma_fence_is_signaled(submit->fence)) + while (true) { + struct msm_gem_submit *submit = NULL; + + spin_lock(&ring->submit_lock); + submit = list_first_entry_or_null(&ring->submits, + struct msm_gem_submit, node); + spin_unlock(&ring->submit_lock); + + /* + * If no submit, we are done. If submit->fence hasn't + * been signalled, then later submits are not signalled + * either, so we are also done. + */ + if (submit && dma_fence_is_signaled(submit->fence)) { retire_submit(gpu, ring, submit); + } else { + break; + } } } } -static void retire_worker(struct work_struct *work) +static void retire_worker(struct kthread_work *work) { struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work); - struct drm_device *dev = gpu->dev; int i; for (i = 0; i < gpu->nr_rings; i++) update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence); - mutex_lock(&dev->struct_mutex); retire_submits(gpu); - mutex_unlock(&dev->struct_mutex); } /* call from irq handler to schedule work to retire bo's */ void msm_gpu_retire(struct msm_gpu *gpu) { - struct msm_drm_private *priv = gpu->dev->dev_private; - queue_work(priv->wq, &gpu->retire_work); + kthread_queue_work(gpu->worker, &gpu->retire_work); update_sw_cntrs(gpu); } @@ -766,8 +802,6 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) submit->seqno = ++ring->seqno; - list_add_tail(&submit->node, &ring->submits); - msm_rd_dump_submit(priv->rd, submit, NULL); update_sw_cntrs(gpu); @@ -777,14 +811,9 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) struct drm_gem_object *drm_obj = &msm_obj->base; uint64_t iova; - /* can't happen yet.. 
but when we add 2d support we'll have - * to deal w/ cross-ring synchronization: - */ - WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu)); - /* submit takes a reference to the bo and iova until retired: */ drm_gem_object_get(&msm_obj->base); - msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova); + msm_gem_get_and_pin_iova_locked(&msm_obj->base, submit->aspace, &iova); if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) dma_resv_add_excl_fence(drm_obj->resv, submit->fence); @@ -794,6 +823,16 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) msm_gem_active_get(drm_obj, gpu); } + /* + * ring->submits holds a ref to the submit, to deal with the case + * that a submit completes before msm_ioctl_gem_submit() returns. + */ + msm_gem_submit_get(submit); + + spin_lock(&ring->submit_lock); + list_add_tail(&submit->node, &ring->submits); + spin_unlock(&ring->submit_lock); + gpu->funcs->submit(gpu, submit); priv->lastctx = submit->queue->ctx; @@ -869,10 +908,18 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, gpu->funcs = funcs; gpu->name = name; - INIT_LIST_HEAD(&gpu->active_list); - INIT_WORK(&gpu->retire_work, retire_worker); - INIT_WORK(&gpu->recover_work, recover_worker); + gpu->worker = kthread_create_worker(0, "%s-worker", gpu->name); + if (IS_ERR(gpu->worker)) { + ret = PTR_ERR(gpu->worker); + gpu->worker = NULL; + goto fail; + } + sched_set_fifo_low(gpu->worker->task); + + INIT_LIST_HEAD(&gpu->active_list); + kthread_init_work(&gpu->retire_work, retire_worker); + kthread_init_work(&gpu->recover_work, recover_worker); timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0); @@ -1005,4 +1052,10 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu); msm_gem_address_space_put(gpu->aspace); } + + if (gpu->worker) { + kthread_destroy_worker(gpu->worker); + } + + devfreq_cooling_unregister(gpu->cooling); } diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 6c9e1fdc1a76..d7cd02cd2109 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -94,7 +94,10 @@ struct msm_gpu { struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS]; int nr_rings; - /* list of GEM active objects: */ + /* + * List of GEM active objects on this gpu. Protected by + * msm_drm_private::mm_lock + */ struct list_head active_list; /* does gpu need hw_init? 
*/ @@ -103,9 +106,6 @@ struct msm_gpu { /* number of GPU hangs (for all contexts) */ int global_faults; - /* worker for handling active-list retiring: */ - struct work_struct retire_work; - void __iomem *mmio; int irq; @@ -134,7 +134,15 @@ struct msm_gpu { #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */ #define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD) struct timer_list hangcheck_timer; - struct work_struct recover_work; + + /* work for handling GPU recovery: */ + struct kthread_work recover_work; + + /* work for handling active-list retiring: */ + struct kthread_work retire_work; + + /* worker for retire/recover: */ + struct kthread_worker *worker; struct drm_gem_object *memptrs_bo; @@ -147,6 +155,8 @@ struct msm_gpu { struct msm_gpu_state *crashstate; /* True if the hardware supports expanded apriv (a650 and newer) */ bool hw_apriv; + + struct thermal_cooling_device *cooling; }; static inline struct msm_gpu *dev_to_gpu(struct device *dev) @@ -246,10 +256,7 @@ static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg) static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or) { - uint32_t val = gpu_read(gpu, reg); - - val &= ~mask; - gpu_write(gpu, reg, val | or); + msm_rmw(gpu->mmio + (reg << 2), mask, or); } static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi) diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 1cbef6b200b7..d8151a89e163 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -136,7 +136,8 @@ struct msm_kms; */ struct msm_pending_timer { struct hrtimer timer; - struct work_struct work; + struct kthread_work work; + struct kthread_worker *worker; struct msm_kms *kms; unsigned crtc_idx; }; @@ -155,21 +156,37 @@ struct msm_kms { * For async commit, where ->flush_commit() and later happens * from the crtc's pending_timer close to end of the frame: */ - struct mutex commit_lock; + struct mutex commit_lock[MAX_CRTCS]; unsigned pending_crtc_mask; struct msm_pending_timer pending_timers[MAX_CRTCS]; }; -static inline void msm_kms_init(struct msm_kms *kms, +static inline int msm_kms_init(struct msm_kms *kms, const struct msm_kms_funcs *funcs) { - unsigned i; + unsigned i, ret; + + for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) + mutex_init(&kms->commit_lock[i]); - mutex_init(&kms->commit_lock); kms->funcs = funcs; + for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) { + ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i); + if (ret) { + return ret; + } + } + + return 0; +} + +static inline void msm_kms_destroy(struct msm_kms *kms) +{ + unsigned i; + for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) - msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i); + msm_atomic_destroy_pending_timer(&kms->pending_timers[i]); } struct msm_kms *mdp4_kms_init(struct drm_device *dev); @@ -194,4 +211,8 @@ int dpu_mdss_init(struct drm_device *dev); drm_for_each_crtc(crtc, dev) \ for_each_if (drm_crtc_mask(crtc) & (crtc_mask)) +#define for_each_crtc_mask_reverse(dev, crtc, crtc_mask) \ + drm_for_each_crtc_reverse(crtc, dev) \ + for_each_if (drm_crtc_mask(crtc) & (crtc_mask)) + #endif /* __MSM_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index fea30e7aa9e8..659e5cc4b40a 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -333,7 +333,7 @@ static void snapshot_buf(struct msm_rd_state *rd, rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size); - msm_gem_put_vaddr(&obj->base); + 
msm_gem_put_vaddr_locked(&obj->base); } /* called under struct_mutex */ diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 935bf9b1d941..4d2a2a4abef8 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -46,7 +46,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, ring->memptrs_iova = memptrs_iova; INIT_LIST_HEAD(&ring->submits); - spin_lock_init(&ring->lock); + spin_lock_init(&ring->submit_lock); + spin_lock_init(&ring->preempt_lock); snprintf(name, sizeof(name), "gpu-ring-%d", ring->id); diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h index 0987d6bf848c..fe55d4a1aa16 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.h +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h @@ -39,14 +39,25 @@ struct msm_ringbuffer { int id; struct drm_gem_object *bo; uint32_t *start, *end, *cur, *next; + + /* + * List of in-flight submits on this ring. Protected by submit_lock. + */ struct list_head submits; + spinlock_t submit_lock; + uint64_t iova; uint32_t seqno; uint32_t hangcheck_fence; struct msm_rbmemptrs *memptrs; uint64_t memptrs_iova; struct msm_fence_context *fctx; - spinlock_t lock; + + /* + * preempt_lock protects preemption and serializes wptr updates against + * preemption. Can be acquired from irq context. + */ + spinlock_t preempt_lock; }; struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 35122aef037b..6faf17b6408d 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -289,7 +289,7 @@ static irqreturn_t mxsfb_irq_handler(int irq, void *data) DEFINE_DRM_GEM_CMA_FOPS(fops); -static struct drm_driver mxsfb_driver = { +static const struct drm_driver mxsfb_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .irq_handler = mxsfb_irq_handler, .irq_preinstall = mxsfb_irq_disable, diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index 9040835289a8..a6b3d6e84c52 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -486,6 +486,13 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane, writel(ctrl, mxsfb->base + LCDC_AS_CTRL); } +static bool mxsfb_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) +{ + return modifier == DRM_FORMAT_MOD_LINEAR; +} + static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = { .atomic_check = mxsfb_plane_atomic_check, .atomic_update = mxsfb_plane_primary_atomic_update, @@ -497,6 +504,7 @@ static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = { }; static const struct drm_plane_funcs mxsfb_plane_funcs = { + .format_mod_supported = mxsfb_format_mod_supported, .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_cleanup, diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 5dec1e5694b7..9436310d0854 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -6,6 +6,7 @@ config DRM_NOUVEAU select FW_LOADER select DRM_KMS_HELPER select DRM_TTM + select DRM_TTM_HELPER select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT select X86_PLATFORM_DEVICES if ACPI && X86 diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c
b/drivers/gpu/drm/nouveau/nouveau_bo.c index 8133377d865d..62a4fdffd0ae 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -861,96 +861,6 @@ nouveau_bo_move_init(struct nouveau_drm *drm) NV_INFO(drm, "MM: using %s for buffer copies\n", name); } -static int -nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_reg) -{ - struct ttm_place placement_memtype = { - .fpfn = 0, - .lpfn = 0, - .mem_type = TTM_PL_TT, - .flags = 0 - }; - struct ttm_placement placement; - struct ttm_resource tmp_reg; - int ret; - - placement.num_placement = placement.num_busy_placement = 1; - placement.placement = placement.busy_placement = &placement_memtype; - - tmp_reg = *new_reg; - tmp_reg.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx); - if (ret) - return ret; - - ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); - if (ret) - goto out; - - ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg); - if (ret) - goto out; - - ret = nouveau_bo_move_m2mf(bo, true, ctx, &tmp_reg); - if (ret) - goto out; - - ret = ttm_bo_wait_ctx(bo, ctx); - if (ret) - goto out; - - nouveau_ttm_tt_unbind(bo->bdev, bo->ttm); - ttm_resource_free(bo, &bo->mem); - ttm_bo_assign_mem(bo, &tmp_reg); -out: - ttm_resource_free(bo, &tmp_reg); - return ret; -} - -static int -nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_reg) -{ - struct ttm_place placement_memtype = { - .fpfn = 0, - .lpfn = 0, - .mem_type = TTM_PL_TT, - .flags = 0 - }; - struct ttm_placement placement; - struct ttm_resource tmp_reg; - int ret; - - placement.num_placement = placement.num_busy_placement = 1; - placement.placement = placement.busy_placement = &placement_memtype; - - tmp_reg = *new_reg; - tmp_reg.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx); - if (ret) - return ret; - - ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); - if (unlikely(ret != 0)) - return ret; - - ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg); - if (unlikely(ret != 0)) - return ret; - - ttm_bo_assign_mem(bo, &tmp_reg); - ret = nouveau_bo_move_m2mf(bo, true, ctx, new_reg); - if (ret) - goto out; - -out: - ttm_resource_free(bo, &tmp_reg); - return ret; -} - static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *new_reg) @@ -1023,7 +933,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, static int nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_resource *new_reg) + struct ttm_resource *new_reg, + struct ttm_place *hop) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); @@ -1031,6 +942,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, struct nouveau_drm_tile *new_tile = NULL; int ret = 0; + if ((old_reg->mem_type == TTM_PL_SYSTEM && + new_reg->mem_type == TTM_PL_VRAM) || + (old_reg->mem_type == TTM_PL_VRAM && + new_reg->mem_type == TTM_PL_SYSTEM)) { + hop->fpfn = 0; + hop->lpfn = 0; + hop->mem_type = TTM_PL_TT; + hop->flags = 0; + return -EMULTIHOP; + } + if (new_reg->mem_type == TTM_PL_TT) { ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg); if (ret) @@ -1073,15 +995,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, /* Hardware assisted copy. 
*/ if (drm->ttm.move) { - if (new_reg->mem_type == TTM_PL_SYSTEM) - ret = nouveau_bo_move_flipd(bo, evict, ctx, - new_reg); - else if (old_reg->mem_type == TTM_PL_SYSTEM) - ret = nouveau_bo_move_flips(bo, evict, ctx, - new_reg); - else - ret = nouveau_bo_move_m2mf(bo, evict, ctx, - new_reg); + ret = nouveau_bo_move_m2mf(bo, evict, ctx, + new_reg); if (!ret) goto out; } @@ -1142,7 +1057,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) struct nvkm_device *device = nvxx_device(&drm->client.device); struct nouveau_mem *mem = nouveau_mem(reg); struct nvif_mmu *mmu = &drm->client.mmu; - const u8 type = mmu->type[drm->ttm.type_vram].type; int ret; mutex_lock(&drm->ttm.io_reserve_mutex); @@ -1175,7 +1089,7 @@ retry: /* Some BARs do not support being ioremapped WC */ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && - type & NVIF_MEM_UNCACHED) + mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED) reg->bus.caching = ttm_uncached; else reg->bus.caching = ttm_write_combined; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index 641ef6298a0e..6045b85a762a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -39,8 +39,6 @@ struct nouveau_bo { unsigned mode; struct nouveau_drm_tile *tile; - - struct ttm_bo_kmap_obj dma_buf_vmap; }; static inline struct nouveau_bo * diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index dd51cd0ae20c..787d05eefd9c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -24,6 +24,8 @@ * */ +#include <drm/drm_gem_ttm_helper.h> + #include "nouveau_drv.h" #include "nouveau_dma.h" #include "nouveau_fence.h" @@ -176,8 +178,8 @@ const struct drm_gem_object_funcs nouveau_gem_object_funcs = { .pin = nouveau_gem_prime_pin, .unpin = nouveau_gem_prime_unpin, .get_sg_table = nouveau_gem_prime_get_sg_table, - .vmap = nouveau_gem_prime_vmap, - .vunmap = nouveau_gem_prime_vunmap, + .vmap = drm_gem_ttm_vmap, + .vunmap = drm_gem_ttm_vunmap, }; int diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h index b35c180322e2..3b919c7c931c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.h +++ b/drivers/gpu/drm/nouveau/nouveau_gem.h @@ -37,7 +37,5 @@ extern void nouveau_gem_prime_unpin(struct drm_gem_object *); extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *); extern struct drm_gem_object *nouveau_gem_prime_import_sg_table( struct drm_device *, struct dma_buf_attachment *, struct sg_table *); -extern void *nouveau_gem_prime_vmap(struct drm_gem_object *); -extern void nouveau_gem_prime_vunmap(struct drm_gem_object *, void *); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index a8264aebf3d4..2f16b5249283 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -35,26 +35,6 @@ struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj) return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages); } -void *nouveau_gem_prime_vmap(struct drm_gem_object *obj) -{ - struct nouveau_bo *nvbo = nouveau_gem_object(obj); - int ret; - - ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages, - &nvbo->dma_buf_vmap); - if (ret) - return ERR_PTR(ret); - - return nvbo->dma_buf_vmap.virtual; -} - -void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - struct nouveau_bo *nvbo = 
nouveau_gem_object(obj); - - ttm_bo_kunmap(&nvbo->dma_buf_vmap); -} - struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg) diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c index 8b25367917ca..ca1f8463cff5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c @@ -58,9 +58,10 @@ nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *base, /** * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory - * @subdev subdevice that will use that firmware - * @fwname name of firmware file to load - * @fw firmware structure to load to + * @subdev: subdevice that will use that firmware + * @fwname: name of firmware file to load + * @ver: firmware version to load + * @fw: firmware structure to load to * * Use this function to load firmware files in the form nvidia/chip/fwname.bin. * Firmware files released by NVIDIA will always follow this format. @@ -98,7 +99,7 @@ nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver, return -ENOENT; } -/** +/* * nvkm_firmware_put - release firmware loaded with nvkm_firmware_get */ void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c index 350f10a3de37..2ec84b8a3b3a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c @@ -123,7 +123,6 @@ pll_map(struct nvkm_bios *bios) case NV_20: case NV_30: return nv04_pll_mapping; - break; case NV_40: return nv40_pll_mapping; case NV_50: diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c index efa50274df97..4884eb4a9221 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c @@ -140,17 +140,14 @@ mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src) break; case nv_clk_src_mem: return 0; - break; case nv_clk_src_vdec: P = (read_div(clk) & 0x00000700) >> 8; switch (mast & 0x00400000) { case 0x00400000: return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P; - break; default: return 500000 >> P; - break; } break; default: diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c index 2ccb4b6be153..7b1eb44ff3da 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c @@ -171,7 +171,6 @@ nv50_ram_timing_read(struct nv50_ram *ram, u32 *timing) break; default: return -ENOSYS; - break; } T(WR) = ((timing[1] >> 24) & 0xff) - 1 - T(CWL); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c index e01746ce9fc4..1156634533f9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c @@ -90,7 +90,6 @@ gk104_top_oneinit(struct nvkm_top *top) case 0x00000010: B_(NVDEC ); break; case 0x00000013: B_(CE ); break; case 0x00000014: C_(GSP ); break; - break; default: break; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index faca5c873bde..e39ce0c0c9a9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -2,7 +2,7 @@ /* * Generic DSI Command Mode panel driver * - * Copyright (C) 2013 Texas Instruments 
Incorporated - http://www.ti.com/ + * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig index 2658c521b702..e11b258a2294 100644 --- a/drivers/gpu/drm/omapdrm/dss/Kconfig +++ b/drivers/gpu/drm/omapdrm/dss/Kconfig @@ -80,7 +80,7 @@ config OMAP5_DSS_HDMI select OMAP2_DSS_HDMI_COMMON help HDMI Interface for OMAP5 and similar cores. This adds the High - Definition Multimedia Interface. See http://www.hdmi.org/ for HDMI + Definition Multimedia Interface. See https://www.hdmi.org/ for HDMI specification. config OMAP2_DSS_SDI @@ -101,7 +101,7 @@ config OMAP2_DSS_DSI DSI is a high speed half-duplex serial interface between the host processor and a peripheral, such as a display or a framebuffer chip. - See http://www.mipi.org/ for DSI specifications. + See https://www.mipi.org/ for DSI specifications. config OMAP2_DSS_MIN_FCK_PER_PCK int "Minimum FCK/PCK ratio (for scaling)" diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index c7650a7c155d..cf50430e6363 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -2,7 +2,7 @@ /* * OMAP Display Subsystem Base * - * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/ */ #include <linux/kernel.h> diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 48593932bddf..599183879caf 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -653,8 +653,11 @@ int dispc_runtime_get(struct dispc_device *dispc) DSSDBG("dispc_runtime_get\n"); r = pm_runtime_get_sync(&dispc->pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_noidle(&dispc->pdev->dev); + return r; + } + return 0; } void dispc_runtime_put(struct dispc_device *dispc) diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.h b/drivers/gpu/drm/omapdrm/dss/dispc.h index 2348faf88768..3f842c1ff81a 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.h +++ b/drivers/gpu/drm/omapdrm/dss/dispc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Archit Taneja <archit@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c index bccb28de5a59..d1f3a93b8efd 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Chandrabhanu Mahapatra <cmahapatra@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index eeccf40bae41..735a4e9027d0 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1112,8 +1112,11 @@ static int dsi_runtime_get(struct dsi_data *dsi) DSSDBG("dsi_runtime_get\n"); r = pm_runtime_get_sync(dsi->dev); - WARN_ON(r < 0); - return r < 0 ? 
r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_noidle(dsi->dev); + return r; + } + return 0; } static void dsi_runtime_put(struct dsi_data *dsi) @@ -1128,13 +1131,12 @@ static void dsi_runtime_put(struct dsi_data *dsi) static void _dsi_print_reset_status(struct dsi_data *dsi) { - u32 l; int b0, b1, b2; /* A dummy read using the SCP interface to any DSIPHY register is * required after DSIPHY reset to complete the reset of the DSI complex * I/O. */ - l = dsi_read_reg(dsi, DSI_DSIPHY_CFG5); + dsi_read_reg(dsi, DSI_DSIPHY_CFG5); if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC) { b0 = 28; @@ -3940,7 +3942,6 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel, void (*callback)(int, void *), void *data) { struct dsi_data *dsi = to_dsi_data(dssdev); - u16 dw, dh; dsi_perf_mark_setup(dsi); @@ -3949,11 +3950,8 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel, dsi->framedone_callback = callback; dsi->framedone_data = data; - dw = dsi->vm.hactive; - dh = dsi->vm.vactive; - #ifdef DSI_PERF_MEASURE - dsi->update_bytes = dw * dh * + dsi->update_bytes = dsi->vm.hactive * dsi->vm.vactive * dsi_get_pixel_size(dsi->pix_fmt) / 8; #endif dsi_update_screen_dispc(dsi); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 6ccbc29c4ce4..d7b2f5bcac16 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -858,8 +858,11 @@ int dss_runtime_get(struct dss_device *dss) DSSDBG("dss_runtime_get\n"); r = pm_runtime_get_sync(&dss->pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_noidle(&dss->pdev->dev); + return r; + } + return 0; } void dss_runtime_put(struct dss_device *dss) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h index 3a40833d3368..c4a4e07f0b99 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi.h +++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h @@ -2,7 +2,7 @@ /* * HDMI driver definition for TI OMAP4 Processor. * - * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef _HDMI_H diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index a14fbf06cb30..8de41e74e8f8 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -2,7 +2,7 @@ /* * HDMI interface DSS driver for TI's OMAP4 family of SoCs. * - * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com/ * Authors: Yong Zhi * Mythri pk <mythripk@ti.com> */ @@ -43,10 +43,10 @@ static int hdmi_runtime_get(struct omap_hdmi *hdmi) DSSDBG("hdmi_runtime_get\n"); r = pm_runtime_get_sync(&hdmi->pdev->dev); - WARN_ON(r < 0); - if (r < 0) + if (WARN_ON(r < 0)) { + pm_runtime_put_noidle(&hdmi->pdev->dev); return r; - + } return 0; } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c index ebf9c96d43ee..43592c1cf081 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c @@ -3,7 +3,7 @@ * * Based on the CEC code from hdmi_ti_4xxx_ip.c from Android. 
* - * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com/ * Authors: Yong Zhi * Mythri pk <mythripk@ti.com> * diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index 751985a2679a..35faa7f028c4 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -2,7 +2,7 @@ /* * HDMI TI81xx, TI38xx, TI OMAP4 etc IP driver Library * - * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com/ * Authors: Yong Zhi * Mythri pk <mythripk@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h index dc64ae2aa300..3c9e1f600fbe 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h @@ -2,7 +2,7 @@ /* * HDMI header definition for OMAP4 HDMI core IP * - * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef _HDMI4_CORE_H_ diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index b738d9750686..54e5cb5aa52d 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -2,7 +2,7 @@ /* * HDMI driver for OMAP5 * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ * * Authors: * Yong Zhi @@ -44,10 +44,10 @@ static int hdmi_runtime_get(struct omap_hdmi *hdmi) DSSDBG("hdmi_runtime_get\n"); r = pm_runtime_get_sync(&hdmi->pdev->dev); - WARN_ON(r < 0); - if (r < 0) + if (WARN_ON(r < 0)) { + pm_runtime_put_noidle(&hdmi->pdev->dev); return r; - + } return 0; } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c index 7dd587035160..6cc2ad7a420c 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c @@ -2,7 +2,7 @@ /* * OMAP5 HDMI CORE IP driver library * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ * Authors: * Yong Zhi * Mythri pk diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h index 65eadefdb3f9..070cbf5fb57d 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h @@ -2,7 +2,7 @@ /* * HDMI driver definition for TI OMAP5 processors. 
* - * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011-2012 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef _HDMI5_CORE_H_ diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c index 00bbf24488c1..5dc200f09c3c 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c @@ -2,7 +2,7 @@ /* * HDMI PHY * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ */ #include <linux/kernel.h> diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c index cf2b000f397f..13bf649aba52 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c @@ -2,7 +2,7 @@ /* * HDMI PLL * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ */ #define DSS_SUBSYS_NAME "HDMIPLL" diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c index 32f45f4f569d..9d830584a762 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c @@ -2,7 +2,7 @@ /* * HDMI wrapper * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ */ #define DSS_SUBSYS_NAME "HDMIWP" diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c index 72a7da7bfff1..f21b5df31213 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index ab19d4af8de7..a48a9a254e33 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index ce21c798cca6..5affdf078134 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * Author: Archit Taneja <archit@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c index 1212f3cc52d1..241a338ace29 100644 --- a/drivers/gpu/drm/omapdrm/dss/pll.c +++ b/drivers/gpu/drm/omapdrm/dss/pll.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ */ #define DSS_SUBSYS_NAME "PLL" diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 5c027c81760f..94cf50d837b0 
100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -361,8 +361,11 @@ static int venc_runtime_get(struct venc_device *venc) DSSDBG("venc_runtime_get\n"); r = pm_runtime_get_sync(&venc->pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_noidle(&venc->pdev->dev); + return r; + } + return 0; } static void venc_runtime_put(struct venc_device *venc) diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c index a612e2696dbc..b72c3ffddc9a 100644 --- a/drivers/gpu/drm/omapdrm/dss/video-pll.c +++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ */ #include <linux/clk.h> diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index de95dc1b861c..47719b92e22b 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 49621b2e1ab5..7d66269ad998 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c index b57fbe8a0ac2..2d3909a37f51 100644 --- a/drivers/gpu/drm/omapdrm/omap_debugfs.c +++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob.clark@linaro.org> */ diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h index 60bb3f9297bc..58a8239d3e69 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h +++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> * Andy Gross <andy.gross@ti.com> * diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 42ec51bb7b1b..ed770caf55c2 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -1,7 +1,7 @@ /* * DMM IOMMU driver support functions for TI OMAP processors. 
* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> * Andy Gross <andy.gross@ti.com> * @@ -306,7 +306,7 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -/** +/* * Get a handle for a DMM transaction */ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm) @@ -344,7 +344,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm) return txn; } -/** +/* * Add region to DMM transaction. If pages or pages[i] is NULL, then the * corresponding slot is cleared (ie. dummy_pa is programmed) */ @@ -392,7 +392,7 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area, return; } -/** +/* * Commit the DMM transaction. */ static int dmm_txn_commit(struct dmm_txn *txn, bool wait) @@ -889,6 +889,7 @@ static int omap_dmm_probe(struct platform_device *dev) &omap_dmm->refill_pa, GFP_KERNEL); if (!omap_dmm->refill_va) { dev_err(&dev->dev, "could not allocate refill memory\n"); + ret = -ENOMEM; goto fail; } diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h index 43c1d096b021..2f8918fe06d5 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> * Andy Gross <andy.gross@ti.com> * diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 2e598b8b72af..42c2ed752095 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ @@ -533,7 +533,7 @@ static const struct file_operations omapdriver_fops = { .llseek = noop_llseek, }; -static struct drm_driver omap_drm_driver = { +static const struct drm_driver omap_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_RENDER, .open = dev_open, diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 8a1fac680138..ae57e7ada876 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index ae4b867a67a3..57e92a4d5937 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 05f30e2618c9..190afc564914 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 
Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 3f6cfc24fb64..42eac6ad12bd 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> */ diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index d8e09792793a..68c271f4250b 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob.clark@linaro.org> */ @@ -580,7 +580,7 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj, /** * omap_gem_dumb_create - create a dumb buffer - * @drm_file: our client file + * @file: our client file * @dev: our device * @args: the requested arguments copied from userspace * @@ -610,6 +610,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, * @file: our drm client file * @dev: drm device * @handle: GEM handle to the object (from dumb_create) + * @offset: memory map offset placeholder * * Do the necessary setup to allow the mapping of the frame buffer * into user memory. We don't have to do much here at the moment. diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index b319fe7f2371..f4cde3a169d8 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob.clark@linaro.org> */ diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c index 382bcdc72ac0..97c83b959f7e 100644 --- a/drivers/gpu/drm/omapdrm/omap_irq.c +++ b/drivers/gpu/drm/omapdrm/omap_irq.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob.clark@linaro.org> */ @@ -100,8 +100,7 @@ int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable) /** * enable_vblank - enable vblank interrupt events - * @dev: DRM device - * @pipe: which irq to enable + * @crtc: DRM CRTC * * Enable vblank interrupts for @crtc. If the device doesn't have * a hardware vblank counter, this routine should be a no-op, since @@ -131,8 +130,7 @@ int omap_irq_enable_vblank(struct drm_crtc *crtc) /** * disable_vblank - disable vblank interrupt events - * @dev: DRM device - * @pipe: which irq to enable + * @crtc: DRM CRTC * * Disable vblank interrupts for @crtc. 
If the device doesn't have * a hardware vblank counter, this routine should be a no-op, since diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 73ec99819a3d..21e0b9785599 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob.clark@linaro.org> */ diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c index 817be3c41863..9e1acbd2c7aa 100644 --- a/drivers/gpu/drm/omapdrm/tcm-sita.c +++ b/drivers/gpu/drm/omapdrm/tcm-sita.c @@ -5,7 +5,7 @@ * Lajos Molnar <molnar@ti.com> * Andy Gross <andy.gross@ti.com> * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index e386524b2d77..b4e021ea30f9 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -8,6 +8,15 @@ config DRM_PANEL menu "Display Panels" depends on DRM && DRM_PANEL +config DRM_PANEL_ABT_Y030XX067A + tristate "ABT Y030XX067A 320x480 LCD panel" + depends on OF && SPI + select REGMAP_SPI + help + Say Y here to enable support for the Asia Better Technology Ltd. + Y030XX067A 320x480 3.0" panel as found in the YLM RG-280M, RG-300 + and RG-99 handheld gaming consoles. + config DRM_PANEL_ARM_VERSATILE tristate "ARM Versatile panel driver" depends on OF @@ -371,6 +380,18 @@ config DRM_PANEL_SAMSUNG_S6E8AA0 select DRM_MIPI_DSI select VIDEOMODE_HELPERS +config DRM_PANEL_SAMSUNG_SOFEF00 + tristate "Samsung sofef00/s6e3fc2x01 OnePlus 6/6T DSI cmd mode panels" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + select VIDEOMODE_HELPERS + help + Say Y or M here if you want to enable support for the Samsung AMOLED + command mode panels found in the OnePlus 6/6T smartphones. 
+ + The panels are 2280x1080@60Hz and 2340x1080@60Hz respectively + config DRM_PANEL_SEIKO_43WVF1G tristate "Seiko 43WVF1G panel" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index d1f8cc572f37..ebbf488c7eac 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_DRM_PANEL_ABT_Y030XX067A) += panel-abt-y030xx067a.o obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o @@ -39,6 +40,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_SPI) += panel-samsung-s6e63m0-spi.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI) += panel-samsung-s6e63m0-dsi.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_SOFEF00) += panel-samsung-sofef00.o obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o obj-$(CONFIG_DRM_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c new file mode 100644 index 000000000000..2d8794d495d0 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Asia Better Technology Ltd. Y030XX067A IPS LCD panel driver + * + * Copyright (C) 2020, Paul Cercueil <paul@crapouillou.net> + * Copyright (C) 2020, Christophe Branchereau <cbranchereau@gmail.com> + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/gpio/consumer.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> + +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +#define REG00_VBRT_CTRL(val) (val) + +#define REG01_COM_DC(val) (val) + +#define REG02_DA_CONTRAST(val) (val) +#define REG02_VESA_SEL(val) ((val) << 5) +#define REG02_COMDC_SW BIT(7) + +#define REG03_VPOSITION(val) (val) +#define REG03_BSMOUNT BIT(5) +#define REG03_COMTST BIT(6) +#define REG03_HPOSITION1 BIT(7) + +#define REG04_HPOSITION1(val) (val) + +#define REG05_CLIP BIT(0) +#define REG05_NVM_VREFRESH BIT(1) +#define REG05_SLFR BIT(2) +#define REG05_SLBRCHARGE(val) ((val) << 3) +#define REG05_PRECHARGE_LEVEL(val) ((val) << 6) + +#define REG06_TEST5 BIT(0) +#define REG06_SLDWN BIT(1) +#define REG06_SLRGT BIT(2) +#define REG06_TEST2 BIT(3) +#define REG06_XPSAVE BIT(4) +#define REG06_GAMMA_SEL(val) ((val) << 5) +#define REG06_NT BIT(7) + +#define REG07_TEST1 BIT(0) +#define REG07_HDVD_POL BIT(1) +#define REG07_CK_POL BIT(2) +#define REG07_TEST3 BIT(3) +#define REG07_TEST4 BIT(4) +#define REG07_480_LINEMASK BIT(5) +#define REG07_AMPTST(val) ((val) << 6) + +#define REG08_SLHRC(val) (val) +#define REG08_CLOCK_DIV(val) ((val) << 2) +#define REG08_PANEL(val) ((val) << 5) + +#define REG09_SUB_BRIGHT_R(val) (val) +#define REG09_NW_NB BIT(6) +#define REG09_IPCON BIT(7) + +#define REG0A_SUB_BRIGHT_B(val) (val) +#define REG0A_PAIR BIT(6) +#define REG0A_DE_SEL BIT(7) + +#define REG0B_MBK_POSITION(val) (val) +#define REG0B_HD_FREERUN BIT(4) +#define REG0B_VD_FREERUN BIT(5) +#define REG0B_YUV2BIN(val) ((val) << 6) + +#define REG0C_CONTRAST_R(val) (val) 
+#define REG0C_DOUBLEREAD BIT(7) + +#define REG0D_CONTRAST_G(val) (val) +#define REG0D_RGB_YUV BIT(7) + +#define REG0E_CONTRAST_B(val) (val) +#define REG0E_PIXELCOLORDRIVE BIT(7) + +#define REG0F_ASPECT BIT(0) +#define REG0F_OVERSCAN(val) ((val) << 1) +#define REG0F_FRAMEWIDTH(val) ((val) << 3) + +#define REG10_BRIGHT(val) (val) + +#define REG11_SIG_GAIN(val) (val) +#define REG11_SIGC_CNTL BIT(6) +#define REG11_SIGC_POL BIT(7) + +#define REG12_COLOR(val) (val) +#define REG12_PWCKSEL(val) ((val) << 6) + +#define REG13_4096LEVEL_CNTL(val) (val) +#define REG13_SL4096(val) ((val) << 4) +#define REG13_LIMITER_CONTROL BIT(7) + +#define REG14_PANEL_TEST(val) (val) + +#define REG15_NVM_LINK0 BIT(0) +#define REG15_NVM_LINK1 BIT(1) +#define REG15_NVM_LINK2 BIT(2) +#define REG15_NVM_LINK3 BIT(3) +#define REG15_NVM_LINK4 BIT(4) +#define REG15_NVM_LINK5 BIT(5) +#define REG15_NVM_LINK6 BIT(6) +#define REG15_NVM_LINK7 BIT(7) + +struct y030xx067a_info { + const struct drm_display_mode *display_modes; + unsigned int num_modes; + u16 width_mm, height_mm; + u32 bus_format, bus_flags; +}; + +struct y030xx067a { + struct drm_panel panel; + struct spi_device *spi; + struct regmap *map; + + const struct y030xx067a_info *panel_info; + + struct regulator *supply; + struct gpio_desc *reset_gpio; +}; + +static inline struct y030xx067a *to_y030xx067a(struct drm_panel *panel) +{ + return container_of(panel, struct y030xx067a, panel); +} + +static const struct reg_sequence y030xx067a_init_sequence[] = { + { 0x00, REG00_VBRT_CTRL(0x7f) }, + { 0x01, REG01_COM_DC(0x3c) }, + { 0x02, REG02_VESA_SEL(0x3) | REG02_DA_CONTRAST(0x1f) }, + { 0x03, REG03_VPOSITION(0x0a) }, + { 0x04, REG04_HPOSITION1(0xd2) }, + { 0x05, REG05_CLIP | REG05_NVM_VREFRESH | REG05_SLBRCHARGE(0x2) }, + { 0x06, REG06_XPSAVE | REG06_NT }, + { 0x07, 0 }, + { 0x08, REG08_PANEL(0x1) | REG08_CLOCK_DIV(0x2) }, + { 0x09, REG09_SUB_BRIGHT_R(0x20) }, + { 0x0a, REG0A_SUB_BRIGHT_B(0x20) }, + { 0x0b, REG0B_HD_FREERUN | REG0B_VD_FREERUN }, + { 0x0c, REG0C_CONTRAST_R(0x10) }, + { 0x0d, REG0D_CONTRAST_G(0x10) }, + { 0x0e, REG0E_CONTRAST_B(0x10) }, + { 0x0f, 0 }, + { 0x10, REG10_BRIGHT(0x7f) }, + { 0x11, REG11_SIGC_CNTL | REG11_SIG_GAIN(0x3f) }, + { 0x12, REG12_COLOR(0x20) | REG12_PWCKSEL(0x1) }, + { 0x13, REG13_4096LEVEL_CNTL(0x8) }, + { 0x14, 0 }, + { 0x15, 0 }, +}; + +static int y030xx067a_prepare(struct drm_panel *panel) +{ + struct y030xx067a *priv = to_y030xx067a(panel); + struct device *dev = &priv->spi->dev; + int err; + + err = regulator_enable(priv->supply); + if (err) { + dev_err(dev, "Failed to enable power supply: %d\n", err); + return err; + } + + /* Reset the chip */ + gpiod_set_value_cansleep(priv->reset_gpio, 1); + usleep_range(1000, 20000); + gpiod_set_value_cansleep(priv->reset_gpio, 0); + usleep_range(1000, 20000); + + err = regmap_multi_reg_write(priv->map, y030xx067a_init_sequence, + ARRAY_SIZE(y030xx067a_init_sequence)); + if (err) { + dev_err(dev, "Failed to init registers: %d\n", err); + goto err_disable_regulator; + } + + msleep(120); + + return 0; + +err_disable_regulator: + regulator_disable(priv->supply); + return err; +} + +static int y030xx067a_unprepare(struct drm_panel *panel) +{ + struct y030xx067a *priv = to_y030xx067a(panel); + + gpiod_set_value_cansleep(priv->reset_gpio, 1); + regulator_disable(priv->supply); + + return 0; +} + +static int y030xx067a_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct y030xx067a *priv = to_y030xx067a(panel); + const struct y030xx067a_info *panel_info = priv->panel_info; + 
struct drm_display_mode *mode; + unsigned int i; + + for (i = 0; i < panel_info->num_modes; i++) { + mode = drm_mode_duplicate(connector->dev, + &panel_info->display_modes[i]); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER; + if (panel_info->num_modes == 1) + mode->type |= DRM_MODE_TYPE_PREFERRED; + + drm_mode_probed_add(connector, mode); + } + + connector->display_info.bpc = 8; + connector->display_info.width_mm = panel_info->width_mm; + connector->display_info.height_mm = panel_info->height_mm; + + drm_display_info_set_bus_formats(&connector->display_info, + &panel_info->bus_format, 1); + connector->display_info.bus_flags = panel_info->bus_flags; + + return panel_info->num_modes; +} + +static const struct drm_panel_funcs y030xx067a_funcs = { + .prepare = y030xx067a_prepare, + .unprepare = y030xx067a_unprepare, + .get_modes = y030xx067a_get_modes, +}; + +static const struct regmap_config y030xx067a_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x15, +}; + +static int y030xx067a_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct y030xx067a *priv; + int err; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->spi = spi; + spi_set_drvdata(spi, priv); + + priv->map = devm_regmap_init_spi(spi, &y030xx067a_regmap_config); + if (IS_ERR(priv->map)) { + dev_err(dev, "Unable to init regmap\n"); + return PTR_ERR(priv->map); + } + + priv->panel_info = of_device_get_match_data(dev); + if (!priv->panel_info) + return -EINVAL; + + priv->supply = devm_regulator_get(dev, "power"); + if (IS_ERR(priv->supply)) { + dev_err(dev, "Failed to get power supply\n"); + return PTR_ERR(priv->supply); + } + + priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(priv->reset_gpio)) { + dev_err(dev, "Failed to get reset GPIO\n"); + return PTR_ERR(priv->reset_gpio); + } + + drm_panel_init(&priv->panel, dev, &y030xx067a_funcs, + DRM_MODE_CONNECTOR_DPI); + + err = drm_panel_of_backlight(&priv->panel); + if (err) + return err; + + drm_panel_add(&priv->panel); + + return 0; +} + +static int y030xx067a_remove(struct spi_device *spi) +{ + struct y030xx067a *priv = spi_get_drvdata(spi); + + drm_panel_remove(&priv->panel); + drm_panel_disable(&priv->panel); + drm_panel_unprepare(&priv->panel); + + return 0; +} + +static const struct drm_display_mode y030xx067a_modes[] = { + { /* 60 Hz */ + .clock = 14400, + .hdisplay = 320, + .hsync_start = 320 + 10, + .hsync_end = 320 + 10 + 37, + .htotal = 320 + 10 + 37 + 33, + .vdisplay = 480, + .vsync_start = 480 + 84, + .vsync_end = 480 + 84 + 20, + .vtotal = 480 + 84 + 20 + 16, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + }, + { /* 50 Hz */ + .clock = 12000, + .hdisplay = 320, + .hsync_start = 320 + 10, + .hsync_end = 320 + 10 + 37, + .htotal = 320 + 10 + 37 + 33, + .vdisplay = 480, + .vsync_start = 480 + 84, + .vsync_end = 480 + 84 + 20, + .vtotal = 480 + 84 + 20 + 16, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + }, +}; + +static const struct y030xx067a_info y030xx067a_info = { + .display_modes = y030xx067a_modes, + .num_modes = ARRAY_SIZE(y030xx067a_modes), + .width_mm = 69, + .height_mm = 51, + .bus_format = MEDIA_BUS_FMT_RGB888_3X8_DELTA, + .bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE | DRM_BUS_FLAG_DE_LOW, +}; + +static const struct of_device_id y030xx067a_of_match[] = { + { .compatible = "abt,y030xx067a", .data = &y030xx067a_info }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, 
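A quick cross-check of the mode table above: both entries share htotal = 320 + 10 + 37 + 33 = 400 and vtotal = 480 + 84 + 20 + 16 = 600, so the 14400 kHz clock gives 14,400,000 / 240,000 = 60 Hz and the 12000 kHz clock gives 50 Hz, matching the comments. A minimal standalone sketch of the same arithmetic drm_mode_vrefresh() performs (plain userspace C, not driver code):

#include <stdio.h>

/* Timing values copied from y030xx067a_modes[] above. */
struct timing { long clock_khz, htotal, vtotal; };

int main(void)
{
        const struct timing modes[] = {
                { 14400, 320 + 10 + 37 + 33, 480 + 84 + 20 + 16 },
                { 12000, 320 + 10 + 37 + 33, 480 + 84 + 20 + 16 },
        };
        unsigned int i;

        for (i = 0; i < 2; i++)
                printf("mode %u: %ld Hz\n", i,
                       modes[i].clock_khz * 1000 /
                       (modes[i].htotal * modes[i].vtotal));
        return 0; /* prints 60 and 50 */
}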
y030xx067a_of_match); + +static struct spi_driver y030xx067a_driver = { + .driver = { + .name = "abt-y030xx067a", + .of_match_table = y030xx067a_of_match, + }, + .probe = y030xx067a_probe, + .remove = y030xx067a_remove, +}; +module_spi_driver(y030xx067a_driver); + +MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>"); +MODULE_AUTHOR("Christophe Branchereau <cbranchereau@gmail.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c index 074e18559b9f..8e84df9a0033 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c @@ -152,7 +152,7 @@ #define ILI9322_GAMMA_7 0x16 #define ILI9322_GAMMA_8 0x17 -/** +/* * enum ili9322_input - the format of the incoming signal to the panel * * The panel can be connected to various input streams and four of them can diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c index d298d780220d..326deb3177b6 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c @@ -13,28 +13,28 @@ static int s6e63m0_spi_dcs_read(struct device *dev, const u8 cmd, u8 *data) { - /* - * FIXME: implement reading DCS commands over SPI so we can - * properly identify which physical panel is connected. - */ - *data = 0; + struct spi_device *spi = to_spi_device(dev); + u16 buf[1]; + u16 rbuf[1]; + int ret; + + /* SPI buffers are always in CPU order */ + buf[0] = (u16)cmd; + ret = spi_write_then_read(spi, buf, 2, rbuf, 2); + dev_dbg(dev, "READ CMD: %04x RET: %04x\n", buf[0], rbuf[0]); + if (!ret) + /* These high 8 bits of the 9 contains the readout */ + *data = (rbuf[0] & 0x1ff) >> 1; - return 0; + return ret; } static int s6e63m0_spi_write_word(struct device *dev, u16 data) { struct spi_device *spi = to_spi_device(dev); - struct spi_transfer xfer = { - .len = 2, - .tx_buf = &data, - }; - struct spi_message msg; - - spi_message_init(&msg); - spi_message_add_tail(&xfer, &msg); - return spi_sync(spi, &msg); + /* SPI buffers are always in CPU order */ + return spi_write(spi, &data, 2); } static int s6e63m0_spi_dcs_write(struct device *dev, const u8 *data, size_t len) @@ -42,10 +42,17 @@ static int s6e63m0_spi_dcs_write(struct device *dev, const u8 *data, size_t len) int ret = 0; dev_dbg(dev, "SPI writing dcs seq: %*ph\n", (int)len, data); + + /* + * This sends 9 bits with the first bit (bit 8) set to 0 + * This indicates that this is a command. Anything after the + * command is data. + */ ret = s6e63m0_spi_write_word(dev, *data); while (!ret && --len) { ++data; + /* This sends 9 bits with the first bit (bit 8) set to 1 */ ret = s6e63m0_spi_write_word(dev, *data | DATA_MASK); } @@ -65,7 +72,8 @@ static int s6e63m0_spi_probe(struct spi_device *spi) int ret; spi->bits_per_word = 9; - spi->mode = SPI_MODE_3; + /* Preserve e.g. 
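The s6e63m0 SPI path above talks to the display controller in 9-bit words, where bit 8 is the data/command flag: the command byte goes out with bit 8 clear, every parameter byte with bit 8 set (DATA_MASK is 0x100 in this driver), and on reads the 8-bit payload sits in the high bits of the 9, hence the (rbuf[0] & 0x1ff) >> 1 extraction. A userspace illustration of the framing (the 0x51/0x7f sequence and the function name are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

#define DATA_MASK 0x100 /* bit 8: 1 = data, 0 = command */

static void dump_9bit_words(const uint8_t *seq, size_t len)
{
        size_t i;

        printf("0x%03x", (unsigned int)seq[0]); /* command byte, D/C = 0 */
        for (i = 1; i < len; i++)               /* parameter bytes, D/C = 1 */
                printf(" 0x%03x", (unsigned int)(seq[i] | DATA_MASK));
        printf("\n");
}

int main(void)
{
        const uint8_t seq[] = { 0x51, 0x7f };

        dump_9bit_words(seq, sizeof(seq));      /* prints: 0x051 0x17f */
        return 0;
}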
SPI_3WIRE setting */ + spi->mode |= SPI_MODE_3; ret = spi_setup(spi); if (ret < 0) { dev_err(dev, "spi setup failed.\n"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c index 3eee67e2d86a..210e70da3a15 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c @@ -16,6 +16,7 @@ #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/regulator/consumer.h> +#include <linux/media-bus-format.h> #include <video/mipi_display.h> @@ -410,6 +411,7 @@ static int s6e63m0_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct drm_display_mode *mode; + static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; mode = drm_mode_duplicate(connector->dev, &default_mode); if (!mode) { @@ -419,6 +421,13 @@ static int s6e63m0_get_modes(struct drm_panel *panel, return -ENOMEM; } + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + drm_display_info_set_bus_formats(&connector->display_info, + &bus_format, 1); + connector->display_info.bus_flags = DRM_BUS_FLAG_DE_LOW | + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; + drm_mode_set_name(mode); mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c new file mode 100644 index 000000000000..8cb1853574bb --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Caleb Connolly <caleb@connolly.tech> + * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree: + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/regulator/consumer.h> +#include <linux/swab.h> +#include <linux/backlight.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +struct sofef00_panel { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct regulator *supply; + struct gpio_desc *reset_gpio; + const struct drm_display_mode *mode; + bool prepared; +}; + +static inline +struct sofef00_panel *to_sofef00_panel(struct drm_panel *panel) +{ + return container_of(panel, struct sofef00_panel, panel); +} + +#define dsi_dcs_write_seq(dsi, seq...) 
do { \ + static const u8 d[] = { seq }; \ + int ret; \ + ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) \ + return ret; \ + } while (0) + +static void sofef00_panel_reset(struct sofef00_panel *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(5000, 6000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(2000, 3000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(12000, 13000); +} + +static int sofef00_panel_on(struct sofef00_panel *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to exit sleep mode: %d\n", ret); + return ret; + } + usleep_range(10000, 11000); + + dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); + + ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + if (ret < 0) { + dev_err(dev, "Failed to set tear on: %d\n", ret); + return ret; + } + + dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); + dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); + dsi_dcs_write_seq(dsi, 0xb0, 0x07); + dsi_dcs_write_seq(dsi, 0xb6, 0x12); + dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); + dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); + dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display on: %d\n", ret); + return ret; + } + + return 0; +} + +static int sofef00_panel_off(struct sofef00_panel *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_off(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display off: %d\n", ret); + return ret; + } + msleep(40); + + ret = mipi_dsi_dcs_enter_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to enter sleep mode: %d\n", ret); + return ret; + } + msleep(160); + + return 0; +} + +static int sofef00_panel_prepare(struct drm_panel *panel) +{ + struct sofef00_panel *ctx = to_sofef00_panel(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + if (ctx->prepared) + return 0; + + ret = regulator_enable(ctx->supply); + if (ret < 0) { + dev_err(dev, "Failed to enable regulator: %d\n", ret); + return ret; + } + + sofef00_panel_reset(ctx); + + ret = sofef00_panel_on(ctx); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + return ret; + } + + ctx->prepared = true; + return 0; +} + +static int sofef00_panel_unprepare(struct drm_panel *panel) +{ + struct sofef00_panel *ctx = to_sofef00_panel(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + if (!ctx->prepared) + return 0; + + ret = sofef00_panel_off(ctx); + if (ret < 0) + dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + + regulator_disable(ctx->supply); + + ctx->prepared = false; + return 0; +} + +static const struct drm_display_mode enchilada_panel_mode = { + .clock = (1080 + 112 + 16 + 36) * (2280 + 36 + 8 + 12) * 60 / 1000, + .hdisplay = 1080, + .hsync_start = 1080 + 112, + .hsync_end = 1080 + 112 + 16, + .htotal = 1080 + 112 + 16 + 36, + .vdisplay = 2280, + .vsync_start = 2280 + 36, + .vsync_end = 2280 + 36 + 8, + .vtotal = 2280 + 36 + 8 + 12, + .width_mm = 68, + .height_mm = 145, +}; + +static const struct drm_display_mode fajita_panel_mode = { + .clock = (1080 + 72 + 16 + 36) * (2340 + 32 + 4 + 18) * 60 / 1000, + .hdisplay = 1080, + 
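The f0,5a,5a ... f0,a5,a5 pairs bracketing the writes in sofef00_panel_on() below look like the access-key pattern commonly seen on Samsung display controllers, where vendor registers (0xb0 and 0xb6 here) only accept writes between an unlock and a lock sequence; the patch itself does not name these registers, so treat that reading as an inference. Condensed into a sketch using the driver's own dsi_dcs_write_seq() macro (the function name is hypothetical):

static int sofef00_write_vendor_regs(struct mipi_dsi_device *dsi)
{
        dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);       /* unlock */
        dsi_dcs_write_seq(dsi, 0xb0, 0x07);             /* vendor registers */
        dsi_dcs_write_seq(dsi, 0xb6, 0x12);
        dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);       /* lock again */
        return 0;
}

Because the macro returns on any mipi_dsi_dcs_write_buffer() failure, callers get error propagation for free, at the cost of only being usable inside functions that return int.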
.hsync_start = 1080 + 72, + .hsync_end = 1080 + 72 + 16, + .htotal = 1080 + 72 + 16 + 36, + .vdisplay = 2340, + .vsync_start = 2340 + 32, + .vsync_end = 2340 + 32 + 4, + .vtotal = 2340 + 32 + 4 + 18, + .width_mm = 68, + .height_mm = 145, +}; + +static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector) +{ + struct drm_display_mode *mode; + struct sofef00_panel *ctx = to_sofef00_panel(panel); + + mode = drm_mode_duplicate(connector->dev, ctx->mode); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs sofef00_panel_panel_funcs = { + .prepare = sofef00_panel_prepare, + .unprepare = sofef00_panel_unprepare, + .get_modes = sofef00_panel_get_modes, +}; + +static int sofef00_panel_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + int err; + u16 brightness; + + brightness = (u16)backlight_get_brightness(bl); + // This panel needs the high and low bytes swapped for the brightness value + brightness = __swab16(brightness); + + err = mipi_dsi_dcs_set_display_brightness(dsi, brightness); + if (err < 0) + return err; + + return 0; +} + +static const struct backlight_ops sofef00_panel_bl_ops = { + .update_status = sofef00_panel_bl_update_status, +}; + +static struct backlight_device * +sofef00_create_backlight(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_PLATFORM, + .brightness = 1023, + .max_brightness = 1023, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &sofef00_panel_bl_ops, &props); +} + +static int sofef00_panel_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct sofef00_panel *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->mode = of_device_get_match_data(dev); + + if (!ctx->mode) { + dev_err(dev, "Missing device mode\n"); + return -ENODEV; + } + + ctx->supply = devm_regulator_get(dev, "vddio"); + if (IS_ERR(ctx->supply)) { + ret = PTR_ERR(ctx->supply); + dev_err(dev, "Failed to get vddio regulator: %d\n", ret); + return ret; + } + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) { + ret = PTR_ERR(ctx->reset_gpio); + dev_warn(dev, "Failed to get reset-gpios: %d\n", ret); + return ret; + } + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + + drm_panel_init(&ctx->panel, dev, &sofef00_panel_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + ctx->panel.backlight = sofef00_create_backlight(dsi); + if (IS_ERR(ctx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), + "Failed to create backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + return ret; + } + + return 0; +} + +static int sofef00_panel_remove(struct mipi_dsi_device *dsi) +{ + struct sofef00_panel *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&ctx->panel); + + return 0; +} + +static const struct of_device_id 
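The byte swap in sofef00_panel_bl_update_status() above exists because mipi_dsi_dcs_set_display_brightness() transmits the 16-bit value low byte first, while this controller expects the opposite order, per the driver's own comment. A standalone check of what the swap does to the driver's 1023 (0x03ff) maximum:

#include <stdint.h>
#include <stdio.h>

static uint16_t swab16(uint16_t x)
{
        return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
        uint16_t bl = 1023; /* max_brightness in the driver above */

        printf("0x%04x -> 0x%04x\n", bl, swab16(bl)); /* 0x03ff -> 0xff03 */
        return 0;
}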
sofef00_panel_of_match[] = { + { // OnePlus 6 / enchilada + .compatible = "samsung,sofef00", + .data = &enchilada_panel_mode, + }, + { // OnePlus 6T / fajita + .compatible = "samsung,s6e3fc2x01", + .data = &fajita_panel_mode, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sofef00_panel_of_match); + +static struct mipi_dsi_driver sofef00_panel_driver = { + .probe = sofef00_panel_probe, + .remove = sofef00_panel_remove, + .driver = { + .name = "panel-oneplus6", + .of_match_table = sofef00_panel_of_match, + }, +}; + +module_mipi_dsi_driver(sofef00_panel_driver); + +MODULE_AUTHOR("Caleb Connolly <caleb@connolly.tech>"); +MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 8b82ec33f08a..597f676a6591 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -39,6 +39,7 @@ #include <drm/drm_panel.h> /** + * struct panel_desc * @modes: Pointer to array of fixed modes appropriate for this panel. If * only one mode then this can just be the address of this the mode. * NOTE: cannot be used with "timings" and also if this is specified @@ -53,6 +54,7 @@ * @delay: Structure containing various delay values for this panel. * @bus_format: See MEDIA_BUS_FMT_... defines. * @bus_flags: See DRM_BUS_FLAG_... defines. + * @connector_type: LVDS, eDP, DSI, DPI, etc. */ struct panel_desc { const struct drm_display_mode *modes; @@ -1327,6 +1329,7 @@ static const struct drm_display_mode boe_nv133fhm_n61_modes = { .vsync_start = 1080 + 3, .vsync_end = 1080 + 3 + 6, .vtotal = 1080 + 3 + 6 + 31, + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, }; /* Also used for boe_nv133fhm_n62 */ @@ -1812,6 +1815,7 @@ static const struct panel_desc edt_etm0700g0dh6 = { }, .bus_format = MEDIA_BUS_FMT_RGB666_1X18, .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, }; static const struct panel_desc edt_etm0700g0bdh6 = { @@ -4673,8 +4677,10 @@ static int __init panel_simple_init(void) if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) { err = mipi_dsi_driver_register(&panel_simple_dsi_driver); - if (err < 0) + if (err < 0) { + platform_driver_unregister(&panel_simple_platform_driver); return err; + } } return 0; diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c index d57ed75a977c..e3791dad6830 100644 --- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c +++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c @@ -76,7 +76,7 @@ struct tpg110 { */ struct drm_panel panel; /** - * @panel_type: the panel mode as detected + * @panel_mode: the panel mode as detected */ const struct tpg110_panel_mode *panel_mode; /** @@ -362,6 +362,7 @@ static int tpg110_enable(struct drm_panel *panel) /** * tpg110_get_modes() - return the appropriate mode * @panel: the panel to get the mode for + * @connector: reference to the central DRM connector control structure * * This currently does not present a forest of modes, instead it * presents the mode that is configured for the system under use, diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index 1daf9322954a..fbcf5edbe367 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -200,7 +200,6 @@ int panfrost_device_init(struct panfrost_device *pfdev) struct resource *res; mutex_init(&pfdev->sched_lock); - 
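The panel_simple_init() change above is a classic module-init unwind fix: if the second registration fails, the first must be unregistered, or the module exits half-loaded with a dangling platform driver. The general shape, with hypothetical driver names (a sketch, not the patched function):

static int __init example_init(void)
{
        int err;

        err = platform_driver_register(&example_platform_driver);
        if (err)
                return err;

        err = mipi_dsi_driver_register(&example_dsi_driver);
        if (err) {
                /* unwind in reverse order of registration */
                platform_driver_unregister(&example_platform_driver);
                return err;
        }

        return 0;
}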
mutex_init(&pfdev->reset_lock); INIT_LIST_HEAD(&pfdev->scheduled_jobs); INIT_LIST_HEAD(&pfdev->as_lru_list); diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 140e004a3790..597cf1459b0a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -106,7 +106,11 @@ struct panfrost_device { struct panfrost_perfcnt *perfcnt; struct mutex sched_lock; - struct mutex reset_lock; + + struct { + struct work_struct work; + atomic_t pending; + } reset; struct mutex shrinker_lock; struct list_head shrinker_list; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 689be734ed20..83a461bdeea8 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -548,7 +548,7 @@ DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops); * - 1.0 - initial interface * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO */ -static struct drm_driver panfrost_drm_driver = { +static const struct drm_driver panfrost_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, .open = panfrost_open, .postclose = panfrost_postclose, diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index e75b7d2192f7..04e6f6f9b742 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -20,12 +20,21 @@ #include "panfrost_gpu.h" #include "panfrost_mmu.h" +#define JOB_TIMEOUT_MS 500 + #define job_write(dev, reg, data) writel(data, dev->iomem + (reg)) #define job_read(dev, reg) readl(dev->iomem + (reg)) +enum panfrost_queue_status { + PANFROST_QUEUE_STATUS_ACTIVE, + PANFROST_QUEUE_STATUS_STOPPED, + PANFROST_QUEUE_STATUS_STARTING, + PANFROST_QUEUE_STATUS_FAULT_PENDING, +}; + struct panfrost_queue_state { struct drm_gpu_scheduler sched; - bool stopped; + atomic_t status; struct mutex lock; u64 fence_context; u64 emit_seqno; @@ -373,28 +382,61 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev) static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue, struct drm_sched_job *bad) { + enum panfrost_queue_status old_status; bool stopped = false; mutex_lock(&queue->lock); - if (!queue->stopped) { - drm_sched_stop(&queue->sched, bad); - if (bad) - drm_sched_increase_karma(bad); - queue->stopped = true; - stopped = true; - } + old_status = atomic_xchg(&queue->status, + PANFROST_QUEUE_STATUS_STOPPED); + if (old_status == PANFROST_QUEUE_STATUS_STOPPED) + goto out; + + WARN_ON(old_status != PANFROST_QUEUE_STATUS_ACTIVE); + drm_sched_stop(&queue->sched, bad); + if (bad) + drm_sched_increase_karma(bad); + + stopped = true; + + /* + * Set the timeout to max so the timer doesn't get started + * when we return from the timeout handler (restored in + * panfrost_scheduler_start()). + */ + queue->sched.timeout = MAX_SCHEDULE_TIMEOUT; + +out: mutex_unlock(&queue->lock); return stopped; } +static void panfrost_scheduler_start(struct panfrost_queue_state *queue) +{ + enum panfrost_queue_status old_status; + + mutex_lock(&queue->lock); + old_status = atomic_xchg(&queue->status, + PANFROST_QUEUE_STATUS_STARTING); + WARN_ON(old_status != PANFROST_QUEUE_STATUS_STOPPED); + + /* Restore the original timeout before starting the scheduler. 
*/ + queue->sched.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS); + drm_sched_resubmit_jobs(&queue->sched); + drm_sched_start(&queue->sched, true); + old_status = atomic_xchg(&queue->status, + PANFROST_QUEUE_STATUS_ACTIVE); + if (old_status == PANFROST_QUEUE_STATUS_FAULT_PENDING) + drm_sched_fault(&queue->sched); + + mutex_unlock(&queue->lock); +} + static void panfrost_job_timedout(struct drm_sched_job *sched_job) { struct panfrost_job *job = to_panfrost_job(sched_job); struct panfrost_device *pfdev = job->pfdev; int js = panfrost_job_get_slot(job); - unsigned long flags; - int i; /* * If the GPU managed to complete this jobs fence, the timeout is @@ -415,56 +457,9 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job) if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job)) return; - if (!mutex_trylock(&pfdev->reset_lock)) - return; - - for (i = 0; i < NUM_JOB_SLOTS; i++) { - struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched; - - /* - * If the queue is still active, make sure we wait for any - * pending timeouts. - */ - if (!pfdev->js->queue[i].stopped) - cancel_delayed_work_sync(&sched->work_tdr); - - /* - * If the scheduler was not already stopped, there's a tiny - * chance a timeout has expired just before we stopped it, and - * drm_sched_stop() does not flush pending works. Let's flush - * them now so the timeout handler doesn't get called in the - * middle of a reset. - */ - if (panfrost_scheduler_stop(&pfdev->js->queue[i], NULL)) - cancel_delayed_work_sync(&sched->work_tdr); - - /* - * Now that we cancelled the pending timeouts, we can safely - * reset the stopped state. - */ - pfdev->js->queue[i].stopped = false; - } - - spin_lock_irqsave(&pfdev->js->job_lock, flags); - for (i = 0; i < NUM_JOB_SLOTS; i++) { - if (pfdev->jobs[i]) { - pm_runtime_put_noidle(pfdev->dev); - panfrost_devfreq_record_idle(&pfdev->pfdevfreq); - pfdev->jobs[i] = NULL; - } - } - spin_unlock_irqrestore(&pfdev->js->job_lock, flags); - - panfrost_device_reset(pfdev); - - for (i = 0; i < NUM_JOB_SLOTS; i++) - drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched); - - mutex_unlock(&pfdev->reset_lock); - - /* restart scheduler after GPU is usable again */ - for (i = 0; i < NUM_JOB_SLOTS; i++) - drm_sched_start(&pfdev->js->queue[i].sched, true); + /* Schedule a reset if there's no reset in progress. */ + if (!atomic_xchg(&pfdev->reset.pending, 1)) + schedule_work(&pfdev->reset.work); } static const struct drm_sched_backend_ops panfrost_sched_ops = { @@ -496,6 +491,8 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data) job_write(pfdev, JOB_INT_CLEAR, mask); if (status & JOB_INT_MASK_ERR(j)) { + enum panfrost_queue_status old_status; + job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP); dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x", @@ -504,7 +501,18 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data) job_read(pfdev, JS_HEAD_LO(j)), job_read(pfdev, JS_TAIL_LO(j))); - drm_sched_fault(&pfdev->js->queue[j].sched); + /* + * When the queue is being restarted we don't report + * faults directly to avoid races between the timeout + * and reset handlers. panfrost_scheduler_start() will + * call drm_sched_fault() after the queue has been + * started if status == FAULT_PENDING. 
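Taken together, the panfrost_scheduler_stop()/panfrost_scheduler_start() and IRQ-handler changes implement a small state machine on queue->status; a condensed transition map, derived from the code above using the enum panfrost_queue_status names:

/*
 *   ACTIVE   --timeout or fault----> STOPPED        (panfrost_scheduler_stop)
 *   STOPPED  --reset worker--------> STARTING       (panfrost_scheduler_start)
 *   STARTING --job fault in IRQ----> FAULT_PENDING  (fault deferred)
 *   STARTING or FAULT_PENDING --start complete--> ACTIVE,
 *        replaying the deferred fault via drm_sched_fault() if one was pending
 */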
+ */ + old_status = atomic_cmpxchg(&pfdev->js->queue[j].status, + PANFROST_QUEUE_STATUS_STARTING, + PANFROST_QUEUE_STATUS_FAULT_PENDING); + if (old_status == PANFROST_QUEUE_STATUS_ACTIVE) + drm_sched_fault(&pfdev->js->queue[j].sched); } if (status & JOB_INT_MASK_DONE(j)) { @@ -531,11 +539,66 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data) return IRQ_HANDLED; } +static void panfrost_reset(struct work_struct *work) +{ + struct panfrost_device *pfdev = container_of(work, + struct panfrost_device, + reset.work); + unsigned long flags; + unsigned int i; + bool cookie; + + cookie = dma_fence_begin_signalling(); + for (i = 0; i < NUM_JOB_SLOTS; i++) { + /* + * We want pending timeouts to be handled before we attempt + * to stop the scheduler. If we don't do that and the timeout + * handler is in flight, it might have removed the bad job + * from the list, and we'll lose this job if the reset handler + * enters the critical section in panfrost_scheduler_stop() + * before the timeout handler. + * + * Timeout is set to MAX_SCHEDULE_TIMEOUT - 1 because we need + * something big enough to make sure the timer will not expire + * before we manage to stop the scheduler, but we can't use + * MAX_SCHEDULE_TIMEOUT because drm_sched_get_cleanup_job() + * considers that as 'timer is not running' and will dequeue + * the job without making sure the timeout handler is not + * running. + */ + pfdev->js->queue[i].sched.timeout = MAX_SCHEDULE_TIMEOUT - 1; + cancel_delayed_work_sync(&pfdev->js->queue[i].sched.work_tdr); + panfrost_scheduler_stop(&pfdev->js->queue[i], NULL); + } + + /* All timers have been stopped, we can safely reset the pending state. */ + atomic_set(&pfdev->reset.pending, 0); + + spin_lock_irqsave(&pfdev->js->job_lock, flags); + for (i = 0; i < NUM_JOB_SLOTS; i++) { + if (pfdev->jobs[i]) { + pm_runtime_put_noidle(pfdev->dev); + panfrost_devfreq_record_idle(&pfdev->pfdevfreq); + pfdev->jobs[i] = NULL; + } + } + spin_unlock_irqrestore(&pfdev->js->job_lock, flags); + + panfrost_device_reset(pfdev); + + for (i = 0; i < NUM_JOB_SLOTS; i++) + panfrost_scheduler_start(&pfdev->js->queue[i]); + + dma_fence_end_signalling(cookie); +} + int panfrost_job_init(struct panfrost_device *pfdev) { struct panfrost_job_slot *js; int ret, j, irq; + INIT_WORK(&pfdev->reset.work, panfrost_reset); + pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL); if (!js) return -ENOMEM; @@ -560,7 +623,7 @@ int panfrost_job_init(struct panfrost_device *pfdev) ret = drm_sched_init(&js->queue[j].sched, &panfrost_sched_ops, - 1, 0, msecs_to_jiffies(500), + 1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS), "pan_js"); if (ret) { dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index fdbc8d949135..5ab03d605f57 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -5,6 +5,7 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/panfrost_drm.h> #include <linux/completion.h> +#include <linux/dma-buf-map.h> #include <linux/iopoll.h> #include <linux/pm_runtime.h> #include <linux/slab.h> @@ -72,6 +73,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, { struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; + struct dma_buf_map map; struct drm_gem_shmem_object *bo; u32 cfg, as; int ret; @@ -103,11 +105,10 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device 
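The reset.pending flag above coalesces concurrent reset requests: atomic_xchg() returns the previous value, so only the caller that flips it from 0 to 1 queues the work, and panfrost_reset() re-arms the latch once all timers are stopped. The idiom in isolation, as a runnable userspace sketch with C11 atomics standing in for the kernel's atomic_t (function names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int reset_pending; /* zero-initialized: no reset in flight */

static bool request_reset(void)
{
        /* only the 0 -> 1 transition schedules the (single) handler */
        return atomic_exchange(&reset_pending, 1) == 0;
}

static void reset_handler(void)
{
        /* ... quiesce schedulers and timers ... */
        atomic_store(&reset_pending, 0); /* re-arm before restarting */
        /* ... reset hardware, restart schedulers ... */
}

int main(void)
{
        printf("%d\n", request_reset());  /* 1: schedules the reset */
        printf("%d\n", request_reset());  /* 0: coalesced */
        reset_handler();
        printf("%d\n", request_reset());  /* 1: latch re-armed */
        return 0;
}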
*pfdev, goto err_close_bo; } - perfcnt->buf = drm_gem_shmem_vmap(&bo->base); - if (IS_ERR(perfcnt->buf)) { - ret = PTR_ERR(perfcnt->buf); + ret = drm_gem_shmem_vmap(&bo->base, &map); + if (ret) goto err_put_mapping; - } + perfcnt->buf = map.vaddr; /* * Invalidate the cache and clear the counters to start from a fresh @@ -163,7 +164,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, return 0; err_vunmap: - drm_gem_shmem_vunmap(&bo->base, perfcnt->buf); + drm_gem_shmem_vunmap(&bo->base, &map); err_put_mapping: panfrost_gem_mapping_put(perfcnt->mapping); err_close_bo: @@ -180,6 +181,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, { struct panfrost_file_priv *user = file_priv->driver_priv; struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; + struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(perfcnt->buf); if (user != perfcnt->user) return -EINVAL; @@ -192,7 +194,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF)); perfcnt->user = NULL; - drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf); + drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, &map); perfcnt->buf = NULL; panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); diff --git a/drivers/gpu/drm/pl111/pl111_debugfs.c b/drivers/gpu/drm/pl111/pl111_debugfs.c index 317f68abf18b..6744fa16f464 100644 --- a/drivers/gpu/drm/pl111/pl111_debugfs.c +++ b/drivers/gpu/drm/pl111/pl111_debugfs.c @@ -30,7 +30,7 @@ static const struct { REGDEF(CLCD_PL111_LCUR), }; -int pl111_debugfs_regs(struct seq_file *m, void *unused) +static int pl111_debugfs_regs(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index b3e8697cafcf..69c02e7c82b7 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -353,7 +353,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe, drm_crtc_vblank_on(crtc); } -void pl111_display_disable(struct drm_simple_display_pipe *pipe) +static void pl111_display_disable(struct drm_simple_display_pipe *pipe) { struct drm_crtc *crtc = &pipe->crtc; struct drm_device *drm = crtc->dev; diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index ecef8a2383d2..40e6708fbbe2 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -213,7 +213,7 @@ pl111_gem_import_sg_table(struct drm_device *dev, DEFINE_DRM_GEM_CMA_FOPS(drm_fops); -static struct drm_driver pl111_drm_driver = { +static const struct drm_driver pl111_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .ioctls = NULL, @@ -224,7 +224,6 @@ static struct drm_driver pl111_drm_driver = { .major = 1, .minor = 0, .patchlevel = 0, - .gem_create_object = drm_gem_cma_create_object_default_funcs, .dumb_create = drm_gem_cma_dumb_create, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 07a3e3c23f09..012bce0cdb65 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -25,6 +25,7 @@ #include <linux/crc32.h> #include <linux/delay.h> +#include <linux/dma-buf-map.h> #include <drm/drm_drv.h> 
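The panfrost perfcnt conversion above and the qxl conversions that follow all replace bare void-pointer mappings with struct dma_buf_map, which carries an is_iomem flag alongside the address so callers stop casting I/O memory to plain pointers. For reference, the type as defined in <linux/dma-buf-map.h> at this point in the series, plus a sketch (helper name illustrative) of the dereference branch the qxl "use mapping abstraction properly" TODOs keep open-coding:

struct dma_buf_map {
        union {
                void __iomem *vaddr_iomem;      /* I/O memory */
                void *vaddr;                    /* system memory */
        };
        bool is_iomem;
};

static void *map_to_ptr(struct dma_buf_map *map)
{
        if (map->is_iomem)
                return (void __force *)map->vaddr_iomem;
        return map->vaddr;
}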
#include <drm/drm_atomic.h> @@ -581,6 +582,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, struct drm_gem_object *obj; struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL; int ret; + struct dma_buf_map user_map; + struct dma_buf_map cursor_map; void *user_ptr; int size = 64*64*4; @@ -595,9 +598,10 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, user_bo = gem_to_qxl_bo(obj); /* pinning is done in the prepare/cleanup framevbuffer */ - ret = qxl_bo_kmap(user_bo, &user_ptr); + ret = qxl_bo_kmap(user_bo, &user_map); if (ret) goto out_free_release; + user_ptr = user_map.vaddr; /* TODO: Use mapping abstraction properly */ ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size, @@ -613,9 +617,13 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, if (ret) goto out_unpin; - ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); + ret = qxl_bo_kmap(cursor_bo, &cursor_map); if (ret) goto out_backoff; + if (cursor_map.is_iomem) /* TODO: Use mapping abstraction properly */ + cursor = (struct qxl_cursor __force *)cursor_map.vaddr_iomem; + else + cursor = (struct qxl_cursor *)cursor_map.vaddr; cursor->header.unique = 0; cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; @@ -1133,6 +1141,7 @@ int qxl_create_monitors_object(struct qxl_device *qdev) { int ret; struct drm_gem_object *gobj; + struct dma_buf_map map; int monitors_config_size = sizeof(struct qxl_monitors_config) + qxl_num_crtc * sizeof(struct qxl_head); @@ -1149,7 +1158,7 @@ int qxl_create_monitors_object(struct qxl_device *qdev) if (ret) return ret; - qxl_bo_kmap(qdev->monitors_config_bo, NULL); + qxl_bo_kmap(qdev->monitors_config_bo, &map); qdev->monitors_config = qdev->monitors_config_bo->kptr; qdev->ram_header->monitors_config = diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 3599db096973..7b7acb910780 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -20,6 +20,8 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include <linux/dma-buf-map.h> + #include <drm/drm_fourcc.h> #include "qxl_drv.h" @@ -42,13 +44,15 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, unsigned int num_clips, struct qxl_bo *clips_bo) { + struct dma_buf_map map; struct qxl_clip_rects *dev_clips; int ret; - ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips); - if (ret) { + ret = qxl_bo_kmap(clips_bo, &map); + if (ret) return NULL; - } + dev_clips = map.vaddr; /* TODO: Use mapping abstraction properly */ + dev_clips->num_rects = num_clips; dev_clips->chunk.next_chunk = 0; dev_clips->chunk.prev_chunk = 0; @@ -142,6 +146,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, int stride = fb->pitches[0]; /* depth is not actually interesting, we don't mask with it */ int depth = fb->format->cpp[0] * 8; + struct dma_buf_map surface_map; uint8_t *surface_base; struct qxl_release *release; struct qxl_bo *clips_bo; @@ -197,9 +202,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, if (ret) goto out_release_backoff; - ret = qxl_bo_kmap(bo, (void **)&surface_base); + ret = qxl_bo_kmap(bo, &surface_map); if (ret) goto out_release_backoff; + surface_base = surface_map.vaddr; /* TODO: Use mapping abstraction properly */ ret = qxl_image_init(qdev, release, dimage, surface_base, left - dumb_shadow_offset, diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 3602e8b34189..8bd0f916dfbc 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -30,6 +30,7 @@ * Definitions taken from spice-protocol, plus kernel driver specific bits. */ +#include <linux/dma-buf-map.h> #include <linux/dma-fence.h> #include <linux/firmware.h> #include <linux/platform_device.h> @@ -50,6 +51,8 @@ #include "qxl_dev.h" +struct dma_buf_map; + #define DRIVER_AUTHOR "Dave Airlie" #define DRIVER_NAME "qxl" @@ -79,7 +82,7 @@ struct qxl_bo { /* Protected by tbo.reserved */ struct ttm_place placements[3]; struct ttm_placement placement; - struct ttm_bo_kmap_obj kmap; + struct dma_buf_map map; void *kptr; unsigned int map_count; int type; @@ -166,20 +169,6 @@ struct qxl_drm_image { struct list_head chunk_list; }; -struct qxl_fb_image { - struct qxl_device *qdev; - uint32_t pseudo_palette[16]; - struct fb_image fb_image; - uint32_t visual; -}; - -struct qxl_draw_fill { - struct qxl_device *qdev; - struct qxl_rect rect; - uint32_t color; - uint16_t rop; -}; - /* * Debugfs */ @@ -188,8 +177,6 @@ struct qxl_debugfs { unsigned int num_files; }; -int qxl_debugfs_fence_init(struct qxl_device *rdev); - struct qxl_device { struct drm_device ddev; @@ -271,6 +258,8 @@ struct qxl_device { #define to_qxl(dev) container_of(dev, struct qxl_device, ddev) +int qxl_debugfs_fence_init(struct qxl_device *rdev); + extern const struct drm_ioctl_desc qxl_ioctls[]; extern int qxl_max_ioctl; @@ -335,7 +324,6 @@ int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); void qxl_gem_object_close(struct drm_gem_object *obj, struct drm_file *file_priv); void qxl_bo_force_delete(struct qxl_device *qdev); -int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); /* qxl_dumb.c */ int qxl_mode_dumb_create(struct drm_file *file_priv, @@ -445,8 +433,9 @@ struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *qxl_gem_prime_import_sg_table( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -void *qxl_gem_prime_vmap(struct drm_gem_object *obj); -void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int qxl_gem_prime_vmap(struct 
drm_gem_object *obj, struct dma_buf_map *map); +void qxl_gem_prime_vunmap(struct drm_gem_object *obj, + struct dma_buf_map *map); int qxl_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 0bab9ec6adc1..16e1e589508e 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -160,7 +160,6 @@ static int qxl_process_single_command(struct qxl_device *qdev, default: DRM_DEBUG("Only draw commands in execbuffers\n"); return -EINVAL; - break; } if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info)) diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index dc5b3850a4d4..228e2b9198f1 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -231,11 +231,11 @@ int qxl_device_init(struct qxl_device *qdev, goto cursor_ring_free; } - idr_init(&qdev->release_idr); + idr_init_base(&qdev->release_idr, 1); spin_lock_init(&qdev->release_idr_lock); spin_lock_init(&qdev->release_lock); - idr_init(&qdev->surf_id_idr); + idr_init_base(&qdev->surf_id_idr, 1); spin_lock_init(&qdev->surf_id_idr_lock); mutex_init(&qdev->async_io_mutex); diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index 547d46c14d56..ceebc5881f68 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -23,10 +23,12 @@ * Alon Levy */ +#include <linux/dma-buf-map.h> +#include <linux/io-mapping.h> + #include "qxl_drv.h" #include "qxl_object.h" -#include <linux/io-mapping.h> static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo) { struct qxl_bo *bo; @@ -152,24 +154,27 @@ int qxl_bo_create(struct qxl_device *qdev, return 0; } -int qxl_bo_kmap(struct qxl_bo *bo, void **ptr) +int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map) { - bool is_iomem; int r; if (bo->kptr) { - if (ptr) - *ptr = bo->kptr; bo->map_count++; - return 0; + goto out; } - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); + r = ttm_bo_vmap(&bo->tbo, &bo->map); if (r) return r; - bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); - if (ptr) - *ptr = bo->kptr; bo->map_count = 1; + + /* TODO: Remove kptr in favor of map everywhere. 
*/ + if (bo->map.is_iomem) + bo->kptr = (void *)bo->map.vaddr_iomem; + else + bo->kptr = bo->map.vaddr; + +out: + *map = bo->map; return 0; } @@ -180,6 +185,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, void *rptr; int ret; struct io_mapping *map; + struct dma_buf_map bo_map; if (bo->tbo.mem.mem_type == TTM_PL_VRAM) map = qdev->vram_mapping; @@ -196,9 +202,10 @@ fallback: return rptr; } - ret = qxl_bo_kmap(bo, &rptr); + ret = qxl_bo_kmap(bo, &bo_map); if (ret) return NULL; + rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */ rptr += page_offset * PAGE_SIZE; return rptr; @@ -212,7 +219,7 @@ void qxl_bo_kunmap(struct qxl_bo *bo) if (bo->map_count > 0) return; bo->kptr = NULL; - ttm_bo_kunmap(&bo->kmap); + ttm_bo_vunmap(&bo->tbo, &bo->map); } void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index 09a5c818324d..ebf24c9d2bf2 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -63,7 +63,7 @@ extern int qxl_bo_create(struct qxl_device *qdev, bool kernel, bool pinned, u32 domain, struct qxl_surface *surf, struct qxl_bo **bo_ptr); -extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); +extern int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map); extern void qxl_bo_kunmap(struct qxl_bo *bo); void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset); void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map); diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index 7d3816fca5a8..4aa949799446 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c @@ -54,20 +54,20 @@ struct drm_gem_object *qxl_gem_prime_import_sg_table( return ERR_PTR(-ENOSYS); } -void *qxl_gem_prime_vmap(struct drm_gem_object *obj) +int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct qxl_bo *bo = gem_to_qxl_bo(obj); - void *ptr; int ret; - ret = qxl_bo_kmap(bo, &ptr); + ret = qxl_bo_kmap(bo, map); if (ret < 0) - return ERR_PTR(ret); + return ret; - return ptr; + return 0; } -void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +void qxl_gem_prime_vunmap(struct drm_gem_object *obj, + struct dma_buf_map *map) { struct qxl_bo *bo = gem_to_qxl_bo(obj); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index a80d59634143..128c38c8a837 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -140,7 +140,8 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo, static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) + struct ttm_resource *new_mem, + struct ttm_place *hop) { struct ttm_resource *old_mem = &bo->mem; int ret; diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c index 3e76ae5a17ee..1234ec60c0af 100644 --- a/drivers/gpu/drm/r128/ati_pcigart.c +++ b/drivers/gpu/drm/r128/ati_pcigart.c @@ -1,4 +1,4 @@ -/** +/* * \file ati_pcigart.c * ATI PCI GART support * diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 961a31b8805c..5f3adba43e47 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -509,7 +509,6 @@ struct radeon_bo { /* Constant after initialization */ struct radeon_device *rdev; - struct ttm_bo_kmap_obj dma_buf_vmap; pid_t pid; #ifdef CONFIG_MMU_NOTIFIER @@ -2245,6 +2244,7 @@ int 
radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); /* VRAM scratch page for HDP bug, default vram page */ struct r600_vram_scratch { diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 84f5d56528ee..e45d7344ac2b 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -51,6 +51,7 @@ #include <drm/radeon_drm.h> #include "radeon_drv.h" +#include "radeon.h" #include "radeon_kms.h" #include "radeon_ttm.h" #include "radeon_device.h" @@ -123,8 +124,6 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode); extern bool radeon_is_px(struct drm_device *dev); -extern const struct drm_ioctl_desc radeon_ioctls_kms[]; -extern int radeon_max_kms_ioctl; int radeon_mode_dumb_mmap(struct drm_file *filp, struct drm_device *dev, uint32_t handle, uint64_t *offset_p); @@ -288,7 +287,7 @@ static struct pci_device_id pciidlist[] = { MODULE_DEVICE_TABLE(pci, pciidlist); -static struct drm_driver kms_driver; +static const struct drm_driver kms_driver; static int radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -571,9 +570,55 @@ static const struct file_operations radeon_driver_kms_fops = { #endif }; -static struct drm_driver kms_driver = { +static const struct drm_ioctl_desc radeon_ioctls_kms[] = { + DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH), + /* KMS */ + DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, 
DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), +}; + +static const struct drm_driver kms_driver = { .driver_features = - DRIVER_GEM | DRIVER_RENDER, + DRIVER_GEM | DRIVER_RENDER | DRIVER_MODESET, .load = radeon_driver_load_kms, .open = radeon_driver_open_kms, .postclose = radeon_driver_postclose_kms, @@ -584,6 +629,7 @@ static struct drm_driver kms_driver = { .irq_uninstall = radeon_driver_irq_uninstall_kms, .irq_handler = radeon_driver_irq_handler_kms, .ioctls = radeon_ioctls_kms, + .num_ioctls = ARRAY_SIZE(radeon_ioctls_kms), .dumb_create = radeon_mode_dumb_create, .dumb_map_offset = radeon_mode_dumb_mmap, .fops = &radeon_driver_kms_fops, @@ -600,9 +646,6 @@ static struct drm_driver kms_driver = { .patchlevel = KMS_DRIVER_PATCHLEVEL, }; -static struct drm_driver *driver; -static struct pci_driver *pdriver; - static struct pci_driver radeon_kms_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, @@ -612,41 +655,33 @@ static struct pci_driver radeon_kms_pci_driver = { .driver.pm = &radeon_pm_ops, }; -static int __init radeon_init(void) +static int __init radeon_module_init(void) { if (vgacon_text_force() && radeon_modeset == -1) { DRM_INFO("VGACON disable radeon kernel modesetting.\n"); radeon_modeset = 0; } - /* set to modesetting by default if not nomodeset */ - if (radeon_modeset == -1) - radeon_modeset = 1; - - if (radeon_modeset == 1) { - DRM_INFO("radeon kernel modesetting enabled.\n"); - driver = &kms_driver; - pdriver = &radeon_kms_pci_driver; - driver->driver_features |= DRIVER_MODESET; - driver->num_ioctls = radeon_max_kms_ioctl; - radeon_register_atpx_handler(); - - } else { + + if (radeon_modeset == 0) { DRM_ERROR("No UMS support in radeon module!\n"); return -EINVAL; } - return pci_register_driver(pdriver); + DRM_INFO("radeon kernel modesetting enabled.\n"); + radeon_register_atpx_handler(); + + return pci_register_driver(&radeon_kms_pci_driver); } -static void __exit radeon_exit(void) +static void __exit radeon_module_exit(void) { - pci_unregister_driver(pdriver); + pci_unregister_driver(&radeon_kms_pci_driver); radeon_unregister_atpx_handler(); mmu_notifier_synchronize(); } -module_init(radeon_init); -module_exit(radeon_exit); +module_init(radeon_module_init); +module_exit(radeon_module_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); diff --git 
a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 4d0921072df0..b6b21d2e7262 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -31,6 +31,7 @@ #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_file.h> +#include <drm/drm_gem_ttm_helper.h> #include <drm/radeon_drm.h> #include "radeon.h" @@ -41,8 +42,6 @@ struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj, struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); int radeon_gem_prime_pin(struct drm_gem_object *obj); void radeon_gem_prime_unpin(struct drm_gem_object *obj); -void *radeon_gem_prime_vmap(struct drm_gem_object *obj); -void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); static const struct drm_gem_object_funcs radeon_gem_object_funcs; @@ -236,8 +235,8 @@ static const struct drm_gem_object_funcs radeon_gem_object_funcs = { .pin = radeon_gem_prime_pin, .unpin = radeon_gem_prime_unpin, .get_sg_table = radeon_gem_prime_get_sg_table, - .vmap = radeon_gem_prime_vmap, - .vunmap = radeon_gem_prime_vunmap, + .vmap = drm_gem_ttm_vmap, + .vunmap = drm_gem_ttm_vunmap, }; /* diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 95fa3df36da8..50cee4880bb4 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -223,7 +223,7 @@ static void radeon_set_filp_rights(struct drm_device *dev, * etc. (all asics). * Returns 0 on success, -EINVAL on failure. */ -static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) +int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; struct drm_radeon_info *info = data; @@ -868,50 +868,3 @@ void radeon_disable_vblank_kms(struct drm_crtc *crtc) radeon_irq_set(rdev); spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } - -const struct drm_ioctl_desc radeon_ioctls_kms[] = { - DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - 
DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH), - /* KMS */ - DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH), - DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), -}; -int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index 789e076efbdc..dd482edc819c 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -40,26 +40,6 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj) return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages); } -void *radeon_gem_prime_vmap(struct drm_gem_object *obj) -{ - struct radeon_bo *bo = gem_to_radeon_bo(obj); - int ret; - - ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, - &bo->dma_buf_vmap); - if (ret) - return ERR_PTR(ret); - - return bo->dma_buf_vmap.virtual; -} - -void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - struct radeon_bo *bo = gem_to_radeon_bo(obj); - - ttm_bo_kunmap(&bo->dma_buf_vmap); -} - struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg) diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index c040f32a5485..28b300ed200e 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -207,110 +207,27 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, return r; } -static int radeon_move_vram_ram(struct ttm_buffer_object *bo, - bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) -{ - struct ttm_resource *old_mem = &bo->mem; - struct ttm_resource tmp_mem; - struct ttm_place placements; - struct ttm_placement placement; - int r; - - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = 0; - placements.mem_type = TTM_PL_TT; - 
placements.flags = 0; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); - if (unlikely(r)) { - return r; - } - - r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); - if (unlikely(r)) { - goto out_cleanup; - } - - r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem); - if (unlikely(r)) { - goto out_cleanup; - } - r = radeon_move_blit(bo, true, &tmp_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } - r = ttm_bo_wait_ctx(bo, ctx); - if (unlikely(r)) - goto out_cleanup; - - radeon_ttm_tt_unbind(bo->bdev, bo->ttm); - ttm_resource_free(bo, &bo->mem); - ttm_bo_assign_mem(bo, new_mem); -out_cleanup: - ttm_resource_free(bo, &tmp_mem); - return r; -} - -static int radeon_move_ram_vram(struct ttm_buffer_object *bo, - bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) -{ - struct ttm_resource *old_mem = &bo->mem; - struct ttm_resource tmp_mem; - struct ttm_placement placement; - struct ttm_place placements; - int r; - - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = 0; - placements.mem_type = TTM_PL_TT; - placements.flags = 0; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); - if (unlikely(r)) { - return r; - } - - r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); - if (unlikely(r)) - goto out_cleanup; - - r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem); - if (unlikely(r)) - goto out_cleanup; - - ttm_bo_assign_mem(bo, &tmp_mem); - r = radeon_move_blit(bo, true, new_mem, old_mem); - if (unlikely(r)) { - goto out_cleanup; - } -out_cleanup: - ttm_resource_free(bo, &tmp_mem); - return r; -} - static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) + struct ttm_resource *new_mem, + struct ttm_place *hop) { struct radeon_device *rdev; struct radeon_bo *rbo; struct ttm_resource *old_mem = &bo->mem; int r; + if ((old_mem->mem_type == TTM_PL_SYSTEM && + new_mem->mem_type == TTM_PL_VRAM) || + (old_mem->mem_type == TTM_PL_VRAM && + new_mem->mem_type == TTM_PL_SYSTEM)) { + hop->fpfn = 0; + hop->lpfn = 0; + hop->mem_type = TTM_PL_TT; + hop->flags = 0; + return -EMULTIHOP; + } + if (new_mem->mem_type == TTM_PL_TT) { r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem); if (r) @@ -351,17 +268,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, goto memcpy; } - if (old_mem->mem_type == TTM_PL_VRAM && - new_mem->mem_type == TTM_PL_SYSTEM) { - r = radeon_move_vram_ram(bo, evict, ctx, new_mem); - } else if (old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_VRAM) { - r = radeon_move_ram_vram(bo, evict, ctx, new_mem); - } else { - r = radeon_move_blit(bo, evict, - new_mem, old_mem); - } - + r = radeon_move_blit(bo, evict, new_mem, old_mem); if (r) { memcpy: r = ttm_bo_move_memcpy(bo, ctx, new_mem); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 447be991fa25..600056dff374 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -507,7 +507,7 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table); DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops); -static struct drm_driver rcar_du_driver = { +static const struct drm_driver rcar_du_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(rcar_du_dumb_create), .fops = &rcar_du_fops, diff --git 
a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index 542dcf7eddd6..e84325e56d98 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -350,7 +350,7 @@ static void dw_mipi_dsi_phy_write(struct dw_mipi_dsi_rockchip *dsi, dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR); } -/** +/* * ns2bc - Nanoseconds to byte clock cycles */ static inline unsigned int ns2bc(struct dw_mipi_dsi_rockchip *dsi, int ns) @@ -358,7 +358,7 @@ static inline unsigned int ns2bc(struct dw_mipi_dsi_rockchip *dsi, int ns) return DIV_ROUND_UP(ns * dsi->lane_mbps / 8, 1000); } -/** +/* * ns2ui - Nanoseconds to UI time periods */ static inline unsigned int ns2ui(struct dw_mipi_dsi_rockchip *dsi, int ns) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index b7654f5e4225..212bd87c0c4a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -35,7 +35,7 @@ #define DRIVER_MINOR 0 static bool is_support_iommu = true; -static struct drm_driver rockchip_drm_driver; +static const struct drm_driver rockchip_drm_driver; /* * Attach a (component) device to the shared drm dma mapping from master drm @@ -209,7 +209,7 @@ static const struct file_operations rockchip_drm_driver_fops = { .release = drm_release, }; -static struct drm_driver rockchip_drm_driver = { +static const struct drm_driver rockchip_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .lastclose = drm_fb_helper_lastclose, .dumb_create = rockchip_gem_dumb_create, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 7d5ebb10323b..7971f57436dd 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -532,26 +532,32 @@ err_free_rk_obj: return ERR_PTR(ret); } -void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) +int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); - if (rk_obj->pages) - return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, - pgprot_writecombine(PAGE_KERNEL)); + if (rk_obj->pages) { + void *vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + if (!vaddr) + return -ENOMEM; + dma_buf_map_set_vaddr(map, vaddr); + return 0; + } if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) - return NULL; + return -ENOMEM; + dma_buf_map_set_vaddr(map, rk_obj->kvaddr); - return rk_obj->kvaddr; + return 0; } -void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); if (rk_obj->pages) { - vunmap(vaddr); + vunmap(map->vaddr); return; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index 7ffc541bea07..5a70a56cd406 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h @@ -31,8 +31,8 @@ struct drm_gem_object * rockchip_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); -void *rockchip_gem_prime_vmap(struct drm_gem_object *obj); -void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int rockchip_gem_prime_vmap(struct drm_gem_object *obj, 
struct dma_buf_map *map); +void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); /* drm driver mmap file operations */ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 8cd39fca81a3..d1e05482641b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1248,6 +1248,8 @@ static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc, static void vop_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); struct vop *vop = to_vop(crtc); @@ -1256,8 +1258,8 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc, * Only update GAMMA if the 'active' flag is not changed, * otherwise it's updated by .atomic_enable. */ - if (crtc->state->color_mgmt_changed && - !crtc->state->active_changed) + if (crtc_state->color_mgmt_changed && + !crtc_state->active_changed) vop_crtc_gamma_set(vop, crtc, old_crtc_state); } diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index f292c6a6e20f..8658ef82d937 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -42,7 +42,7 @@ struct rockchip_lvds; container_of(c, struct rockchip_lvds, encoder) /** - * rockchip_lvds_soc_data - rockchip lvds Soc private data + * struct rockchip_lvds_soc_data - rockchip lvds Soc private data * @probe: LVDS platform probe function * @helper_funcs: LVDS connector helper functions */ diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 9a771af5d0c9..c079714477d8 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c @@ -18,6 +18,7 @@ #include "rockchip_drm_drv.h" #include "rockchip_drm_vop.h" +#include "rockchip_rgb.h" #define encoder_to_rgb(c) container_of(c, struct rockchip_rgb, encoder) diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index 6889d6534eba..606e5b807a6e 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c @@ -573,19 +573,12 @@ int savage_driver_firstopen(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; unsigned long mmio_base, fb_base, fb_size, aperture_base; - /* fb_rsrc and aper_rsrc aren't really used currently, but still exist - * in case we decide we need information on the BAR for BSD in the - * future. 
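
For reference alongside the rockchip hunks above: a minimal sketch of the new &drm_gem_object_funcs.vmap contract that this series converts drivers to. The hook now returns 0 or a negative errno and stores the mapping in a caller-provided struct dma_buf_map instead of returning a raw pointer. This is illustrative only, not part of the patch; example_gem_vmap() and example_obj_kmap() are hypothetical names.

    static int example_gem_vmap(struct drm_gem_object *obj,
                                struct dma_buf_map *map)
    {
            /* hypothetical helper returning a kernel virtual address */
            void *vaddr = example_obj_kmap(obj);

            if (!vaddr)
                    return -ENOMEM;

            /* record a system-memory mapping; an I/O mapping would use
             * dma_buf_map_set_vaddr_iomem() instead */
            dma_buf_map_set_vaddr(map, vaddr);
            return 0;
    }
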
- */ - unsigned int fb_rsrc, aper_rsrc; int ret = 0; if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { - fb_rsrc = 0; fb_base = pci_resource_start(dev->pdev, 0); fb_size = SAVAGE_FB_SIZE_S3; mmio_base = fb_base + SAVAGE_FB_SIZE_S3; - aper_rsrc = 0; aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; /* this should always be true */ if (pci_resource_len(dev->pdev, 0) == 0x08000000) { @@ -607,10 +600,8 @@ int savage_driver_firstopen(struct drm_device *dev) } else if (dev_priv->chipset != S3_SUPERSAVAGE && dev_priv->chipset != S3_SAVAGE2000) { mmio_base = pci_resource_start(dev->pdev, 0); - fb_rsrc = 1; fb_base = pci_resource_start(dev->pdev, 1); fb_size = SAVAGE_FB_SIZE_S4; - aper_rsrc = 1; aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; /* this should always be true */ if (pci_resource_len(dev->pdev, 1) == 0x08000000) { @@ -626,10 +617,8 @@ } else { mmio_base = pci_resource_start(dev->pdev, 0); - fb_rsrc = 1; fb_base = pci_resource_start(dev->pdev, 1); fb_size = pci_resource_len(dev->pdev, 1); - aper_rsrc = 2; aperture_base = pci_resource_start(dev->pdev, 2); /* Automatic MTRR setup will do the right thing. */ } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index da24c4e8b9fb..b498d474ef9e 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -447,7 +447,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) EXPORT_SYMBOL(drm_sched_stop); /** - * drm_sched_job_recovery - recover jobs after a reset + * drm_sched_start - recover jobs after a reset * * @sched: scheduler instance * @full_recovery: proceed with complete sched restart diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c index 1d696ec001cf..6b4759ed6bfd 100644 --- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c +++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c @@ -120,44 +120,58 @@ sideband_msg_req_equal(const struct drm_dp_sideband_msg_req_body *in, static bool sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in) { - struct drm_dp_sideband_msg_req_body out = {0}; + struct drm_dp_sideband_msg_req_body *out; struct drm_printer p = drm_err_printer(PREFIX_STR); - struct drm_dp_sideband_msg_tx txmsg; + struct drm_dp_sideband_msg_tx *txmsg; int i, ret; + bool result = true; - drm_dp_encode_sideband_req(in, &txmsg); - ret = drm_dp_decode_sideband_req(&txmsg, &out); + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return false; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + kfree(out); + return false; + } + + drm_dp_encode_sideband_req(in, txmsg); + ret = drm_dp_decode_sideband_req(txmsg, out); if (ret < 0) { drm_printf(&p, "Failed to decode sideband request: %d\n", ret); - return false; + result = false; + goto out; } - if (!sideband_msg_req_equal(in, &out)) { + if (!sideband_msg_req_equal(in, out)) { drm_printf(&p, "Encode/decode failed, expected:\n"); drm_dp_dump_sideband_msg_req_body(in, 1, &p); drm_printf(&p, "Got:\n"); - drm_dp_dump_sideband_msg_req_body(&out, 1, &p); - return false; + drm_dp_dump_sideband_msg_req_body(out, 1, &p); + result = false; + goto out; } switch (in->req_type) { case DP_REMOTE_DPCD_WRITE: - kfree(out.u.dpcd_write.bytes); + kfree(out->u.dpcd_write.bytes); break; case DP_REMOTE_I2C_READ: - for (i = 0; i < out.u.i2c_read.num_transactions; i++) - kfree(out.u.i2c_read.transactions[i].bytes); + for (i = 0; i < out->u.i2c_read.num_transactions;
i++) + kfree(out->u.i2c_read.transactions[i].bytes); break; case DP_REMOTE_I2C_WRITE: - kfree(out.u.i2c_write.bytes); + kfree(out->u.i2c_write.bytes); break; } /* Clear everything but the req_type for the input */ memset(&in->u, 0, sizeof(in->u)); - return true; +out: + kfree(out); + kfree(txmsg); + return result; } int igt_dp_mst_sideband_msg_req_decode(void *unused) diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c index 2d29ea6f92e2..789f22773dbc 100644 --- a/drivers/gpu/drm/selftests/test-drm_framebuffer.c +++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c @@ -330,10 +330,9 @@ static struct drm_device mock_drm_device = { static int execute_drm_mode_fb_cmd2(struct drm_mode_fb_cmd2 *r) { int buffer_created = 0; - struct drm_framebuffer *fb; mock_drm_device.dev_private = &buffer_created; - fb = drm_internal_framebuffer_create(&mock_drm_device, r, NULL); + drm_internal_framebuffer_create(&mock_drm_device, r, NULL); return buffer_created; } diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c index 95e212a9a74d..b768b53c4aee 100644 --- a/drivers/gpu/drm/selftests/test-drm_mm.c +++ b/drivers/gpu/drm/selftests/test-drm_mm.c @@ -54,7 +54,7 @@ static int igt_sanitycheck(void *ignored) static bool assert_no_holes(const struct drm_mm *mm) { struct drm_mm_node *hole; - u64 hole_start, hole_end; + u64 hole_start, __always_unused hole_end; unsigned long count; count = 0; diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 26a15c214bd3..0a02b7092c04 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -128,7 +128,7 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg) DEFINE_DRM_GEM_CMA_FOPS(shmob_drm_fops); -static struct drm_driver shmob_drm_driver = { +static const struct drm_driver shmob_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET, .irq_handler = shmob_drm_irq, DRM_GEM_CMA_DRIVER_OPS, diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 3f54efa36098..c7efb43b83ee 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -130,7 +130,7 @@ static void sti_mode_config_init(struct drm_device *dev) DEFINE_DRM_GEM_CMA_FOPS(sti_driver_fops); -static struct drm_driver sti_driver = { +static const struct drm_driver sti_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .fops = &sti_driver_fops, DRM_GEM_CMA_DRIVER_OPS, diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 38a558768e53..f3ace11209dd 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -167,6 +167,12 @@ struct sti_hdmi_connector { #define to_sti_hdmi_connector(x) \ container_of(x, struct sti_hdmi_connector, drm_connector) +static const struct drm_prop_enum_list colorspace_mode_names[] = { + { HDMI_COLORSPACE_RGB, "rgb" }, + { HDMI_COLORSPACE_YUV422, "yuv422" }, + { HDMI_COLORSPACE_YUV444, "yuv444" }, +}; + u32 hdmi_read(struct sti_hdmi *hdmi, int offset) { return readl(hdmi->regs + offset); diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h index 1f6dc90b5d83..05b2f3d0d48d 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.h +++ b/drivers/gpu/drm/sti/sti_hdmi.h @@ -33,12 +33,6 @@ struct hdmi_audio_params { struct hdmi_audio_infoframe cea; }; -static const struct drm_prop_enum_list colorspace_mode_names[] = { - { HDMI_COLORSPACE_RGB, "rgb" }, - { 
HDMI_COLORSPACE_YUV422, "yuv422" }, - { HDMI_COLORSPACE_YUV444, "yuv444" }, -}; - #define DEFAULT_COLORSPACE_MODE HDMI_COLORSPACE_RGB /** diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index 411103f013e2..222869b232ae 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -53,7 +53,7 @@ static int stm_gem_cma_dumb_create(struct drm_file *file, DEFINE_DRM_GEM_CMA_FOPS(drv_driver_fops); -static struct drm_driver drv_driver = { +static const struct drm_driver drv_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .name = "stm", .desc = "STMicroelectronics SoC DRM", diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 29861fc81b35..91502937f26d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -40,7 +40,7 @@ static int drm_sun4i_gem_dumb_create(struct drm_file *file_priv, DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops); -static struct drm_driver sun4i_drv_driver = { +static const struct drm_driver sun4i_drv_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, /* Generic Operations */ diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 2d86627b0d4e..85dd7131553a 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1939,15 +1939,17 @@ static void tegra_crtc_atomic_begin(struct drm_crtc *crtc, static void tegra_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { - struct tegra_dc_state *crtc_state = to_dc_state(crtc->state); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct tegra_dc_state *dc_state = to_dc_state(crtc_state); struct tegra_dc *dc = to_tegra_dc(crtc); u32 value; - value = crtc_state->planes << 8 | GENERAL_UPDATE; + value = dc_state->planes << 8 | GENERAL_UPDATE; tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); - value = crtc_state->planes | GENERAL_ACT_REQ; + value = dc_state->planes | GENERAL_ACT_REQ; tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); } diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index f0f581cd345e..19ffb0626505 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -847,7 +847,7 @@ static void tegra_debugfs_init(struct drm_minor *minor) } #endif -static struct drm_driver tegra_drm_driver = { +static const struct drm_driver tegra_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_RENDER, .open = tegra_drm_open, @@ -1081,12 +1081,11 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev) static int host1x_drm_probe(struct host1x_device *dev) { - struct drm_driver *driver = &tegra_drm_driver; struct tegra_drm *tegra; struct drm_device *drm; int err; - drm = drm_dev_alloc(driver, &dev->dev); + drm = drm_dev_alloc(&tegra_drm_driver, &dev->dev); if (IS_ERR(drm)) return PTR_ERR(drm); diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index b669168ae7cb..60b92df615aa 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -2608,16 +2608,9 @@ void dispc_remove(struct tidss_device *tidss) static int dispc_iomap_resource(struct platform_device *pdev, const char *name, void __iomem **base) { - struct resource *res; void __iomem *b; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); - if (!res) { - dev_err(&pdev->dev, "cannot get mem 
resource '%s'\n", name); - return -EINVAL; - } - - b = devm_ioremap_resource(&pdev->dev, res); + b = devm_platform_ioremap_resource_byname(pdev, name); if (IS_ERR(b)) { dev_err(&pdev->dev, "cannot ioremap resource '%s'\n", name); return PTR_ERR(b); diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index 9179ea18f625..66e3c86eb5c7 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -108,7 +108,7 @@ static void tidss_release(struct drm_device *ddev) DEFINE_DRM_GEM_CMA_FOPS(tidss_fops); -static struct drm_driver tidss_driver = { +static const struct drm_driver tidss_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &tidss_fops, .release = tidss_release, diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 3d7e4db756b7..f1d3a9f919fd 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -166,7 +166,7 @@ static void tilcdc_fini(struct drm_device *dev) drm_dev_put(dev); } -static int tilcdc_init(struct drm_driver *ddrv, struct device *dev) +static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev) { struct drm_device *ddev; struct platform_device *pdev = to_platform_device(dev); @@ -452,7 +452,7 @@ static void tilcdc_debugfs_init(struct drm_minor *minor) DEFINE_DRM_GEM_CMA_FOPS(fops); -static struct drm_driver tilcdc_driver = { +static const struct drm_driver tilcdc_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .irq_handler = tilcdc_irq, DRM_GEM_CMA_DRIVER_OPS, diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index 744a8e337e41..561c49d8657a 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -17,6 +17,7 @@ */ #include <linux/console.h> +#include <linux/dma-buf-map.h> #include <linux/module.h> #include <linux/pci.h> @@ -314,6 +315,7 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, struct drm_rect *rect) { struct cirrus_device *cirrus = to_cirrus(fb->dev); + struct dma_buf_map map; void *vmap; int idx, ret; @@ -321,10 +323,10 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, if (!drm_dev_enter(&cirrus->dev, &idx)) goto out; - ret = -ENOMEM; - vmap = drm_gem_shmem_vmap(fb->obj[0]); - if (!vmap) + ret = drm_gem_shmem_vmap(fb->obj[0], &map); + if (ret) goto out_dev_exit; + vmap = map.vaddr; /* TODO: Use mapping abstraction properly */ if (cirrus->cpp == fb->format->cpp[0]) drm_fb_memcpy_dstclip(cirrus->vram, @@ -343,7 +345,7 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, else WARN_ON_ONCE("cpp mismatch"); - drm_gem_shmem_vunmap(fb->obj[0], vmap); + drm_gem_shmem_vunmap(fb->obj[0], &map); ret = 0; out_dev_exit: @@ -536,7 +538,7 @@ static int cirrus_mode_config_init(struct cirrus_device *cirrus) DEFINE_DRM_GEM_FOPS(cirrus_fops); -static struct drm_driver cirrus_driver = { +static const struct drm_driver cirrus_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index cc397671f689..33f65f4626e5 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -45,7 +45,7 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)"); #define GM12U320_BLOCK_COUNT 20 #define GM12U320_ERR(fmt, ...) 
\ - DRM_DEV_ERROR(&gm12u320->udev->dev, fmt, ##__VA_ARGS__) + DRM_DEV_ERROR(gm12u320->dev.dev, fmt, ##__VA_ARGS__) #define MISC_RCV_EPT 1 #define DATA_RCV_EPT 2 @@ -85,7 +85,6 @@ struct gm12u320_device { struct drm_device dev; struct drm_simple_display_pipe pipe; struct drm_connector conn; - struct usb_device *udev; unsigned char *cmd_buf; unsigned char *data_buf[GM12U320_BLOCK_COUNT]; struct { @@ -155,6 +154,11 @@ static const char data_block_footer[DATA_BLOCK_FOOTER_SIZE] = { 0x80, 0x00, 0x00, 0x4f }; +static inline struct usb_device *gm12u320_to_usb_device(struct gm12u320_device *gm12u320) +{ + return interface_to_usbdev(to_usb_interface(gm12u320->dev.dev)); +} + static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320) { int i, block_size; @@ -191,6 +195,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320, u8 req_a, u8 req_b, u8 arg_a, u8 arg_b, u8 arg_c, u8 arg_d) { + struct usb_device *udev = gm12u320_to_usb_device(gm12u320); int ret, len; memcpy(gm12u320->cmd_buf, &cmd_misc, CMD_SIZE); @@ -202,8 +207,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320, gm12u320->cmd_buf[25] = arg_d; /* Send request */ - ret = usb_bulk_msg(gm12u320->udev, - usb_sndbulkpipe(gm12u320->udev, MISC_SND_EPT), + ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, MISC_SND_EPT), gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); if (ret || len != CMD_SIZE) { GM12U320_ERR("Misc. req. error %d\n", ret); @@ -211,8 +215,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320, } /* Read value */ - ret = usb_bulk_msg(gm12u320->udev, - usb_rcvbulkpipe(gm12u320->udev, MISC_RCV_EPT), + ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, MISC_RCV_EPT), gm12u320->cmd_buf, MISC_VALUE_SIZE, &len, DATA_TIMEOUT); if (ret || len != MISC_VALUE_SIZE) { @@ -222,8 +225,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320, /* cmd_buf[0] now contains the read value, which we don't use */ /* Read status */ - ret = usb_bulk_msg(gm12u320->udev, - usb_rcvbulkpipe(gm12u320->udev, MISC_RCV_EPT), + ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, MISC_RCV_EPT), gm12u320->cmd_buf, READ_STATUS_SIZE, &len, CMD_TIMEOUT); if (ret || len != READ_STATUS_SIZE) { @@ -248,6 +250,7 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320) { int block, dst_offset, len, remain, ret, x1, x2, y1, y2; struct drm_framebuffer *fb; + struct dma_buf_map map; void *vaddr; u8 *src; @@ -262,11 +265,12 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320) y1 = gm12u320->fb_update.rect.y1; y2 = gm12u320->fb_update.rect.y2; - vaddr = drm_gem_shmem_vmap(fb->obj[0]); - if (IS_ERR(vaddr)) { - GM12U320_ERR("failed to vmap fb: %ld\n", PTR_ERR(vaddr)); + ret = drm_gem_shmem_vmap(fb->obj[0], &map); + if (ret) { + GM12U320_ERR("failed to vmap fb: %d\n", ret); goto put_fb; } + vaddr = map.vaddr; /* TODO: Use mapping abstraction properly */ if (fb->obj[0]->import_attach) { ret = dma_buf_begin_cpu_access( @@ -318,7 +322,7 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320) GM12U320_ERR("dma_buf_end_cpu_access err: %d\n", ret); } vunmap: - drm_gem_shmem_vunmap(fb->obj[0], vaddr); + drm_gem_shmem_vunmap(fb->obj[0], &map); put_fb: drm_framebuffer_put(fb); gm12u320->fb_update.fb = NULL; @@ -331,6 +335,7 @@ static void gm12u320_fb_update_work(struct work_struct *work) struct gm12u320_device *gm12u320 = container_of(to_delayed_work(work), struct gm12u320_device, fb_update.work); + struct usb_device *udev = gm12u320_to_usb_device(gm12u320); int block, 
block_size, len; int ret = 0; @@ -350,43 +355,41 @@ static void gm12u320_fb_update_work(struct work_struct *work) gm12u320->cmd_buf[21] = block | (gm12u320->fb_update.frame << 7); - ret = usb_bulk_msg(gm12u320->udev, - usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), - gm12u320->cmd_buf, CMD_SIZE, &len, - CMD_TIMEOUT); + ret = usb_bulk_msg(udev, + usb_sndbulkpipe(udev, DATA_SND_EPT), + gm12u320->cmd_buf, CMD_SIZE, &len, + CMD_TIMEOUT); if (ret || len != CMD_SIZE) goto err; /* Send data block to device */ - ret = usb_bulk_msg(gm12u320->udev, - usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), - gm12u320->data_buf[block], block_size, - &len, DATA_TIMEOUT); + ret = usb_bulk_msg(udev, + usb_sndbulkpipe(udev, DATA_SND_EPT), + gm12u320->data_buf[block], block_size, + &len, DATA_TIMEOUT); if (ret || len != block_size) goto err; /* Read status */ - ret = usb_bulk_msg(gm12u320->udev, - usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT), - gm12u320->cmd_buf, READ_STATUS_SIZE, &len, - CMD_TIMEOUT); + ret = usb_bulk_msg(udev, + usb_rcvbulkpipe(udev, DATA_RCV_EPT), + gm12u320->cmd_buf, READ_STATUS_SIZE, &len, + CMD_TIMEOUT); if (ret || len != READ_STATUS_SIZE) goto err; } /* Send draw command to device */ memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE); - ret = usb_bulk_msg(gm12u320->udev, - usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT), - gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); + ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, DATA_SND_EPT), + gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT); if (ret || len != CMD_SIZE) goto err; /* Read status */ - ret = usb_bulk_msg(gm12u320->udev, - usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT), - gm12u320->cmd_buf, READ_STATUS_SIZE, &len, - gm12u320->fb_update.draw_status_timeout); + ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, DATA_RCV_EPT), + gm12u320->cmd_buf, READ_STATUS_SIZE, &len, + gm12u320->fb_update.draw_status_timeout); if (ret || len != READ_STATUS_SIZE) goto err; @@ -600,7 +603,7 @@ static const uint64_t gm12u320_pipe_modifiers[] = { DEFINE_DRM_GEM_FOPS(gm12u320_fops); -static struct drm_driver gm12u320_drm_driver = { +static const struct drm_driver gm12u320_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .name = DRIVER_NAME, @@ -638,7 +641,6 @@ static int gm12u320_usb_probe(struct usb_interface *interface, if (IS_ERR(gm12u320)) return PTR_ERR(gm12u320); - gm12u320->udev = interface_to_usbdev(interface); INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work); mutex_init(&gm12u320->fb_update.lock); diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c index 0998309b0d95..c6525cd02bc2 100644 --- a/drivers/gpu/drm/tiny/hx8357d.c +++ b/drivers/gpu/drm/tiny/hx8357d.c @@ -193,7 +193,7 @@ static const struct drm_display_mode yx350hv15_mode = { DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops); -static struct drm_driver hx8357d_driver = { +static const struct drm_driver hx8357d_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &hx8357d_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index 97a77262d791..8e98962db5a2 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -337,7 +337,7 @@ static const struct drm_display_mode ili9225_mode = { DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops); -static struct drm_driver ili9225_driver = { +static const struct drm_driver ili9225_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9225_fops, 
DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c index d39c39df56ad..6ce97f0698eb 100644 --- a/drivers/gpu/drm/tiny/ili9341.c +++ b/drivers/gpu/drm/tiny/ili9341.c @@ -149,7 +149,7 @@ static const struct drm_display_mode yx240qv29_mode = { DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops); -static struct drm_driver ili9341_driver = { +static const struct drm_driver ili9341_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9341_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index 403af68fa440..d7ce40eb166a 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -162,7 +162,7 @@ static const struct drm_display_mode waveshare_mode = { DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops); -static struct drm_driver ili9486_driver = { +static const struct drm_driver ili9486_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &ili9486_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c index 2131b4268c00..ff77f983f803 100644 --- a/drivers/gpu/drm/tiny/mi0283qt.c +++ b/drivers/gpu/drm/tiny/mi0283qt.c @@ -153,7 +153,7 @@ static const struct drm_display_mode mi0283qt_mode = { DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops); -static struct drm_driver mi0283qt_driver = { +static const struct drm_driver mi0283qt_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &mi0283qt_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 2e01cf0a9876..11c602fc9897 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -936,7 +936,7 @@ static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f, DEFINE_DRM_GEM_CMA_FOPS(repaper_fops); -static struct drm_driver repaper_driver = { +static const struct drm_driver repaper_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &repaper_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index d05de03891f8..ff5cf60f4bd7 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -277,7 +277,7 @@ static const struct drm_display_mode st7586_mode = { DEFINE_DRM_GEM_CMA_FOPS(st7586_fops); -static struct drm_driver st7586_driver = { +static const struct drm_driver st7586_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &st7586_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c index c0bc2a18edde..faaba0a033ea 100644 --- a/drivers/gpu/drm/tiny/st7735r.c +++ b/drivers/gpu/drm/tiny/st7735r.c @@ -154,7 +154,7 @@ static const struct st7735r_cfg rh128128t_cfg = { DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops); -static struct drm_driver st7735r_driver = { +static const struct drm_driver st7735r_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &st7735r_fops, DRM_GEM_CMA_DRIVER_OPS_VMAP, diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index e2a124b3affb..9a03c7834b1e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -45,7 +45,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj); -/** +/* * ttm_global_mutex - protecting the global BO state */ DEFINE_MUTEX(ttm_global_mutex); @@ -231,7 +231,8 @@ 
EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail); static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_resource *mem, bool evict, - struct ttm_operation_ctx *ctx) + struct ttm_operation_ctx *ctx, + struct ttm_place *hop) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); @@ -259,9 +260,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, } } - ret = bdev->driver->move(bo, evict, ctx, mem); - if (ret) + ret = bdev->driver->move(bo, evict, ctx, mem, hop); + if (ret) { + if (ret == -EMULTIHOP) + return ret; goto out_err; + } ctx->bytes_moved += bo->num_pages << PAGE_SHIFT; return 0; @@ -274,7 +278,7 @@ out_err: return ret; } -/** +/* * Call bo::reserved. * Will release GPU memory type usage on destruction. * This is the place to put in driver specific hooks to release @@ -348,9 +352,10 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) * Must be called with lru_lock and reservation held, this function * will drop the lru lock and optionally the reservation lock before returning. * - * @interruptible Any sleeps should occur interruptibly. - * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. - * @unlock_resv Unlock the reservation lock as well. + * @bo: The buffer object to clean-up + * @interruptible: Any sleeps should occur interruptibly. + * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead. + * @unlock_resv: Unlock the reservation lock as well. */ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, @@ -416,7 +421,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, return 0; } -/** +/* * Traverse the delayed list, and call ttm_bo_cleanup_refs on all * encountered buffers. */ @@ -566,8 +571,11 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource evict_mem; struct ttm_placement placement; + struct ttm_place hop; int ret = 0; + memset(&hop, 0, sizeof(hop)); + dma_resv_assert_held(bo->base.resv); placement.num_placement = 0; @@ -596,8 +604,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, goto out; } - ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx); + ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx, &hop); if (unlikely(ret)) { + WARN(ret == -EMULTIHOP, "Unexpected multihop in eviction - likely driver bug\n"); if (ret != -ERESTARTSYS) pr_err("Buffer eviction failed\n"); ttm_resource_free(bo, &evict_mem); @@ -620,7 +629,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_eviction_valuable); -/** +/* * Check the target bo is allowable to be evicted or swapout, including cases: * * a. if share same reservation object with ctx->resv, have assumption @@ -759,7 +768,7 @@ int ttm_mem_evict_first(struct ttm_bo_device *bdev, return ret; } -/** +/* * Add the last move fence to the BO and reserve a new shared slot. */ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, @@ -795,7 +804,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, return 0; } -/** +/* * Repeatedly evict memory from the LRU for @mem_type until we create enough * space, or we've evicted everything and there isn't enough space. */ @@ -857,7 +866,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, return 0; } -/** +/* * Creates space for memory region @mem according to its type. 
* * This function first searches for free space in compatible memory types in @@ -936,15 +945,45 @@ error: } EXPORT_SYMBOL(ttm_bo_mem_space); +static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo, + struct ttm_resource *mem, + struct ttm_operation_ctx *ctx, + struct ttm_place *hop) +{ + struct ttm_placement hop_placement; + int ret; + struct ttm_resource hop_mem = *mem; + + hop_mem.mm_node = NULL; + hop_mem.mem_type = TTM_PL_SYSTEM; + hop_mem.placement = 0; + + hop_placement.num_placement = hop_placement.num_busy_placement = 1; + hop_placement.placement = hop_placement.busy_placement = hop; + + /* find space in the bounce domain */ + ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx); + if (ret) + return ret; + /* move to the bounce domain */ + ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL); + if (ret) + return ret; + return 0; +} + static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_operation_ctx *ctx) { int ret = 0; + struct ttm_place hop; struct ttm_resource mem; dma_resv_assert_held(bo->base.resv); + memset(&hop, 0, sizeof(hop)); + mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; @@ -954,12 +993,25 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, /* * Determine where to move the buffer. + * + * If the driver determines that the move is going to need + * an extra step, it will return -EMULTIHOP; the buffer will + * then be moved to the temporary stop and the driver will + * be called again to make the second hop. */ +bounce: ret = ttm_bo_mem_space(bo, placement, &mem, ctx); if (ret) - goto out_unlock; - ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx); -out_unlock: + return ret; + ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop); + if (ret == -EMULTIHOP) { + ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop); + if (ret) + return ret; + /* try to move to the final place now. */ + goto bounce; + } if (ret) ttm_resource_free(bo, &mem); return ret; @@ -1379,7 +1431,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_wait); -/** +/* * A buffer object shrink method that tries to swap out the first * buffer object on the bo_global::swap_lru list.
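
The bounce flow above pairs with a driver-side check in the ->move callback. A minimal sketch, mirroring the radeon_bo_move() hunk earlier in this patch and assuming hardware that cannot blit directly between VRAM and system memory; example_bo_move() is a hypothetical name, not part of the patch.

    static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
                               struct ttm_operation_ctx *ctx,
                               struct ttm_resource *new_mem,
                               struct ttm_place *hop)
    {
            struct ttm_resource *old_mem = &bo->mem;

            /* direct VRAM <-> SYSTEM moves need an intermediate GTT stop */
            if ((old_mem->mem_type == TTM_PL_SYSTEM &&
                 new_mem->mem_type == TTM_PL_VRAM) ||
                (old_mem->mem_type == TTM_PL_VRAM &&
                 new_mem->mem_type == TTM_PL_SYSTEM)) {
                    hop->fpfn = 0;
                    hop->lpfn = 0;
                    hop->mem_type = TTM_PL_TT;
                    hop->flags = 0;
                    /* ttm_bo_move_buffer() bounces through TTM_PL_TT via
                     * ttm_bo_bounce_temp_buffer() and calls us again */
                    return -EMULTIHOP;
            }

            /* ... single-hop moves handled here ... */
            return 0;
    }
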
*/ @@ -1432,15 +1484,20 @@ int ttm_bo_swapout(struct ttm_operation_ctx *ctx) if (bo->mem.mem_type != TTM_PL_SYSTEM) { struct ttm_operation_ctx ctx = { false, false }; struct ttm_resource evict_mem; + struct ttm_place hop; + + memset(&hop, 0, sizeof(hop)); evict_mem = bo->mem; evict_mem.mm_node = NULL; evict_mem.placement = 0; evict_mem.mem_type = TTM_PL_SYSTEM; - ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx); - if (unlikely(ret != 0)) + ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop); + if (unlikely(ret != 0)) { + WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n"); goto out; + } } /** diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index ecb54415d1ca..7ccb2295cac1 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -32,6 +32,7 @@ #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> #include <drm/drm_vma_manager.h> +#include <linux/dma-buf-map.h> #include <linux/io.h> #include <linux/highmem.h> #include <linux/wait.h> @@ -471,6 +472,77 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) } EXPORT_SYMBOL(ttm_bo_kunmap); +int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) +{ + struct ttm_resource *mem = &bo->mem; + int ret; + + ret = ttm_mem_io_reserve(bo->bdev, mem); + if (ret) + return ret; + + if (mem->bus.is_iomem) { + void __iomem *vaddr_iomem; + size_t size = bo->num_pages << PAGE_SHIFT; + + if (mem->bus.addr) + vaddr_iomem = (void __iomem *)mem->bus.addr; + else if (mem->bus.caching == ttm_write_combined) + vaddr_iomem = ioremap_wc(mem->bus.offset, size); + else + vaddr_iomem = ioremap(mem->bus.offset, size); + + if (!vaddr_iomem) + return -ENOMEM; + + dma_buf_map_set_vaddr_iomem(map, vaddr_iomem); + + } else { + struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false + }; + struct ttm_tt *ttm = bo->ttm; + pgprot_t prot; + void *vaddr; + + ret = ttm_tt_populate(bo->bdev, ttm, &ctx); + if (ret) + return ret; + + /* + * We need to use vmap to get the desired page protection + * or to make the buffer object look contiguous.
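
A minimal caller-side sketch of the ttm_bo_vmap()/ttm_bo_vunmap() pair added above. struct dma_buf_map carries either a system or an I/O address, so the caller branches on is_iomem rather than assuming a plain pointer; example_clear_bo() is a hypothetical name, not part of the patch.

    static int example_clear_bo(struct ttm_buffer_object *bo)
    {
            struct dma_buf_map map;
            size_t size = bo->num_pages << PAGE_SHIFT;
            int ret;

            ret = ttm_bo_vmap(bo, &map);
            if (ret)
                    return ret;

            /* the mapping may live in I/O space; pick the right accessor */
            if (map.is_iomem)
                    memset_io(map.vaddr_iomem, 0, size);
            else
                    memset(map.vaddr, 0, size);

            ttm_bo_vunmap(bo, &map);
            return 0;
    }
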
+ */ + prot = ttm_io_prot(bo, mem, PAGE_KERNEL); + vaddr = vmap(ttm->pages, bo->num_pages, 0, prot); + if (!vaddr) + return -ENOMEM; + + dma_buf_map_set_vaddr(map, vaddr); + } + + return 0; +} +EXPORT_SYMBOL(ttm_bo_vmap); + +void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) +{ + struct ttm_resource *mem = &bo->mem; + + if (dma_buf_map_is_null(map)) + return; + + if (!map->is_iomem) + vunmap(map->vaddr); + else if (!mem->bus.addr) + iounmap(map->vaddr_iomem); + dma_buf_map_clear(map); + + ttm_mem_io_free(bo->bdev, &bo->mem); +} +EXPORT_SYMBOL(ttm_bo_vunmap); + static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo, bool dst_use_tt) { diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index 44ec41aa78d6..1b96780b4989 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -135,7 +135,7 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching, set_pages_wb(p, 1 << order); #endif - if (!pool->use_dma_alloc) { + if (!pool || !pool->use_dma_alloc) { __free_pages(p, order); return; } diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index ea77919569a2..e0952444cea9 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -37,7 +37,7 @@ #include <linux/spinlock.h> #include <linux/module.h> -/** +/* * Currently we use a spinlock for the lock, but a mutex *may* be * more appropriate to reduce scheduling latency if the range manager * ends up with very fragmented allocation patterns. diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index cfd633c7e764..da9eeffe0c6d 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -38,7 +38,7 @@ #include <drm/drm_cache.h> #include <drm/ttm/ttm_bo_driver.h> -/** +/* * Allocates a ttm structure for the given BO. */ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) @@ -73,7 +73,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) return 0; } -/** +/* * Allocates storage for pointers to the pages that back the ttm. 
*/ static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm) diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c index b5259cb1383f..07140e0b90a3 100644 --- a/drivers/gpu/drm/tve200/tve200_drv.c +++ b/drivers/gpu/drm/tve200/tve200_drv.c @@ -137,7 +137,7 @@ finish: DEFINE_DRM_GEM_CMA_FOPS(drm_fops); -static struct drm_driver tve200_drm_driver = { +static const struct drm_driver tve200_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .ioctls = NULL, .fops = &drm_fops, diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index cdc1c42e1669..3750fd216131 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -20,6 +20,7 @@ static int udl_get_edid_block(void *data, u8 *buf, unsigned int block, int ret, i; u8 *read_buff; struct udl_device *udl = data; + struct usb_device *udev = udl_to_usb_device(udl); read_buff = kmalloc(2, GFP_KERNEL); if (!read_buff) @@ -27,10 +28,9 @@ static int udl_get_edid_block(void *data, u8 *buf, unsigned int block, for (i = 0; i < len; i++) { int bval = (i + block * EDID_LENGTH) << 8; - ret = usb_control_msg(udl->udev, - usb_rcvctrlpipe(udl->udev, 0), - (0x02), (0x80 | (0x02 << 5)), bval, - 0xA1, read_buff, 2, HZ); + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + 0x02, (0x80 | (0x02 << 5)), bval, + 0xA1, read_buff, 2, HZ); if (ret < 1) { DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); kfree(read_buff); diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 96d4317a2c1b..b5a8dd9fdf02 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -34,7 +34,7 @@ static int udl_usb_resume(struct usb_interface *interface) DEFINE_DRM_GEM_FOPS(udl_driver_fops); -static struct drm_driver driver = { +static const struct drm_driver driver = { .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, /* GEM hooks */ @@ -53,7 +53,6 @@ static struct drm_driver driver = { static struct udl_device *udl_driver_create(struct usb_interface *interface) { - struct usb_device *udev = interface_to_usbdev(interface); struct udl_device *udl; int r; @@ -62,8 +61,6 @@ static struct udl_device *udl_driver_create(struct usb_interface *interface) if (IS_ERR(udl)) return udl; - udl->udev = udev; - r = udl_init(udl); if (r) return ERR_PTR(r); diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index b1461f30780b..875e73551ae9 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -50,7 +50,6 @@ struct urb_list { struct udl_device { struct drm_device drm; struct device *dev; - struct usb_device *udev; struct drm_simple_display_pipe display_pipe; @@ -66,6 +65,11 @@ struct udl_device { #define to_udl(x) container_of(x, struct udl_device, drm) +static inline struct usb_device *udl_to_usb_device(struct udl_device *udl) +{ + return interface_to_usbdev(to_usb_interface(udl->drm.dev)); +} + /* modeset */ int udl_modeset_init(struct drm_device *dev); struct drm_connector *udl_connector_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index f5d27f2a5654..0e2a376cb075 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -26,10 +26,9 @@ #define GET_URB_TIMEOUT HZ #define FREE_URB_TIMEOUT (HZ*2) -static int udl_parse_vendor_descriptor(struct drm_device *dev, - struct usb_device *usbdev) +static int udl_parse_vendor_descriptor(struct udl_device *udl) { - struct udl_device 
*udl = to_udl(dev); + struct usb_device *udev = udl_to_usb_device(udl); char *desc; char *buf; char *desc_end; @@ -41,7 +40,7 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev, return false; desc = buf; - total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */ + total_len = usb_get_descriptor(udev, 0x5f, /* vendor specific */ 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE); if (total_len > 5) { DRM_INFO("vendor descriptor length:%x data:%11ph\n", @@ -98,19 +97,20 @@ success: */ static int udl_select_std_channel(struct udl_device *udl) { - int ret; static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, 0x1C, 0x88, 0x5E, 0x15, 0x60, 0xFE, 0xC6, 0x97, 0x16, 0x3D, 0x47, 0xF2}; + void *sendbuf; + int ret; + struct usb_device *udev = udl_to_usb_device(udl); sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL); if (!sendbuf) return -ENOMEM; - ret = usb_control_msg(udl->udev, - usb_sndctrlpipe(udl->udev, 0), + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), NR_USB_REQUEST_CHANNEL, (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, sendbuf, sizeof(set_def_chn), @@ -202,6 +202,7 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) struct urb_node *unode; char *buf; size_t wanted_size = count * size; + struct usb_device *udev = udl_to_usb_device(udl); spin_lock_init(&udl->urbs.lock); @@ -229,7 +230,7 @@ retry: } unode->urb = urb; - buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL, + buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &urb->transfer_dma); if (!buf) { kfree(unode); @@ -243,8 +244,8 @@ retry: } /* urb->transfer_buffer_length set to actual before submit */ - usb_fill_bulk_urb(urb, udl->udev, usb_sndbulkpipe(udl->udev, 1), - buf, size, udl_urb_completion, unode); + usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1), + buf, size, udl_urb_completion, unode); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; list_add_tail(&unode->entry, &udl->urbs.list); @@ -316,7 +317,7 @@ int udl_init(struct udl_device *udl) mutex_init(&udl->gem_lock); - if (!udl_parse_vendor_descriptor(dev, udl->udev)) { + if (!udl_parse_vendor_descriptor(udl)) { ret = -ENODEV; DRM_ERROR("firmware not recognized. 
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index fef43f4e3bac..9d34ec9d03f6 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -276,6 +276,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
 	struct urb *urb;
 	struct drm_rect clip;
 	int log_bpp;
+	struct dma_buf_map map;
 	void *vaddr;
 
 	ret = udl_log_cpp(fb->format->cpp[0]);
@@ -296,15 +297,18 @@ static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
 		return ret;
 	}
 
-	vaddr = drm_gem_shmem_vmap(fb->obj[0]);
-	if (IS_ERR(vaddr)) {
+	ret = drm_gem_shmem_vmap(fb->obj[0], &map);
+	if (ret) {
 		DRM_ERROR("failed to vmap fb\n");
 		goto out_dma_buf_end_cpu_access;
 	}
+	vaddr = map.vaddr; /* TODO: Use mapping abstraction properly */
 
 	urb = udl_get_urb(dev);
-	if (!urb)
+	if (!urb) {
+		ret = -ENOMEM;
 		goto out_drm_gem_shmem_vunmap;
+	}
 	cmd = urb->transfer_buffer;
 
 	for (i = clip.y1; i < clip.y2; i++) {
@@ -333,7 +337,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
 	ret = 0;
 
 out_drm_gem_shmem_vunmap:
-	drm_gem_shmem_vunmap(fb->obj[0], vaddr);
+	drm_gem_shmem_vunmap(fb->obj[0], &map);
 out_dma_buf_end_cpu_access:
 	if (import_attach) {
 		tmp_ret = dma_buf_end_cpu_access(import_attach->dmabuf,
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 9f7c26193831..42d401fd244e 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -38,42 +38,6 @@
 #define DRIVER_MINOR 0
 #define DRIVER_PATCHLEVEL 0
 
-#ifdef CONFIG_PM
-static int v3d_runtime_suspend(struct device *dev)
-{
-	struct drm_device *drm = dev_get_drvdata(dev);
-	struct v3d_dev *v3d = to_v3d_dev(drm);
-
-	v3d_irq_disable(v3d);
-
-	clk_disable_unprepare(v3d->clk);
-
-	return 0;
-}
-
-static int v3d_runtime_resume(struct device *dev)
-{
-	struct drm_device *drm = dev_get_drvdata(dev);
-	struct v3d_dev *v3d = to_v3d_dev(drm);
-	int ret;
-
-	ret = clk_prepare_enable(v3d->clk);
-	if (ret != 0)
-		return ret;
-
-	/* XXX: VPM base */
-
-	v3d_mmu_set_page_table(v3d);
-	v3d_irq_enable(v3d);
-
-	return 0;
-}
-#endif
-
-static const struct dev_pm_ops v3d_v3d_pm_ops = {
-	SET_RUNTIME_PM_OPS(v3d_runtime_suspend, v3d_runtime_resume, NULL)
-};
-
 static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file_priv)
 {
@@ -194,7 +158,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CSD, v3d_submit_csd_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
 };
 
-static struct drm_driver v3d_drm_driver = {
+static const struct drm_driver v3d_drm_driver = {
 	.driver_features = (DRIVER_GEM | DRIVER_RENDER | DRIVER_SYNCOBJ),
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 182c586525eb..4eb354226972 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -275,6 +275,8 @@ v3d_lock_bo_reservations(struct v3d_job *job,
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @job: V3D job being set up
+ * @bo_handles: GEM handles
+ * @bo_count: Number of GEM handles passed in
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
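Editor's note: the udl_handle_damage() hunk shows the tree-wide vmap conversion running through this series: drm_gem_shmem_vmap() now returns an int and fills a caller-provided struct dma_buf_map instead of returning a raw pointer or ERR_PTR. A minimal sketch of the new calling convention, assuming a system-memory shmem object; access_bo() is an illustrative name:

#include <linux/dma-buf-map.h>
#include <linux/string.h>
#include <drm/drm_gem_shmem_helper.h>

static int access_bo(struct drm_gem_object *obj)
{
	struct dma_buf_map map;
	int ret;

	ret = drm_gem_shmem_vmap(obj, &map);	/* was: vaddr = drm_gem_shmem_vmap(obj) */
	if (ret)
		return ret;

	/* map.vaddr is valid for system memory; I/O memory would use
	 * map.vaddr_iomem with map.is_iomem set instead. */
	memset(map.vaddr, 0, obj->size);

	drm_gem_shmem_vunmap(obj, &map);	/* was: drm_gem_shmem_vunmap(obj, vaddr) */
	return 0;
}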
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 0747614a78f0..452682e2209f 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -63,7 +63,7 @@ v3d_job_free(struct drm_sched_job *sched_job)
 	v3d_job_put(job);
 }
 
-/**
+/*
 * Returns the fences that the job depends on, one by one.
 *
 * If placed in the scheduler's .dependency method, the corresponding
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index cf2e3e6a2388..f3eac72cb46e 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -26,7 +26,7 @@ static int vbox_modeset = -1;
 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
 module_param_named(modeset, vbox_modeset, int, 0400);
 
-static struct drm_driver driver;
+static const struct drm_driver driver;
 
 static const struct pci_device_id pciidlist[] = {
 	{ PCI_DEVICE(0x80ee, 0xbeef) },
@@ -175,7 +175,7 @@ static struct pci_driver vbox_pci_driver = {
 
 DEFINE_DRM_GEM_FOPS(vbox_fops);
 
-static struct drm_driver driver = {
+static const struct drm_driver driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
 
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 322bf7133ba1..dbc0dd53c69e 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -9,6 +9,8 @@
 * Michael Thayer <michael.thayer@oracle.com,
 * Hans de Goede <hdegoede@redhat.com>
 */
+
+#include <linux/dma-buf-map.h>
 #include <linux/export.h>
 
 #include <drm/drm_atomic.h>
@@ -384,6 +386,8 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
 	u32 height = plane->state->crtc_h;
 	size_t data_size, mask_size;
 	u32 flags;
+	struct dma_buf_map map;
+	int ret;
 	u8 *src;
 
 	/*
@@ -397,8 +401,8 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
 
 	vbox_crtc->cursor_enabled = true;
 
-	src = drm_gem_vram_vmap(gbo);
-	if (IS_ERR(src)) {
+	ret = drm_gem_vram_vmap(gbo, &map);
+	if (ret) {
 		/*
 		 * BUG: we should have pinned the BO in prepare_fb().
 		 */
@@ -406,6 +410,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
 		DRM_WARN("Could not map cursor bo, skipping update\n");
 		return;
 	}
+	src = map.vaddr; /* TODO: Use mapping abstraction properly */
 
 	/*
 	 * The mask must be calculated based on the alpha
@@ -416,7 +421,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
 	data_size = width * height * 4 + mask_size;
 
 	copy_cursor_image(src, vbox->cursor_data, width, height, mask_size);
-	drm_gem_vram_vunmap(gbo, src);
+	drm_gem_vram_vunmap(gbo, &map);
 
 	flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
 		VBOX_MOUSE_POINTER_ALPHA;
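Editor's note: drm_gem_vram_vmap() gets the same int/dma_buf_map treatment as the shmem helper, and here the abstraction earns its keep: a VRAM mapping may land in I/O memory, which struct dma_buf_map expresses via its is_iomem flag. A hedged sketch of a consumer that handles both cases; read_back_cursor() is an illustrative name:

#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/string.h>
#include <drm/drm_gem_vram_helper.h>

static int read_back_cursor(struct drm_gem_vram_object *gbo,
			    void *dst, size_t len)
{
	struct dma_buf_map map;
	int ret;

	ret = drm_gem_vram_vmap(gbo, &map);
	if (ret)
		return ret;

	/* Pick the accessor that matches where the buffer actually lives. */
	if (map.is_iomem)
		memcpy_fromio(dst, map.vaddr_iomem, len);
	else
		memcpy(dst, map.vaddr, len);

	drm_gem_vram_vunmap(gbo, &map);
	return 0;
}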
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index c2dead3b0733..469d1b4f2643 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -387,12 +387,11 @@ static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
 	.export = vc4_prime_export,
 	.get_sg_table = drm_gem_cma_prime_get_sg_table,
 	.vmap = vc4_prime_vmap,
-	.vunmap = drm_gem_cma_prime_vunmap,
 	.vm_ops = &vc4_vm_ops,
 };
 
 /**
- * vc4_gem_create_object - Implementation of driver->gem_create_object.
+ * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
@@ -786,16 +785,16 @@ int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	return drm_gem_cma_prime_mmap(obj, vma);
 }
 
-void *vc4_prime_vmap(struct drm_gem_object *obj)
+int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 {
 	struct vc4_bo *bo = to_vc4_bo(obj);
 
 	if (bo->validated_shader) {
 		DRM_DEBUG("mmaping of shader BOs not allowed.\n");
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
-	return drm_gem_cma_prime_vmap(obj);
+	return drm_gem_cma_prime_vmap(obj, map);
 }
 
 struct drm_gem_object *
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 4fbbf980a299..6da22af4ee91 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -16,7 +16,7 @@ struct vc4_debugfs_info_entry {
 	struct drm_info_list info;
 };
 
-/**
+/*
 * Called at drm_dev_register() time on each of the minors registered
 * by the DRM device, to attach the debugfs files.
 */
@@ -46,7 +46,7 @@ static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
 	return 0;
 }
 
-/**
+/*
 * Registers a debugfs file with a callback function for a vc4 component.
 *
 * This is like drm_debugfs_create_files(), but that can only be
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 40f1192fff02..137c382256d5 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -807,7 +807,7 @@ int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
 						 struct dma_buf_attachment *attach,
 						 struct sg_table *sgt);
-void *vc4_prime_vmap(struct drm_gem_object *obj);
+int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
 int vc4_bo_cache_init(struct drm_device *dev);
 int vc4_bo_inc_usecnt(struct vc4_bo *bo);
 void vc4_bo_dec_usecnt(struct vc4_bo *bo);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 95779d50cca0..b80eb9d3d9d5 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -331,9 +331,8 @@ static void vc4_hdmi_set_audio_infoframe(struct drm_encoder *encoder)
 {
 	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 	union hdmi_infoframe frame;
-	int ret;
 
-	ret = hdmi_audio_infoframe_init(&frame.audio);
+	hdmi_audio_infoframe_init(&frame.audio);
 
 	frame.audio.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
 	frame.audio.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index 7c6b4818f245..96d764ebfe67 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -142,7 +142,7 @@ struct vc4_hdmi_register {
 #define VC5_RAM_REG(reg, offset)	_VC4_REG(VC5_RAM, reg, offset)
 #define VC5_RM_REG(reg, offset)		_VC4_REG(VC5_RM, reg, offset)
 
-static const struct vc4_hdmi_register vc4_hdmi_fields[] = {
+static const struct vc4_hdmi_register __maybe_unused vc4_hdmi_fields[] = {
 	VC4_HD_REG(HDMI_M_CTL, 0x000c),
 	VC4_HD_REG(HDMI_MAI_CTL, 0x0014),
 	VC4_HD_REG(HDMI_MAI_THR, 0x0018),
@@ -203,7 +203,7 @@ static const struct vc4_hdmi_register vc4_hdmi_fields[] = {
 	VC4_HDMI_REG(HDMI_RAM_PACKET_START, 0x0400),
 };
 
-static const struct vc4_hdmi_register vc5_hdmi_hdmi0_fields[] = {
+static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi0_fields[] = {
 	VC4_HD_REG(HDMI_DVP_CTL, 0x0000),
 	VC4_HD_REG(HDMI_MAI_CTL, 0x0010),
 	VC4_HD_REG(HDMI_MAI_THR, 0x0014),
@@ -279,7 +279,7 @@ static const struct vc4_hdmi_register vc5_hdmi_hdmi0_fields[] = {
 	VC5_CSC_REG(HDMI_CSC_34_33, 0x018),
 };
 
-static const struct vc4_hdmi_register vc5_hdmi_hdmi1_fields[] = {
+static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi1_fields[] = {
 	VC4_HD_REG(HDMI_DVP_CTL, 0x0000),
 	VC4_HD_REG(HDMI_MAI_CTL, 0x0030),
 	VC4_HD_REG(HDMI_MAI_THR, 0x0034),
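Editor's note: the __maybe_unused annotations above silence -Wunused-const-variable for register tables that are defined in a header but only referenced by some of the files including it. A small illustration of the attribute; my_regs.h and my_table are hypothetical:

#include <linux/compiler_attributes.h>

/* In a shared header (e.g. my_regs.h): not every translation unit that
 * includes this table uses it, and __maybe_unused keeps the compiler
 * from warning in the ones that don't. */
static const int __maybe_unused my_table[] = { 1, 2, 3 };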
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index f4aa75efd16b..18abc06335c1 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -77,7 +77,7 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
 void vc4_perfmon_open_file(struct vc4_file *vc4file)
 {
 	mutex_init(&vc4file->perfmon.lock);
-	idr_init(&vc4file->perfmon.idr);
+	idr_init_base(&vc4file->perfmon.idr, VC4_PERFMONID_MIN);
 }
 
 static int vc4_perfmon_idr_del(int id, void *elem, void *data)
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 65d0dac69b0b..73d63d72575b 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -122,7 +122,7 @@ static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
 	return 0;
 }
 
-/**
+/*
 * Wraps pm_runtime_get_sync() in a refcount, so that we can reliably
 * get the pm_runtime refcount to 0 in vc4_reset().
 */
@@ -205,7 +205,7 @@ try_again:
 	return -ENOMEM;
 }
 
-/**
+/*
 * bin_bo_alloc() - allocates the memory that will be used for
 * tile binning.
 *
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index fa54a6d1403d..9a413091abb6 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -361,24 +361,30 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
 	return &obj->base;
 }
 
-static void *vgem_prime_vmap(struct drm_gem_object *obj)
+static int vgem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 {
 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
 	long n_pages = obj->size >> PAGE_SHIFT;
 	struct page **pages;
+	void *vaddr;
 
 	pages = vgem_pin_pages(bo);
 	if (IS_ERR(pages))
-		return NULL;
+		return PTR_ERR(pages);
+
+	vaddr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+	if (!vaddr)
+		return -ENOMEM;
+	dma_buf_map_set_vaddr(map, vaddr);
 
-	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+	return 0;
 }
 
-static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+static void vgem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
 {
 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
 
-	vunmap(vaddr);
+	vunmap(map->vaddr);
 	vgem_unpin_pages(bo);
 }
 
@@ -415,7 +421,7 @@ static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
 	.vm_ops = &vgem_gem_vm_ops,
 };
 
-static struct drm_driver vgem_driver = {
+static const struct drm_driver vgem_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_RENDER,
 	.open = vgem_open,
 	.postclose = vgem_postclose,
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 17f32f550dd9..2902dc6e64fa 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -233,7 +233,7 @@ int vgem_fence_signal_ioctl(struct drm_device *dev,
 int vgem_fence_open(struct vgem_file *vfile)
 {
 	mutex_init(&vfile->fence_mutex);
-	idr_init(&vfile->fence_idr);
+	idr_init_base(&vfile->fence_idr, 1);
 
 	return 0;
 }
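Editor's note: idr_init_base() tells the IDR the lowest ID it will ever hand out, letting the radix tree skip the always-empty slots below it; vgem uses base 1 because fence ID 0 means "no fence". A minimal sketch of the pattern; my_idr and the helpers are illustrative, and idr_alloc() callers must provide their own locking:

#include <linux/idr.h>

static struct idr my_idr;

static void my_idr_setup(void)
{
	/* IDs start at 1, so 0 can keep meaning "invalid/none". */
	idr_init_base(&my_idr, 1);
}

static int my_obj_register(void *obj)
{
	/* Lowest free ID in [1, INT_MAX]; returns a negative errno
	 * on failure, never 0. */
	return idr_alloc(&my_idr, obj, 1, 0, GFP_KERNEL);
}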
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 1208445e341d..cd56ffa3df58 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -593,13 +593,11 @@ static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
 
 static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
 {
-	uint32_t agp_base;
 	uint32_t pause_addr_lo, pause_addr_hi;
 	uint32_t jump_addr_lo, jump_addr_hi;
 	volatile uint32_t *last_pause_ptr;
 	uint32_t dma_low_save1, dma_low_save2;
 
-	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
 	via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
 		      &jump_addr_lo, 0);
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 4bf74836bd53..a6caebd4a0dd 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -119,6 +119,8 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
 static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
 					 struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
 
 	/*
@@ -127,7 +129,7 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
 	 * in the plane update callback, and here we just check
 	 * whenever we must force the modeset.
 	 */
-	if (drm_atomic_crtc_needs_modeset(crtc->state)) {
+	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
 		output->needs_modeset = true;
 	}
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 86330f1ade72..27f13bd29c13 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -37,7 +37,7 @@
 
 #include "virtgpu_drv.h"
 
-static struct drm_driver driver;
+static const struct drm_driver driver;
 
 static int virtio_gpu_modeset = -1;
 
@@ -190,7 +190,7 @@ MODULE_AUTHOR("Alon Levy");
 
 DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
 
-static struct drm_driver driver = {
+static const struct drm_driver driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
 	.open = virtio_gpu_driver_open,
 	.postclose = virtio_gpu_driver_postclose,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 2d3aa7baffe4..d9ad27e00905 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -184,8 +184,9 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
 		*nents = shmem->pages->orig_nents;
 	}
 
-	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
-			      GFP_KERNEL);
+	*ents = kvmalloc_array(*nents,
+			       sizeof(struct virtio_gpu_mem_entry),
+			       GFP_KERNEL);
 	if (!(*ents)) {
 		DRM_ERROR("failed to allocate ent list\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 1ef1e2f22633..807a27a16365 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -43,7 +43,7 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
 	return 0;
 }
 
-const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
+static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
 	.ops = {
 		.cache_sgt_mapping = true,
 		.attach = virtio_dma_buf_attach,
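Editor's note: the virtio flush hunk belongs to the ongoing conversion of atomic CRTC helpers to take the full drm_atomic_state: instead of peeking at crtc->state, the callback looks up the new state that the commit is tracking for this CRTC. A hedged sketch of the idiom; my_crtc_atomic_flush() is an illustrative name:

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static void my_crtc_atomic_flush(struct drm_crtc *crtc,
				 struct drm_atomic_state *state)
{
	/* Fetch the new state for this CRTC from the commit itself,
	 * rather than relying on when the helper swaps crtc->state. */
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
		/* a full modeset was requested for this CRTC */
	}
}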
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 857f730747b6..cf84d382dd41 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1211,10 +1211,8 @@ int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_resp_map_info *resp_buf;
 
 	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
-	if (!resp_buf) {
-		virtio_gpu_array_put_free(objs);
+	if (!resp_buf)
 		return -ENOMEM;
-	}
 
 	cmd_p = virtio_gpu_alloc_cmd_resp
 		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 25faba5aac08..1a1b5bc8e121 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -78,7 +78,7 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
 	drm_atomic_helper_cleanup_planes(dev, old_state);
 }
 
-static struct drm_driver vkms_driver = {
+static const struct drm_driver vkms_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
 	.release = vkms_release,
 	.fops = &vkms_driver_fops,
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 9890137bcb8d..0824327cc860 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0+
 
+#include <linux/dma-buf-map.h>
+
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fourcc.h>
@@ -146,15 +148,16 @@ static int vkms_prepare_fb(struct drm_plane *plane,
 			   struct drm_plane_state *state)
 {
 	struct drm_gem_object *gem_obj;
-	void *vaddr;
+	struct dma_buf_map map;
+	int ret;
 
 	if (!state->fb)
 		return 0;
 
 	gem_obj = drm_gem_fb_get_obj(state->fb, 0);
-	vaddr = drm_gem_shmem_vmap(gem_obj);
-	if (IS_ERR(vaddr))
-		DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr));
+	ret = drm_gem_shmem_vmap(gem_obj, &map);
+	if (ret)
+		DRM_ERROR("vmap failed: %d\n", ret);
 
 	return drm_gem_fb_prepare_fb(plane, state);
 }
@@ -164,13 +167,15 @@ static void vkms_cleanup_fb(struct drm_plane *plane,
 {
 	struct drm_gem_object *gem_obj;
 	struct drm_gem_shmem_object *shmem_obj;
+	struct dma_buf_map map;
 
 	if (!old_state->fb)
 		return;
 
 	gem_obj = drm_gem_fb_get_obj(old_state->fb, 0);
 	shmem_obj = to_drm_gem_shmem_obj(drm_gem_fb_get_obj(old_state->fb, 0));
-	drm_gem_shmem_vunmap(gem_obj, shmem_obj->vaddr);
+	dma_buf_map_set_vaddr(&map, shmem_obj->vaddr);
+	drm_gem_shmem_vunmap(gem_obj, &map);
 }
 
 static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
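Editor's note: kvmalloc_array() in the virtio hunk above removes an artificial cap on the number of attached memory entries: it attempts a kmalloc first and transparently falls back to vmalloc when the array is too large for physically contiguous pages. A minimal sketch; my_mem_entry and alloc_entries() are illustrative:

#include <linux/mm.h>
#include <linux/types.h>

struct my_mem_entry {
	u64 addr;
	u32 length;
};

static struct my_mem_entry *alloc_entries(unsigned int nents)
{
	/* Overflow-checked n * size allocation; the result may be
	 * vmalloc-backed, so release it with kvfree(), never kfree(). */
	return kvmalloc_array(nents, sizeof(struct my_mem_entry),
			      GFP_KERNEL | __GFP_ZERO);
}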
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index 26b903926872..67f80ab1e85f 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0+
 
-#include "vkms_drv.h"
+#include <linux/dma-buf-map.h>
+
 #include <drm/drm_fourcc.h>
 #include <drm/drm_writeback.h>
 #include <drm/drm_probe_helper.h>
@@ -8,6 +9,8 @@
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_gem_shmem_helper.h>
 
+#include "vkms_drv.h"
+
 static const u32 vkms_wb_formats[] = {
 	DRM_FORMAT_XRGB8888,
 };
@@ -65,19 +68,20 @@ static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector,
 			       struct drm_writeback_job *job)
 {
 	struct drm_gem_object *gem_obj;
-	void *vaddr;
+	struct dma_buf_map map;
+	int ret;
 
 	if (!job->fb)
 		return 0;
 
 	gem_obj = drm_gem_fb_get_obj(job->fb, 0);
-	vaddr = drm_gem_shmem_vmap(gem_obj);
-	if (IS_ERR(vaddr)) {
-		DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr));
-		return PTR_ERR(vaddr);
+	ret = drm_gem_shmem_vmap(gem_obj, &map);
+	if (ret) {
+		DRM_ERROR("vmap failed: %d\n", ret);
+		return ret;
 	}
 
-	job->priv = vaddr;
+	job->priv = map.vaddr;
 
 	return 0;
 }
@@ -87,12 +91,14 @@ static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
 {
 	struct drm_gem_object *gem_obj;
 	struct vkms_device *vkmsdev;
+	struct dma_buf_map map;
 
 	if (!job->fb)
 		return;
 
 	gem_obj = drm_gem_fb_get_obj(job->fb, 0);
-	drm_gem_shmem_vunmap(gem_obj, job->priv);
+	dma_buf_map_set_vaddr(&map, job->priv);
+	drm_gem_shmem_vunmap(gem_obj, &map);
 
 	vkmsdev = drm_device_to_vkms_device(gem_obj->dev);
 	vkms_set_composer(&vkmsdev->output, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0c42d2c05f43..216daf93022c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1469,7 +1469,7 @@ static const struct file_operations vmwgfx_driver_fops = {
 	.get_unmapped_area = vmw_get_unmapped_area,
 };
 
-static struct drm_driver driver = {
+static const struct drm_driver driver = {
 	.driver_features =
 	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
 	.ioctls = vmw_ioctls,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 51f70bea41cc..6a04261ce760 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -695,7 +695,8 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
 static int vmw_move(struct ttm_buffer_object *bo,
 		    bool evict,
 		    struct ttm_operation_ctx *ctx,
-		    struct ttm_resource *new_mem)
+		    struct ttm_resource *new_mem,
+		    struct ttm_place *hop)
 {
 	struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
 	struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 98b6d2ba088a..30d9adf31c84 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -483,7 +483,7 @@ static const struct file_operations xen_drm_dev_fops = {
 	.mmap = xen_drm_front_gem_mmap,
 };
 
-static struct drm_driver xen_drm_driver = {
+static const struct drm_driver xen_drm_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 	.release = xen_drm_drv_release,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
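Editor's note: vkms stores only the raw CPU pointer across the vmap/vunmap pair (in job->priv and shmem_obj->vaddr), so its cleanup paths rebuild a struct dma_buf_map around the saved pointer before calling the helper. A hedged sketch of that reconstruction; release_mapping() is an illustrative name, and this only works for system-memory (non-iomem) mappings:

#include <linux/dma-buf-map.h>
#include <drm/drm_gem_shmem_helper.h>

static void release_mapping(struct drm_gem_object *obj, void *stored_vaddr)
{
	struct dma_buf_map map;

	/* Recreate the mapping descriptor from the saved CPU pointer. */
	dma_buf_map_set_vaddr(&map, stored_vaddr);
	drm_gem_shmem_vunmap(obj, &map);
}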
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 4f34ef34ba60..74db5a840bed 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -290,22 +290,28 @@ int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return gem_mmap_obj(xen_obj, vma);
 }
 
-void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
+int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, struct dma_buf_map *map)
 {
 	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+	void *vaddr;
 
 	if (!xen_obj->pages)
-		return NULL;
+		return -ENOMEM;
 
 	/* Please see comment in gem_mmap_obj on mapping and attributes. */
-	return vmap(xen_obj->pages, xen_obj->num_pages,
-		    VM_MAP, PAGE_KERNEL);
+	vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
+		     VM_MAP, PAGE_KERNEL);
+	if (!vaddr)
+		return -ENOMEM;
+	dma_buf_map_set_vaddr(map, vaddr);
+
+	return 0;
 }
 
 void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
-				    void *vaddr)
+				    struct dma_buf_map *map)
 {
-	vunmap(vaddr);
+	vunmap(map->vaddr);
 }
 
 int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h
index a39675fa31b2..a4e67d0a149c 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.h
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.h
@@ -12,6 +12,7 @@
 #define __XEN_DRM_FRONT_GEM_H
 
 struct dma_buf_attachment;
+struct dma_buf_map;
 struct drm_device;
 struct drm_gem_object;
 struct file;
@@ -34,10 +35,11 @@ void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj);
 
 int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
-void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj);
+int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
+				 struct dma_buf_map *map);
 
 void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
-				    void *vaddr);
+				    struct dma_buf_map *map);
 
 int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
 				 struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index f3ffc3703a0e..0c1c50271a88 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -76,7 +76,7 @@ static const struct drm_mode_config_funcs zynqmp_dpsub_mode_config_funcs = {
 
 DEFINE_DRM_GEM_CMA_FOPS(zynqmp_dpsub_drm_fops);
 
-static struct drm_driver zynqmp_dpsub_drm_driver = {
+static const struct drm_driver zynqmp_dpsub_drm_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
 
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index 31014a451f8b..5506336594e2 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -34,7 +34,7 @@ static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
 
 DEFINE_DRM_GEM_CMA_FOPS(zx_drm_fops);
 
-static struct drm_driver zx_drm_driver = {
+static const struct drm_driver zx_drm_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 	DRM_GEM_CMA_DRIVER_OPS,
 	.fops = &zx_drm_fops,
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index e201f62d62c0..347fb962b6c9 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -32,6 +32,7 @@ struct host1x_subdev {
 /**
 * host1x_subdev_add() - add a new subdevice with an associated device node
 * @device: host1x device to add the subdevice to
+ * @driver: host1x driver containing the subdevices
 * @np: device node
 */
 static int host1x_subdev_add(struct host1x_device *device,
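Editor's note: the host1x fix illustrates the kernel-doc rule behind several hunks in this section: a comment opened with /** must document every parameter of the function with an @name: line matching the signature, while plain comments should open with /* (which is what the vc4 and v3d hunks convert to). A small illustrative example; my_add() is hypothetical:

/**
 * my_add() - add two counters
 * @a: first addend
 * @b: second addend
 *
 * Return: the sum of @a and @b.
 */
static int my_add(int a, int b)
{
	return a + b;
}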