From 944fc36c31ed685cf8d3d125eb681ae7198f06fc Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Wed, 9 Jul 2014 22:08:15 -0400
Subject: drm/msm: use upstream iommu

Downstream kernel IOMMU had a non-standard way of dealing with
multiple devices and multiple ports/contexts.  We don't need that
on upstream kernel, so rip out the crazy.

Note that we have to move the pinning of the ringbuffer to after
the IOMMU is attached.  No idea how that managed to work properly
on the downstream kernel.

For now, I am leaving the IOMMU port name stuff in place, to
simplify things for folks trying to backport latest drm/msm to
device kernels.  Once we no longer have to care about pre-DT
kernels, we can drop this and instead backport upstream IOMMU
driver.

Signed-off-by: Rob Clark
---
 drivers/gpu/drm/msm/msm_gpu.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

(limited to 'drivers/gpu/drm/msm/msm_gpu.c')

diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c6322197db8c..915240b4b80a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -606,7 +606,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	iommu = iommu_domain_alloc(&platform_bus_type);
 	if (iommu) {
 		dev_info(drm->dev, "%s: using IOMMU\n", name);
-		gpu->mmu = msm_iommu_new(drm, iommu);
+		gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
 	} else {
 		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 	}
@@ -621,13 +621,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		goto fail;
 	}
 
-	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
-	if (ret) {
-		gpu->rb_iova = 0;
-		dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
-		goto fail;
-	}
-
 	bs_init(gpu);
 
 	return 0;
--
cgit v1.2.3
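The ordering change called out above (create the ringbuffer in msm_gpu_init(), pin it only once the IOMMU is attached) can be summarized with a small sketch. The helper name below is hypothetical; the calls mirror the adreno_hw_init() hunk in the follow-up patch, where the pinning now lives.

/*
 * Sketch only, not the driver's literal code: with the upstream IOMMU,
 * a GEM object only gets a usable IOVA once the GPU's MMU domain is
 * attached, so the ringbuffer is allocated at init time but pinned
 * from the hw_init path.
 */
static int pin_ringbuffer_after_attach(struct msm_gpu *gpu)
{
	int ret;

	/* called from hw_init, i.e. after the MMU has been attached */
	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
	if (ret) {
		gpu->rb_iova = 0;
		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
	}
	return ret;
}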
From a1ad35233345e7ddd9ea3ea7b841432f4723d743 Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Fri, 11 Jul 2014 11:59:22 -0400
Subject: drm/msm: fix potential deadlock in gpu init

Somewhere along the way, the firmware loader sprouted another lock
dependency, resulting in possible deadlock scenario:

  &dev->struct_mutex --> &sb->s_type->i_mutex_key#2 --> &mm->mmap_sem

which is problematic vs things like gem mmap.

So introduce a separate mutex to synchronize gpu init.

Signed-off-by: Rob Clark
---
 drivers/gpu/drm/msm/adreno/adreno_gpu.c |  8 +++++---
 drivers/gpu/drm/msm/msm_drv.c           | 13 ++++++++-----
 drivers/gpu/drm/msm/msm_gpu.c           |  3 +++
 3 files changed, 16 insertions(+), 8 deletions(-)

(limited to 'drivers/gpu/drm/msm/msm_gpu.c')

diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 76c1df73e747..655ce5b14ad0 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -95,7 +95,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 
 	DBG("%s", gpu->name);
 
-	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
 	if (ret) {
 		gpu->rb_iova = 0;
 		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -370,8 +370,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
+	mutex_lock(&drm->struct_mutex);
 	gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
 			MSM_BO_UNCACHED);
+	mutex_unlock(&drm->struct_mutex);
 	if (IS_ERR(gpu->memptrs_bo)) {
 		ret = PTR_ERR(gpu->memptrs_bo);
 		gpu->memptrs_bo = NULL;
@@ -379,13 +381,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
+	gpu->memptrs = msm_gem_vaddr(gpu->memptrs_bo);
 	if (!gpu->memptrs) {
 		dev_err(drm->dev, "could not vmap memptrs\n");
 		return -ENOMEM;
 	}
 
-	ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
+	ret = msm_gem_get_iova(gpu->memptrs_bo, gpu->base.id,
 			&gpu->memptrs_iova);
 	if (ret) {
 		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a2f5bf6da6f3..b447c01ad89c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -181,7 +181,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 	struct msm_kms *kms;
 	int ret;
 
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		dev_err(dev->dev, "failed to allocate private data\n");
@@ -314,13 +313,15 @@ fail:
 
 static void load_gpu(struct drm_device *dev)
 {
+	static DEFINE_MUTEX(init_lock);
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gpu *gpu;
 
+	mutex_lock(&init_lock);
+
 	if (priv->gpu)
-		return;
+		goto out;
 
-	mutex_lock(&dev->struct_mutex);
 	gpu = a3xx_gpu_init(dev);
 	if (IS_ERR(gpu)) {
 		dev_warn(dev->dev, "failed to load a3xx gpu\n");
@@ -330,7 +331,9 @@ static void load_gpu(struct drm_device *dev)
 	if (gpu) {
 		int ret;
 
+		mutex_lock(&dev->struct_mutex);
 		gpu->funcs->pm_resume(gpu);
+		mutex_unlock(&dev->struct_mutex);
 		ret = gpu->funcs->hw_init(gpu);
 		if (ret) {
 			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
@@ -340,12 +343,12 @@ static void load_gpu(struct drm_device *dev)
 			/* give inactive pm a chance to kick in: */
 			msm_gpu_retire(gpu);
 		}
-
 	}
 
 	priv->gpu = gpu;
 
-	mutex_unlock(&dev->struct_mutex);
+out:
+	mutex_unlock(&init_lock);
 }
 
 static int msm_open(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 915240b4b80a..4a0dce587745 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -612,8 +612,11 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	}
 	gpu->id = msm_register_mmu(drm, gpu->mmu);
 
+
 	/* Create ringbuffer: */
+	mutex_lock(&drm->struct_mutex);
 	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
+	mutex_unlock(&drm->struct_mutex);
 	if (IS_ERR(gpu->rb)) {
 		ret = PTR_ERR(gpu->rb);
 		gpu->rb = NULL;
--
cgit v1.2.3
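A minimal sketch of the locking pattern this second patch adopts, assuming the inversion partner is the GEM mmap/fault path (which acquires mmap_sem before struct_mutex). The function name below is hypothetical, and hw_init plus error handling are omitted for brevity; the identifiers otherwise match the patched load_gpu().

/*
 * Simplified sketch, not the literal driver code.  The old code held
 * dev->struct_mutex across a3xx_gpu_init(), whose firmware loading can
 * take i_mutex and mmap_sem; the gem mmap path takes mmap_sem and then
 * struct_mutex, so the two chains could deadlock (ABBA).  A dedicated
 * static mutex now serializes the one-time init, and struct_mutex is
 * only taken around the short GEM allocations inside.
 */
static void load_gpu_sketch(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu;

	mutex_lock(&init_lock);

	if (priv->gpu)
		goto out;

	/* firmware is requested in here with struct_mutex NOT held */
	gpu = a3xx_gpu_init(dev);
	if (IS_ERR(gpu))
		gpu = NULL;

	priv->gpu = gpu;
out:
	mutex_unlock(&init_lock);
}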