Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a6xx_gmu.c')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c  70
1 file changed, 46 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 38e2cfa9cec7..2ca470eb5cb8 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -74,7 +74,7 @@ bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
u32 val;
/* This can be called from gpu state code so make sure GMU is valid */
- if (IS_ERR_OR_NULL(gmu->mmio))
+ if (!gmu->initialized)
return false;
val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
@@ -90,7 +90,7 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
u32 val;
/* This can be called from gpu state code so make sure GMU is valid */
- if (IS_ERR_OR_NULL(gmu->mmio))
+ if (!gmu->initialized)
return false;
val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
@@ -504,8 +504,10 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
wmb();
err:
- devm_iounmap(gmu->dev, pdcptr);
- devm_iounmap(gmu->dev, seqptr);
+ if (!IS_ERR_OR_NULL(pdcptr))
+ iounmap(pdcptr);
+ if (!IS_ERR_OR_NULL(seqptr))
+ iounmap(seqptr);
}
/*
@@ -695,7 +697,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int status, ret;
- if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
+ if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
return 0;
gmu->hung = false;
@@ -765,7 +767,7 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
u32 reg;
- if (!gmu->mmio)
+ if (!gmu->initialized)
return true;
reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
@@ -1195,7 +1197,7 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
return ERR_PTR(-EINVAL);
}
- ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ ret = ioremap(res->start, resource_size(res));
if (!ret) {
DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
return ERR_PTR(-EINVAL);
@@ -1211,10 +1213,10 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
irq = platform_get_irq_byname(pdev, name);
- ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
- name, gmu);
+ ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
if (ret) {
- DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
+ name, ret);
return ret;
}
@@ -1227,27 +1229,35 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- if (IS_ERR_OR_NULL(gmu->mmio))
+ if (!gmu->initialized)
return;
- a6xx_gmu_stop(a6xx_gpu);
-
- pm_runtime_disable(gmu->dev);
+ pm_runtime_force_suspend(gmu->dev);
if (!IS_ERR_OR_NULL(gmu->gxpd)) {
pm_runtime_disable(gmu->gxpd);
dev_pm_domain_detach(gmu->gxpd, false);
}
- a6xx_gmu_irq_disable(gmu);
+ iounmap(gmu->mmio);
+ gmu->mmio = NULL;
+
a6xx_gmu_memory_free(gmu, gmu->hfi);
iommu_detach_device(gmu->domain, gmu->dev);
iommu_domain_free(gmu->domain);
+
+ free_irq(gmu->gmu_irq, gmu);
+ free_irq(gmu->hfi_irq, gmu);
+
+ /* Drop reference taken in of_find_device_by_node */
+ put_device(gmu->dev);
+
+ gmu->initialized = false;
}
-int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
struct platform_device *pdev = of_find_device_by_node(node);
@@ -1268,34 +1278,34 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Get the list of clocks */
ret = a6xx_gmu_clocks_probe(gmu);
if (ret)
- return ret;
+ goto err_put_device;
/* Set up the IOMMU context bank */
ret = a6xx_gmu_memory_probe(gmu);
if (ret)
- return ret;
+ goto err_put_device;
/* Allocate memory for the HFI queues */
gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
if (IS_ERR(gmu->hfi))
- goto err;
+ goto err_memory;
/* Allocate memory for the GMU debug region */
gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
if (IS_ERR(gmu->debug))
- goto err;
+ goto err_memory;
/* Map the GMU registers */
gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
if (IS_ERR(gmu->mmio))
- goto err;
+ goto err_memory;
/* Get the HFI and GMU interrupts */
gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
- goto err;
+ goto err_mmio;
/*
* Get a link to the GX power domain to reset the GPU in case of GMU
@@ -1309,8 +1319,15 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Set up the HFI queues */
a6xx_hfi_init(gmu);
+ gmu->initialized = true;
+
return 0;
-err:
+
+err_mmio:
+ iounmap(gmu->mmio);
+ free_irq(gmu->gmu_irq, gmu);
+ free_irq(gmu->hfi_irq, gmu);
+err_memory:
a6xx_gmu_memory_free(gmu, gmu->hfi);
if (gmu->domain) {
@@ -1318,6 +1335,11 @@ err:
iommu_domain_free(gmu->domain);
}
- return -ENODEV;
+ ret = -ENODEV;
+err_put_device:
+ /* Drop reference taken in of_find_device_by_node */
+ put_device(gmu->dev);
+
+ return ret;
}
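
Note: the core change in this patch is a move away from device-managed (devm_*) resources to explicit acquire/release pairs, an initialized flag, and a goto-based unwind ladder, so that a6xx_gmu_remove() can tear the GMU down independently of the struct device lifetime. Below is a minimal, self-contained user-space C sketch of that init/remove pattern; it is an illustration only, and the helper names (map_regs, hook_irq, etc.) are hypothetical stand-ins, not functions from the patch or the kernel.

#include <stdlib.h>

/* Hypothetical stand-ins for ioremap()/request_irq()-style acquisitions. */
static void *map_regs(void)        { return malloc(64); }
static void  unmap_regs(void *p)   { free(p); }
static int   hook_irq(void)        { return 0; }   /* 0 on success */
static void  unhook_irq(void)      { }

struct gmu_like {
	void *mmio;
	int   have_irq;
	int   initialized;
};

/* Acquire each resource in order; on failure, unwind only what was taken. */
static int gmu_like_init(struct gmu_like *g)
{
	int ret;

	g->mmio = map_regs();
	if (!g->mmio) {
		ret = -1;
		goto err_none;
	}

	ret = hook_irq();
	if (ret)
		goto err_mmio;
	g->have_irq = 1;

	g->initialized = 1;
	return 0;

err_mmio:
	unmap_regs(g->mmio);
	g->mmio = NULL;
err_none:
	return ret;
}

/*
 * Teardown mirrors init in reverse and is gated on the initialized flag,
 * echoing the !gmu->initialized checks this patch introduces.
 */
static void gmu_like_remove(struct gmu_like *g)
{
	if (!g->initialized)
		return;

	if (g->have_irq)
		unhook_irq();
	unmap_regs(g->mmio);
	g->mmio = NULL;
	g->initialized = 0;
}

int main(void)
{
	struct gmu_like g = {0};

	if (gmu_like_init(&g) == 0)
		gmu_like_remove(&g);
	return 0;
}

The same ordering discipline shows up in the patch itself: a6xx_gmu_init() unwinds through err_mmio, err_memory and err_put_device in reverse acquisition order, and a6xx_gmu_remove() releases the MMIO mapping, GMU memory, IOMMU domain and IRQs before dropping the device reference taken in of_find_device_by_node().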