Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 3
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 41
-rw-r--r--  drivers/gpu/drm/exynos/exynos_ddc.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmiphy.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 12
-rw-r--r--  drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 77
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 160
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 76
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h | 147
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc | 13
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h | 157
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nve0.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/gpio.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/base.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c | 128
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 65
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nve0.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/base.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mxm/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 86
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 14
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 106
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 89
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 51
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 78
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 18
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 10
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 11
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 18
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 25
-rw-r--r--  drivers/gpu/drm/tegra/host1x.c | 2
79 files changed, 1318 insertions, 551 deletions
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 31123b6a0be5..2d2c2f8d6dc6 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -60,8 +60,7 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
MODULE_DEVICE_TABLE(pci, pciidlist);
-static int __devinit
-ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index dcd1a8c029eb..8ecb601152ef 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -56,8 +56,8 @@ static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
return 0;
}
-static int __devinit
-cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int cirrus_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int ret;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 0761a03cdbb2..2bf9670ba29b 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -184,19 +184,27 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
* -ENOSPC if no suitable free area is available. The preallocated memory node
* must be cleared.
*/
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment)
+int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long color)
{
struct drm_mm_node *hole_node;
- hole_node = drm_mm_search_free(mm, size, alignment, false);
+ hole_node = drm_mm_search_free_generic(mm, size, alignment,
+ color, 0);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper(hole_node, node, size, alignment, 0);
-
+ drm_mm_insert_helper(hole_node, node, size, alignment, color);
return 0;
}
+EXPORT_SYMBOL(drm_mm_insert_node_generic);
+
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
+}
EXPORT_SYMBOL(drm_mm_insert_node);
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
@@ -275,22 +283,31 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
* -ENOSPC if no suitable free area is available. This is for range
* restricted allocations. The preallocated memory node must be cleared.
*/
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment,
- unsigned long start, unsigned long end)
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment, unsigned long color,
+ unsigned long start, unsigned long end)
{
struct drm_mm_node *hole_node;
- hole_node = drm_mm_search_free_in_range(mm, size, alignment,
- start, end, false);
+ hole_node = drm_mm_search_free_in_range_generic(mm,
+ size, alignment, color,
+ start, end, 0);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
+ drm_mm_insert_helper_range(hole_node, node,
+ size, alignment, color,
start, end);
-
return 0;
}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
+
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
+{
+ return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
+}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
/**
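[Illustration, not part of the patch] The drm_mm hunks above fold the old "search a hole, then get a block" sequence into drm_mm_insert_node_generic() and drm_mm_insert_node_in_range_generic(), which take an extra colour argument, keeping the old entry points as thin wrappers. A minimal caller sketch follows; the helper name example_insert_colored() and its error handling are assumptions for illustration, not code from this series:

#include <linux/err.h>
#include <linux/slab.h>
#include <drm/drm_mm.h>

/* Illustrative only: allocate a node and place it in "mm" with a colour
 * value; i915 passes the object's cache level here so the allocator can
 * keep incompatible neighbours apart. */
static struct drm_mm_node *example_insert_colored(struct drm_mm *mm,
						  unsigned long size,
						  unsigned alignment,
						  unsigned long color)
{
	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node_generic(mm, node, size, alignment, color);
	if (ret) {		/* -ENOSPC when no suitable hole exists */
		kfree(node);
		return ERR_PTR(ret);
	}

	return node;
}

This is exactly the shape the i915_gem_object_bind_to_gtt() hunk below adopts: allocate the node up front, let the insert helper do search and placement in one step, and fall back to eviction on -ENOSPC.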
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index bef43e0342a6..4e9b5ba8edff 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -66,6 +66,6 @@ struct i2c_driver ddc_driver = {
},
.id_table = ddc_idtable,
.probe = s5p_ddc_probe,
- .remove = __devexit_p(s5p_ddc_remove),
+ .remove = s5p_ddc_remove,
.command = NULL,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index e0a8e8024b01..1713e53d4751 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -325,7 +325,7 @@ static int exynos_drm_platform_remove(struct platform_device *pdev)
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
- .remove = __devexit_p(exynos_drm_platform_remove),
+ .remove = exynos_drm_platform_remove,
.driver = {
.owner = THIS_MODULE,
.name = "exynos-drm",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 61ea24296b52..d9963171fd0b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1714,7 +1714,7 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
fimc_write(cfg, EXYNOS_CIGCTRL);
}
-static int __devinit fimc_probe(struct platform_device *pdev)
+static int fimc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimc_context *ctx;
@@ -1891,7 +1891,7 @@ err_ctx:
return ret;
}
-static int __devexit fimc_remove(struct platform_device *pdev)
+static int fimc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimc_context *ctx = get_fimc_context(dev);
@@ -1990,7 +1990,7 @@ static const struct dev_pm_ops fimc_pm_ops = {
struct platform_driver fimc_driver = {
.probe = fimc_probe,
- .remove = __devexit_p(fimc_remove),
+ .remove = fimc_remove,
.id_table = fimc_driver_ids,
.driver = {
.name = "exynos-drm-fimc",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index bf0d9baca2bc..3ac911894dc7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -898,7 +898,7 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
return 0;
}
-static int __devinit fimd_probe(struct platform_device *pdev)
+static int fimd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
@@ -997,7 +997,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit fimd_remove(struct platform_device *pdev)
+static int fimd_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx = platform_get_drvdata(pdev);
@@ -1105,7 +1105,7 @@ static const struct dev_pm_ops fimd_pm_ops = {
struct platform_driver fimd_driver = {
.probe = fimd_probe,
- .remove = __devexit_p(fimd_remove),
+ .remove = fimd_remove,
.id_table = fimd_driver_ids,
.driver = {
.name = "exynos4-fb",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 6ffa0763c078..36c3905536a6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1090,7 +1090,7 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
kfree(file_priv->g2d_priv);
}
-static int __devinit g2d_probe(struct platform_device *pdev)
+static int g2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
@@ -1188,7 +1188,7 @@ err_destroy_slab:
return ret;
}
-static int __devexit g2d_remove(struct platform_device *pdev)
+static int g2d_remove(struct platform_device *pdev)
{
struct g2d_data *g2d = platform_get_drvdata(pdev);
@@ -1242,7 +1242,7 @@ static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
struct platform_driver g2d_driver = {
.probe = g2d_probe,
- .remove = __devexit_p(g2d_remove),
+ .remove = g2d_remove,
.driver = {
.name = "s5p-g2d",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 5639353d47b9..4b0e8589bd19 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1683,7 +1683,7 @@ static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
gsc_write(cfg, GSC_ENABLE);
}
-static int __devinit gsc_probe(struct platform_device *pdev)
+static int gsc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gsc_context *ctx;
@@ -1784,7 +1784,7 @@ err_ctx:
return ret;
}
-static int __devexit gsc_remove(struct platform_device *pdev)
+static int gsc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gsc_context *ctx = get_gsc_context(dev);
@@ -1860,7 +1860,7 @@ static const struct dev_pm_ops gsc_pm_ops = {
struct platform_driver gsc_driver = {
.probe = gsc_probe,
- .remove = __devexit_p(gsc_remove),
+ .remove = gsc_remove,
.driver = {
.name = "exynos-drm-gsc",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 55793c46e3c2..850e9950b7da 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -385,7 +385,7 @@ static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
}
-static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
+static int exynos_drm_hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_subdrv *subdrv;
@@ -413,7 +413,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
+static int exynos_drm_hdmi_remove(struct platform_device *pdev)
{
struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
@@ -426,7 +426,7 @@ static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
struct platform_driver exynos_drm_common_hdmi_driver = {
.probe = exynos_drm_hdmi_probe,
- .remove = __devexit_p(exynos_drm_hdmi_remove),
+ .remove = exynos_drm_hdmi_remove,
.driver = {
.name = "exynos-drm-hdmi",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 49eebe948ed2..c1f12301224c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -1888,7 +1888,7 @@ err_clear:
return;
}
-static int __devinit ipp_probe(struct platform_device *pdev)
+static int ipp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ipp_context *ctx;
@@ -1963,7 +1963,7 @@ err_clear:
return ret;
}
-static int __devexit ipp_remove(struct platform_device *pdev)
+static int ipp_remove(struct platform_device *pdev)
{
struct ipp_context *ctx = platform_get_drvdata(pdev);
@@ -2050,7 +2050,7 @@ static const struct dev_pm_ops ipp_pm_ops = {
struct platform_driver ipp_driver = {
.probe = ipp_probe,
- .remove = __devexit_p(ipp_remove),
+ .remove = ipp_remove,
.driver = {
.name = "exynos-drm-ipp",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 1c2366083c70..c748850737a1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -638,7 +638,7 @@ static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
return 0;
}
-static int __devinit rotator_probe(struct platform_device *pdev)
+static int rotator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rot_context *rot;
@@ -730,7 +730,7 @@ err_get_resource:
return ret;
}
-static int __devexit rotator_remove(struct platform_device *pdev)
+static int rotator_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rot_context *rot = dev_get_drvdata(dev);
@@ -845,7 +845,7 @@ static const struct dev_pm_ops rotator_pm_ops = {
struct platform_driver rotator_driver = {
.probe = rotator_probe,
- .remove = __devexit_p(rotator_remove),
+ .remove = rotator_remove,
.id_table = rotator_driver_ids,
.driver = {
.name = "exynos-rot",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 99bfc38dfaa2..a5c5a179e3b6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -609,7 +609,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
return 0;
}
-static int __devinit vidi_probe(struct platform_device *pdev)
+static int vidi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct vidi_context *ctx;
@@ -645,7 +645,7 @@ static int __devinit vidi_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit vidi_remove(struct platform_device *pdev)
+static int vidi_remove(struct platform_device *pdev)
{
struct vidi_context *ctx = platform_get_drvdata(pdev);
@@ -683,7 +683,7 @@ static const struct dev_pm_ops vidi_pm_ops = {
struct platform_driver vidi_driver = {
.probe = vidi_probe,
- .remove = __devexit_p(vidi_remove),
+ .remove = vidi_remove,
.driver = {
.name = "exynos-drm-vidi",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c46b6c0b82c..41ff79d8ac8e 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2305,7 +2305,7 @@ static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
}
-static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
+static int hdmi_resources_init(struct hdmi_context *hdata)
{
struct device *dev = hdata->dev;
struct hdmi_resources *res = &hdata->res;
@@ -2451,7 +2451,7 @@ static struct of_device_id hdmi_match_types[] = {
};
#endif
-static int __devinit hdmi_probe(struct platform_device *pdev)
+static int hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *drm_hdmi_ctx;
@@ -2607,7 +2607,7 @@ err_ddc:
return ret;
}
-static int __devexit hdmi_remove(struct platform_device *pdev)
+static int hdmi_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
@@ -2708,7 +2708,7 @@ static const struct dev_pm_ops hdmi_pm_ops = {
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
- .remove = __devexit_p(hdmi_remove),
+ .remove = hdmi_remove,
.id_table = hdmi_driver_types,
.driver = {
.name = "exynos-hdmi",
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 6206056f4a33..ea49d132ecf6 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -64,7 +64,7 @@ struct i2c_driver hdmiphy_driver = {
},
.id_table = hdmiphy_id,
.probe = hdmiphy_probe,
- .remove = __devexit_p(hdmiphy_remove),
+ .remove = hdmiphy_remove,
.command = NULL,
};
EXPORT_SYMBOL(hdmiphy_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 21db89530fc7..3f68b34fa127 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1029,8 +1029,8 @@ out:
return IRQ_HANDLED;
}
-static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
- struct platform_device *pdev)
+static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
+ struct platform_device *pdev)
{
struct mixer_context *mixer_ctx = ctx->ctx;
struct device *dev = &pdev->dev;
@@ -1081,8 +1081,8 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
return 0;
}
-static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
- struct platform_device *pdev)
+static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
+ struct platform_device *pdev)
{
struct mixer_context *mixer_ctx = ctx->ctx;
struct device *dev = &pdev->dev;
@@ -1155,7 +1155,7 @@ static struct of_device_id mixer_match_types[] = {
}
};
-static int __devinit mixer_probe(struct platform_device *pdev)
+static int mixer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *drm_hdmi_ctx;
@@ -1316,6 +1316,6 @@ struct platform_driver mixer_driver = {
.of_match_table = mixer_match_types,
},
.probe = mixer_probe,
- .remove = __devexit_p(mixer_remove),
+ .remove = mixer_remove,
.id_table = mixer_driver_types,
};
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index 4a07ab596174..771ff66711af 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -700,7 +700,7 @@ static struct i2c_driver tc35876x_bridge_i2c_driver = {
},
.id_table = tc35876x_bridge_id,
.probe = tc35876x_bridge_probe,
- .remove = __devexit_p(tc35876x_bridge_remove),
+ .remove = tc35876x_bridge_remove,
};
/* LCD panel I2C */
@@ -741,7 +741,7 @@ static struct i2c_driver cmi_lcd_i2c_driver = {
},
.id_table = cmi_lcd_i2c_id,
.probe = cmi_lcd_i2c_probe,
- .remove = __devexit_p(cmi_lcd_i2c_remove),
+ .remove = cmi_lcd_i2c_remove,
};
/* HACK to create I2C device while it's not created by platform code */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 8f63cd5de4b4..99daa896105d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -989,6 +989,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
break;
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
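[Illustration, not part of the patch] The new I915_PARAM_HAS_PINNED_BATCHES getparam lets userspace probe whether the running kernel understands pinned batches before relying on them. A hedged userspace sketch, assuming libdrm headers that already carry the new define:

#include <xf86drm.h>
#include <i915_drm.h>

/* Returns nonzero if the kernel reports pinned-batch support. Older
 * kernels reject unknown parameters, which we treat as "no". */
static int has_pinned_batches(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_PINNED_BATCHES,
		.value = &value,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;
	return value;
}

A caller would open the DRM node (e.g. /dev/dri/card0) and gate any use of I915_EXEC_IS_PINNED on this result.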
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 530db83ef320..117265840b1f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -877,8 +877,7 @@ int i915_reset(struct drm_device *dev)
return 0;
}
-static int __devinit
-i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 557843dd4b2e..ed3059575576 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -780,6 +780,7 @@ typedef struct drm_i915_private {
struct i915_hw_ppgtt *aliasing_ppgtt;
struct shrinker inactive_shrinker;
+ bool shrinker_no_lock_stealing;
/**
* List of objects currently involved in rendering.
@@ -1100,6 +1101,7 @@ struct drm_i915_gem_object {
*/
atomic_t pending_flip;
};
+#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -1166,6 +1168,9 @@ struct drm_i915_file_private {
#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
(dev)->pci_device == 0x0152 || \
(dev)->pci_device == 0x015a)
+#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
+ (dev)->pci_device == 0x0106 || \
+ (dev)->pci_device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
@@ -1196,6 +1201,9 @@ struct drm_i915_file_private {
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 742206e45103..da3c82e301b1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1517,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
if (obj->base.map_list.map)
return 0;
+ dev_priv->mm.shrinker_no_lock_stealing = true;
+
ret = drm_gem_create_mmap_offset(&obj->base);
if (ret != -ENOSPC)
- return ret;
+ goto out;
/* Badly fragmented mmap space? The only way we can recover
* space is by destroying unwanted objects. We can't randomly release
@@ -1531,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
ret = drm_gem_create_mmap_offset(&obj->base);
if (ret != -ENOSPC)
- return ret;
+ goto out;
i915_gem_shrink_all(dev_priv);
- return drm_gem_create_mmap_offset(&obj->base);
+ ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+ dev_priv->mm.shrinker_no_lock_stealing = false;
+
+ return ret;
}
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2890,7 +2896,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_mm_node *free_space;
+ struct drm_mm_node *node;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
int ret;
@@ -2936,66 +2942,54 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL) {
+ i915_gem_object_unpin_pages(obj);
+ return -ENOMEM;
+ }
+
search_free:
if (map_and_fenceable)
- free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
- size, alignment, obj->cache_level,
- 0, dev_priv->mm.gtt_mappable_end,
- false);
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+ size, alignment, obj->cache_level,
+ 0, dev_priv->mm.gtt_mappable_end);
else
- free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
- size, alignment, obj->cache_level,
- false);
-
- if (free_space != NULL) {
- if (map_and_fenceable)
- free_space =
- drm_mm_get_block_range_generic(free_space,
- size, alignment, obj->cache_level,
- 0, dev_priv->mm.gtt_mappable_end,
- false);
- else
- free_space =
- drm_mm_get_block_generic(free_space,
- size, alignment, obj->cache_level,
- false);
- }
- if (free_space == NULL) {
+ ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+ size, alignment, obj->cache_level);
+ if (ret) {
ret = i915_gem_evict_something(dev, size, alignment,
obj->cache_level,
map_and_fenceable,
nonblocking);
- if (ret) {
- i915_gem_object_unpin_pages(obj);
- return ret;
- }
+ if (ret == 0)
+ goto search_free;
- goto search_free;
+ i915_gem_object_unpin_pages(obj);
+ kfree(node);
+ return ret;
}
- if (WARN_ON(!i915_gem_valid_gtt_space(dev,
- free_space,
- obj->cache_level))) {
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(free_space);
+ drm_mm_put_block(node);
return -EINVAL;
}
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(free_space);
+ drm_mm_put_block(node);
return ret;
}
list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- obj->gtt_space = free_space;
- obj->gtt_offset = free_space->start;
+ obj->gtt_space = node;
+ obj->gtt_offset = node->start;
fenceable =
- free_space->size == fence_size &&
- (free_space->start & (fence_alignment - 1)) == 0;
+ node->size == fence_size &&
+ (node->start & (fence_alignment - 1)) == 0;
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -4392,6 +4386,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
if (!mutex_is_locked_by(&dev->struct_mutex, current))
return 0;
+ if (dev_priv->mm.shrinker_no_lock_stealing)
+ return 0;
+
unlock = false;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 773ef77b6c22..7be4241e8242 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -226,7 +226,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+ return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ee8f97f0539e..d6a994a07393 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -808,6 +808,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
flags |= I915_DISPATCH_SECURE;
}
+ if (args->flags & I915_EXEC_IS_PINNED)
+ flags |= I915_DISPATCH_PINNED;
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
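[Illustration, not part of the patch] For completeness, a sketch of how userspace would opt into the new path: set I915_EXEC_IS_PINNED in the execbuffer2 flags so the kernel dispatches the batch in place rather than copying it through the CS-TLB workaround buffer. The wrapper below is illustrative; buffer setup and relocation are assumed to happen elsewhere, and the define requires headers that include this series:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Illustrative wrapper: submit an already-pinned, already-relocated batch.
 * "objects" must list the batch buffer last, as execbuffer2 requires. */
static int submit_pinned_batch(int fd,
			       struct drm_i915_gem_exec_object2 *objects,
			       unsigned int count, uint32_t batch_len)
{
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objects;
	execbuf.buffer_count = count;
	execbuf.batch_len = batch_len;
	/* I915_EXEC_IS_PINNED is translated into I915_DISPATCH_PINNED for
	 * the ring's dispatch_execbuffer(), as the hunk above shows. */
	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_IS_PINNED;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}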
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a4dc97f8b9f0..2220dec3e5d9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1087,6 +1087,18 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (!ring->get_seqno)
return NULL;
+ if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+ u32 acthd = I915_READ(ACTHD);
+
+ if (WARN_ON(ring->id != RCS))
+ return NULL;
+
+ obj = ring->private;
+ if (acthd >= obj->gtt_offset &&
+ acthd < obj->gtt_offset + obj->base.size)
+ return i915_error_object_create(dev_priv, obj);
+ }
+
seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->ring != ring)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3f75cfaf1c3f..186ee5c85b51 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -517,6 +517,7 @@
* the enables for writing to the corresponding low bit.
*/
#define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
#define _3D_CHICKEN2 0x0208c
/* Disables pipelining of read flushes past the SF-WIZ interface.
* Required on all Ironlake steppings according to the B-Spec, but the
@@ -532,7 +533,8 @@
# define MI_FLUSH_ENABLE (1 << 12)
#define GEN6_GT_MODE 0x20d0
-#define GEN6_GT_MODE_HI (1 << 9)
+#define GEN6_GT_MODE_HI (1 << 9)
+#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5d127e068950..a9fb046b94a1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8144,10 +8144,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
DRM_DEBUG_KMS("encoder changed, full mode switch\n");
config->mode_changed = true;
}
-
- /* Disable all disconnected encoders. */
- if (connector->base.status == connector_status_disconnected)
- connector->new_encoder = NULL;
}
/* connector->new_encoder is now updated for all connectors. */
@@ -9167,6 +9163,23 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
+static void i915_redisable_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 vga_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ vga_reg = CPU_VGACNTRL;
+ else
+ vga_reg = VGACNTRL;
+
+ if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+ }
+}
+
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
* and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
@@ -9275,6 +9288,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
intel_set_mode(&crtc->base, &crtc->base.mode,
crtc->base.x, crtc->base.y, crtc->base.fb);
}
+
+ i915_redisable_vga(dev);
} else {
intel_modeset_update_staged_output_state(dev);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 496caa73eb70..e6f54ffab3ba 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -405,7 +405,7 @@ void intel_update_fbc(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled &&
+ if (to_intel_crtc(tmp_crtc)->active &&
!to_intel_crtc(tmp_crtc)->primary_disabled &&
tmp_crtc->fb) {
if (crtc) {
@@ -992,7 +992,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
struct drm_crtc *crtc, *enabled = NULL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->enabled && crtc->fb) {
+ if (to_intel_crtc(crtc)->active && crtc->fb) {
if (enabled)
return NULL;
enabled = crtc;
@@ -1086,7 +1086,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
+ if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
@@ -1215,7 +1215,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
int entries;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled)
+ if (crtc->fb == NULL || !to_intel_crtc(crtc)->active)
return false;
clock = crtc->mode.clock; /* VESA DOT Clock */
@@ -1286,6 +1286,7 @@ static void valleyview_update_wm(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
+ int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
vlv_update_drain_latency(dev);
@@ -1302,17 +1303,23 @@ static void valleyview_update_wm(struct drm_device *dev)
&planeb_wm, &cursorb_wm))
enabled |= 2;
- plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
- &plane_sr, &cursor_sr))
+ &plane_sr, &ignore_cursor_sr) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ 2*sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &ignore_plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
- else
+ } else {
I915_WRITE(FW_BLC_SELF_VLV,
I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+ plane_sr = cursor_sr = 0;
+ }
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
@@ -1352,17 +1359,18 @@ static void g4x_update_wm(struct drm_device *dev)
&planeb_wm, &cursorb_wm))
enabled |= 2;
- plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&g4x_wm_info,
&g4x_cursor_wm_info,
- &plane_sr, &cursor_sr))
+ &plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- else
+ } else {
I915_WRITE(FW_BLC_SELF,
I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+ plane_sr = cursor_sr = 0;
+ }
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
@@ -1468,7 +1476,7 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
- if (crtc->enabled && crtc->fb) {
+ if (to_intel_crtc(crtc)->active && crtc->fb) {
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1482,7 +1490,7 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
- if (crtc->enabled && crtc->fb) {
+ if (to_intel_crtc(crtc)->active && crtc->fb) {
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
@@ -1811,8 +1819,110 @@ static void sandybridge_update_wm(struct drm_device *dev)
enabled |= 2;
}
- if ((dev_priv->num_pipe == 3) &&
- g4x_compute_wm0(dev, 2,
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+ * SNB support 3 levels of watermark.
+ *
+ * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
+ * and disabled in the descending order
+ *
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
+static void ivybridge_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ if (g4x_compute_wm0(dev, 2,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
@@ -1875,12 +1985,17 @@ static void sandybridge_update_wm(struct drm_device *dev)
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
- /* WM3 */
+ /* WM3, note we have to correct the cursor latency */
if (!ironlake_compute_srwm(dev, 3, enabled,
SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
+ &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
+ !ironlake_compute_srwm(dev, 3, enabled,
+ 2 * SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
return;
I915_WRITE(WM3_LP_ILK,
@@ -1929,7 +2044,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
+ if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
*sprite_wm = display->guard_size;
return false;
}
@@ -3471,6 +3586,15 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
+ /* WaDisableHiZPlanesWhenMSAAEnabled */
+ I915_WRITE(_3D_CHICKEN,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+
+ /* WaSetupGtModeTdRowDispatch */
+ if (IS_SNB_GT1(dev))
+ I915_WRITE(GEN6_GT_MODE,
+ _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
@@ -3999,7 +4123,7 @@ void intel_init_pm(struct drm_device *dev)
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
if (SNB_READ_WM0_LATENCY()) {
- dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_wm = ivybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2346b920bd86..ae253e04c391 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -547,9 +547,14 @@ static int init_render_ring(struct intel_ring_buffer *ring)
static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
+ struct drm_device *dev = ring->dev;
+
if (!ring->private)
return;
+ if (HAS_BROKEN_CS_TLB(dev))
+ drm_gem_object_unreference(to_gem_object(ring->private));
+
cleanup_pipe_control(ring);
}
@@ -969,6 +974,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
return 0;
}
+/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
+#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len,
@@ -976,15 +983,47 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
{
int ret;
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
+ if (flags & I915_DISPATCH_PINNED) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ } else {
+ struct drm_i915_gem_object *obj = ring->private;
+ u32 cs_offset = obj->gtt_offset;
+
+ if (len > I830_BATCH_LIMIT)
+ return -ENOSPC;
+
+ ret = intel_ring_begin(ring, 9+3);
+ if (ret)
+ return ret;
+ /* Blit the batch (which has now all relocs applied) to the stable batch
+ * scratch bo area (so that the CS never stumbles over its tlb
+ * invalidation bug) ... */
+ intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+ XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 4096);
+ intel_ring_emit(ring, offset);
+ intel_ring_emit(ring, MI_FLUSH);
+
+ /* ... and execute it. */
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+ intel_ring_emit(ring, cs_offset + len - 8);
+ intel_ring_advance(ring);
+ }
return 0;
}
@@ -1596,6 +1635,27 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
+ /* Workaround batchbuffer to combat CS tlb bug. */
+ if (HAS_BROKEN_CS_TLB(dev)) {
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate batch bo\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_object_pin(obj, 0, true, false);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+ DRM_ERROR("Failed to ping batch bo\n");
+ return ret;
+ }
+
+ ring->private = obj;
+ }
+
return intel_init_ring_buffer(dev, ring);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 526182ed0c6d..6af87cd05725 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -94,6 +94,7 @@ struct intel_ring_buffer {
u32 offset, u32 length,
unsigned flags);
#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_ring_buffer *ring);
int (*sync_to)(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 1e910117b0a2..122b571ccc7c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -60,8 +60,7 @@ static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
}
-static int __devinit
-mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
mgag200_kick_out_firmware_fb(pdev);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
index 7b715fda2763..62ab231cd6b6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
@@ -57,6 +57,11 @@ chipsets:
.b16 #nve4_gpc_mmio_tail
.b16 #nve4_tpc_mmio_head
.b16 #nve4_tpc_mmio_tail
+.b8 0xe6 0 0 0
+.b16 #nve4_gpc_mmio_head
+.b16 #nve4_gpc_mmio_tail
+.b16 #nve4_tpc_mmio_head
+.b16 #nve4_tpc_mmio_tail
.b8 0 0 0 0
// GPC mmio lists
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
index 26c2165bad0f..09ee4702c8b2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -34,13 +34,16 @@ uint32_t nve0_grgpc_data[] = {
0x00000000,
/* 0x0064: chipsets */
0x000000e4,
- 0x01040080,
- 0x014c0104,
+ 0x0110008c,
+ 0x01580110,
0x000000e7,
- 0x01040080,
- 0x014c0104,
+ 0x0110008c,
+ 0x01580110,
+ 0x000000e6,
+ 0x0110008c,
+ 0x01580110,
0x00000000,
-/* 0x0080: nve4_gpc_mmio_head */
+/* 0x008c: nve4_gpc_mmio_head */
0x00000380,
0x04000400,
0x0800040c,
@@ -74,8 +77,8 @@ uint32_t nve0_grgpc_data[] = {
0x14003100,
0x000031d0,
0x040031e0,
-/* 0x0104: nve4_gpc_mmio_tail */
-/* 0x0104: nve4_tpc_mmio_head */
+/* 0x0110: nve4_gpc_mmio_tail */
+/* 0x0110: nve4_tpc_mmio_head */
0x00000048,
0x00000064,
0x00000088,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
index acfc457654bd..0bcfa4d447e5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -754,6 +754,16 @@ ctx_mmio_exec:
// on load it means: "a save preceeded this load"
//
ctx_xfer:
+ // according to mwk, some kind of wait for idle
+ mov $r15 0xc00
+ shl b32 $r15 6
+ mov $r14 4
+ iowr I[$r15 + 0x200] $r14
+ ctx_xfer_idle:
+ iord $r14 I[$r15 + 0x000]
+ and $r14 0x2000
+ bra ne #ctx_xfer_idle
+
bra not $p1 #ctx_xfer_pre
bra $p2 #ctx_xfer_pre_load
ctx_xfer_pre:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index 85a8d556f484..bb03d2a1d57b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -799,79 +799,80 @@ uint32_t nvc0_grhub_code[] = {
0x01fa0613,
0xf803f806,
/* 0x0829: ctx_xfer */
- 0x0611f400,
-/* 0x082f: ctx_xfer_pre */
- 0xf01102f4,
- 0x21f510f7,
- 0x21f50698,
- 0x11f40631,
-/* 0x083d: ctx_xfer_pre_load */
- 0x02f7f01c,
- 0x065721f5,
- 0x066621f5,
- 0x067821f5,
- 0x21f5f4bd,
- 0x21f50657,
-/* 0x0856: ctx_xfer_exec */
- 0x019806b8,
- 0x1427f116,
- 0x0624b604,
- 0xf10020d0,
- 0xf0a500e7,
- 0x1fb941e3,
- 0x8d21f402,
- 0xf004e0b6,
- 0x2cf001fc,
- 0x0124b602,
- 0xf405f2fd,
- 0x17f18d21,
- 0x13f04afc,
- 0x0c27f002,
- 0xf50012d0,
- 0xf1020721,
- 0xf047fc27,
- 0x20d00223,
- 0x012cf000,
- 0xd00320b6,
- 0xacf00012,
- 0x06a5f001,
- 0x9800b7f0,
- 0x0d98140c,
- 0x00e7f015,
- 0x015c21f5,
- 0xf508a7f0,
- 0xf5010321,
- 0xf4020721,
- 0xa7f02201,
- 0xc921f40c,
- 0x0a1017f1,
- 0xf00614b6,
- 0x12d00527,
-/* 0x08dd: ctx_xfer_post_save_wait */
- 0x0012cf00,
- 0xf40522fd,
- 0x02f4fa1b,
-/* 0x08e9: ctx_xfer_post */
- 0x02f7f032,
- 0x065721f5,
- 0x21f5f4bd,
- 0x21f50698,
- 0x21f50226,
- 0xf4bd0666,
- 0x065721f5,
- 0x981011f4,
- 0x11fd8001,
- 0x070bf405,
- 0x07df21f5,
-/* 0x0914: ctx_xfer_no_post_mmio */
- 0x064921f5,
-/* 0x0918: ctx_xfer_done */
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x00f7f100,
+ 0x06f4b60c,
+ 0xd004e7f0,
+/* 0x0836: ctx_xfer_idle */
+ 0xfecf80fe,
+ 0x00e4f100,
+ 0xf91bf420,
+ 0xf40611f4,
+/* 0x0846: ctx_xfer_pre */
+ 0xf7f01102,
+ 0x9821f510,
+ 0x3121f506,
+ 0x1c11f406,
+/* 0x0854: ctx_xfer_pre_load */
+ 0xf502f7f0,
+ 0xf5065721,
+ 0xf5066621,
+ 0xbd067821,
+ 0x5721f5f4,
+ 0xb821f506,
+/* 0x086d: ctx_xfer_exec */
+ 0x16019806,
+ 0x041427f1,
+ 0xd00624b6,
+ 0xe7f10020,
+ 0xe3f0a500,
+ 0x021fb941,
+ 0xb68d21f4,
+ 0xfcf004e0,
+ 0x022cf001,
+ 0xfd0124b6,
+ 0x21f405f2,
+ 0xfc17f18d,
+ 0x0213f04a,
+ 0xd00c27f0,
+ 0x21f50012,
+ 0x27f10207,
+ 0x23f047fc,
+ 0x0020d002,
+ 0xb6012cf0,
+ 0x12d00320,
+ 0x01acf000,
+ 0xf006a5f0,
+ 0x0c9800b7,
+ 0x150d9814,
+ 0xf500e7f0,
+ 0xf0015c21,
+ 0x21f508a7,
+ 0x21f50103,
+ 0x01f40207,
+ 0x0ca7f022,
+ 0xf1c921f4,
+ 0xb60a1017,
+ 0x27f00614,
+ 0x0012d005,
+/* 0x08f4: ctx_xfer_post_save_wait */
+ 0xfd0012cf,
+ 0x1bf40522,
+ 0x3202f4fa,
+/* 0x0900: ctx_xfer_post */
+ 0xf502f7f0,
+ 0xbd065721,
+ 0x9821f5f4,
+ 0x2621f506,
+ 0x6621f502,
+ 0xf5f4bd06,
+ 0xf4065721,
+ 0x01981011,
+ 0x0511fd80,
+ 0xf5070bf4,
+/* 0x092b: ctx_xfer_no_post_mmio */
+ 0xf507df21,
+/* 0x092f: ctx_xfer_done */
+ 0xf8064921,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
index 138eeaa28665..7fe9d7cf486b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
@@ -44,6 +44,9 @@ chipsets:
.b8 0xe7 0 0 0
.b16 #nve4_hub_mmio_head
.b16 #nve4_hub_mmio_tail
+.b8 0xe6 0 0 0
+.b16 #nve4_hub_mmio_head
+.b16 #nve4_hub_mmio_tail
.b8 0 0 0 0
nve4_hub_mmio_head:
@@ -680,6 +683,16 @@ ctx_mmio_exec:
// on load it means: "a save preceeded this load"
//
ctx_xfer:
+ // according to mwk, some kind of wait for idle
+ mov $r15 0xc00
+ shl b32 $r15 6
+ mov $r14 4
+ iowr I[$r15 + 0x200] $r14
+ ctx_xfer_idle:
+ iord $r14 I[$r15 + 0x000]
+ and $r14 0x2000
+ bra ne #ctx_xfer_idle
+
bra not $p1 #ctx_xfer_pre
bra $p2 #ctx_xfer_pre_load
ctx_xfer_pre:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index decf0c60ca3b..e3421af68ab9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -30,11 +30,13 @@ uint32_t nve0_grhub_data[] = {
0x00000000,
/* 0x005c: chipsets */
0x000000e4,
- 0x013c0070,
+ 0x01440078,
0x000000e7,
- 0x013c0070,
+ 0x01440078,
+ 0x000000e6,
+ 0x01440078,
0x00000000,
-/* 0x0070: nve4_hub_mmio_head */
+/* 0x0078: nve4_hub_mmio_head */
0x0417e91c,
0x04400204,
0x18404010,
@@ -86,9 +88,7 @@ uint32_t nve0_grhub_data[] = {
0x00408840,
0x08408900,
0x00408980,
-/* 0x013c: nve4_hub_mmio_tail */
- 0x00000000,
- 0x00000000,
+/* 0x0144: nve4_hub_mmio_tail */
0x00000000,
0x00000000,
0x00000000,
@@ -781,77 +781,78 @@ uint32_t nve0_grhub_code[] = {
0x0613f002,
0xf80601fa,
/* 0x07fb: ctx_xfer */
- 0xf400f803,
- 0x02f40611,
-/* 0x0801: ctx_xfer_pre */
- 0x10f7f00d,
- 0x067221f5,
-/* 0x080b: ctx_xfer_pre_load */
- 0xf01c11f4,
- 0x21f502f7,
- 0x21f50631,
- 0x21f50640,
- 0xf4bd0652,
- 0x063121f5,
- 0x069221f5,
-/* 0x0824: ctx_xfer_exec */
- 0xf1160198,
- 0xb6041427,
- 0x20d00624,
- 0x00e7f100,
- 0x41e3f0a5,
- 0xf4021fb9,
- 0xe0b68d21,
- 0x01fcf004,
- 0xb6022cf0,
- 0xf2fd0124,
- 0x8d21f405,
- 0x4afc17f1,
- 0xf00213f0,
- 0x12d00c27,
- 0x0721f500,
- 0xfc27f102,
- 0x0223f047,
- 0xf00020d0,
- 0x20b6012c,
- 0x0012d003,
- 0xf001acf0,
- 0xb7f006a5,
- 0x140c9800,
- 0xf0150d98,
- 0x21f500e7,
- 0xa7f0015c,
- 0x0321f508,
- 0x0721f501,
- 0x2201f402,
- 0xf40ca7f0,
- 0x17f1c921,
- 0x14b60a10,
- 0x0527f006,
-/* 0x08ab: ctx_xfer_post_save_wait */
- 0xcf0012d0,
- 0x22fd0012,
- 0xfa1bf405,
-/* 0x08b7: ctx_xfer_post */
- 0xf02e02f4,
- 0x21f502f7,
- 0xf4bd0631,
- 0x067221f5,
- 0x022621f5,
- 0x064021f5,
- 0x21f5f4bd,
- 0x11f40631,
- 0x80019810,
- 0xf40511fd,
- 0x21f5070b,
-/* 0x08e2: ctx_xfer_no_post_mmio */
-/* 0x08e2: ctx_xfer_done */
- 0x00f807b1,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0xf100f803,
+ 0xb60c00f7,
+ 0xe7f006f4,
+ 0x80fed004,
+/* 0x0808: ctx_xfer_idle */
+ 0xf100fecf,
+ 0xf42000e4,
+ 0x11f4f91b,
+ 0x0d02f406,
+/* 0x0818: ctx_xfer_pre */
+ 0xf510f7f0,
+ 0xf4067221,
+/* 0x0822: ctx_xfer_pre_load */
+ 0xf7f01c11,
+ 0x3121f502,
+ 0x4021f506,
+ 0x5221f506,
+ 0xf5f4bd06,
+ 0xf5063121,
+/* 0x083b: ctx_xfer_exec */
+ 0x98069221,
+ 0x27f11601,
+ 0x24b60414,
+ 0x0020d006,
+ 0xa500e7f1,
+ 0xb941e3f0,
+ 0x21f4021f,
+ 0x04e0b68d,
+ 0xf001fcf0,
+ 0x24b6022c,
+ 0x05f2fd01,
+ 0xf18d21f4,
+ 0xf04afc17,
+ 0x27f00213,
+ 0x0012d00c,
+ 0x020721f5,
+ 0x47fc27f1,
+ 0xd00223f0,
+ 0x2cf00020,
+ 0x0320b601,
+ 0xf00012d0,
+ 0xa5f001ac,
+ 0x00b7f006,
+ 0x98140c98,
+ 0xe7f0150d,
+ 0x5c21f500,
+ 0x08a7f001,
+ 0x010321f5,
+ 0x020721f5,
+ 0xf02201f4,
+ 0x21f40ca7,
+ 0x1017f1c9,
+ 0x0614b60a,
+ 0xd00527f0,
+/* 0x08c2: ctx_xfer_post_save_wait */
+ 0x12cf0012,
+ 0x0522fd00,
+ 0xf4fa1bf4,
+/* 0x08ce: ctx_xfer_post */
+ 0xf7f02e02,
+ 0x3121f502,
+ 0xf5f4bd06,
+ 0xf5067221,
+ 0xf5022621,
+ 0xbd064021,
+ 0x3121f5f4,
+ 0x1011f406,
+ 0xfd800198,
+ 0x0bf40511,
+ 0xb121f507,
+/* 0x08f9: ctx_xfer_no_post_mmio */
+/* 0x08f9: ctx_xfer_done */
+ 0x0000f807,
0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 47a02081d708..45aff5f5085a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -516,18 +516,9 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
{
struct nouveau_device *device = nv_device(parent);
struct nvc0_graph_priv *priv;
- bool enable = true;
int ret, i;
- switch (device->chipset) {
- case 0xd9: /* known broken without binary driver firmware */
- enable = false;
- break;
- default:
- break;
- }
-
- ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index 18d2210e12eb..a1e78de46456 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -121,6 +121,7 @@ nvc0_graph_class(void *obj)
return 0x9297;
case 0xe4:
case 0xe7:
+ case 0xe6:
return 0xa097;
default:
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 539d4c72f192..9f82e9702b46 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -203,7 +203,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_graph_priv *priv;
int ret, i;
- ret = nouveau_graph_create(parent, engine, oclass, false, &priv);
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -252,6 +252,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->magic_not_rop_nr = 1;
break;
case 0xe7:
+ case 0xe6:
priv->magic_not_rop_nr = 1;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
index d145b25e6be4..5bd1ca8cd20d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
@@ -17,6 +17,7 @@ struct nouveau_bios {
u8 chip;
u8 minor;
u8 micro;
+ u8 patch;
} version;
};
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index 2bf178082a36..e6563b5cb08e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -25,9 +25,11 @@ struct dcb_gpio_func {
u8 param;
};
-u16 dcb_gpio_table(struct nouveau_bios *);
-u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver);
-int dcb_gpio_parse(struct nouveau_bios *, int idx, u8 func, u8 line,
+u16 dcb_gpio_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len);
+u16 dcb_gpio_parse(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len,
struct dcb_gpio_func *);
+u16 dcb_gpio_match(struct nouveau_bios *, int idx, u8 func, u8 line,
+ u8 *ver, u8 *len, struct dcb_gpio_func *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
index e69a8bdc6e97..ca2f6bf37f46 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
@@ -13,6 +13,7 @@ struct nvbios_init {
u32 nested;
u16 repeat;
u16 repend;
+ u32 ramcfg;
};
int nvbios_exec(struct nvbios_init *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index 9ea2b12cc15d..b75e8f18e52c 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -11,7 +11,7 @@ struct nouveau_gpio {
struct nouveau_subdev base;
/* hardware interfaces */
- void (*reset)(struct nouveau_gpio *);
+ void (*reset)(struct nouveau_gpio *, u8 func);
int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
int (*sense)(struct nouveau_gpio *, int line);
void (*irq_enable)(struct nouveau_gpio *, int line, bool);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index dd111947eb86..f621f69fa1a2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -447,6 +447,7 @@ nouveau_bios_ctor(struct nouveau_object *parent,
bios->version.chip = nv_ro08(bios, bit_i.offset + 2);
bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
+ bios->version.patch = nv_ro08(bios, bit_i.offset + 4);
} else
if (bmp_version(bios)) {
bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
@@ -455,9 +456,9 @@ nouveau_bios_ctor(struct nouveau_object *parent,
bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
}
- nv_info(bios, "version %02x.%02x.%02x.%02x\n",
+ nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n",
bios->version.major, bios->version.chip,
- bios->version.minor, bios->version.micro);
+ bios->version.minor, bios->version.micro, bios->version.patch);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index c90d4aa3ae4f..c84e93fa6d95 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -27,84 +27,105 @@
#include <subdev/bios/gpio.h>
u16
-dcb_gpio_table(struct nouveau_bios *bios)
+dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
- u8 ver, hdr, cnt, len;
- u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
+ u16 data = 0x0000;
+ u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
if (dcb) {
- if (ver >= 0x30 && hdr >= 0x0c)
- return nv_ro16(bios, dcb + 0x0a);
- if (ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
- return nv_ro16(bios, dcb - 0x0f);
+ if (*ver >= 0x30 && *hdr >= 0x0c)
+ data = nv_ro16(bios, dcb + 0x0a);
+ else
+ if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
+ data = nv_ro16(bios, dcb - 0x0f);
+
+ if (data) {
+ *ver = nv_ro08(bios, data + 0x00);
+ if (*ver < 0x30) {
+ *hdr = 3;
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x01);
+ } else
+ if (*ver <= 0x41) {
+ *hdr = nv_ro08(bios, data + 0x01);
+ *cnt = nv_ro08(bios, data + 0x02);
+ *len = nv_ro08(bios, data + 0x03);
+ } else {
+ data = 0x0000;
+ }
+ }
}
- return 0x0000;
+ return data;
}
u16
-dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver)
+dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
{
- u16 gpio = dcb_gpio_table(bios);
- if (gpio) {
- *ver = nv_ro08(bios, gpio);
- if (*ver < 0x30 && ent < nv_ro08(bios, gpio + 2))
- return gpio + 3 + (ent * nv_ro08(bios, gpio + 1));
- else if (ent < nv_ro08(bios, gpio + 2))
- return gpio + nv_ro08(bios, gpio + 1) +
- (ent * nv_ro08(bios, gpio + 3));
- }
+ u8 hdr, cnt;
+ u16 gpio = !idx ? dcb_gpio_table(bios, ver, &hdr, &cnt, len) : 0x0000;
+ if (gpio && ent < cnt)
+ return gpio + hdr + (ent * *len);
return 0x0000;
}
-int
-dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
+u16
+dcb_gpio_parse(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len,
struct dcb_gpio_func *gpio)
{
- u8 ver, hdr, cnt, len;
- u16 entry;
- int i = -1;
-
- while ((entry = dcb_gpio_entry(bios, idx, ++i, &ver))) {
- if (ver < 0x40) {
- u16 data = nv_ro16(bios, entry);
+ u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
+ if (data) {
+ if (*ver < 0x40) {
+ u16 info = nv_ro16(bios, data);
*gpio = (struct dcb_gpio_func) {
- .line = (data & 0x001f) >> 0,
- .func = (data & 0x07e0) >> 5,
- .log[0] = (data & 0x1800) >> 11,
- .log[1] = (data & 0x6000) >> 13,
- .param = !!(data & 0x8000),
+ .line = (info & 0x001f) >> 0,
+ .func = (info & 0x07e0) >> 5,
+ .log[0] = (info & 0x1800) >> 11,
+ .log[1] = (info & 0x6000) >> 13,
+ .param = !!(info & 0x8000),
};
} else
- if (ver < 0x41) {
- u32 data = nv_ro32(bios, entry);
+ if (*ver < 0x41) {
+ u32 info = nv_ro32(bios, data);
*gpio = (struct dcb_gpio_func) {
- .line = (data & 0x0000001f) >> 0,
- .func = (data & 0x0000ff00) >> 8,
- .log[0] = (data & 0x18000000) >> 27,
- .log[1] = (data & 0x60000000) >> 29,
- .param = !!(data & 0x80000000),
+ .line = (info & 0x0000001f) >> 0,
+ .func = (info & 0x0000ff00) >> 8,
+ .log[0] = (info & 0x18000000) >> 27,
+ .log[1] = (info & 0x60000000) >> 29,
+ .param = !!(info & 0x80000000),
};
} else {
- u32 data = nv_ro32(bios, entry + 0);
- u8 data1 = nv_ro32(bios, entry + 4);
+ u32 info = nv_ro32(bios, data + 0);
+ u8 info1 = nv_ro32(bios, data + 4);
*gpio = (struct dcb_gpio_func) {
- .line = (data & 0x0000003f) >> 0,
- .func = (data & 0x0000ff00) >> 8,
- .log[0] = (data1 & 0x30) >> 4,
- .log[1] = (data1 & 0xc0) >> 6,
- .param = !!(data & 0x80000000),
+ .line = (info & 0x0000003f) >> 0,
+ .func = (info & 0x0000ff00) >> 8,
+ .log[0] = (info1 & 0x30) >> 4,
+ .log[1] = (info1 & 0xc0) >> 6,
+ .param = !!(info & 0x80000000),
};
}
+ }
+
+ return data;
+}
+u16
+dcb_gpio_match(struct nouveau_bios *bios, int idx, u8 func, u8 line,
+ u8 *ver, u8 *len, struct dcb_gpio_func *gpio)
+{
+ u8 hdr, cnt, i = 0;
+ u16 data;
+
+ while ((data = dcb_gpio_parse(bios, idx, i++, ver, len, gpio))) {
if ((line == 0xff || line == gpio->line) &&
(func == 0xff || func == gpio->func))
- return 0;
+ return data;
}
/* DCB 2.2, fixed TVDAC GPIO data */
- if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len))) {
- if (ver >= 0x22 && ver < 0x30 && func == DCB_GPIO_TVDAC0) {
- u8 conf = nv_ro08(bios, entry - 5);
- u8 addr = nv_ro08(bios, entry - 4);
+ if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
+ if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
+ u8 conf = nv_ro08(bios, data - 5);
+ u8 addr = nv_ro08(bios, data - 4);
if (conf & 0x01) {
*gpio = (struct dcb_gpio_func) {
.func = DCB_GPIO_TVDAC0,
@@ -112,10 +133,11 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
.log[0] = !!(conf & 0x02),
.log[1] = !(conf & 0x02),
};
- return 0;
+ *ver = 0x00;
+ return data;
}
}
}
- return -EINVAL;
+ return 0x0000;
}
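
The reworked dcb_gpio_table/dcb_gpio_parse above hands the table version back to the caller and decodes each entry according to that version. As a standalone illustration (plain userspace C, not part of the patch), the sketch below decodes the pre-DCB-4.0 16-bit entry layout using the same masks as dcb_gpio_parse; the struct and function names are hypothetical.

#include <stdint.h>
#include <stdio.h>

struct gpio_func_example {
	uint8_t func;
	uint8_t line;
	uint8_t log[2];
	uint8_t param;
};

/* Decode of a version < 0x40 GPIO table entry, mirroring the bitfields above. */
static struct gpio_func_example decode_gpio_v3x(uint16_t info)
{
	struct gpio_func_example f = {
		.line   = (info & 0x001f) >> 0,
		.func   = (info & 0x07e0) >> 5,
		.log[0] = (info & 0x1800) >> 11,
		.log[1] = (info & 0x6000) >> 13,
		.param  = !!(info & 0x8000),
	};
	return f;
}

int main(void)
{
	struct gpio_func_example f = decode_gpio_v3x(0x18c1); /* hypothetical entry value */
	printf("func 0x%02x line %u logic %u/%u param %u\n",
	       f.func, f.line, f.log[0], f.log[1], f.param);
	return 0;
}
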
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index ae168bbb86d8..2917d552689b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2,11 +2,12 @@
#include <core/device.h>
#include <subdev/bios.h>
-#include <subdev/bios/conn.h>
#include <subdev/bios/bmp.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/conn.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/dp.h>
+#include <subdev/bios/gpio.h>
#include <subdev/bios/init.h>
#include <subdev/devinit.h>
#include <subdev/clock.h>
@@ -410,9 +411,25 @@ init_ram_restrict_group_count(struct nvbios_init *init)
}
static u8
+init_ram_restrict_strap(struct nvbios_init *init)
+{
+ /* This appears to be the behaviour of the VBIOS parser, and *is*
+ * important to cache the NV_PEXTDEV_BOOT0 on later chipsets to
+ * avoid fucking up the memory controller (somehow) by reading it
+ * on every INIT_RAM_RESTRICT_ZM_GROUP opcode.
+ *
+ * Preserving the non-caching behaviour on earlier chipsets just
+ * in case *not* re-reading the strap causes similar breakage.
+ */
+ if (!init->ramcfg || init->bios->version.major < 0x70)
+ init->ramcfg = init_rd32(init, 0x101000);
+ return (init->ramcfg & 0x00000003c) >> 2;
+}
+
+static u8
init_ram_restrict(struct nvbios_init *init)
{
- u32 strap = (init_rd32(init, 0x101000) & 0x0000003c) >> 2;
+ u8 strap = init_ram_restrict_strap(init);
u16 table = init_ram_restrict_table(init);
if (table)
return nv_ro08(init->bios, table + strap);
@@ -1781,7 +1798,7 @@ init_gpio(struct nvbios_init *init)
init->offset += 1;
if (init_exec(init) && gpio && gpio->reset)
- gpio->reset(gpio);
+ gpio->reset(gpio, DCB_GPIO_UNUSED);
}
/**
@@ -1995,6 +2012,47 @@ init_i2c_long_if(struct nvbios_init *init)
init_exec_set(init, false);
}
+/**
+ * INIT_GPIO_NE - opcode 0xa9
+ *
+ */
+static void
+init_gpio_ne(struct nvbios_init *init)
+{
+ struct nouveau_bios *bios = init->bios;
+ struct nouveau_gpio *gpio = nouveau_gpio(bios);
+ struct dcb_gpio_func func;
+ u8 count = nv_ro08(bios, init->offset + 1);
+ u8 idx = 0, ver, len;
+ u16 data, i;
+
+ trace("GPIO_NE\t");
+ init->offset += 2;
+
+ for (i = init->offset; i < init->offset + count; i++)
+ cont("0x%02x ", nv_ro08(bios, i));
+ cont("\n");
+
+ while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
+ if (func.func != DCB_GPIO_UNUSED) {
+ for (i = init->offset; i < init->offset + count; i++) {
+ if (func.func == nv_ro08(bios, i))
+ break;
+ }
+
+ trace("\tFUNC[0x%02x]", func.func);
+ if (i == (init->offset + count)) {
+ cont(" *");
+ if (init_exec(init) && gpio && gpio->reset)
+ gpio->reset(gpio, func.func);
+ }
+ cont("\n");
+ }
+ }
+
+ init->offset += count;
+}
+
static struct nvbios_init_opcode {
void (*exec)(struct nvbios_init *);
} init_opcode[] = {
@@ -2059,6 +2117,7 @@ static struct nvbios_init_opcode {
[0x98] = { init_auxch },
[0x99] = { init_zm_auxch },
[0x9a] = { init_i2c_long_if },
+ [0xa9] = { init_gpio_ne },
};
#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
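
The init_ram_restrict_strap comment above describes caching the strap register on newer BIOSes instead of re-reading it for every INIT_RAM_RESTRICT_ZM_GROUP opcode. A minimal sketch of that caching rule follows, written as standalone userspace C under the assumption that read_strap_reg() stands in for init_rd32(init, 0x101000); it is not the driver's code.

#include <stdint.h>
#include <stdio.h>

struct init_ctx_example {
	uint32_t ramcfg;      /* cached strap value, 0 = not read yet */
	uint8_t  bios_major;  /* BIOS major version */
};

static uint32_t read_strap_reg(void)
{
	return 0x00000014;    /* hypothetical strap: bits [5:2] give group 5 */
}

static uint8_t ram_restrict_strap(struct init_ctx_example *init)
{
	/* older BIOSes re-read every time; 0x70 and later read once and cache */
	if (!init->ramcfg || init->bios_major < 0x70)
		init->ramcfg = read_strap_reg();
	return (init->ramcfg & 0x3c) >> 2;
}

int main(void)
{
	struct init_ctx_example init = { .ramcfg = 0, .bios_major = 0x70 };

	printf("group %u (first read)\n", ram_restrict_strap(&init));
	printf("group %u (cached)\n", ram_restrict_strap(&init));
	return 0;
}
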
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 9b7881e76634..03a652876e73 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -109,6 +109,34 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
break;
+ case 0xe6:
+ device->cname = "GK106";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ break;
default:
nv_fatal(device, "unknown Kepler chipset\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index acf818c58bf0..9fb0f9b92d49 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -43,10 +43,15 @@ static int
nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
struct dcb_gpio_func *func)
{
+ struct nouveau_bios *bios = nouveau_bios(gpio);
+ u8 ver, len;
+ u16 data;
+
if (line == 0xff && tag == 0xff)
return -EINVAL;
- if (!dcb_gpio_parse(nouveau_bios(gpio), idx, tag, line, func))
+ data = dcb_gpio_match(bios, idx, tag, line, &ver, &len, func);
+ if (data)
return 0;
/* Apple iMac G4 NV18 */
@@ -265,7 +270,7 @@ nouveau_gpio_init(struct nouveau_gpio *gpio)
int ret = nouveau_subdev_init(&gpio->base);
if (ret == 0 && gpio->reset) {
if (dmi_check_system(gpio_reset_ids))
- gpio->reset(gpio);
+ gpio->reset(gpio, DCB_GPIO_UNUSED);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index f3502c961cd9..bf13a1200f26 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -29,15 +29,15 @@ struct nv50_gpio_priv {
};
static void
-nv50_gpio_reset(struct nouveau_gpio *gpio)
+nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
{
struct nouveau_bios *bios = nouveau_bios(gpio);
struct nv50_gpio_priv *priv = (void *)gpio;
+ u8 ver, len;
u16 entry;
- u8 ver;
int ent = -1;
- while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
+ while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
static const u32 regs[] = { 0xe100, 0xe28c };
u32 data = nv_ro32(bios, entry);
u8 line = (data & 0x0000001f);
@@ -48,7 +48,8 @@ nv50_gpio_reset(struct nouveau_gpio *gpio)
u32 val = (unk1 << 16) | unk0;
u32 reg = regs[line >> 4]; line &= 0x0f;
- if (func == 0xff)
+ if ( func == DCB_GPIO_UNUSED ||
+ (match != DCB_GPIO_UNUSED && match != func))
continue;
gpio->set(gpio, 0, func, line, defs);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
index 8d18fcad26e0..83e8b8f16e6a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -29,15 +29,15 @@ struct nvd0_gpio_priv {
};
static void
-nvd0_gpio_reset(struct nouveau_gpio *gpio)
+nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
{
struct nouveau_bios *bios = nouveau_bios(gpio);
struct nvd0_gpio_priv *priv = (void *)gpio;
+ u8 ver, len;
u16 entry;
- u8 ver;
int ent = -1;
- while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
+ while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
u32 data = nv_ro32(bios, entry);
u8 line = (data & 0x0000003f);
u8 defs = !!(data & 0x00000080);
@@ -45,7 +45,8 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio)
u8 unk0 = (data & 0x00ff0000) >> 16;
u8 unk1 = (data & 0x1f000000) >> 24;
- if (func == 0xff)
+ if ( func == DCB_GPIO_UNUSED ||
+ (match != DCB_GPIO_UNUSED && match != func))
continue;
gpio->set(gpio, 0, func, line, defs);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index 93e3ddf7303a..e286e132c7e7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -260,7 +260,7 @@ nouveau_mxm_create_(struct nouveau_object *parent,
data = mxm_table(bios, &ver, &len);
if (!data || !(ver = nv_ro08(bios, data))) {
- nv_info(mxm, "no VBIOS data, nothing to do\n");
+ nv_debug(mxm, "no VBIOS data, nothing to do\n");
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 5614c89148cb..69d7b1d0b9d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1276,7 +1276,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
if (drm->agp.stat == ENABLED) {
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = drm->agp.base;
- mem->bus.is_iomem = true;
+ mem->bus.is_iomem = !dev->agp->cant_use_aperture;
}
#endif
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 01c403ddb99b..180a45e3b525 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -189,8 +189,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
nouveau_bo_move_init(drm);
}
-static int __devinit
-nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
+static int nouveau_drm_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pent)
{
struct nouveau_device *device;
struct apertures_struct *aper;
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 5566172774df..a701ff5ffa5b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -698,10 +698,10 @@ static int
nouveau_hwmon_init(struct drm_device *dev)
{
struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
struct device *hwmon_dev;
int ret = 0;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f95d7fc1f5e0..061fa0a28900 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2306,22 +2306,20 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
return radeon_ring_test_lockup(rdev, ring);
}
-static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- struct evergreen_mc_save save;
u32 grbm_reset = 0;
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ return;
- dev_info(rdev->dev, "GPU softreset \n");
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -2331,10 +2329,7 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
- evergreen_mc_stop(rdev, &save);
- if (evergreen_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
+
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
@@ -2358,15 +2353,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
- /* Wait a little for things to settle down */
- udelay(50);
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -2376,13 +2370,65 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
+}
+
+static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
+
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct evergreen_mc_save save;
+
+ if (reset_mask == 0)
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ evergreen_gpu_soft_reset_gfx(rdev);
+
+ if (reset_mask & RADEON_RESET_DMA)
+ evergreen_gpu_soft_reset_dma(rdev);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
evergreen_mc_resume(rdev, &save);
return 0;
}
int evergreen_asic_reset(struct radeon_device *rdev)
{
- return evergreen_gpu_soft_reset(rdev);
+ return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
}
/* Interrupts */
@@ -3215,7 +3261,7 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
/* flush HDP */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL);
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
radeon_ring_write(ring, 1);
}
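
The evergreen change above splits the monolithic soft reset into per-block helpers selected by a reset mask. The sketch below shows the dispatch pattern only; the flag values match the RADEON_RESET_* defines added to radeon.h later in this patch, while the helper bodies are placeholders rather than the driver's register programming.

#include <stdint.h>
#include <stdio.h>

#define RESET_GFX     (1 << 0)
#define RESET_COMPUTE (1 << 1)
#define RESET_DMA     (1 << 2)

static void soft_reset_gfx(void) { puts("reset GFX/compute blocks"); }
static void soft_reset_dma(void) { puts("reset DMA engine"); }

static int soft_reset(uint32_t reset_mask)
{
	if (reset_mask == 0)
		return 0;               /* nothing requested, nothing to do */
	if (reset_mask & (RESET_GFX | RESET_COMPUTE))
		soft_reset_gfx();       /* GFX and compute share the GRBM reset */
	if (reset_mask & RESET_DMA)
		soft_reset_dma();       /* DMA is reset separately via SRBM */
	return 0;
}

int main(void)
{
	return soft_reset(RESET_GFX | RESET_COMPUTE | RESET_DMA);
}
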
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 74c6b42d2597..7a445666e71f 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2654,6 +2654,35 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+4] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_MEM_WRITE:
+ {
+ u64 offset;
+
+ if (pkt->count != 3) {
+ DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+0);
+ offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+ if (offset & 0x7) {
+ DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ return -EINVAL;
+ }
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+ break;
+ }
case PACKET3_COPY_DW:
if (pkt->count != 4) {
DRM_ERROR("bad COPY_DW (invalid count)\n");
@@ -3287,6 +3316,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
/* check config regs */
switch (reg) {
+ case WAIT_UNTIL:
case GRBM_GFX_INDEX:
case CP_STRMOUT_CNTL:
case CP_COHER_CNTL:
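
The new PACKET3_MEM_WRITE handling above assembles a 40-bit destination from two dwords and rejects unaligned or out-of-bounds writes. A self-contained sketch of that check follows, using made-up values; it mirrors the validation logic but is not the command-stream parser itself.

#include <stdint.h>
#include <stdio.h>

static int check_mem_write(uint32_t lo, uint32_t hi, uint64_t bo_size)
{
	uint64_t offset = lo;

	offset += ((uint64_t)(hi & 0xff)) << 32;  /* upper 8 bits of the address */
	if (offset & 0x7)
		return -1;                        /* not qword aligned */
	if (offset + 8 > bo_size)
		return -1;                        /* write would overrun the BO */
	return 0;
}

int main(void)
{
	/* hypothetical values: 1 MiB buffer object, write at byte 0x100 */
	printf("valid: %d\n", check_mem_write(0x100, 0x0, 1 << 20) == 0);
	printf("unaligned rejected: %d\n", check_mem_write(0x104, 0x0, 1 << 20) < 0);
	return 0;
}
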
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index cb9baaac9e85..0bfd0e9e469b 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -742,8 +742,9 @@
#define SOFT_RESET_ROM (1 << 14)
#define SOFT_RESET_SEM (1 << 15)
#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
#define SOFT_RESET_TST (1 << 21)
-#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
/* display watermarks */
@@ -2027,4 +2028,15 @@
/* cayman packet3 addition */
#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
+/* DMA regs common on r6xx/r7xx/evergreen/ni */
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+
#endif
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7bdbcb00aaf2..896f1cbc58a5 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1306,22 +1306,20 @@ void cayman_dma_fini(struct radeon_device *rdev)
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}
-static int cayman_gpu_soft_reset(struct radeon_device *rdev)
+static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- struct evergreen_mc_save save;
u32 grbm_reset = 0;
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ return;
- dev_info(rdev->dev, "GPU softreset \n");
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -1331,19 +1329,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
- dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(0x14F8));
- dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(0x14D8));
- dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(0x14FC));
- dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(0x14DC));
- evergreen_mc_stop(rdev, &save);
- if (evergreen_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
@@ -1368,16 +1354,14 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
- /* Wait a little for things to settle down */
- udelay(50);
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -1387,13 +1371,81 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
+
+}
+
+static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+}
+
+static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct evergreen_mc_save save;
+
+ if (reset_mask == 0)
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14F8));
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14D8));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14FC));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14DC));
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ cayman_gpu_soft_reset_gfx(rdev);
+
+ if (reset_mask & RADEON_RESET_DMA)
+ cayman_gpu_soft_reset_dma(rdev);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
evergreen_mc_resume(rdev, &save);
return 0;
}
int cayman_asic_reset(struct radeon_device *rdev)
{
- return cayman_gpu_soft_reset(rdev);
+ return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
}
/**
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index b93186b8ee4b..48e5022ee921 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -65,7 +65,7 @@
#define SOFT_RESET_VMC (1 << 17)
#define SOFT_RESET_DMA (1 << 20)
#define SOFT_RESET_TST (1 << 21)
-#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
@@ -675,4 +675,3 @@
#define DMA_PACKET_NOP 0xf
#endif
-
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2aaf147969bd..923f93647042 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1258,9 +1258,8 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
* reset, it's up to the caller to determine if the GPU needs one. We
* might add an helper function to check that.
*/
-static int r600_gpu_soft_reset(struct radeon_device *rdev)
+static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- struct rv515_mc_save save;
u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
@@ -1280,14 +1279,13 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
u32 tmp;
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ return;
- dev_info(rdev->dev, "GPU softreset \n");
- dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
- dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
+ dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
RREG32(R_008014_GRBM_STATUS2));
- dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -1297,12 +1295,10 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
- rv515_mc_stop(rdev, &save);
- if (r600_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
+
/* Disable CP parsing/prefetching */
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
/* Check if any of the rendering block is busy and reset it */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -1332,13 +1328,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
RREG32(R_008020_GRBM_SOFT_RESET);
mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
- /* Wait a little for things to settle down */
- mdelay(1);
- dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
+
+ dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
- dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
+ dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
RREG32(R_008014_GRBM_STATUS2));
- dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
@@ -1348,6 +1343,60 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
+
+}
+
+static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+
+ /* Reset dma */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+ else
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
+
+static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct rv515_mc_save save;
+
+ if (reset_mask == 0)
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ rv515_mc_stop(rdev, &save);
+ if (r600_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ r600_gpu_soft_reset_gfx(rdev);
+
+ if (reset_mask & RADEON_RESET_DMA)
+ r600_gpu_soft_reset_dma(rdev);
+
+ /* Wait a little for things to settle down */
+ mdelay(1);
+
rv515_mc_resume(rdev, &save);
return 0;
}
@@ -1395,7 +1444,9 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
int r600_asic_reset(struct radeon_device *rdev)
{
- return r600_gpu_soft_reset(rdev);
+ return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
}
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
@@ -2636,8 +2687,8 @@ int r600_copy_dma(struct radeon_device *rdev,
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
- if (cur_size_in_dw > 0xFFFF)
- cur_size_in_dw = 0xFFFF;
+ if (cur_size_in_dw > 0xFFFE)
+ cur_size_in_dw = 0xFFFE;
size_in_dw -= cur_size_in_dw;
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
radeon_ring_write(ring, dst_offset & 0xfffffffc);
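
The r600_copy_dma hunk above lowers the per-packet limit from 0xFFFF to 0xFFFE dwords. The loop below only demonstrates how a copy gets split into such chunks, with an arbitrary size; it does not emit ring packets and is not the driver's code.

#include <stdio.h>

#define MAX_DW_PER_PACKET 0xFFFE

int main(void)
{
	unsigned size_in_dw = 200000;  /* hypothetical copy size in dwords */
	unsigned num_loops = (size_in_dw + MAX_DW_PER_PACKET - 1) / MAX_DW_PER_PACKET;
	unsigned i;

	for (i = 0; i < num_loops; i++) {
		unsigned cur = size_in_dw > MAX_DW_PER_PACKET ? MAX_DW_PER_PACKET
							      : size_in_dw;
		size_in_dw -= cur;
		printf("packet %u copies %u dwords\n", i, cur);
	}
	return 0;
}
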
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0be768be530c..9ea13d07cc55 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2294,6 +2294,35 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+4] = upper_32_bits(offset) & 0xff;
}
break;
+ case PACKET3_MEM_WRITE:
+ {
+ u64 offset;
+
+ if (pkt->count != 3) {
+ DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ return -EINVAL;
+ }
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx+0);
+ offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+ if (offset & 0x7) {
+ DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ return -EINVAL;
+ }
+ if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ offset += reloc->lobj.gpu_offset;
+ ib[idx+0] = offset;
+ ib[idx+1] = upper_32_bits(offset) & 0xff;
+ break;
+ }
case PACKET3_COPY_DW:
if (pkt->count != 4) {
DRM_ERROR("bad COPY_DW (invalid count)\n");
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5dc744d43d12..34e52304a525 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -132,6 +132,11 @@ extern int radeon_lockup_timeout;
#define RADEON_VA_RESERVED_SIZE (8 << 20)
#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+/* reset flags */
+#define RADEON_RESET_GFX (1 << 0)
+#define RADEON_RESET_COMPUTE (1 << 1)
+#define RADEON_RESET_DMA (1 << 2)
+
/*
* Errata workarounds.
*/
@@ -225,12 +230,13 @@ struct radeon_fence {
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 4af89126e223..33a56a09ff10 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1548,6 +1548,9 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
of_machine_is_compatible("PowerBook6,7")) {
/* ibook */
rdev->mode_info.connector_table = CT_IBOOK;
+ } else if (of_machine_is_compatible("PowerMac3,5")) {
+ /* PowerMac G4 Silver radeon 7500 */
+ rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
} else if (of_machine_is_compatible("PowerMac4,4")) {
/* emac */
rdev->mode_info.connector_table = CT_EMAC;
@@ -2212,6 +2215,54 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
+ case CT_MAC_G4_SILVER:
+ DRM_INFO("Connector Table: %d (mac g4 silver)\n",
+ rdev->mode_info.connector_table);
+ /* DVI-I - tv dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 47bf162ab9c6..2399f25ec037 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -741,7 +741,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
ret = connector_status_disconnected;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
@@ -947,7 +947,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
return connector->status;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
@@ -1401,7 +1401,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (encoder) {
/* setup ddc on the bridge */
radeon_atom_ext_encoder_setup_ddc(encoder);
- if (radeon_ddc_probe(radeon_connector)) /* try DDC */
+ /* bridge chips are always aux */
+ if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
ret = connector_status_connected;
else if (radeon_connector->dac_load_detect) { /* try load detection */
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1419,7 +1420,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
- if (radeon_ddc_probe(radeon_connector))
+ /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */
+ if (radeon_ddc_probe(radeon_connector, false))
ret = connector_status_connected;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 49b06590001e..cd756262924d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1164,6 +1164,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, r;
+ bool force_completion = false;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
@@ -1206,8 +1207,16 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
mutex_lock(&rdev->ring_lock);
/* wait for gpu to finish processing current batch */
- for (i = 0; i < RADEON_NUM_RINGS; i++)
- radeon_fence_wait_empty_locked(rdev, i);
+ for (i = 0; i < RADEON_NUM_RINGS; i++) {
+ r = radeon_fence_wait_empty_locked(rdev, i);
+ if (r) {
+ /* delay GPU reset to resume */
+ force_completion = true;
+ }
+ }
+ if (force_completion) {
+ radeon_fence_driver_force_completion(rdev);
+ }
mutex_unlock(&rdev->ring_lock);
radeon_save_bios_scratch_regs(rdev);
@@ -1338,7 +1347,6 @@ retry:
}
radeon_restore_bios_scratch_regs(rdev);
- drm_helper_resume_force_mode(rdev->ddev);
if (!r) {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1358,11 +1366,14 @@ retry:
}
}
} else {
+ radeon_fence_driver_force_completion(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
kfree(ring_data[i]);
}
}
+ drm_helper_resume_force_mode(rdev->ddev);
+
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
if (r) {
/* bad news, how to tell it to userspace ? */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 310c0e5254ba..1da2386d7cf7 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -699,10 +699,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
- if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
- (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
- ENCODER_OBJECT_ID_NONE)) {
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+ ENCODER_OBJECT_ID_NONE) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if (dig->dp_i2c_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &dig->dp_i2c_bus->adapter);
+ } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9b1a727d3c9e..dff6cf77f953 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -68,9 +68,10 @@
* 2.25.0 - eg+: new info request for num SE and num SH
* 2.26.0 - r600-eg: fix htile size computation
* 2.27.0 - r600-SI: Add CS ioctl support for async DMA
+ * 2.28.0 - r600-eg: Add MEM_WRITE packet support
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 27
+#define KMS_DRIVER_MINOR 28
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -305,8 +306,8 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
return 0;
}
-static int __devinit
-radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int radeon_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int ret;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 410a975a8eec..34356252567a 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -609,26 +609,20 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
* Returns 0 if the fences have passed, error for all other cases.
* Caller must hold ring lock.
*/
-void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+ int r;
- while(1) {
- int r;
- r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ if (r) {
if (r == -EDEADLK) {
- mutex_unlock(&rdev->ring_lock);
- r = radeon_gpu_reset(rdev);
- mutex_lock(&rdev->ring_lock);
- if (!r)
- continue;
- }
- if (r) {
- dev_err(rdev->dev, "error waiting for ring to become"
- " idle (%d)\n", r);
+ return -EDEADLK;
}
- return;
+ dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
+ ring, r);
}
+ return 0;
}
/**
@@ -854,13 +848,17 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
*/
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
- int ring;
+ int ring, r;
mutex_lock(&rdev->ring_lock);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
- radeon_fence_wait_empty_locked(rdev, ring);
+ r = radeon_fence_wait_empty_locked(rdev, ring);
+ if (r) {
+ /* no need to trigger GPU reset as we are unloading */
+ radeon_fence_driver_force_completion(rdev);
+ }
wake_up_all(&rdev->fence_queue);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
rdev->fence_drv[ring].initialized = false;
@@ -868,6 +866,25 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
mutex_unlock(&rdev->ring_lock);
}
+/**
+ * radeon_fence_driver_force_completion - force all fence waiter to complete
+ *
+ * @rdev: radeon device pointer
+ *
+ * In case of GPU reset failure make sure no process keep waiting on fence
+ * that will never complete.
+ */
+void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+{
+ int ring;
+
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ if (!rdev->fence_drv[ring].initialized)
+ continue;
+ radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
+ }
+}
+
/*
* Fence debugfs
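
radeon_fence_driver_force_completion above unblocks waiters by writing the last emitted sequence number into the fence location. A toy model of that idea, in standalone C with invented names and values, is sketched below; real fence bookkeeping in the driver is considerably more involved.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct ring_fence_example {
	uint32_t sync_seq;   /* last sequence number emitted on this ring */
	uint32_t fence_mem;  /* value the GPU would normally write back */
};

static bool fence_signaled(const struct ring_fence_example *f, uint32_t seq)
{
	return (int32_t)(f->fence_mem - seq) >= 0;
}

static void force_completion(struct ring_fence_example *f)
{
	f->fence_mem = f->sync_seq;  /* pretend the GPU reached the last fence */
}

int main(void)
{
	struct ring_fence_example f = { .sync_seq = 42, .fence_mem = 37 };

	printf("before: seq 40 signaled? %d\n", fence_signaled(&f, 40));
	force_completion(&f);
	printf("after:  seq 40 signaled? %d\n", fence_signaled(&f, 40));
	return 0;
}
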
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index c5bddd630eb9..fc60b74ee304 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -39,7 +39,7 @@ extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
* radeon_ddc_probe
*
*/
-bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
{
u8 out = 0x0;
u8 buf[8];
@@ -63,7 +63,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
- ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+ if (use_aux) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+ ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
+ } else {
+ ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+ }
+
if (ret != 2)
/* Couldn't find an accessible DDC on this connector */
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d818b503b42f..4003f5a68c09 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -209,7 +209,8 @@ enum radeon_connector_table {
CT_RN50_POWER,
CT_MAC_X800,
CT_MAC_G5_9600,
- CT_SAM440EP
+ CT_SAM440EP,
+ CT_MAC_G4_SILVER
};
enum radeon_dvo_chip {
@@ -558,7 +559,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 val);
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index aa14dbb7e4fb..0bfa656aa87d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -234,7 +234,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
- int i;
+ int i, r;
/* no need to take locks, etc. if nothing's going to change */
if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
@@ -248,8 +248,17 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
struct radeon_ring *ring = &rdev->ring[i];
- if (ring->ready)
- radeon_fence_wait_empty_locked(rdev, i);
+ if (!ring->ready) {
+ continue;
+ }
+ r = radeon_fence_wait_empty_locked(rdev, i);
+ if (r) {
+ /* needs a GPU reset dont reset here */
+ mutex_unlock(&rdev->ring_lock);
+ up_write(&rdev->pm.mclk_lock);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+ return;
+ }
}
radeon_unmap_vram_bos(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index ebd69562ef6c..141f2b6a9cf2 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -770,22 +770,28 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
int ridx = *(int*)node->info_ent->data;
struct radeon_ring *ring = &rdev->ring[ridx];
unsigned count, i, j;
+ u32 tmp;
radeon_ring_free_size(rdev, ring);
count = (ring->ring_size / 4) - ring->ring_free_dw;
- seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
- seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
+ tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
+ seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
+ tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
+ seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
if (ring->rptr_save_reg) {
seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
RREG32(ring->rptr_save_reg));
}
- seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
- seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
+ seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
+ seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
- i = ring->rptr;
- for (j = 0; j <= count; j++) {
- seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ /* print 8 dw before current rptr as often it's the last executed
+ * packet that is the root issue
+ */
+ i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+ for (j = 0; j <= (count + 32); j++) {
+ seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
i = (i + 1) & ring->ptr_mask;
}
return 0;
@@ -794,11 +800,15 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX;
+static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
+ {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index},
+ {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index},
};
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
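
The debugfs change above starts the ring dump 32 dwords before the current read pointer so the last executed packet is visible. The tiny standalone example below shows the wrap-safe index arithmetic on a power-of-two ring; the sizes and pointer values are made up.

#include <stdio.h>

int main(void)
{
	unsigned ring_size = 256;            /* hypothetical ring, power of two */
	unsigned ptr_mask = ring_size - 1;
	unsigned rptr = 5;                   /* near the start, so the window wraps */
	unsigned i = (rptr + ptr_mask + 1 - 32) & ptr_mask;
	unsigned j;

	for (j = 0; j < 8; j++) {            /* print the first few dumped slots */
		printf("slot %u\n", i);
		i = (i + 1) & ptr_mask;
	}
	return 0;
}
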
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ef683653f0b7..3240a3d64f30 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2126,15 +2126,13 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-static int si_gpu_soft_reset(struct radeon_device *rdev)
+static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- struct evergreen_mc_save save;
u32 grbm_reset = 0;
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ return;
- dev_info(rdev->dev, "GPU softreset \n");
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
@@ -2145,10 +2143,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
- evergreen_mc_stop(rdev, &save);
- if (radeon_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
+
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
@@ -2173,8 +2168,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
- /* Wait a little for things to settle down */
- udelay(50);
+
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
@@ -2185,13 +2179,75 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+}
+
+static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
+
+ dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
+
+static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct evergreen_mc_save save;
+
+ if (reset_mask == 0)
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+ evergreen_mc_stop(rdev, &save);
+ if (radeon_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ si_gpu_soft_reset_gfx(rdev);
+
+ if (reset_mask & RADEON_RESET_DMA)
+ si_gpu_soft_reset_dma(rdev);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
evergreen_mc_resume(rdev, &save);
return 0;
}
int si_asic_reset(struct radeon_device *rdev)
{
- return si_gpu_soft_reset(rdev);
+ return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
}
/* MC */
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 62b46215d423..c056aae814f0 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -62,6 +62,22 @@
#define SRBM_STATUS 0xE50
+#define SRBM_SOFT_RESET 0x0E60
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_DMA1 (1 << 6)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
#define CC_SYS_RB_BACKEND_DISABLE 0xe80
#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
@@ -1013,6 +1029,8 @@
# define DATA_SWAP_ENABLE (1 << 3)
# define FENCE_SWAP_ENABLE (1 << 4)
# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
#define DMA_TILING_CONFIG 0xd0b8
#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 1c350fc4e449..d1d5306ebf24 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -33,7 +33,7 @@
* Hardware initialization
*/
-static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev)
+static int shmob_drm_init_interface(struct shmob_drm_device *sdev)
{
static const u32 ldmt1r[] = {
[SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
@@ -67,7 +67,7 @@ static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev)
return 0;
}
-static int __devinit shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
+static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
enum shmob_drm_clk_source clksrc)
{
struct clk *clk;
@@ -330,12 +330,12 @@ static const struct dev_pm_ops shmob_drm_pm_ops = {
* Platform driver
*/
-static int __devinit shmob_drm_probe(struct platform_device *pdev)
+static int shmob_drm_probe(struct platform_device *pdev)
{
return drm_platform_init(&shmob_drm_driver, pdev);
}
-static int __devexit shmob_drm_remove(struct platform_device *pdev)
+static int shmob_drm_remove(struct platform_device *pdev)
{
drm_platform_exit(&shmob_drm_driver, pdev);
@@ -344,7 +344,7 @@ static int __devexit shmob_drm_remove(struct platform_device *pdev)
static struct platform_driver shmob_drm_platform_driver = {
.probe = shmob_drm_probe,
- .remove = __devexit_p(shmob_drm_remove),
+ .remove = shmob_drm_remove,
.driver = {
.owner = THIS_MODULE,
.name = "shmob-drm",
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 074410371e2a..656b2e3334a6 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -102,12 +102,12 @@ static int tegra_dc_set_timings(struct tegra_dc *dc,
((mode->hsync_end - mode->hsync_start) << 0);
tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH);
- value = ((mode->vsync_start - mode->vdisplay) << 16) |
- ((mode->hsync_start - mode->hdisplay) << 0);
- tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
-
value = ((mode->vtotal - mode->vsync_end) << 16) |
((mode->htotal - mode->hsync_end) << 0);
+ tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
+
+ value = ((mode->vsync_start - mode->vdisplay) << 16) |
+ ((mode->hsync_start - mode->hdisplay) << 0);
tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH);
value = (mode->vdisplay << 16) | mode->hdisplay;
@@ -221,8 +221,7 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
win.stride = crtc->fb->pitches[0];
/* program window registers */
- value = tegra_dc_readl(dc, DC_CMD_DISPLAY_WINDOW_HEADER);
- value |= WINDOW_A_SELECT;
+ value = WINDOW_A_SELECT;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 3a843a77ddc7..741b5dc2742c 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -204,24 +204,6 @@ extern int tegra_output_parse_dt(struct tegra_output *output);
extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
extern int tegra_output_exit(struct tegra_output *output);
-/* from gem.c */
-extern struct tegra_gem_object *tegra_gem_alloc(struct drm_device *drm,
- size_t size);
-extern int tegra_gem_handle_create(struct drm_device *drm,
- struct drm_file *file, size_t size,
- unsigned long flags, uint32_t *handle);
-extern int tegra_gem_dumb_create(struct drm_file *file, struct drm_device *drm,
- struct drm_mode_create_dumb *args);
-extern int tegra_gem_dumb_map_offset(struct drm_file *file,
- struct drm_device *drm, uint32_t handle,
- uint64_t *offset);
-extern int tegra_gem_dumb_destroy(struct drm_file *file,
- struct drm_device *drm, uint32_t handle);
-extern int tegra_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-extern int tegra_gem_init_object(struct drm_gem_object *obj);
-extern void tegra_gem_free_object(struct drm_gem_object *obj);
-extern struct vm_operations_struct tegra_gem_vm_ops;
-
/* from fb.c */
extern int tegra_drm_fb_init(struct drm_device *drm);
extern void tegra_drm_fb_exit(struct drm_device *drm);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index ab4016412bbf..e060c7e6434d 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -149,7 +149,7 @@ struct tmds_config {
};
static const struct tmds_config tegra2_tmds_config[] = {
- { /* 480p modes */
+ { /* slow pixel clock modes */
.pclk = 27000000,
.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
@@ -163,21 +163,8 @@ static const struct tmds_config tegra2_tmds_config[] = {
DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
- }, { /* 720p modes */
- .pclk = 74250000,
- .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
- SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
- SOR_PLL_TX_REG_LOAD(3),
- .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
- .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
- PE_CURRENT1(PE_CURRENT_6_0_mA) |
- PE_CURRENT2(PE_CURRENT_6_0_mA) |
- PE_CURRENT3(PE_CURRENT_6_0_mA),
- .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
- DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
- DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
- DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
- }, { /* 1080p modes */
+ },
+ { /* high pixel clock modes */
.pclk = UINT_MAX,
.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
@@ -479,7 +466,7 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
return;
}
- h_front_porch = mode->htotal - mode->hsync_end;
+ h_front_porch = mode->hsync_start - mode->hdisplay;
memset(&frame, 0, sizeof(frame));
frame.r = HDMI_AVI_R_SAME;
@@ -634,8 +621,8 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
pclk = mode->clock * 1000;
h_sync_width = mode->hsync_end - mode->hsync_start;
- h_front_porch = mode->htotal - mode->hsync_end;
- h_back_porch = mode->hsync_start - mode->hdisplay;
+ h_back_porch = mode->htotal - mode->hsync_end;
+ h_front_porch = mode->hsync_start - mode->hdisplay;
err = regulator_enable(hdmi->vdd);
if (err < 0) {
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
index bdb97a564d82..5d17b113a6fc 100644
--- a/drivers/gpu/drm/tegra/host1x.c
+++ b/drivers/gpu/drm/tegra/host1x.c
@@ -239,6 +239,8 @@ int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
}
}
+ client->host1x = host1x;
+
return 0;
}