path: root/drivers/gpu/drm/radeon
author		Linus Torvalds <torvalds@linux-foundation.org>	2013-05-03 06:40:34 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-03 06:40:34 +0400
commit		20a2078ce7705a6e0722ef5184336eb8657a58d8
tree		5b927c96516380aa0ecd68d8a609f7cd72120ad5 /drivers/gpu/drm/radeon
parent		0279b3c0ada1d78882f24acf94ac4595bd657a89
parent		307b9c022720f9de90d58e51743e01e9a42aec59
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This is the main drm pull request for 3.10.

  Weird bits:
   - OMAP drm changes required OMAP dss changes, in drivers/video, so I
     took them in here.
   - one more fbcon fix for font handover
   - VT switch avoidance in pm code
   - scatterlist helpers for gpu drivers - have acks from akpm

  Highlights:
   - qxl kms driver - driver for the spice qxl virtual GPU

  Nouveau:
   - fermi/kepler VRAM compression
   - GK110/nvf0 modesetting support.

  Tegra:
   - host1x core merged with 2D engine support

  i915:
   - vt switchless resume
   - more valleyview support
   - vblank fixes
   - modesetting pipe config rework

  radeon:
   - UVD engine support
   - SI chip tiling support
   - GPU registers initialisation from golden values.

  exynos:
   - device tree changes
   - fimc block support

  Otherwise:
   - bunches of fixes all over the place."

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (513 commits)
  qxl: update to new idr interfaces.
  drm/nouveau: fix build with nv50->nvc0
  drm/radeon: fix handling of v6 power tables
  drm/radeon: clarify family checks in pm table parsing
  drm/radeon: consolidate UVD clock programming
  drm/radeon: fix UPLL_REF_DIV_MASK definition
  radeon: add bo tracking debugfs
  drm/radeon: add new richland pci ids
  drm/radeon: add some new SI PCI ids
  drm/radeon: fix scratch reg handling for UVD fence
  drm/radeon: allocate SA bo in the requested domain
  drm/radeon: fix possible segfault when parsing pm tables
  drm/radeon: fix endian bugs in atom_allocate_fb_scratch()
  OMAPDSS: TFP410: return EPROBE_DEFER if the i2c adapter not found
  OMAPDSS: VENC: Add error handling for venc_probe_pdata
  OMAPDSS: HDMI: Add error handling for hdmi_probe_pdata
  OMAPDSS: RFBI: Add error handling for rfbi_probe_pdata
  OMAPDSS: DSI: Add error handling for dsi_probe_pdata
  OMAPDSS: SDI: Add error handling for sdi_probe_pdata
  OMAPDSS: DPI: Add error handling for dpi_probe_pdata
  ...
Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r--  drivers/gpu/drm/radeon/Makefile            |    2
-rw-r--r--  drivers/gpu/drm/radeon/atom.c              |    6
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h          |    2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c     |    3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c |   17
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c         | 1187
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c    |  169
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h     |    2
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h        |   48
-rw-r--r--  drivers/gpu/drm/radeon/ni.c                |  414
-rw-r--r--  drivers/gpu/drm/radeon/nid.h               |   21
-rw-r--r--  drivers/gpu/drm/radeon/r100.c              |   77
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h          |    2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c              |  404
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c        |   64
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c         |  150
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h             |   72
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h            |   94
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c       |  100
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h       |   28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c   |  132
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c         |   83
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c     |   56
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c        |    9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c      |   20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c        |   50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c        |  187
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h       |   23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c     |   16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h     |    2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c         |    6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c       |   27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c         |    2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c       |   72
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c        |  831
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c             |   52
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c             |   23
-rw-r--r--  drivers/gpu/drm/radeon/rs690d.h            |    3
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c             |   56
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c             |  909
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h            |   43
-rw-r--r--  drivers/gpu/drm/radeon/si.c                |  979
-rw-r--r--  drivers/gpu/drm/radeon/sid.h               |   40
43 files changed, 5916 insertions(+), 567 deletions(-)
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index bf172522ea68..86c5e3611892 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -76,7 +76,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
- si_blit_shaders.o radeon_prime.o
+ si_blit_shaders.o radeon_prime.o radeon_uvd.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 46a9c3772850..fb441a790f3d 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1394,10 +1394,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb\n",
- firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
- firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+ le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+ le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
- usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
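The hunk above wraps the AtomBIOS firmware-usage fields in le32_to_cpu()/le16_to_cpu() because ATOM tables are stored little-endian regardless of host byte order. A minimal sketch of the failure mode being fixed (the 512 KiB value is illustrative, not taken from the patch):

	/* BIOS stores usFirmwareUseInKb = 512 (0x0200), i.e. bytes 00 02 in memory. */
	u16 raw = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb;
	u32 wrong = raw * 1024;               /* a big-endian host reads 0x0002 -> 2 KiB */
	u32 right = le16_to_cpu(raw) * 1024;  /* byte-swap first -> 512 KiB as intended  */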
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 4b04ba3828e8..0ee573743de9 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -458,6 +458,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
union
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ULONG ulClockParams; //ULONG access for BE
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
};
UCHAR ucRefDiv; //Output Parameter
@@ -490,6 +491,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
union
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ULONG ulClockParams; //ULONG access for BE
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
};
UCHAR ucRefDiv; //Output Parameter
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 21a892c6ab9c..6d6fdb3ba0d0 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -557,6 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* use frac fb div on APUs */
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ /* use frac fb div on RS780/RS880 */
+ if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
} else {
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 4552d4aff317..44a7da66e081 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2150,13 +2150,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
atombios_apply_encoder_quirks(encoder, adjusted_mode);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
- r600_hdmi_enable(encoder);
- if (ASIC_IS_DCE6(rdev))
- ; /* TODO (use pointers instead of if-s?) */
- else if (ASIC_IS_DCE4(rdev))
- evergreen_hdmi_setmode(encoder, adjusted_mode);
- else
- r600_hdmi_setmode(encoder, adjusted_mode);
+ if (rdev->asic->display.hdmi_enable)
+ radeon_hdmi_enable(rdev, encoder, true);
+ if (rdev->asic->display.hdmi_setmode)
+ radeon_hdmi_setmode(rdev, encoder, adjusted_mode);
}
}
@@ -2413,8 +2410,10 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
disable_done:
if (radeon_encoder_is_digital(encoder)) {
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
- r600_hdmi_disable(encoder);
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ if (rdev->asic->display.hdmi_enable)
+ radeon_hdmi_enable(rdev, encoder, false);
+ }
dig = radeon_encoder->enc_priv;
dig->dig_encoder = -1;
}
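These two hunks stop open-coding the DCE4/DCE6 family checks and instead dispatch through the per-ASIC display callbacks, so a chip without HDMI hooks simply leaves the pointers NULL. A sketch of the indirection the new calls rely on; the inline-wrapper form is an assumption (in the tree it may equally be a macro in radeon.h):

	static inline void radeon_hdmi_enable(struct radeon_device *rdev,
					      struct drm_encoder *encoder, bool enable)
	{
		rdev->asic->display.hdmi_enable(encoder, enable);
	}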
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 305a657bf215..105bafb6c29d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -53,6 +53,864 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
+static const u32 evergreen_golden_registers[] =
+{
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x9b7c, 0xffffffff, 0x00000000,
+ 0x8a14, 0xffffffff, 0x00000007,
+ 0x8b10, 0xffffffff, 0x00000000,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x88d4, 0xffffffff, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0xffffffff, 0x001000f0,
+ 0x6104, 0x01000300, 0x00000000,
+ 0x5bc0, 0x00300000, 0x00000000,
+ 0x7030, 0xffffffff, 0x00000011,
+ 0x7c30, 0xffffffff, 0x00000011,
+ 0x10830, 0xffffffff, 0x00000011,
+ 0x11430, 0xffffffff, 0x00000011,
+ 0x12030, 0xffffffff, 0x00000011,
+ 0x12c30, 0xffffffff, 0x00000011,
+ 0xd02c, 0xffffffff, 0x08421000,
+ 0x240c, 0xffffffff, 0x00000380,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x28a4c, 0x06000000, 0x06000000,
+ 0x10c, 0x00000001, 0x00000001,
+ 0x8d00, 0xffffffff, 0x100e4848,
+ 0x8d04, 0xffffffff, 0x00164745,
+ 0x8c00, 0xffffffff, 0xe4000003,
+ 0x8c04, 0xffffffff, 0x40600060,
+ 0x8c08, 0xffffffff, 0x001c001c,
+ 0x8cf0, 0xffffffff, 0x08e00620,
+ 0x8c20, 0xffffffff, 0x00800080,
+ 0x8c24, 0xffffffff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0xffffffff, 0x00001010,
+ 0x28350, 0xffffffff, 0x00000000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x5cc, 0xffffffff, 0x00000001,
+ 0x9508, 0xffffffff, 0x00000002,
+ 0x913c, 0x0000000f, 0x0000000a
+};
+
+static const u32 evergreen_golden_registers2[] =
+{
+ 0x2f4c, 0xffffffff, 0x00000000,
+ 0x54f4, 0xffffffff, 0x00000000,
+ 0x54f0, 0xffffffff, 0x00000000,
+ 0x5498, 0xffffffff, 0x00000000,
+ 0x549c, 0xffffffff, 0x00000000,
+ 0x5494, 0xffffffff, 0x00000000,
+ 0x53cc, 0xffffffff, 0x00000000,
+ 0x53c8, 0xffffffff, 0x00000000,
+ 0x53c4, 0xffffffff, 0x00000000,
+ 0x53c0, 0xffffffff, 0x00000000,
+ 0x53bc, 0xffffffff, 0x00000000,
+ 0x53b8, 0xffffffff, 0x00000000,
+ 0x53b4, 0xffffffff, 0x00000000,
+ 0x53b0, 0xffffffff, 0x00000000
+};
+
+static const u32 cypress_mgcg_init[] =
+{
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x5448, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00000100,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x8d58, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x9654, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0x9040, 0xffffffff, 0x00000100,
+ 0xa200, 0xffffffff, 0x00000100,
+ 0xa204, 0xffffffff, 0x00000100,
+ 0xa208, 0xffffffff, 0x00000100,
+ 0xa20c, 0xffffffff, 0x00000100,
+ 0x971c, 0xffffffff, 0x00000100,
+ 0x977c, 0xffffffff, 0x00000100,
+ 0x3f80, 0xffffffff, 0x00000100,
+ 0xa210, 0xffffffff, 0x00000100,
+ 0xa214, 0xffffffff, 0x00000100,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x9784, 0xffffffff, 0x00000100,
+ 0x9698, 0xffffffff, 0x00000100,
+ 0x4d4, 0xffffffff, 0x00000200,
+ 0x30cc, 0xffffffff, 0x00000100,
+ 0xd0c0, 0xffffffff, 0xff000100,
+ 0x802c, 0xffffffff, 0x40000000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x9208, 0xffffffff, 0x00070000,
+ 0x920c, 0xffffffff, 0x00030002,
+ 0x9210, 0xffffffff, 0x00050004,
+ 0x921c, 0xffffffff, 0x00010006,
+ 0x9220, 0xffffffff, 0x00090008,
+ 0x9224, 0xffffffff, 0x00070000,
+ 0x9228, 0xffffffff, 0x00030002,
+ 0x922c, 0xffffffff, 0x00050004,
+ 0x9238, 0xffffffff, 0x00010006,
+ 0x923c, 0xffffffff, 0x00090008,
+ 0x9240, 0xffffffff, 0x00070000,
+ 0x9244, 0xffffffff, 0x00030002,
+ 0x9248, 0xffffffff, 0x00050004,
+ 0x9254, 0xffffffff, 0x00010006,
+ 0x9258, 0xffffffff, 0x00090008,
+ 0x925c, 0xffffffff, 0x00070000,
+ 0x9260, 0xffffffff, 0x00030002,
+ 0x9264, 0xffffffff, 0x00050004,
+ 0x9270, 0xffffffff, 0x00010006,
+ 0x9274, 0xffffffff, 0x00090008,
+ 0x9278, 0xffffffff, 0x00070000,
+ 0x927c, 0xffffffff, 0x00030002,
+ 0x9280, 0xffffffff, 0x00050004,
+ 0x928c, 0xffffffff, 0x00010006,
+ 0x9290, 0xffffffff, 0x00090008,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0x40010000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x9208, 0xffffffff, 0x00070000,
+ 0x920c, 0xffffffff, 0x00030002,
+ 0x9210, 0xffffffff, 0x00050004,
+ 0x921c, 0xffffffff, 0x00010006,
+ 0x9220, 0xffffffff, 0x00090008,
+ 0x9224, 0xffffffff, 0x00070000,
+ 0x9228, 0xffffffff, 0x00030002,
+ 0x922c, 0xffffffff, 0x00050004,
+ 0x9238, 0xffffffff, 0x00010006,
+ 0x923c, 0xffffffff, 0x00090008,
+ 0x9240, 0xffffffff, 0x00070000,
+ 0x9244, 0xffffffff, 0x00030002,
+ 0x9248, 0xffffffff, 0x00050004,
+ 0x9254, 0xffffffff, 0x00010006,
+ 0x9258, 0xffffffff, 0x00090008,
+ 0x925c, 0xffffffff, 0x00070000,
+ 0x9260, 0xffffffff, 0x00030002,
+ 0x9264, 0xffffffff, 0x00050004,
+ 0x9270, 0xffffffff, 0x00010006,
+ 0x9274, 0xffffffff, 0x00090008,
+ 0x9278, 0xffffffff, 0x00070000,
+ 0x927c, 0xffffffff, 0x00030002,
+ 0x9280, 0xffffffff, 0x00050004,
+ 0x928c, 0xffffffff, 0x00010006,
+ 0x9290, 0xffffffff, 0x00090008,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 redwood_mgcg_init[] =
+{
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x5448, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00000100,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x8d58, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x9654, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0x9040, 0xffffffff, 0x00000100,
+ 0xa200, 0xffffffff, 0x00000100,
+ 0xa204, 0xffffffff, 0x00000100,
+ 0xa208, 0xffffffff, 0x00000100,
+ 0xa20c, 0xffffffff, 0x00000100,
+ 0x971c, 0xffffffff, 0x00000100,
+ 0x977c, 0xffffffff, 0x00000100,
+ 0x3f80, 0xffffffff, 0x00000100,
+ 0xa210, 0xffffffff, 0x00000100,
+ 0xa214, 0xffffffff, 0x00000100,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x9784, 0xffffffff, 0x00000100,
+ 0x9698, 0xffffffff, 0x00000100,
+ 0x4d4, 0xffffffff, 0x00000200,
+ 0x30cc, 0xffffffff, 0x00000100,
+ 0xd0c0, 0xffffffff, 0xff000100,
+ 0x802c, 0xffffffff, 0x40000000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 cedar_golden_registers[] =
+{
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x9b7c, 0xffffffff, 0x00000000,
+ 0x8a14, 0xffffffff, 0x00000007,
+ 0x8b10, 0xffffffff, 0x00000000,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x88d4, 0xffffffff, 0x00000000,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0xffffffff, 0x001000f0,
+ 0x6104, 0x01000300, 0x00000000,
+ 0x5bc0, 0x00300000, 0x00000000,
+ 0x7030, 0xffffffff, 0x00000011,
+ 0x7c30, 0xffffffff, 0x00000011,
+ 0x10830, 0xffffffff, 0x00000011,
+ 0x11430, 0xffffffff, 0x00000011,
+ 0xd02c, 0xffffffff, 0x08421000,
+ 0x240c, 0xffffffff, 0x00000380,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x28a4c, 0x06000000, 0x06000000,
+ 0x10c, 0x00000001, 0x00000001,
+ 0x8d00, 0xffffffff, 0x100e4848,
+ 0x8d04, 0xffffffff, 0x00164745,
+ 0x8c00, 0xffffffff, 0xe4000003,
+ 0x8c04, 0xffffffff, 0x40600060,
+ 0x8c08, 0xffffffff, 0x001c001c,
+ 0x8cf0, 0xffffffff, 0x08e00410,
+ 0x8c20, 0xffffffff, 0x00800080,
+ 0x8c24, 0xffffffff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0xffffffff, 0x00001010,
+ 0x28350, 0xffffffff, 0x00000000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x5cc, 0xffffffff, 0x00000001,
+ 0x9508, 0xffffffff, 0x00000002
+};
+
+static const u32 cedar_mgcg_init[] =
+{
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x5448, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00000100,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x8d58, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x9654, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0x9040, 0xffffffff, 0x00000100,
+ 0xa200, 0xffffffff, 0x00000100,
+ 0xa204, 0xffffffff, 0x00000100,
+ 0xa208, 0xffffffff, 0x00000100,
+ 0xa20c, 0xffffffff, 0x00000100,
+ 0x971c, 0xffffffff, 0x00000100,
+ 0x977c, 0xffffffff, 0x00000100,
+ 0x3f80, 0xffffffff, 0x00000100,
+ 0xa210, 0xffffffff, 0x00000100,
+ 0xa214, 0xffffffff, 0x00000100,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x9784, 0xffffffff, 0x00000100,
+ 0x9698, 0xffffffff, 0x00000100,
+ 0x4d4, 0xffffffff, 0x00000200,
+ 0x30cc, 0xffffffff, 0x00000100,
+ 0xd0c0, 0xffffffff, 0xff000100,
+ 0x802c, 0xffffffff, 0x40000000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9178, 0xffffffff, 0x00050000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00010004,
+ 0x9190, 0xffffffff, 0x00070006,
+ 0x9194, 0xffffffff, 0x00050000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x91a8, 0xffffffff, 0x00010004,
+ 0x91ac, 0xffffffff, 0x00070006,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 juniper_mgcg_init[] =
+{
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x5448, 0xffffffff, 0x00000100,
+ 0x55e4, 0xffffffff, 0x00000100,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x8d58, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x9654, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0x9040, 0xffffffff, 0x00000100,
+ 0xa200, 0xffffffff, 0x00000100,
+ 0xa204, 0xffffffff, 0x00000100,
+ 0xa208, 0xffffffff, 0x00000100,
+ 0xa20c, 0xffffffff, 0x00000100,
+ 0x971c, 0xffffffff, 0x00000100,
+ 0xd0c0, 0xffffffff, 0xff000100,
+ 0x802c, 0xffffffff, 0x40000000,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x91e8, 0xffffffff, 0x00000000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x9208, 0xffffffff, 0x00070000,
+ 0x920c, 0xffffffff, 0x00030002,
+ 0x9210, 0xffffffff, 0x00050004,
+ 0x921c, 0xffffffff, 0x00010006,
+ 0x9220, 0xffffffff, 0x00090008,
+ 0x9224, 0xffffffff, 0x00070000,
+ 0x9228, 0xffffffff, 0x00030002,
+ 0x922c, 0xffffffff, 0x00050004,
+ 0x9238, 0xffffffff, 0x00010006,
+ 0x923c, 0xffffffff, 0x00090008,
+ 0x9240, 0xffffffff, 0x00070000,
+ 0x9244, 0xffffffff, 0x00030002,
+ 0x9248, 0xffffffff, 0x00050004,
+ 0x9254, 0xffffffff, 0x00010006,
+ 0x9258, 0xffffffff, 0x00090008,
+ 0x925c, 0xffffffff, 0x00070000,
+ 0x9260, 0xffffffff, 0x00030002,
+ 0x9264, 0xffffffff, 0x00050004,
+ 0x9270, 0xffffffff, 0x00010006,
+ 0x9274, 0xffffffff, 0x00090008,
+ 0x9278, 0xffffffff, 0x00070000,
+ 0x927c, 0xffffffff, 0x00030002,
+ 0x9280, 0xffffffff, 0x00050004,
+ 0x928c, 0xffffffff, 0x00010006,
+ 0x9290, 0xffffffff, 0x00090008,
+ 0x9294, 0xffffffff, 0x00000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x802c, 0xffffffff, 0xc0000000,
+ 0x977c, 0xffffffff, 0x00000100,
+ 0x3f80, 0xffffffff, 0x00000100,
+ 0xa210, 0xffffffff, 0x00000100,
+ 0xa214, 0xffffffff, 0x00000100,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x9784, 0xffffffff, 0x00000100,
+ 0x9698, 0xffffffff, 0x00000100,
+ 0x4d4, 0xffffffff, 0x00000200,
+ 0x30cc, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 supersumo_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5cc, 0xffffffff, 0x00000001,
+ 0x7030, 0xffffffff, 0x00000011,
+ 0x7c30, 0xffffffff, 0x00000011,
+ 0x6104, 0x01000300, 0x00000000,
+ 0x5bc0, 0x00300000, 0x00000000,
+ 0x8c04, 0xffffffff, 0x40600060,
+ 0x8c08, 0xffffffff, 0x001c001c,
+ 0x8c20, 0xffffffff, 0x00800080,
+ 0x8c24, 0xffffffff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0xffffffff, 0x00001010,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x91c4, 0xffffffff, 0x00010006,
+ 0x91e0, 0xffffffff, 0x00010006,
+ 0x9200, 0xffffffff, 0x00010006,
+ 0x9150, 0xffffffff, 0x6e944040,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9180, 0xffffffff, 0x00050004,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x919c, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91b8, 0xffffffff, 0x00050004,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91d4, 0xffffffff, 0x00050004,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x91f4, 0xffffffff, 0x00050004,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x9160, 0xffffffff, 0x00030002,
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x91b0, 0xffffffff, 0x00070000,
+ 0x91cc, 0xffffffff, 0x00070000,
+ 0x91ec, 0xffffffff, 0x00070000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x91c8, 0xffffffff, 0x00090008,
+ 0x91e4, 0xffffffff, 0x00090008,
+ 0x9204, 0xffffffff, 0x00090008,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x5644, 0xffffffff, 0x00000100,
+ 0x9b7c, 0xffffffff, 0x00000000,
+ 0x8030, 0xffffffff, 0x0000100a,
+ 0x8a14, 0xffffffff, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x8b10, 0xffffffff, 0x00000000,
+ 0x28a4c, 0x06000000, 0x06000000,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x913c, 0xffff000f, 0x0100000a,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x88d4, 0xffffffff, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0x5e78, 0xffffffff, 0x001000f0,
+ 0xd02c, 0xffffffff, 0x08421000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x8d00, 0xffffffff, 0x100e4848,
+ 0x8d04, 0xffffffff, 0x00164745,
+ 0x8c00, 0xffffffff, 0xe4000003,
+ 0x8cf0, 0x1fffffff, 0x08e00620,
+ 0x28350, 0xffffffff, 0x00000000,
+ 0x9508, 0xffffffff, 0x00000002
+};
+
+static const u32 sumo_golden_registers[] =
+{
+ 0x900c, 0x00ffffff, 0x0017071f,
+ 0x8c18, 0xffffffff, 0x10101060,
+ 0x8c1c, 0xffffffff, 0x00001010,
+ 0x8c30, 0x0000000f, 0x00000005,
+ 0x9688, 0x0000000f, 0x00000007
+};
+
+static const u32 wrestler_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5cc, 0xffffffff, 0x00000001,
+ 0x7030, 0xffffffff, 0x00000011,
+ 0x7c30, 0xffffffff, 0x00000011,
+ 0x6104, 0x01000300, 0x00000000,
+ 0x5bc0, 0x00300000, 0x00000000,
+ 0x918c, 0xffffffff, 0x00010006,
+ 0x91a8, 0xffffffff, 0x00010006,
+ 0x9150, 0xffffffff, 0x6e944040,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x915c, 0xffffffff, 0x00010000,
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9178, 0xffffffff, 0x00070000,
+ 0x9194, 0xffffffff, 0x00070000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x9190, 0xffffffff, 0x00090008,
+ 0x91ac, 0xffffffff, 0x00090008,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x929c, 0xffffffff, 0x00000001,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x9b7c, 0xffffffff, 0x00000000,
+ 0x8030, 0xffffffff, 0x0000100a,
+ 0x8a14, 0xffffffff, 0x00000001,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x8b10, 0xffffffff, 0x00000000,
+ 0x28a4c, 0x06000000, 0x06000000,
+ 0x4d8, 0xffffffff, 0x00000100,
+ 0x913c, 0xffff000f, 0x0100000a,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x88d4, 0xffffffff, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0x5e78, 0xffffffff, 0x001000f0,
+ 0xd02c, 0xffffffff, 0x08421000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x8d00, 0xffffffff, 0x100e4848,
+ 0x8d04, 0xffffffff, 0x00164745,
+ 0x8c00, 0xffffffff, 0xe4000003,
+ 0x8cf0, 0x1fffffff, 0x08e00410,
+ 0x28350, 0xffffffff, 0x00000000,
+ 0x9508, 0xffffffff, 0x00000002,
+ 0x900c, 0xffffffff, 0x0017071f,
+ 0x8c18, 0xffffffff, 0x10101060,
+ 0x8c1c, 0xffffffff, 0x00001010
+};
+
+static const u32 barts_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0x8f311ff1, 0x001000f0,
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0xbd4, 0x70073777, 0x00010001,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x03773777, 0x02011003,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x98f8, 0x33773777, 0x02011003,
+ 0x98fc, 0xffffffff, 0x76543210,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2f48, 0x00000007, 0x02011003,
+ 0x6b28, 0x00000010, 0x00000012,
+ 0x7728, 0x00000010, 0x00000012,
+ 0x10328, 0x00000010, 0x00000012,
+ 0x10f28, 0x00000010, 0x00000012,
+ 0x11b28, 0x00000010, 0x00000012,
+ 0x12728, 0x00000010, 0x00000012,
+ 0x240c, 0x000007ff, 0x00000380,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x10c, 0x00000001, 0x00010003,
+ 0xa02c, 0xffffffff, 0x0000009b,
+ 0x913c, 0x0000000f, 0x0100000a,
+ 0x8d00, 0xffff7f7f, 0x100e4848,
+ 0x8d04, 0x00ffffff, 0x00164745,
+ 0x8c00, 0xfffc0003, 0xe4000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c08, 0x00ff00ff, 0x001c001c,
+ 0x8cf0, 0x1fff1fff, 0x08e00620,
+ 0x8c20, 0x0fff0fff, 0x00800080,
+ 0x8c24, 0x0fff0fff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0x0000ffff, 0x00001010,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0x3700001f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0x001f3ae3, 0x000000c2,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 turks_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0x8f311ff1, 0x001000f0,
+ 0x8c8, 0x00003000, 0x00001070,
+ 0x8cc, 0x000fffff, 0x00040035,
+ 0x3f90, 0xffff0000, 0xfff00000,
+ 0x9148, 0xffff0000, 0xfff00000,
+ 0x3f94, 0xffff0000, 0xfff00000,
+ 0x914c, 0xffff0000, 0xfff00000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0xbd4, 0x00073007, 0x00010002,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x03773777, 0x02010002,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x98f8, 0x33773777, 0x00010002,
+ 0x98fc, 0xffffffff, 0x33221100,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2f48, 0x33773777, 0x00010002,
+ 0x6b28, 0x00000010, 0x00000012,
+ 0x7728, 0x00000010, 0x00000012,
+ 0x10328, 0x00000010, 0x00000012,
+ 0x10f28, 0x00000010, 0x00000012,
+ 0x11b28, 0x00000010, 0x00000012,
+ 0x12728, 0x00000010, 0x00000012,
+ 0x240c, 0x000007ff, 0x00000380,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x10c, 0x00000001, 0x00010003,
+ 0xa02c, 0xffffffff, 0x0000009b,
+ 0x913c, 0x0000000f, 0x0100000a,
+ 0x8d00, 0xffff7f7f, 0x100e4848,
+ 0x8d04, 0x00ffffff, 0x00164745,
+ 0x8c00, 0xfffc0003, 0xe4000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c08, 0x00ff00ff, 0x001c001c,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x8c20, 0x0fff0fff, 0x00800080,
+ 0x8c24, 0x0fff0fff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0x0000ffff, 0x00001010,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0x3700001f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0x001f3ae3, 0x000000c2,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 caicos_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0x8f311ff1, 0x001000f0,
+ 0x8c8, 0x00003420, 0x00001450,
+ 0x8cc, 0x000fffff, 0x00040035,
+ 0x3f90, 0xffff0000, 0xfffc0000,
+ 0x9148, 0xffff0000, 0xfffc0000,
+ 0x3f94, 0xffff0000, 0xfffc0000,
+ 0x914c, 0xffff0000, 0xfffc0000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0xbd4, 0x00073007, 0x00010001,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x03773777, 0x02010001,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x98f8, 0x33773777, 0x02010001,
+ 0x98fc, 0xffffffff, 0x33221100,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2f48, 0x33773777, 0x02010001,
+ 0x6b28, 0x00000010, 0x00000012,
+ 0x7728, 0x00000010, 0x00000012,
+ 0x10328, 0x00000010, 0x00000012,
+ 0x10f28, 0x00000010, 0x00000012,
+ 0x11b28, 0x00000010, 0x00000012,
+ 0x12728, 0x00000010, 0x00000012,
+ 0x240c, 0x000007ff, 0x00000380,
+ 0x8a14, 0xf000001f, 0x00000001,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x10c, 0x00000001, 0x00010003,
+ 0xa02c, 0xffffffff, 0x0000009b,
+ 0x913c, 0x0000000f, 0x0100000a,
+ 0x8d00, 0xffff7f7f, 0x100e4848,
+ 0x8d04, 0x00ffffff, 0x00164745,
+ 0x8c00, 0xfffc0003, 0xe4000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c08, 0x00ff00ff, 0x001c001c,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x8c20, 0x0fff0fff, 0x00800080,
+ 0x8c24, 0x0fff0fff, 0x00800080,
+ 0x8c18, 0xffffffff, 0x20202078,
+ 0x8c1c, 0x0000ffff, 0x00001010,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0x3700001f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0x001f3ae3, 0x000000c2,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static void evergreen_init_golden_registers(struct radeon_device *rdev)
+{
+ switch (rdev->family) {
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers));
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers2,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ cypress_mgcg_init,
+ (const u32)ARRAY_SIZE(cypress_mgcg_init));
+ break;
+ case CHIP_JUNIPER:
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers));
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers2,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ juniper_mgcg_init,
+ (const u32)ARRAY_SIZE(juniper_mgcg_init));
+ break;
+ case CHIP_REDWOOD:
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers));
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers2,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ redwood_mgcg_init,
+ (const u32)ARRAY_SIZE(redwood_mgcg_init));
+ break;
+ case CHIP_CEDAR:
+ radeon_program_register_sequence(rdev,
+ cedar_golden_registers,
+ (const u32)ARRAY_SIZE(cedar_golden_registers));
+ radeon_program_register_sequence(rdev,
+ evergreen_golden_registers2,
+ (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+ radeon_program_register_sequence(rdev,
+ cedar_mgcg_init,
+ (const u32)ARRAY_SIZE(cedar_mgcg_init));
+ break;
+ case CHIP_PALM:
+ radeon_program_register_sequence(rdev,
+ wrestler_golden_registers,
+ (const u32)ARRAY_SIZE(wrestler_golden_registers));
+ break;
+ case CHIP_SUMO:
+ radeon_program_register_sequence(rdev,
+ supersumo_golden_registers,
+ (const u32)ARRAY_SIZE(supersumo_golden_registers));
+ break;
+ case CHIP_SUMO2:
+ radeon_program_register_sequence(rdev,
+ supersumo_golden_registers,
+ (const u32)ARRAY_SIZE(supersumo_golden_registers));
+ radeon_program_register_sequence(rdev,
+ sumo_golden_registers,
+ (const u32)ARRAY_SIZE(sumo_golden_registers));
+ break;
+ case CHIP_BARTS:
+ radeon_program_register_sequence(rdev,
+ barts_golden_registers,
+ (const u32)ARRAY_SIZE(barts_golden_registers));
+ break;
+ case CHIP_TURKS:
+ radeon_program_register_sequence(rdev,
+ turks_golden_registers,
+ (const u32)ARRAY_SIZE(turks_golden_registers));
+ break;
+ case CHIP_CAICOS:
+ radeon_program_register_sequence(rdev,
+ caicos_golden_registers,
+ (const u32)ARRAY_SIZE(caicos_golden_registers));
+ break;
+ default:
+ break;
+ }
+}
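Each golden-register table above is a flat array of (offset, and-mask, or-value) triples. The helper that consumes them, radeon_program_register_sequence(), lands elsewhere in this pull (see radeon_device.c in the diffstat) and its hunk is not shown here, so the following is a reconstruction of the masked read-modify-write it is expected to perform, not a quote of the patch:

	/* sketch: apply (reg, and_mask, or_value) triples */
	for (i = 0; i < array_size; i += 3) {
		u32 reg = registers[i], and_mask = registers[i + 1], or_val = registers[i + 2];
		u32 tmp;

		if (and_mask == 0xffffffff) {
			tmp = or_val;                 /* full-register override */
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;             /* touch only the masked bits */
			tmp |= (or_val & and_mask);
		}
		WREG32(reg, tmp);
	}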
+
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
unsigned *bankh, unsigned *mtaspect,
unsigned *tile_split)
@@ -84,6 +942,142 @@ void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
}
}
+static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
+ u32 cntl_reg, u32 status_reg)
+{
+ int r, i;
+ struct atom_clock_dividers dividers;
+
+ r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ clock, false, &dividers);
+ if (r)
+ return r;
+
+ WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));
+
+ for (i = 0; i < 100; i++) {
+ if (RREG32(status_reg) & DCLK_STATUS)
+ break;
+ mdelay(10);
+ }
+ if (i == 100)
+ return -ETIMEDOUT;
+
+ return 0;
+}
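The status poll above gives the divider roughly one second to report stable (100 iterations x mdelay(10)) before bailing out with -ETIMEDOUT.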
+
+int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ int r = 0;
+ u32 cg_scratch = RREG32(CG_SCRATCH1);
+
+ r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
+ if (r)
+ goto done;
+ cg_scratch &= 0xffff0000;
+ cg_scratch |= vclk / 100; /* Mhz */
+
+ r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
+ if (r)
+ goto done;
+ cg_scratch &= 0x0000ffff;
+ cg_scratch |= (dclk / 100) << 16; /* Mhz */
+
+done:
+ WREG32(CG_SCRATCH1, cg_scratch);
+
+ return r;
+}
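CG_SCRATCH1 ends up holding both clocks in MHz, DCLK in the upper half-word and VCLK in the lower. Assuming the clocks are passed in the driver's usual 10 kHz units, an illustrative request of vclk = 53300 and dclk = 40000 (533 MHz / 400 MHz) would leave cg_scratch = (400 << 16) | 533 = 0x01900215.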
+
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ /* start off with something large */
+ unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+ int r;
+
+ /* bypass vclk and dclk with bclk */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ /* put PLL in bypass mode */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+ if (!vclk || !dclk) {
+ /* keep the Bypass mode, put PLL to sleep */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ return 0;
+ }
+
+ r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+ 16384, 0x03FFFFFF, 0, 128, 5,
+ &fb_div, &vclk_div, &dclk_div);
+ if (r)
+ return r;
+
+ /* set VCO_MODE to 1 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+ /* toggle UPLL_SLEEP to 1 then back to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+ /* deassert UPLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(1);
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* assert UPLL_RESET again */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+ /* disable spread spectrum. */
+ WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+ /* set feedback divider */
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+ /* set ref divider to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+ if (fb_div < 307200)
+ WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+ else
+ WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
+
+ /* set PDIV_A and PDIV_B */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+ ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+ /* give the PLL some time to settle */
+ mdelay(15);
+
+ /* deassert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(15);
+
+ /* switch from bypass mode to normal mode */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* switch VCLK and DCLK selection */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ mdelay(100);
+
+ return 0;
+}
+
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
u16 ctl, v;
@@ -105,6 +1099,27 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
}
}
+static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
/**
* dce4_wait_for_vblank - vblank wait asic callback.
*
@@ -115,21 +1130,28 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
*/
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- int i;
+ unsigned i = 0;
if (crtc >= rdev->num_crtc)
return;
- if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
+ if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (dce4_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce4_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ }
+
+ while (!dce4_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce4_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
}
}
@@ -608,6 +1630,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS avoid breaking the
+ * aux dp channel on imac and help (but not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * also avoid interrupt storms during dpms.
+ */
+ continue;
+ }
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
@@ -1325,17 +2357,16 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
@@ -1347,6 +2378,15 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ /* ***** */
} else {
save->crtc_enabled[i] = false;
}
@@ -1364,6 +2404,22 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
}
/* wait for the MC to settle */
udelay(100);
+
+ /* lock double buffered regs */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
+ tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (!(tmp & 1)) {
+ tmp |= 1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ }
+ }
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1385,6 +2441,33 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+ /* unlock regs and wait for update */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
/* unblackout the MC */
tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
tmp &= ~BLACKOUT_MODE_MASK;
@@ -2050,6 +3133,14 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
}
/* enabled rb are just the one not disabled :) */
disabled_rb_mask = tmp;
+ tmp = 0;
+ for (i = 0; i < rdev->config.evergreen.max_backends; i++)
+ tmp |= (1 << i);
+ /* if all the backends are disabled, fix it up here */
+ if ((disabled_rb_mask & tmp) == tmp) {
+ for (i = 0; i < rdev->config.evergreen.max_backends; i++)
+ disabled_rb_mask &= ~(1 << i);
+ }
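Here tmp is built with one bit per possible backend, so the check catches an impossible "everything harvested" readout. For example (numbers assumed), with max_backends = 4 the mask is 0xf; if the fuses claimed disabled_rb_mask & 0xf == 0xf, those four bits are cleared again so the nominal backends stay usable.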
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
@@ -2058,6 +3149,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
if ((rdev->config.evergreen.max_backends == 1) &&
(rdev->flags & RADEON_IS_IGP)) {
@@ -3360,6 +4454,9 @@ restart_ih:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
break;
case 146:
case 147:
@@ -3571,7 +4668,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
static int evergreen_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ struct radeon_ring *ring;
int r;
/* enable pcie gen2 link */
@@ -3638,6 +4735,17 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
+ r = rv770_uvd_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
+
+ if (r)
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -3647,6 +4755,7 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
@@ -3670,6 +4779,19 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size,
+ R600_WB_UVD_RPTR_OFFSET,
+ UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (!r)
+ r = r600_uvd_init(rdev);
+
+ if (r)
+ DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
+ }
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3701,6 +4823,9 @@ int evergreen_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ /* init golden registers */
+ evergreen_init_golden_registers(rdev);
+
rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
@@ -3716,8 +4841,10 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
+ r600_uvd_rbc_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
@@ -3762,6 +4889,8 @@ int evergreen_init(struct radeon_device *rdev)
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
+ /* init golden registers */
+ evergreen_init_golden_registers(rdev);
/* Initialize scratch registers */
r600_scratch_init(rdev);
/* Initialize surface registers */
@@ -3797,6 +4926,13 @@ int evergreen_init(struct radeon_device *rdev)
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
+ 4096);
+ }
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3843,6 +4979,7 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
+ radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
@@ -3878,7 +5015,7 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
if (!(mask & DRM_PCIE_SPEED_50))
return;
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -3889,33 +5026,33 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_GEN2_EN_STRAP;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
} else {
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
if (1)
link_width_cntl |= LC_UPCONFIGURE_DIS;
else
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 4fdecc2b4040..b4ab8ceb1654 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -54,6 +54,68 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
}
+static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ struct cea_sad *sads;
+ int i, sad_count;
+
+ static const u16 eld_reg_to_type[][2] = {
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ };
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+ BUG_ON(!sads);
+
+ for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ u32 value = 0;
+ int j;
+
+ for (j = 0; j < sad_count; j++) {
+ struct cea_sad *sad = &sads[j];
+
+ if (sad->format == eld_reg_to_type[i][1]) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+ value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
+ break;
+ }
+ }
+ WREG32(eld_reg_to_type[i][0], value);
+ }
+
+ kfree(sads);
+}
+
/*
* build a HDMI Video Info Frame
*/
@@ -85,6 +147,30 @@ static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
frame[0xC] | (frame[0xD] << 8));
}
+static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ u32 base_rate = 48000;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* XXX: properly calculate this */
+ /* XXX two dtos; generally use dto0 for hdmi */
+ /* Express [24MHz / target pixel clock] as an exact rational
+ * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ */
+ WREG32(DCCG_AUDIO_DTO0_PHASE, (base_rate*50) & 0xffffff);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, (clock*100) & 0xffffff);
+ WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
+}
+
+
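A rough worked example of the phase/module ratio programmed above, assuming mode->clock is in kHz as usual for drm display modes (the numbers are illustrative, not taken from the patch):

	/* 1080p pixel clock: 148500 kHz */
	u32 phase  = 48000 * 50;	/* 2400000  -> DCCG_AUDIO_DTO0_PHASE  */
	u32 module = 148500 * 100;	/* 14850000 -> DCCG_AUDIO_DTO0_MODULE */

	/* phase / module = 2400000 / 14850000 = 24 MHz / 148.5 MHz, and both
	 * values still fit in the 24-bit (& 0xffffff) register fields.
	 */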
/*
* update the info frames with the data from the current display mode
*/
@@ -104,33 +190,19 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
return;
offset = dig->afmt->offset;
- r600_audio_set_clock(encoder, mode->clock);
+ evergreen_audio_set_dto(encoder, mode->clock);
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
HDMI_NULL_SEND); /* send null packets when required */
WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
- WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
- HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
- HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
-
- WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
- AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
- AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
-
- WREG32(HDMI_ACR_PACKET_CONTROL + offset,
- HDMI_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
- HDMI_ACR_SOURCE); /* select SW CTS value */
-
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
HDMI_NULL_SEND | /* send null packets when required */
HDMI_GC_SEND | /* send general control packets */
HDMI_GC_CONT); /* send general control packets every frame */
WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
- HDMI_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
@@ -138,11 +210,47 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
- HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
+ WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+
+ /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
+
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI_ACR_SOURCE); /* select SW CTS value */
+
+ evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+ WREG32(AFMT_60958_0 + offset,
+ AFMT_60958_CS_CHANNEL_NUMBER_L(1));
+
+ WREG32(AFMT_60958_1 + offset,
+ AFMT_60958_CS_CHANNEL_NUMBER_R(2));
+
+ WREG32(AFMT_60958_2 + offset,
+ AFMT_60958_CS_CHANNEL_NUMBER_2(3) |
+ AFMT_60958_CS_CHANNEL_NUMBER_3(4) |
+ AFMT_60958_CS_CHANNEL_NUMBER_4(5) |
+ AFMT_60958_CS_CHANNEL_NUMBER_5(6) |
+ AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
+ AFMT_60958_CS_CHANNEL_NUMBER_7(8));
+
+ /* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */
+
+ WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
+ AFMT_AUDIO_CHANNEL_ENABLE(0xff));
+
+ /* fglrx sets 0x40 in 0x5f80 here */
+ evergreen_hdmi_write_sad_regs(encoder);
+
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
@@ -156,7 +264,17 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
}
evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
- evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+ WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
+
+ WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
+ HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
+ ~HDMI_AVI_INFO_LINE_MASK);
+
+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND); /* send audio packets */
 /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
@@ -164,3 +282,20 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
}
+
+void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (enable && dig->afmt->enabled)
+ return;
+ if (!enable && !dig->afmt->enabled)
+ return;
+
+ dig->afmt->enabled = enable;
+
+ DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+ enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index f585be16e2d5..881aba23c477 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -226,6 +226,8 @@
#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
+#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
+#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 982d25ad9af3..75c05631146d 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -53,6 +53,43 @@
#define RCU_IND_INDEX 0x100
#define RCU_IND_DATA 0x104
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL 0x718
+# define UPLL_RESET_MASK 0x00000001
+# define UPLL_SLEEP_MASK 0x00000002
+# define UPLL_BYPASS_EN_MASK 0x00000004
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_REF_DIV_MASK 0x003F0000
+# define UPLL_VCO_MODE_MASK 0x00000200
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+#define CG_UPLL_FUNC_CNTL_2 0x71c
+# define UPLL_PDIV_A(x) ((x) << 0)
+# define UPLL_PDIV_A_MASK 0x0000007F
+# define UPLL_PDIV_B(x) ((x) << 8)
+# define UPLL_PDIV_B_MASK 0x00007F00
+# define VCLK_SRC_SEL(x) ((x) << 20)
+# define VCLK_SRC_SEL_MASK 0x01F00000
+# define DCLK_SRC_SEL(x) ((x) << 25)
+# define DCLK_SRC_SEL_MASK 0x3E000000
+#define CG_UPLL_FUNC_CNTL_3 0x720
+# define UPLL_FB_DIV(x) ((x) << 0)
+# define UPLL_FB_DIV_MASK 0x01FFFFFF
+#define CG_UPLL_FUNC_CNTL_4 0x854
+# define UPLL_SPARE_ISPARE9 0x00020000
+#define CG_UPLL_SPREAD_SPECTRUM 0x79c
+# define SSEN_MASK 0x00000001
+
+/* fusion uvd clocks */
+#define CG_DCLK_CNTL 0x610
+# define DCLK_DIVIDER_MASK 0x7f
+# define DCLK_DIR_CNTL_EN (1 << 8)
+#define CG_DCLK_STATUS 0x614
+# define DCLK_STATUS (1 << 0)
+#define CG_VCLK_CNTL 0x618
+#define CG_VCLK_STATUS 0x61c
+#define CG_SCRATCH1 0x820
+
#define GRBM_GFX_INDEX 0x802C
#define INSTANCE_INDEX(x) ((x) << 0)
#define SE_INDEX(x) ((x) << 16)
@@ -197,6 +234,7 @@
# define HDMI_MPEG_INFO_CONT (1 << 9)
#define HDMI_INFOFRAME_CONTROL1 0x7048
# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI_AVI_INFO_LINE_MASK (0x3f << 0)
# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
#define HDMI_GENERIC_PACKET_CONTROL 0x704c
@@ -992,6 +1030,16 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG 0xef4c
+#define UVD_UDEC_DB_ADDR_CONFIG 0xef50
+#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54
+#define UVD_RBC_RB_RPTR 0xf690
+#define UVD_RBC_RB_WPTR 0xf694
+
/*
* PM4
*/
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 27769e724b6d..7969c0c8ec20 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -78,6 +78,282 @@ MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
+
+static const u32 cayman_golden_registers2[] =
+{
+ 0x3e5c, 0xffffffff, 0x00000000,
+ 0x3e48, 0xffffffff, 0x00000000,
+ 0x3e4c, 0xffffffff, 0x00000000,
+ 0x3e64, 0xffffffff, 0x00000000,
+ 0x3e50, 0xffffffff, 0x00000000,
+ 0x3e60, 0xffffffff, 0x00000000
+};
+
+static const u32 cayman_golden_registers[] =
+{
+ 0x5eb4, 0xffffffff, 0x00000002,
+ 0x5e78, 0x8f311ff1, 0x001000f0,
+ 0x3f90, 0xffff0000, 0xff000000,
+ 0x9148, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0xc78, 0x00000080, 0x00000080,
+ 0xbd4, 0x70073777, 0x00011003,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x73773777, 0x02011003,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x98f8, 0x33773777, 0x02011003,
+ 0x98fc, 0xffffffff, 0x76541032,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2f48, 0x33773777, 0x42010001,
+ 0x6b28, 0x00000010, 0x00000012,
+ 0x7728, 0x00000010, 0x00000012,
+ 0x10328, 0x00000010, 0x00000012,
+ 0x10f28, 0x00000010, 0x00000012,
+ 0x11b28, 0x00000010, 0x00000012,
+ 0x12728, 0x00000010, 0x00000012,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x10c, 0x00000001, 0x00010003,
+ 0xa02c, 0xffffffff, 0x0000009b,
+ 0x913c, 0x0000010f, 0x01000100,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0x3700001f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x88c4, 0x001f3ae3, 0x00000082,
+ 0x88d0, 0xffffffff, 0x0f40df40,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 dvst_golden_registers2[] =
+{
+ 0x8f8, 0xffffffff, 0,
+ 0x8fc, 0x00380000, 0,
+ 0x8f8, 0xffffffff, 1,
+ 0x8fc, 0x0e000000, 0
+};
+
+static const u32 dvst_golden_registers[] =
+{
+ 0x690, 0x3fff3fff, 0x20c00033,
+ 0x918c, 0x0fff0fff, 0x00010006,
+ 0x91a8, 0x0fff0fff, 0x00010006,
+ 0x9150, 0xffffdfff, 0x6e944040,
+ 0x917c, 0x0fff0fff, 0x00030002,
+ 0x9198, 0x0fff0fff, 0x00030002,
+ 0x915c, 0x0fff0fff, 0x00010000,
+ 0x3f90, 0xffff0001, 0xff000000,
+ 0x9178, 0x0fff0fff, 0x00070000,
+ 0x9194, 0x0fff0fff, 0x00070000,
+ 0x9148, 0xffff0001, 0xff000000,
+ 0x9190, 0x0fff0fff, 0x00090008,
+ 0x91ac, 0x0fff0fff, 0x00090008,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x929c, 0x00000fff, 0x00000001,
+ 0x55e4, 0xff607fff, 0xfc000100,
+ 0x8a18, 0xff000fff, 0x00000100,
+ 0x8b28, 0xff000fff, 0x00000100,
+ 0x9144, 0xfffc0fff, 0x00000100,
+ 0x6ed8, 0x00010101, 0x00010000,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0xfffffffe, 0x00000000,
+ 0xd0c0, 0xff000fff, 0x00000100,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x73773777, 0x12010001,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x98f8, 0x73773777, 0x12010001,
+ 0x98fc, 0xffffffff, 0x00000010,
+ 0x9b7c, 0x00ff0000, 0x00fc0000,
+ 0x8030, 0x00001f0f, 0x0000100a,
+ 0x2f48, 0x73773777, 0x12010001,
+ 0x2408, 0x00030000, 0x000c007f,
+ 0x8a14, 0xf000003f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x4d8, 0x00000fff, 0x00000100,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x913c, 0xffff03ff, 0x01000100,
+ 0x8c00, 0x000000ff, 0x00000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0xf700071f, 0x00000002,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x20ef8, 0x01ff01ff, 0x00000002,
+ 0x20e98, 0xfffffbff, 0x00200000,
+ 0x2015c, 0xffffffff, 0x00000f40,
+ 0x88c4, 0x001f3ae3, 0x00000082,
+ 0x8978, 0x3fffffff, 0x04050140,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 scrapper_golden_registers[] =
+{
+ 0x690, 0x3fff3fff, 0x20c00033,
+ 0x918c, 0x0fff0fff, 0x00010006,
+ 0x918c, 0x0fff0fff, 0x00010006,
+ 0x91a8, 0x0fff0fff, 0x00010006,
+ 0x91a8, 0x0fff0fff, 0x00010006,
+ 0x9150, 0xffffdfff, 0x6e944040,
+ 0x9150, 0xffffdfff, 0x6e944040,
+ 0x917c, 0x0fff0fff, 0x00030002,
+ 0x917c, 0x0fff0fff, 0x00030002,
+ 0x9198, 0x0fff0fff, 0x00030002,
+ 0x9198, 0x0fff0fff, 0x00030002,
+ 0x915c, 0x0fff0fff, 0x00010000,
+ 0x915c, 0x0fff0fff, 0x00010000,
+ 0x3f90, 0xffff0001, 0xff000000,
+ 0x3f90, 0xffff0001, 0xff000000,
+ 0x9178, 0x0fff0fff, 0x00070000,
+ 0x9178, 0x0fff0fff, 0x00070000,
+ 0x9194, 0x0fff0fff, 0x00070000,
+ 0x9194, 0x0fff0fff, 0x00070000,
+ 0x9148, 0xffff0001, 0xff000000,
+ 0x9148, 0xffff0001, 0xff000000,
+ 0x9190, 0x0fff0fff, 0x00090008,
+ 0x9190, 0x0fff0fff, 0x00090008,
+ 0x91ac, 0x0fff0fff, 0x00090008,
+ 0x91ac, 0x0fff0fff, 0x00090008,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x3f94, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x914c, 0xffff0000, 0xff000000,
+ 0x929c, 0x00000fff, 0x00000001,
+ 0x929c, 0x00000fff, 0x00000001,
+ 0x55e4, 0xff607fff, 0xfc000100,
+ 0x8a18, 0xff000fff, 0x00000100,
+ 0x8a18, 0xff000fff, 0x00000100,
+ 0x8b28, 0xff000fff, 0x00000100,
+ 0x8b28, 0xff000fff, 0x00000100,
+ 0x9144, 0xfffc0fff, 0x00000100,
+ 0x9144, 0xfffc0fff, 0x00000100,
+ 0x6ed8, 0x00010101, 0x00010000,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0xfffffffe, 0x00000000,
+ 0x9838, 0xfffffffe, 0x00000000,
+ 0xd0c0, 0xff000fff, 0x00000100,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd02c, 0xbfffff1f, 0x08421000,
+ 0xd0b8, 0x73773777, 0x12010001,
+ 0xd0b8, 0x73773777, 0x12010001,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x98f8, 0x73773777, 0x12010001,
+ 0x98f8, 0x73773777, 0x12010001,
+ 0x98fc, 0xffffffff, 0x00000010,
+ 0x98fc, 0xffffffff, 0x00000010,
+ 0x9b7c, 0x00ff0000, 0x00fc0000,
+ 0x9b7c, 0x00ff0000, 0x00fc0000,
+ 0x8030, 0x00001f0f, 0x0000100a,
+ 0x8030, 0x00001f0f, 0x0000100a,
+ 0x2f48, 0x73773777, 0x12010001,
+ 0x2f48, 0x73773777, 0x12010001,
+ 0x2408, 0x00030000, 0x000c007f,
+ 0x8a14, 0xf000003f, 0x00000007,
+ 0x8a14, 0xf000003f, 0x00000007,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b24, 0x3fff3fff, 0x00ff0fff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x4d8, 0x00000fff, 0x00000100,
+ 0x4d8, 0x00000fff, 0x00000100,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0xa008, 0xffffffff, 0x00010000,
+ 0x913c, 0xffff03ff, 0x01000100,
+ 0x913c, 0xffff03ff, 0x01000100,
+ 0x90e8, 0x001fffff, 0x010400c0,
+ 0x8c00, 0x000000ff, 0x00000003,
+ 0x8c00, 0x000000ff, 0x00000003,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c04, 0xf8ff00ff, 0x40600060,
+ 0x8c30, 0x0000000f, 0x00040005,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x8cf0, 0x1fff1fff, 0x08e00410,
+ 0x900c, 0x00ffffff, 0x0017071f,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x28350, 0x00000f01, 0x00000000,
+ 0x9508, 0xf700071f, 0x00000002,
+ 0x9508, 0xf700071f, 0x00000002,
+ 0x9688, 0x00300000, 0x0017000f,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x960c, 0xffffffff, 0x54763210,
+ 0x20ef8, 0x01ff01ff, 0x00000002,
+ 0x20e98, 0xfffffbff, 0x00200000,
+ 0x2015c, 0xffffffff, 0x00000f40,
+ 0x88c4, 0x001f3ae3, 0x00000082,
+ 0x88c4, 0x001f3ae3, 0x00000082,
+ 0x8978, 0x3fffffff, 0x04050140,
+ 0x8978, 0x3fffffff, 0x04050140,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x8974, 0xffffffff, 0x00000000,
+ 0x8974, 0xffffffff, 0x00000000
+};
+
+static void ni_init_golden_registers(struct radeon_device *rdev)
+{
+ switch (rdev->family) {
+ case CHIP_CAYMAN:
+ radeon_program_register_sequence(rdev,
+ cayman_golden_registers,
+ (const u32)ARRAY_SIZE(cayman_golden_registers));
+ radeon_program_register_sequence(rdev,
+ cayman_golden_registers2,
+ (const u32)ARRAY_SIZE(cayman_golden_registers2));
+ break;
+ case CHIP_ARUBA:
+ if ((rdev->pdev->device == 0x9900) ||
+ (rdev->pdev->device == 0x9901) ||
+ (rdev->pdev->device == 0x9903) ||
+ (rdev->pdev->device == 0x9904) ||
+ (rdev->pdev->device == 0x9905) ||
+ (rdev->pdev->device == 0x9906) ||
+ (rdev->pdev->device == 0x9907) ||
+ (rdev->pdev->device == 0x9908) ||
+ (rdev->pdev->device == 0x9909) ||
+ (rdev->pdev->device == 0x990A) ||
+ (rdev->pdev->device == 0x990B) ||
+ (rdev->pdev->device == 0x990C) ||
+ (rdev->pdev->device == 0x990D) ||
+ (rdev->pdev->device == 0x990E) ||
+ (rdev->pdev->device == 0x990F) ||
+ (rdev->pdev->device == 0x9910) ||
+ (rdev->pdev->device == 0x9913) ||
+ (rdev->pdev->device == 0x9917) ||
+ (rdev->pdev->device == 0x9918)) {
+ radeon_program_register_sequence(rdev,
+ dvst_golden_registers,
+ (const u32)ARRAY_SIZE(dvst_golden_registers));
+ radeon_program_register_sequence(rdev,
+ dvst_golden_registers2,
+ (const u32)ARRAY_SIZE(dvst_golden_registers2));
+ } else {
+ radeon_program_register_sequence(rdev,
+ scrapper_golden_registers,
+ (const u32)ARRAY_SIZE(scrapper_golden_registers));
+ radeon_program_register_sequence(rdev,
+ dvst_golden_registers2,
+ (const u32)ARRAY_SIZE(dvst_golden_registers2));
+ }
+ break;
+ default:
+ break;
+ }
+}
+
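The golden register tables above are flat arrays of (register, and_mask, or_mask) triples, which is why the total u32 count from ARRAY_SIZE() is handed to radeon_program_register_sequence(). That helper is not part of this hunk; a minimal sketch of how such a triple table can be applied (the helper name and exact semantics below are assumptions, not the driver's implementation):

	static void program_reg_triples(struct radeon_device *rdev,
					const u32 *regs, u32 array_size)
	{
		u32 i, reg, and_mask, or_mask, tmp;

		for (i = 0; i < array_size; i += 3) {
			reg = regs[i + 0];
			and_mask = regs[i + 1];
			or_mask = regs[i + 2];

			if (and_mask == 0xffffffff) {
				tmp = or_mask;		/* full overwrite */
			} else {
				tmp = RREG32(reg);	/* read-modify-write */
				tmp &= ~and_mask;
				tmp |= or_mask;
			}
			WREG32(reg, tmp);
		}
	}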
#define BTC_IO_MC_REGS_SIZE 29
static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
@@ -473,7 +749,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x990F) ||
(rdev->pdev->device == 0x9910) ||
(rdev->pdev->device == 0x9917) ||
- (rdev->pdev->device == 0x9999)) {
+ (rdev->pdev->device == 0x9999) ||
+ (rdev->pdev->device == 0x999C)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9903) ||
@@ -482,7 +759,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x990D) ||
(rdev->pdev->device == 0x990E) ||
(rdev->pdev->device == 0x9913) ||
- (rdev->pdev->device == 0x9918)) {
+ (rdev->pdev->device == 0x9918) ||
+ (rdev->pdev->device == 0x999D)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9919) ||
@@ -615,15 +893,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
}
/* enabled rb are just the one not disabled :) */
disabled_rb_mask = tmp;
+ tmp = 0;
+ for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
+ tmp |= (1 << i);
+ /* if all the backends are disabled, fix it up here */
+ if ((disabled_rb_mask & tmp) == tmp) {
+ for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
+ disabled_rb_mask &= ~(1 << i);
+ }
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ if (ASIC_IS_DCE6(rdev))
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+ WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
if ((rdev->config.cayman.max_backends_per_se == 1) &&
(rdev->flags & RADEON_IS_IGP)) {
@@ -931,6 +1222,23 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, 10); /* poll interval */
}
+void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+}
+
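The two 20-bit writes above split an 8-byte-aligned semaphore address across UVD_SEMA_ADDR_LOW/HIGH; a small sketch of the encoding (the decode line is illustrative, the hardware does that part internally):

	u32 sema_lo = (addr >> 3) & 0x000FFFFF;		/* address bits 22:3  */
	u32 sema_hi = (addr >> 23) & 0x000FFFFF;	/* address bits 42:23 */

	/* ((u64)sema_hi << 23) | ((u64)sema_lo << 3) == addr for any
	 * 8-byte-aligned address below 2^43.
	 */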
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
@@ -1682,6 +1990,16 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
+ r = rv770_uvd_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
+ if (r)
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
if (r) {
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
@@ -1748,6 +2066,18 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size,
+ R600_WB_UVD_RPTR_OFFSET,
+ UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (!r)
+ r = r600_uvd_init(rdev);
+ if (r)
+ DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+ }
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1778,6 +2108,9 @@ int cayman_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ /* init golden registers */
+ ni_init_golden_registers(rdev);
+
rdev->accel_working = true;
r = cayman_startup(rdev);
if (r) {
@@ -1794,6 +2127,8 @@ int cayman_suspend(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
+ r600_uvd_rbc_stop(rdev);
+ radeon_uvd_suspend(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
@@ -1834,6 +2169,8 @@ int cayman_init(struct radeon_device *rdev)
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
+ /* init golden registers */
+ ni_init_golden_registers(rdev);
/* Initialize scratch registers */
r600_scratch_init(rdev);
/* Initialize surface registers */
@@ -1868,6 +2205,13 @@ int cayman_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 64 * 1024);
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 4096);
+ }
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1919,6 +2263,7 @@ void cayman_fini(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
+ radeon_uvd_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
@@ -2017,28 +2362,57 @@ void cayman_vm_set_page(struct radeon_device *rdev,
}
}
} else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
+ if ((flags & RADEON_VM_PAGE_SYSTEM) ||
+ (count == 1)) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
+ if (flags & RADEON_VM_PAGE_VALID)
value = addr;
- } else {
+ else
value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
}
}
while (ib->length_dw & 0x7)
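In the new physically-contiguous branch above, one PTE_PDE packet describes ndw/2 page table entries that the async DMA engine expands by itself, which is what the pe += ndw * 4 (8 bytes per entry), addr += (ndw / 2) * incr and count -= ndw / 2 bookkeeping reflects. A CPU-side sketch of what the engine appears to generate from the (value, flags, incr) fields; pte_cpu_ptr is purely illustrative:

	u64 *entry = (u64 *)pte_cpu_ptr;	/* stand-in for the destination at 'pe' */
	u32 i;

	for (i = 0; i < ndw / 2; i++)
		entry[i] = (value + (u64)i * incr) | r600_flags;	/* one 8-byte entry each */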
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 079dee202a9e..e226faf16fea 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -45,6 +45,10 @@
#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define DMIF_ADDR_CONFIG 0xBD4
+
+/* DCE6 only */
+#define DMIF_ADDR_CALC 0xC00
+
#define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
@@ -486,6 +490,18 @@
# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
/*
+ * UVD
+ */
+#define UVD_SEMA_ADDR_LOW 0xEF00
+#define UVD_SEMA_ADDR_HIGH 0xEF04
+#define UVD_SEMA_CMD 0xEF08
+#define UVD_UDEC_ADDR_CONFIG 0xEF4C
+#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
+#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
+#define UVD_RBC_RB_RPTR 0xF690
+#define UVD_RBC_RB_WPTR 0xF694
+
+/*
* PM4
*/
#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
@@ -668,6 +684,11 @@
(((vmid) & 0xF) << 20) | \
(((n) & 0xFFFFF) << 0))
+#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
+ (1 << 26) | \
+ (1 << 21) | \
+ (((n) & 0xFFFFF) << 0))
+
/* async DMA Packet types */
#define DMA_PACKET_WRITE 0x2
#define DMA_PACKET_COPY 0x3
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9db58530be37..4973bff37fec 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -69,6 +69,38 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* and others in some cases.
*/
+static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (crtc == 0) {
+ if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
+ return true;
+ else
+ return false;
+ } else {
+ if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
+ return true;
+ else
+ return false;
+ }
+}
+
+static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 vline1, vline2;
+
+ if (crtc == 0) {
+ vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ } else {
+ vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+ }
+ if (vline1 != vline2)
+ return true;
+ else
+ return false;
+}
+
/**
* r100_wait_for_vblank - vblank wait asic callback.
*
@@ -79,36 +111,33 @@ MODULE_FIRMWARE(FIRMWARE_R520);
*/
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- int i;
+ unsigned i = 0;
if (crtc >= rdev->num_crtc)
return;
if (crtc == 0) {
- if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
- break;
- udelay(1);
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
- break;
- udelay(1);
- }
- }
+ if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
+ return;
} else {
- if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
- break;
- udelay(1);
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
- break;
- udelay(1);
- }
+ if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
+ return;
+ }
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (r100_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!r100_is_counter_moving(rdev, crtc))
+ break;
+ }
+ }
+
+ while (!r100_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!r100_is_counter_moving(rdev, crtc))
+ break;
}
}
}
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index c0dc8d3ba0bb..1dd0d32993d5 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -358,7 +358,9 @@
#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
+#define AVIVO_D1MODE_MASTER_UPDATE_LOCK 0x60e0
#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
+#define AVIVO_D1CRTC_UPDATE_LOCK 0x60e8
/* master controls */
#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0740db3fcd22..1a08008c978b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1145,7 +1145,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
}
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
- size_af = 0xFFFFFFFF - mc->gtt_end;
+ size_af = mc->mc_mask - mc->gtt_end;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
@@ -2552,6 +2552,193 @@ void r600_dma_fini(struct radeon_device *rdev)
}
/*
+ * UVD
+ */
+int r600_uvd_rbc_start(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ uint64_t rptr_addr;
+ uint32_t rb_bufsz, tmp;
+ int r;
+
+ rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
+
+ if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
+ DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
+ return -EINVAL;
+ }
+
+ /* force RBC into idle state */
+ WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Set the write pointer delay */
+ WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
+
+ /* set the wb address */
+ WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
+
+ /* program the 4GB memory segment for rptr and ring buffer */
+ WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
+ (0x7 << 16) | (0x1 << 31));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(UVD_RBC_RB_RPTR, 0x0);
+
+ ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
+ WREG32(UVD_RBC_RB_WPTR, ring->wptr);
+
+ /* set the ring address */
+ WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
+
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(ring->ring_size);
+ rb_bufsz = (0x1 << 8) | rb_bufsz;
+ WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
+
+ ring->ready = true;
+ r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ r = radeon_ring_lock(rdev, ring, 10);
+ if (r) {
+ DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
+ return r;
+ }
+
+ tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ /* Clear timeout status bits */
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
+ radeon_ring_write(ring, 0x8);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
+ radeon_ring_write(ring, 3);
+
+ radeon_ring_unlock_commit(rdev, ring);
+
+ return 0;
+}
+
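UVD_RBC_RB_CNTL above encodes the ring size as a power-of-two order via drm_order(); for the 4 KB UVD ring set up in cayman_init() this works out as below (the meaning of bit 8 is taken from the code above, not from documentation):

	u32 rb_bufsz = drm_order(4096);		/* log2(4096) = 12 */
	u32 rb_cntl  = (0x1 << 8) | rb_bufsz;	/* 0x0000010c */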
+void r600_uvd_rbc_stop(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+
+ /* force RBC into idle state */
+ WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+ ring->ready = false;
+}
+
+int r600_uvd_init(struct radeon_device *rdev)
+{
+ int i, j, r;
+
+ /* raise clocks while booting up the VCPU */
+ radeon_set_uvd_clocks(rdev, 53300, 40000);
+
+ /* disable clock gating */
+ WREG32(UVD_CGC_GATE, 0);
+
+ /* disable interrupt */
+ WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
+
+ /* put LMI, VCPU, RBC etc... into reset */
+ WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
+ LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
+ CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
+ mdelay(5);
+
+ /* take UVD block out of reset */
+ WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
+ mdelay(5);
+
+ /* initialize UVD memory controller */
+ WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
+ (1 << 21) | (1 << 9) | (1 << 20));
+
+ /* disable byte swapping */
+ WREG32(UVD_LMI_SWAP_CNTL, 0);
+ WREG32(UVD_MP_SWAP_CNTL, 0);
+
+ WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
+ WREG32(UVD_MPC_SET_MUXA1, 0x0);
+ WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
+ WREG32(UVD_MPC_SET_MUXB1, 0x0);
+ WREG32(UVD_MPC_SET_ALU, 0);
+ WREG32(UVD_MPC_SET_MUX, 0x88);
+
+ /* Stall UMC */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+
+ /* take all subblocks out of reset, except VCPU */
+ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+ mdelay(5);
+
+ /* enable VCPU clock */
+ WREG32(UVD_VCPU_CNTL, 1 << 9);
+
+ /* enable UMC */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+
+ /* boot up the VCPU */
+ WREG32(UVD_SOFT_RESET, 0);
+ mdelay(10);
+
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
+ for (i = 0; i < 10; ++i) {
+ uint32_t status;
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(UVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ }
+ r = 0;
+ if (status & 2)
+ break;
+
+ DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
+ mdelay(10);
+ WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
+ mdelay(10);
+ r = -1;
+ }
+
+ if (r) {
+ DRM_ERROR("UVD not responding, giving up!!!\n");
+ radeon_set_uvd_clocks(rdev, 0, 0);
+ return r;
+ }
+
+ /* enable interrupt */
+ WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
+
+ r = r600_uvd_rbc_start(rdev);
+ if (!r)
+ DRM_INFO("UVD initialized successfully.\n");
+
+ /* lower clocks again */
+ radeon_set_uvd_clocks(rdev, 0, 0);
+
+ return r;
+}
+
+/*
* GPU scratch registers helpers function.
*/
void r600_scratch_init(struct radeon_device *rdev)
@@ -2660,6 +2847,40 @@ int r600_dma_ring_test(struct radeon_device *rdev,
return r;
}
+int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
+ r = radeon_ring_lock(rdev, ring, 3);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(UVD_CONTEXT_ID);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
/*
* CP fences/semaphores
*/
@@ -2711,6 +2932,30 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
+void r600_uvd_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 2);
+ return;
+}
+
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
@@ -2780,6 +3025,23 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
}
+void r600_uvd_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, emit_wait ? 1 : 0);
+}
+
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
@@ -3183,6 +3445,16 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, ib->length_dw);
}
+void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
+ radeon_ring_write(ring, ib->gpu_addr);
+ radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
+ radeon_ring_write(ring, ib->length_dw);
+}
+
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_ib ib;
@@ -3300,6 +3572,41 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
+int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_fence *fence = NULL;
+ int r;
+
+ r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+ if (r) {
+ DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+ if (r) {
+ DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
+ if (r) {
+ DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ goto error;
+ }
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+error:
+ radeon_fence_unref(&fence);
+ radeon_set_uvd_clocks(rdev, 0, 0);
+ return r;
+}
+
/**
* r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
*
@@ -4232,7 +4539,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
- u32 link_width_cntl, mask, target_reg;
+ u32 link_width_cntl, mask;
if (rdev->flags & RADEON_IS_IGP)
return;
@@ -4244,7 +4551,7 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
if (ASIC_IS_X2(rdev))
return;
- /* FIXME wait for idle */
+ radeon_gui_idle(rdev);
switch (lanes) {
case 0:
@@ -4263,53 +4570,24 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
break;
case 12:
+ /* not actually supported */
mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
break;
case 16:
- default:
mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
break;
- }
-
- link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
-
- if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
- (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
- return;
-
- if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
+ default:
+ DRM_ERROR("invalid pcie lane request: %d\n", lanes);
return;
+ }
- link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
- RADEON_PCIE_LC_RECONFIG_NOW |
- R600_PCIE_LC_RENEGOTIATE_EN |
- R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
- link_width_cntl |= mask;
-
- WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
-
- /* some northbridges can renegotiate the link rather than requiring
- * a complete re-config.
- * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
- */
- if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
- link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
- else
- link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
-
- WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
- RADEON_PCIE_LC_RECONFIG_NOW));
-
- if (rdev->family >= CHIP_RV770)
- target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
- else
- target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
-
- /* wait for lane set to complete */
- link_width_cntl = RREG32(target_reg);
- while (link_width_cntl == 0xffffffff)
- link_width_cntl = RREG32(target_reg);
+ link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
+ link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
+ link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
+ R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+ WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
int r600_get_pcie_lanes(struct radeon_device *rdev)
@@ -4326,13 +4604,11 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
if (ASIC_IS_X2(rdev))
return 0;
- /* FIXME wait for idle */
+ radeon_gui_idle(rdev);
- link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
- case RADEON_PCIE_LC_LINK_WIDTH_X0:
- return 0;
case RADEON_PCIE_LC_LINK_WIDTH_X1:
return 1;
case RADEON_PCIE_LC_LINK_WIDTH_X2:
@@ -4341,6 +4617,10 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
return 4;
case RADEON_PCIE_LC_LINK_WIDTH_X8:
return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X12:
+ /* not actually supported */
+ return 12;
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
case RADEON_PCIE_LC_LINK_WIDTH_X16:
default:
return 16;
@@ -4378,7 +4658,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if (!(mask & DRM_PCIE_SPEED_50))
return;
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -4391,23 +4671,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
(rdev->family == CHIP_RV620) ||
(rdev->family == CHIP_RV635)) {
/* advertise upconfig capability */
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
LC_RECONFIG_ARC_MISSING_ESCAPE);
link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
} else {
link_width_cntl |= LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
@@ -4428,7 +4708,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
tmp = RREG32(0x541c);
WREG32(0x541c, tmp | 0x8);
@@ -4442,27 +4722,27 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if ((rdev->family == CHIP_RV670) ||
(rdev->family == CHIP_RV620) ||
(rdev->family == CHIP_RV635)) {
- training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
+ training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
training_cntl &= ~LC_POINT_7_PLUS_EN;
- WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
} else {
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
}
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_GEN2_EN_STRAP;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
} else {
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
if (1)
link_width_cntl |= LC_UPCONFIGURE_DIS;
else
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index cb03fe22b0ab..c92eb86a8e55 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,10 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
- || rdev->family == CHIP_RS600
- || rdev->family == CHIP_RS690
- || rdev->family == CHIP_RS740;
+ return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev);
}
struct r600_audio r600_audio_status(struct radeon_device *rdev)
@@ -184,65 +181,6 @@ int r600_audio_init(struct radeon_device *rdev)
}
/*
- * atach the audio codec to the clock source of the encoder
- */
-void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- int base_rate = 48000;
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
- break;
- default:
- dev_err(rdev->dev, "Unsupported encoder type 0x%02X\n",
- radeon_encoder->encoder_id);
- return;
- }
-
- if (ASIC_IS_DCE4(rdev)) {
- /* TODO: other PLLs? */
- WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
- WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
- WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
-
- /* Select DTO source */
- WREG32(0x5ac, radeon_crtc->crtc_id);
- } else {
- switch (dig->dig_encoder) {
- case 0:
- WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
- WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 0);
- break;
-
- case 1:
- WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
- WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 1);
- break;
- default:
- dev_err(rdev->dev,
- "Unsupported DIG on encoder 0x%02X\n",
- radeon_encoder->encoder_id);
- return;
- }
- }
-}
-
-/*
* release the audio timer
* TODO: How to do this correctly on SMP systems?
*/
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 21ecc0e12dc4..47f180a79352 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -226,6 +226,39 @@ static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
value, ~HDMI0_AUDIO_TEST_EN);
}
+void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u32 base_rate = 48000;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
+ * doesn't matter which one you use. Just use the first one.
+ */
+ /* XXX: properly calculate this */
+ /* XXX two dtos; generally use dto0 for hdmi */
+ /* Express [24MHz / target pixel clock] as an exact rational
+ * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ */
+ if (ASIC_IS_DCE3(rdev)) {
+ /* according to the reg specs, this should be DCE3.2 only, but in
+ * practice it seems to cover DCE3.0 as well.
+ */
+ WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 50);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ } else {
+ /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
+ WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate * 50) |
+ AUDIO_DTO_MODULE(clock * 100));
+ }
+}
/*
* update the info frames with the data from the current display mode
@@ -246,7 +279,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
return;
offset = dig->afmt->offset;
- r600_audio_set_clock(encoder, mode->clock);
+ r600_audio_set_dto(encoder, mode->clock);
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
HDMI0_NULL_SEND); /* send null packets when required */
@@ -415,114 +448,73 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
/*
* enable the HDMI engine
*/
-void r600_hdmi_enable(struct drm_encoder *encoder)
+void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset;
- u32 hdmi;
-
- if (ASIC_IS_DCE6(rdev))
- return;
+ u32 hdmi = HDMI0_ERROR_ACK;
/* Silent, r600_hdmi_enable will raise WARN for us */
- if (dig->afmt->enabled)
+ if (enable && dig->afmt->enabled)
+ return;
+ if (!enable && !dig->afmt->enabled)
return;
- offset = dig->afmt->offset;
/* Older chipsets require setting HDMI and routing manually */
- if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
- hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
+ if (!ASIC_IS_DCE3(rdev)) {
+ if (enable)
+ hdmi |= HDMI0_ENABLE;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
- ~AVIVO_TMDSA_CNTL_HDMI_EN);
- hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
+ if (enable) {
+ WREG32_OR(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
+ } else {
+ WREG32_AND(AVIVO_TMDSA_CNTL, ~AVIVO_TMDSA_CNTL_HDMI_EN);
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
- ~AVIVO_LVTMA_CNTL_HDMI_EN);
- hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+ if (enable) {
+ WREG32_OR(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+ } else {
+ WREG32_AND(AVIVO_LVTMA_CNTL, ~AVIVO_LVTMA_CNTL_HDMI_EN);
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
- WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN);
- hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+ if (enable) {
+ WREG32_OR(DDIA_CNTL, DDIA_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+ } else {
+ WREG32_AND(DDIA_CNTL, ~DDIA_HDMI_EN);
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
+ if (enable)
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
break;
default:
dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
radeon_encoder->encoder_id);
break;
}
- WREG32(HDMI0_CONTROL + offset, hdmi);
+ WREG32(HDMI0_CONTROL + dig->afmt->offset, hdmi);
}
if (rdev->irq.installed) {
/* if irq is available use it */
- radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
+ /* XXX: shouldn't need this on any asics. Double check DCE2/3 */
+ if (enable)
+ radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
+ else
+ radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
}
- dig->afmt->enabled = true;
+ dig->afmt->enabled = enable;
- DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- offset, radeon_encoder->encoder_id);
+ DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+ enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
}
-/*
- * disable the HDMI engine
- */
-void r600_hdmi_disable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- uint32_t offset;
-
- if (ASIC_IS_DCE6(rdev))
- return;
-
- /* Called for ATOM_ENCODER_MODE_HDMI only */
- if (!dig || !dig->afmt) {
- return;
- }
- if (!dig->afmt->enabled)
- return;
- offset = dig->afmt->offset;
-
- DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- offset, radeon_encoder->encoder_id);
-
- /* disable irq */
- radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
-
- /* Older chipsets not handled by AtomBIOS */
- if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, 0,
- ~AVIVO_TMDSA_CNTL_HDMI_EN);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, 0,
- ~AVIVO_LVTMA_CNTL_HDMI_EN);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- break;
- default:
- dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
- radeon_encoder->encoder_id);
- break;
- }
- WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
- }
-
- dig->afmt->enabled = false;
-}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index a42ba11a3bed..acb146c06973 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -691,6 +691,7 @@
#define SRBM_SOFT_RESET 0xe60
# define SOFT_RESET_DMA (1 << 12)
# define SOFT_RESET_RLC (1 << 13)
+# define SOFT_RESET_UVD (1 << 18)
# define RV770_SOFT_RESET_DMA (1 << 20)
#define CP_INT_CNTL 0xc124
@@ -909,7 +910,12 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
-/* Audio clocks */
+/* Audio clocks DCE 2.0/3.0 */
+#define AUDIO_DTO 0x7340
+# define AUDIO_DTO_PHASE(x) (((x) & 0xffff) << 0)
+# define AUDIO_DTO_MODULE(x) (((x) & 0xffff) << 16)
+
+/* Audio clocks DCE 3.2 */
#define DCCG_AUDIO_DTO0_PHASE 0x0514
#define DCCG_AUDIO_DTO0_MODULE 0x0518
#define DCCG_AUDIO_DTO0_LOAD 0x051c
@@ -1143,6 +1149,70 @@
# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
/*
+ * UVD
+ */
+#define UVD_SEMA_ADDR_LOW 0xef00
+#define UVD_SEMA_ADDR_HIGH 0xef04
+#define UVD_SEMA_CMD 0xef08
+
+#define UVD_GPCOM_VCPU_CMD 0xef0c
+#define UVD_GPCOM_VCPU_DATA0 0xef10
+#define UVD_GPCOM_VCPU_DATA1 0xef14
+#define UVD_ENGINE_CNTL 0xef18
+
+#define UVD_SEMA_CNTL 0xf400
+#define UVD_RB_ARB_CTRL 0xf480
+
+#define UVD_LMI_EXT40_ADDR 0xf498
+#define UVD_CGC_GATE 0xf4a8
+#define UVD_LMI_CTRL2 0xf4f4
+#define UVD_MASTINT_EN 0xf500
+#define UVD_LMI_ADDR_EXT 0xf594
+#define UVD_LMI_CTRL 0xf598
+#define UVD_LMI_SWAP_CNTL 0xf5b4
+#define UVD_MP_SWAP_CNTL 0xf5bC
+#define UVD_MPC_CNTL 0xf5dC
+#define UVD_MPC_SET_MUXA0 0xf5e4
+#define UVD_MPC_SET_MUXA1 0xf5e8
+#define UVD_MPC_SET_MUXB0 0xf5eC
+#define UVD_MPC_SET_MUXB1 0xf5f0
+#define UVD_MPC_SET_MUX 0xf5f4
+#define UVD_MPC_SET_ALU 0xf5f8
+
+#define UVD_VCPU_CNTL 0xf660
+#define UVD_SOFT_RESET 0xf680
+#define RBC_SOFT_RESET (1<<0)
+#define LBSI_SOFT_RESET (1<<1)
+#define LMI_SOFT_RESET (1<<2)
+#define VCPU_SOFT_RESET (1<<3)
+#define CSM_SOFT_RESET (1<<5)
+#define CXW_SOFT_RESET (1<<6)
+#define TAP_SOFT_RESET (1<<7)
+#define LMI_UMC_SOFT_RESET (1<<13)
+#define UVD_RBC_IB_BASE 0xf684
+#define UVD_RBC_IB_SIZE 0xf688
+#define UVD_RBC_RB_BASE 0xf68c
+#define UVD_RBC_RB_RPTR 0xf690
+#define UVD_RBC_RB_WPTR 0xf694
+#define UVD_RBC_RB_WPTR_CNTL 0xf698
+
+#define UVD_STATUS 0xf6bc
+
+#define UVD_SEMA_TIMEOUT_STATUS 0xf6c0
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0xf6c4
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0xf6c8
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0xf6cc
+
+#define UVD_RBC_RB_CNTL 0xf6a4
+#define UVD_RBC_RB_RPTR_ADDR 0xf6a8
+
+#define UVD_CONTEXT_ID 0xf6f4
+
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+
+/*
* PM4
*/
#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8263af3fd832..1442ce765d48 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -95,6 +95,7 @@ extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;
extern int radeon_lockup_timeout;
+extern int radeon_fastfb;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -109,24 +110,27 @@ extern int radeon_lockup_timeout;
#define RADEON_BIOS_NUM_SCRATCH 8
/* max number of rings */
-#define RADEON_NUM_RINGS 5
+#define RADEON_NUM_RINGS 6
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
/* internal ring indices */
/* r1xx+ has gfx CP ring */
-#define RADEON_RING_TYPE_GFX_INDEX 0
+#define RADEON_RING_TYPE_GFX_INDEX 0
/* cayman has 2 compute CP rings */
-#define CAYMAN_RING_TYPE_CP1_INDEX 1
-#define CAYMAN_RING_TYPE_CP2_INDEX 2
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
/* R600+ has an async dma ring */
#define R600_RING_TYPE_DMA_INDEX 3
/* cayman adds a second async dma ring */
#define CAYMAN_RING_TYPE_DMA1_INDEX 4
+/* R600+ has a UVD ring */
+#define R600_RING_TYPE_UVD_INDEX 5
+
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
@@ -202,6 +206,11 @@ void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
+int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
+ u8 clock_type,
+ u32 clock,
+ bool strobe_mode,
+ struct atom_clock_dividers *dividers);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
@@ -349,7 +358,8 @@ struct radeon_bo {
struct radeon_device *rdev;
struct drm_gem_object gem_base;
- struct ttm_bo_kmap_obj dma_buf_vmap;
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ pid_t pid;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -357,11 +367,14 @@ struct radeon_bo_list {
struct ttm_validate_buffer tv;
struct radeon_bo *bo;
uint64_t gpu_offset;
- unsigned rdomain;
- unsigned wdomain;
+ bool written;
+ unsigned domain;
+ unsigned alt_domain;
u32 tiling_flags;
};
+int radeon_gem_debugfs_init(struct radeon_device *rdev);
+
/* sub-allocation manager, it has to be protected by another lock.
* By conception this is an helper for other part of the driver
* like the indirect buffer or semaphore, which both have their
@@ -517,6 +530,7 @@ struct radeon_mc {
bool vram_is_ddr;
bool igp_sideport_enabled;
u64 gtt_base_align;
+ u64 mc_mask;
};
bool radeon_combios_sideport_present(struct radeon_device *rdev);
@@ -918,6 +932,7 @@ struct radeon_wb {
#define R600_WB_DMA_RPTR_OFFSET 1792
#define R600_WB_IH_WPTR_OFFSET 2048
#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
+#define R600_WB_UVD_RPTR_OFFSET 2560
#define R600_WB_EVENT_OFFSET 3072
/**
@@ -1118,6 +1133,46 @@ struct radeon_pm {
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance);
+/*
+ * UVD
+ */
+#define RADEON_MAX_UVD_HANDLES 10
+#define RADEON_UVD_STACK_SIZE (1024*1024)
+#define RADEON_UVD_HEAP_SIZE (1024*1024)
+
+struct radeon_uvd {
+ struct radeon_bo *vcpu_bo;
+ void *cpu_addr;
+ uint64_t gpu_addr;
+ atomic_t handles[RADEON_MAX_UVD_HANDLES];
+ struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
+ struct delayed_work idle_work;
+};
+
+int radeon_uvd_init(struct radeon_device *rdev);
+void radeon_uvd_fini(struct radeon_device *rdev);
+int radeon_uvd_suspend(struct radeon_device *rdev);
+int radeon_uvd_resume(struct radeon_device *rdev);
+int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence);
+int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence);
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
+void radeon_uvd_free_handles(struct radeon_device *rdev,
+ struct drm_file *filp);
+int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
+void radeon_uvd_note_usage(struct radeon_device *rdev);
+int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+ unsigned vclk, unsigned dclk,
+ unsigned vco_min, unsigned vco_max,
+ unsigned fb_factor, unsigned fb_mask,
+ unsigned pd_min, unsigned pd_max,
+ unsigned pd_even,
+ unsigned *optimal_fb_div,
+ unsigned *optimal_vclk_div,
+ unsigned *optimal_dclk_div);
+int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
+ unsigned cg_upll_func_cntl);
struct r600_audio {
int channels;
@@ -1229,6 +1284,9 @@ struct radeon_asic {
void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
/* get backlight level */
u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
+ /* audio callbacks */
+ void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
+ void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
} display;
/* copy functions for bo handling */
struct {
@@ -1281,6 +1339,7 @@ struct radeon_asic {
int (*get_pcie_lanes)(struct radeon_device *rdev);
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+ int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
} pm;
/* pageflipping */
struct {
@@ -1443,6 +1502,7 @@ struct si_asic {
unsigned multi_gpu_tile_size;
unsigned tile_config;
+ uint32_t tile_mode_array[32];
};
union radeon_asic_config {
@@ -1608,6 +1668,7 @@ struct radeon_device {
struct radeon_asic *asic;
struct radeon_gem gem;
struct radeon_pm pm;
+ struct radeon_uvd uvd;
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
@@ -1615,12 +1676,14 @@ struct radeon_device {
bool suspend;
bool need_dma32;
bool accel_working;
+ bool fastfb_working; /* IGP feature*/
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
const struct firmware *mc_fw; /* NI MC firmware */
const struct firmware *ce_fw; /* SI CE firmware */
+ const struct firmware *uvd_fw; /* UVD firmware */
struct r600_blit r600_blit;
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
@@ -1688,8 +1751,8 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
-#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
-#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
+#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32(reg); \
@@ -1697,6 +1760,8 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
tmp_ |= ((val) & ~(mask)); \
WREG32(reg, tmp_); \
} while (0)
+#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
#define WREG32_PLL_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32_PLL(reg); \
@@ -1830,6 +1895,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
+#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
+#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
@@ -1845,6 +1912,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
+#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
@@ -1892,6 +1960,9 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
+extern void radeon_program_register_sequence(struct radeon_device *rdev,
+ const u32 *registers,
+ const u32 array_size);
/*
* vm
@@ -1964,9 +2035,6 @@ struct radeon_hdmi_acr {
extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
-extern void r600_hdmi_enable(struct drm_encoder *encoder);
-extern void r600_hdmi_disable(struct drm_encoder *encoder);
-extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
@@ -1977,8 +2045,6 @@ extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
* evergreen functions used by radeon_encoder.c
*/
-extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-
extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);
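[Editorial aside, not part of the patch] The WREG32_AND()/WREG32_OR() helpers added above are thin wrappers around the existing WREG32_P() read-modify-write macro (its masking step sits in unchanged context between the two hunks). WREG32_P(reg, val, mask) keeps the bits selected by mask and merges val into the rest, so the wrappers reduce to reg &= mask and reg |= or. A small illustrative pairing (the use against SRBM_SOFT_RESET is only an example, not a call made by this patch):

/* set a bit: WREG32_OR(reg, bits) == WREG32_P(reg, bits, ~(bits)) == reg |= bits */
WREG32_OR(SRBM_SOFT_RESET, SOFT_RESET_UVD);

/* clear it again: WREG32_AND(reg, mask) == WREG32_P(reg, 0, mask) == reg &= mask */
WREG32_AND(SRBM_SOFT_RESET, ~SOFT_RESET_UVD);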
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index aba0a893ea98..6417132c50cf 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -656,6 +656,8 @@ static struct radeon_asic rs600_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r100_copy_blit,
@@ -732,6 +734,8 @@ static struct radeon_asic rs690_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r100_copy_blit,
@@ -970,6 +974,8 @@ static struct radeon_asic r600_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1056,6 +1062,8 @@ static struct radeon_asic rs780_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1130,6 +1138,15 @@ static struct radeon_asic rv770_asic = {
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &r600_dma_is_lockup,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &r600_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1142,6 +1159,8 @@ static struct radeon_asic rv770_asic = {
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &r600_hdmi_enable,
+ .hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1174,6 +1193,7 @@ static struct radeon_asic rv770_asic = {
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_uvd_clocks = &rv770_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
@@ -1216,6 +1236,15 @@ static struct radeon_asic evergreen_asic = {
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &evergreen_dma_is_lockup,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &r600_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1228,6 +1257,8 @@ static struct radeon_asic evergreen_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1260,6 +1291,7 @@ static struct radeon_asic evergreen_asic = {
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
+ .set_uvd_clocks = &evergreen_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -1302,6 +1334,15 @@ static struct radeon_asic sumo_asic = {
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &evergreen_dma_is_lockup,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &r600_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1314,6 +1355,8 @@ static struct radeon_asic sumo_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1346,6 +1389,7 @@ static struct radeon_asic sumo_asic = {
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
+ .set_uvd_clocks = &sumo_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -1388,6 +1432,15 @@ static struct radeon_asic btc_asic = {
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &evergreen_dma_is_lockup,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &r600_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1400,6 +1453,8 @@ static struct radeon_asic btc_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1429,9 +1484,10 @@ static struct radeon_asic btc_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
+ .set_uvd_clocks = &evergreen_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -1517,6 +1573,15 @@ static struct radeon_asic cayman_asic = {
.ib_test = &r600_dma_ib_test,
.is_lockup = &cayman_dma_is_lockup,
.vm_flush = &cayman_dma_vm_flush,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &cayman_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1529,6 +1594,8 @@ static struct radeon_asic cayman_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_blit,
@@ -1558,9 +1625,10 @@ static struct radeon_asic cayman_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
+ .set_uvd_clocks = &evergreen_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -1646,6 +1714,15 @@ static struct radeon_asic trinity_asic = {
.ib_test = &r600_dma_ib_test,
.is_lockup = &cayman_dma_is_lockup,
.vm_flush = &cayman_dma_vm_flush,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &cayman_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1690,6 +1767,7 @@ static struct radeon_asic trinity_asic = {
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
+ .set_uvd_clocks = &sumo_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -1775,6 +1853,15 @@ static struct radeon_asic si_asic = {
.ib_test = &r600_dma_ib_test,
.is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
+ },
+ [R600_RING_TYPE_UVD_INDEX] = {
+ .ib_execute = &r600_uvd_ib_execute,
+ .emit_fence = &r600_uvd_fence_emit,
+ .emit_semaphore = &cayman_uvd_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &r600_uvd_ring_test,
+ .ib_test = &r600_uvd_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
@@ -1816,9 +1903,10 @@ static struct radeon_asic si_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
+ .set_uvd_clocks = &si_set_uvd_clocks,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3535f73ad3e2..2c87365d345f 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -330,6 +330,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence **fence);
@@ -373,11 +374,12 @@ void r600_disable_interrupts(struct radeon_device *rdev);
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
-void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
struct r600_audio r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
/* r600 blit */
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
struct radeon_fence **fence, struct radeon_sa_bo **vb,
@@ -392,6 +394,19 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
u32 r600_get_xclk(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
+/* uvd */
+int r600_uvd_init(struct radeon_device *rdev);
+int r600_uvd_rbc_start(struct radeon_device *rdev);
+void r600_uvd_rbc_stop(struct radeon_device *rdev);
+int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void r600_uvd_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+void r600_uvd_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+
/*
* rv770,rv730,rv710,rv740
*/
@@ -409,6 +424,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence **fence);
u32 rv770_get_xclk(struct radeon_device *rdev);
+int rv770_uvd_resume(struct radeon_device *rdev);
+int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
/*
* evergreen
@@ -444,6 +461,8 @@ extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
extern void sumo_pm_init_profile(struct radeon_device *rdev);
extern void btc_pm_init_profile(struct radeon_device *rdev);
+int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
@@ -459,12 +478,18 @@ int evergreen_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
+void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
/*
* cayman
*/
void cayman_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
+void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
@@ -524,5 +549,6 @@ int si_copy_dma(struct radeon_device *rdev,
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
+int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f22eb5713528..dea6f63c9724 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2028,6 +2028,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
num_modes = power_info->info.ucNumOfPowerModeEntries;
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ if (num_modes == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
@@ -2307,7 +2309,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
- if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
+ if ((rdev->family >= CHIP_BARTS) && !(rdev->flags & RADEON_IS_IGP)) {
/* NI chips post without MC ucode, so default clocks are strobe mode only */
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2345,7 +2347,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
}
- } else if (ASIC_IS_DCE6(rdev)) {
+ } else if (rdev->family >= CHIP_TAHITI) {
sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
sclk |= clock_info->si.ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
@@ -2358,7 +2360,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
le16_to_cpu(clock_info->si.usVDDC);
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
le16_to_cpu(clock_info->si.usVDDCI);
- } else if (ASIC_IS_DCE4(rdev)) {
+ } else if (rdev->family >= CHIP_CEDAR) {
sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
@@ -2432,6 +2434,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ if (power_info->pplib.ucNumStates == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
power_info->pplib.ucNumStates, GFP_KERNEL);
if (!rdev->pm.power_state)
@@ -2514,6 +2518,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
+ u8 *power_state_offset;
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
@@ -2530,15 +2535,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_info_array = (struct _NonClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+ if (state_array->ucNumEntries == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
state_array->ucNumEntries, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
+ power_state_offset = (u8 *)state_array->states;
for (i = 0; i < state_array->ucNumEntries; i++) {
mode_index = 0;
- power_state = (union pplib_power_state *)&state_array->states[i];
- /* XXX this might be an inagua bug... */
- non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
@@ -2550,9 +2557,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
if (power_state->v2.ucNumDPMLevels) {
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
clock_array_index = power_state->v2.clockInfoIndex[j];
- /* XXX this might be an inagua bug... */
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
clock_info = (union pplib_clock_info *)
&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
valid = radeon_atombios_parse_pplib_clock_info(rdev,
@@ -2574,6 +2578,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_info);
state_index++;
}
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
}
/* if multiple clock modes, mark the lowest as no display */
for (i = 0; i < state_index; i++) {
@@ -2620,7 +2625,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
default:
break;
}
- } else {
+ }
+
+ if (state_index == 0) {
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
if (rdev->pm.power_state) {
rdev->pm.power_state[0].clock_info =
@@ -2654,6 +2661,111 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.current_vddc = 0;
}
+union get_clock_dividers {
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
+ struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
+};
+
+int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
+ u8 clock_type,
+ u32 clock,
+ bool strobe_mode,
+ struct atom_clock_dividers *dividers)
+{
+ union get_clock_dividers args;
+ int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
+ u8 frev, crev;
+
+ memset(&args, 0, sizeof(args));
+ memset(dividers, 0, sizeof(struct atom_clock_dividers));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return -EINVAL;
+
+ switch (crev) {
+ case 1:
+ /* r4xx, r5xx */
+ args.v1.ucAction = clock_type;
+ args.v1.ulClock = cpu_to_le32(clock); /* 10 kHz */
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v1.ucPostDiv;
+ dividers->fb_div = args.v1.ucFbDiv;
+ dividers->enable_post_div = true;
+ break;
+ case 2:
+ case 3:
+ /* r6xx, r7xx, evergreen, ni */
+ if (rdev->family <= CHIP_RV770) {
+ args.v2.ucAction = clock_type;
+ args.v2.ulClock = cpu_to_le32(clock); /* 10 kHz */
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v2.ucPostDiv;
+ dividers->fb_div = le16_to_cpu(args.v2.usFbDiv);
+ dividers->ref_div = args.v2.ucAction;
+ if (rdev->family == CHIP_RV770) {
+ dividers->enable_post_div = (le32_to_cpu(args.v2.ulClock) & (1 << 24)) ?
+ true : false;
+ dividers->vco_mode = (le32_to_cpu(args.v2.ulClock) & (1 << 25)) ? 1 : 0;
+ } else
+ dividers->enable_post_div = (dividers->fb_div & 1) ? true : false;
+ } else {
+ if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
+ args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v3.ucPostDiv;
+ dividers->enable_post_div = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+ dividers->enable_dithen = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+ dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+ dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
+ dividers->ref_div = args.v3.ucRefDiv;
+ dividers->vco_mode = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+ } else {
+ args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+ if (strobe_mode)
+ args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v5.ucPostDiv;
+ dividers->enable_post_div = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+ dividers->enable_dithen = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+ dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
+ dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
+ dividers->ref_div = args.v5.ucRefDiv;
+ dividers->vco_mode = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+ }
+ }
+ break;
+ case 4:
+ /* fusion */
+ args.v4.ulClock = cpu_to_le32(clock); /* 10 kHz */
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v4.ucPostDiv;
+ dividers->real_clock = le32_to_cpu(args.v4.ulClock);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
{
DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 70d38241b083..7e265a58141f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -63,30 +63,50 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
break;
}
}
- if (!duplicate) {
- p->relocs[i].gobj = drm_gem_object_lookup(ddev,
- p->filp,
- r->handle);
- if (p->relocs[i].gobj == NULL) {
- DRM_ERROR("gem object lookup failed 0x%x\n",
- r->handle);
- return -ENOENT;
- }
- p->relocs_ptr[i] = &p->relocs[i];
- p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
- p->relocs[i].lobj.bo = p->relocs[i].robj;
- p->relocs[i].lobj.wdomain = r->write_domain;
- p->relocs[i].lobj.rdomain = r->read_domains;
- p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
- p->relocs[i].handle = r->handle;
- p->relocs[i].flags = r->flags;
- radeon_bo_list_add_object(&p->relocs[i].lobj,
- &p->validated);
-
- } else
+ if (duplicate) {
p->relocs[i].handle = 0;
+ continue;
+ }
+
+ p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
+ r->handle);
+ if (p->relocs[i].gobj == NULL) {
+ DRM_ERROR("gem object lookup failed 0x%x\n",
+ r->handle);
+ return -ENOENT;
+ }
+ p->relocs_ptr[i] = &p->relocs[i];
+ p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+ p->relocs[i].lobj.bo = p->relocs[i].robj;
+ p->relocs[i].lobj.written = !!r->write_domain;
+
+ /* the first reloc of a UVD job is the
+ msg and that must be in VRAM */
+ if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) {
+ /* TODO: is this still needed for NI+ ? */
+ p->relocs[i].lobj.domain =
+ RADEON_GEM_DOMAIN_VRAM;
+
+ p->relocs[i].lobj.alt_domain =
+ RADEON_GEM_DOMAIN_VRAM;
+
+ } else {
+ uint32_t domain = r->write_domain ?
+ r->write_domain : r->read_domains;
+
+ p->relocs[i].lobj.domain = domain;
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ domain |= RADEON_GEM_DOMAIN_GTT;
+ p->relocs[i].lobj.alt_domain = domain;
+ }
+
+ p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
+ p->relocs[i].handle = r->handle;
+
+ radeon_bo_list_add_object(&p->relocs[i].lobj,
+ &p->validated);
}
- return radeon_bo_list_validate(&p->validated);
+ return radeon_bo_list_validate(&p->validated, p->ring);
}
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
@@ -121,6 +141,9 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return -EINVAL;
}
break;
+ case RADEON_CS_RING_UVD:
+ p->ring = R600_RING_TYPE_UVD_INDEX;
+ break;
}
return 0;
}
@@ -241,15 +264,15 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
}
- /* we only support VM on SI+ */
- if ((p->rdev->family >= CHIP_TAHITI) &&
- ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
- DRM_ERROR("VM required on SI+!\n");
+ if (radeon_cs_get_ring(p, ring, priority))
return -EINVAL;
- }
- if (radeon_cs_get_ring(p, ring, priority))
+ /* we only support VM on some SI+ rings */
+ if ((p->rdev->asic->ring[p->ring].cs_parse == NULL) &&
+ ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+ DRM_ERROR("Ring %d requires VM!\n", p->ring);
return -EINVAL;
+ }
}
/* deal with non-vm */
@@ -526,6 +549,10 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
+
+ if (parser.ring == R600_RING_TYPE_UVD_INDEX)
+ radeon_uvd_note_usage(rdev);
+
r = radeon_cs_ib_chunk(rdev, &parser);
if (r) {
goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 44b8034a400d..a8f608903989 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -98,6 +98,42 @@ static const char radeon_family_name[][16] = {
};
/**
+ * radeon_program_register_sequence - program an array of registers.
+ *
+ * @rdev: radeon_device pointer
+ * @registers: pointer to the register array
+ * @array_size: size of the register array
+ *
+ * Programs an array of registers with AND and OR masks.
+ * This is a helper for setting golden registers.
+ */
+void radeon_program_register_sequence(struct radeon_device *rdev,
+ const u32 *registers,
+ const u32 array_size)
+{
+ u32 tmp, reg, and_mask, or_mask;
+ int i;
+
+ if (array_size % 3)
+ return;
+
+ for (i = 0; i < array_size; i += 3) {
+ reg = registers[i + 0];
+ and_mask = registers[i + 1];
+ or_mask = registers[i + 2];
+
+ if (and_mask == 0xffffffff) {
+ tmp = or_mask;
+ } else {
+ tmp = RREG32(reg);
+ tmp &= ~and_mask;
+ tmp |= or_mask;
+ }
+ WREG32(reg, tmp);
+ }
+}
+
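[Editorial aside, not part of the patch] radeon_program_register_sequence() consumes the array three entries at a time: register offset, AND mask, OR mask; an AND mask of 0xffffffff means the OR value is written verbatim instead of read-modify-write. A purely illustrative table (the offsets and values are placeholders, not real golden settings for any ASIC):

/* sketch only: {offset, and_mask, or_mask} triplets */
static const u32 example_golden_registers[] =
{
	0x9a10, 0xffffffff, 0x00018208,	/* and_mask 0xffffffff: write or_mask directly */
	0x3f90, 0xffff0000, 0x00030000,	/* clear the high 16 bits, then set bits 16-17 */
};

radeon_program_register_sequence(rdev, example_golden_registers,
				 (const u32)ARRAY_SIZE(example_golden_registers));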
+/**
* radeon_surface_init - Clear GPU surface registers.
*
* @rdev: radeon_device pointer
@@ -359,7 +395,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
uint64_t limit = (uint64_t)radeon_vram_limit << 20;
mc->vram_start = base;
- if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
+ if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
mc->real_vram_size = mc->aper_size;
mc->mc_vram_size = mc->aper_size;
@@ -394,7 +430,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_af, size_bf;
- size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+ size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
size_bf = mc->vram_start & ~mc->gtt_base_align;
if (size_bf > size_af) {
if (mc->gtt_size > size_bf) {
@@ -1068,6 +1104,17 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_agp_disable(rdev);
}
+ /* Set the internal MC address mask
+ * This is the max address of the GPU's
+ * internal address space.
+ */
+ if (rdev->family >= CHIP_CAYMAN)
+ rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+ else if (rdev->family >= CHIP_CEDAR)
+ rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
+ else
+ rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
+
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits
@@ -1131,6 +1178,11 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
+ r = radeon_gem_debugfs_init(rdev);
+ if (r) {
+ DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+ }
+
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 66a7f0fd9620..d33f484ace48 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -71,9 +71,12 @@
* 2.28.0 - r600-eg: Add MEM_WRITE packet support
* 2.29.0 - R500 FP16 color clear registers
* 2.30.0 - fix for FMASK texturing
+ * 2.31.0 - Add fastfb support for rs690
+ * 2.32.0 - new info request for rings working
+ * 2.33.0 - Add SI tiling mode array query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 30
+#define KMS_DRIVER_MINOR 33
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -160,6 +163,7 @@ int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = -1;
int radeon_msi = -1;
int radeon_lockup_timeout = 10000;
+int radeon_fastfb = 0;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -212,6 +216,9 @@ module_param_named(msi, radeon_msi, int, 0444);
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)");
+module_param_named(fastfb, radeon_fastfb, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 34356252567a..5b937dfe6f65 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -31,9 +31,9 @@
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
-#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
+#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
@@ -768,7 +768,19 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
rdev->fence_drv[ring].scratch_reg = 0;
- index = R600_WB_EVENT_OFFSET + ring * 4;
+ if (ring != R600_RING_TYPE_UVD_INDEX) {
+ index = R600_WB_EVENT_OFFSET + ring * 4;
+ rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+ rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
+ index;
+
+ } else {
+ /* put fence directly behind firmware */
+ index = ALIGN(rdev->uvd_fw->size, 8);
+ rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
+ rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
+ }
+
} else {
r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
if (r) {
@@ -778,9 +790,9 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
index = RADEON_WB_SCRATCH_OFFSET +
rdev->fence_drv[ring].scratch_reg -
rdev->scratch.reg_base;
+ rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+ rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
}
- rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
- rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
rdev->fence_drv[ring].initialized = true;
dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index fe5c1f6b7957..aa796031ab65 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -84,6 +84,7 @@ retry:
return r;
}
*obj = &robj->gem_base;
+ robj->pid = task_pid_nr(current);
mutex_lock(&rdev->gem.mutex);
list_add_tail(&robj->list, &rdev->gem.objects);
@@ -575,3 +576,52 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
{
return drm_gem_handle_delete(file_priv, handle);
}
+
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_bo *rbo;
+ unsigned i = 0;
+
+ mutex_lock(&rdev->gem.mutex);
+ list_for_each_entry(rbo, &rdev->gem.objects, list) {
+ unsigned domain;
+ const char *placement;
+
+ domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
+ switch (domain) {
+ case RADEON_GEM_DOMAIN_VRAM:
+ placement = "VRAM";
+ break;
+ case RADEON_GEM_DOMAIN_GTT:
+ placement = " GTT";
+ break;
+ case RADEON_GEM_DOMAIN_CPU:
+ default:
+ placement = " CPU";
+ break;
+ }
+ seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
+ i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
+ placement, (unsigned long)rbo->pid);
+ i++;
+ }
+ mutex_unlock(&rdev->gem.mutex);
+ return 0;
+}
+
+static struct drm_info_list radeon_debugfs_gem_list[] = {
+ {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
+};
+#endif
+
+int radeon_gem_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
+#endif
+ return 0;
+}
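[Editorial aside, not part of the patch] Once radeon_gem_debugfs_init() is wired up from radeon_device_init(), the per-BO list is readable at runtime; on a typical system with debugfs mounted that is a file named radeon_gem_info under the DRM debugfs directory (usually /sys/kernel/debug/dri/<minor>/, the exact path depends on the minor number and mount point). Each line reports a buffer's size, current placement, and, via the new pid field, the process that created it.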
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index c75cb2c6ba71..4f2d4f4c1dab 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -50,9 +50,13 @@ int radeon_driver_unload_kms(struct drm_device *dev)
if (rdev == NULL)
return 0;
+ if (rdev->rmmio == NULL)
+ goto done_free;
radeon_acpi_fini(rdev);
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
+
+done_free:
kfree(rdev);
dev->dev_private = NULL;
return 0;
@@ -176,80 +180,65 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_info *info = data;
struct radeon_mode_info *minfo = &rdev->mode_info;
- uint32_t value, *value_ptr;
- uint64_t value64, *value_ptr64;
+ uint32_t *value, value_tmp, *value_ptr, value_size;
+ uint64_t value64;
struct drm_crtc *crtc;
int i, found;
- /* TIMESTAMP is a 64-bit value, needs special handling. */
- if (info->request == RADEON_INFO_TIMESTAMP) {
- if (rdev->family >= CHIP_R600) {
- value_ptr64 = (uint64_t*)((unsigned long)info->value);
- value64 = radeon_get_gpu_clock_counter(rdev);
-
- if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
- DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
- return -EFAULT;
- }
- return 0;
- } else {
- DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
- return -EINVAL;
- }
- }
-
value_ptr = (uint32_t *)((unsigned long)info->value);
- if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
- DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
- return -EFAULT;
- }
+ value = &value_tmp;
+ value_size = sizeof(uint32_t);
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
- value = dev->pci_device;
+ *value = dev->pci_device;
break;
case RADEON_INFO_NUM_GB_PIPES:
- value = rdev->num_gb_pipes;
+ *value = rdev->num_gb_pipes;
break;
case RADEON_INFO_NUM_Z_PIPES:
- value = rdev->num_z_pipes;
+ *value = rdev->num_z_pipes;
break;
case RADEON_INFO_ACCEL_WORKING:
/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
- value = false;
+ *value = false;
else
- value = rdev->accel_working;
+ *value = rdev->accel_working;
break;
case RADEON_INFO_CRTC_FROM_ID:
+ if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
for (i = 0, found = 0; i < rdev->num_crtc; i++) {
crtc = (struct drm_crtc *)minfo->crtcs[i];
- if (crtc && crtc->base.id == value) {
+ if (crtc && crtc->base.id == *value) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- value = radeon_crtc->crtc_id;
+ *value = radeon_crtc->crtc_id;
found = 1;
break;
}
}
if (!found) {
- DRM_DEBUG_KMS("unknown crtc id %d\n", value);
+ DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
return -EINVAL;
}
break;
case RADEON_INFO_ACCEL_WORKING2:
- value = rdev->accel_working;
+ *value = rdev->accel_working;
break;
case RADEON_INFO_TILING_CONFIG:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.tile_config;
+ *value = rdev->config.si.tile_config;
else if (rdev->family >= CHIP_CAYMAN)
- value = rdev->config.cayman.tile_config;
+ *value = rdev->config.cayman.tile_config;
else if (rdev->family >= CHIP_CEDAR)
- value = rdev->config.evergreen.tile_config;
+ *value = rdev->config.evergreen.tile_config;
else if (rdev->family >= CHIP_RV770)
- value = rdev->config.rv770.tile_config;
+ *value = rdev->config.rv770.tile_config;
else if (rdev->family >= CHIP_R600)
- value = rdev->config.r600.tile_config;
+ *value = rdev->config.r600.tile_config;
else {
DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
return -EINVAL;
@@ -262,73 +251,81 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
*
* When returning, the value is 1 if filp owns hyper-z access,
* 0 otherwise. */
- if (value >= 2) {
- DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
+ if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ if (*value >= 2) {
+ DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
return -EINVAL;
}
- radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
+ radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
break;
case RADEON_INFO_WANT_CMASK:
/* The same logic as Hyper-Z. */
- if (value >= 2) {
- DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
+ if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ if (*value >= 2) {
+ DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
return -EINVAL;
}
- radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
+ radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
break;
case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
/* return clock value in KHz */
if (rdev->asic->get_xclk)
- value = radeon_get_xclk(rdev) * 10;
+ *value = radeon_get_xclk(rdev) * 10;
else
- value = rdev->clock.spll.reference_freq * 10;
+ *value = rdev->clock.spll.reference_freq * 10;
break;
case RADEON_INFO_NUM_BACKENDS:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.max_backends_per_se *
+ *value = rdev->config.si.max_backends_per_se *
rdev->config.si.max_shader_engines;
else if (rdev->family >= CHIP_CAYMAN)
- value = rdev->config.cayman.max_backends_per_se *
+ *value = rdev->config.cayman.max_backends_per_se *
rdev->config.cayman.max_shader_engines;
else if (rdev->family >= CHIP_CEDAR)
- value = rdev->config.evergreen.max_backends;
+ *value = rdev->config.evergreen.max_backends;
else if (rdev->family >= CHIP_RV770)
- value = rdev->config.rv770.max_backends;
+ *value = rdev->config.rv770.max_backends;
else if (rdev->family >= CHIP_R600)
- value = rdev->config.r600.max_backends;
+ *value = rdev->config.r600.max_backends;
else {
return -EINVAL;
}
break;
case RADEON_INFO_NUM_TILE_PIPES:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.max_tile_pipes;
+ *value = rdev->config.si.max_tile_pipes;
else if (rdev->family >= CHIP_CAYMAN)
- value = rdev->config.cayman.max_tile_pipes;
+ *value = rdev->config.cayman.max_tile_pipes;
else if (rdev->family >= CHIP_CEDAR)
- value = rdev->config.evergreen.max_tile_pipes;
+ *value = rdev->config.evergreen.max_tile_pipes;
else if (rdev->family >= CHIP_RV770)
- value = rdev->config.rv770.max_tile_pipes;
+ *value = rdev->config.rv770.max_tile_pipes;
else if (rdev->family >= CHIP_R600)
- value = rdev->config.r600.max_tile_pipes;
+ *value = rdev->config.r600.max_tile_pipes;
else {
return -EINVAL;
}
break;
case RADEON_INFO_FUSION_GART_WORKING:
- value = 1;
+ *value = 1;
break;
case RADEON_INFO_BACKEND_MAP:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.backend_map;
+ *value = rdev->config.si.backend_map;
else if (rdev->family >= CHIP_CAYMAN)
- value = rdev->config.cayman.backend_map;
+ *value = rdev->config.cayman.backend_map;
else if (rdev->family >= CHIP_CEDAR)
- value = rdev->config.evergreen.backend_map;
+ *value = rdev->config.evergreen.backend_map;
else if (rdev->family >= CHIP_RV770)
- value = rdev->config.rv770.backend_map;
+ *value = rdev->config.rv770.backend_map;
else if (rdev->family >= CHIP_R600)
- value = rdev->config.r600.backend_map;
+ *value = rdev->config.r600.backend_map;
else {
return -EINVAL;
}
@@ -337,50 +334,91 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
/* this is where we report if vm is supported or not */
if (rdev->family < CHIP_CAYMAN)
return -EINVAL;
- value = RADEON_VA_RESERVED_SIZE;
+ *value = RADEON_VA_RESERVED_SIZE;
break;
case RADEON_INFO_IB_VM_MAX_SIZE:
/* this is where we report if vm is supported or not */
if (rdev->family < CHIP_CAYMAN)
return -EINVAL;
- value = RADEON_IB_VM_MAX_SIZE;
+ *value = RADEON_IB_VM_MAX_SIZE;
break;
case RADEON_INFO_MAX_PIPES:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.max_cu_per_sh;
+ *value = rdev->config.si.max_cu_per_sh;
else if (rdev->family >= CHIP_CAYMAN)
- value = rdev->config.cayman.max_pipes_per_simd;
+ *value = rdev->config.cayman.max_pipes_per_simd;
else if (rdev->family >= CHIP_CEDAR)
- value = rdev->config.evergreen.max_pipes;
+ *value = rdev->config.evergreen.max_pipes;
else if (rdev->family >= CHIP_RV770)
- value = rdev->config.rv770.max_pipes;
+ *value = rdev->config.rv770.max_pipes;
else if (rdev->family >= CHIP_R600)
- value = rdev->config.r600.max_pipes;
+ *value = rdev->config.r600.max_pipes;
else {
return -EINVAL;
}
break;
+ case RADEON_INFO_TIMESTAMP:
+ if (rdev->family < CHIP_R600) {
+ DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
+ return -EINVAL;
+ }
+ value = (uint32_t*)&value64;
+ value_size = sizeof(uint64_t);
+ value64 = radeon_get_gpu_clock_counter(rdev);
+ break;
case RADEON_INFO_MAX_SE:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.max_shader_engines;
+ *value = rdev->config.si.max_shader_engines;
else if (rdev->family >= CHIP_CAYMAN)
- value = rdev->config.cayman.max_shader_engines;
+ *value = rdev->config.cayman.max_shader_engines;
else if (rdev->family >= CHIP_CEDAR)
- value = rdev->config.evergreen.num_ses;
+ *value = rdev->config.evergreen.num_ses;
else
- value = 1;
+ *value = 1;
break;
case RADEON_INFO_MAX_SH_PER_SE:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.max_sh_per_se;
+ *value = rdev->config.si.max_sh_per_se;
else
return -EINVAL;
break;
+ case RADEON_INFO_FASTFB_WORKING:
+ *value = rdev->fastfb_working;
+ break;
+ case RADEON_INFO_RING_WORKING:
+ if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ switch (*value) {
+ case RADEON_CS_RING_GFX:
+ case RADEON_CS_RING_COMPUTE:
+ *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
+ break;
+ case RADEON_CS_RING_DMA:
+ *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
+ *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
+ break;
+ case RADEON_CS_RING_UVD:
+ *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case RADEON_INFO_SI_TILE_MODE_ARRAY:
+ if (rdev->family < CHIP_TAHITI) {
+ DRM_DEBUG_KMS("tile mode array is si only!\n");
+ return -EINVAL;
+ }
+ value = rdev->config.si.tile_mode_array;
+ value_size = sizeof(uint32_t)*32;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
}
- if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
+ if (DRM_COPY_TO_USER(value_ptr, (char*)value, value_size)) {
DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
@@ -513,6 +551,7 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
rdev->hyperz_filp = NULL;
if (rdev->cmask_filp == file_priv)
rdev->cmask_filp = NULL;
+ radeon_uvd_free_handles(rdev, file_priv);
}
/*
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 4003f5a68c09..44e579e75fd0 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -492,6 +492,29 @@ struct radeon_framebuffer {
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
((em) == ATOM_ENCODER_MODE_DP_MST))
+struct atom_clock_dividers {
+ u32 post_div;
+ union {
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 reserved : 6;
+ u32 whole_fb_div : 12;
+ u32 frac_fb_div : 14;
+#else
+ u32 frac_fb_div : 14;
+ u32 whole_fb_div : 12;
+ u32 reserved : 6;
+#endif
+ };
+ u32 fb_div;
+ };
+ u32 ref_div;
+ bool enable_post_div;
+ bool enable_dithen;
+ u32 vco_mode;
+ u32 real_clock;
+};
+
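[Editorial aside, not part of the patch] The anonymous union in atom_clock_dividers lets callers either fill the whole and fractional feedback-divider parts separately or treat them as one packed fb_div word; the #ifdef merely reorders the bitfields so the packing comes out the same on big- and little-endian builds. A small sketch of the equivalence (assuming the usual GCC bitfield allocation; the values are arbitrary):

/* sketch only: whole/frac parts pack into fb_div as (whole << 14) | frac */
struct atom_clock_dividers div = { 0 };

div.whole_fb_div = 0x28;	/* 12-bit integer part of the feedback divider */
div.frac_fb_div = 0x1000;	/* 14-bit fractional part */

/* div.fb_div now reads back as (0x28 << 14) | 0x1000 == 0xa1000 */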
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
extern enum radeon_tv_std
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d3aface2d12d..1424ccde2377 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -321,8 +321,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
int radeon_bo_init(struct radeon_device *rdev)
{
/* Add an MTRR for the VRAM */
- rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
+ if (!rdev->fastfb_working) {
+ rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
MTRR_TYPE_WRCOMB, 1);
+ }
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
rdev->mc.mc_vram_size >> 20,
(unsigned long long)rdev->mc.aper_size >> 20);
@@ -339,14 +341,14 @@ void radeon_bo_fini(struct radeon_device *rdev)
void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head)
{
- if (lobj->wdomain) {
+ if (lobj->written) {
list_add(&lobj->tv.head, head);
} else {
list_add_tail(&lobj->tv.head, head);
}
}
-int radeon_bo_list_validate(struct list_head *head)
+int radeon_bo_list_validate(struct list_head *head, int ring)
{
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
@@ -360,15 +362,17 @@ int radeon_bo_list_validate(struct list_head *head)
list_for_each_entry(lobj, head, tv.head) {
bo = lobj->bo;
if (!bo->pin_count) {
- domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
+ domain = lobj->domain;
retry:
radeon_ttm_placement_from_domain(bo, domain);
+ if (ring == R600_RING_TYPE_UVD_INDEX)
+ radeon_uvd_force_into_uvd_segment(bo);
r = ttm_bo_validate(&bo->tbo, &bo->placement,
true, false);
if (unlikely(r)) {
- if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
- domain |= RADEON_GEM_DOMAIN_GTT;
+ if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
+ domain = lobj->alt_domain;
goto retry;
}
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 5fc86b03043b..e2cb80a96b51 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -128,7 +128,7 @@ extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head, int ring);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 338fd6a74e87..788c64cb4b47 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -843,7 +843,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct radeon_device *rdev = dev->dev_private;
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
- seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
+ if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
+ seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
+ else
+ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
if (rdev->asic->pm.get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 8d58e268ff6d..e17faa7cf732 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -180,7 +180,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
radeon_semaphore_free(rdev, &ib->semaphore, NULL);
}
/* if we can't remember our last VM flush then flush now! */
- if (ib->vm && !ib->vm->last_flush) {
+ /* XXX figure out why we have to flush for every IB */
+ if (ib->vm /*&& !ib->vm->last_flush*/) {
radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
}
if (const_ib) {
@@ -368,7 +369,7 @@ void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rptr;
- if (rdev->wb.enabled)
+ if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
else
rptr = RREG32(ring->rptr_reg);
@@ -821,18 +822,20 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
return 0;
}
-static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
-static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
-static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
-static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX;
-static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
+static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
+static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
+static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
+static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
+static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
static struct drm_info_list radeon_debugfs_ring_info_list[] = {
- {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
- {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
- {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
- {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index},
- {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index},
+ {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
+ {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
+ {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
+ {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
+ {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
+ {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
};
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index cb800995d4f9..0abe5a9431bb 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -64,7 +64,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
}
r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
+ domain, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index fda09c9ea689..bbed4af8d0bc 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -252,6 +252,36 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}
+static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_fence **fence)
+{
+ int r;
+
+ if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
+ r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed to get dummy create msg\n");
+ return r;
+ }
+
+ r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence);
+ if (r) {
+ DRM_ERROR("Failed to get dummy destroy msg\n");
+ return r;
+ }
+ } else {
+ r = radeon_ring_lock(rdev, ring, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
+ return r;
+ }
+ radeon_fence_emit(rdev, fence, ring->idx);
+ radeon_ring_unlock_commit(rdev, ring);
+ }
+ return 0;
+}
+
void radeon_test_ring_sync(struct radeon_device *rdev,
struct radeon_ring *ringA,
struct radeon_ring *ringB)
@@ -272,21 +302,24 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- r = radeon_fence_emit(rdev, &fence1, ringA->idx);
- if (r) {
- DRM_ERROR("Failed to emit fence 1\n");
- radeon_ring_unlock_undo(rdev, ringA);
+ radeon_ring_unlock_commit(rdev, ringA);
+
+ r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
+ if (r)
goto out_cleanup;
- }
- radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- r = radeon_fence_emit(rdev, &fence2, ringA->idx);
+
+ r = radeon_ring_lock(rdev, ringA, 64);
if (r) {
- DRM_ERROR("Failed to emit fence 2\n");
- radeon_ring_unlock_undo(rdev, ringA);
+ DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
goto out_cleanup;
}
+ radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringA);
+ r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
+ if (r)
+ goto out_cleanup;
+
mdelay(1000);
if (radeon_fence_signaled(fence1)) {
@@ -364,27 +397,22 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- r = radeon_fence_emit(rdev, &fenceA, ringA->idx);
- if (r) {
- DRM_ERROR("Failed to emit sync fence 1\n");
- radeon_ring_unlock_undo(rdev, ringA);
- goto out_cleanup;
- }
radeon_ring_unlock_commit(rdev, ringA);
+ r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
+ if (r)
+ goto out_cleanup;
+
r = radeon_ring_lock(rdev, ringB, 64);
if (r) {
DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
- r = radeon_fence_emit(rdev, &fenceB, ringB->idx);
- if (r) {
- DRM_ERROR("Failed to create sync fence 2\n");
- radeon_ring_unlock_undo(rdev, ringB);
- goto out_cleanup;
- }
radeon_ring_unlock_commit(rdev, ringB);
+ r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
+ if (r)
+ goto out_cleanup;
mdelay(1000);
@@ -393,7 +421,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
if (radeon_fence_signaled(fenceB)) {
- DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+ DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
new file mode 100644
index 000000000000..906e5c0ca3b9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -0,0 +1,831 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Christian König <deathsimple@vodafone.de>
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "radeon.h"
+#include "r600d.h"
+
+/* 1 second timeout */
+#define UVD_IDLE_TIMEOUT_MS 1000
+
+/* Firmware Names */
+#define FIRMWARE_RV710 "radeon/RV710_uvd.bin"
+#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin"
+#define FIRMWARE_SUMO "radeon/SUMO_uvd.bin"
+#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin"
+
+MODULE_FIRMWARE(FIRMWARE_RV710);
+MODULE_FIRMWARE(FIRMWARE_CYPRESS);
+MODULE_FIRMWARE(FIRMWARE_SUMO);
+MODULE_FIRMWARE(FIRMWARE_TAHITI);
+
+static void radeon_uvd_idle_work_handler(struct work_struct *work);
+
+int radeon_uvd_init(struct radeon_device *rdev)
+{
+ struct platform_device *pdev;
+ unsigned long bo_size;
+ const char *fw_name;
+ int i, r;
+
+ INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
+
+ pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
+ r = IS_ERR(pdev);
+ if (r) {
+ dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n");
+ return -EINVAL;
+ }
+
+ switch (rdev->family) {
+ case CHIP_RV710:
+ case CHIP_RV730:
+ case CHIP_RV740:
+ fw_name = FIRMWARE_RV710;
+ break;
+
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ case CHIP_JUNIPER:
+ case CHIP_REDWOOD:
+ case CHIP_CEDAR:
+ fw_name = FIRMWARE_CYPRESS;
+ break;
+
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
+ case CHIP_PALM:
+ case CHIP_CAYMAN:
+ case CHIP_BARTS:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ fw_name = FIRMWARE_SUMO;
+ break;
+
+ case CHIP_TAHITI:
+ case CHIP_VERDE:
+ case CHIP_PITCAIRN:
+ case CHIP_ARUBA:
+ fw_name = FIRMWARE_TAHITI;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev);
+ if (r) {
+ dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
+ fw_name);
+ platform_device_unregister(pdev);
+ return r;
+ }
+
+ platform_device_unregister(pdev);
+
+ bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
+ RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
+ r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
+ return r;
+ }
+
+ r = radeon_uvd_resume(rdev);
+ if (r)
+ return r;
+
+ memset(rdev->uvd.cpu_addr, 0, bo_size);
+ memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+ r = radeon_uvd_suspend(rdev);
+ if (r)
+ return r;
+
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ atomic_set(&rdev->uvd.handles[i], 0);
+ rdev->uvd.filp[i] = NULL;
+ }
+
+ return 0;
+}
+
+void radeon_uvd_fini(struct radeon_device *rdev)
+{
+ radeon_uvd_suspend(rdev);
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+}
+
+int radeon_uvd_suspend(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->uvd.vcpu_bo == NULL)
+ return 0;
+
+ r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+ if (!r) {
+ radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+ radeon_bo_unpin(rdev->uvd.vcpu_bo);
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+ }
+ return r;
+}
+
+int radeon_uvd_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->uvd.vcpu_bo == NULL)
+ return -EINVAL;
+
+ r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+ if (r) {
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
+ return r;
+ }
+
+ r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->uvd.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+ radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
+ return r;
+ }
+
+ r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+ if (r) {
+ dev_err(rdev->dev, "(%d) UVD map failed\n", r);
+ return r;
+ }
+
+ radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+ return 0;
+}
+
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
+{
+ rbo->placement.fpfn = 0 >> PAGE_SHIFT;
+ rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+}
+
+void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
+{
+ int i, r;
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (rdev->uvd.filp[i] == filp) {
+ uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ struct radeon_fence *fence;
+
+ r = radeon_uvd_get_destroy_msg(rdev,
+ R600_RING_TYPE_UVD_INDEX, handle, &fence);
+ if (r) {
+ DRM_ERROR("Error destroying UVD (%d)!\n", r);
+ continue;
+ }
+
+ radeon_fence_wait(fence, false);
+ radeon_fence_unref(&fence);
+
+ rdev->uvd.filp[i] = NULL;
+ atomic_set(&rdev->uvd.handles[i], 0);
+ }
+ }
+}
+
+static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
+{
+ unsigned stream_type = msg[4];
+ unsigned width = msg[6];
+ unsigned height = msg[7];
+ unsigned dpb_size = msg[9];
+ unsigned pitch = msg[28];
+
+ unsigned width_in_mb = width / 16;
+ unsigned height_in_mb = ALIGN(height / 16, 2);
+
+ unsigned image_size, tmp, min_dpb_size;
+
+ image_size = width * height;
+ image_size += image_size / 2;
+ image_size = ALIGN(image_size, 1024);
+
+ switch (stream_type) {
+ case 0: /* H264 */
+
+ /* reference picture buffer */
+ min_dpb_size = image_size * 17;
+
+ /* macroblock context buffer */
+ min_dpb_size += width_in_mb * height_in_mb * 17 * 192;
+
+ /* IT surface buffer */
+ min_dpb_size += width_in_mb * height_in_mb * 32;
+ break;
+
+ case 1: /* VC1 */
+
+ /* reference picture buffer */
+ min_dpb_size = image_size * 3;
+
+ /* CONTEXT_BUFFER */
+ min_dpb_size += width_in_mb * height_in_mb * 128;
+
+ /* IT surface buffer */
+ min_dpb_size += width_in_mb * 64;
+
+ /* DB surface buffer */
+ min_dpb_size += width_in_mb * 128;
+
+ /* BP */
+ tmp = max(width_in_mb, height_in_mb);
+ min_dpb_size += ALIGN(tmp * 7 * 16, 64);
+ break;
+
+ case 3: /* MPEG2 */
+
+ /* reference picture buffer */
+ min_dpb_size = image_size * 3;
+ break;
+
+ case 4: /* MPEG4 */
+
+ /* reference picture buffer */
+ min_dpb_size = image_size * 3;
+
+ /* CM */
+ min_dpb_size += width_in_mb * height_in_mb * 64;
+
+ /* IT surface buffer */
+ min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
+ break;
+
+ default:
+ DRM_ERROR("UVD codec not handled %d!\n", stream_type);
+ return -EINVAL;
+ }
+
+ if (width > pitch) {
+ DRM_ERROR("Invalid UVD decoding target pitch!\n");
+ return -EINVAL;
+ }
+
+ if (dpb_size < min_dpb_size) {
+ DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
+ dpb_size, min_dpb_size);
+ return -EINVAL;
+ }
+
+ buf_sizes[0x1] = dpb_size;
+ buf_sizes[0x2] = image_size;
+ return 0;
+}
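For a sense of the sizes this decode-message check enforces, the H264 branch above works out as follows for a 1920x1088 target (illustrative arithmetic only; the numbers follow directly from the code above):

	/* 1920x1088 H264: width_in_mb = 120, height_in_mb = 68
	 *   image_size   = 1920*1088 + (1920*1088)/2 = 3133440   (already 1K aligned)
	 *   ref pictures = image_size * 17           = 53268480
	 *   mb context   = 120 * 68 * 17 * 192       = 26634240
	 *   IT surface   = 120 * 68 * 32             =   261120
	 *   min_dpb_size                             = 80163840 bytes (~76 MB)
	 */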
+
+static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
+ unsigned offset, unsigned buf_sizes[])
+{
+ int32_t *msg, msg_type, handle;
+ void *ptr;
+
+ int i, r;
+
+ if (offset & 0x3F) {
+ DRM_ERROR("UVD messages must be 64 byte aligned!\n");
+ return -EINVAL;
+ }
+
+ r = radeon_bo_kmap(bo, &ptr);
+ if (r)
+ return r;
+
+ msg = ptr + offset;
+
+ msg_type = msg[1];
+ handle = msg[2];
+
+ if (handle == 0) {
+ DRM_ERROR("Invalid UVD handle!\n");
+ radeon_bo_kunmap(bo);
+ return -EINVAL;
+ }
+
+ if (msg_type == 1) {
+ /* it's a decode msg, calc buffer sizes */
+ r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+ radeon_bo_kunmap(bo);
+ if (r)
+ return r;
+
+ } else if (msg_type == 2) {
+ /* it's a destroy msg, free the handle */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+ atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
+ radeon_bo_kunmap(bo);
+ return 0;
+ } else {
+ /* it's a create msg, no special handling needed */
+ radeon_bo_kunmap(bo);
+ }
+
+ /* create or decode, validate the handle */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
+ return 0;
+ }
+
+ /* handle not found, try to allocate a new one */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
+ p->rdev->uvd.filp[i] = p->filp;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("No more free UVD handles!\n");
+ return -EINVAL;
+}
+
+static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
+ int data0, int data1,
+ unsigned buf_sizes[])
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_reloc *reloc;
+ unsigned idx, cmd, offset;
+ uint64_t start, end;
+ int r;
+
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ offset = radeon_get_ib_value(p, data0);
+ idx = radeon_get_ib_value(p, data1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ return -EINVAL;
+ }
+
+ reloc = p->relocs_ptr[(idx / 4)];
+ start = reloc->lobj.gpu_offset;
+ end = start + radeon_bo_size(reloc->robj);
+ start += offset;
+
+ p->ib.ptr[data0] = start & 0xFFFFFFFF;
+ p->ib.ptr[data1] = start >> 32;
+
+ cmd = radeon_get_ib_value(p, p->idx) >> 1;
+
+ if (cmd < 0x4) {
+ if ((end - start) < buf_sizes[cmd]) {
+ DRM_ERROR("buffer to small (%d / %d)!\n",
+ (unsigned)(end - start), buf_sizes[cmd]);
+ return -EINVAL;
+ }
+
+ } else if (cmd != 0x100) {
+ DRM_ERROR("invalid UVD command %X!\n", cmd);
+ return -EINVAL;
+ }
+
+ if ((start >> 28) != (end >> 28)) {
+ DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
+ start, end);
+ return -EINVAL;
+ }
+
+ /* TODO: is this still necessary on NI+ ? */
+ if ((cmd == 0 || cmd == 0x3) &&
+ (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
+ DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
+ start, end);
+ return -EINVAL;
+ }
+
+ if (cmd == 0) {
+ r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ int *data0, int *data1,
+ unsigned buf_sizes[])
+{
+ int i, r;
+
+ p->idx++;
+ for (i = 0; i <= pkt->count; ++i) {
+ switch (pkt->reg + i*4) {
+ case UVD_GPCOM_VCPU_DATA0:
+ *data0 = p->idx;
+ break;
+ case UVD_GPCOM_VCPU_DATA1:
+ *data1 = p->idx;
+ break;
+ case UVD_GPCOM_VCPU_CMD:
+ r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
+ if (r)
+ return r;
+ break;
+ case UVD_ENGINE_CNTL:
+ break;
+ default:
+ DRM_ERROR("Invalid reg 0x%X!\n",
+ pkt->reg + i*4);
+ return -EINVAL;
+ }
+ p->idx++;
+ }
+ return 0;
+}
+
+int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet pkt;
+ int r, data0 = 0, data1 = 0;
+
+ /* minimum buffer sizes */
+ unsigned buf_sizes[] = {
+ [0x00000000] = 2048,
+ [0x00000001] = 32 * 1024 * 1024,
+ [0x00000002] = 2048 * 1152 * 3,
+ [0x00000003] = 2048,
+ };
+
+ if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
+ DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
+ p->chunks[p->chunk_ib_idx].length_dw);
+ return -EINVAL;
+ }
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+
+
+ do {
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
+ if (r)
+ return r;
+ switch (pkt.type) {
+ case RADEON_PACKET_TYPE0:
+ r = radeon_uvd_cs_reg(p, &pkt, &data0,
+ &data1, buf_sizes);
+ if (r)
+ return r;
+ break;
+ case RADEON_PACKET_TYPE2:
+ p->idx += pkt.count + 2;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ return -EINVAL;
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ return 0;
+}
+
+static int radeon_uvd_send_msg(struct radeon_device *rdev,
+ int ring, struct radeon_bo *bo,
+ struct radeon_fence **fence)
+{
+ struct ttm_validate_buffer tv;
+ struct list_head head;
+ struct radeon_ib ib;
+ uint64_t addr;
+ int i, r;
+
+ memset(&tv, 0, sizeof(tv));
+ tv.bo = &bo->tbo;
+
+ INIT_LIST_HEAD(&head);
+ list_add(&tv.head, &head);
+
+ r = ttm_eu_reserve_buffers(&head);
+ if (r)
+ return r;
+
+ radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
+ radeon_uvd_force_into_uvd_segment(bo);
+
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ if (r) {
+ ttm_eu_backoff_reservation(&head);
+ return r;
+ }
+
+ r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
+ if (r) {
+ ttm_eu_backoff_reservation(&head);
+ return r;
+ }
+
+ addr = radeon_bo_gpu_offset(bo);
+ ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
+ ib.ptr[1] = addr;
+ ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
+ ib.ptr[3] = addr >> 32;
+ ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
+ ib.ptr[5] = 0;
+ for (i = 6; i < 16; ++i)
+ ib.ptr[i] = PACKET2(0);
+ ib.length_dw = 16;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ ttm_eu_backoff_reservation(&head);
+ return r;
+ }
+ ttm_eu_fence_buffer_objects(&head, ib.fence);
+
+ if (fence)
+ *fence = radeon_fence_ref(ib.fence);
+
+ radeon_ib_free(rdev, &ib);
+ radeon_bo_unref(&bo);
+ return 0;
+}
+
+/* multiple fence commands without any stream commands in between can
+ crash the vcpu so just try to emit a dummy create/destroy msg to
+ avoid this */
+int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence)
+{
+ struct radeon_bo *bo;
+ uint32_t *msg;
+ int r, i;
+
+ r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ if (r)
+ return r;
+
+ r = radeon_bo_reserve(bo, false);
+ if (r) {
+ radeon_bo_unref(&bo);
+ return r;
+ }
+
+ r = radeon_bo_kmap(bo, (void **)&msg);
+ if (r) {
+ radeon_bo_unreserve(bo);
+ radeon_bo_unref(&bo);
+ return r;
+ }
+
+ /* stitch together a UVD create msg */
+ msg[0] = 0x00000de4;
+ msg[1] = 0x00000000;
+ msg[2] = handle;
+ msg[3] = 0x00000000;
+ msg[4] = 0x00000000;
+ msg[5] = 0x00000000;
+ msg[6] = 0x00000000;
+ msg[7] = 0x00000780;
+ msg[8] = 0x00000440;
+ msg[9] = 0x00000000;
+ msg[10] = 0x01b37000;
+ for (i = 11; i < 1024; ++i)
+ msg[i] = 0x0;
+
+ radeon_bo_kunmap(bo);
+ radeon_bo_unreserve(bo);
+
+ return radeon_uvd_send_msg(rdev, ring, bo, fence);
+}
+
+int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
+ uint32_t handle, struct radeon_fence **fence)
+{
+ struct radeon_bo *bo;
+ uint32_t *msg;
+ int r, i;
+
+ r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ if (r)
+ return r;
+
+ r = radeon_bo_reserve(bo, false);
+ if (r) {
+ radeon_bo_unref(&bo);
+ return r;
+ }
+
+ r = radeon_bo_kmap(bo, (void **)&msg);
+ if (r) {
+ radeon_bo_unreserve(bo);
+ radeon_bo_unref(&bo);
+ return r;
+ }
+
+ /* stitch together a UVD destroy msg */
+ msg[0] = 0x00000de4;
+ msg[1] = 0x00000002;
+ msg[2] = handle;
+ msg[3] = 0x00000000;
+ for (i = 4; i < 1024; ++i)
+ msg[i] = 0x0;
+
+ radeon_bo_kunmap(bo);
+ radeon_bo_unreserve(bo);
+
+ return radeon_uvd_send_msg(rdev, ring, bo, fence);
+}
+
+static void radeon_uvd_idle_work_handler(struct work_struct *work)
+{
+ struct radeon_device *rdev =
+ container_of(work, struct radeon_device, uvd.idle_work.work);
+
+ if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
+ radeon_set_uvd_clocks(rdev, 0, 0);
+ else
+ schedule_delayed_work(&rdev->uvd.idle_work,
+ msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+}
+
+void radeon_uvd_note_usage(struct radeon_device *rdev)
+{
+ bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
+ set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
+ msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+ if (set_clocks)
+ radeon_set_uvd_clocks(rdev, 53300, 40000);
+}
+
+static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
+ unsigned target_freq,
+ unsigned pd_min,
+ unsigned pd_even)
+{
+ unsigned post_div = vco_freq / target_freq;
+
+ /* adjust to post divider minimum value */
+ if (post_div < pd_min)
+ post_div = pd_min;
+
+ /* we always need a frequency less than or equal to the target */
+ if ((vco_freq / post_div) > target_freq)
+ post_div += 1;
+
+ /* post dividers above a certain value must be even */
+ if (post_div > pd_even && post_div % 2)
+ post_div += 1;
+
+ return post_div;
+}
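A concrete trace of the helper above, using the RV770 caller's limits further down in this patch (pd_min = 1, pd_even = ~0, i.e. effectively no even-only restriction) and a hypothetical vco_freq = 80000 with target_freq = 53300, both in the 10 kHz units the callers use:

	/* post_div = 80000 / 53300 = 1      (integer division, already >= pd_min)
	 * 80000 / 1 = 80000 > 53300        -> post_div = 2, so we never exceed the target
	 * post_div <= pd_even              -> no even-value adjustment
	 * result: 80000 / 2 = 40000, i.e. 400 MHz for a 533 MHz request
	 */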
+
+/**
+ * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
+ *
+ * @rdev: radeon_device pointer
+ * @vclk: wanted VCLK
+ * @dclk: wanted DCLK
+ * @vco_min: minimum VCO frequency
+ * @vco_max: maximum VCO frequency
+ * @fb_factor: factor to multiply vco freq with
+ * @fb_mask: limit and bitmask for feedback divider
+ * @pd_min: post divider minimum
+ * @pd_max: post divider maximum
+ * @pd_even: post divider must be even above this value
+ * @optimal_fb_div: resulting feedback divider
+ * @optimal_vclk_div: resulting vclk post divider
+ * @optimal_dclk_div: resulting dclk post divider
+ *
+ * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
+ * Returns zero on success, -EINVAL on error.
+ */
+int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+ unsigned vclk, unsigned dclk,
+ unsigned vco_min, unsigned vco_max,
+ unsigned fb_factor, unsigned fb_mask,
+ unsigned pd_min, unsigned pd_max,
+ unsigned pd_even,
+ unsigned *optimal_fb_div,
+ unsigned *optimal_vclk_div,
+ unsigned *optimal_dclk_div)
+{
+ unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;
+
+ /* start off with something large */
+ unsigned optimal_score = ~0;
+
+ /* loop through vco from low to high */
+ vco_min = max(max(vco_min, vclk), dclk);
+ for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
+
+ uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
+ unsigned vclk_div, dclk_div, score;
+
+ do_div(fb_div, ref_freq);
+
+ /* fb div out of range ? */
+ if (fb_div > fb_mask)
+ break; /* it can only get worse */
+
+ fb_div &= fb_mask;
+
+ /* calc vclk divider with current vco freq */
+ vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
+ pd_min, pd_even);
+ if (vclk_div > pd_max)
+ break; /* vco is too big, it has to stop */
+
+ /* calc dclk divider with current vco freq */
+ dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
+ pd_min, pd_even);
+ if (dclk_div > pd_max)
+ break; /* vco is too big, it has to stop */
+
+ /* calc score with current vco freq */
+ score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
+
+ /* determine if this vco setting is better than current optimal settings */
+ if (score < optimal_score) {
+ *optimal_fb_div = fb_div;
+ *optimal_vclk_div = vclk_div;
+ *optimal_dclk_div = dclk_div;
+ optimal_score = score;
+ if (optimal_score == 0)
+ break; /* it can't get better than this */
+ }
+ }
+
+ /* did we find a valid setup? */
+ if (optimal_score == ~0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
+ unsigned cg_upll_func_cntl)
+{
+ unsigned i;
+
+ /* make sure UPLL_CTLREQ is deasserted */
+ WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+ mdelay(10);
+
+ /* assert UPLL_CTLREQ */
+ WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+
+ /* wait for CTLACK and CTLACK2 to get asserted */
+ for (i = 0; i < 100; ++i) {
+ uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+ if ((RREG32(cg_upll_func_cntl) & mask) == mask)
+ break;
+ mdelay(10);
+ }
+
+ /* deassert UPLL_CTLREQ */
+ WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+ if (i == 100) {
+ DRM_ERROR("Timeout setting UVD clocks!\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5a0fc74c2ba6..46fa1b07c560 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -52,23 +52,59 @@ static const u32 crtc_offsets[2] =
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
+static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * avivo_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r5xx-r7xx).
+ */
void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
- int i;
+ unsigned i = 0;
if (crtc >= rdev->num_crtc)
return;
- if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
+ if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (avivo_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!avivo_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
+ }
+
+ while (!avivo_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!avivo_is_counter_moving(rdev, crtc))
break;
- udelay(1);
}
}
}
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 5706d2ac75ab..ab4c86cfd552 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -148,6 +148,8 @@ void rs690_pm_info(struct radeon_device *rdev)
static void rs690_mc_init(struct radeon_device *rdev)
{
u64 base;
+ uint32_t h_addr, l_addr;
+ unsigned long long k8_addr;
rs400_gart_adjust_size(rdev);
rdev->mc.vram_is_ddr = true;
@@ -160,6 +162,27 @@ static void rs690_mc_init(struct radeon_device *rdev)
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
+ /* Use K8 direct mapping for fast fb access. */
+ rdev->fastfb_working = false;
+ h_addr = G_00005F_K8_ADDR_EXT(RREG32_MC(R_00005F_MC_MISC_UMA_CNTL));
+ l_addr = RREG32_MC(R_00001E_K8_FB_LOCATION);
+ k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+ if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+ {
+ /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
+ * memory is present.
+ */
+ if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+ DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
+ (unsigned long long)rdev->mc.aper_base, k8_addr);
+ rdev->mc.aper_base = (resource_size_t)k8_addr;
+ rdev->fastfb_working = true;
+ }
+ }
+
rs690_pm_info(rdev);
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
index 36e6398a98ae..8af3ccf20cc0 100644
--- a/drivers/gpu/drm/radeon/rs690d.h
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -29,6 +29,9 @@
#define __RS690D_H__
/* Registers */
+#define R_00001E_K8_FB_LOCATION 0x00001E
+#define R_00005F_MC_MISC_UMA_CNTL 0x00005F
+#define G_00005F_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF)
#define R_000078_MC_INDEX 0x000078
#define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
#define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 435ed3551364..ffcba730c57c 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -303,8 +303,10 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
@@ -313,6 +315,15 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
break;
udelay(1);
}
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~AVIVO_CRTC_EN;
+ WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ /* ***** */
} else {
save->crtc_enabled[i] = false;
}
@@ -338,6 +349,22 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
}
/* wait for the MC to settle */
udelay(100);
+
+ /* lock double buffered regs */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (!(tmp & 1)) {
+ tmp |= 1;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ }
+ }
}
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
@@ -348,7 +375,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
/* update crtc base addresses */
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->family >= CHIP_RV770) {
- if (i == 1) {
+ if (i == 0) {
WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
upper_32_bits(rdev->mc.vram_start));
WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
@@ -367,6 +394,33 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
}
WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+ /* unlock regs and wait for update */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
if (rdev->family >= CHIP_R600) {
/* unblackout the MC */
if (rdev->family >= CHIP_RV770)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index d63fe1d0f53f..83f612a9500b 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -42,6 +42,739 @@
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+
+int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+ int r;
+
+ /* RV740 uses evergreen uvd clk programming */
+ if (rdev->family == CHIP_RV740)
+ return evergreen_set_uvd_clocks(rdev, vclk, dclk);
+
+ /* bypass vclk and dclk with bclk */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ if (!vclk || !dclk) {
+ /* keep the Bypass mode, put PLL to sleep */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ return 0;
+ }
+
+ r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
+ 43663, 0x03FFFFFE, 1, 30, ~0,
+ &fb_div, &vclk_div, &dclk_div);
+ if (r)
+ return r;
+
+ fb_div |= 1;
+ vclk_div -= 1;
+ dclk_div -= 1;
+
+ /* set UPLL_FB_DIV to 0x50000 */
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(0x50000), ~UPLL_FB_DIV_MASK);
+
+ /* deassert UPLL_RESET and UPLL_SLEEP */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~(UPLL_RESET_MASK | UPLL_SLEEP_MASK));
+
+ /* assert BYPASS EN and FB_DIV[0] <- ??? why? */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1));
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* assert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+ /* set the required FB_DIV, REF_DIV, post divider values */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REF_DIV(1), ~UPLL_REF_DIV_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ UPLL_SW_HILEN(vclk_div >> 1) |
+ UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
+ UPLL_SW_HILEN2(dclk_div >> 1) |
+ UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)),
+ ~UPLL_SW_MASK);
+
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div),
+ ~UPLL_FB_DIV_MASK);
+
+ /* give the PLL some time to settle */
+ mdelay(15);
+
+ /* deassert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(15);
+
+ /* deassert BYPASS EN and FB_DIV[0] <- ??? why? */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1));
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* switch VCLK and DCLK selection */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ mdelay(100);
+
+ return 0;
+}
+
+static const u32 r7xx_golden_registers[] =
+{
+ 0x8d00, 0xffffffff, 0x0e0e0074,
+ 0x8d04, 0xffffffff, 0x013a2b34,
+ 0x9508, 0xffffffff, 0x00000002,
+ 0x8b20, 0xffffffff, 0,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x28350, 0xffffffff, 0,
+ 0x9058, 0xffffffff, 0x0fffc40f,
+ 0x240c, 0xffffffff, 0x00000380,
+ 0x733c, 0xffffffff, 0x00000002,
+ 0x2650, 0x00040000, 0,
+ 0x20bc, 0x00040000, 0,
+ 0x7300, 0xffffffff, 0x001000f0
+};
+
+static const u32 r7xx_golden_dyn_gpr_registers[] =
+{
+ 0x8db0, 0xffffffff, 0x98989898,
+ 0x8db4, 0xffffffff, 0x98989898,
+ 0x8db8, 0xffffffff, 0x98989898,
+ 0x8dbc, 0xffffffff, 0x98989898,
+ 0x8dc0, 0xffffffff, 0x98989898,
+ 0x8dc4, 0xffffffff, 0x98989898,
+ 0x8dc8, 0xffffffff, 0x98989898,
+ 0x8dcc, 0xffffffff, 0x98989898,
+ 0x88c4, 0xffffffff, 0x00000082
+};
+
+static const u32 rv770_golden_registers[] =
+{
+ 0x562c, 0xffffffff, 0,
+ 0x3f90, 0xffffffff, 0,
+ 0x9148, 0xffffffff, 0,
+ 0x3f94, 0xffffffff, 0,
+ 0x914c, 0xffffffff, 0,
+ 0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv770ce_golden_registers[] =
+{
+ 0x562c, 0xffffffff, 0,
+ 0x3f90, 0xffffffff, 0x00cc0000,
+ 0x9148, 0xffffffff, 0x00cc0000,
+ 0x3f94, 0xffffffff, 0x00cc0000,
+ 0x914c, 0xffffffff, 0x00cc0000,
+ 0x9b7c, 0xffffffff, 0x00fa0000,
+ 0x3f8c, 0xffffffff, 0x00fa0000,
+ 0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv770_mgcg_init[] =
+{
+ 0x8bcc, 0xffffffff, 0x130300f9,
+ 0x5448, 0xffffffff, 0x100,
+ 0x55e4, 0xffffffff, 0x100,
+ 0x160c, 0xffffffff, 0x100,
+ 0x5644, 0xffffffff, 0x100,
+ 0xc164, 0xffffffff, 0x100,
+ 0x8a18, 0xffffffff, 0x100,
+ 0x897c, 0xffffffff, 0x8000100,
+ 0x8b28, 0xffffffff, 0x3c000100,
+ 0x9144, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10000,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10001,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10002,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10003,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x0,
+ 0x9870, 0xffffffff, 0x100,
+ 0x8d58, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x0,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x1,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x2,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x3,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x4,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x5,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x6,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x7,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x9,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8000,
+ 0x9490, 0xffffffff, 0x0,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x1,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x2,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x3,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x4,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x5,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x6,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x7,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x9,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8000,
+ 0x9604, 0xffffffff, 0x0,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x1,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x2,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x3,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x4,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x5,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x6,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x7,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x8,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x9,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x80000000,
+ 0x9030, 0xffffffff, 0x100,
+ 0x9034, 0xffffffff, 0x100,
+ 0x9038, 0xffffffff, 0x100,
+ 0x903c, 0xffffffff, 0x100,
+ 0x9040, 0xffffffff, 0x100,
+ 0xa200, 0xffffffff, 0x100,
+ 0xa204, 0xffffffff, 0x100,
+ 0xa208, 0xffffffff, 0x100,
+ 0xa20c, 0xffffffff, 0x100,
+ 0x971c, 0xffffffff, 0x100,
+ 0x915c, 0xffffffff, 0x00020001,
+ 0x9160, 0xffffffff, 0x00040003,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00080007,
+ 0x9174, 0xffffffff, 0x000a0009,
+ 0x9178, 0xffffffff, 0x000c000b,
+ 0x917c, 0xffffffff, 0x000e000d,
+ 0x9180, 0xffffffff, 0x0010000f,
+ 0x918c, 0xffffffff, 0x00120011,
+ 0x9190, 0xffffffff, 0x00140013,
+ 0x9194, 0xffffffff, 0x00020001,
+ 0x9198, 0xffffffff, 0x00040003,
+ 0x919c, 0xffffffff, 0x00060005,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000a0009,
+ 0x91b0, 0xffffffff, 0x000c000b,
+ 0x91b4, 0xffffffff, 0x000e000d,
+ 0x91b8, 0xffffffff, 0x0010000f,
+ 0x91c4, 0xffffffff, 0x00120011,
+ 0x91c8, 0xffffffff, 0x00140013,
+ 0x91cc, 0xffffffff, 0x00020001,
+ 0x91d0, 0xffffffff, 0x00040003,
+ 0x91d4, 0xffffffff, 0x00060005,
+ 0x91e0, 0xffffffff, 0x00080007,
+ 0x91e4, 0xffffffff, 0x000a0009,
+ 0x91e8, 0xffffffff, 0x000c000b,
+ 0x91ec, 0xffffffff, 0x00020001,
+ 0x91f0, 0xffffffff, 0x00040003,
+ 0x91f4, 0xffffffff, 0x00060005,
+ 0x9200, 0xffffffff, 0x00080007,
+ 0x9204, 0xffffffff, 0x000a0009,
+ 0x9208, 0xffffffff, 0x000c000b,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x0010000f,
+ 0x921c, 0xffffffff, 0x00120011,
+ 0x9220, 0xffffffff, 0x00140013,
+ 0x9224, 0xffffffff, 0x00020001,
+ 0x9228, 0xffffffff, 0x00040003,
+ 0x922c, 0xffffffff, 0x00060005,
+ 0x9238, 0xffffffff, 0x00080007,
+ 0x923c, 0xffffffff, 0x000a0009,
+ 0x9240, 0xffffffff, 0x000c000b,
+ 0x9244, 0xffffffff, 0x000e000d,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x9254, 0xffffffff, 0x00120011,
+ 0x9258, 0xffffffff, 0x00140013,
+ 0x925c, 0xffffffff, 0x00020001,
+ 0x9260, 0xffffffff, 0x00040003,
+ 0x9264, 0xffffffff, 0x00060005,
+ 0x9270, 0xffffffff, 0x00080007,
+ 0x9274, 0xffffffff, 0x000a0009,
+ 0x9278, 0xffffffff, 0x000c000b,
+ 0x927c, 0xffffffff, 0x000e000d,
+ 0x9280, 0xffffffff, 0x0010000f,
+ 0x928c, 0xffffffff, 0x00120011,
+ 0x9290, 0xffffffff, 0x00140013,
+ 0x9294, 0xffffffff, 0x00020001,
+ 0x929c, 0xffffffff, 0x00040003,
+ 0x92a0, 0xffffffff, 0x00060005,
+ 0x92a4, 0xffffffff, 0x00080007
+};
+
+static const u32 rv710_golden_registers[] =
+{
+ 0x3f90, 0x00ff0000, 0x00fc0000,
+ 0x9148, 0x00ff0000, 0x00fc0000,
+ 0x3f94, 0x00ff0000, 0x00fc0000,
+ 0x914c, 0x00ff0000, 0x00fc0000,
+ 0xb4c, 0x00000020, 0x00000020,
+ 0xa180, 0xffffffff, 0x00003f3f
+};
+
+static const u32 rv710_mgcg_init[] =
+{
+ 0x8bcc, 0xffffffff, 0x13030040,
+ 0x5448, 0xffffffff, 0x100,
+ 0x55e4, 0xffffffff, 0x100,
+ 0x160c, 0xffffffff, 0x100,
+ 0x5644, 0xffffffff, 0x100,
+ 0xc164, 0xffffffff, 0x100,
+ 0x8a18, 0xffffffff, 0x100,
+ 0x897c, 0xffffffff, 0x8000100,
+ 0x8b28, 0xffffffff, 0x3c000100,
+ 0x9144, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10000,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x0,
+ 0x9870, 0xffffffff, 0x100,
+ 0x8d58, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x0,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x1,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8000,
+ 0x9490, 0xffffffff, 0x0,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x1,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8000,
+ 0x9604, 0xffffffff, 0x0,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x1,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x80000000,
+ 0x9030, 0xffffffff, 0x100,
+ 0x9034, 0xffffffff, 0x100,
+ 0x9038, 0xffffffff, 0x100,
+ 0x903c, 0xffffffff, 0x100,
+ 0x9040, 0xffffffff, 0x100,
+ 0xa200, 0xffffffff, 0x100,
+ 0xa204, 0xffffffff, 0x100,
+ 0xa208, 0xffffffff, 0x100,
+ 0xa20c, 0xffffffff, 0x100,
+ 0x971c, 0xffffffff, 0x100,
+ 0x915c, 0xffffffff, 0x00020001,
+ 0x9174, 0xffffffff, 0x00000003,
+ 0x9178, 0xffffffff, 0x00050001,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00000004,
+ 0x9190, 0xffffffff, 0x00070006,
+ 0x9194, 0xffffffff, 0x00050001,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x91a8, 0xffffffff, 0x00000004,
+ 0x91ac, 0xffffffff, 0x00070006,
+ 0x91e8, 0xffffffff, 0x00000001,
+ 0x9294, 0xffffffff, 0x00000001,
+ 0x929c, 0xffffffff, 0x00000002,
+ 0x92a0, 0xffffffff, 0x00040003,
+ 0x9150, 0xffffffff, 0x4d940000
+};
+
+static const u32 rv730_golden_registers[] =
+{
+ 0x3f90, 0x00ff0000, 0x00f00000,
+ 0x9148, 0x00ff0000, 0x00f00000,
+ 0x3f94, 0x00ff0000, 0x00f00000,
+ 0x914c, 0x00ff0000, 0x00f00000,
+ 0x900c, 0xffffffff, 0x003b033f,
+ 0xb4c, 0x00000020, 0x00000020,
+ 0xa180, 0xffffffff, 0x00003f3f
+};
+
+static const u32 rv730_mgcg_init[] =
+{
+ 0x8bcc, 0xffffffff, 0x130300f9,
+ 0x5448, 0xffffffff, 0x100,
+ 0x55e4, 0xffffffff, 0x100,
+ 0x160c, 0xffffffff, 0x100,
+ 0x5644, 0xffffffff, 0x100,
+ 0xc164, 0xffffffff, 0x100,
+ 0x8a18, 0xffffffff, 0x100,
+ 0x897c, 0xffffffff, 0x8000100,
+ 0x8b28, 0xffffffff, 0x3c000100,
+ 0x9144, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10000,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10001,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x0,
+ 0x9870, 0xffffffff, 0x100,
+ 0x8d58, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x0,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x1,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x2,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x3,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x4,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x5,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x6,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x7,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8000,
+ 0x9490, 0xffffffff, 0x0,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x1,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x2,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x3,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x4,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x5,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x6,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x7,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8000,
+ 0x9604, 0xffffffff, 0x0,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x1,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x2,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x3,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x4,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x5,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x6,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x7,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x80000000,
+ 0x9030, 0xffffffff, 0x100,
+ 0x9034, 0xffffffff, 0x100,
+ 0x9038, 0xffffffff, 0x100,
+ 0x903c, 0xffffffff, 0x100,
+ 0x9040, 0xffffffff, 0x100,
+ 0xa200, 0xffffffff, 0x100,
+ 0xa204, 0xffffffff, 0x100,
+ 0xa208, 0xffffffff, 0x100,
+ 0xa20c, 0xffffffff, 0x100,
+ 0x971c, 0xffffffff, 0x100,
+ 0x915c, 0xffffffff, 0x00020001,
+ 0x916c, 0xffffffff, 0x00040003,
+ 0x9170, 0xffffffff, 0x00000005,
+ 0x9178, 0xffffffff, 0x00050001,
+ 0x917c, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00000004,
+ 0x9190, 0xffffffff, 0x00070006,
+ 0x9194, 0xffffffff, 0x00050001,
+ 0x9198, 0xffffffff, 0x00030002,
+ 0x91a8, 0xffffffff, 0x00000004,
+ 0x91ac, 0xffffffff, 0x00070006,
+ 0x91b0, 0xffffffff, 0x00050001,
+ 0x91b4, 0xffffffff, 0x00030002,
+ 0x91c4, 0xffffffff, 0x00000004,
+ 0x91c8, 0xffffffff, 0x00070006,
+ 0x91cc, 0xffffffff, 0x00050001,
+ 0x91d0, 0xffffffff, 0x00030002,
+ 0x91e0, 0xffffffff, 0x00000004,
+ 0x91e4, 0xffffffff, 0x00070006,
+ 0x91e8, 0xffffffff, 0x00000001,
+ 0x91ec, 0xffffffff, 0x00050001,
+ 0x91f0, 0xffffffff, 0x00030002,
+ 0x9200, 0xffffffff, 0x00000004,
+ 0x9204, 0xffffffff, 0x00070006,
+ 0x9208, 0xffffffff, 0x00050001,
+ 0x920c, 0xffffffff, 0x00030002,
+ 0x921c, 0xffffffff, 0x00000004,
+ 0x9220, 0xffffffff, 0x00070006,
+ 0x9224, 0xffffffff, 0x00050001,
+ 0x9228, 0xffffffff, 0x00030002,
+ 0x9238, 0xffffffff, 0x00000004,
+ 0x923c, 0xffffffff, 0x00070006,
+ 0x9240, 0xffffffff, 0x00050001,
+ 0x9244, 0xffffffff, 0x00030002,
+ 0x9254, 0xffffffff, 0x00000004,
+ 0x9258, 0xffffffff, 0x00070006,
+ 0x9294, 0xffffffff, 0x00000001,
+ 0x929c, 0xffffffff, 0x00000002,
+ 0x92a0, 0xffffffff, 0x00040003,
+ 0x92a4, 0xffffffff, 0x00000005
+};
+
+static const u32 rv740_golden_registers[] =
+{
+ 0x88c4, 0xffffffff, 0x00000082,
+ 0x28a50, 0xfffffffc, 0x00000004,
+ 0x2650, 0x00040000, 0,
+ 0x20bc, 0x00040000, 0,
+ 0x733c, 0xffffffff, 0x00000002,
+ 0x7300, 0xffffffff, 0x001000f0,
+ 0x3f90, 0x00ff0000, 0,
+ 0x9148, 0x00ff0000, 0,
+ 0x3f94, 0x00ff0000, 0,
+ 0x914c, 0x00ff0000, 0,
+ 0x240c, 0xffffffff, 0x00000380,
+ 0x8a14, 0x00000007, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x28a4c, 0xffffffff, 0x00004000,
+ 0xa180, 0xffffffff, 0x00003f3f,
+ 0x8d00, 0xffffffff, 0x0e0e003a,
+ 0x8d04, 0xffffffff, 0x013a0e2a,
+ 0x8c00, 0xffffffff, 0xe400000f,
+ 0x8db0, 0xffffffff, 0x98989898,
+ 0x8db4, 0xffffffff, 0x98989898,
+ 0x8db8, 0xffffffff, 0x98989898,
+ 0x8dbc, 0xffffffff, 0x98989898,
+ 0x8dc0, 0xffffffff, 0x98989898,
+ 0x8dc4, 0xffffffff, 0x98989898,
+ 0x8dc8, 0xffffffff, 0x98989898,
+ 0x8dcc, 0xffffffff, 0x98989898,
+ 0x9058, 0xffffffff, 0x0fffc40f,
+ 0x900c, 0xffffffff, 0x003b033f,
+ 0x28350, 0xffffffff, 0,
+ 0x8cf0, 0x1fffffff, 0x08e00420,
+ 0x9508, 0xffffffff, 0x00000002,
+ 0x88c4, 0xffffffff, 0x000000c2,
+ 0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv740_mgcg_init[] =
+{
+ 0x8bcc, 0xffffffff, 0x13030100,
+ 0x5448, 0xffffffff, 0x100,
+ 0x55e4, 0xffffffff, 0x100,
+ 0x160c, 0xffffffff, 0x100,
+ 0x5644, 0xffffffff, 0x100,
+ 0xc164, 0xffffffff, 0x100,
+ 0x8a18, 0xffffffff, 0x100,
+ 0x897c, 0xffffffff, 0x100,
+ 0x8b28, 0xffffffff, 0x100,
+ 0x9144, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10000,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10001,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10002,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x10003,
+ 0x9a50, 0xffffffff, 0x100,
+ 0x9a1c, 0xffffffff, 0x0,
+ 0x9870, 0xffffffff, 0x100,
+ 0x8d58, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x0,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x1,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x2,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x3,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x4,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x5,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x6,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x7,
+ 0x9510, 0xffffffff, 0x100,
+ 0x9500, 0xffffffff, 0x8000,
+ 0x9490, 0xffffffff, 0x0,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x1,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x2,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x3,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x4,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x5,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x6,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x7,
+ 0x949c, 0xffffffff, 0x100,
+ 0x9490, 0xffffffff, 0x8000,
+ 0x9604, 0xffffffff, 0x0,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x1,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x2,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x3,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x4,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x5,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x6,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x7,
+ 0x9654, 0xffffffff, 0x100,
+ 0x9604, 0xffffffff, 0x80000000,
+ 0x9030, 0xffffffff, 0x100,
+ 0x9034, 0xffffffff, 0x100,
+ 0x9038, 0xffffffff, 0x100,
+ 0x903c, 0xffffffff, 0x100,
+ 0x9040, 0xffffffff, 0x100,
+ 0xa200, 0xffffffff, 0x100,
+ 0xa204, 0xffffffff, 0x100,
+ 0xa208, 0xffffffff, 0x100,
+ 0xa20c, 0xffffffff, 0x100,
+ 0x971c, 0xffffffff, 0x100,
+ 0x915c, 0xffffffff, 0x00020001,
+ 0x9160, 0xffffffff, 0x00040003,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00080007,
+ 0x9174, 0xffffffff, 0x000a0009,
+ 0x9178, 0xffffffff, 0x000c000b,
+ 0x917c, 0xffffffff, 0x000e000d,
+ 0x9180, 0xffffffff, 0x0010000f,
+ 0x918c, 0xffffffff, 0x00120011,
+ 0x9190, 0xffffffff, 0x00140013,
+ 0x9194, 0xffffffff, 0x00020001,
+ 0x9198, 0xffffffff, 0x00040003,
+ 0x919c, 0xffffffff, 0x00060005,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000a0009,
+ 0x91b0, 0xffffffff, 0x000c000b,
+ 0x91b4, 0xffffffff, 0x000e000d,
+ 0x91b8, 0xffffffff, 0x0010000f,
+ 0x91c4, 0xffffffff, 0x00120011,
+ 0x91c8, 0xffffffff, 0x00140013,
+ 0x91cc, 0xffffffff, 0x00020001,
+ 0x91d0, 0xffffffff, 0x00040003,
+ 0x91d4, 0xffffffff, 0x00060005,
+ 0x91e0, 0xffffffff, 0x00080007,
+ 0x91e4, 0xffffffff, 0x000a0009,
+ 0x91e8, 0xffffffff, 0x000c000b,
+ 0x91ec, 0xffffffff, 0x00020001,
+ 0x91f0, 0xffffffff, 0x00040003,
+ 0x91f4, 0xffffffff, 0x00060005,
+ 0x9200, 0xffffffff, 0x00080007,
+ 0x9204, 0xffffffff, 0x000a0009,
+ 0x9208, 0xffffffff, 0x000c000b,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x0010000f,
+ 0x921c, 0xffffffff, 0x00120011,
+ 0x9220, 0xffffffff, 0x00140013,
+ 0x9224, 0xffffffff, 0x00020001,
+ 0x9228, 0xffffffff, 0x00040003,
+ 0x922c, 0xffffffff, 0x00060005,
+ 0x9238, 0xffffffff, 0x00080007,
+ 0x923c, 0xffffffff, 0x000a0009,
+ 0x9240, 0xffffffff, 0x000c000b,
+ 0x9244, 0xffffffff, 0x000e000d,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x9254, 0xffffffff, 0x00120011,
+ 0x9258, 0xffffffff, 0x00140013,
+ 0x9294, 0xffffffff, 0x00020001,
+ 0x929c, 0xffffffff, 0x00040003,
+ 0x92a0, 0xffffffff, 0x00060005,
+ 0x92a4, 0xffffffff, 0x00080007
+};
+
+static void rv770_init_golden_registers(struct radeon_device *rdev)
+{
+ switch (rdev->family) {
+ case CHIP_RV770:
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_registers));
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_dyn_gpr_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+ if (rdev->pdev->device == 0x994e)
+ radeon_program_register_sequence(rdev,
+ rv770ce_golden_registers,
+ (const u32)ARRAY_SIZE(rv770ce_golden_registers));
+ else
+ radeon_program_register_sequence(rdev,
+ rv770_golden_registers,
+ (const u32)ARRAY_SIZE(rv770_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv770_mgcg_init,
+ (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ break;
+ case CHIP_RV730:
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_registers));
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_dyn_gpr_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+ radeon_program_register_sequence(rdev,
+ rv730_golden_registers,
+ (const u32)ARRAY_SIZE(rv730_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv730_mgcg_init,
+ (const u32)ARRAY_SIZE(rv730_mgcg_init));
+ break;
+ case CHIP_RV710:
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_registers));
+ radeon_program_register_sequence(rdev,
+ r7xx_golden_dyn_gpr_registers,
+ (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+ radeon_program_register_sequence(rdev,
+ rv710_golden_registers,
+ (const u32)ARRAY_SIZE(rv710_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv710_mgcg_init,
+ (const u32)ARRAY_SIZE(rv710_mgcg_init));
+ break;
+ case CHIP_RV740:
+ radeon_program_register_sequence(rdev,
+ rv740_golden_registers,
+ (const u32)ARRAY_SIZE(rv740_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv740_mgcg_init,
+ (const u32)ARRAY_SIZE(rv740_mgcg_init));
+ break;
+ default:
+ break;
+ }
+}
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
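The golden tables above are flat arrays of (register offset, AND mask, OR value) triples, and rv770_init_golden_registers() simply hands each table plus its element count to radeon_program_register_sequence(). A minimal sketch of how such a helper could consume the triples follows; it is illustrative only, assuming nothing beyond the layout visible in the tables (the real implementation lives in radeon_device.c and may, for example, special-case full 0xffffffff masks).

/* Illustrative sketch only, not the radeon_device.c implementation. */
static void program_register_sequence_sketch(struct radeon_device *rdev,
					      const u32 *registers,
					      const u32 array_size)
{
	u32 reg, and_mask, or_mask, tmp;
	u32 i;

	for (i = 0; i < array_size; i += 3) {
		reg      = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask  = registers[i + 2];

		/* read-modify-write: clear the masked bits, then set the golden value */
		tmp = RREG32(reg);
		tmp &= ~and_mask;
		tmp |= or_mask;
		WREG32(reg, tmp);
	}
}

This is also why every call site must pass the ARRAY_SIZE() of the very table it programs: the count is in u32 elements, three per register.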
@@ -68,6 +801,105 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
return reference_clock;
}
+int rv770_uvd_resume(struct radeon_device *rdev)
+{
+ uint64_t addr;
+ uint32_t chip_id, size;
+ int r;
+
+ r = radeon_uvd_resume(rdev);
+ if (r)
+ return r;
+
+ /* program the VCPU memory controller bits 0-27 */
+ addr = rdev->uvd.gpu_addr >> 3;
+ size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = RADEON_UVD_STACK_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = RADEON_UVD_HEAP_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+ /* bits 28-31 */
+ addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+ WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+ /* bits 32-39 */
+ addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+ WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+ /* tell firmware which hardware it is running on */
+ switch (rdev->family) {
+ default:
+ return -EINVAL;
+ case CHIP_RV710:
+ chip_id = 0x01000005;
+ break;
+ case CHIP_RV730:
+ chip_id = 0x01000006;
+ break;
+ case CHIP_RV740:
+ chip_id = 0x01000007;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ chip_id = 0x01000008;
+ break;
+ case CHIP_JUNIPER:
+ chip_id = 0x01000009;
+ break;
+ case CHIP_REDWOOD:
+ chip_id = 0x0100000a;
+ break;
+ case CHIP_CEDAR:
+ chip_id = 0x0100000b;
+ break;
+ case CHIP_SUMO:
+ chip_id = 0x0100000c;
+ break;
+ case CHIP_SUMO2:
+ chip_id = 0x0100000d;
+ break;
+ case CHIP_PALM:
+ chip_id = 0x0100000e;
+ break;
+ case CHIP_CAYMAN:
+ chip_id = 0x0100000f;
+ break;
+ case CHIP_BARTS:
+ chip_id = 0x01000010;
+ break;
+ case CHIP_TURKS:
+ chip_id = 0x01000011;
+ break;
+ case CHIP_CAICOS:
+ chip_id = 0x01000012;
+ break;
+ case CHIP_TAHITI:
+ chip_id = 0x01000014;
+ break;
+ case CHIP_VERDE:
+ chip_id = 0x01000015;
+ break;
+ case CHIP_PITCAIRN:
+ chip_id = 0x01000016;
+ break;
+ case CHIP_ARUBA:
+ chip_id = 0x01000017;
+ break;
+ }
+ WREG32(UVD_VCPU_CHIP_ID, chip_id);
+
+ return 0;
+}
+
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
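rv770_uvd_resume() above lays the UVD firmware image, stack and heap out back to back inside the UVD buffer object. The VCPU cache OFFSET/SIZE registers are programmed in 8-byte units (hence the >> 3), and only address bits 0-27 go there; bits 28-31 and 32-39 are supplied separately through UVD_LMI_ADDR_EXT and UVD_LMI_EXT40_ADDR. A worked example with assumed numbers (the GPU address and sizes are placeholders, not the real RADEON_UVD_STACK_SIZE/RADEON_UVD_HEAP_SIZE values from radeon.h):

#include <stdint.h>
#include <stdio.h>

/* Worked example of the VCPU window arithmetic above; all values assumed. */
int main(void)
{
	uint64_t gpu_addr = 0x40000000ULL;	/* assumed UVD BO address */
	uint32_t fw    = 0x32000  >> 3;		/* firmware size, in 8-byte words */
	uint32_t stack = 0x20000  >> 3;		/* placeholder stack size */
	uint32_t heap  = 0x100000 >> 3;		/* placeholder heap size */
	uint64_t addr  = gpu_addr >> 3;

	printf("OFFSET0=0x%llx SIZE0=0x%x\n", (unsigned long long)addr, fw);
	addr += fw;
	printf("OFFSET1=0x%llx SIZE1=0x%x\n", (unsigned long long)addr, stack);
	addr += stack;
	printf("OFFSET2=0x%llx SIZE2=0x%x\n", (unsigned long long)addr, heap);
	printf("ADDR_EXT nibble=0x%llx\n",
	       (unsigned long long)((gpu_addr >> 28) & 0xF));
	return 0;
}

With these assumed values the windows come out as OFFSET0=0x8000000, OFFSET1=0x8006400 and OFFSET2=0x800a400, and both nibbles of UVD_LMI_ADDR_EXT get 0x4.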
@@ -611,6 +1443,11 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
+ if (rdev->family == CHIP_RV730) {
+ WREG32(UVD_UDEC_DB_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(UVD_UDEC_DBW_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ WREG32(UVD_UDEC_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ }
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
@@ -840,7 +1677,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
}
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
- size_af = 0xFFFFFFFF - mc->gtt_end;
+ size_af = mc->mc_mask - mc->gtt_end;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
@@ -1040,6 +1877,17 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
+ r = rv770_uvd_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
+
+ if (r)
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1074,6 +1922,19 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size,
+ R600_WB_UVD_RPTR_OFFSET,
+ UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (!r)
+ r = r600_uvd_init(rdev);
+
+ if (r)
+ DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+ }
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
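The UVD ring setup above only runs when ring_size was left non-zero by the resume step, so a UVD bring-up failure degrades gracefully instead of aborting startup. For reference, the parameter roles in that radeon_ring_init() call, as the 3.10-era signature suggests (the comments are descriptive glosses, not text from the patch):

r = radeon_ring_init(rdev, ring,
		     ring->ring_size,		/* ring buffer size in bytes */
		     R600_WB_UVD_RPTR_OFFSET,	/* writeback slot mirroring the read pointer */
		     UVD_RBC_RB_RPTR,		/* MMIO read-pointer register */
		     UVD_RBC_RB_WPTR,		/* MMIO write-pointer register */
		     0,				/* pointer register shift */
		     0xfffff,			/* pointer register mask */
		     RADEON_CP_PACKET2);	/* NOP packet used to pad the ring */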
@@ -1100,6 +1961,9 @@ int rv770_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ /* init golden registers */
+ rv770_init_golden_registers(rdev);
+
rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
@@ -1115,6 +1979,7 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
r600_irq_suspend(rdev);
@@ -1156,6 +2021,8 @@ int rv770_init(struct radeon_device *rdev)
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
+ /* init golden registers */
+ rv770_init_golden_registers(rdev);
/* Initialize scratch registers */
r600_scratch_init(rdev);
/* Initialize surface registers */
@@ -1190,6 +2057,13 @@ int rv770_init(struct radeon_device *rdev)
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
+ 4096);
+ }
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1224,6 +2098,7 @@ void rv770_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
+ radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
@@ -1264,23 +2139,23 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
/* advertise upconfig capability */
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
LC_RECONFIG_ARC_MISSING_ESCAPE);
link_width_cntl |= lanes | LC_RECONFIG_NOW |
LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
} else {
link_width_cntl |= LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
@@ -1293,29 +2168,29 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
WREG16(0x4088, link_cntl2);
WREG32(MM_CFGREGS_CNTL, 0);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_GEN2_EN_STRAP;
- WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
} else {
- link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
if (1)
link_width_cntl |= LC_UPCONFIGURE_DIS;
else
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
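The rename from RREG32_PCIE_P/WREG32_PCIE_P to RREG32_PCIE_PORT/WREG32_PCIE_PORT in this hunk is mechanical; both forms go through the asic's pciep_rreg/pciep_wreg callbacks, which reach the PCIE port registers through an index/data register pair rather than plain MMIO. A sketch of that access style, assuming PCIE_PORT_INDEX/PCIE_PORT_DATA offsets as on r600-family parts; the masking and posting read are assumptions, and r600_pciep_rreg()/r600_pciep_wreg() in r600.c remain the authoritative helpers:

/* Illustrative index/data style PCIE port read; details are assumed. */
static u32 pciep_rreg_sketch(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, reg & 0xff);	/* select the port register */
	(void)RREG32(PCIE_PORT_INDEX);		/* posting read */
	r = RREG32(PCIE_PORT_DATA);
	return r;
}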
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index c55f950a4af7..85b16266f748 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -38,6 +38,30 @@
#define R7XX_MAX_PIPES 8
#define R7XX_MAX_PIPES_MASK 0xff
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL 0x718
+# define UPLL_RESET_MASK 0x00000001
+# define UPLL_SLEEP_MASK 0x00000002
+# define UPLL_BYPASS_EN_MASK 0x00000004
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_REF_DIV(x) ((x) << 16)
+# define UPLL_REF_DIV_MASK 0x003F0000
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+#define CG_UPLL_FUNC_CNTL_2 0x71c
+# define UPLL_SW_HILEN(x) ((x) << 0)
+# define UPLL_SW_LOLEN(x) ((x) << 4)
+# define UPLL_SW_HILEN2(x) ((x) << 8)
+# define UPLL_SW_LOLEN2(x) ((x) << 12)
+# define UPLL_SW_MASK 0x0000FFFF
+# define VCLK_SRC_SEL(x) ((x) << 20)
+# define VCLK_SRC_SEL_MASK 0x01F00000
+# define DCLK_SRC_SEL(x) ((x) << 25)
+# define DCLK_SRC_SEL_MASK 0x3E000000
+#define CG_UPLL_FUNC_CNTL_3 0x720
+# define UPLL_FB_DIV(x) ((x) << 0)
+# define UPLL_FB_DIV_MASK 0x01FFFFFF
+
/* Registers */
#define CB_COLOR0_BASE 0x28040
#define CB_COLOR1_BASE 0x28044
@@ -112,6 +136,11 @@
#define DMA_TILING_CONFIG 0x3ec8
#define DMA_TILING_CONFIG2 0xd0b8
+/* RV730 only */
+#define UVD_UDEC_TILING_CONFIG 0xef40
+#define UVD_UDEC_DB_TILING_CONFIG 0xef44
+#define UVD_UDEC_DBW_TILING_CONFIG 0xef48
+
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -671,4 +700,18 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
+/* UVD */
+#define UVD_LMI_EXT40_ADDR 0xf498
+#define UVD_VCPU_CHIP_ID 0xf4d4
+#define UVD_VCPU_CACHE_OFFSET0 0xf4d8
+#define UVD_VCPU_CACHE_SIZE0 0xf4dc
+#define UVD_VCPU_CACHE_OFFSET1 0xf4e0
+#define UVD_VCPU_CACHE_SIZE1 0xf4e4
+#define UVD_VCPU_CACHE_OFFSET2 0xf4e8
+#define UVD_VCPU_CACHE_SIZE2 0xf4ec
+#define UVD_LMI_ADDR_EXT 0xf594
+
+#define UVD_RBC_RB_RPTR 0xf690
+#define UVD_RBC_RB_WPTR 0xf694
+
#endif
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index bafbe3216952..f0b6c2f87c4d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -70,6 +70,794 @@ extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+static const u32 tahiti_golden_rlc_registers[] =
+{
+ 0xc424, 0xffffffff, 0x00601005,
+ 0xc47c, 0xffffffff, 0x10104040,
+ 0xc488, 0xffffffff, 0x0100000a,
+ 0xc314, 0xffffffff, 0x00000800,
+ 0xc30c, 0xffffffff, 0x800000f4,
+ 0xf4a8, 0xffffffff, 0x00000000
+};
+
+static const u32 tahiti_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x277c, 0x00000003, 0x000007ff,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x2a00126a,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x8e88, 0x01ff1f3f, 0x00000000,
+ 0x8e84, 0x01ff1f3f, 0x00000000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x00000200, 0x000002fb,
+ 0xac10, 0xffffffff, 0x0000543b,
+ 0xac0c, 0xffffffff, 0xa9210876,
+ 0x88d0, 0xffffffff, 0x000fff40,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x1410, 0x20000000, 0x20fffed8,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_golden_registers2[] =
+{
+ 0xc64, 0x00000001, 0x00000001
+};
+
+static const u32 pitcairn_golden_rlc_registers[] =
+{
+ 0xc424, 0xffffffff, 0x00601004,
+ 0xc47c, 0xffffffff, 0x10102020,
+ 0xc488, 0xffffffff, 0x01000020,
+ 0xc314, 0xffffffff, 0x00000800,
+ 0xc30c, 0xffffffff, 0x800000a4
+};
+
+static const u32 pitcairn_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x2a00126a,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x000003ff, 0x000000f7,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac0c, 0xffffffff, 0x32761054,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 verde_golden_rlc_registers[] =
+{
+ 0xc424, 0xffffffff, 0x033f1005,
+ 0xc47c, 0xffffffff, 0x10808020,
+ 0xc488, 0xffffffff, 0x00800008,
+ 0xc314, 0xffffffff, 0x00001000,
+ 0xc30c, 0xffffffff, 0x80010014
+};
+
+static const u32 verde_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x0000124a,
+ 0x28350, 0x3f3f3fff, 0x0000124a,
+ 0x28350, 0x3f3f3fff, 0x0000124a,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x8e88, 0x01ff1f3f, 0x00000000,
+ 0x8e88, 0x01ff1f3f, 0x00000000,
+ 0x8e88, 0x01ff1f3f, 0x00000000,
+ 0x8e84, 0x01ff1f3f, 0x00000000,
+ 0x8e84, 0x01ff1f3f, 0x00000000,
+ 0x8e84, 0x01ff1f3f, 0x00000000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x000003ff, 0x00000003,
+ 0xac14, 0x000003ff, 0x00000003,
+ 0xac14, 0x000003ff, 0x00000003,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac0c, 0xffffffff, 0x00001032,
+ 0xac0c, 0xffffffff, 0x00001032,
+ 0xac0c, 0xffffffff, 0x00001032,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_rlc_registers[] =
+{
+ 0xc424, 0xffffffff, 0x00601005,
+ 0xc47c, 0xffffffff, 0x10104040,
+ 0xc488, 0xffffffff, 0x0100000a,
+ 0xc314, 0xffffffff, 0x00000800,
+ 0xc30c, 0xffffffff, 0x800000f4
+};
+
+static const u32 oland_golden_registers[] =
+{
+ 0x9a10, 0x00010000, 0x00018208,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x9834, 0xf00fffff, 0x00000400,
+ 0x9838, 0x0002021c, 0x00020200,
+ 0xc78, 0x00000080, 0x00000000,
+ 0xd030, 0x000300c0, 0x00800040,
+ 0xd830, 0x000300c0, 0x00800040,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0x00200000, 0x50100000,
+ 0x7030, 0x31000311, 0x00000011,
+ 0x2ae4, 0x00073ffe, 0x000022a2,
+ 0x240c, 0x000007ff, 0x00000000,
+ 0x8a14, 0xf000001f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ffffff,
+ 0x8b10, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x4e000000,
+ 0x28350, 0x3f3f3fff, 0x00000082,
+ 0x30, 0x000000ff, 0x0040,
+ 0x34, 0x00000040, 0x00004040,
+ 0x9100, 0x07ffffff, 0x03000000,
+ 0x9060, 0x0000007f, 0x00000020,
+ 0x9508, 0x00010000, 0x00010000,
+ 0xac14, 0x000003ff, 0x000000f3,
+ 0xac10, 0xffffffff, 0x00000000,
+ 0xac0c, 0xffffffff, 0x00003210,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x91c4, 0xffffffff, 0x00060005,
+ 0x91c8, 0xffffffff, 0x00080007,
+ 0x91cc, 0xffffffff, 0x0000000b,
+ 0x91d0, 0xffffffff, 0x000a0009,
+ 0x91d4, 0xffffffff, 0x000d000c,
+ 0x91d8, 0xffffffff, 0x00070006,
+ 0x91dc, 0xffffffff, 0x00090008,
+ 0x91e0, 0xffffffff, 0x0000000c,
+ 0x91e4, 0xffffffff, 0x000b000a,
+ 0x91e8, 0xffffffff, 0x000e000d,
+ 0x91ec, 0xffffffff, 0x00080007,
+ 0x91f0, 0xffffffff, 0x000a0009,
+ 0x91f4, 0xffffffff, 0x0000000d,
+ 0x91f8, 0xffffffff, 0x000c000b,
+ 0x91fc, 0xffffffff, 0x000f000e,
+ 0x9200, 0xffffffff, 0x00090008,
+ 0x9204, 0xffffffff, 0x000b000a,
+ 0x9208, 0xffffffff, 0x000c000f,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x00110010,
+ 0x9214, 0xffffffff, 0x000a0009,
+ 0x9218, 0xffffffff, 0x000c000b,
+ 0x921c, 0xffffffff, 0x0000000f,
+ 0x9220, 0xffffffff, 0x000e000d,
+ 0x9224, 0xffffffff, 0x00110010,
+ 0x9228, 0xffffffff, 0x000b000a,
+ 0x922c, 0xffffffff, 0x000d000c,
+ 0x9230, 0xffffffff, 0x00000010,
+ 0x9234, 0xffffffff, 0x000f000e,
+ 0x9238, 0xffffffff, 0x00120011,
+ 0x923c, 0xffffffff, 0x000c000b,
+ 0x9240, 0xffffffff, 0x000e000d,
+ 0x9244, 0xffffffff, 0x00000011,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x924c, 0xffffffff, 0x00130012,
+ 0x9250, 0xffffffff, 0x000d000c,
+ 0x9254, 0xffffffff, 0x000f000e,
+ 0x9258, 0xffffffff, 0x00100013,
+ 0x925c, 0xffffffff, 0x00120011,
+ 0x9260, 0xffffffff, 0x00150014,
+ 0x9264, 0xffffffff, 0x000e000d,
+ 0x9268, 0xffffffff, 0x0010000f,
+ 0x926c, 0xffffffff, 0x00000013,
+ 0x9270, 0xffffffff, 0x00120011,
+ 0x9274, 0xffffffff, 0x00150014,
+ 0x9278, 0xffffffff, 0x000f000e,
+ 0x927c, 0xffffffff, 0x00110010,
+ 0x9280, 0xffffffff, 0x00000014,
+ 0x9284, 0xffffffff, 0x00130012,
+ 0x9288, 0xffffffff, 0x00160015,
+ 0x928c, 0xffffffff, 0x0010000f,
+ 0x9290, 0xffffffff, 0x00120011,
+ 0x9294, 0xffffffff, 0x00000015,
+ 0x9298, 0xffffffff, 0x00140013,
+ 0x929c, 0xffffffff, 0x00170016,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x102c, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x264c, 0x000c0000, 0x000c0000,
+ 0x2648, 0x000c0000, 0x000c0000,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x55e8, 0x00000001, 0x00000001,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 pitcairn_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x9200, 0xffffffff, 0x00090008,
+ 0x9204, 0xffffffff, 0x000b000a,
+ 0x9208, 0xffffffff, 0x000c000f,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x00110010,
+ 0x9214, 0xffffffff, 0x000a0009,
+ 0x9218, 0xffffffff, 0x000c000b,
+ 0x921c, 0xffffffff, 0x0000000f,
+ 0x9220, 0xffffffff, 0x000e000d,
+ 0x9224, 0xffffffff, 0x00110010,
+ 0x9228, 0xffffffff, 0x000b000a,
+ 0x922c, 0xffffffff, 0x000d000c,
+ 0x9230, 0xffffffff, 0x00000010,
+ 0x9234, 0xffffffff, 0x000f000e,
+ 0x9238, 0xffffffff, 0x00120011,
+ 0x923c, 0xffffffff, 0x000c000b,
+ 0x9240, 0xffffffff, 0x000e000d,
+ 0x9244, 0xffffffff, 0x00000011,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x924c, 0xffffffff, 0x00130012,
+ 0x9250, 0xffffffff, 0x000d000c,
+ 0x9254, 0xffffffff, 0x000f000e,
+ 0x9258, 0xffffffff, 0x00100013,
+ 0x925c, 0xffffffff, 0x00120011,
+ 0x9260, 0xffffffff, 0x00150014,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x102c, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x55e8, 0x00000001, 0x00000001,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x9200, 0xffffffff, 0x00090008,
+ 0x9204, 0xffffffff, 0x000b000a,
+ 0x9208, 0xffffffff, 0x000c000f,
+ 0x920c, 0xffffffff, 0x000e000d,
+ 0x9210, 0xffffffff, 0x00110010,
+ 0x9214, 0xffffffff, 0x000a0009,
+ 0x9218, 0xffffffff, 0x000c000b,
+ 0x921c, 0xffffffff, 0x0000000f,
+ 0x9220, 0xffffffff, 0x000e000d,
+ 0x9224, 0xffffffff, 0x00110010,
+ 0x9228, 0xffffffff, 0x000b000a,
+ 0x922c, 0xffffffff, 0x000d000c,
+ 0x9230, 0xffffffff, 0x00000010,
+ 0x9234, 0xffffffff, 0x000f000e,
+ 0x9238, 0xffffffff, 0x00120011,
+ 0x923c, 0xffffffff, 0x000c000b,
+ 0x9240, 0xffffffff, 0x000e000d,
+ 0x9244, 0xffffffff, 0x00000011,
+ 0x9248, 0xffffffff, 0x0010000f,
+ 0x924c, 0xffffffff, 0x00130012,
+ 0x9250, 0xffffffff, 0x000d000c,
+ 0x9254, 0xffffffff, 0x000f000e,
+ 0x9258, 0xffffffff, 0x00100013,
+ 0x925c, 0xffffffff, 0x00120011,
+ 0x9260, 0xffffffff, 0x00150014,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x102c, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x264c, 0x000c0000, 0x000c0000,
+ 0x2648, 0x000c0000, 0x000c0000,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x55e8, 0x00000001, 0x00000001,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 oland_mgcg_cgcg_init[] =
+{
+ 0xc400, 0xffffffff, 0xfffffffc,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9a60, 0xffffffff, 0x00000100,
+ 0x92a4, 0xffffffff, 0x00000100,
+ 0xc164, 0xffffffff, 0x00000100,
+ 0x9774, 0xffffffff, 0x00000100,
+ 0x8984, 0xffffffff, 0x06000100,
+ 0x8a18, 0xffffffff, 0x00000100,
+ 0x92a0, 0xffffffff, 0x00000100,
+ 0xc380, 0xffffffff, 0x00000100,
+ 0x8b28, 0xffffffff, 0x00000100,
+ 0x9144, 0xffffffff, 0x00000100,
+ 0x8d88, 0xffffffff, 0x00000100,
+ 0x8d8c, 0xffffffff, 0x00000100,
+ 0x9030, 0xffffffff, 0x00000100,
+ 0x9034, 0xffffffff, 0x00000100,
+ 0x9038, 0xffffffff, 0x00000100,
+ 0x903c, 0xffffffff, 0x00000100,
+ 0xad80, 0xffffffff, 0x00000100,
+ 0xac54, 0xffffffff, 0x00000100,
+ 0x897c, 0xffffffff, 0x06000100,
+ 0x9868, 0xffffffff, 0x00000100,
+ 0x9510, 0xffffffff, 0x00000100,
+ 0xaf04, 0xffffffff, 0x00000100,
+ 0xae04, 0xffffffff, 0x00000100,
+ 0x949c, 0xffffffff, 0x00000100,
+ 0x802c, 0xffffffff, 0xe0000000,
+ 0x9160, 0xffffffff, 0x00010000,
+ 0x9164, 0xffffffff, 0x00030002,
+ 0x9168, 0xffffffff, 0x00040007,
+ 0x916c, 0xffffffff, 0x00060005,
+ 0x9170, 0xffffffff, 0x00090008,
+ 0x9174, 0xffffffff, 0x00020001,
+ 0x9178, 0xffffffff, 0x00040003,
+ 0x917c, 0xffffffff, 0x00000007,
+ 0x9180, 0xffffffff, 0x00060005,
+ 0x9184, 0xffffffff, 0x00090008,
+ 0x9188, 0xffffffff, 0x00030002,
+ 0x918c, 0xffffffff, 0x00050004,
+ 0x9190, 0xffffffff, 0x00000008,
+ 0x9194, 0xffffffff, 0x00070006,
+ 0x9198, 0xffffffff, 0x000a0009,
+ 0x919c, 0xffffffff, 0x00040003,
+ 0x91a0, 0xffffffff, 0x00060005,
+ 0x91a4, 0xffffffff, 0x00000009,
+ 0x91a8, 0xffffffff, 0x00080007,
+ 0x91ac, 0xffffffff, 0x000b000a,
+ 0x91b0, 0xffffffff, 0x00050004,
+ 0x91b4, 0xffffffff, 0x00070006,
+ 0x91b8, 0xffffffff, 0x0008000b,
+ 0x91bc, 0xffffffff, 0x000a0009,
+ 0x91c0, 0xffffffff, 0x000d000c,
+ 0x91c4, 0xffffffff, 0x00060005,
+ 0x91c8, 0xffffffff, 0x00080007,
+ 0x91cc, 0xffffffff, 0x0000000b,
+ 0x91d0, 0xffffffff, 0x000a0009,
+ 0x91d4, 0xffffffff, 0x000d000c,
+ 0x9150, 0xffffffff, 0x96940200,
+ 0x8708, 0xffffffff, 0x00900100,
+ 0xc478, 0xffffffff, 0x00000080,
+ 0xc404, 0xffffffff, 0x0020003f,
+ 0x30, 0xffffffff, 0x0000001c,
+ 0x34, 0x000f0000, 0x000f0000,
+ 0x160c, 0xffffffff, 0x00000100,
+ 0x1024, 0xffffffff, 0x00000100,
+ 0x102c, 0x00000101, 0x00000000,
+ 0x20a8, 0xffffffff, 0x00000104,
+ 0x264c, 0x000c0000, 0x000c0000,
+ 0x2648, 0x000c0000, 0x000c0000,
+ 0x55e4, 0xff000fff, 0x00000100,
+ 0x55e8, 0x00000001, 0x00000001,
+ 0x2f50, 0x00000001, 0x00000001,
+ 0x30cc, 0xc0000fff, 0x00000104,
+ 0xc1e4, 0x00000001, 0x00000001,
+ 0xd0c0, 0xfffffff0, 0x00000100,
+ 0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_pg_init[] =
+{
+ 0x353c, 0xffffffff, 0x40000,
+ 0x3538, 0xffffffff, 0x200010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x7007,
+ 0x3538, 0xffffffff, 0x300010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x400000,
+ 0x3538, 0xffffffff, 0x100010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x120200,
+ 0x3538, 0xffffffff, 0x500010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x1e1e16,
+ 0x3538, 0xffffffff, 0x600010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x171f1e,
+ 0x3538, 0xffffffff, 0x700010ff,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x353c, 0xffffffff, 0x0,
+ 0x3538, 0xffffffff, 0x9ff,
+ 0x3500, 0xffffffff, 0x0,
+ 0x3504, 0xffffffff, 0x10000800,
+ 0x3504, 0xffffffff, 0xf,
+ 0x3504, 0xffffffff, 0xf,
+ 0x3500, 0xffffffff, 0x4,
+ 0x3504, 0xffffffff, 0x1000051e,
+ 0x3504, 0xffffffff, 0xffff,
+ 0x3504, 0xffffffff, 0xffff,
+ 0x3500, 0xffffffff, 0x8,
+ 0x3504, 0xffffffff, 0x80500,
+ 0x3500, 0xffffffff, 0x12,
+ 0x3504, 0xffffffff, 0x9050c,
+ 0x3500, 0xffffffff, 0x1d,
+ 0x3504, 0xffffffff, 0xb052c,
+ 0x3500, 0xffffffff, 0x2a,
+ 0x3504, 0xffffffff, 0x1053e,
+ 0x3500, 0xffffffff, 0x2d,
+ 0x3504, 0xffffffff, 0x10546,
+ 0x3500, 0xffffffff, 0x30,
+ 0x3504, 0xffffffff, 0xa054e,
+ 0x3500, 0xffffffff, 0x3c,
+ 0x3504, 0xffffffff, 0x1055f,
+ 0x3500, 0xffffffff, 0x3f,
+ 0x3504, 0xffffffff, 0x10567,
+ 0x3500, 0xffffffff, 0x42,
+ 0x3504, 0xffffffff, 0x1056f,
+ 0x3500, 0xffffffff, 0x45,
+ 0x3504, 0xffffffff, 0x10572,
+ 0x3500, 0xffffffff, 0x48,
+ 0x3504, 0xffffffff, 0x20575,
+ 0x3500, 0xffffffff, 0x4c,
+ 0x3504, 0xffffffff, 0x190801,
+ 0x3500, 0xffffffff, 0x67,
+ 0x3504, 0xffffffff, 0x1082a,
+ 0x3500, 0xffffffff, 0x6a,
+ 0x3504, 0xffffffff, 0x1b082d,
+ 0x3500, 0xffffffff, 0x87,
+ 0x3504, 0xffffffff, 0x310851,
+ 0x3500, 0xffffffff, 0xba,
+ 0x3504, 0xffffffff, 0x891,
+ 0x3500, 0xffffffff, 0xbc,
+ 0x3504, 0xffffffff, 0x893,
+ 0x3500, 0xffffffff, 0xbe,
+ 0x3504, 0xffffffff, 0x20895,
+ 0x3500, 0xffffffff, 0xc2,
+ 0x3504, 0xffffffff, 0x20899,
+ 0x3500, 0xffffffff, 0xc6,
+ 0x3504, 0xffffffff, 0x2089d,
+ 0x3500, 0xffffffff, 0xca,
+ 0x3504, 0xffffffff, 0x8a1,
+ 0x3500, 0xffffffff, 0xcc,
+ 0x3504, 0xffffffff, 0x8a3,
+ 0x3500, 0xffffffff, 0xce,
+ 0x3504, 0xffffffff, 0x308a5,
+ 0x3500, 0xffffffff, 0xd3,
+ 0x3504, 0xffffffff, 0x6d08cd,
+ 0x3500, 0xffffffff, 0x142,
+ 0x3504, 0xffffffff, 0x2000095a,
+ 0x3504, 0xffffffff, 0x1,
+ 0x3500, 0xffffffff, 0x144,
+ 0x3504, 0xffffffff, 0x301f095b,
+ 0x3500, 0xffffffff, 0x165,
+ 0x3504, 0xffffffff, 0xc094d,
+ 0x3500, 0xffffffff, 0x173,
+ 0x3504, 0xffffffff, 0xf096d,
+ 0x3500, 0xffffffff, 0x184,
+ 0x3504, 0xffffffff, 0x15097f,
+ 0x3500, 0xffffffff, 0x19b,
+ 0x3504, 0xffffffff, 0xc0998,
+ 0x3500, 0xffffffff, 0x1a9,
+ 0x3504, 0xffffffff, 0x409a7,
+ 0x3500, 0xffffffff, 0x1af,
+ 0x3504, 0xffffffff, 0xcdc,
+ 0x3500, 0xffffffff, 0x1b1,
+ 0x3504, 0xffffffff, 0x800,
+ 0x3508, 0xffffffff, 0x6c9b2000,
+ 0x3510, 0xfc00, 0x2000,
+ 0x3544, 0xffffffff, 0xfc0,
+ 0x28d4, 0x00000100, 0x100
+};
+
+static void si_init_golden_registers(struct radeon_device *rdev)
+{
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ radeon_program_register_sequence(rdev,
+ tahiti_golden_registers,
+ (const u32)ARRAY_SIZE(tahiti_golden_registers));
+ radeon_program_register_sequence(rdev,
+ tahiti_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
+ radeon_program_register_sequence(rdev,
+ tahiti_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+ radeon_program_register_sequence(rdev,
+ tahiti_golden_registers2,
+ (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+ break;
+ case CHIP_PITCAIRN:
+ radeon_program_register_sequence(rdev,
+ pitcairn_golden_registers,
+ (const u32)ARRAY_SIZE(pitcairn_golden_registers));
+ radeon_program_register_sequence(rdev,
+ pitcairn_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
+ radeon_program_register_sequence(rdev,
+ pitcairn_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+ break;
+ case CHIP_VERDE:
+ radeon_program_register_sequence(rdev,
+ verde_golden_registers,
+ (const u32)ARRAY_SIZE(verde_golden_registers));
+ radeon_program_register_sequence(rdev,
+ verde_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
+ radeon_program_register_sequence(rdev,
+ verde_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
+ radeon_program_register_sequence(rdev,
+ verde_pg_init,
+ (const u32)ARRAY_SIZE(verde_pg_init));
+ break;
+ case CHIP_OLAND:
+ radeon_program_register_sequence(rdev,
+ oland_golden_registers,
+ (const u32)ARRAY_SIZE(oland_golden_registers));
+ radeon_program_register_sequence(rdev,
+ oland_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
+ radeon_program_register_sequence(rdev,
+ oland_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+ break;
+ default:
+ break;
+ }
+}
+
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
@@ -1211,6 +1999,7 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if ((rdev->family == CHIP_VERDE) ||
@@ -1451,6 +2240,7 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else
@@ -1463,7 +2253,7 @@ static void si_select_se_sh(struct radeon_device *rdev,
u32 data = INSTANCE_BROADCAST_WRITES;
if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
- data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
else if (se_num == 0xffffffff)
data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
else if (sh_num == 0xffffffff)
@@ -1765,9 +2555,13 @@ static void si_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+ WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
si_tiling_mode_table_init(rdev);
@@ -2538,46 +3332,6 @@ static void si_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}
-/* SI MC address space is 40 bits */
-static void si_vram_location(struct radeon_device *rdev,
- struct radeon_mc *mc, u64 base)
-{
- mc->vram_start = base;
- if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) {
- dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
- mc->real_vram_size = mc->aper_size;
- mc->mc_vram_size = mc->aper_size;
- }
- mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
- mc->mc_vram_size >> 20, mc->vram_start,
- mc->vram_end, mc->real_vram_size >> 20);
-}
-
-static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
-{
- u64 size_af, size_bf;
-
- size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
- size_bf = mc->vram_start & ~mc->gtt_base_align;
- if (size_bf > size_af) {
- if (mc->gtt_size > size_bf) {
- dev_warn(rdev->dev, "limiting GTT\n");
- mc->gtt_size = size_bf;
- }
- mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
- } else {
- if (mc->gtt_size > size_af) {
- dev_warn(rdev->dev, "limiting GTT\n");
- mc->gtt_size = size_af;
- }
- mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
- }
- mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
- dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
- mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
-}
-
static void si_vram_gtt_location(struct radeon_device *rdev,
struct radeon_mc *mc)
{
@@ -2587,9 +3341,9 @@ static void si_vram_gtt_location(struct radeon_device *rdev,
mc->real_vram_size = 0xFFC0000000ULL;
mc->mc_vram_size = 0xFFC0000000ULL;
}
- si_vram_location(rdev, &rdev->mc, 0);
+ radeon_vram_location(rdev, &rdev->mc, 0);
rdev->mc.gtt_base_align = 0;
- si_gtt_location(rdev, mc);
+ radeon_gtt_location(rdev, mc);
}
static int si_mc_init(struct radeon_device *rdev)
@@ -4322,14 +5076,6 @@ static int si_startup(struct radeon_device *rdev)
return r;
si_gpu_init(rdev);
-#if 0
- r = evergreen_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
-#endif
/* allocate rlc buffers */
r = si_rlc_init(rdev);
if (r) {
@@ -4372,6 +5118,16 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
+ r = rv770_uvd_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
+ if (r)
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
/* Enable IRQ */
r = si_irq_init(rdev);
if (r) {
@@ -4429,6 +5185,18 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ if (ring->ring_size) {
+ r = radeon_ring_init(rdev, ring, ring->ring_size,
+ R600_WB_UVD_RPTR_OFFSET,
+ UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (!r)
+ r = r600_uvd_init(rdev);
+ if (r)
+ DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+ }
+
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -4455,6 +5223,9 @@ int si_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ /* init golden registers */
+ si_init_golden_registers(rdev);
+
rdev->accel_working = true;
r = si_startup(rdev);
if (r) {
@@ -4472,6 +5243,8 @@ int si_suspend(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
si_cp_enable(rdev, false);
cayman_dma_stop(rdev);
+ r600_uvd_rbc_stop(rdev);
+ radeon_uvd_suspend(rdev);
si_irq_suspend(rdev);
radeon_wb_disable(rdev);
si_pcie_gart_disable(rdev);
@@ -4512,6 +5285,8 @@ int si_init(struct radeon_device *rdev)
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
+ /* init golden registers */
+ si_init_golden_registers(rdev);
/* Initialize scratch registers */
si_scratch_init(rdev);
/* Initialize surface registers */
@@ -4557,6 +5332,13 @@ int si_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 64 * 1024);
+ r = radeon_uvd_init(rdev);
+ if (!r) {
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 4096);
+ }
+
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -4594,9 +5376,6 @@ int si_init(struct radeon_device *rdev)
void si_fini(struct radeon_device *rdev)
{
-#if 0
- r600_blit_fini(rdev);
-#endif
si_cp_fini(rdev);
cayman_dma_fini(rdev);
si_irq_fini(rdev);
@@ -4605,6 +5384,7 @@ void si_fini(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
+ radeon_uvd_fini(rdev);
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
@@ -4634,3 +5414,94 @@ uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
+
+int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+ unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+ int r;
+
+ /* bypass vclk and dclk with bclk */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ /* put PLL in bypass mode */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+ if (!vclk || !dclk) {
+ /* keep the bypass mode and put the PLL to sleep */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ return 0;
+ }
+
+ r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+ 16384, 0x03FFFFFF, 0, 128, 5,
+ &fb_div, &vclk_div, &dclk_div);
+ if (r)
+ return r;
+
+ /* set RESET_ANTI_MUX to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
+
+ /* set VCO_MODE to 1 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+ /* toggle UPLL_SLEEP to 1 then back to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+ /* deassert UPLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(1);
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* assert UPLL_RESET again */
+ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+ /* disable spread spectrum. */
+ WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+ /* set feedback divider */
+ WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+ /* set ref divider to 0 */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+ if (fb_div < 307200)
+ WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+ else
+ WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
+
+ /* set PDIV_A and PDIV_B */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+ ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+ /* give the PLL some time to settle */
+ mdelay(15);
+
+ /* deassert PLL_RESET */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+ mdelay(15);
+
+ /* switch from bypass mode to normal mode */
+ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+ r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+ if (r)
+ return r;
+
+ /* switch VCLK and DCLK selection */
+ WREG32_P(CG_UPLL_FUNC_CNTL_2,
+ VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+ ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+ mdelay(100);
+
+ return 0;
+}
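Everything in si_set_uvd_clocks() is done through WREG32_P(), radeon's masked read-modify-write macro: the third argument names the bits to preserve, so passing ~FIELD_MASK restricts the write to that one field. The sketch below spells out those semantics and restates the first call in the function; it is believed to mirror the WREG32_P definition in radeon.h but should be checked there before being relied on, and the example call only makes sense inside si_set_uvd_clocks() where rdev is in scope.

/* Masked read-modify-write, as WREG32_P is understood here:
 * keep the bits in 'mask', OR in the new value outside of it. */
#define WREG32_P_SKETCH(reg, val, mask)			\
	do {						\
		u32 tmp_ = RREG32(reg);			\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32(reg, tmp_);			\
	} while (0)

/* Example: route VCLK and DCLK to bypass source 1 without touching any
 * other bit of CG_UPLL_FUNC_CNTL_2. */
WREG32_P_SKETCH(CG_UPLL_FUNC_CNTL_2,
		VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

radeon_uvd_calc_upll_dividers(), the shared helper in radeon_uvd.c, then picks the feedback divider and the two post dividers so the VCO stays inside the 125000-250000 window passed here (presumably in the same 10 kHz units as vclk/dclk, i.e. roughly 1.25-2.5 GHz), and the programmed values are committed through the CTLREQ handshake in radeon_uvd_send_upll_ctlreq().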
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 23fc08fc8e7f..222877ba6cf5 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -29,6 +29,35 @@
#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL 0x634
+# define UPLL_RESET_MASK 0x00000001
+# define UPLL_SLEEP_MASK 0x00000002
+# define UPLL_BYPASS_EN_MASK 0x00000004
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_VCO_MODE_MASK 0x00000600
+# define UPLL_REF_DIV_MASK 0x003F0000
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+#define CG_UPLL_FUNC_CNTL_2 0x638
+# define UPLL_PDIV_A(x) ((x) << 0)
+# define UPLL_PDIV_A_MASK 0x0000007F
+# define UPLL_PDIV_B(x) ((x) << 8)
+# define UPLL_PDIV_B_MASK 0x00007F00
+# define VCLK_SRC_SEL(x) ((x) << 20)
+# define VCLK_SRC_SEL_MASK 0x01F00000
+# define DCLK_SRC_SEL(x) ((x) << 25)
+# define DCLK_SRC_SEL_MASK 0x3E000000
+#define CG_UPLL_FUNC_CNTL_3 0x63C
+# define UPLL_FB_DIV(x) ((x) << 0)
+# define UPLL_FB_DIV_MASK 0x01FFFFFF
+#define CG_UPLL_FUNC_CNTL_4 0x644
+# define UPLL_SPARE_ISPARE9 0x00020000
+#define CG_UPLL_FUNC_CNTL_5 0x648
+# define RESET_ANTI_MUX_MASK 0x00000200
+#define CG_UPLL_SPREAD_SPECTRUM 0x650
+# define SSEN_MASK 0x00000001
+
#define CG_MULT_THERMAL_STATUS 0x714
#define ASIC_MAX_TEMP(x) ((x) << 0)
#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -65,6 +94,8 @@
#define DMIF_ADDR_CONFIG 0xBD4
+#define DMIF_ADDR_CALC 0xC00
+
#define SRBM_STATUS 0xE50
#define GRBM_RQ_PENDING (1 << 5)
#define VMC_BUSY (1 << 8)
@@ -798,6 +829,15 @@
# define THREAD_TRACE_FINISH (55 << 0)
/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG 0xEF4C
+#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
+#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
+#define UVD_RBC_RB_RPTR 0xF690
+#define UVD_RBC_RB_WPTR 0xF694
+
+/*
* PM4
*/
#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \