Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r-- drivers/gpu/drm/radeon/Kconfig | 33
-rw-r--r-- drivers/gpu/drm/radeon/Makefile | 10
-rw-r--r-- drivers/gpu/drm/radeon/atom.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 401
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_cs.c | 1161
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_hdmi.c | 85
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_reg.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/evergreend.h | 68
-rw-r--r-- drivers/gpu/drm/radeon/ni.c | 337
-rw-r--r-- drivers/gpu/drm/radeon/nid.h | 30
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 224
-rw-r--r-- drivers/gpu/drm/radeon/r100_track.h | 4
-rw-r--r-- drivers/gpu/drm/radeon/r100d.h | 11
-rw-r--r-- drivers/gpu/drm/radeon/r200.c | 26
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 42
-rw-r--r-- drivers/gpu/drm/radeon/r300_cmdbuf.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r300d.h | 11
-rw-r--r-- drivers/gpu/drm/radeon/r500_reg.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 425
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit.c | 33
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit_kms.c | 31
-rw-r--r-- drivers/gpu/drm/radeon/r600_cp.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r600_cs.c | 399
-rw-r--r-- drivers/gpu/drm/radeon/r600_hdmi.c | 135
-rw-r--r-- drivers/gpu/drm/radeon/r600d.h | 17
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 46
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.c | 68
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 28
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atpx_handler.c | 73
-rw-r--r-- drivers/gpu/drm/radeon/radeon_combios.c | 59
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cp.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 187
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cursor.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 41
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 21
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 98
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.h | 16
-rw-r--r-- drivers/gpu/drm/radeon/radeon_family.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fb.c | 27
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gart.c | 60
-rw-r--r-- drivers/gpu/drm/radeon/radeon_i2c.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_irq.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mem.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 18
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_prime.c | 169
-rw-r--r-- drivers/gpu/drm/radeon/radeon_reg.h | 15
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ring.c | 48
-rw-r--r-- drivers/gpu/drm/radeon/radeon_semaphore.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_state.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/cayman | 1
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/rv515 | 2
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rv515d.h | 11
-rw-r--r-- drivers/gpu/drm/radeon/rv770.c | 99
-rw-r--r-- drivers/gpu/drm/radeon/rv770d.h | 4
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 495
-rw-r--r-- drivers/gpu/drm/radeon/sid.h | 48
64 files changed, 3004 insertions, 2207 deletions
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index ea92bbe3ed37..970f8e92dbb7 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,31 +1,8 @@
-config DRM_RADEON_KMS
- bool "Enable modesetting on radeon by default - NEW DRIVER"
+config DRM_RADEON_UMS
+ bool "Enable userspace modesetting on radeon (DEPRECATED)"
depends on DRM_RADEON
- select BACKLIGHT_CLASS_DEVICE
help
- Choose this option if you want kernel modesetting enabled by default.
+ Choose this option if you still need userspace modesetting.
- This is a completely new driver. It's only part of the existing drm
- for compatibility reasons. It requires an entirely different graphics
- stack above it and works very differently from the old drm stack.
- i.e. don't enable this unless you know what you are doing it may
- cause issues or bugs compared to the previous userspace driver stack.
-
- When kernel modesetting is enabled the IOCTL of radeon/drm
- driver are considered as invalid and an error message is printed
- in the log and they return failure.
-
- KMS enabled userspace will use new API to talk with the radeon/drm
- driver. The new API provide functions to create/destroy/share/mmap
- buffer object which are then managed by the kernel memory manager
- (here TTM). In order to submit command to the GPU the userspace
- provide a buffer holding the command stream, along this buffer
- userspace have to provide a list of buffer object used by the
- command stream. The kernel radeon driver will then place buffer
- in GPU accessible memory and will update command stream to reflect
- the position of the different buffers.
-
- The kernel will also perform security check on command stream
- provided by the user, we want to catch and forbid any illegal use
- of the GPU such as DMA into random system memory or into memory
- not owned by the process supplying the command stream.
+ Userspace modesetting is deprecated for quite some time now, so
+ enable this only if you have ancient versions of the DDX drivers.
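
[editorial note] The new CONFIG_DRM_RADEON_UMS symbol makes the legacy userspace-modesetting support optional at build time. As a rough sketch (the guard placement and helper name below are illustrative, not taken from this commit), driver code can then compile the old DRI1-style paths out entirely:

#ifdef CONFIG_DRM_RADEON_UMS
	/* legacy path: keep the old DRI1-style ioctls available */
	ret = radeon_ums_init(dev);	/* hypothetical helper */
#else
	ret = -EINVAL;			/* KMS-only build: interface gone */
#endif
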
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index a6598fd66423..bf172522ea68 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -56,8 +56,12 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
-radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
- radeon_irq.o r300_cmdbuf.o r600_cp.o
+radeon-y := radeon_drv.o
+
+# add UMS driver
+radeon-$(CONFIG_DRM_RADEON_UMS)+= radeon_cp.o radeon_state.o radeon_mem.o \
+ radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o
+
# add KMS driver
radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
@@ -67,7 +71,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
- r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
+ r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 5ce9bf51a8de..46a9c3772850 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1238,6 +1238,8 @@ static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
static void atom_index_iio(struct atom_context *ctx, int base)
{
ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
+ if (!ctx->iio)
+ return;
while (CU8(base) == ATOM_IIO_START) {
ctx->iio[CU8(base + 1)] = base + 2;
base += 2;
@@ -1287,6 +1289,10 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
+ if (!ctx->iio) {
+ atom_destroy(ctx);
+ return NULL;
+ }
str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
while (*str && ((*str == '\n') || (*str == '\r')))
@@ -1335,8 +1341,7 @@ int atom_asic_init(struct atom_context *ctx)
void atom_destroy(struct atom_context *ctx)
{
- if (ctx->iio)
- kfree(ctx->iio);
+ kfree(ctx->iio);
kfree(ctx);
}
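
[editorial note] Two small idioms drive this atom.c hunk: an allocation failure in atom_index_iio() is surfaced by leaving ctx->iio NULL so atom_parse() can destroy the half-built context and return NULL, and the `if (ctx->iio) kfree(ctx->iio)` guard is dropped because kfree(NULL) is defined to be a no-op. The same pattern in isolation:

struct foo {
	u32 *table;
};

static int foo_init(struct foo *f)
{
	f->table = kzalloc(256 * sizeof(*f->table), GFP_KERNEL);
	if (!f->table)
		return -ENOMEM;		/* caller unwinds */
	return 0;
}

static void foo_fini(struct foo *f)
{
	kfree(f->table);	/* safe even when init failed: kfree(NULL) is a no-op */
}
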
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9175615bbd8a..21a892c6ab9c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -252,8 +252,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
- if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
- atombios_powergate_crtc(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
@@ -271,8 +269,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
- if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
- atombios_powergate_crtc(crtc, ATOM_ENABLE);
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
@@ -1844,6 +1840,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (ASIC_IS_DCE6(rdev))
+ atombios_powergate_crtc(crtc, ATOM_ENABLE);
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
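
[editorial note] The net effect in atombios_crtc.c is that DCE6 CRTC powergating no longer flips on every DPMS transition, which the old !radeon_crtc->in_mode_set guard had to work around; the gate is engaged once, after the full DPMS-off sequence, when the CRTC is actually torn down. Condensed from the hunk above:

static void atombios_crtc_disable(struct drm_crtc *crtc)
{
	/* run the normal blank/disable sequence first ... */
	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	/* ... and only then gate power to the CRTC block on DCE6 */
	if (ASIC_IS_DCE6(rdev))
		atombios_powergate_crtc(crtc, ATOM_ENABLE);
	/* remainder unchanged */
}
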
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f95d7fc1f5e0..3c38ea46531c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -403,6 +403,19 @@ void evergreen_pm_misc(struct radeon_device *rdev)
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
}
+
+ /* starting with BTC, there is one state that is used for both
+ * MH and SH. Difference is that we always use the high clock index for
+ * mclk and vddci.
+ */
+ if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+ (rdev->family >= CHIP_BARTS) &&
+ rdev->pm.active_crtc_count &&
+ ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+ (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+ voltage = &rdev->pm.power_state[req_ps_idx].
+ clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
+
/* 0xff01 is a flag rather then an actual voltage */
if (voltage->vddci == 0xff01)
return;
@@ -1313,14 +1326,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
}
/* wait for the next frame */
@@ -1345,6 +1362,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
blackout &= ~BLACKOUT_MODE_MASK;
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
+ /* wait for the MC to settle */
+ udelay(100);
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
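
[editorial note] Every blank/unblank toggle in evergreen_mc_stop()/evergreen_mc_resume() is now bracketed by EVERGREEN_CRTC_UPDATE_LOCK writes so the control-register change is latched atomically by the display hardware. A small helper capturing the pattern (the helper itself is illustrative; the diff open-codes it at each site):

static void evergreen_crtc_write_locked(struct radeon_device *rdev,
					u32 reg, u32 crtc_offset, u32 val)
{
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offset, 1);	/* lock */
	WREG32(reg + crtc_offset, val);				/* program */
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offset, 0);	/* unlock */
}
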
@@ -1378,11 +1397,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
@@ -2036,9 +2059,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG, gb_addr_config);
- tmp = gb_addr_config & NUM_PIPES_MASK;
- tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
- EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ if ((rdev->config.evergreen.max_backends == 1) &&
+ (rdev->flags & RADEON_IS_IGP)) {
+ if ((disabled_rb_mask & 3) == 1) {
+ /* RB0 disabled, RB1 enabled */
+ tmp = 0x11111111;
+ } else {
+ /* RB1 disabled, RB0 enabled */
+ tmp = 0x00000000;
+ }
+ } else {
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+ EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ }
WREG32(GB_BACKEND_MAP, tmp);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
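
[editorial note] For single-backend IGPs the generic r6xx_remap_render_backend() helper is skipped and GB_BACKEND_MAP is written directly. The register packs one 4-bit render-backend index per slot, which is why the all-RB1 value is 0x11111111 and the all-RB0 value is 0x00000000. A sketch of how such a constant could be derived (the helper and the 8-slot width are assumptions for illustration):

static u32 single_rb_backend_map(unsigned int rb_index)
{
	u32 map = 0;
	int i;

	/* replicate the one enabled RB index into every 4-bit slot */
	for (i = 0; i < 8; i++)
		map |= (rb_index & 0xf) << (i * 4);
	return map;	/* rb 1 -> 0x11111111, rb 0 -> 0x00000000 */
}
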
@@ -2287,42 +2321,18 @@ int evergreen_mc_init(struct radeon_device *rdev)
return 0;
}
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
{
- u32 srbm_status;
- u32 grbm_status;
- u32 grbm_status_se0, grbm_status_se1;
-
- srbm_status = RREG32(SRBM_STATUS);
- grbm_status = RREG32(GRBM_STATUS);
- grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
- grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
- if (!(grbm_status & GUI_ACTIVE)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
-static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
-{
- struct evergreen_mc_save save;
- u32 grbm_reset = 0;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
-
- dev_info(rdev->dev, "GPU softreset \n");
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " SRBM_STATUS2 = 0x%08X\n",
+ RREG32(SRBM_STATUS2));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
@@ -2331,58 +2341,283 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
- evergreen_mc_stop(rdev, &save);
- if (evergreen_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+ if (rdev->family >= CHIP_CAYMAN) {
+ dev_info(rdev->dev, " R_00D834_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG + 0x800));
+ }
+}
+
+bool evergreen_is_display_hung(struct radeon_device *rdev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[6];
+ u32 i, j, tmp;
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
+ crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
}
+
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+
+ return true;
+}
+
+static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask = 0;
+ u32 tmp;
+
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ SH_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ SPI_BUSY | VGT_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
+
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
+
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+ /* DMA_STATUS_REG */
+ tmp = RREG32(DMA_STATUS_REG);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
+
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
+
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ return reset_mask;
+}
+
+static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct evergreen_mc_save save;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
+
+ if (reset_mask == 0)
+ return;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ evergreen_print_gpu_status_regs(rdev);
+
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
- /* reset all the gfx blocks */
- grbm_reset = (SOFT_RESET_CP |
- SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_SPI |
- SOFT_RESET_SH |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VC |
- SOFT_RESET_VGT);
-
- dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
- WREG32(GRBM_SOFT_RESET, grbm_reset);
- (void)RREG32(GRBM_SOFT_RESET);
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+ }
+
udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- (void)RREG32(GRBM_SOFT_RESET);
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ grbm_soft_reset |= SOFT_RESET_DB |
+ SOFT_RESET_CB |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX |
+ SOFT_RESET_SH |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VC |
+ SOFT_RESET_VGT;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP |
+ SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
+
+ if (reset_mask & RADEON_RESET_DMA)
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+ }
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
+
/* Wait a little for things to settle down */
udelay(50);
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+
evergreen_mc_resume(rdev, &save);
- return 0;
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
}
int evergreen_asic_reset(struct radeon_device *rdev)
{
- return evergreen_gpu_soft_reset(rdev);
+ u32 reset_mask;
+
+ reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ evergreen_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * evergreen_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & RADEON_RESET_DMA)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
/* Interrupts */
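
[editorial note] The reset rework above splits the old monolithic soft reset into three layers: evergreen_gpu_check_soft_reset() converts busy/pending status bits into a RADEON_RESET_* mask, evergreen_gpu_soft_reset() maps that mask onto GRBM/SRBM soft-reset bits and pulses them, and evergreen_asic_reset() wraps the sequence with the BIOS scratch hung-engine bookkeeping (set before the reset, cleared only if a re-check comes back clean). The pulse itself follows a fixed recipe, condensed from the hunk:

/* assert, post, wait, deassert -- as in evergreen_gpu_soft_reset() */
tmp = RREG32(GRBM_SOFT_RESET);
tmp |= grbm_soft_reset;
WREG32(GRBM_SOFT_RESET, tmp);
tmp = RREG32(GRBM_SOFT_RESET);	/* read back to post the write */

udelay(50);			/* hold the blocks in reset briefly */

tmp &= ~grbm_soft_reset;
WREG32(GRBM_SOFT_RESET, tmp);
tmp = RREG32(GRBM_SOFT_RESET);	/* post the deassert too */

The per-ring lockup checks at the end reuse the same mask: a ring is only declared hung if the blocks backing it (GFX/COMPUTE/CP for the gfx ring, DMA for the async DMA ring) actually report busy.
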
@@ -3207,15 +3442,15 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* write the fence */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
radeon_ring_write(ring, fence->seq);
/* generate an interrupt */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
/* flush HDP */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL);
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
radeon_ring_write(ring, 1);
}
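
[editorial note] All DMA_PACKET() call sites shrink from four arguments to three because the separate tiled/new/misc header bits are merged into one sub-command field; the SRBM_WRITE payload is also fixed to carry HDP_MEM_COHERENCY_FLUSH_CNTL as a dword register index (hence the >> 2) rather than a byte offset. The corresponding evergreend.h encoding presumably looks like this (layout assumed from the new GET_DMA_SUB_CMD usage in the CS checker below; the header change itself is not part of this file's hunks):

/* assumed header layout: [31:28] cmd, [27:20] sub_cmd, [19:0] count */
#define DMA_PACKET(cmd, sub_cmd, n)	((((cmd) & 0xF) << 28) |	\
					 (((sub_cmd) & 0xFF) << 20) |	\
					 ((n) & 0xFFFFF))
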
@@ -3237,7 +3472,7 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
while ((next_rptr & 7) != 5)
next_rptr++;
next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
radeon_ring_write(ring, next_rptr);
@@ -3247,8 +3482,8 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
* Pad as necessary with NOPs.
*/
while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
@@ -3307,7 +3542,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
if (cur_size_in_dw > 0xFFFFF)
cur_size_in_dw = 0xFFFFF;
size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
radeon_ring_write(ring, dst_offset & 0xfffffffc);
radeon_ring_write(ring, src_offset & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
@@ -3415,7 +3650,7 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 7a445666e71f..99fb13286fd0 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -36,9 +36,6 @@
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
-static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-
struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
@@ -1009,223 +1006,35 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
}
/**
- * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-static int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx)
-{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- uint32_t header;
-
- if (idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- idx, ib_chunk->length_dw);
- return -EINVAL;
- }
- header = radeon_get_ib_value(p, idx);
- pkt->idx = idx;
- pkt->type = CP_PACKET_GET_TYPE(header);
- pkt->count = CP_PACKET_GET_COUNT(header);
- pkt->one_reg_wr = 0;
- switch (pkt->type) {
- case PACKET_TYPE0:
- pkt->reg = CP_PACKET0_GET_REG(header);
- break;
- case PACKET_TYPE3:
- pkt->opcode = CP_PACKET3_GET_OPCODE(header);
- break;
- case PACKET_TYPE2:
- pkt->count = -1;
- break;
- default:
- DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- return -EINVAL;
- }
- if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
- DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
- pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
+ * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
*
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- return -EINVAL;
- }
- /* FIXME: we assume reloc size is 4 dwords */
- *cs_reloc = p->relocs_ptr[(idx / 4)];
- return 0;
-}
-
-/**
- * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is NOP
- * @p: structure holding the parser context.
- *
- * Check if the next packet is a relocation packet3.
- **/
-static bool evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
-{
- struct radeon_cs_packet p3reloc;
- int r;
-
- r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return false;
- }
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- return false;
- }
- return true;
-}
-
-/**
- * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
- * @parser: parser structure holding parsing context.
- *
- * Userspace sends a special sequence for VLINE waits.
- * PACKET0 - VLINE_START_END + value
- * PACKET3 - WAIT_REG_MEM poll vline status reg
- * RELOC (P3) - crtc_id in reloc.
- *
- * This function parses this and relocates the VLINE START END
- * and WAIT_REG_MEM packets to the correct crtc.
- * It also detects a switched off crtc and nulls out the
- * wait in that case.
+ * This is an Evergreen(+)-specific function for parsing VLINE packets.
+ * Real work is done by r600_cs_common_vline_parse function.
+ * Here we just set up ASIC-specific register table and call
+ * the common implementation function.
*/
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- struct radeon_crtc *radeon_crtc;
- struct radeon_cs_packet p3reloc, wait_reg_mem;
- int crtc_id;
- int r;
- uint32_t header, h_idx, reg, wait_reg_mem_info;
- volatile uint32_t *ib;
-
- ib = p->ib.ptr;
-
- /* parse the WAIT_REG_MEM */
- r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
- if (r)
- return r;
-
- /* check its a WAIT_REG_MEM */
- if (wait_reg_mem.type != PACKET_TYPE3 ||
- wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
- DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
- return -EINVAL;
- }
-
- wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
- /* bit 4 is reg (0) or mem (1) */
- if (wait_reg_mem_info & 0x10) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
- return -EINVAL;
- }
- /* waiting for value to be equal */
- if ((wait_reg_mem_info & 0x7) != 0x3) {
- DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
- return -EINVAL;
- }
- if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
- DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
- return -EINVAL;
- }
-
- if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
- DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
- return -EINVAL;
- }
-
- /* jump over the NOP */
- r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
- if (r)
- return r;
- h_idx = p->idx - 2;
- p->idx += wait_reg_mem.count + 2;
- p->idx += p3reloc.count + 2;
-
- header = radeon_get_ib_value(p, h_idx);
- crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
- reg = CP_PACKET0_GET_REG(header);
- obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- DRM_ERROR("cannot find crtc %d\n", crtc_id);
- return -EINVAL;
- }
- crtc = obj_to_crtc(obj);
- radeon_crtc = to_radeon_crtc(crtc);
- crtc_id = radeon_crtc->crtc_id;
-
- if (!crtc->enabled) {
- /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
- ib[h_idx + 2] = PACKET2(0);
- ib[h_idx + 3] = PACKET2(0);
- ib[h_idx + 4] = PACKET2(0);
- ib[h_idx + 5] = PACKET2(0);
- ib[h_idx + 6] = PACKET2(0);
- ib[h_idx + 7] = PACKET2(0);
- ib[h_idx + 8] = PACKET2(0);
- } else {
- switch (reg) {
- case EVERGREEN_VLINE_START_END:
- header &= ~R600_CP_PACKET0_REG_MASK;
- header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
- ib[h_idx] = header;
- ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
- break;
- default:
- DRM_ERROR("unknown crtc reloc\n");
- return -EINVAL;
- }
- }
- return 0;
+ static uint32_t vline_start_end[6] = {
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
+ };
+ static uint32_t vline_status[6] = {
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
+ };
+
+ return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}
static int evergreen_packet0_check(struct radeon_cs_parser *p,
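
[editorial note] The VLINE refactor replaces roughly 120 lines of per-ASIC packet walking with two static register tables and one call into r600_cs_common_vline_parse(), which now owns the WAIT_REG_MEM validation and the CRTC relocation for every ASIC family. The essential step the common helper performs with the tables is the per-CRTC lookup (fragment is illustrative; only the call signature appears in this diff):

/* patch the IB to target the CRTC recovered from the reloc packet */
reg = vline_start_end[radeon_crtc->crtc_id];
status = vline_status[radeon_crtc->crtc_id];
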
@@ -1347,7 +1156,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_LSTMP_RING_BASE:
case SQ_PSTMP_RING_BASE:
case SQ_VSTMP_RING_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1376,7 +1185,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_Z_INFO:
track->db_z_info = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1418,7 +1227,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_Z_READ_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1430,7 +1239,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_Z_WRITE_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1442,7 +1251,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_STENCIL_READ_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1454,7 +1263,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_STENCIL_WRITE_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1477,7 +1286,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_1:
case VGT_STRMOUT_BUFFER_BASE_2:
case VGT_STRMOUT_BUFFER_BASE_3:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1499,7 +1308,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->streamout_dirty = true;
break;
case CP_COHER_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
"0x%04X\n", reg);
@@ -1563,7 +1372,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = (reg - CB_COLOR0_INFO) / 0x3c;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1581,7 +1390,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1642,7 +1451,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_ATTRIB:
case CB_COLOR6_ATTRIB:
case CB_COLOR7_ATTRIB:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1670,7 +1479,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
case CB_COLOR11_ATTRIB:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1703,7 +1512,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR6_FMASK:
case CB_COLOR7_FMASK:
tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1720,7 +1529,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR6_CMASK:
case CB_COLOR7_CMASK:
tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1758,7 +1567,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_BASE:
case CB_COLOR6_BASE:
case CB_COLOR7_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1774,7 +1583,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_BASE:
case CB_COLOR10_BASE:
case CB_COLOR11_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1787,7 +1596,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_dirty = true;
break;
case DB_HTILE_DATA_BASE:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1905,7 +1714,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_LS_13:
case SQ_ALU_CONST_CACHE_LS_14:
case SQ_ALU_CONST_CACHE_LS_15:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1919,7 +1728,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONFIG_REG "
"0x%04X\n", reg);
@@ -1933,7 +1742,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -2018,7 +1827,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
@@ -2064,7 +1873,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
@@ -2091,7 +1900,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
@@ -2119,7 +1928,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
@@ -2210,7 +2019,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DISPATCH_INDIRECT\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad DISPATCH_INDIRECT\n");
return -EINVAL;
@@ -2231,7 +2040,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x10) {
uint64_t offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
@@ -2243,6 +2052,9 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else if (idx_value & 0x100) {
+ DRM_ERROR("cannot use PFP on REG wait\n");
+ return -EINVAL;
}
break;
case PACKET3_CP_DMA:
@@ -2282,7 +2094,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
/* src address space is memory */
if (((info & 0x60000000) >> 29) == 0) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad CP DMA SRC\n");
return -EINVAL;
@@ -2320,7 +2132,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
if (((info & 0x00300000) >> 20) == 0) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad CP DMA DST\n");
return -EINVAL;
@@ -2354,7 +2166,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* 0xffffffff/0x0 is flush all cache flag */
if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
radeon_get_ib_value(p, idx + 2) != 0) {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SURFACE_SYNC\n");
return -EINVAL;
@@ -2370,7 +2182,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (pkt->count) {
uint64_t offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2391,7 +2203,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
@@ -2413,7 +2225,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
@@ -2480,7 +2292,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
@@ -2511,13 +2323,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
!mip_address &&
- !evergreen_cs_packet_next_is_pkt3_nop(p)) {
+ !radeon_cs_packet_next_is_pkt3_nop(p)) {
/* MIP_ADDRESS should point to FMASK for an MSAA texture.
* It should be 0 if FMASK is disabled. */
moffset = 0;
mipmap = NULL;
} else {
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
@@ -2536,7 +2348,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset64;
/* vtx base */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad SET_RESOURCE (vtx)\n");
return -EINVAL;
@@ -2618,7 +2430,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* Updating memory at DST_ADDRESS. */
if (idx_value & 0x1) {
u64 offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
@@ -2637,7 +2449,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* Reading data from SRC_ADDRESS. */
if (((idx_value >> 1) & 0x3) == 2) {
u64 offset;
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
@@ -2662,7 +2474,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
@@ -2691,7 +2503,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x1) {
u64 offset;
/* SRC is memory. */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad COPY_DW (missing src reloc)\n");
return -EINVAL;
@@ -2715,7 +2527,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x2) {
u64 offset;
/* DST is memory. */
- r = evergreen_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
@@ -2819,7 +2631,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
p->track = track;
}
do {
- r = evergreen_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
kfree(p->track);
p->track = NULL;
@@ -2827,12 +2639,12 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
r = evergreen_cs_parse_packet0(p, &pkt);
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
break;
- case PACKET_TYPE3:
+ case RADEON_PACKET_TYPE3:
r = evergreen_packet3_check(p, &pkt);
break;
default:
@@ -2858,16 +2670,6 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
return 0;
}
-/*
- * DMA
- */
-
-#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
-#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
-#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
-#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
-#define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20)
-
/**
* evergreen_dma_cs_parse() - parse the DMA IB
* @p: parser structure holding parsing context.
@@ -2881,9 +2683,9 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
- u32 header, cmd, count, tiled, new_cmd, misc;
+ u32 header, cmd, count, sub_cmd;
volatile u32 *ib = p->ib.ptr;
- u32 idx, idx_value;
+ u32 idx;
u64 src_offset, dst_offset, dst2_offset;
int r;
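
[editorial note] In the DMA command-stream checker the three ad-hoc header fields (tiled, new_cmd, misc) collapse into one sub_cmd value, turning each DMA_PACKET_* case into a flat switch on sub-command codes (0x00 for linear, 0x08 for tiled, and so on); the checker also switches from raw ib[] reads to radeon_get_ib_value() for the offsets it validates. The removed GET_DMA_T/GET_DMA_NEW/GET_DMA_MISC extractors are replaced by a single field extractor, presumably along these lines (mask assumed; it is the union of the three old bit ranges):

#define GET_DMA_CMD(h)		(((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h)	((h) & 0x000fffff)
#define GET_DMA_SUB_CMD(h)	(((h) & 0x0ff00000) >> 20)	/* bits 20-27: old T/NEW/MISC combined */
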
@@ -2897,9 +2699,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, idx);
cmd = GET_DMA_CMD(header);
count = GET_DMA_COUNT(header);
- tiled = GET_DMA_T(header);
- new_cmd = GET_DMA_NEW(header);
- misc = GET_DMA_MISC(header);
+ sub_cmd = GET_DMA_SUB_CMD(header);
switch (cmd) {
case DMA_PACKET_WRITE:
@@ -2908,19 +2708,27 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
- if (tiled) {
- dst_offset = ib[idx+1];
+ switch (sub_cmd) {
+ /* tiled */
+ case 8:
+ dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
p->idx += count + 7;
- } else {
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+ break;
+ /* linear */
+ case 0:
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
p->idx += count + 3;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
+ return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
@@ -2939,338 +2747,330 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_COPY\n");
return -EINVAL;
}
- if (tiled) {
- idx_value = radeon_get_ib_value(p, idx + 2);
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2T, frame to fields */
- if (idx_value & (1 << 31)) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = ib[idx+1];
- dst_offset <<= 8;
- dst2_offset = ib[idx+2];
- dst2_offset <<= 8;
- src_offset = ib[idx+8];
- src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 10;
- break;
- case 1:
- /* L2T, T2L partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
- return -EINVAL;
- }
- /* detile bit */
- if (idx_value & (1 << 31)) {
- /* tiled src, linear dst */
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- } else {
- /* linear src, tiled dst */
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- }
- p->idx += 12;
- break;
- case 3:
- /* L2T, broadcast */
- if (idx_value & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = ib[idx+1];
- dst_offset <<= 8;
- dst2_offset = ib[idx+2];
- dst2_offset <<= 8;
- src_offset = ib[idx+8];
- src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 10;
- break;
- case 4:
- /* L2T, T2L */
- /* detile bit */
- if (idx_value & (1 << 31)) {
- /* tiled src, linear dst */
- src_offset = ib[idx+1];
- src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
- dst_offset = ib[idx+7];
- dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- } else {
- /* linear src, tiled dst */
- src_offset = ib[idx+7];
- src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
- dst_offset = ib[idx+1];
- dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- }
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- p->idx += 9;
- break;
- case 5:
- /* T2T partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
- return -EINVAL;
- }
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
- ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- p->idx += 13;
- break;
- case 7:
- /* L2T, broadcast */
- if (idx_value & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = ib[idx+1];
- dst_offset <<= 8;
- dst2_offset = ib[idx+2];
- dst2_offset <<= 8;
- src_offset = ib[idx+8];
- src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 10;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
+ switch (sub_cmd) {
+ /* Copy L2L, DW aligned */
+ case 0x00:
+ /* L2L, dw */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ /* Copy L2T/T2L */
+ case 0x08:
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx + 7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
- switch (misc) {
- case 0:
- /* detile bit */
- if (idx_value & (1 << 31)) {
- /* tiled src, linear dst */
- src_offset = ib[idx+1];
- src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
- dst_offset = ib[idx+7];
- dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
- ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- } else {
- /* linear src, tiled dst */
- src_offset = ib[idx+7];
- src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
- ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
- dst_offset = ib[idx+1];
- dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
- }
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- p->idx += 9;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
- } else {
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2L, byte */
- src_offset = ib[idx+2];
- src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
- if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
- src_offset + count, radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
- dst_offset + count, radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 5;
- break;
- case 1:
- /* L2L, partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2L Partial is cayman only !\n");
- return -EINVAL;
- }
- ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
- ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-
- p->idx += 9;
- break;
- case 4:
- /* L2L, dw, broadcast */
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
- dst2_offset = ib[idx+2];
- dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
- src_offset = ib[idx+3];
- src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 7;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ /* Copy L2L, byte aligned */
+ case 0x40:
+ /* L2L, byte */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+ src_offset + count, radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+ dst_offset + count, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ /* Copy L2L, partial */
+ case 0x41:
+ /* L2L, partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+ p->idx += 9;
+ break;
+ /* Copy L2L, DW aligned, broadcast */
+ case 0x44:
+ /* L2L, dw, broadcast */
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+3);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 7;
+ break;
+ /* Copy L2T Frame to Field */
+ case 0x48:
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA L2T, frame to fields dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+				dev_warn(p->dev, "DMA L2T, frame to fields dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ /* Copy L2T/T2L, partial */
+ case 0x49:
+ /* L2T, T2L partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
- /* L2L, dw */
- src_offset = ib[idx+2];
- src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 5;
+ /* linear src, tiled dst */
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
+ p->idx += 12;
+ break;
+ /* Copy L2T broadcast */
+ case 0x4b:
+ /* L2T, broadcast */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ /* Copy L2T/T2L (tile units) */
+ case 0x4c:
+ /* L2T, T2L */
+ /* detile bit */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx+7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ /* Copy T2T, partial (tile units) */
+ case 0x4d:
+ /* T2T partial */
+ if (p->family < CHIP_CAYMAN) {
+			DRM_ERROR("T2T Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += 13;
+ break;
+ /* Copy L2T broadcast (tile units) */
+ case 0x4f:
+ /* L2T, broadcast */
+ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
+ return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
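
The new 8-bit sub_cmd field folds the old tiled/new/misc header bits into one switch value. Judging from the case values above (0x00, 0x08, 0x40, 0x41, 0x44, 0x48, 0x49, 0x4b, 0x4c, 0x4d, 0x4f), the mapping is as follows — a sketch, assuming the legacy GET_DMA_T/GET_DMA_NEW/GET_DMA_MISC getters extracted header bits 23, 26 and 22:20:

	/* hypothetical helper, not in the patch: rebuild a sub_cmd
	 * value from the legacy field split */
	static inline u32 dma_sub_cmd_from_legacy(u32 tiled, u32 new_cmd, u32 misc)
	{
		return ((new_cmd & 0x1) << 6) | ((tiled & 0x1) << 3) | (misc & 0x7);
	}

	/* dma_sub_cmd_from_legacy(1, 1, 0) == 0x48  (L2T frame to field)
	 * dma_sub_cmd_from_legacy(0, 1, 4) == 0x44  (L2L, dw, broadcast)
	 * dma_sub_cmd_from_legacy(1, 0, 0) == 0x08  (plain L2T/T2L)      */
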
@@ -3279,8 +3079,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
return -EINVAL;
}
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
dst_offset, radeon_bo_size(dst_reloc->robj));
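
Every branch above repeats the same extent check before patching in the relocated address: the end of the transfer, computed from the raw userspace dwords, must still land inside the relocated BO. A hypothetical helper (dma_check_extent() is not in the driver) makes the invariant explicit:

	static int dma_check_extent(struct radeon_cs_parser *p, const char *what,
				    u64 offset, u64 bytes, struct radeon_bo *robj)
	{
		/* offset comes from the IB as submitted, before relocation */
		if ((offset + bytes) > radeon_bo_size(robj)) {
			dev_warn(p->dev, "DMA %s buffer too small (%llu %lu)\n",
				 what, offset + bytes, radeon_bo_size(robj));
			return -EINVAL;
		}
		return 0;
	}
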
@@ -3583,19 +3383,19 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
do {
pkt.idx = idx;
- pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
- pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
pkt.one_reg_wr = 0;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
ret = -EINVAL;
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
idx += 1;
break;
- case PACKET_TYPE3:
- pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ case RADEON_PACKET_TYPE3:
+ pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
idx += pkt.count + 2;
break;
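
For reference, the RADEON_* getters used here are assumed to keep the bit layout of the per-chip CP_PACKET_* macros they replace (type in bits 31:30, count in 29:16, opcode in 15:8), so a type-3 header decodes like this:

	u32 header = 0xC0012800;              /* example dword                */
	u32 type   = (header >> 30) & 0x3;    /* 3 -> RADEON_PACKET_TYPE3     */
	u32 count  = (header >> 16) & 0x3FFF; /* 1 -> body is count+1 dwords  */
	u32 opcode = (header >> 8) & 0xFF;    /* 0x28                         */
	/* hence the idx += pkt.count + 2 advance: header plus count+1 dwords */
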
@@ -3623,88 +3423,79 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
u32 idx = 0;
- u32 header, cmd, count, tiled, new_cmd, misc;
+ u32 header, cmd, count, sub_cmd;
do {
header = ib->ptr[idx];
cmd = GET_DMA_CMD(header);
count = GET_DMA_COUNT(header);
- tiled = GET_DMA_T(header);
- new_cmd = GET_DMA_NEW(header);
- misc = GET_DMA_MISC(header);
+ sub_cmd = GET_DMA_SUB_CMD(header);
switch (cmd) {
case DMA_PACKET_WRITE:
- if (tiled)
+ switch (sub_cmd) {
+ /* tiled */
+ case 8:
idx += count + 7;
- else
+ break;
+ /* linear */
+ case 0:
idx += count + 3;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
+ return -EINVAL;
+ }
break;
case DMA_PACKET_COPY:
- if (tiled) {
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2T, frame to fields */
- idx += 10;
- break;
- case 1:
- /* L2T, T2L partial */
- idx += 12;
- break;
- case 3:
- /* L2T, broadcast */
- idx += 10;
- break;
- case 4:
- /* L2T, T2L */
- idx += 9;
- break;
- case 5:
- /* T2T partial */
- idx += 13;
- break;
- case 7:
- /* L2T, broadcast */
- idx += 10;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
- } else {
- switch (misc) {
- case 0:
- idx += 9;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
- }
- } else {
- if (new_cmd) {
- switch (misc) {
- case 0:
- /* L2L, byte */
- idx += 5;
- break;
- case 1:
- /* L2L, partial */
- idx += 9;
- break;
- case 4:
- /* L2L, dw, broadcast */
- idx += 7;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
- return -EINVAL;
- }
- } else {
- /* L2L, dw */
- idx += 5;
- }
+ switch (sub_cmd) {
+ /* Copy L2L, DW aligned */
+ case 0x00:
+ idx += 5;
+ break;
+ /* Copy L2T/T2L */
+ case 0x08:
+ idx += 9;
+ break;
+ /* Copy L2L, byte aligned */
+ case 0x40:
+ idx += 5;
+ break;
+ /* Copy L2L, partial */
+ case 0x41:
+ idx += 9;
+ break;
+ /* Copy L2L, DW aligned, broadcast */
+ case 0x44:
+ idx += 7;
+ break;
+ /* Copy L2T Frame to Field */
+ case 0x48:
+ idx += 10;
+ break;
+ /* Copy L2T/T2L, partial */
+ case 0x49:
+ idx += 12;
+ break;
+ /* Copy L2T broadcast */
+ case 0x4b:
+ idx += 10;
+ break;
+ /* Copy L2T/T2L (tile units) */
+ case 0x4c:
+ idx += 9;
+ break;
+ /* Copy T2T, partial (tile units) */
+ case 0x4d:
+ idx += 13;
+ break;
+ /* Copy L2T broadcast (tile units) */
+ case 0x4f:
+ idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
+ return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
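
A worked example of the skip logic, assuming the header layout introduced in evergreend.h below (cmd in bits 31:28, sub_cmd in 27:20, count in 19:0):

	u32 h = (0x2u << 28) | (0x0u << 20) | 5; /* DMA_PACKET_WRITE, linear, 5 dwords */
	/* h == 0x20000005: cmd 0x2, sub_cmd 0x00 -> idx += 5 + 3 = 8  */
	u32 t = (0x2u << 28) | (0x8u << 20) | 5; /* same write, tiled variant */
	/* t == 0x20800005: cmd 0x2, sub_cmd 0x08 -> idx += 5 + 7 = 12 */
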
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 327c08b54180..4fdecc2b4040 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -24,6 +24,7 @@
* Authors: Christian König
* Rafał Miłecki
*/
+#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -54,79 +55,18 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
}
/*
- * calculate the crc for a given info frame
- */
-static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
- uint8_t versionNumber,
- uint8_t length,
- uint8_t *frame)
-{
- int i;
- frame[0] = packetType + versionNumber + length;
- for (i = 1; i <= length; i++)
- frame[0] += frame[i];
- frame[0] = 0x100 - frame[0];
-}
-
-/*
* build a HDMI Video Info Frame
*/
-static void evergreen_hdmi_videoinfoframe(
- struct drm_encoder *encoder,
- uint8_t color_format,
- int active_information_present,
- uint8_t active_format_aspect_ratio,
- uint8_t scan_information,
- uint8_t colorimetry,
- uint8_t ex_colorimetry,
- uint8_t quantization,
- int ITC,
- uint8_t picture_aspect_ratio,
- uint8_t video_format_identification,
- uint8_t pixel_repetition,
- uint8_t non_uniform_picture_scaling,
- uint8_t bar_info_data_valid,
- uint16_t top_bar,
- uint16_t bottom_bar,
- uint16_t left_bar,
- uint16_t right_bar
-)
+static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+ void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
+ uint8_t *frame = buffer + 3;
- uint8_t frame[14];
-
- frame[0x0] = 0;
- frame[0x1] =
- (scan_information & 0x3) |
- ((bar_info_data_valid & 0x3) << 2) |
- ((active_information_present & 0x1) << 4) |
- ((color_format & 0x3) << 5);
- frame[0x2] =
- (active_format_aspect_ratio & 0xF) |
- ((picture_aspect_ratio & 0x3) << 4) |
- ((colorimetry & 0x3) << 6);
- frame[0x3] =
- (non_uniform_picture_scaling & 0x3) |
- ((quantization & 0x3) << 2) |
- ((ex_colorimetry & 0x7) << 4) |
- ((ITC & 0x1) << 7);
- frame[0x4] = (video_format_identification & 0x7F);
- frame[0x5] = (pixel_repetition & 0xF);
- frame[0x6] = (top_bar & 0xFF);
- frame[0x7] = (top_bar >> 8);
- frame[0x8] = (bottom_bar & 0xFF);
- frame[0x9] = (bottom_bar >> 8);
- frame[0xA] = (left_bar & 0xFF);
- frame[0xB] = (left_bar >> 8);
- frame[0xC] = (right_bar & 0xFF);
- frame[0xD] = (right_bar >> 8);
-
- evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
/* Our header values (type, version, length) should be alright, Intel
* is using the same. Checksum function also seems to be OK, it works
* fine for audio infoframe. However calculated value is always lower
@@ -154,7 +94,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
uint32_t offset;
+ ssize_t err;
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
@@ -200,9 +143,19 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
- evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+ evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
evergreen_hdmi_update_ACR(encoder, mode->clock);
	/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
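
The removed open-coded packer relied on the standard infoframe checksum rule, which hdmi_avi_infoframe_pack() now applies for us: every byte of the packed frame — header, payload and the checksum byte itself — must sum to zero mod 256. A minimal sketch of that rule (infoframe_checksum() is illustrative, not a kernel function):

	static u8 infoframe_checksum(const u8 *buf, size_t len)
	{
		u8 sum = 0;
		size_t i;

		for (i = 0; i < len; i++)  /* buf includes a zeroed checksum slot */
			sum += buf[i];
		return 0x100 - sum;        /* truncates to the mod-256 complement */
	}
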
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 034f4c22e5db..f585be16e2d5 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -223,6 +223,7 @@
#define EVERGREEN_CRTC_STATUS 0x6e8c
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index cb9baaac9e85..982d25ad9af3 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -729,6 +729,18 @@
#define WAIT_UNTIL 0x8040
#define SRBM_STATUS 0x0E50
+#define RLC_RQ_PENDING (1 << 3)
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define RLC_BUSY (1 << 15)
+#define IH_BUSY (1 << 17)
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
#define SRBM_SOFT_RESET 0x0E60
#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
#define SOFT_RESET_BIF (1 << 1)
@@ -742,8 +754,9 @@
#define SOFT_RESET_ROM (1 << 14)
#define SOFT_RESET_SEM (1 << 15)
#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
#define SOFT_RESET_TST (1 << 21)
-#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
/* display watermarks */
@@ -923,20 +936,23 @@
#define CAYMAN_DMA1_CNTL 0xd82c
/* async DMA packets */
-#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
- (((t) & 0x1) << 23) | \
- (((s) & 0x1) << 22) | \
- (((n) & 0xFFFFF) << 0))
+#define DMA_PACKET(cmd, sub_cmd, n) ((((cmd) & 0xF) << 28) | \
+ (((sub_cmd) & 0xFF) << 20) |\
+ (((n) & 0xFFFFF) << 0))
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_SUB_CMD(h) (((h) & 0x0ff00000) >> 20)
+
/* async DMA Packet types */
-#define DMA_PACKET_WRITE 0x2
-#define DMA_PACKET_COPY 0x3
-#define DMA_PACKET_INDIRECT_BUFFER 0x4
-#define DMA_PACKET_SEMAPHORE 0x5
-#define DMA_PACKET_FENCE 0x6
-#define DMA_PACKET_TRAP 0x7
-#define DMA_PACKET_SRBM_WRITE 0x9
-#define DMA_PACKET_CONSTANT_FILL 0xd
-#define DMA_PACKET_NOP 0xf
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
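
Round-trip sanity check for the reworked macros (the values are picked arbitrarily):

	u32 h = DMA_PACKET(DMA_PACKET_COPY, 0x44, 0x1000); /* h == 0x34401000 */
	/* GET_DMA_CMD(h)     == 0x3    (DMA_PACKET_COPY)
	 * GET_DMA_SUB_CMD(h) == 0x44   (L2L, dw, broadcast)
	 * GET_DMA_COUNT(h)   == 0x1000 (count field is 20 bits wide) */
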
@@ -979,16 +995,7 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -997,7 +1004,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
@@ -2027,4 +2034,15 @@
/* cayman packet3 addition */
#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
+/* DMA regs common on r6xx/r7xx/evergreen/ni */
+#define DMA_RB_CNTL 0xd000
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
+
#endif
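
Putting the new common ring-buffer defines to use — a sketch of how a DMA ring would be enabled with readptr writeback (the 64KB size and the order_base_2() derivation are illustrative; the driver computes rb_bufsz from the actual ring size):

	u32 rb_bufsz = order_base_2(65536 / 4);      /* ring size in dwords -> 14 */
	u32 rb_cntl  = DMA_RB_ENABLE |
		       DMA_RB_SIZE(rb_bufsz) |       /* log2 encoded */
		       DMA_RPTR_WRITEBACK_ENABLE |
		       DMA_RPTR_WRITEBACK_TIMER(6);  /* log2 encoded */
	WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
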
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7bdbcb00aaf2..7cead763be9e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -34,6 +34,8 @@
#include "ni_reg.h"
#include "cayman_blit_shaders.h"
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
@@ -1216,7 +1218,7 @@ void cayman_dma_stop(struct radeon_device *rdev)
int cayman_dma_resume(struct radeon_device *rdev)
{
struct radeon_ring *ring;
- u32 rb_cntl, dma_cntl;
+ u32 rb_cntl, dma_cntl, ib_cntl;
u32 rb_bufsz;
u32 reg_offset, wb_offset;
int i, r;
@@ -1265,7 +1267,11 @@ int cayman_dma_resume(struct radeon_device *rdev)
WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
/* enable DMA IBs */
- WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE);
+ ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
dma_cntl = RREG32(DMA_CNTL + reg_offset);
dma_cntl &= ~CTXEMPTY_INT_ENABLE;
@@ -1306,31 +1312,90 @@ void cayman_dma_fini(struct radeon_device *rdev)
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}
-static int cayman_gpu_soft_reset(struct radeon_device *rdev)
+static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask = 0;
+ u32 tmp;
+
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ SH_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ GDS_BUSY | SPI_BUSY |
+ IA_BUSY | IA_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
+
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
+
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+ /* DMA_STATUS_REG 0 */
+ tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* DMA_STATUS_REG 1 */
+ tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA1;
+
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
+
+ if (tmp & DMA1_BUSY)
+ reset_mask |= RADEON_RESET_DMA1;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
+
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
+
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ return reset_mask;
+}
+
+static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
- u32 grbm_reset = 0;
-
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
-
- dev_info(rdev->dev, "GPU softreset \n");
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
+
+ if (reset_mask == 0)
+ return;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ evergreen_print_gpu_status_regs(rdev);
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(0x14F8));
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
@@ -1340,60 +1405,158 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(0x14DC));
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ }
+
+ if (reset_mask & RADEON_RESET_DMA1) {
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ }
+
+ udelay(50);
+
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
}
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
- /* reset all the gfx blocks */
- grbm_reset = (SOFT_RESET_CP |
- SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_GDS |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_SPI |
- SOFT_RESET_SH |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VGT |
- SOFT_RESET_IA);
-
- dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
- WREG32(GRBM_SOFT_RESET, grbm_reset);
- (void)RREG32(GRBM_SOFT_RESET);
- udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- (void)RREG32(GRBM_SOFT_RESET);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ grbm_soft_reset = SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
+
+ if (reset_mask & RADEON_RESET_DMA)
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DMA1)
+ srbm_soft_reset |= SOFT_RESET_DMA1;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+ }
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
+
/* Wait a little for things to settle down */
udelay(50);
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
evergreen_mc_resume(rdev, &save);
- return 0;
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
}
int cayman_asic_reset(struct radeon_device *rdev)
{
- return cayman_gpu_soft_reset(rdev);
+ u32 reset_mask;
+
+ reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ cayman_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * cayman_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
/**
@@ -1402,18 +1565,20 @@ int cayman_asic_reset(struct radeon_device *rdev)
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
- * Check if the async DMA engine is locked up (cayman-SI).
+ * Check if the async DMA engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 dma_status_reg;
+ u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+ u32 mask;
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
- dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ mask = RADEON_RESET_DMA;
else
- dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
- if (dma_status_reg & DMA_IDLE) {
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
radeon_ring_lockup_update(ring);
return false;
}
@@ -1781,19 +1946,21 @@ uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
* cayman_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using the CP (cayman-si).
+ * Update the page tables using the CP (cayman/TN).
*/
-void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void cayman_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
@@ -1804,9 +1971,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
if (ndw > 0x3FFF)
ndw = 0x3FFF;
- radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 1; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -1818,8 +1985,8 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
@@ -1829,9 +1996,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
ndw = 0xFFFFE;
/* for non-physically contiguous pages (system) */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -1843,10 +2010,12 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}
}
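
Note the new tail loop above: DMA IBs are padded with header-only NOP packets until length_dw is a multiple of 8 (e.g. 13 dwords get 3 NOPs appended to reach 16), presumably to match the DMA engine's fetch granularity. As a hypothetical helper it would read:

	static void cayman_dma_pad_ib(struct radeon_ib *ib)
	{
		while (ib->length_dw & 0x7)
			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
	}
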
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index b93186b8ee4b..079dee202a9e 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -49,6 +49,16 @@
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
+#define RLC_RQ_PENDING (1 << 3)
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define RLC_BUSY (1 << 15)
+#define IH_BUSY (1 << 17)
#define SRBM_SOFT_RESET 0x0E60
#define SOFT_RESET_BIF (1 << 1)
@@ -65,9 +75,13 @@
#define SOFT_RESET_VMC (1 << 17)
#define SOFT_RESET_DMA (1 << 20)
#define SOFT_RESET_TST (1 << 21)
-#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
+#define DMA1_BUSY (1 << 6)
+
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
#define RESPONSE_TYPE_MASK 0x000000F0
@@ -474,16 +488,7 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -492,7 +497,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
@@ -675,4 +680,3 @@
#define DMA_PACKET_NOP 0xf
#endif
-
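
With the per-chip PACKET_TYPE*/getter copies gone, headers built with nid.h's PACKET0() decode through the shared radeon getters; for example, using VM_CONTEXT0_REQUEST_RESPONSE (0x1470) from above:

	u32 h = PACKET0(VM_CONTEXT0_REQUEST_RESPONSE, 0); /* h == 0x0000051C */
	/* (h >> 30) & 0x3   == RADEON_PACKET_TYPE0
	 * (h & 0xFFFF) << 2 == 0x1470  (register offset)
	 * (h >> 16) & 0x3FFF == 0      (single register write) */
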
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8ff7cac222dc..9db58530be37 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1215,11 +1215,11 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
struct radeon_cs_reloc *reloc;
u32 value;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1233,7 +1233,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
DRM_ERROR("Cannot src blit from microtiled surface\n");
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
tile_flags |= RADEON_DST_TILE_MICRO;
@@ -1263,16 +1263,16 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
if (c > 16) {
DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
track->num_arrays = c;
for (i = 0; i < (c - 1); i+=2, idx+=3) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
@@ -1281,11 +1281,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize &= 0x7F;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
@@ -1294,11 +1294,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track->arrays[i + 1].esize &= 0x7F;
}
if (c & 1) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
@@ -1355,67 +1355,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
return 0;
}
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt)
-{
- volatile uint32_t *ib;
- unsigned i;
- unsigned idx;
-
- ib = p->ib.ptr;
- idx = pkt->idx;
- for (i = 0; i <= (pkt->count + 1); i++, idx++) {
- DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
- }
-}
-
-/**
- * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-int r100_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx)
-{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- uint32_t header;
-
- if (idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- idx, ib_chunk->length_dw);
- return -EINVAL;
- }
- header = radeon_get_ib_value(p, idx);
- pkt->idx = idx;
- pkt->type = CP_PACKET_GET_TYPE(header);
- pkt->count = CP_PACKET_GET_COUNT(header);
- switch (pkt->type) {
- case PACKET_TYPE0:
- pkt->reg = CP_PACKET0_GET_REG(header);
- pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
- break;
- case PACKET_TYPE3:
- pkt->opcode = CP_PACKET3_GET_OPCODE(header);
- break;
- case PACKET_TYPE2:
- pkt->count = -1;
- break;
- default:
- DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- return -EINVAL;
- }
- if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
- DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
- pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- return -EINVAL;
- }
- return 0;
-}
-
/**
* r100_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
@@ -1444,7 +1383,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib = p->ib.ptr;
/* parse the wait until */
- r = r100_cs_packet_parse(p, &waitreloc, p->idx);
+ r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
if (r)
return r;
@@ -1461,7 +1400,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
}
/* jump over the NOP */
- r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
if (r)
return r;
@@ -1471,7 +1410,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
- reg = CP_PACKET0_GET_REG(header);
+ reg = R100_CP_PACKET0_GET_REG(header);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -1506,54 +1445,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
return 0;
}
-/**
- * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
- * @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = r100_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- r100_cs_dump_packet(p, &p3reloc);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- r100_cs_dump_packet(p, &p3reloc);
- return -EINVAL;
- }
- /* FIXME: we assume reloc size is 4 dwords */
- *cs_reloc = p->relocs_ptr[(idx / 4)];
- return 0;
-}
-
static int r100_get_vtx_size(uint32_t vtx_fmt)
{
int vtx_size;
@@ -1631,7 +1522,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -1644,11 +1535,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r;
break;
case RADEON_RB3D_DEPTHOFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
@@ -1657,11 +1548,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->cb[0].robj = reloc->robj;
@@ -1673,11 +1564,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_TXOFFSET_1:
case RADEON_PP_TXOFFSET_2:
i = (reg - RADEON_PP_TXOFFSET_0) / 24;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1700,11 +1591,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T0_3:
case RADEON_PP_CUBIC_OFFSET_T0_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[0].cube_info[i].offset = idx_value;
@@ -1718,11 +1609,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T1_3:
case RADEON_PP_CUBIC_OFFSET_T1_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[1].cube_info[i].offset = idx_value;
@@ -1736,11 +1627,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T2_3:
case RADEON_PP_CUBIC_OFFSET_T2_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[2].cube_info[i].offset = idx_value;
@@ -1754,11 +1645,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1825,11 +1716,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -1986,10 +1877,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
return r;
break;
case PACKET3_INDX_BUFFER:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
@@ -2000,10 +1891,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
break;
case 0x23:
/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
@@ -2100,37 +1991,36 @@ int r100_cs_parse(struct radeon_cs_parser *p)
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
- r = r100_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
return r;
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
- if (p->rdev->family >= CHIP_R200)
- r = r100_cs_parse_packet0(p, &pkt,
- p->rdev->config.r100.reg_safe_bm,
- p->rdev->config.r100.reg_safe_bm_size,
- &r200_packet0_check);
- else
- r = r100_cs_parse_packet0(p, &pkt,
- p->rdev->config.r100.reg_safe_bm,
- p->rdev->config.r100.reg_safe_bm_size,
- &r100_packet0_check);
- break;
- case PACKET_TYPE2:
- break;
- case PACKET_TYPE3:
- r = r100_packet3_check(p, &pkt);
- break;
- default:
- DRM_ERROR("Unknown packet type %d !\n",
- pkt.type);
- return -EINVAL;
+ case RADEON_PACKET_TYPE0:
+ if (p->rdev->family >= CHIP_R200)
+ r = r100_cs_parse_packet0(p, &pkt,
+ p->rdev->config.r100.reg_safe_bm,
+ p->rdev->config.r100.reg_safe_bm_size,
+ &r200_packet0_check);
+ else
+ r = r100_cs_parse_packet0(p, &pkt,
+ p->rdev->config.r100.reg_safe_bm,
+ p->rdev->config.r100.reg_safe_bm_size,
+ &r100_packet0_check);
+ break;
+ case RADEON_PACKET_TYPE2:
+ break;
+ case RADEON_PACKET_TYPE3:
+ r = r100_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n",
+ pkt.type);
+ return -EINVAL;
}
- if (r) {
+ if (r)
return r;
- }
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
return 0;
}
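
[Editor's note] The dispatch above is behavior-preserving: the per-file PACKET_TYPE* constants are simply renamed to the shared RADEON_PACKET_TYPE* ones. A worked example of the loop invariant p->idx += pkt.count + 2, not taken from this patch: a PACKET0 that writes three registers is encoded with count = 2 (payload dwords minus one), so the packet spans 1 header + 3 payload = count + 2 = 4 dwords; type-2 packets are forced to count = -1 by the parser and therefore advance by a single dword.
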
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 6a603b378adb..eb40888bdfcc 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -81,10 +81,6 @@ struct r100_cs_track {
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track);
-int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt);
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index eab91760fae0..f0f8ee69f480 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -64,17 +64,6 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
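
[Editor's note] The same macro block is dropped from r300d.h and rv515d.h below; the replacements are assumed to be shared, RADEON_-prefixed equivalents in a common radeon header. A minimal sketch of the header decode those macros implemented, using the exact bit layout removed here (kernel u32 assumed):

	static inline unsigned cp_packet_type(u32 h)        { return (h >> 30) & 3; }
	static inline unsigned cp_packet_count(u32 h)       { return (h >> 16) & 0x3FFF; }
	static inline unsigned cp_packet0_reg(u32 h)        { return (h & 0x1FFF) << 2; }
	static inline unsigned cp_packet0_one_reg_wr(u32 h) { return (h >> 15) & 1; }
	static inline unsigned cp_packet3_opcode(u32 h)     { return (h >> 8) & 0xFF; }
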
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 98143a5c5b73..b3807edb1936 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -162,7 +162,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -175,11 +175,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r;
break;
case RADEON_RB3D_DEPTHOFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
@@ -188,11 +188,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->cb[0].robj = reloc->robj;
@@ -207,11 +207,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXOFFSET_4:
case R200_PP_TXOFFSET_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -260,11 +260,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_CUBIC_OFFSET_F5_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->textures[i].cube_info[face - 1].offset = idx_value;
@@ -278,11 +278,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -355,11 +355,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index d0ba6023a1f8..c60350e6872d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -615,7 +615,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -630,11 +630,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_RB3D_COLOROFFSET2:
case R300_RB3D_COLOROFFSET3:
i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->cb[i].robj = reloc->robj;
@@ -643,11 +643,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_ZB_DEPTHOFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
@@ -672,11 +672,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_OFFSET_0+56:
case R300_TX_OFFSET_0+60:
i = (reg - R300_TX_OFFSET_0) >> 2;
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -745,11 +745,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -830,11 +830,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x4F24:
/* ZB_DEPTHPITCH */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1045,11 +1045,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->tex_dirty = true;
break;
case R300_ZB_ZPASS_ADDR:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -1087,11 +1087,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->cb_dirty = true;
break;
case R300_RB3D_AARESOLVE_OFFSET:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
track->aa.robj = reloc->robj;
@@ -1156,10 +1156,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
return r;
break;
case PACKET3_INDX_BUFFER:
- r = r100_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- r100_cs_dump_packet(p, pkt);
+ radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
@@ -1257,21 +1257,21 @@ int r300_cs_parse(struct radeon_cs_parser *p)
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
- r = r100_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
return r;
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
r = r100_cs_parse_packet0(p, &pkt,
p->rdev->config.r300.reg_safe_bm,
p->rdev->config.r300.reg_safe_bm_size,
&r300_packet0_check);
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
break;
- case PACKET_TYPE3:
+ case RADEON_PACKET_TYPE3:
r = r300_packet3_check(p, &pkt);
break;
default:
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 002ab038d2ab..865e2c9980db 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -29,6 +29,8 @@
*
* Authors:
* Nicolai Haehnle <prefect_@gmx.net>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 1f519a5ffb8c..ff229a00d273 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -65,17 +65,6 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
/* Registers */
#define R_000148_MC_FB_LOCATION 0x000148
#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index ec576aaafb73..c0dc8d3ba0bb 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -355,6 +355,7 @@
# define AVIVO_D1CRTC_V_BLANK (1 << 0)
#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
+#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
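
[Editor's note] AVIVO_D1CRTC_STATUS_HV_COUNT is the register the new hung-display check in r600.c (below) polls: a CRTC whose horizontal/vertical counter stops advancing is treated as hung. A condensed sketch of the idea, assuming RREG32()/udelay() from the surrounding code; the real r600_is_display_hung() samples ten times before declaring a hang:

	static bool crtc_counter_stuck(struct radeon_device *rdev, u32 crtc_offset)
	{
		/* two samples 100us apart; a live CRTC's HV counter advances */
		u32 first = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offset);

		udelay(100);
		return first == RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offset);
	}
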
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2aaf147969bd..6d4b5611daf4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -94,6 +94,12 @@ MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
+static const u32 crtc_offsets[2] =
+{
+ 0,
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
/* r600,rv610,rv630,rv620,rv635,rv670 */
@@ -103,6 +109,19 @@ void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
+/**
+ * r600_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r6xx, IGPs, APUs).
+ */
+u32 r600_get_xclk(struct radeon_device *rdev)
+{
+ return rdev->clock.spll.reference_freq;
+}
+
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
@@ -1254,114 +1273,301 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->vram_scratch.robj);
}
-/* We doesn't check that the GPU really needs a reset we simply do the
- * reset, it's up to the caller to determine if the GPU needs one. We
- * might add an helper function to check that.
- */
-static int r600_gpu_soft_reset(struct radeon_device *rdev)
+void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
- struct rv515_mc_save save;
- u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
- S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
- S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
- S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
- S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
- S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
- S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
- S_008010_GUI_ACTIVE(1);
- u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
- S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
- S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
- S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
- S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
- S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
- S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
- S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
- u32 tmp;
+ u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- dev_info(rdev->dev, "GPU softreset \n");
- dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
- RREG32(R_008010_GRBM_STATUS));
- dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
- RREG32(R_008014_GRBM_STATUS2));
- dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
- RREG32(R_000E50_SRBM_STATUS));
+ WREG32(R600_BIOS_3_SCRATCH, tmp);
+}
+
+static void r600_print_gpu_status_regs(struct radeon_device *rdev)
+{
+ dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
+ RREG32(R_008010_GRBM_STATUS));
+ dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
+ RREG32(R_008014_GRBM_STATUS2));
+ dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
+ RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
+ RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
+ RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
+ RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+ RREG32(CP_STAT));
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+}
+
+static bool r600_is_display_hung(struct radeon_device *rdev)
+{
+ u32 crtc_hung = 0;
+ u32 crtc_status[2];
+ u32 i, j, tmp;
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
+ crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
+ }
+ }
+
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
+ }
+ }
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
+ }
+
+ return true;
+}
+
+static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask = 0;
+ u32 tmp;
+
+ /* GRBM_STATUS */
+ tmp = RREG32(R_008010_GRBM_STATUS);
+ if (rdev->family >= CHIP_RV770) {
+ if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+ G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+ G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+ G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+ G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+ reset_mask |= RADEON_RESET_GFX;
+ } else {
+ if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+ G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+ G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+ G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+ G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+ reset_mask |= RADEON_RESET_GFX;
+ }
+
+ if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
+ G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
+ reset_mask |= RADEON_RESET_CP;
+
+ if (G_008010_GRBM_EE_BUSY(tmp))
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+ /* DMA_STATUS_REG */
+ tmp = RREG32(DMA_STATUS_REG);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(R_000E50_SRBM_STATUS);
+ if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
+ reset_mask |= RADEON_RESET_RLC;
+
+ if (G_000E50_IH_BUSY(tmp))
+ reset_mask |= RADEON_RESET_IH;
+
+ if (G_000E50_SEM_BUSY(tmp))
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (G_000E50_GRBM_RQ_PENDING(tmp))
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (G_000E50_VMC_BUSY(tmp))
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
+ G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
+ G_000E50_MCDW_BUSY(tmp))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (r600_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ return reset_mask;
+}
+
+static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+ struct rv515_mc_save save;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
+
+ if (reset_mask == 0)
+ return;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ r600_print_gpu_status_regs(rdev);
+
+ /* Disable CP parsing/prefetching */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+ else
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+ /* disable the RLC */
+ WREG32(RLC_CNTL, 0);
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
+ }
+
+ mdelay(50);
+
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- /* Disable CP parsing/prefetching */
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
- /* Check if any of the rendering block is busy and reset it */
- if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
- (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
- tmp = S_008020_SOFT_RESET_CR(1) |
- S_008020_SOFT_RESET_DB(1) |
- S_008020_SOFT_RESET_CB(1) |
- S_008020_SOFT_RESET_PA(1) |
- S_008020_SOFT_RESET_SC(1) |
- S_008020_SOFT_RESET_SMX(1) |
- S_008020_SOFT_RESET_SPI(1) |
- S_008020_SOFT_RESET_SX(1) |
- S_008020_SOFT_RESET_SH(1) |
- S_008020_SOFT_RESET_TC(1) |
- S_008020_SOFT_RESET_TA(1) |
- S_008020_SOFT_RESET_VC(1) |
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+ if (rdev->family >= CHIP_RV770)
+ grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
+ S_008020_SOFT_RESET_CB(1) |
+ S_008020_SOFT_RESET_PA(1) |
+ S_008020_SOFT_RESET_SC(1) |
+ S_008020_SOFT_RESET_SPI(1) |
+ S_008020_SOFT_RESET_SX(1) |
+ S_008020_SOFT_RESET_SH(1) |
+ S_008020_SOFT_RESET_TC(1) |
+ S_008020_SOFT_RESET_TA(1) |
+ S_008020_SOFT_RESET_VC(1) |
+ S_008020_SOFT_RESET_VGT(1);
+ else
+ grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
+ S_008020_SOFT_RESET_DB(1) |
+ S_008020_SOFT_RESET_CB(1) |
+ S_008020_SOFT_RESET_PA(1) |
+ S_008020_SOFT_RESET_SC(1) |
+ S_008020_SOFT_RESET_SMX(1) |
+ S_008020_SOFT_RESET_SPI(1) |
+ S_008020_SOFT_RESET_SX(1) |
+ S_008020_SOFT_RESET_SH(1) |
+ S_008020_SOFT_RESET_TC(1) |
+ S_008020_SOFT_RESET_TA(1) |
+ S_008020_SOFT_RESET_VC(1) |
+ S_008020_SOFT_RESET_VGT(1);
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
S_008020_SOFT_RESET_VGT(1);
- dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+
+ srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+ }
+
+ if (reset_mask & RADEON_RESET_DMA) {
+ if (rdev->family >= CHIP_RV770)
+ srbm_soft_reset |= RV770_SOFT_RESET_DMA;
+ else
+ srbm_soft_reset |= SOFT_RESET_DMA;
+ }
+
+ if (reset_mask & RADEON_RESET_RLC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
+ }
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- RREG32(R_008020_GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(R_008020_GRBM_SOFT_RESET, 0);
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(R_008020_GRBM_SOFT_RESET);
}
- /* Reset CP (we always reset CP) */
- tmp = S_008020_SOFT_RESET_CP(1);
- dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- RREG32(R_008020_GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(R_008020_GRBM_SOFT_RESET, 0);
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
+
/* Wait a little for things to settle down */
mdelay(1);
- dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
- RREG32(R_008010_GRBM_STATUS));
- dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
- RREG32(R_008014_GRBM_STATUS2));
- dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
- RREG32(R_000E50_SRBM_STATUS));
- dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
- dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
- dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
- dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
+
rv515_mc_resume(rdev, &save);
+ udelay(50);
+
+ r600_print_gpu_status_regs(rdev);
+}
+
+int r600_asic_reset(struct radeon_device *rdev)
+{
+ u32 reset_mask;
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ r600_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
return 0;
}
-bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+/**
+ * r600_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 srbm_status;
- u32 grbm_status;
- u32 grbm_status2;
-
- srbm_status = RREG32(R_000E50_SRBM_STATUS);
- grbm_status = RREG32(R_008010_GRBM_STATUS);
- grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
- if (!G_008010_GUI_ACTIVE(grbm_status)) {
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
radeon_ring_lockup_update(ring);
return false;
}
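
[Editor's note] The reset rework above splits the old monolithic soft reset into three stages: r600_gpu_check_soft_reset() turns the GRBM/SRBM/DMA status registers into a per-block reset mask, r600_gpu_soft_reset() resets only the flagged blocks, and r600_asic_reset() re-checks afterwards so the BIOS scratch "engine hung" bit is cleared only on success. The lockup tests then reduce to mask checks; a hedged sketch of the shared pattern:

	/* Sketch only: each ring tests the mask bits it depends on, e.g.
	 * RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP for
	 * the GFX ring and RADEON_RESET_DMA for the async DMA ring. */
	static bool ring_blocks_busy(struct radeon_device *rdev, u32 block_mask)
	{
		return (r600_gpu_check_soft_reset(rdev) & block_mask) != 0;
	}
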
@@ -1376,15 +1582,14 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
- * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Check if the async DMA engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 dma_status_reg;
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
- dma_status_reg = RREG32(DMA_STATUS_REG);
- if (dma_status_reg & DMA_IDLE) {
+ if (!(reset_mask & RADEON_RESET_DMA)) {
radeon_ring_lockup_update(ring);
return false;
}
@@ -1393,11 +1598,6 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-int r600_asic_reset(struct radeon_device *rdev)
-{
- return r600_gpu_soft_reset(rdev);
-}
-
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
@@ -1405,12 +1605,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 disabled_rb_mask)
{
u32 rendering_pipe_num, rb_num_width, req_rb_num;
- u32 pipe_rb_ratio, pipe_rb_remain;
+ u32 pipe_rb_ratio, pipe_rb_remain, tmp;
u32 data = 0, mask = 1 << (max_rb_num - 1);
unsigned i, j;
/* mask out the RBs that don't exist on that asic */
- disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+ tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+ /* make sure at least one RB is available */
+ if ((tmp & 0xff) != 0xff)
+ disabled_rb_mask = tmp;
rendering_pipe_num = 1 << tiling_pipe_num;
req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
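
[Editor's note] The new guard keeps the mask-out of non-existent RBs from disabling every render backend. Worked example, assuming max_rb_num = 4: disabled_rb_mask = 0x03 yields tmp = 0x03 | 0xf0 = 0xf3, which still leaves RBs 2 and 3 enabled, so it is accepted; disabled_rb_mask = 0x0f yields tmp = 0xff, so the update is skipped and the original mask is kept rather than disabling all RBs.
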
@@ -2256,7 +2459,7 @@ void r600_dma_stop(struct radeon_device *rdev)
int r600_dma_resume(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- u32 rb_cntl, dma_cntl;
+ u32 rb_cntl, dma_cntl, ib_cntl;
u32 rb_bufsz;
int r;
@@ -2296,7 +2499,11 @@ int r600_dma_resume(struct radeon_device *rdev)
WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
/* enable DMA IBs */
- WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
+ ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL, ib_cntl);
dma_cntl = RREG32(DMA_CNTL);
dma_cntl &= ~CTXEMPTY_INT_ENABLE;
@@ -2595,7 +2802,7 @@ int r600_copy_blit(struct radeon_device *rdev,
* @num_gpu_pages: number of GPU pages to xfer
* @fence: radeon fence object
*
- * Copy GPU paging using the DMA engine (r6xx-r7xx).
+ * Copy GPU pages using the DMA engine (r6xx).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
@@ -2618,8 +2825,8 @@ int r600_copy_dma(struct radeon_device *rdev,
}
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
- num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+ r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
@@ -2636,14 +2843,14 @@ int r600_copy_dma(struct radeon_device *rdev,
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
- if (cur_size_in_dw > 0xFFFF)
- cur_size_in_dw = 0xFFFF;
+ if (cur_size_in_dw > 0xFFFE)
+ cur_size_in_dw = 0xFFFE;
size_in_dw -= cur_size_in_dw;
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
radeon_ring_write(ring, dst_offset & 0xfffffffc);
radeon_ring_write(ring, src_offset & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+ (upper_32_bits(src_offset) & 0xff)));
src_offset += cur_size_in_dw * 4;
dst_offset += cur_size_in_dw * 4;
}
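
[Editor's note] Two coordinated changes here: the per-packet copy size drops from 0xFFFF to 0xFFFE dwords, and the COPY packet shrinks from 5 to 4 dwords because both address upper bits now share one dword. Ring-space arithmetic for a 1 MiB copy, as a hedged check of the new sizing:

	u32 size_in_dw = (1024 * 1024) / 4;               /* 262144 dwords */
	u32 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE); /* 5 loops */
	u32 ring_dw = num_loops * 4 + 8;                  /* 28 dwords locked */
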
@@ -4254,14 +4461,14 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
}
/**
- * r600_get_gpu_clock - return GPU clock counter snapshot
+ * r600_get_gpu_clock_counter - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (R6xx-cayman).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
uint64_t clock;
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 77da1f9c0b8e..f651881eb0ae 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -22,6 +22,8 @@
*
* Authors:
* Alex Deucher <alexander.deucher@amd.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
@@ -488,37 +490,6 @@ set_default_state(drm_radeon_private_t *dev_priv)
ADVANCE_RING();
}
-/* 23 bits of float fractional data */
-#define I2F_FRAC_BITS 23
-#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
-
-/*
- * Converts unsigned integer into 32-bit IEEE floating point representation.
- * Will be exact from 0 to 2^24. Above that, we round towards zero
- * as the fractional bits will not fit in a float. (It would be better to
- * round towards even as the fpu does, but that is slower.)
- */
-__pure uint32_t int2float(uint32_t x)
-{
- uint32_t msb, exponent, fraction;
-
- /* Zero is special */
- if (!x) return 0;
-
- /* Get location of the most significant bit */
- msb = __fls(x);
-
- /*
- * Use a rotate instead of a shift because that works both leftwards
- * and rightwards due to the mod(32) behaviour. This means we don't
- * need to check to see if we are above 2^24 or not.
- */
- fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
- exponent = (127 + msb) << I2F_FRAC_BITS;
-
- return fraction + exponent;
-}
-
static int r600_nomm_get_vb(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index e082dca6feee..9fb5780a552f 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -31,6 +31,37 @@
#include "r600_blit_shaders.h"
#include "radeon_blit_common.h"
+/* 23 bits of float fractional data */
+#define I2F_FRAC_BITS 23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts unsigned integer into 32-bit IEEE floating point representation.
+ * Will be exact from 0 to 2^24. Above that, we round towards zero
+ * as the fractional bits will not fit in a float. (It would be better to
+ * round towards even as the fpu does, but that is slower.)
+ */
+__pure uint32_t int2float(uint32_t x)
+{
+ uint32_t msb, exponent, fraction;
+
+ /* Zero is special */
+ if (!x) return 0;
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ /*
+ * Use a rotate instead of a shift because that works both leftwards
+ * and rightwards due to the mod(32) behaviour. This means we don't
+ * need to check to see if we are above 2^24 or not.
+ */
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return fraction + exponent;
+}
+
/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,
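
[Editor's note] int2float() moves here unchanged from r600_blit.c. A quick userspace sanity check of the conversion (hedged, not part of the patch): int2float(5) must produce the IEEE-754 bit pattern of 5.0f.

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		/* int2float(5): msb = 2, exponent = (127 + 2) << 23, and
		 * fraction = ror32(5, (2 - 23) & 0x1f) & 0x7FFFFF = 0x200000 */
		uint32_t bits = ((127 + 2) << 23) + 0x200000; /* 0x40A00000 */
		float f;

		memcpy(&f, &bits, sizeof(f));
		assert(f == 5.0f);
		return 0;
	}
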
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index be85f75aedda..1c51c08b1fde 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -24,6 +24,8 @@
* Authors:
* Dave Airlie <airlied@redhat.com>
* Alex Deucher <alexander.deucher@amd.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 9ea13d07cc55..01a3ec83f284 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -31,12 +31,7 @@
#include "r600d.h"
#include "r600_reg_safe.h"
-static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
-typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
-static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
+static int r600_nomm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
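
[Editor's note] The mm/nomm function-pointer indirection is gone; callers now pass the r600_nomm flag to the shared radeon_cs_packet_next_reloc() (KMS paths pass 0, the legacy UMS path sets r600_nomm in r600_cs_legacy_init() below). A hedged sketch of the shared helper's dispatch — the internal helper names here are hypothetical:

	int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc,
					int nomm)
	{
		/* nomm selects the legacy, non-memory-managed reloc path */
		if (nomm)
			return packet_next_reloc_nomm(p, cs_reloc);
		return packet_next_reloc_mm(p, cs_reloc);
	}
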
@@ -784,170 +779,29 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
/**
- * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-static int r600_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx)
-{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- uint32_t header;
-
- if (idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- idx, ib_chunk->length_dw);
- return -EINVAL;
- }
- header = radeon_get_ib_value(p, idx);
- pkt->idx = idx;
- pkt->type = CP_PACKET_GET_TYPE(header);
- pkt->count = CP_PACKET_GET_COUNT(header);
- pkt->one_reg_wr = 0;
- switch (pkt->type) {
- case PACKET_TYPE0:
- pkt->reg = CP_PACKET0_GET_REG(header);
- break;
- case PACKET_TYPE3:
- pkt->opcode = CP_PACKET3_GET_OPCODE(header);
- break;
- case PACKET_TYPE2:
- pkt->count = -1;
- break;
- default:
- DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- return -EINVAL;
- }
- if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
- DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
- pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
- * @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = r600_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- return -EINVAL;
- }
- /* FIXME: we assume reloc size is 4 dwords */
- *cs_reloc = p->relocs_ptr[(idx / 4)];
- return 0;
-}
-
-/**
- * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
- * @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
- *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
-{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs_idx == -1) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
- r = r600_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return r;
- }
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- return -EINVAL;
- }
- *cs_reloc = p->relocs;
- (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
- (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
- return 0;
-}
-
-/**
- * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
+ * r600_cs_packet_parse_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
*
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+ * This is an R600-specific function for parsing VLINE packets.
+ * The real work is done by the r600_cs_common_vline_parse() function.
+ * Here we just set up the ASIC-specific register tables and call
+ * the common implementation function.
+ */
+static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
- struct radeon_cs_packet p3reloc;
- int r;
+ static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
+ AVIVO_D2MODE_VLINE_START_END};
+ static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
+ AVIVO_D2MODE_VLINE_STATUS};
- r = r600_cs_packet_parse(p, &p3reloc, p->idx);
- if (r) {
- return 0;
- }
- if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
- return 0;
- }
- return 1;
+ return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}
/**
- * r600_cs_packet_next_vline() - parse userspace VLINE packet
+ * r600_cs_common_vline_parse() - common vline parser
* @parser: parser structure holding parsing context.
+ * @vline_start_end: table of vline_start_end registers
+ * @vline_status: table of vline_status registers
*
* Userspace sends a special sequence for VLINE waits.
* PACKET0 - VLINE_START_END + value
@@ -957,9 +811,16 @@ static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
* This function parses this and relocates the VLINE START END
* and WAIT_REG_MEM packets to the correct crtc.
* It also detects a switched off crtc and nulls out the
- * wait in that case.
+ * wait in that case. This function is common for all ASICs that
+ * are R600 and newer; the parsing algorithm is the same, and the
+ * ASICs differ only in which registers are used.
+ *
+ * The caller is the ASIC-specific function, which passes the parser
+ * context and an ASIC-specific register table.
*/
-static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+ uint32_t *vline_start_end,
+ uint32_t *vline_status)
{
struct drm_mode_object *obj;
struct drm_crtc *crtc;
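
[Editor's note] Any newer ASIC can now reuse this parser by supplying its own per-CRTC register tables; the r600 wrapper above shows the pattern. A purely illustrative wrapper for a six-CRTC ASIC might look like this (the function name is hypothetical and the register values are elided — they are not given in this hunk):

	static int example_cs_packet_parse_vline(struct radeon_cs_parser *p)
	{
		/* one VLINE_START_END and one VLINE_STATUS register per
		 * CRTC; the actual offsets are ASIC-specific and omitted */
		static uint32_t vline_start_end[6] = { 0 };
		static uint32_t vline_status[6] = { 0 };

		return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
	}
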
@@ -973,12 +834,12 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib = p->ib.ptr;
/* parse the WAIT_REG_MEM */
- r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
+ r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
if (r)
return r;
/* check its a WAIT_REG_MEM */
- if (wait_reg_mem.type != PACKET_TYPE3 ||
+ if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
return -EINVAL;
@@ -987,7 +848,12 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
+ DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
+ return -EINVAL;
+ }
+ /* bit 8 is me (0) or pfp (1) */
+ if (wait_reg_mem_info & 0x100) {
+ DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
return -EINVAL;
}
/* waiting for value to be equal */
@@ -995,18 +861,18 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
return -EINVAL;
}
- if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
+ if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
return -EINVAL;
}
- if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
+ if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
return -EINVAL;
}
/* jump over the NOP */
- r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
if (r)
return r;
@@ -1016,7 +882,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
- reg = CP_PACKET0_GET_REG(header);
+ reg = R600_CP_PACKET0_GET_REG(header);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
@@ -1028,7 +894,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
crtc_id = radeon_crtc->crtc_id;
if (!crtc->enabled) {
- /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+ /* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
ib[h_idx + 2] = PACKET2(0);
ib[h_idx + 3] = PACKET2(0);
ib[h_idx + 4] = PACKET2(0);
@@ -1036,20 +902,15 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib[h_idx + 6] = PACKET2(0);
ib[h_idx + 7] = PACKET2(0);
ib[h_idx + 8] = PACKET2(0);
- } else if (crtc_id == 1) {
- switch (reg) {
- case AVIVO_D1MODE_VLINE_START_END:
- header &= ~R600_CP_PACKET0_REG_MASK;
- header |= AVIVO_D2MODE_VLINE_START_END >> 2;
- break;
- default:
- DRM_ERROR("unknown crtc reloc\n");
- return -EINVAL;
- }
+ } else if (reg == vline_start_end[0]) {
+ header &= ~R600_CP_PACKET0_REG_MASK;
+ header |= vline_start_end[crtc_id] >> 2;
ib[h_idx] = header;
- ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
+ ib[h_idx + 4] = vline_status[crtc_id] >> 2;
+ } else {
+ DRM_ERROR("unknown crtc reloc\n");
+ return -EINVAL;
}
-
return 0;
}
@@ -1155,8 +1016,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case R_028010_DB_DEPTH_INFO:
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
- r600_cs_packet_next_is_pkt3_nop(p)) {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ radeon_cs_packet_next_is_pkt3_nop(p)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1198,7 +1059,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_1:
case VGT_STRMOUT_BUFFER_BASE_2:
case VGT_STRMOUT_BUFFER_BASE_3:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1221,7 +1082,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->streamout_dirty = true;
break;
case CP_COHER_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
"0x%04X\n", reg);
@@ -1256,8 +1117,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
- r600_cs_packet_next_is_pkt3_nop(p)) {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ radeon_cs_packet_next_is_pkt3_nop(p)) {
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1320,7 +1181,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280F8_CB_COLOR6_FRAG:
case R_0280FC_CB_COLOR7_FRAG:
tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
- if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
if (!track->cb_color_base_last[tmp]) {
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
@@ -1329,7 +1190,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1351,7 +1212,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280D8_CB_COLOR6_TILE:
case R_0280DC_CB_COLOR7_TILE:
tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
- if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
if (!track->cb_color_base_last[tmp]) {
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
@@ -1360,7 +1221,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1395,7 +1256,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_BASE:
case CB_COLOR6_BASE:
case CB_COLOR7_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1410,7 +1271,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_dirty = true;
break;
case DB_DEPTH_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1423,7 +1284,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_HTILE_DATA_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1493,7 +1354,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_VS_13:
case SQ_ALU_CONST_CACHE_VS_14:
case SQ_ALU_CONST_CACHE_VS_15:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1502,7 +1363,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SX_MEMORY_EXPORT_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONFIG_REG "
"0x%04X\n", reg);
@@ -1788,7 +1649,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
@@ -1829,7 +1690,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
@@ -1881,7 +1742,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x10) {
uint64_t offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
@@ -1893,6 +1754,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else if (idx_value & 0x100) {
+ DRM_ERROR("cannot use PFP on REG wait\n");
+ return -EINVAL;
}
break;
case PACKET3_CP_DMA:
@@ -1915,7 +1779,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
/* src address space is memory */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad CP DMA SRC\n");
return -EINVAL;
@@ -1945,7 +1809,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("CP DMA DAIC only supported for registers\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad CP DMA DST\n");
return -EINVAL;
@@ -1975,7 +1839,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* 0xffffffff/0x0 is flush all cache flag */
if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
radeon_get_ib_value(p, idx + 2) != 0) {
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SURFACE_SYNC\n");
return -EINVAL;
@@ -1991,7 +1855,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (pkt->count) {
uint64_t offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2012,7 +1876,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2078,7 +1942,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2092,7 +1956,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
texture = reloc->robj;
/* tex mip base */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2113,7 +1977,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset64;
/* vtx base */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2214,7 +2078,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
return -EINVAL;
@@ -2258,7 +2122,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* Updating memory at DST_ADDRESS. */
if (idx_value & 0x1) {
u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
@@ -2277,7 +2141,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* Reading data from SRC_ADDRESS. */
if (((idx_value >> 1) & 0x3) == 2) {
u64 offset;
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
@@ -2302,7 +2166,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
@@ -2331,7 +2195,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x1) {
u64 offset;
/* SRC is memory. */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad COPY_DW (missing src reloc)\n");
return -EINVAL;
@@ -2355,7 +2219,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x2) {
u64 offset;
/* DST is memory. */
- r = r600_cs_packet_next_reloc(p, &reloc);
+ r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
@@ -2410,7 +2274,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
p->track = track;
}
do {
- r = r600_cs_packet_parse(p, &pkt, p->idx);
+ r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
kfree(p->track);
p->track = NULL;
@@ -2418,12 +2282,12 @@ int r600_cs_parse(struct radeon_cs_parser *p)
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
r = r600_cs_parse_packet0(p, &pkt);
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
break;
- case PACKET_TYPE3:
+ case RADEON_PACKET_TYPE3:
r = r600_packet3_check(p, &pkt);
break;
default:
@@ -2449,17 +2313,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
return 0;
}
-static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
-{
- if (p->chunk_relocs_idx == -1) {
- return 0;
- }
- p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
- if (p->relocs == NULL) {
- return -ENOMEM;
- }
- return 0;
-}
+#ifdef CONFIG_DRM_RADEON_UMS
/**
* cs_parser_fini() - clean parser states
@@ -2476,13 +2330,27 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->relocs);
for (i = 0; i < parser->nchunks; i++) {
kfree(parser->chunks[i].kdata);
- kfree(parser->chunks[i].kpage[0]);
- kfree(parser->chunks[i].kpage[1]);
+ if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
+ kfree(parser->chunks[i].kpage[0]);
+ kfree(parser->chunks[i].kpage[1]);
+ }
}
kfree(parser->chunks);
kfree(parser->chunks_array);
}
+static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
+{
+ if (p->chunk_relocs_idx == -1) {
+ return 0;
+ }
+ p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ if (p->relocs == NULL) {
+ return -ENOMEM;
+ }
+ return 0;
+}
+
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
unsigned family, u32 *ib, int *l)
{
@@ -2541,9 +2409,11 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
void r600_cs_legacy_init(void)
{
- r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
+ r600_nomm = 1;
}
+#endif
+
/*
* DMA
*/
@@ -2561,16 +2431,16 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_chunk *relocs_chunk;
unsigned idx;
+ *cs_reloc = NULL;
if (p->chunk_relocs_idx == -1) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
- *cs_reloc = NULL;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
idx = p->dma_reloc_idx;
- if (idx >= relocs_chunk->length_dw) {
+ if (idx >= p->nrelocs) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
+ idx, p->nrelocs);
return -EINVAL;
}
*cs_reloc = p->relocs_ptr[idx];
@@ -2621,14 +2491,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
return -EINVAL;
}
if (tiled) {
- dst_offset = ib[idx+1];
+ dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
p->idx += count + 5;
} else {
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2656,37 +2526,50 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
/* detile bit */
if (idx_value & (1 << 31)) {
/* tiled src, linear dst */
- src_offset = ib[idx+1];
+ src_offset = radeon_get_ib_value(p, idx+1);
src_offset <<= 8;
ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
- dst_offset = ib[idx+5];
- dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+5);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
/* linear src, tiled dst */
- src_offset = ib[idx+5];
- src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+5);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- dst_offset = ib[idx+1];
+ dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
p->idx += 7;
} else {
- src_offset = ib[idx+2];
- src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
-
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 5;
+ if (p->family >= CHIP_RV770) {
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ } else {
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
+ p->idx += 4;
+ }
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
@@ -2709,8 +2592,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
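The recurring change above swaps raw ib[] reads for radeon_get_ib_value(), which reads through the parser's paged copy of the IB (needed on AGP, where the raw array may not be populated yet). The packets themselves split a 40-bit GPU address across two dwords; a minimal sketch of that assembly, assuming the kernel's u32/u64 types (the helper name is illustrative, not driver code):

/* Illustrative only: assemble the 40-bit DMA address that the
 * DMA_PACKET_COPY parsing above reads as a lo/hi dword pair. 'lo'
 * carries bits 31:0 and the low byte of 'hi' carries bits 39:32. */
static inline u64 dma_addr_from_pair(u32 lo, u32 hi)
{
	return (u64)lo | ((u64)(hi & 0xff) << 32);
}

The pre-RV770 L2L packet differs only in where the high byte sits (bits 23:16 of dword 3), which is why that branch masks with 0xff0000 and shifts by 16 instead of 32.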
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index ff80efe9cb7d..21ecc0e12dc4 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -23,6 +23,7 @@
*
* Authors: Christian König
*/
+#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -121,79 +122,18 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
}
/*
- * calculate the crc for a given info frame
- */
-static void r600_hdmi_infoframe_checksum(uint8_t packetType,
- uint8_t versionNumber,
- uint8_t length,
- uint8_t *frame)
-{
- int i;
- frame[0] = packetType + versionNumber + length;
- for (i = 1; i <= length; i++)
- frame[0] += frame[i];
- frame[0] = 0x100 - frame[0];
-}
-
-/*
* build a HDMI Video Info Frame
*/
-static void r600_hdmi_videoinfoframe(
- struct drm_encoder *encoder,
- enum r600_hdmi_color_format color_format,
- int active_information_present,
- uint8_t active_format_aspect_ratio,
- uint8_t scan_information,
- uint8_t colorimetry,
- uint8_t ex_colorimetry,
- uint8_t quantization,
- int ITC,
- uint8_t picture_aspect_ratio,
- uint8_t video_format_identification,
- uint8_t pixel_repetition,
- uint8_t non_uniform_picture_scaling,
- uint8_t bar_info_data_valid,
- uint16_t top_bar,
- uint16_t bottom_bar,
- uint16_t left_bar,
- uint16_t right_bar
-)
+static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+ void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
+ uint8_t *frame = buffer + 3;
- uint8_t frame[14];
-
- frame[0x0] = 0;
- frame[0x1] =
- (scan_information & 0x3) |
- ((bar_info_data_valid & 0x3) << 2) |
- ((active_information_present & 0x1) << 4) |
- ((color_format & 0x3) << 5);
- frame[0x2] =
- (active_format_aspect_ratio & 0xF) |
- ((picture_aspect_ratio & 0x3) << 4) |
- ((colorimetry & 0x3) << 6);
- frame[0x3] =
- (non_uniform_picture_scaling & 0x3) |
- ((quantization & 0x3) << 2) |
- ((ex_colorimetry & 0x7) << 4) |
- ((ITC & 0x1) << 7);
- frame[0x4] = (video_format_identification & 0x7F);
- frame[0x5] = (pixel_repetition & 0xF);
- frame[0x6] = (top_bar & 0xFF);
- frame[0x7] = (top_bar >> 8);
- frame[0x8] = (bottom_bar & 0xFF);
- frame[0x9] = (bottom_bar >> 8);
- frame[0xA] = (left_bar & 0xFF);
- frame[0xB] = (left_bar >> 8);
- frame[0xC] = (right_bar & 0xFF);
- frame[0xD] = (right_bar >> 8);
-
- r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
/* Our header values (type, version, length) should be alright, Intel
* is using the same. Checksum function also seems to be OK, it works
* fine for audio infoframe. However calculated value is always lower
@@ -215,39 +155,15 @@ static void r600_hdmi_videoinfoframe(
/*
* build a Audio Info Frame
*/
-static void r600_hdmi_audioinfoframe(
- struct drm_encoder *encoder,
- uint8_t channel_count,
- uint8_t coding_type,
- uint8_t sample_size,
- uint8_t sample_frequency,
- uint8_t format,
- uint8_t channel_allocation,
- uint8_t level_shift,
- int downmix_inhibit
-)
+static void r600_hdmi_update_audio_infoframe(struct drm_encoder *encoder,
+ const void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
-
- uint8_t frame[11];
-
- frame[0x0] = 0;
- frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
- frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
- frame[0x3] = format;
- frame[0x4] = channel_allocation;
- frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
- frame[0x6] = 0;
- frame[0x7] = 0;
- frame[0x8] = 0;
- frame[0x9] = 0;
- frame[0xA] = 0;
-
- r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
+ const u8 *frame = buffer + 3;
WREG32(HDMI0_AUDIO_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -320,7 +236,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ struct hdmi_avi_infoframe frame;
uint32_t offset;
+ ssize_t err;
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
@@ -371,9 +290,19 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
- r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
r600_hdmi_update_ACR(encoder, mode->clock);
/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
@@ -395,8 +324,11 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct r600_audio audio = r600_audio_status(rdev);
+ uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
+ struct hdmi_audio_infoframe frame;
uint32_t offset;
uint32_t iec;
+ ssize_t err;
if (!dig->afmt || !dig->afmt->enabled)
return;
@@ -462,9 +394,21 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
iec |= 0x5 << 16;
WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
- r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0,
- 0);
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ DRM_ERROR("failed to setup audio infoframe\n");
+ return;
+ }
+
+ frame.channels = audio.channels;
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack audio infoframe\n");
+ return;
+ }
+ r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
r600_hdmi_audio_workaround(encoder);
}
@@ -544,7 +488,6 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
/* Called for ATOM_ENCODER_MODE_HDMI only */
if (!dig || !dig->afmt) {
- WARN_ON(1);
return;
}
if (!dig->afmt->enabled)
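The removed r600_hdmi_infoframe_checksum() computed the standard HDMI infoframe checksum, which hdmi_avi_infoframe_pack() and hdmi_audio_infoframe_pack() now fill in. A sketch of the same computation, for reference:

/* Sketch of the HDMI infoframe checksum the removed helper computed:
 * pick the checksum byte so that type + version + length + payload
 * sums to zero modulo 256. */
static u8 infoframe_checksum(u8 type, u8 version, u8 length, const u8 *payload)
{
	unsigned int i, sum = type + version + length;

	for (i = 0; i < length; i++)
		sum += payload[i];
	return (u8)(0x100 - (sum & 0xff));
}

This is also why the new update helpers point 'frame' at buffer + 3: the packed buffer starts with the three header bytes, and the register writes begin at the checksum byte that follows them, matching the old frame[] layout.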
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 4a53402b1852..a42ba11a3bed 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -182,6 +182,8 @@
#define CP_COHER_BASE 0x85F8
#define CP_DEBUG 0xC1FC
#define R_0086D8_CP_ME_CNTL 0x86D8
+#define S_0086D8_CP_PFP_HALT(x) (((x) & 1)<<26)
+#define C_0086D8_CP_PFP_HALT(x) ((x) & 0xFBFFFFFF)
#define S_0086D8_CP_ME_HALT(x) (((x) & 1)<<28)
#define C_0086D8_CP_ME_HALT(x) ((x) & 0xEFFFFFFF)
#define CP_ME_RAM_DATA 0xC160
@@ -1143,19 +1145,10 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
@@ -1328,6 +1321,7 @@
#define G_008010_VC_BUSY(x) (((x) >> 11) & 1)
#define G_008010_DB03_CLEAN(x) (((x) >> 12) & 1)
#define G_008010_CB03_CLEAN(x) (((x) >> 13) & 1)
+#define G_008010_TA_BUSY(x) (((x) >> 14) & 1)
#define G_008010_VGT_BUSY_NO_DMA(x) (((x) >> 16) & 1)
#define G_008010_VGT_BUSY(x) (((x) >> 17) & 1)
#define G_008010_TA03_BUSY(x) (((x) >> 18) & 1)
@@ -1395,6 +1389,7 @@
#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1)
#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1)
#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1)
+#define G_000E50_IH_BUSY(x) (((x) >> 17) & 1)
#define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1)
#define R_000E60_SRBM_SOFT_RESET 0x0E60
#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1)
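The PACKET0/PACKET3 builders above now encode against the shared RADEON_PACKET_TYPE* constants; the dropped CP_PACKET_GET_* macros decoded the same PM4 header layout. An illustrative decoder, not driver code:

/* Illustrative PM4 header decode, matching the layout PACKET0/PACKET3
 * encode above: type in bits 31:30, count in 29:16, and either a
 * register offset (type 0) or an opcode in bits 15:8 (type 3). */
static void pm4_header_decode(u32 header)
{
	u32 type  = (header >> 30) & 0x3;
	u32 count = (header >> 16) & 0x3fff;

	if (type == 0)
		printk(KERN_DEBUG "pkt0: reg 0x%05x, %u dword(s)\n",
		       (header & 0xffff) << 2, count + 1);
	else if (type == 3)
		printk(KERN_DEBUG "pkt3: opcode 0x%02x, %u dword(s)\n",
		       (header >> 8) & 0xff, count + 1);
}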
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9b9422c4403a..8263af3fd832 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -132,6 +132,20 @@ extern int radeon_lockup_timeout;
#define RADEON_VA_RESERVED_SIZE (8 << 20)
#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+/* reset flags */
+#define RADEON_RESET_GFX (1 << 0)
+#define RADEON_RESET_COMPUTE (1 << 1)
+#define RADEON_RESET_DMA (1 << 2)
+#define RADEON_RESET_CP (1 << 3)
+#define RADEON_RESET_GRBM (1 << 4)
+#define RADEON_RESET_DMA1 (1 << 5)
+#define RADEON_RESET_RLC (1 << 6)
+#define RADEON_RESET_SEM (1 << 7)
+#define RADEON_RESET_IH (1 << 8)
+#define RADEON_RESET_VMC (1 << 9)
+#define RADEON_RESET_MC (1 << 10)
+#define RADEON_RESET_DISPLAY (1 << 11)
+
/*
* Errata workarounds.
*/
@@ -319,7 +333,6 @@ struct radeon_bo {
struct list_head list;
/* Protected by tbo.reserved */
u32 placements[3];
- u32 busy_placements[3];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
@@ -337,7 +350,6 @@ struct radeon_bo {
struct drm_gem_object gem_base;
struct ttm_bo_kmap_obj dma_buf_vmap;
- int vmapping_count;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -649,6 +661,8 @@ struct radeon_ring {
u32 ptr_reg_mask;
u32 nop;
u32 idx;
+ u64 last_semaphore_signal_addr;
+ u64 last_semaphore_wait_addr;
};
/*
@@ -765,6 +779,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -1163,6 +1178,10 @@ struct radeon_asic {
bool (*gui_idle)(struct radeon_device *rdev);
/* wait for mc_idle */
int (*mc_wait_for_idle)(struct radeon_device *rdev);
+ /* get the reference clock */
+ u32 (*get_xclk)(struct radeon_device *rdev);
+ /* get the gpu clock counter */
+ uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
@@ -1173,7 +1192,9 @@ struct radeon_asic {
void (*fini)(struct radeon_device *rdev);
u32 pt_ring_index;
- void (*set_page)(struct radeon_device *rdev, uint64_t pe,
+ void (*set_page)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
} vm;
@@ -1751,6 +1772,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
(rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
/*
* BIOS helpers.
@@ -1795,7 +1817,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
@@ -1841,10 +1863,13 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
+#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
+#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1966,6 +1991,19 @@ static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
#endif
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx);
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt);
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc,
+ int nomm);
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+ uint32_t *vline_start_end,
+ uint32_t *vline_status);
+
#include "radeon_object.h"
#endif
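The new RADEON_RESET_* bits let reset code report exactly which blocks are hung instead of a single boolean. A hedged sketch of the intended accumulation; the *_busy() predicates are hypothetical placeholders, not real driver API:

/* Hypothetical status predicates standing in for real GRBM/SRBM checks. */
static bool gfx_busy(void);
static bool dma_busy(void);
static bool ih_busy(void);

/* Sketch: build a per-block reset mask from the flags defined above. */
static u32 sketch_gpu_check_soft_reset(void)
{
	u32 reset_mask = 0;

	if (gfx_busy())
		reset_mask |= RADEON_RESET_GFX | RADEON_RESET_CP;
	if (dma_busy())
		reset_mask |= RADEON_RESET_DMA;
	if (ih_busy())
		reset_mask |= RADEON_RESET_IH;
	return reset_mask;
}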
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 596bcbe80ed0..aba0a893ea98 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -934,6 +934,8 @@ static struct radeon_asic r600_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -946,7 +948,7 @@ static struct radeon_asic r600_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &r600_gpu_is_lockup,
+ .is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
@@ -1018,6 +1020,8 @@ static struct radeon_asic rs780_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1030,7 +1034,7 @@ static struct radeon_asic rs780_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &r600_gpu_is_lockup,
+ .is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
@@ -1102,6 +1106,8 @@ static struct radeon_asic rv770_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1114,7 +1120,7 @@ static struct radeon_asic rv770_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &r600_gpu_is_lockup,
+ .is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
@@ -1140,9 +1146,9 @@ static struct radeon_asic rv770_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &r600_copy_dma,
+ .dma = &rv770_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
- .copy = &r600_copy_dma,
+ .copy = &rv770_copy_dma,
.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
@@ -1186,6 +1192,8 @@ static struct radeon_asic evergreen_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1198,7 +1206,7 @@ static struct radeon_asic evergreen_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1207,7 +1215,7 @@ static struct radeon_asic evergreen_asic = {
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
+ .is_lockup = &evergreen_dma_is_lockup,
}
},
.irq = {
@@ -1270,6 +1278,8 @@ static struct radeon_asic sumo_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1282,7 +1292,7 @@ static struct radeon_asic sumo_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1291,7 +1301,7 @@ static struct radeon_asic sumo_asic = {
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
+ .is_lockup = &evergreen_dma_is_lockup,
}
},
.irq = {
@@ -1354,6 +1364,8 @@ static struct radeon_asic btc_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1366,7 +1378,7 @@ static struct radeon_asic btc_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1375,7 +1387,7 @@ static struct radeon_asic btc_asic = {
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
+ .is_lockup = &evergreen_dma_is_lockup,
}
},
.irq = {
@@ -1438,6 +1450,8 @@ static struct radeon_asic cayman_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &rv770_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1457,7 +1471,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1468,7 +1482,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1479,7 +1493,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
@@ -1565,6 +1579,8 @@ static struct radeon_asic trinity_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &r600_get_xclk,
+ .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1584,7 +1600,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1595,7 +1611,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1606,7 +1622,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gpu_is_lockup,
+ .is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
@@ -1692,6 +1708,8 @@ static struct radeon_asic si_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+ .get_xclk = &si_get_xclk,
+ .get_gpu_clock_counter = &si_get_gpu_clock_counter,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
@@ -1711,7 +1729,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &si_gpu_is_lockup,
+ .is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1722,7 +1740,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &si_gpu_is_lockup,
+ .is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1733,7 +1751,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
- .is_lockup = &si_gpu_is_lockup,
+ .is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
@@ -1744,7 +1762,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
+ .is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
},
[CAYMAN_RING_TYPE_DMA1_INDEX] = {
@@ -1755,7 +1773,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
+ .is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
}
},
@@ -1944,9 +1962,13 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
+ case CHIP_OLAND:
rdev->asic = &si_asic;
/* set num crtcs */
- rdev->num_crtc = 6;
+ if (rdev->family == CHIP_OLAND)
+ rdev->num_crtc = 2;
+ else
+ rdev->num_crtc = 6;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5f4882cc2152..3535f73ad3e2 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -319,7 +319,7 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
bool emit_wait);
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
@@ -389,7 +389,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
-uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
+u32 r600_get_xclk(struct radeon_device *rdev);
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -403,6 +404,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
void r700_cp_stop(struct radeon_device *rdev);
void r700_cp_fini(struct radeon_device *rdev);
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+u32 rv770_get_xclk(struct radeon_device *rdev);
/*
* evergreen
@@ -418,7 +424,8 @@ int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -469,13 +476,16 @@ int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void cayman_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -492,23 +502,27 @@ int si_init(struct radeon_device *rdev);
void si_fini(struct radeon_device *rdev);
int si_suspend(struct radeon_device *rdev);
int si_resume(struct radeon_device *rdev);
-bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int si_asic_reset(struct radeon_device *rdev);
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int si_irq_set(struct radeon_device *rdev);
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
-void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void si_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
-uint64_t si_get_gpu_clock(struct radeon_device *rdev);
int si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+u32 si_get_xclk(struct radeon_device *rdev);
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 15f5ded65e0c..d96070bf8388 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -43,6 +43,12 @@ struct atpx_verify_interface {
u32 function_bits; /* supported functions bit vector */
} __packed;
+struct atpx_px_params {
+ u16 size; /* structure size in bytes (includes size field) */
+ u32 valid_flags; /* which flags are valid */
+ u32 flags; /* flags */
+} __packed;
+
struct atpx_power_control {
u16 size;
u8 dgpu_state;
@@ -123,9 +129,61 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
}
/**
+ * radeon_atpx_validate - validate ATPX functions
+ *
+ * @atpx: radeon atpx struct
+ *
+ * Validate that required functions are enabled (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int radeon_atpx_validate(struct radeon_atpx *atpx)
+{
+ /* make sure required functions are enabled */
+ /* dGPU power control is required */
+ atpx->functions.power_cntl = true;
+
+ if (atpx->functions.px_params) {
+ union acpi_object *info;
+ struct atpx_px_params output;
+ size_t size;
+ u32 valid_bits;
+
+ info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
+ if (!info)
+ return -EIO;
+
+ memset(&output, 0, sizeof(output));
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 10) {
+ printk("ATPX buffer is too small: %zu\n", size);
+ kfree(info);
+ return -EINVAL;
+ }
+ size = min(sizeof(output), size);
+
+ memcpy(&output, info->buffer.pointer, size);
+
+ valid_bits = output.flags & output.valid_flags;
+ /* if separate mux flag is set, mux controls are required */
+ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+ atpx->functions.i2c_mux_cntl = true;
+ atpx->functions.disp_mux_cntl = true;
+ }
+ /* if any outputs are muxed, mux controls are required */
+ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+ ATPX_TV_SIGNAL_MUXED |
+ ATPX_DFP_SIGNAL_MUXED))
+ atpx->functions.disp_mux_cntl = true;
+
+ kfree(info);
+ }
+ return 0;
+}
+
+/**
* radeon_atpx_verify_interface - verify ATPX
*
- * @handle: acpi handle
* @atpx: radeon atpx struct
*
* Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
@@ -406,8 +464,19 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
*/
static int radeon_atpx_init(void)
{
+ int r;
+
/* set up the ATPX handle */
- return radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
+ r = radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
+ if (r)
+ return r;
+
+ /* validate the atpx setup */
+ r = radeon_atpx_validate(&radeon_atpx_priv.atpx);
+ if (r)
+ return r;
+
+ return 0;
}
/**
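radeon_atpx_validate() above only honors a PX flag when its bit is also set in valid_flags. The guard, restated as a helper (illustrative, not part of the patch):

/* A flag bit in the PX params is meaningful only when the platform also
 * sets the matching bit in valid_flags. */
static inline bool atpx_flag_valid(const struct atpx_px_params *p, u32 bit)
{
	return (p->flags & p->valid_flags & bit) != 0;
}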
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 4af89126e223..3e403bdda58f 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1548,6 +1548,9 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
of_machine_is_compatible("PowerBook6,7")) {
/* ibook */
rdev->mode_info.connector_table = CT_IBOOK;
+ } else if (of_machine_is_compatible("PowerMac3,5")) {
+ /* PowerMac G4 Silver radeon 7500 */
+ rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
} else if (of_machine_is_compatible("PowerMac4,4")) {
/* emac */
rdev->mode_info.connector_table = CT_EMAC;
@@ -2212,6 +2215,54 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
+ case CT_MAC_G4_SILVER:
+ DRM_INFO("Connector Table: %d (mac g4 silver)\n",
+ rdev->mode_info.connector_table);
+ /* DVI-I - tv dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 0,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
@@ -2419,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1),
ATOM_DEVICE_CRT1_SUPPORT);
}
+ /* RV100 board with external TMDS bit mis-set.
+ * Actually uses internal TMDS, clear the bit.
+ */
+ if (dev->pdev->device == 0x5159 &&
+ dev->pdev->subsystem_vendor == 0x1014 &&
+ dev->pdev->subsystem_device == 0x029A) {
+ tmp &= ~(1 << 4);
+ }
if ((tmp >> 4) & 0x1) {
devices |= ATOM_DEVICE_DFP2_SUPPORT;
radeon_add_legacy_encoder(dev,
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 47bf162ab9c6..2399f25ec037 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -741,7 +741,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
ret = connector_status_disconnected;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
@@ -947,7 +947,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
return connector->status;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
@@ -1401,7 +1401,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (encoder) {
/* setup ddc on the bridge */
radeon_atom_ext_encoder_setup_ddc(encoder);
- if (radeon_ddc_probe(radeon_connector)) /* try DDC */
+ /* bridge chips are always aux */
+ if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
ret = connector_status_connected;
else if (radeon_connector->dac_load_detect) { /* try load detection */
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1419,7 +1420,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
- if (radeon_ddc_probe(radeon_connector))
+ /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
+ if (radeon_ddc_probe(radeon_connector, false))
ret = connector_status_connected;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 9143fc45e35b..efc4f6441ef4 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -27,6 +27,8 @@
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 396baba0141a..70d38241b083 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -29,9 +29,6 @@
#include "radeon_reg.h"
#include "radeon.h"
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt);
-
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
struct drm_device *ddev = p->rdev->ddev;
@@ -128,18 +125,6 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return 0;
}
-static void radeon_cs_sync_to(struct radeon_cs_parser *p,
- struct radeon_fence *fence)
-{
- struct radeon_fence *other;
-
- if (!fence)
- return;
-
- other = p->ib.sync_to[fence->ring];
- p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
int i;
@@ -148,7 +133,7 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
if (!p->relocs[i].robj)
continue;
- radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
+ radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
}
}
@@ -203,7 +188,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].kdata = NULL;
p->chunks[i].chunk_id = user_chunk.chunk_id;
-
+ p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
p->chunk_relocs_idx = i;
}
@@ -226,9 +211,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
}
- p->chunks[i].length_dw = user_chunk.length_dw;
- p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
-
cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
(p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
@@ -279,13 +261,15 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
- if ((p->rdev->flags & RADEON_IS_AGP)) {
+ if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
- kfree(p->chunks[i].kpage[0]);
- kfree(p->chunks[i].kpage[1]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+ p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
+ p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
return -ENOMEM;
}
}
@@ -476,8 +460,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
goto out;
}
radeon_cs_sync_rings(parser);
- radeon_cs_sync_to(parser, vm->fence);
- radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));
+ radeon_ib_sync_to(&parser->ib, vm->fence);
+ radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
+ rdev, vm, parser->ring));
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
@@ -583,7 +568,8 @@ static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
int i;
int size = PAGE_SIZE;
- bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
+ bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
+ false : true;
for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
@@ -645,3 +631,152 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
idx_value = ibc->kpage[new_page][pg_offset/4];
return idx_value;
}
+
+/**
+ * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @p: parser structure holding parsing context.
+ * @pkt: where to store packet information
+ * @idx: index into the ib at which the packet starts
+ *
+ * Assume that chunk_ib_idx is properly set. Will return -EINVAL
+ * if the packet is bigger than the remaining ib size, or if the packet type is unknown.
+ **/
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_device *rdev = p->rdev;
+ uint32_t header;
+
+ if (idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ header = radeon_get_ib_value(p, idx);
+ pkt->idx = idx;
+ pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
+ pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
+ pkt->one_reg_wr = 0;
+ switch (pkt->type) {
+ case RADEON_PACKET_TYPE0:
+ if (rdev->family < CHIP_R600) {
+ pkt->reg = R100_CP_PACKET0_GET_REG(header);
+ pkt->one_reg_wr =
+ RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
+ } else
+ pkt->reg = R600_CP_PACKET0_GET_REG(header);
+ break;
+ case RADEON_PACKET_TYPE3:
+ pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
+ break;
+ case RADEON_PACKET_TYPE2:
+ pkt->count = -1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+ return -EINVAL;
+ }
+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
+ * @p: structure holding the parser context.
+ *
+ * Check if the next packet is a packet3 NOP carrying a relocation.
+ **/
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r)
+ return false;
+ if (p3reloc.type != RADEON_PACKET_TYPE3)
+ return false;
+ if (p3reloc.opcode != RADEON_PACKET3_NOP)
+ return false;
+ return true;
+}
+
+/**
+ * radeon_cs_dump_packet() - dump raw packet context
+ * @p: structure holding the parser context.
+ * @pkt: structure holding the packet.
+ *
+ * Used mostly for debugging and error reporting.
+ **/
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ volatile uint32_t *ib;
+ unsigned i;
+ unsigned idx;
+
+ ib = p->ib.ptr;
+ idx = pkt->idx;
+ for (i = 0; i <= (pkt->count + 1); i++, idx++)
+ DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+}
+
+/**
+ * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
+ * @p: parser structure holding parsing context.
+ * @cs_reloc: where to store the resulting relocation
+ * @nomm: legacy (non-MM) path when non-zero
+ *
+ * Check if next packet is relocation packet3, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc,
+ int nomm)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r)
+ return r;
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != RADEON_PACKET_TYPE3 ||
+ p3reloc.opcode != RADEON_PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ radeon_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ radeon_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ /* FIXME: we assume reloc size is 4 dwords */
+ if (nomm) {
+ *cs_reloc = p->relocs;
+ (*cs_reloc)->lobj.gpu_offset =
+ (u64)relocs_chunk->kdata[idx + 3] << 32;
+ (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+ } else
+ *cs_reloc = p->relocs_ptr[(idx / 4)];
+ return 0;
+}
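In the legacy (nomm) path of radeon_cs_packet_next_reloc() above, each relocation chunk entry is assumed to be four dwords, with the 64-bit GPU offset split across dwords 0 and 3. A sketch of that unpacking (helper name illustrative):

/* Sketch of the nomm unpacking above: reloc entries are 4 dwords each,
 * dword 0 = low 32 bits of the GPU offset, dword 3 = high 32 bits. */
static u64 legacy_reloc_gpu_offset(const uint32_t *kdata, unsigned idx)
{
	return ((u64)kdata[idx + 3] << 32) | kdata[idx + 0];
}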
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index ad6df625e8b8..b097d5b4ff39 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -241,12 +241,19 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
y = 0;
}
- if (ASIC_IS_AVIVO(rdev)) {
+ /* fixed on DCE6 and newer */
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
int i = 0;
struct drm_crtc *crtc_p;
- /* avivo cursor image can't end on 128 pixel boundary or
+ /*
+ * avivo cursor image can't end on 128 pixel boundary or
* go past the end of the frame if both crtcs are enabled
+ *
+ * NOTE: It is safe to access crtc->enabled of other crtcs
+ * without holding either the mode_config lock or the other
+ * crtc's lock as long as write access to this flag _always_
+ * grabs all locks.
*/
list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
if (crtc_p->enabled)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index cd756262924d..44b8034a400d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -93,6 +93,7 @@ static const char radeon_family_name[][16] = {
"TAHITI",
"PITCAIRN",
"VERDE",
+ "OLAND",
"LAST",
};
@@ -429,7 +430,8 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
- if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+ if (efi_enabled(EFI_BOOT) &&
+ rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
return false;
/* first check CRTCs */
@@ -757,6 +759,11 @@ int radeon_atombios_init(struct radeon_device *rdev)
atom_card_info->pll_write = cail_pll_write;
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
+ if (!rdev->mode_info.atom_context) {
+ radeon_atombios_fini(rdev);
+ return -ENOMEM;
+ }
+
mutex_init(&rdev->mode_info.atom_context->mutex);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
@@ -776,9 +783,11 @@ void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
kfree(rdev->mode_info.atom_context->scratch);
- kfree(rdev->mode_info.atom_context);
}
+ kfree(rdev->mode_info.atom_context);
+ rdev->mode_info.atom_context = NULL;
kfree(rdev->mode_info.atom_card_info);
+ rdev->mode_info.atom_card_info = NULL;
}
/* COMBIOS */
@@ -897,6 +906,25 @@ static void radeon_check_arguments(struct radeon_device *rdev)
}
/**
+ * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
+ * needed for waking up.
+ *
+ * @pdev: pci dev pointer
+ */
+static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
+{
+
+ /* 6600m in a macbook pro */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+ pdev->subsystem_device == 0x00e2) {
+ printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
+ return true;
+ }
+
+ return false;
+}
+
+/**
* radeon_switcheroo_set_state - set switcheroo state
*
* @pdev: pci dev pointer
@@ -910,10 +938,19 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
+ unsigned d3_delay = dev->pdev->d3_delay;
+
printk(KERN_INFO "radeon: switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+ if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
+ dev->pdev->d3_delay = 20;
+
radeon_resume_kms(dev);
+
+ dev->pdev->d3_delay = d3_delay;
+
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 310c0e5254ba..e38fd559f1ab 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -699,10 +699,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
- if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
- (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
- ENCODER_OBJECT_ID_NONE)) {
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+ ENCODER_OBJECT_ID_NONE) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if (dig->dp_i2c_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &dig->dp_i2c_bus->adapter);
+ } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
@@ -1084,12 +1089,12 @@ radeon_framebuffer_init(struct drm_device *dev,
{
int ret;
rfb->obj = obj;
+ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
if (ret) {
rfb->obj = NULL;
return ret;
}
- drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
return 0;
}
@@ -1110,14 +1115,16 @@ radeon_user_framebuffer_create(struct drm_device *dev,
}
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
- if (radeon_fb == NULL)
+ if (radeon_fb == NULL) {
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(-ENOMEM);
+ }
ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
if (ret) {
kfree(radeon_fb);
drm_gem_object_unreference_unlocked(obj);
- return NULL;
+ return ERR_PTR(ret);
}
return &radeon_fb->base;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ff7593498a74..167758488ed6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -69,9 +69,10 @@
* 2.26.0 - r600-eg: fix htile size computation
* 2.27.0 - r600-SI: Add CS ioctl support for async DMA
* 2.28.0 - r600-eg: Add MEM_WRITE packet support
+ * 2.29.0 - R500 FP16 color clear registers
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 28
+#define KMS_DRIVER_MINOR 29
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -117,20 +118,32 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle);
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags);
-struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg);
+int radeon_gem_prime_pin(struct drm_gem_object *obj);
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor);
void radeon_debugfs_cleanup(struct drm_minor *minor);
#endif
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+#endif
int radeon_no_wb;
-int radeon_modeset = -1;
+int radeon_modeset = 1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
@@ -198,6 +211,14 @@ module_param_named(msi, radeon_msi, int, 0444);
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+static struct pci_device_id pciidlist[] = {
+ radeon_PCI_IDS
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+#ifdef CONFIG_DRM_RADEON_UMS
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -226,14 +247,6 @@ static int radeon_resume(struct drm_device *dev)
return 0;
}
-static struct pci_device_id pciidlist[] = {
- radeon_PCI_IDS
-};
-
-#if defined(CONFIG_DRM_RADEON_KMS)
-MODULE_DEVICE_TABLE(pci, pciidlist);
-#endif
-
static const struct file_operations radeon_driver_old_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -283,6 +296,8 @@ static struct drm_driver driver_old = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+#endif
+
static struct drm_driver kms_driver;
static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
@@ -306,8 +321,8 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
return 0;
}
-static int __devinit
-radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int radeon_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int ret;
@@ -396,8 +411,13 @@ static struct drm_driver kms_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = radeon_gem_prime_export,
- .gem_prime_import = radeon_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = radeon_gem_prime_pin,
+ .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
+ .gem_prime_vmap = radeon_gem_prime_vmap,
+ .gem_prime_vunmap = radeon_gem_prime_vunmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -410,10 +430,12 @@ static struct drm_driver kms_driver = {
static struct drm_driver *driver;
static struct pci_driver *pdriver;
+#ifdef CONFIG_DRM_RADEON_UMS
static struct pci_driver radeon_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
};
+#endif
static struct pci_driver radeon_kms_pci_driver = {
.name = DRIVER_NAME,
@@ -426,28 +448,6 @@ static struct pci_driver radeon_kms_pci_driver = {
static int __init radeon_init(void)
{
- driver = &driver_old;
- pdriver = &radeon_pci_driver;
- driver->num_ioctls = radeon_max_ioctl;
-#ifdef CONFIG_VGA_CONSOLE
- if (vgacon_text_force() && radeon_modeset == -1) {
- DRM_INFO("VGACON disable radeon kernel modesetting.\n");
- driver = &driver_old;
- pdriver = &radeon_pci_driver;
- driver->driver_features &= ~DRIVER_MODESET;
- radeon_modeset = 0;
- }
-#endif
- /* if enabled by default */
- if (radeon_modeset == -1) {
-#ifdef CONFIG_DRM_RADEON_KMS
- DRM_INFO("radeon defaulting to kernel modesetting.\n");
- radeon_modeset = 1;
-#else
- DRM_INFO("radeon defaulting to userspace modesetting.\n");
- radeon_modeset = 0;
-#endif
- }
if (radeon_modeset == 1) {
DRM_INFO("radeon kernel modesetting enabled.\n");
driver = &kms_driver;
@@ -455,9 +455,21 @@ static int __init radeon_init(void)
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = radeon_max_kms_ioctl;
radeon_register_atpx_handler();
+
+ } else {
+#ifdef CONFIG_DRM_RADEON_UMS
+ DRM_INFO("radeon userspace modesetting enabled.\n");
+ driver = &driver_old;
+ pdriver = &radeon_pci_driver;
+ driver->driver_features &= ~DRIVER_MODESET;
+ driver->num_ioctls = radeon_max_ioctl;
+#else
+ DRM_ERROR("No UMS support in radeon module!\n");
+ return -EINVAL;
+#endif
}
- /* if the vga console setting is enabled still
- * let modprobe override it */
+
+ /* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
}
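
The init rework above leaves a single decision point: radeon_modeset == 1 selects the KMS driver, and anything else requires CONFIG_DRM_RADEON_UMS or the module load fails with -EINVAL. A minimal standalone sketch of that selection logic (hypothetical names, plain userspace C, not kernel code):

#include <stdio.h>

#define EINVAL 22
#define HAVE_UMS 0                 /* stands in for CONFIG_DRM_RADEON_UMS */

static int radeon_modeset = 1;     /* new default: KMS enabled */

static int pick_driver(const char **name)
{
	if (radeon_modeset == 1) {
		*name = "kms_driver";
		return 0;
	}
#if HAVE_UMS
	*name = "driver_old";      /* deprecated UMS path */
	return 0;
#else
	return -EINVAL;            /* no UMS support built into the module */
#endif
}

int main(void)
{
	const char *name;
	int r = pick_driver(&name);

	if (r)
		printf("module load fails: %d\n", r);
	else
		printf("using %s\n", name);
	return 0;
}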
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e7fdf163a8ca..b369d42f7de5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -113,6 +113,9 @@
#define DRIVER_MINOR 33
#define DRIVER_PATCHLEVEL 0
+/* The rest of the file is DEPRECATED! */
+#ifdef CONFIG_DRM_RADEON_UMS
+
enum radeon_cp_microcode_version {
UCODE_R100,
UCODE_R200,
@@ -418,8 +421,6 @@ extern int radeon_driver_open(struct drm_device *dev,
struct drm_file *file_priv);
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -462,15 +463,6 @@ extern void r600_blit_swap(struct drm_device *dev,
int sx, int sy, int dx, int dy,
int w, int h, int src_pitch, int dst_pitch, int cpp);
-/* atpx handler */
-#if defined(CONFIG_VGA_SWITCHEROO)
-void radeon_register_atpx_handler(void);
-void radeon_unregister_atpx_handler(void);
-#else
-static inline void radeon_register_atpx_handler(void) {}
-static inline void radeon_unregister_atpx_handler(void) {}
-#endif
-
/* Flags for stats.boxes
*/
#define RADEON_BOX_DMA_IDLE 0x1
@@ -2167,4 +2159,6 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
} while (0)
+#endif /* CONFIG_DRM_RADEON_UMS */
+
#endif /* __RADEON_DRV_H__ */
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index d1fafeabea09..2d91123f2759 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -91,6 +91,7 @@ enum radeon_family {
CHIP_TAHITI,
CHIP_PITCAIRN,
CHIP_VERDE,
+ CHIP_OLAND,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index cc8489d8c6d1..b1746741bc59 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -187,9 +187,10 @@ out_unref:
return ret;
}
-static int radeonfb_create(struct radeon_fbdev *rfbdev,
+static int radeonfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
+ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
@@ -293,28 +294,13 @@ out_unref:
}
if (fb && ret) {
drm_gem_object_unreference(gobj);
+ drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
return ret;
}
-static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = radeonfb_create(rfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
@@ -339,6 +325,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
rfb->obj = NULL;
}
drm_fb_helper_fini(&rfbdev->helper);
+ drm_framebuffer_unregister_private(&rfb->base);
drm_framebuffer_cleanup(&rfb->base);
return 0;
@@ -347,7 +334,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.gamma_set = radeon_crtc_fb_gamma_set,
.gamma_get = radeon_crtc_fb_gamma_get,
- .fb_probe = radeon_fb_find_or_create_single,
+ .fb_probe = radeonfb_create,
};
int radeon_fbdev_init(struct radeon_device *rdev)
@@ -377,6 +364,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
}
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(rdev->ddev);
+
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 6e24f84755b5..2c1341f63dc5 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -929,6 +929,7 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
*/
static int radeon_vm_update_pdes(struct radeon_device *rdev,
struct radeon_vm *vm,
+ struct radeon_ib *ib,
uint64_t start, uint64_t end)
{
static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
@@ -971,7 +972,7 @@ retry:
((last_pt + incr * count) != pt)) {
if (count) {
- radeon_asic_vm_set_page(rdev, last_pde,
+ radeon_asic_vm_set_page(rdev, ib, last_pde,
last_pt, count, incr,
RADEON_VM_PAGE_VALID);
}
@@ -985,7 +986,7 @@ retry:
}
if (count) {
- radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
+ radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
incr, RADEON_VM_PAGE_VALID);
}
@@ -1009,6 +1010,7 @@ retry:
*/
static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_vm *vm,
+ struct radeon_ib *ib,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
@@ -1038,7 +1040,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
if ((last_pte + 8 * count) != pte) {
if (count) {
- radeon_asic_vm_set_page(rdev, last_pte,
+ radeon_asic_vm_set_page(rdev, ib, last_pte,
last_dst, count,
RADEON_GPU_PAGE_SIZE,
flags);
@@ -1056,7 +1058,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
}
if (count) {
- radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
+ radeon_asic_vm_set_page(rdev, ib, last_pte,
+ last_dst, count,
RADEON_GPU_PAGE_SIZE, flags);
}
}
@@ -1080,8 +1083,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct ttm_mem_reg *mem)
{
unsigned ridx = rdev->asic->vm.pt_ring_index;
- struct radeon_ring *ring = &rdev->ring[ridx];
- struct radeon_semaphore *sem = NULL;
+ struct radeon_ib ib;
struct radeon_bo_va *bo_va;
unsigned nptes, npdes, ndw;
uint64_t addr;
@@ -1124,25 +1126,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
bo_va->valid = false;
}
- if (vm->fence && radeon_fence_signaled(vm->fence)) {
- radeon_fence_unref(&vm->fence);
- }
-
- if (vm->fence && vm->fence->ring != ridx) {
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- return r;
- }
- }
-
nptes = radeon_bo_ngpu_pages(bo);
/* assume two extra pdes in case the mapping overlaps the borders */
npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
- /* estimate number of dw needed */
- /* semaphore, fence and padding */
- ndw = 32;
+ /* padding, etc. */
+ ndw = 64;
if (RADEON_VM_BLOCK_SIZE > 11)
/* reserve space for one header for every 2k dwords */
@@ -1161,33 +1151,31 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
/* reserve space for pde addresses */
ndw += npdes * 2;
- r = radeon_ring_lock(rdev, ring, ndw);
- if (r) {
- return r;
- }
+ /* update too big for an IB */
+ if (ndw > 0xfffff)
+ return -ENOMEM;
- if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
- radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
- radeon_fence_note_sync(vm->fence, ridx);
- }
+ r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+ if (r)
+ return r;
+ ib.length_dw = 0;
- r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
+ r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
if (r) {
- radeon_ring_unlock_undo(rdev, ring);
+ radeon_ib_free(rdev, &ib);
return r;
}
- radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
+ radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
addr, bo_va->flags);
- radeon_fence_unref(&vm->fence);
- r = radeon_fence_emit(rdev, &vm->fence, ridx);
+ radeon_ib_sync_to(&ib, vm->fence);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
- radeon_ring_unlock_undo(rdev, ring);
+ radeon_ib_free(rdev, &ib);
return r;
}
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, vm->fence);
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(ib.fence);
+ radeon_ib_free(rdev, &ib);
radeon_fence_unref(&vm->last_flush);
return 0;
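
For a sense of scale on the new 0xfffff IB limit, the dword budget can be modelled standalone. This is a sketch reconstructed from the reservation comments in the hunk above; RADEON_VM_BLOCK_SIZE is assumed to be 9 and the pte-address term is assumed by analogy with the pde one shown, so treat the totals as illustrative:

#include <stdio.h>
#include <stdint.h>

#define RADEON_VM_BLOCK_SIZE 9          /* assumed value for this sketch */

static uint32_t vm_update_ndw(uint32_t nptes)
{
	/* two extra pdes in case the mapping overlaps the borders */
	uint32_t npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
	uint32_t ndw = 64;              /* padding, etc. */

	if (RADEON_VM_BLOCK_SIZE > 11)
		ndw += (nptes >> 11) * 4;       /* one header per 2k dwords */
	else
		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
	ndw += nptes * 2;               /* pte addresses (assumed term) */
	ndw += (npdes >> 11) * 4;
	ndw += npdes * 2;               /* pde addresses */
	return ndw;
}

int main(void)
{
	/* a 1 MiB buffer is 256 4K pages: far below the 0xfffff cap */
	printf("ndw for 256 ptes: %u\n", (unsigned)vm_update_ndw(256));
	return 0;
}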
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index c5bddd630eb9..fc60b74ee304 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -39,7 +39,7 @@ extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
* radeon_ddc_probe
*
*/
-bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
{
u8 out = 0x0;
u8 buf[8];
@@ -63,7 +63,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
- ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+ if (use_aux) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+ ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
+ } else {
+ ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+ }
+
if (ret != 2)
/* Couldn't find an accessible DDC on this connector */
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index e7710339a6a7..8d68e972789a 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -28,6 +28,8 @@
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
* Michel Dänzer <michel@daenzer.net>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 9c312f9afb68..c75cb2c6ba71 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -185,11 +185,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (info->request == RADEON_INFO_TIMESTAMP) {
if (rdev->family >= CHIP_R600) {
value_ptr64 = (uint64_t*)((unsigned long)info->value);
- if (rdev->family >= CHIP_TAHITI) {
- value64 = si_get_gpu_clock(rdev);
- } else {
- value64 = r600_get_gpu_clock(rdev);
- }
+ value64 = radeon_get_gpu_clock_counter(rdev);
if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
@@ -282,7 +278,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
/* return clock value in KHz */
- value = rdev->clock.spll.reference_freq * 10;
+ if (rdev->asic->get_xclk)
+ value = radeon_get_xclk(rdev) * 10;
+ else
+ value = rdev->clock.spll.reference_freq * 10;
break;
case RADEON_INFO_NUM_BACKENDS:
if (rdev->family >= CHIP_TAHITI)
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index f5ba2241dacc..62cd512f5c8d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -640,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
enum drm_connector_status found = connector_status_disconnected;
bool color = true;
+ /* just don't bother on RN50; those chips are often connected to remoting
+ * console hw and load detection often fails on them. So to make
+ * everyone happy, report the encoder as always connected.
+ */
+ if (ASIC_IS_RN50(rdev)) {
+ return connector_status_connected;
+ }
+
/* save the regs we need */
vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index b9f067241633..d54d2d7c9031 100644
--- a/drivers/gpu/drm/radeon/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -27,6 +27,8 @@
*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d818b503b42f..4003f5a68c09 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -209,7 +209,8 @@ enum radeon_connector_table {
CT_RN50_POWER,
CT_MAC_X800,
CT_MAC_G5_9600,
- CT_SAM440EP
+ CT_SAM440EP,
+ CT_MAC_G4_SILVER
};
enum radeon_dvo_chip {
@@ -558,7 +559,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 val);
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 883c95d8d90f..d3aface2d12d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -84,6 +84,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placement.fpfn = 0;
rbo->placement.lpfn = 0;
rbo->placement.placement = rbo->placements;
+ rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM)
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
@@ -104,14 +105,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
if (!c)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
rbo->placement.num_placement = c;
-
- c = 0;
- rbo->placement.busy_placement = rbo->busy_placements;
- if (rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
- } else {
- rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
- }
rbo->placement.num_busy_placement = c;
}
@@ -357,6 +350,7 @@ int radeon_bo_list_validate(struct list_head *head)
{
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
+ u32 domain;
int r;
r = ttm_eu_reserve_buffers(head);
@@ -366,9 +360,17 @@ int radeon_bo_list_validate(struct list_head *head)
list_for_each_entry(lobj, head, tv.head) {
bo = lobj->bo;
if (!bo->pin_count) {
+ domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
+
+ retry:
+ radeon_ttm_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement,
true, false);
if (unlikely(r)) {
+ if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
+ domain |= RADEON_GEM_DOMAIN_GTT;
+ goto retry;
+ }
return r;
}
}
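
The retry loop above encodes a simple placement-fallback policy: validate in the requested domain first and, if a VRAM-only placement fails for any reason other than a pending signal, widen the domain to VRAM|GTT and try again. A minimal standalone model of the policy (names and the fake validator are hypothetical):

#include <stdio.h>
#include <errno.h>

#define ERESTARTSYS 512                 /* kernel-internal; defined for the sketch */
#define DOMAIN_VRAM 0x4
#define DOMAIN_GTT  0x2

/* stand-in for ttm_bo_validate(): pretend VRAM is full on the first try */
static int fake_validate(unsigned domain)
{
	static int vram_full = 1;

	if (domain == DOMAIN_VRAM && vram_full) {
		vram_full = 0;
		return -ENOMEM;
	}
	return 0;
}

static int validate_with_fallback(unsigned domain)
{
	int r;

retry:
	r = fake_validate(domain);
	if (r) {
		if (r != -ERESTARTSYS && domain == DOMAIN_VRAM) {
			domain |= DOMAIN_GTT;   /* widen placement and retry */
			goto retry;
		}
		return r;
	}
	return 0;
}

int main(void)
{
	printf("validate: %d\n", validate_with_fallback(DOMAIN_VRAM));
	return 0;
}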
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0bfa656aa87d..338fd6a74e87 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -169,7 +169,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
/* starting with BTC, there is one state that is used for both
* MH and SH. Difference is that we always use the high clock index for
- * mclk.
+ * mclk and vddci.
*/
if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
(rdev->family >= CHIP_BARTS) &&
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index e09521858f64..4940af7e75e6 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -28,198 +28,71 @@
#include "radeon.h"
#include <drm/radeon_drm.h>
-#include <linux/dma-buf.h>
-
-static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
- struct radeon_bo *bo = attachment->dmabuf->priv;
- struct drm_device *dev = bo->rdev->ddev;
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
int npages = bo->tbo.num_pages;
- struct sg_table *sg;
- int nents;
-
- mutex_lock(&dev->struct_mutex);
- sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
- nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
- mutex_unlock(&dev->struct_mutex);
- return sg;
-}
-
-static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg, enum dma_data_direction dir)
-{
- dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
- sg_free_table(sg);
- kfree(sg);
-}
-
-static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct radeon_bo *bo = dma_buf->priv;
-
- if (bo->gem_base.export_dma_buf == dma_buf) {
- DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
- bo->gem_base.export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(&bo->gem_base);
- }
-}
-
-static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
-static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-
-static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
+ return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
-static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
{
- return -EINVAL;
-}
-
-static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
-{
- struct radeon_bo *bo = dma_buf->priv;
- struct drm_device *dev = bo->rdev->ddev;
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret;
- mutex_lock(&dev->struct_mutex);
- if (bo->vmapping_count) {
- bo->vmapping_count++;
- goto out_unlock;
- }
-
ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
&bo->dma_buf_vmap);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
+ if (ret)
return ERR_PTR(ret);
- }
- bo->vmapping_count = 1;
-out_unlock:
- mutex_unlock(&dev->struct_mutex);
+
return bo->dma_buf_vmap.virtual;
}
-static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
- struct radeon_bo *bo = dma_buf->priv;
- struct drm_device *dev = bo->rdev->ddev;
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
- mutex_lock(&dev->struct_mutex);
- bo->vmapping_count--;
- if (bo->vmapping_count == 0) {
- ttm_bo_kunmap(&bo->dma_buf_vmap);
- }
- mutex_unlock(&dev->struct_mutex);
+ ttm_bo_kunmap(&bo->dma_buf_vmap);
}
-const static struct dma_buf_ops radeon_dmabuf_ops = {
- .map_dma_buf = radeon_gem_map_dma_buf,
- .unmap_dma_buf = radeon_gem_unmap_dma_buf,
- .release = radeon_gem_dmabuf_release,
- .kmap = radeon_gem_kmap,
- .kmap_atomic = radeon_gem_kmap_atomic,
- .kunmap = radeon_gem_kunmap,
- .kunmap_atomic = radeon_gem_kunmap_atomic,
- .mmap = radeon_gem_prime_mmap,
- .vmap = radeon_gem_prime_vmap,
- .vunmap = radeon_gem_prime_vunmap,
-};
-
-static int radeon_prime_create(struct drm_device *dev,
- size_t size,
- struct sg_table *sg,
- struct radeon_bo **pbo)
+
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_bo *bo;
int ret;
ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
- RADEON_GEM_DOMAIN_GTT, sg, pbo);
+ RADEON_GEM_DOMAIN_GTT, sg, &bo);
if (ret)
- return ret;
- bo = *pbo;
+ return ERR_PTR(ret);
bo->gem_base.driver_private = bo;
mutex_lock(&rdev->gem.mutex);
list_add_tail(&bo->list, &rdev->gem.objects);
mutex_unlock(&rdev->gem.mutex);
- return 0;
+ return &bo->gem_base;
}
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags)
+int radeon_gem_prime_pin(struct drm_gem_object *obj)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret = 0;
ret = radeon_bo_reserve(bo, false);
if (unlikely(ret != 0))
- return ERR_PTR(ret);
+ return ret;
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
if (ret) {
radeon_bo_unreserve(bo);
- return ERR_PTR(ret);
+ return ret;
}
radeon_bo_unreserve(bo);
- return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
-}
-struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct dma_buf_attachment *attach;
- struct sg_table *sg;
- struct radeon_bo *bo;
- int ret;
-
- if (dma_buf->ops == &radeon_dmabuf_ops) {
- bo = dma_buf->priv;
- if (bo->gem_base.dev == dev) {
- drm_gem_object_reference(&bo->gem_base);
- return &bo->gem_base;
- }
- }
-
- /* need to attach */
- attach = dma_buf_attach(dma_buf, dev->dev);
- if (IS_ERR(attach))
- return ERR_CAST(attach);
-
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
- if (ret)
- goto fail_unmap;
-
- bo->gem_base.import_attach = attach;
-
- return &bo->gem_base;
-
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
- dma_buf_detach(dma_buf, attach);
- return ERR_PTR(ret);
+ return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 5d8f735d6aaf..7e2c2b7cf188 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -3706,4 +3706,19 @@
#define RV530_GB_PIPE_SELECT2 0x4124
+#define RADEON_CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define RADEON_CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define RADEON_CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define RADEON_CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define R100_CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define R600_CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define RADEON_PACKET_TYPE0 0
+#define RADEON_PACKET_TYPE1 1
+#define RADEON_PACKET_TYPE2 2
+#define RADEON_PACKET_TYPE3 3
+
+#define RADEON_PACKET3_NOP 0x10
+
+#define RADEON_VLINE_STAT (1 << 12)
+
#endif
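
With the header-parsing macros now shared in radeon_reg.h, decoding a PM4 header is uniform across generations. A small standalone example, using the macro definitions copied from the hunk above on a hand-built type-3 NOP header:

#include <stdio.h>
#include <stdint.h>

#define RADEON_CP_PACKET_GET_TYPE(h)    (((h) >> 30) & 3)
#define RADEON_CP_PACKET_GET_COUNT(h)   (((h) >> 16) & 0x3FFF)
#define RADEON_CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
#define RADEON_PACKET_TYPE3 3
#define RADEON_PACKET3_NOP  0x10

int main(void)
{
	/* type 3, count 0, opcode NOP: (3 << 30) | (0 << 16) | (0x10 << 8) */
	uint32_t header = (3u << 30) | (0x10u << 8);

	printf("type=%u count=%u opcode=0x%02x nop=%d\n",
	       (unsigned)RADEON_CP_PACKET_GET_TYPE(header),
	       (unsigned)RADEON_CP_PACKET_GET_COUNT(header),
	       (unsigned)RADEON_CP_PACKET3_GET_OPCODE(header),
	       RADEON_CP_PACKET3_GET_OPCODE(header) == RADEON_PACKET3_NOP);
	return 0;
}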
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index ebd69562ef6c..8d58e268ff6d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -109,6 +109,25 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
}
/**
+ * radeon_ib_sync_to - sync to fence before executing the IB
+ *
+ * @ib: IB object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence before executing the IB
+ */
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
+{
+ struct radeon_fence *other;
+
+ if (!fence)
+ return;
+
+ other = ib->sync_to[fence->ring];
+ ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
+}
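
The helper above keeps at most one fence per ring: repeated calls collapse into a wait on the latest fence, because radeon_fence_later() prefers the newer of two fences on the same ring. A standalone model of that rule (the types and the sequence comparison are simplified assumptions):

#include <stdio.h>
#include <stdint.h>

#define NUM_RINGS 5

struct fence { int ring; uint64_t seq; };

/* stand-in for radeon_fence_later(): later sequence wins on one ring */
static struct fence *fence_later(struct fence *a, struct fence *b)
{
	if (!a) return b;
	if (!b) return a;
	return a->seq > b->seq ? a : b;
}

static void ib_sync_to(struct fence *sync_to[NUM_RINGS], struct fence *f)
{
	if (!f)
		return;
	sync_to[f->ring] = fence_later(f, sync_to[f->ring]);
}

int main(void)
{
	struct fence *sync_to[NUM_RINGS] = { 0 };
	struct fence a = { .ring = 0, .seq = 10 };
	struct fence b = { .ring = 0, .seq = 42 };

	ib_sync_to(sync_to, &a);
	ib_sync_to(sync_to, &b);    /* same ring, later seq: replaces a */
	printf("ring 0 waits on seq %llu\n",
	       (unsigned long long)sync_to[0]->seq);
	return 0;
}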
+
+/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
* @rdev: radeon_device pointer
@@ -377,6 +396,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
{
int r;
+ /* make sure we aren't trying to allocate more space than there is on the ring */
+ if (ndw > (ring->ring_size / 4))
+ return -ENOMEM;
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
@@ -770,22 +792,30 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
int ridx = *(int*)node->info_ent->data;
struct radeon_ring *ring = &rdev->ring[ridx];
unsigned count, i, j;
+ u32 tmp;
radeon_ring_free_size(rdev, ring);
count = (ring->ring_size / 4) - ring->ring_free_dw;
- seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
- seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
+ tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
+ seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
+ tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
+ seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
if (ring->rptr_save_reg) {
seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
RREG32(ring->rptr_save_reg));
}
- seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
- seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
+ seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
+ seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
+ seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
+ seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
- i = ring->rptr;
- for (j = 0; j <= count; j++) {
- seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ /* print 32 dwords before the current rptr, as the last executed
+ * packet is often the root issue
+ */
+ i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+ for (j = 0; j <= (count + 32); j++) {
+ seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
i = (i + 1) & ring->ptr_mask;
}
return 0;
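
The new start index wraps correctly even when rptr is near zero: adding ptr_mask + 1 (the ring size in entries) before subtracting keeps the intermediate value non-negative, and the final mask folds it back into range. A quick standalone check (ring size assumed for illustration):

#include <stdio.h>

int main(void)
{
	unsigned ptr_mask = 0x3ff;      /* assumed 1024-entry ring */
	unsigned rptr = 5;              /* near the start of the ring */

	/* start 32 entries before rptr, wrapping around the ring */
	unsigned i = (rptr + ptr_mask + 1 - 32) & ptr_mask;

	printf("dump starts at r[%u]\n", i);    /* 997, not negative */
	return 0;
}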
@@ -794,11 +824,15 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX;
+static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
+ {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index},
+ {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index},
};
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 97f3ece81cd2..8dcc20f53d73 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -95,6 +95,10 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
/* we assume caller has already allocated space on waiters ring */
radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+ /* for debugging lockup only, used by debugfs files */
+ rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
+ rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 8e9057b6a365..4d20910899d4 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -25,6 +25,8 @@
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Kevin E. Martin <martin@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1d8ff2f850ba..93f760e27a92 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -38,6 +38,7 @@
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/swiotlb.h>
#include "radeon_reg.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 0f656b111c15..a072fa8c46b0 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -1,5 +1,6 @@
cayman 0x9400
0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
0x000084FC CP_STRMOUT_CNTL
0x000085F0 CP_COHER_CNTL
0x000085F4 CP_COHER_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 911a8fbd32bb..78d5e99d759d 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -324,6 +324,8 @@ rv515 0x6d40
0x46AC US_OUT_FMT_2
0x46B0 US_OUT_FMT_3
0x46B4 US_W_FMT
+0x46C0 RB3D_COLOR_CLEAR_VALUE_AR
+0x46C4 RB3D_COLOR_CLEAR_VALUE_GB
0x4BC0 FG_FOG_BLEND
0x4BC4 FG_FOG_FACTOR
0x4BC8 FG_FOG_COLOR_R
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 2bb6d0e84b3d..435ed3551364 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -336,6 +336,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
WREG32(R600_CITF_CNTL, blackout);
}
}
+ /* wait for the MC to settle */
+ udelay(100);
}
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index 590309a710b1..6927a200daf4 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -205,17 +205,6 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 87c979c4f721..d63fe1d0f53f 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -43,6 +43,31 @@ static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+/**
+ * rv770_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r7xx-cayman).
+ */
+u32 rv770_get_xclk(struct radeon_device *rdev)
+{
+ u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 tmp = RREG32(CG_CLKPIN_CNTL);
+
+ if (tmp & MUX_TCLK_TO_XCLK)
+ return TCLK;
+
+ if (tmp & XTALIN_DIVIDE)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
+
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -887,6 +912,80 @@ static int rv770_mc_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFF)
+ cur_size_in_dw = 0xFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
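
Each copy packet emitted above is 5 dwords and moves at most 0xFFFF dwords of payload, which is where the num_loops * 5 + 8 ring reservation comes from; the extra 8 dwords presumably cover the semaphore, fence and padding. A standalone check of the loop arithmetic (copy size illustrative):

#include <stdio.h>
#include <stdint.h>

#define RADEON_GPU_PAGE_SHIFT 12        /* assumed 4K GPU pages */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned num_gpu_pages = 1024;  /* a 4 MiB copy */
	uint32_t size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	int num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);

	/* 5 dwords per copy packet plus 8 for sync/fence/padding */
	printf("size_in_dw=%u loops=%d ring dwords=%d\n",
	       (unsigned)size_in_dw, num_loops, num_loops * 5 + 8);
	return 0;
}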
static int rv770_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 20e29d23d348..c55f950a4af7 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -128,6 +128,10 @@
#define GUI_ACTIVE (1<<31)
#define GRBM_STATUS2 0x8014
+#define CG_CLKPIN_CNTL 0x660
+# define MUX_TCLK_TO_XCLK (1 << 8)
+# define XTALIN_DIVIDE (1 << 9)
+
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x3FF0000
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ef683653f0b7..80979ed951eb 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -38,6 +38,7 @@
#define SI_CE_UCODE_SIZE 2144
#define SI_RLC_UCODE_SIZE 2048
#define SI_MC_UCODE_SIZE 7769
+#define OLAND_MC_UCODE_SIZE 7863
MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
@@ -54,6 +55,11 @@ MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
+MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
+MODULE_FIRMWARE("radeon/OLAND_me.bin");
+MODULE_FIRMWARE("radeon/OLAND_ce.bin");
+MODULE_FIRMWARE("radeon/OLAND_mc.bin");
+MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -61,6 +67,35 @@ extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+/**
+ * si_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (SI).
+ */
+u32 si_get_xclk(struct radeon_device *rdev)
+{
+ u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 tmp;
+
+ tmp = RREG32(CG_CLKPIN_CNTL_2);
+ if (tmp & MUX_TCLK_TO_XCLK)
+ return TCLK;
+
+ tmp = RREG32(CG_CLKPIN_CNTL);
+ if (tmp & XTALIN_DIVIDE)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
/* get temperature in millidegrees */
int si_get_temp(struct radeon_device *rdev)
@@ -200,6 +235,45 @@ static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
{0x0000009f, 0x00a37400}
};
+static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+ {0x0000006f, 0x03044000},
+ {0x00000070, 0x0480c018},
+ {0x00000071, 0x00000040},
+ {0x00000072, 0x01000000},
+ {0x00000074, 0x000000ff},
+ {0x00000075, 0x00143400},
+ {0x00000076, 0x08ec0800},
+ {0x00000077, 0x040000cc},
+ {0x00000079, 0x00000000},
+ {0x0000007a, 0x21000409},
+ {0x0000007c, 0x00000000},
+ {0x0000007d, 0xe8000000},
+ {0x0000007e, 0x044408a8},
+ {0x0000007f, 0x00000003},
+ {0x00000080, 0x00000000},
+ {0x00000081, 0x01000000},
+ {0x00000082, 0x02000000},
+ {0x00000083, 0x00000000},
+ {0x00000084, 0xe3f3e4f4},
+ {0x00000085, 0x00052024},
+ {0x00000087, 0x00000000},
+ {0x00000088, 0x66036603},
+ {0x00000089, 0x01000000},
+ {0x0000008b, 0x1c0a0000},
+ {0x0000008c, 0xff010000},
+ {0x0000008e, 0xffffefff},
+ {0x0000008f, 0xfff3efff},
+ {0x00000090, 0xfff3efbf},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00a17730}
+};
+
/* ucode loading */
static int si_mc_load_microcode(struct radeon_device *rdev)
{
@@ -228,6 +302,11 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
+ case CHIP_OLAND:
+ io_mc_regs = (u32 *)&oland_io_mc_regs;
+ ucode_size = OLAND_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -322,6 +401,15 @@ static int si_init_microcode(struct radeon_device *rdev)
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
break;
+ case CHIP_OLAND:
+ chip_name = "OLAND";
+ rlc_chip_name = "OLAND";
+ pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+ me_req_size = SI_PM4_UCODE_SIZE * 4;
+ ce_req_size = SI_CE_UCODE_SIZE * 4;
+ rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+ mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+ break;
default: BUG();
}
@@ -1125,7 +1213,8 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
}
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
- } else if (rdev->family == CHIP_VERDE) {
+ } else if ((rdev->family == CHIP_VERDE) ||
+ (rdev->family == CHIP_OLAND)) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0: /* non-AA compressed depth or any compressed stencil */
@@ -1570,6 +1659,23 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
break;
+ case CHIP_OLAND:
+ rdev->config.si.max_shader_engines = 1;
+ rdev->config.si.max_tile_pipes = 4;
+ rdev->config.si.max_cu_per_sh = 6;
+ rdev->config.si.max_sh_per_se = 1;
+ rdev->config.si.max_backends_per_se = 2;
+ rdev->config.si.max_texture_channel_caches = 4;
+ rdev->config.si.max_gprs = 256;
+ rdev->config.si.max_gs_threads = 16;
+ rdev->config.si.max_hw_contexts = 8;
+
+ rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+ rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+ break;
}
/* Initialize HDP */
@@ -2106,92 +2212,275 @@ static int si_cp_resume(struct radeon_device *rdev)
return 0;
}
-bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
{
- u32 srbm_status;
- u32 grbm_status, grbm_status2;
- u32 grbm_status_se0, grbm_status_se1;
-
- srbm_status = RREG32(SRBM_STATUS);
- grbm_status = RREG32(GRBM_STATUS);
- grbm_status2 = RREG32(GRBM_STATUS2);
- grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
- grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
- if (!(grbm_status & GUI_ACTIVE)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force CP activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
+ u32 reset_mask = 0;
+ u32 tmp;
+
+ /* GRBM_STATUS */
+ tmp = RREG32(GRBM_STATUS);
+ if (tmp & (PA_BUSY | SC_BUSY |
+ BCI_BUSY | SX_BUSY |
+ TA_BUSY | VGT_BUSY |
+ DB_BUSY | CB_BUSY |
+ GDS_BUSY | SPI_BUSY |
+ IA_BUSY | IA_BUSY_NO_DMA))
+ reset_mask |= RADEON_RESET_GFX;
+
+ if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+ CP_BUSY | CP_COHERENCY_BUSY))
+ reset_mask |= RADEON_RESET_CP;
+
+ if (tmp & GRBM_EE_BUSY)
+ reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+ /* GRBM_STATUS2 */
+ tmp = RREG32(GRBM_STATUS2);
+ if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+ reset_mask |= RADEON_RESET_RLC;
+
+ /* DMA_STATUS_REG 0 */
+ tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA;
+
+ /* DMA_STATUS_REG 1 */
+ tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (!(tmp & DMA_IDLE))
+ reset_mask |= RADEON_RESET_DMA1;
+
+ /* SRBM_STATUS2 */
+ tmp = RREG32(SRBM_STATUS2);
+ if (tmp & DMA_BUSY)
+ reset_mask |= RADEON_RESET_DMA;
+
+ if (tmp & DMA1_BUSY)
+ reset_mask |= RADEON_RESET_DMA1;
+
+ /* SRBM_STATUS */
+ tmp = RREG32(SRBM_STATUS);
+
+ if (tmp & IH_BUSY)
+ reset_mask |= RADEON_RESET_IH;
+
+ if (tmp & SEM_BUSY)
+ reset_mask |= RADEON_RESET_SEM;
+
+ if (tmp & GRBM_RQ_PENDING)
+ reset_mask |= RADEON_RESET_GRBM;
+
+ if (tmp & VMC_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+ MCC_BUSY | MCD_BUSY))
+ reset_mask |= RADEON_RESET_MC;
+
+ if (evergreen_is_display_hung(rdev))
+ reset_mask |= RADEON_RESET_DISPLAY;
+
+ /* VM_L2_STATUS */
+ tmp = RREG32(VM_L2_STATUS);
+ if (tmp & L2_BUSY)
+ reset_mask |= RADEON_RESET_VMC;
+
+ return reset_mask;
}
-static int si_gpu_soft_reset(struct radeon_device *rdev)
+static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
- u32 grbm_reset = 0;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
- if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
- return 0;
+ if (reset_mask == 0)
+ return;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ evergreen_print_gpu_status_regs(rdev);
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
- dev_info(rdev->dev, "GPU softreset \n");
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(GRBM_STATUS2));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(SRBM_STATUS));
- evergreen_mc_stop(rdev, &save);
- if (radeon_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
- /* reset all the gfx blocks */
- grbm_reset = (SOFT_RESET_CP |
- SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_GDS |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_BCI |
- SOFT_RESET_SPI |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VGT |
- SOFT_RESET_IA);
-
- dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
- WREG32(GRBM_SOFT_RESET, grbm_reset);
- (void)RREG32(GRBM_SOFT_RESET);
+ if (reset_mask & RADEON_RESET_DMA) {
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+ }
+ if (reset_mask & RADEON_RESET_DMA1) {
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+ }
+
udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- (void)RREG32(GRBM_SOFT_RESET);
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
+ grbm_soft_reset = SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_BCI |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA;
+ }
+
+ if (reset_mask & RADEON_RESET_CP) {
+ grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+ }
+
+ if (reset_mask & RADEON_RESET_DMA)
+ srbm_soft_reset |= SOFT_RESET_DMA;
+
+ if (reset_mask & RADEON_RESET_DMA1)
+ srbm_soft_reset |= SOFT_RESET_DMA1;
+
+ if (reset_mask & RADEON_RESET_DISPLAY)
+ srbm_soft_reset |= SOFT_RESET_DC;
+
+ if (reset_mask & RADEON_RESET_RLC)
+ grbm_soft_reset |= SOFT_RESET_RLC;
+
+ if (reset_mask & RADEON_RESET_SEM)
+ srbm_soft_reset |= SOFT_RESET_SEM;
+
+ if (reset_mask & RADEON_RESET_IH)
+ srbm_soft_reset |= SOFT_RESET_IH;
+
+ if (reset_mask & RADEON_RESET_GRBM)
+ srbm_soft_reset |= SOFT_RESET_GRBM;
+
+ if (reset_mask & RADEON_RESET_VMC)
+ srbm_soft_reset |= SOFT_RESET_VMC;
+
+ if (reset_mask & RADEON_RESET_MC)
+ srbm_soft_reset |= SOFT_RESET_MC;
+
+ if (grbm_soft_reset) {
+ tmp = RREG32(GRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ tmp = RREG32(GRBM_SOFT_RESET);
+ }
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+ }
+
/* Wait a little for things to settle down */
udelay(50);
- dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(GRBM_STATUS));
- dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(GRBM_STATUS2));
- dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(GRBM_STATUS_SE0));
- dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(GRBM_STATUS_SE1));
- dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(SRBM_STATUS));
+
evergreen_mc_resume(rdev, &save);
- return 0;
+ udelay(50);
+
+ evergreen_print_gpu_status_regs(rdev);
}
int si_asic_reset(struct radeon_device *rdev)
{
- return si_gpu_soft_reset(rdev);
+ u32 reset_mask;
+
+ reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, true);
+
+ si_gpu_soft_reset(rdev, reset_mask);
+
+ reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (!reset_mask)
+ r600_set_bios_scratch_engine_hung(rdev, false);
+
+ return 0;
+}
+
+/**
+ * si_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = si_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_CP))) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * si_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = si_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
/* MC */
@@ -2793,19 +3082,19 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
do {
pkt.idx = idx;
- pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
- pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
pkt.one_reg_wr = 0;
switch (pkt.type) {
- case PACKET_TYPE0:
+ case RADEON_PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
ret = -EINVAL;
break;
- case PACKET_TYPE2:
+ case RADEON_PACKET_TYPE2:
idx += 1;
break;
- case PACKET_TYPE3:
- pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ case RADEON_PACKET_TYPE3:
+ pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
if (ib->is_const_ib)
ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
else {
@@ -2858,19 +3147,21 @@ void si_vm_fini(struct radeon_device *rdev)
* si_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using the CP (cayman-si).
+ * Update the page tables using the CP (SI).
*/
-void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void si_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
@@ -2881,11 +3172,11 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
if (ndw > 0x3FFE)
ndw = 0x3FFE;
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1)));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe));
+ ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
+ ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(1));
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
for (; ndw > 2; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -2897,8 +3188,8 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
@@ -2910,9 +3201,9 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
ndw = 0xFFFFE;
/* for non-physically contiguous pages (system) */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
@@ -2924,8 +3215,8 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
}
addr += incr;
value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
@@ -2939,20 +3230,22 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
else
value = 0;
/* for physically contiguous pages (vram) */
- radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
- radeon_ring_write(ring, pe); /* dst addr */
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
- radeon_ring_write(ring, r600_flags); /* mask */
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, value); /* value */
- radeon_ring_write(ring, upper_32_bits(value));
- radeon_ring_write(ring, incr); /* increment size */
- radeon_ring_write(ring, 0);
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
pe += ndw * 4;
addr += (ndw / 2) * incr;
count -= ndw / 2;
}
}
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}
}
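
The trailing while loop pads the DMA variant of the IB out to an 8-dword boundary with NOP packets, presumably because the async DMA engine requires 8-dword IB alignment. The alignment arithmetic, modelled standalone (the NOP opcode is a placeholder; the real value comes from DMA_PACKET_NOP):

#include <stdio.h>
#include <stdint.h>

#define DMA_NOP 0u      /* placeholder for DMA_PACKET(DMA_PACKET_NOP, ...) */

int main(void)
{
	uint32_t ib[64];
	uint32_t length_dw = 13;        /* pretend the update emitted 13 dwords */

	/* pad to an 8-dword boundary, exactly like the loop above */
	while (length_dw & 0x7)
		ib[length_dw++] = DMA_NOP;

	printf("padded length: %u dwords\n", (unsigned)length_dw);      /* 16 */
	return 0;
}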
@@ -4316,14 +4609,14 @@ void si_fini(struct radeon_device *rdev)
}
/**
- * si_get_gpu_clock - return GPU clock counter snapshot
+ * si_get_gpu_clock_counter - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (SI).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t si_get_gpu_clock(struct radeon_device *rdev)
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
{
uint64_t clock;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 62b46215d423..23fc08fc8e7f 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -58,13 +58,46 @@
#define VGA_HDP_CONTROL 0x328
#define VGA_MEMORY_DISABLE (1 << 4)
+#define CG_CLKPIN_CNTL 0x660
+# define XTALIN_DIVIDE (1 << 1)
+#define CG_CLKPIN_CNTL_2 0x664
+# define MUX_TCLK_TO_XCLK (1 << 8)
+
#define DMIF_ADDR_CONFIG 0xBD4
#define SRBM_STATUS 0xE50
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define IH_BUSY (1 << 17)
+
+#define SRBM_SOFT_RESET 0x0E60
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_DMA1 (1 << 6)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
#define CC_SYS_RB_BACKEND_DISABLE 0xe80
#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
+#define SRBM_STATUS2 0x0EC4
+#define DMA_BUSY (1 << 5)
+#define DMA1_BUSY (1 << 6)
+
#define VM_L2_CNTL 0x1400
#define ENABLE_L2_CACHE (1 << 0)
#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
@@ -767,16 +800,7 @@
/*
* PM4
*/
-#define PACKET_TYPE0 0
-#define PACKET_TYPE1 1
-#define PACKET_TYPE2 2
-#define PACKET_TYPE3 3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -785,7 +809,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
@@ -1013,6 +1037,8 @@
# define DATA_SWAP_ENABLE (1 << 3)
# define FENCE_SWAP_ENABLE (1 << 4)
# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0xd034
+# define DMA_IDLE (1 << 0)
#define DMA_TILING_CONFIG 0xd0b8
#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \