Diffstat (limited to 'drivers/gpu/drm')
63 files changed, 2605 insertions, 947 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 54ac8a845e9f..63208e5c1588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1718,6 +1718,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 				 struct drm_file *file_priv);
 void amdgpu_driver_preclose_kms(struct drm_device *dev,
 				struct drm_file *file_priv);
+int amdgpu_suspend(struct amdgpu_device *adev);
 int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
 int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 4f973a9c7b87..8ec1967a850b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -305,8 +305,9 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
 	GOP_VBIOS_CONTENT *vbios;
 	VFCT_IMAGE_HEADER *vhdr;
 
-	if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+	if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
 		return false;
+	tbl_size = hdr->length;
 	if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
 		DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
 		goto out_unmap;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f6394b5793c2..60bd4afe45c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1517,7 +1517,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 	return 0;
 }
 
-static int amdgpu_suspend(struct amdgpu_device *adev)
+int amdgpu_suspend(struct amdgpu_device *adev)
 {
 	int i, r;
 
@@ -2523,7 +2523,7 @@ static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 	bool pm_pg_lock, use_bank;
@@ -2599,7 +2599,7 @@ end:
 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 	bool pm_pg_lock, use_bank;
@@ -2673,7 +2673,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -2700,7 +2700,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -2728,7 +2728,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -2755,7 +2755,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -2783,7 +2783,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -2810,7 +2810,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -2838,7 +2838,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 	uint32_t *config, no_regs = 0;
@@ -2908,7 +2908,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	int idx, r;
 	int32_t value;
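Note: the debugfs handlers above (and the ring/ttm ones below) all stop dereferencing f->f_inode directly and use the VFS file_inode() accessor instead; because i_private is a void *, the explicit cast in the ring code also becomes unnecessary. A minimal sketch of the pattern, with a hypothetical my_dev type standing in for the driver object:

#include <linux/fs.h>
#include <linux/uaccess.h>

/* Hypothetical device type; in the diff above this is struct amdgpu_device. */
struct my_dev {
	u32 dummy_reg;
};

static ssize_t my_debugfs_read(struct file *f, char __user *buf,
			       size_t size, loff_t *pos)
{
	/* file_inode() is the supported accessor for a file's inode;
	 * i_private is void *, so no cast is needed. */
	struct my_dev *dev = file_inode(f)->i_private;
	u32 value = dev->dummy_reg;

	if (size < sizeof(value))
		return -EINVAL;
	if (copy_to_user(buf, &value, sizeof(value)))
		return -EFAULT;

	*pos += sizeof(value);
	return sizeof(value);
}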
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7914f999b1bc..8cb937b2bfcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -486,12 +486,15 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 static void
 amdgpu_pci_shutdown(struct pci_dev *pdev)
 {
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct amdgpu_device *adev = dev->dev_private;
+
 	/* if we are running in a VM, make sure the device
 	 * torn down properly on reboot/shutdown.
 	 * unfortunately we can't detect certain
 	 * hypervisors so just do this all the time.
 	 */
-	amdgpu_pci_remove(pdev);
+	amdgpu_suspend(adev);
 }
 
 static int amdgpu_pmops_suspend(struct device *dev)
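Note: the shutdown hook thus changes from a full amdgpu_pci_remove() teardown to a hardware quiesce via the newly exported amdgpu_suspend() (which is what the amdgpu.h and amdgpu_device.c changes above enable). A sketch of the resulting shape, with my_driver_suspend() as a stand-in for the driver's suspend routine:

#include <linux/pci.h>
#include <drm/drmP.h>

/* Stand-in for the driver's suspend path (amdgpu_suspend() above). */
void my_driver_suspend(void *driver_private);

/* On reboot/shutdown the device only needs to be quiesced so a
 * hypervisor can hand it out again cleanly; a full device teardown
 * (the old amdgpu_pci_remove() call) is more than the situation
 * requires and races with userspace that may still be running. */
static void my_pci_shutdown(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	my_driver_suspend(dev->dev_private);
}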
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 4c992826d2d6..a47628395914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -280,7 +280,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private;
+	struct amdgpu_ring *ring = file_inode(f)->i_private;
 	int r, i;
 	uint32_t value, result, early[3];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index bc70f80260d8..8e35c1ff59e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1511,7 +1511,7 @@ static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
 				    size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -1555,7 +1555,7 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
 static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
 				   size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 59eff6e9a883..44f024c9b9aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1944,9 +1944,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	dce_v6_0_lock_cursor(crtc, true);
 
-	if (width != amdgpu_crtc->cursor_width ||
-	    height != amdgpu_crtc->cursor_height ||
-	    hot_x != amdgpu_crtc->cursor_hot_x ||
+	if (hot_x != amdgpu_crtc->cursor_hot_x ||
 	    hot_y != amdgpu_crtc->cursor_hot_y) {
 		int x, y;
 
@@ -1955,8 +1953,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v6_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_width = width;
-		amdgpu_crtc->cursor_height = height;
 		amdgpu_crtc->cursor_hot_x = hot_x;
 		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 8de832dd981d..30945fe55ac7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2438,8 +2438,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v8_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_width = width;
-		amdgpu_crtc->cursor_height = height;
 		amdgpu_crtc->cursor_hot_x = hot_x;
 		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index e4a5a5ac0ff3..762f8e82ceb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -752,7 +752,7 @@ static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vbla
 	drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
 	dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
-	hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
+	hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD,
 		      HRTIMER_MODE_REL);
 
 	return HRTIMER_NORESTART;
@@ -772,11 +772,11 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad
 		hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
 			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 		hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
-				    ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+				    DCE_VIRTUAL_VBLANK_PERIOD);
 		adev->mode_info.crtcs[crtc]->vblank_timer.function =
 			dce_virtual_vblank_timer_handle;
 		hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
-			      ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+			      DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
 	} else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
 		DRM_DEBUG("Disable software vsync timer\n");
 		hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
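Note: the dce_virtual change works because ktime_t is a plain 64-bit nanosecond count in this kernel, so a nanosecond constant such as DCE_VIRTUAL_VBLANK_PERIOD can be passed to the hrtimer API directly and the ktime_set(0, ...) wrapper is dead weight. A self-contained sketch of the same re-arming timer pattern (period value illustrative):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

#define MY_VBLANK_PERIOD_NS 16666666LL	/* ~60 Hz, illustrative */

static struct hrtimer my_vblank_timer;

static enum hrtimer_restart my_vblank_fn(struct hrtimer *t)
{
	/* A bare nanosecond count is a valid ktime_t, so no
	 * ktime_set(0, ...) wrapper is required. */
	hrtimer_start(t, MY_VBLANK_PERIOD_NS, HRTIMER_MODE_REL);
	return HRTIMER_NORESTART;
}

static void my_vblank_timer_enable(void)
{
	hrtimer_init(&my_vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_vblank_timer.function = my_vblank_fn;
	hrtimer_start(&my_vblank_timer, MY_VBLANK_PERIOD_NS, HRTIMER_MODE_REL);
}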
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 558640aee15a..b323f5ef64d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -411,244 +411,587 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
 		break;
 	}
 
-	if (adev->asic_type == CHIP_VERDE ||
-	    adev->asic_type == CHIP_OLAND ||
-	    adev->asic_type == CHIP_HAINAN) {
+	if (adev->asic_type == CHIP_VERDE) {
 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
 			switch (reg_offset) {
 			case 0:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+				gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
 						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
 						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
 						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
 						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+						 NUM_BANKS(ADDR_SURF_16_BANK));
 				break;
 			case 1:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+				gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
 						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
 						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
 						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
 						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+						 NUM_BANKS(ADDR_SURF_16_BANK));
 				break;
 			case 2:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+				gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
 						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
 						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
 						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
 						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+						 NUM_BANKS(ADDR_SURF_16_BANK));
 				break;
 			case 3:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+				gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
 						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-						 NUM_BANKS(ADDR_SURF_16_BANK) |
 						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_8_BANK) |
+						 TILE_SPLIT(split_equal_to_row_size));
 				break;
 			case 4:
-				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | -
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P4_8x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - NUM_BANKS(ADDR_SURF_16_BANK) | - BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16)); break; case 5: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_8x16) | - TILE_SPLIT(split_equal_to_row_size) | - NUM_BANKS(ADDR_SURF_16_BANK) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); break; case 6: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_8x16) | - TILE_SPLIT(split_equal_to_row_size) | - NUM_BANKS(ADDR_SURF_16_BANK) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); break; case 7: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_8x16) | - TILE_SPLIT(split_equal_to_row_size) | - NUM_BANKS(ADDR_SURF_16_BANK) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_2_BANK)); break; case 8: - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | - MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P4_8x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - NUM_BANKS(ADDR_SURF_16_BANK) | - BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED)); break; case 9: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P4_8x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - NUM_BANKS(ADDR_SURF_16_BANK) | - BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16)); break; case 10: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - 
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; case 11: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; case 12: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; case 13: - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P4_8x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - NUM_BANKS(ADDR_SURF_16_BANK) | - BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16)); break; case 14: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; case 15: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; case 16: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; case 17: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | 
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 18: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_8x16)); + break; + case 19: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | PIPE_CONFIG(ADDR_SURF_P4_8x16) | - TILE_SPLIT(split_equal_to_row_size) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_16_BANK) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 20: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK) | + TILE_SPLIT(split_equal_to_row_size)); break; case 21: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - NUM_BANKS(ADDR_SURF_16_BANK) | - BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); break; case 22: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 23: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 24: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 25: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_2_BANK)); + break; + case 26: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + 
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_2_BANK)); + break; + case 27: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_2_BANK)); + break; + case 28: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_2_BANK)); + break; + case 29: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_2_BANK)); + break; + case 30: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_2_BANK)); + break; + default: + continue; + } + adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; + WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); + } + } else if (adev->asic_type == CHIP_OLAND || + adev->asic_type == CHIP_HAINAN) { + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 1: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 2: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 3: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 4: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2)); + break; + case 5: + gb_tile_moden = 
(MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 6: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 7: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 8: + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED)); + break; + case 9: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2)); + break; + case 10: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 11: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 12: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 13: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2)); + break; + case 14: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 15: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + break; + case 16: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); + 
break; + case 17: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | NUM_BANKS(ADDR_SURF_16_BANK) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 18: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2)); + break; + case 19: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P2) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 20: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P2) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 21: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); + break; + case 22: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); break; case 23: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); break; case 24: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_8_BANK)); break; case 25: - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | - NUM_BANKS(ADDR_SURF_8_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + 
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 26: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 27: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 28: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 29: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 30: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P2) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); break; default: - gb_tile_moden = 0; - break; + continue; } adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); @@ -656,239 +999,291 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev) } else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) { for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { switch (reg_offset) { - case 0: /* non-AA compressed depth or any compressed stencil */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + case 0: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; - case 1: /* 2xAA/4xAA compressed depth only */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + case 1: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; - case 2: /* 
8xAA compressed depth only */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + case 2: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; - case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + case 3: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK) | + TILE_SPLIT(split_equal_to_row_size)); break; - case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */ - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - NUM_BANKS(ADDR_SURF_16_BANK) | - BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + case 4: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)); break; - case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + case 5: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | - TILE_SPLIT(split_equal_to_row_size) | - NUM_BANKS(ADDR_SURF_16_BANK) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_2_BANK)); break; - case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + case 6: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | - TILE_SPLIT(split_equal_to_row_size) | - NUM_BANKS(ADDR_SURF_16_BANK) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_2_BANK)); break; - case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | - TILE_SPLIT(split_equal_to_row_size) | - 
NUM_BANKS(ADDR_SURF_16_BANK) | + case 7: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_2_BANK)); break; - case 8: /* 1D and 1D Array Surfaces */ - gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | - MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - NUM_BANKS(ADDR_SURF_16_BANK) | - BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + case 8: + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED)); break; - case 9: /* Displayable maps. */ - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - NUM_BANKS(ADDR_SURF_16_BANK) | - BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + case 9: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)); break; - case 10: /* Display 8bpp. */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + case 10: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; - case 11: /* Display 16bpp. */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + case 11: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; - case 12: /* Display 32bpp. */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + case 12: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; - case 13: /* Thin. 
*/ - gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | - NUM_BANKS(ADDR_SURF_16_BANK) | - BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + case 13: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)); break; - case 14: /* Thin 8 bpp. */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + case 14: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; - case 15: /* Thin 16 bpp. */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + case 15: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; - case 16: /* Thin 32 bpp. */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + case 16: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK)); break; - case 17: /* Thin 64 bpp. */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + case 17: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | - TILE_SPLIT(split_equal_to_row_size) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | NUM_BANKS(ADDR_SURF_16_BANK) | + TILE_SPLIT(split_equal_to_row_size)); + break; + case 18: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_1D_TILED_THICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)); + break; + case 19: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_16_BANK) | + TILE_SPLIT(split_equal_to_row_size)); break; - case 21: /* 8 bpp PRT. 
*/ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + case 20: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THICK) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | - TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | NUM_BANKS(ADDR_SURF_16_BANK) | - BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + TILE_SPLIT(split_equal_to_row_size)); break; - case 22: /* 16 bpp PRT */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + case 21: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); + break; + case 22: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_4_BANK)); break; - case 23: /* 32 bpp PRT */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + case 23: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_2_BANK)); break; - case 24: /* 64 bpp PRT */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + case 24: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | - NUM_BANKS(ADDR_SURF_16_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_2_BANK)); break; - case 25: /* 128 bpp PRT */ - gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | - MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | - PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + case 25: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | - NUM_BANKS(ADDR_SURF_8_BANK) | BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | - BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | - MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | + NUM_BANKS(ADDR_SURF_2_BANK)); break; - default: - gb_tile_moden = 0; + case 26: + gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + 
						 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_2_BANK));
+				break;
+			case 27:
+				gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_2_BANK));
+				break;
+			case 28:
+				gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_2_BANK));
+				break;
+			case 29:
+				gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_2_BANK));
+				break;
+			case 30:
+				gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						 NUM_BANKS(ADDR_SURF_2_BANK));
+				break;
+			default:
+				continue;
 			}
 			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
 			WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
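Note: for readers skimming the wall of tile-mode cases, the gfx_v6_0 rework above splits one shared SI table into per-ASIC tables (Verde, Oland/Hainan, Tahiti/Pitcairn) and, crucially, switches the default case from writing 0 to continue, so slots an ASIC does not define keep their reset values. A skeleton of that pattern, with hypothetical field encoders (the real shift values live in the SI register headers):

#define MY_MICRO_TILE_MODE(x)	((x) << 0)	/* illustrative shifts only */
#define MY_ARRAY_MODE(x)	((x) << 2)
#define MY_PIPE_CONFIG(x)	((x) << 6)

static void my_tiling_table_init(struct amdgpu_device *adev)
{
	u32 reg_offset, gb_tile_moden;

	for (reg_offset = 0; reg_offset < 32; reg_offset++) {
		switch (reg_offset) {
		case 0:
			gb_tile_moden = MY_MICRO_TILE_MODE(1) |
					MY_ARRAY_MODE(4) |
					MY_PIPE_CONFIG(0);
			break;
		default:
			/* Entry not defined for this ASIC: leave both the
			 * cached table and the register at reset defaults
			 * instead of clobbering them with 0. */
			continue;
		}
		adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
		WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
	}
}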
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6324f67bdb1f..d0ec00986f38 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3949,8 +3949,12 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
 	temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
 	data = mmRLC_SRM_INDEX_CNTL_DATA_0;
 	for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
-		amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
-		amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
+		if (unique_indices[i] != 0) {
+			amdgpu_mm_wreg(adev, temp + i,
+				       unique_indices[i] & 0x3FFFF, false);
+			amdgpu_mm_wreg(adev, data + i,
+				       unique_indices[i] >> 20, false);
+		}
 	}
 
 	kfree(register_list_format);
@@ -3966,20 +3970,17 @@ static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
 {
 	uint32_t data;
 
-	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
-			      AMD_PG_SUPPORT_GFX_SMG |
-			      AMD_PG_SUPPORT_GFX_DMG)) {
-		WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
+	WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
 
-		data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
-		data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
-		data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
-		data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
-		WREG32(mmRLC_PG_DELAY, data);
+	data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
+	data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
+	data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
+	data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
+	WREG32(mmRLC_PG_DELAY, data);
+
+	WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
+	WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
 
-		WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
-		WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
-	}
 }
 
 static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
@@ -3996,41 +3997,37 @@ static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
 static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
 {
-	WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0);
+	WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
 }
 
 static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
 {
-	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
-			      AMD_PG_SUPPORT_GFX_SMG |
-			      AMD_PG_SUPPORT_GFX_DMG |
-			      AMD_PG_SUPPORT_CP |
-			      AMD_PG_SUPPORT_GDS |
-			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
+	if ((adev->asic_type == CHIP_CARRIZO) ||
+	    (adev->asic_type == CHIP_STONEY)) {
 		gfx_v8_0_init_csb(adev);
 		gfx_v8_0_init_save_restore_list(adev);
 		gfx_v8_0_enable_save_restore_machine(adev);
-
-		if ((adev->asic_type == CHIP_CARRIZO) ||
-		    (adev->asic_type == CHIP_STONEY)) {
-			WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
-			gfx_v8_0_init_power_gating(adev);
-			WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
-			if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
-				cz_enable_sck_slow_down_on_power_up(adev, true);
-				cz_enable_sck_slow_down_on_power_down(adev, true);
-			} else {
-				cz_enable_sck_slow_down_on_power_up(adev, false);
-				cz_enable_sck_slow_down_on_power_down(adev, false);
-			}
-			if (adev->pg_flags & AMD_PG_SUPPORT_CP)
-				cz_enable_cp_power_gating(adev, true);
-			else
-				cz_enable_cp_power_gating(adev, false);
-		} else if (adev->asic_type == CHIP_POLARIS11) {
-			gfx_v8_0_init_power_gating(adev);
+		WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
+		gfx_v8_0_init_power_gating(adev);
+		WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
+		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+			cz_enable_sck_slow_down_on_power_up(adev, true);
+			cz_enable_sck_slow_down_on_power_down(adev, true);
+		} else {
+			cz_enable_sck_slow_down_on_power_up(adev, false);
+			cz_enable_sck_slow_down_on_power_down(adev, false);
 		}
+		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
+			cz_enable_cp_power_gating(adev, true);
+		else
+			cz_enable_cp_power_gating(adev, false);
+	} else if (adev->asic_type == CHIP_POLARIS11) {
+		gfx_v8_0_init_csb(adev);
+		gfx_v8_0_init_save_restore_list(adev);
+		gfx_v8_0_enable_save_restore_machine(adev);
+		gfx_v8_0_init_power_gating(adev);
 	}
+
 }
 
 static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
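Note: the @@ -3949 hunk above is easy to misread in hunk form; the point is simply that unused slots in unique_indices[] (still zero) must not be programmed into the RLC save-restore-machine index/data register pairs. Extracted as a hypothetical helper, mirroring the calls in the diff:

static void my_write_srm_indices(struct amdgpu_device *adev,
				 const int *unique_indices, int count)
{
	u32 temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
	u32 data = mmRLC_SRM_INDEX_CNTL_DATA_0;
	int i;

	for (i = 0; i < count; i++) {
		if (unique_indices[i] == 0)
			continue;	/* slot never filled in, skip it */
		/* Low 18 bits go to the ADDR register, the high bits to
		 * the DATA register, the same split as in the diff. */
		amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
		amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
	}
}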
true : false; - if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) - return 0; - switch (adev->asic_type) { case CHIP_CARRIZO: case CHIP_STONEY: - if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) - cz_update_gfx_cg_power_gating(adev, enable); + + cz_update_gfx_cg_power_gating(adev, enable); if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); @@ -5791,25 +5785,49 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev, enum amd_clockgating_state state) { - uint32_t msg_id, pp_state; + uint32_t msg_id, pp_state = 0; + uint32_t pp_support_state = 0; void *pp_handle = adev->powerplay.pp_handle; - if (state == AMD_CG_STATE_UNGATE) - pp_state = 0; - else - pp_state = PP_STATE_CG | PP_STATE_LS; + if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { + pp_support_state = PP_STATE_SUPPORT_LS; + pp_state = PP_STATE_LS; + } + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) { + pp_support_state |= PP_STATE_SUPPORT_CG; + pp_state |= PP_STATE_CG; + } + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_CG, + pp_support_state, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_CG, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); + if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { + pp_support_state = PP_STATE_SUPPORT_LS; + pp_state = PP_STATE_LS; + } - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_MG, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { + pp_support_state |= PP_STATE_SUPPORT_CG; + pp_state |= PP_STATE_CG; + } + + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_MG, + pp_support_state, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } return 0; } @@ -5817,43 +5835,98 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev, static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev, enum amd_clockgating_state state) { - uint32_t msg_id, pp_state; + + uint32_t msg_id, pp_state = 0; + uint32_t pp_support_state = 0; void *pp_handle = adev->powerplay.pp_handle; - if (state == AMD_CG_STATE_UNGATE) - pp_state = 0; - else - pp_state = PP_STATE_CG | PP_STATE_LS; + if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) { + pp_support_state = PP_STATE_SUPPORT_LS; + pp_state = PP_STATE_LS; + } + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) { + pp_support_state |= PP_STATE_SUPPORT_CG; + pp_state |= PP_STATE_CG; + } + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_CG, + pp_support_state, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_CG, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); + if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) { + pp_support_state = PP_STATE_SUPPORT_LS; + 
pp_state = PP_STATE_LS; + } + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) { + pp_support_state |= PP_STATE_SUPPORT_CG; + pp_state |= PP_STATE_CG; + } + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_3D, + pp_support_state, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_3D, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); + if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { + pp_support_state = PP_STATE_SUPPORT_LS; + pp_state = PP_STATE_LS; + } - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_MG, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) { + pp_support_state |= PP_STATE_SUPPORT_CG; + pp_state |= PP_STATE_CG; + } - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, - PP_BLOCK_GFX_RLC, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_MG, + pp_support_state, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { + pp_support_state = PP_STATE_SUPPORT_LS; - msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + else + pp_state = PP_STATE_LS; + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_RLC, + pp_support_state, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { + pp_support_state = PP_STATE_SUPPORT_LS; + + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + else + pp_state = PP_STATE_LS; + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, PP_BLOCK_GFX_CP, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_support_state, pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 3ed8ad8725b9..c46b0159007d 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -43,13 +43,14 @@ static const u32 tahiti_golden_registers[] = { + 0x17bc, 0x00000030, 0x00000011, 0x2684, 0x00010000, 0x00018208, 0x260c, 0xffffffff, 0x00000000, 0x260d, 0xf00fffff, 0x00000400, 0x260e, 0x0002021c, 0x00020200, 0x031e, 0x00000080, 0x00000000, - 0x340c, 0x000300c0, 0x00800040, - 0x360c, 0x000300c0, 0x00800040, + 0x340c, 0x000000c0, 0x00800040, + 0x360c, 0x000000c0, 0x00800040, 0x16ec, 0x000000f0, 0x00000070, 0x16f0, 0x00200000, 0x50100000, 0x1c0c, 0x31000311, 0x00000011, @@ -60,7 +61,7 @@ static const u32 tahiti_golden_registers[] = 0x22c4, 0x0000ff0f, 0x00000000, 0xa293, 0x07ffffff, 0x4e000000, 0xa0d4, 0x3f3f3fff, 0x2a00126a, - 0x000c, 0x000000ff, 0x0040, + 0x000c, 0xffffffff, 0x0040, 0x000d, 0x00000040, 0x00004040, 0x2440, 0x07ffffff, 0x03000000, 0x23a2, 0x01ff1f3f, 0x00000000, @@ -73,7 +74,11 @@ static const u32 tahiti_golden_registers[] = 0x2234, 0xffffffff, 0x000fff40, 0x2235, 0x0000001f, 0x00000010, 0x0504, 0x20000000, 0x20fffed8, - 0x0570, 0x000c0fc0, 0x000c0400 + 0x0570, 0x000c0fc0, 0x000c0400, + 0x052c, 0x0fffffff, 0xffffffff, + 0x052d, 0x0fffffff, 0x0fffffff, + 0x052e, 0x0fffffff, 0x0fffffff, + 0x052f, 0x0fffffff, 0x0fffffff }; static const u32 
tahiti_golden_registers2[] = @@ -83,16 +88,18 @@ static const u32 tahiti_golden_registers2[] = static const u32 tahiti_golden_rlc_registers[] = { + 0x263e, 0xffffffff, 0x12011003, 0x3109, 0xffffffff, 0x00601005, 0x311f, 0xffffffff, 0x10104040, 0x3122, 0xffffffff, 0x0100000a, 0x30c5, 0xffffffff, 0x00000800, 0x30c3, 0xffffffff, 0x800000f4, - 0x3d2a, 0xffffffff, 0x00000000 + 0x3d2a, 0x00000008, 0x00000000 }; static const u32 pitcairn_golden_registers[] = { + 0x17bc, 0x00000030, 0x00000011, 0x2684, 0x00010000, 0x00018208, 0x260c, 0xffffffff, 0x00000000, 0x260d, 0xf00fffff, 0x00000400, @@ -110,7 +117,7 @@ static const u32 pitcairn_golden_registers[] = 0x22c4, 0x0000ff0f, 0x00000000, 0xa293, 0x07ffffff, 0x4e000000, 0xa0d4, 0x3f3f3fff, 0x2a00126a, - 0x000c, 0x000000ff, 0x0040, + 0x000c, 0xffffffff, 0x0040, 0x000d, 0x00000040, 0x00004040, 0x2440, 0x07ffffff, 0x03000000, 0x2418, 0x0000007f, 0x00000020, @@ -119,11 +126,16 @@ static const u32 pitcairn_golden_registers[] = 0x2b04, 0xffffffff, 0x00000000, 0x2b03, 0xffffffff, 0x32761054, 0x2235, 0x0000001f, 0x00000010, - 0x0570, 0x000c0fc0, 0x000c0400 + 0x0570, 0x000c0fc0, 0x000c0400, + 0x052c, 0x0fffffff, 0xffffffff, + 0x052d, 0x0fffffff, 0x0fffffff, + 0x052e, 0x0fffffff, 0x0fffffff, + 0x052f, 0x0fffffff, 0x0fffffff }; static const u32 pitcairn_golden_rlc_registers[] = { + 0x263e, 0xffffffff, 0x12011003, 0x3109, 0xffffffff, 0x00601004, 0x311f, 0xffffffff, 0x10102020, 0x3122, 0xffffffff, 0x01000020, @@ -133,133 +145,134 @@ static const u32 pitcairn_golden_rlc_registers[] = static const u32 verde_pg_init[] = { - 0xd4f, 0xffffffff, 0x40000, - 0xd4e, 0xffffffff, 0x200010ff, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x7007, - 0xd4e, 0xffffffff, 0x300010ff, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x400000, - 0xd4e, 0xffffffff, 0x100010ff, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x120200, - 0xd4e, 0xffffffff, 0x500010ff, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x1e1e16, - 0xd4e, 0xffffffff, 0x600010ff, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x171f1e, - 0xd4e, 0xffffffff, 0x700010ff, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4f, 0xffffffff, 0x0, - 0xd4e, 0xffffffff, 0x9ff, - 0xd40, 0xffffffff, 0x0, - 0xd41, 0xffffffff, 0x10000800, - 0xd41, 0xffffffff, 0xf, - 0xd41, 0xffffffff, 0xf, - 0xd40, 0xffffffff, 0x4, - 0xd41, 0xffffffff, 0x1000051e, - 0xd41, 0xffffffff, 0xffff, - 0xd41, 0xffffffff, 0xffff, - 0xd40, 0xffffffff, 0x8, - 0xd41, 0xffffffff, 0x80500, - 0xd40, 0xffffffff, 0x12, - 0xd41, 0xffffffff, 0x9050c, - 0xd40, 0xffffffff, 0x1d, - 0xd41, 0xffffffff, 0xb052c, - 0xd40, 0xffffffff, 0x2a, - 0xd41, 0xffffffff, 0x1053e, - 0xd40, 0xffffffff, 0x2d, - 0xd41, 0xffffffff, 0x10546, - 0xd40, 0xffffffff, 0x30, - 0xd41, 0xffffffff, 0xa054e, - 0xd40, 0xffffffff, 0x3c, - 0xd41, 0xffffffff, 0x1055f, - 0xd40, 0xffffffff, 0x3f, - 0xd41, 0xffffffff, 0x10567, - 0xd40, 0xffffffff, 0x42, - 0xd41, 0xffffffff, 0x1056f, - 0xd40, 0xffffffff, 0x45, 
- 0xd41, 0xffffffff, 0x10572, - 0xd40, 0xffffffff, 0x48, - 0xd41, 0xffffffff, 0x20575, - 0xd40, 0xffffffff, 0x4c, - 0xd41, 0xffffffff, 0x190801, - 0xd40, 0xffffffff, 0x67, - 0xd41, 0xffffffff, 0x1082a, - 0xd40, 0xffffffff, 0x6a, - 0xd41, 0xffffffff, 0x1b082d, - 0xd40, 0xffffffff, 0x87, - 0xd41, 0xffffffff, 0x310851, - 0xd40, 0xffffffff, 0xba, - 0xd41, 0xffffffff, 0x891, - 0xd40, 0xffffffff, 0xbc, - 0xd41, 0xffffffff, 0x893, - 0xd40, 0xffffffff, 0xbe, - 0xd41, 0xffffffff, 0x20895, - 0xd40, 0xffffffff, 0xc2, - 0xd41, 0xffffffff, 0x20899, - 0xd40, 0xffffffff, 0xc6, - 0xd41, 0xffffffff, 0x2089d, - 0xd40, 0xffffffff, 0xca, - 0xd41, 0xffffffff, 0x8a1, - 0xd40, 0xffffffff, 0xcc, - 0xd41, 0xffffffff, 0x8a3, - 0xd40, 0xffffffff, 0xce, - 0xd41, 0xffffffff, 0x308a5, - 0xd40, 0xffffffff, 0xd3, - 0xd41, 0xffffffff, 0x6d08cd, - 0xd40, 0xffffffff, 0x142, - 0xd41, 0xffffffff, 0x2000095a, - 0xd41, 0xffffffff, 0x1, - 0xd40, 0xffffffff, 0x144, - 0xd41, 0xffffffff, 0x301f095b, - 0xd40, 0xffffffff, 0x165, - 0xd41, 0xffffffff, 0xc094d, - 0xd40, 0xffffffff, 0x173, - 0xd41, 0xffffffff, 0xf096d, - 0xd40, 0xffffffff, 0x184, - 0xd41, 0xffffffff, 0x15097f, - 0xd40, 0xffffffff, 0x19b, - 0xd41, 0xffffffff, 0xc0998, - 0xd40, 0xffffffff, 0x1a9, - 0xd41, 0xffffffff, 0x409a7, - 0xd40, 0xffffffff, 0x1af, - 0xd41, 0xffffffff, 0xcdc, - 0xd40, 0xffffffff, 0x1b1, - 0xd41, 0xffffffff, 0x800, - 0xd42, 0xffffffff, 0x6c9b2000, - 0xd44, 0xfc00, 0x2000, - 0xd51, 0xffffffff, 0xfc0, - 0xa35, 0x00000100, 0x100 + 0x0d4f, 0xffffffff, 0x40000, + 0x0d4e, 0xffffffff, 0x200010ff, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x7007, + 0x0d4e, 0xffffffff, 0x300010ff, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x400000, + 0x0d4e, 0xffffffff, 0x100010ff, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x120200, + 0x0d4e, 0xffffffff, 0x500010ff, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x1e1e16, + 0x0d4e, 0xffffffff, 0x600010ff, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x171f1e, + 0x0d4e, 0xffffffff, 0x700010ff, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4f, 0xffffffff, 0x0, + 0x0d4e, 0xffffffff, 0x9ff, + 0x0d40, 0xffffffff, 0x0, + 0x0d41, 0xffffffff, 0x10000800, + 0x0d41, 0xffffffff, 0xf, + 0x0d41, 0xffffffff, 0xf, + 0x0d40, 0xffffffff, 0x4, + 0x0d41, 0xffffffff, 0x1000051e, + 0x0d41, 0xffffffff, 0xffff, + 0x0d41, 0xffffffff, 0xffff, + 0x0d40, 0xffffffff, 0x8, + 0x0d41, 0xffffffff, 0x80500, + 0x0d40, 0xffffffff, 0x12, + 0x0d41, 0xffffffff, 0x9050c, + 0x0d40, 0xffffffff, 0x1d, + 0x0d41, 0xffffffff, 0xb052c, + 0x0d40, 0xffffffff, 0x2a, + 0x0d41, 0xffffffff, 0x1053e, + 0x0d40, 0xffffffff, 0x2d, + 0x0d41, 0xffffffff, 0x10546, + 0x0d40, 0xffffffff, 0x30, + 0x0d41, 0xffffffff, 0xa054e, + 0x0d40, 0xffffffff, 0x3c, + 0x0d41, 0xffffffff, 0x1055f, + 0x0d40, 0xffffffff, 0x3f, + 0x0d41, 0xffffffff, 0x10567, + 0x0d40, 0xffffffff, 0x42, + 0x0d41, 0xffffffff, 0x1056f, + 0x0d40, 0xffffffff, 0x45, + 0x0d41, 
0xffffffff, 0x10572, + 0x0d40, 0xffffffff, 0x48, + 0x0d41, 0xffffffff, 0x20575, + 0x0d40, 0xffffffff, 0x4c, + 0x0d41, 0xffffffff, 0x190801, + 0x0d40, 0xffffffff, 0x67, + 0x0d41, 0xffffffff, 0x1082a, + 0x0d40, 0xffffffff, 0x6a, + 0x0d41, 0xffffffff, 0x1b082d, + 0x0d40, 0xffffffff, 0x87, + 0x0d41, 0xffffffff, 0x310851, + 0x0d40, 0xffffffff, 0xba, + 0x0d41, 0xffffffff, 0x891, + 0x0d40, 0xffffffff, 0xbc, + 0x0d41, 0xffffffff, 0x893, + 0x0d40, 0xffffffff, 0xbe, + 0x0d41, 0xffffffff, 0x20895, + 0x0d40, 0xffffffff, 0xc2, + 0x0d41, 0xffffffff, 0x20899, + 0x0d40, 0xffffffff, 0xc6, + 0x0d41, 0xffffffff, 0x2089d, + 0x0d40, 0xffffffff, 0xca, + 0x0d41, 0xffffffff, 0x8a1, + 0x0d40, 0xffffffff, 0xcc, + 0x0d41, 0xffffffff, 0x8a3, + 0x0d40, 0xffffffff, 0xce, + 0x0d41, 0xffffffff, 0x308a5, + 0x0d40, 0xffffffff, 0xd3, + 0x0d41, 0xffffffff, 0x6d08cd, + 0x0d40, 0xffffffff, 0x142, + 0x0d41, 0xffffffff, 0x2000095a, + 0x0d41, 0xffffffff, 0x1, + 0x0d40, 0xffffffff, 0x144, + 0x0d41, 0xffffffff, 0x301f095b, + 0x0d40, 0xffffffff, 0x165, + 0x0d41, 0xffffffff, 0xc094d, + 0x0d40, 0xffffffff, 0x173, + 0x0d41, 0xffffffff, 0xf096d, + 0x0d40, 0xffffffff, 0x184, + 0x0d41, 0xffffffff, 0x15097f, + 0x0d40, 0xffffffff, 0x19b, + 0x0d41, 0xffffffff, 0xc0998, + 0x0d40, 0xffffffff, 0x1a9, + 0x0d41, 0xffffffff, 0x409a7, + 0x0d40, 0xffffffff, 0x1af, + 0x0d41, 0xffffffff, 0xcdc, + 0x0d40, 0xffffffff, 0x1b1, + 0x0d41, 0xffffffff, 0x800, + 0x0d42, 0xffffffff, 0x6c9b2000, + 0x0d44, 0xfc00, 0x2000, + 0x0d51, 0xffffffff, 0xfc0, + 0x0a35, 0x00000100, 0x100 }; static const u32 verde_golden_rlc_registers[] = { + 0x263e, 0xffffffff, 0x02010002, 0x3109, 0xffffffff, 0x033f1005, 0x311f, 0xffffffff, 0x10808020, 0x3122, 0xffffffff, 0x00800008, @@ -269,65 +282,45 @@ static const u32 verde_golden_rlc_registers[] = static const u32 verde_golden_registers[] = { + 0x17bc, 0x00000030, 0x00000011, 0x2684, 0x00010000, 0x00018208, 0x260c, 0xffffffff, 0x00000000, 0x260d, 0xf00fffff, 0x00000400, 0x260e, 0x0002021c, 0x00020200, 0x031e, 0x00000080, 0x00000000, 0x340c, 0x000300c0, 0x00800040, - 0x340c, 0x000300c0, 0x00800040, - 0x360c, 0x000300c0, 0x00800040, 0x360c, 0x000300c0, 0x00800040, 0x16ec, 0x000000f0, 0x00000070, 0x16f0, 0x00200000, 0x50100000, - 0x1c0c, 0x31000311, 0x00000011, 0x0ab9, 0x00073ffe, 0x000022a2, - 0x0ab9, 0x00073ffe, 0x000022a2, - 0x0ab9, 0x00073ffe, 0x000022a2, - 0x0903, 0x000007ff, 0x00000000, - 0x0903, 0x000007ff, 0x00000000, 0x0903, 0x000007ff, 0x00000000, 0x2285, 0xf000001f, 0x00000007, - 0x2285, 0xf000001f, 0x00000007, - 0x2285, 0xf000001f, 0x00000007, - 0x2285, 0xffffffff, 0x00ffffff, + 0x22c9, 0xffffffff, 0x00ffffff, 0x22c4, 0x0000ff0f, 0x00000000, - 0xa293, 0x07ffffff, 0x4e000000, 0xa0d4, 0x3f3f3fff, 0x0000124a, - 0xa0d4, 0x3f3f3fff, 0x0000124a, - 0xa0d4, 0x3f3f3fff, 0x0000124a, - 0x000c, 0x000000ff, 0x0040, + 0x000c, 0xffffffff, 0x0040, 0x000d, 0x00000040, 0x00004040, 0x2440, 0x07ffffff, 0x03000000, - 0x2440, 0x07ffffff, 0x03000000, - 0x23a2, 0x01ff1f3f, 0x00000000, - 0x23a3, 0x01ff1f3f, 0x00000000, 0x23a2, 0x01ff1f3f, 0x00000000, 0x23a1, 0x01ff1f3f, 0x00000000, - 0x23a1, 0x01ff1f3f, 0x00000000, - - 0x23a1, 0x01ff1f3f, 0x00000000, 0x2418, 0x0000007f, 0x00000020, 0x2542, 0x00010000, 0x00010000, - 0x2b01, 0x000003ff, 0x00000003, - 0x2b05, 0x000003ff, 0x00000003, 0x2b05, 0x000003ff, 0x00000003, 0x2b04, 0xffffffff, 0x00000000, - 0x2b04, 0xffffffff, 0x00000000, - 0x2b04, 0xffffffff, 0x00000000, - 0x2b03, 0xffffffff, 0x00001032, - 0x2b03, 0xffffffff, 0x00001032, 0x2b03, 0xffffffff, 0x00001032, 0x2235, 0x0000001f, 0x00000010, - 0x2235, 
0x0000001f, 0x00000010, - 0x2235, 0x0000001f, 0x00000010, - 0x0570, 0x000c0fc0, 0x000c0400 + 0x0570, 0x000c0fc0, 0x000c0400, + 0x052c, 0x0fffffff, 0xffffffff, + 0x052d, 0x0fffffff, 0x0fffffff, + 0x052e, 0x0fffffff, 0x0fffffff, + 0x052f, 0x0fffffff, 0x0fffffff }; static const u32 oland_golden_registers[] = { + 0x17bc, 0x00000030, 0x00000011, 0x2684, 0x00010000, 0x00018208, 0x260c, 0xffffffff, 0x00000000, 0x260d, 0xf00fffff, 0x00000400, @@ -336,7 +329,7 @@ static const u32 oland_golden_registers[] = 0x340c, 0x000300c0, 0x00800040, 0x360c, 0x000300c0, 0x00800040, 0x16ec, 0x000000f0, 0x00000070, - 0x16f9, 0x00200000, 0x50100000, + 0x16f0, 0x00200000, 0x50100000, 0x1c0c, 0x31000311, 0x00000011, 0x0ab9, 0x00073ffe, 0x000022a2, 0x0903, 0x000007ff, 0x00000000, @@ -345,7 +338,7 @@ static const u32 oland_golden_registers[] = 0x22c4, 0x0000ff0f, 0x00000000, 0xa293, 0x07ffffff, 0x4e000000, 0xa0d4, 0x3f3f3fff, 0x00000082, - 0x000c, 0x000000ff, 0x0040, + 0x000c, 0xffffffff, 0x0040, 0x000d, 0x00000040, 0x00004040, 0x2440, 0x07ffffff, 0x03000000, 0x2418, 0x0000007f, 0x00000020, @@ -354,11 +347,16 @@ static const u32 oland_golden_registers[] = 0x2b04, 0xffffffff, 0x00000000, 0x2b03, 0xffffffff, 0x00003210, 0x2235, 0x0000001f, 0x00000010, - 0x0570, 0x000c0fc0, 0x000c0400 + 0x0570, 0x000c0fc0, 0x000c0400, + 0x052c, 0x0fffffff, 0xffffffff, + 0x052d, 0x0fffffff, 0x0fffffff, + 0x052e, 0x0fffffff, 0x0fffffff, + 0x052f, 0x0fffffff, 0x0fffffff }; static const u32 oland_golden_rlc_registers[] = { + 0x263e, 0xffffffff, 0x02010002, 0x3109, 0xffffffff, 0x00601005, 0x311f, 0xffffffff, 0x10104040, 0x3122, 0xffffffff, 0x0100000a, @@ -368,22 +366,27 @@ static const u32 oland_golden_rlc_registers[] = static const u32 hainan_golden_registers[] = { + 0x17bc, 0x00000030, 0x00000011, 0x2684, 0x00010000, 0x00018208, 0x260c, 0xffffffff, 0x00000000, 0x260d, 0xf00fffff, 0x00000400, 0x260e, 0x0002021c, 0x00020200, - 0x4595, 0xff000fff, 0x00000100, + 0x031e, 0x00000080, 0x00000000, + 0x3430, 0xff000fff, 0x00000100, 0x340c, 0x000300c0, 0x00800040, 0x3630, 0xff000fff, 0x00000100, 0x360c, 0x000300c0, 0x00800040, + 0x16ec, 0x000000f0, 0x00000070, + 0x16f0, 0x00200000, 0x50100000, + 0x1c0c, 0x31000311, 0x00000011, 0x0ab9, 0x00073ffe, 0x000022a2, 0x0903, 0x000007ff, 0x00000000, 0x2285, 0xf000001f, 0x00000007, 0x22c9, 0xffffffff, 0x00ffffff, 0x22c4, 0x0000ff0f, 0x00000000, - 0xa393, 0x07ffffff, 0x4e000000, + 0xa293, 0x07ffffff, 0x4e000000, 0xa0d4, 0x3f3f3fff, 0x00000000, - 0x000c, 0x000000ff, 0x0040, + 0x000c, 0xffffffff, 0x0040, 0x000d, 0x00000040, 0x00004040, 0x2440, 0x03e00000, 0x03600000, 0x2418, 0x0000007f, 0x00000020, @@ -392,12 +395,16 @@ static const u32 hainan_golden_registers[] = 0x2b04, 0xffffffff, 0x00000000, 0x2b03, 0xffffffff, 0x00003210, 0x2235, 0x0000001f, 0x00000010, - 0x0570, 0x000c0fc0, 0x000c0400 + 0x0570, 0x000c0fc0, 0x000c0400, + 0x052c, 0x0fffffff, 0xffffffff, + 0x052d, 0x0fffffff, 0x0fffffff, + 0x052e, 0x0fffffff, 0x0fffffff, + 0x052f, 0x0fffffff, 0x0fffffff }; static const u32 hainan_golden_registers2[] = { - 0x263e, 0xffffffff, 0x02010001 + 0x263e, 0xffffffff, 0x2011003 }; static const u32 tahiti_mgcg_cgcg_init[] = @@ -513,18 +520,18 @@ static const u32 tahiti_mgcg_cgcg_init[] = 0x21c2, 0xffffffff, 0x00900100, 0x311e, 0xffffffff, 0x00000080, 0x3101, 0xffffffff, 0x0020003f, - 0xc, 0xffffffff, 0x0000001c, - 0xd, 0x000f0000, 0x000f0000, - 0x583, 0xffffffff, 0x00000100, - 0x409, 0xffffffff, 0x00000100, - 0x40b, 0x00000101, 0x00000000, - 0x82a, 0xffffffff, 0x00000104, - 0x993, 0x000c0000, 0x000c0000, - 0x992, 
0x000c0000, 0x000c0000, + 0x000c, 0xffffffff, 0x0000001c, + 0x000d, 0x000f0000, 0x000f0000, + 0x0583, 0xffffffff, 0x00000100, + 0x0409, 0xffffffff, 0x00000100, + 0x040b, 0x00000101, 0x00000000, + 0x082a, 0xffffffff, 0x00000104, + 0x0993, 0x000c0000, 0x000c0000, + 0x0992, 0x000c0000, 0x000c0000, 0x1579, 0xff000fff, 0x00000100, 0x157a, 0x00000001, 0x00000001, - 0xbd4, 0x00000001, 0x00000001, - 0xc33, 0xc0000fff, 0x00000104, + 0x0bd4, 0x00000001, 0x00000001, + 0x0c33, 0xc0000fff, 0x00000104, 0x3079, 0x00000001, 0x00000001, 0x3430, 0xfffffff0, 0x00000100, 0x3630, 0xfffffff0, 0x00000100 @@ -612,16 +619,16 @@ static const u32 pitcairn_mgcg_cgcg_init[] = 0x21c2, 0xffffffff, 0x00900100, 0x311e, 0xffffffff, 0x00000080, 0x3101, 0xffffffff, 0x0020003f, - 0xc, 0xffffffff, 0x0000001c, - 0xd, 0x000f0000, 0x000f0000, - 0x583, 0xffffffff, 0x00000100, - 0x409, 0xffffffff, 0x00000100, - 0x40b, 0x00000101, 0x00000000, - 0x82a, 0xffffffff, 0x00000104, + 0x000c, 0xffffffff, 0x0000001c, + 0x000d, 0x000f0000, 0x000f0000, + 0x0583, 0xffffffff, 0x00000100, + 0x0409, 0xffffffff, 0x00000100, + 0x040b, 0x00000101, 0x00000000, + 0x082a, 0xffffffff, 0x00000104, 0x1579, 0xff000fff, 0x00000100, 0x157a, 0x00000001, 0x00000001, - 0xbd4, 0x00000001, 0x00000001, - 0xc33, 0xc0000fff, 0x00000104, + 0x0bd4, 0x00000001, 0x00000001, + 0x0c33, 0xc0000fff, 0x00000104, 0x3079, 0x00000001, 0x00000001, 0x3430, 0xfffffff0, 0x00000100, 0x3630, 0xfffffff0, 0x00000100 @@ -709,18 +716,18 @@ static const u32 verde_mgcg_cgcg_init[] = 0x21c2, 0xffffffff, 0x00900100, 0x311e, 0xffffffff, 0x00000080, 0x3101, 0xffffffff, 0x0020003f, - 0xc, 0xffffffff, 0x0000001c, - 0xd, 0x000f0000, 0x000f0000, - 0x583, 0xffffffff, 0x00000100, - 0x409, 0xffffffff, 0x00000100, - 0x40b, 0x00000101, 0x00000000, - 0x82a, 0xffffffff, 0x00000104, - 0x993, 0x000c0000, 0x000c0000, - 0x992, 0x000c0000, 0x000c0000, + 0x000c, 0xffffffff, 0x0000001c, + 0x000d, 0x000f0000, 0x000f0000, + 0x0583, 0xffffffff, 0x00000100, + 0x0409, 0xffffffff, 0x00000100, + 0x040b, 0x00000101, 0x00000000, + 0x082a, 0xffffffff, 0x00000104, + 0x0993, 0x000c0000, 0x000c0000, + 0x0992, 0x000c0000, 0x000c0000, 0x1579, 0xff000fff, 0x00000100, 0x157a, 0x00000001, 0x00000001, - 0xbd4, 0x00000001, 0x00000001, - 0xc33, 0xc0000fff, 0x00000104, + 0x0bd4, 0x00000001, 0x00000001, + 0x0c33, 0xc0000fff, 0x00000104, 0x3079, 0x00000001, 0x00000001, 0x3430, 0xfffffff0, 0x00000100, 0x3630, 0xfffffff0, 0x00000100 @@ -788,18 +795,18 @@ static const u32 oland_mgcg_cgcg_init[] = 0x21c2, 0xffffffff, 0x00900100, 0x311e, 0xffffffff, 0x00000080, 0x3101, 0xffffffff, 0x0020003f, - 0xc, 0xffffffff, 0x0000001c, - 0xd, 0x000f0000, 0x000f0000, - 0x583, 0xffffffff, 0x00000100, - 0x409, 0xffffffff, 0x00000100, - 0x40b, 0x00000101, 0x00000000, - 0x82a, 0xffffffff, 0x00000104, - 0x993, 0x000c0000, 0x000c0000, - 0x992, 0x000c0000, 0x000c0000, + 0x000c, 0xffffffff, 0x0000001c, + 0x000d, 0x000f0000, 0x000f0000, + 0x0583, 0xffffffff, 0x00000100, + 0x0409, 0xffffffff, 0x00000100, + 0x040b, 0x00000101, 0x00000000, + 0x082a, 0xffffffff, 0x00000104, + 0x0993, 0x000c0000, 0x000c0000, + 0x0992, 0x000c0000, 0x000c0000, 0x1579, 0xff000fff, 0x00000100, 0x157a, 0x00000001, 0x00000001, - 0xbd4, 0x00000001, 0x00000001, - 0xc33, 0xc0000fff, 0x00000104, + 0x0bd4, 0x00000001, 0x00000001, + 0x0c33, 0xc0000fff, 0x00000104, 0x3079, 0x00000001, 0x00000001, 0x3430, 0xfffffff0, 0x00000100, 0x3630, 0xfffffff0, 0x00000100 @@ -867,15 +874,15 @@ static const u32 hainan_mgcg_cgcg_init[] = 0x21c2, 0xffffffff, 0x00900100, 0x311e, 0xffffffff, 0x00000080, 0x3101, 
0xffffffff, 0x0020003f, - 0xc, 0xffffffff, 0x0000001c, - 0xd, 0x000f0000, 0x000f0000, - 0x583, 0xffffffff, 0x00000100, - 0x409, 0xffffffff, 0x00000100, - 0x82a, 0xffffffff, 0x00000104, - 0x993, 0x000c0000, 0x000c0000, - 0x992, 0x000c0000, 0x000c0000, - 0xbd4, 0x00000001, 0x00000001, - 0xc33, 0xc0000fff, 0x00000104, + 0x000c, 0xffffffff, 0x0000001c, + 0x000d, 0x000f0000, 0x000f0000, + 0x0583, 0xffffffff, 0x00000100, + 0x0409, 0xffffffff, 0x00000100, + 0x082a, 0xffffffff, 0x00000104, + 0x0993, 0x000c0000, 0x000c0000, + 0x0992, 0x000c0000, 0x000c0000, + 0x0bd4, 0x00000001, 0x00000001, + 0x0c33, 0xc0000fff, 0x00000104, 0x3079, 0x00000001, 0x00000001, 0x3430, 0xfffffff0, 0x00000100, 0x3630, 0xfffffff0, 0x00000100 @@ -1179,6 +1186,8 @@ static int si_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG; adev->pg_flags = 0; + adev->external_rev_id = (adev->rev_id == 0) ? 1 : + (adev->rev_id == 1) ? 5 : 6; break; case CHIP_PITCAIRN: adev->cg_flags = @@ -1198,6 +1207,7 @@ static int si_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG; adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 20; break; case CHIP_VERDE: @@ -1219,7 +1229,7 @@ static int si_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_MGCG; adev->pg_flags = 0; //??? - adev->external_rev_id = adev->rev_id + 0x14; + adev->external_rev_id = adev->rev_id + 40; break; case CHIP_OLAND: adev->cg_flags = @@ -1238,6 +1248,7 @@ static int si_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG; adev->pg_flags = 0; + adev->external_rev_id = 60; break; case CHIP_HAINAN: adev->cg_flags = @@ -1255,6 +1266,7 @@ static int si_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG; adev->pg_flags = 0; + adev->external_rev_id = 70; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 9f771f4ffcb7..bf088d6d9bf1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -932,18 +932,64 @@ static int vi_common_early_init(void *handle) adev->external_rev_id = adev->rev_id + 0x3c; break; case CHIP_TONGA: - adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG; - adev->pg_flags = AMD_PG_SUPPORT_UVD; + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_ROM_MGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_DRM_LS | + AMD_CG_SUPPORT_UVD_MGCG; + adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x14; break; case CHIP_POLARIS11: - adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_RLC_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_MGCG | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_ROM_MGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_DRM_LS | + AMD_CG_SUPPORT_UVD_MGCG | AMD_CG_SUPPORT_VCE_MGCG; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x5A; break; case CHIP_POLARIS10: - adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_RLC_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + 
AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_MGCG | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_ROM_MGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_DRM_LS | + AMD_CG_SUPPORT_UVD_MGCG | AMD_CG_SUPPORT_VCE_MGCG; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x50; @@ -971,6 +1017,7 @@ static int vi_common_early_init(void *handle) adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_PIPELINE | + AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_UVD | AMD_PG_SUPPORT_VCE; } @@ -996,6 +1043,7 @@ static int vi_common_early_init(void *handle) adev->pg_flags = AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_PIPELINE | + AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_UVD | AMD_PG_SUPPORT_VCE; adev->external_rev_id = adev->rev_id + 0x61; @@ -1155,57 +1203,118 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev, static int vi_common_set_clockgating_state_by_smu(void *handle, enum amd_clockgating_state state) { - uint32_t msg_id, pp_state; + uint32_t msg_id, pp_state = 0; + uint32_t pp_support_state = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; void *pp_handle = adev->powerplay.pp_handle; - if (state == AMD_CG_STATE_UNGATE) - pp_state = 0; - else - pp_state = PP_STATE_CG | PP_STATE_LS; - - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_MC, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); - - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_SDMA, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); - - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_HDP, - PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, - pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); - - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_BIF, - PP_STATE_SUPPORT_LS, - pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); - - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_BIF, - PP_STATE_SUPPORT_CG, - pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); - - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_DRM, - PP_STATE_SUPPORT_LS, - pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); - - msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, - PP_BLOCK_SYS_ROM, - PP_STATE_SUPPORT_CG, - pp_state); - amd_set_clockgating_by_smu(pp_handle, msg_id); + if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) { + if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) { + pp_support_state = AMD_CG_SUPPORT_MC_LS; + pp_state = PP_STATE_LS; + } + if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) { + pp_support_state |= AMD_CG_SUPPORT_MC_MGCG; + pp_state |= PP_STATE_CG; + } + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_MC, + pp_support_state, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } + + if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) { + if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) { + pp_support_state = AMD_CG_SUPPORT_SDMA_LS; + pp_state = PP_STATE_LS; + } + if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) { + pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG; + pp_state |= PP_STATE_CG; + } + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_SDMA, + 
pp_support_state, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } + + if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) { + if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) { + pp_support_state = AMD_CG_SUPPORT_HDP_LS; + pp_state = PP_STATE_LS; + } + if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) { + pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG; + pp_state |= PP_STATE_CG; + } + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_HDP, + pp_support_state, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } + + if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) { + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + else + pp_state = PP_STATE_LS; + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_BIF, + PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } + if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) { + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + else + pp_state = PP_STATE_CG; + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_BIF, + PP_STATE_SUPPORT_CG, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } + + if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) { + + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + else + pp_state = PP_STATE_LS; + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_DRM, + PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } + + if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) { + + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + else + pp_state = PP_STATE_CG; + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_ROM, + PP_STATE_SUPPORT_CG, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + } return 0; } diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index d1986276dbbd..c02469ada9f1 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -126,6 +126,10 @@ enum amd_vce_level { #define AMD_CG_SUPPORT_HDP_LS (1 << 15) #define AMD_CG_SUPPORT_HDP_MGCG (1 << 16) #define AMD_CG_SUPPORT_ROM_MGCG (1 << 17) +#define AMD_CG_SUPPORT_DRM_LS (1 << 18) +#define AMD_CG_SUPPORT_BIF_MGCG (1 << 19) +#define AMD_CG_SUPPORT_GFX_3D_CGCG (1 << 20) +#define AMD_CG_SUPPORT_GFX_3D_CGLS (1 << 21) /* PG flags */ #define AMD_PG_SUPPORT_GFX_PG (1 << 0) diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h index 09a7df17570d..09a7df17570d 100755..100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h index 1ddc4183a1c9..1ddc4183a1c9 100755..100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index e4a1697ec1d3..e4a1697ec1d3 100755..100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 26eff56b4a99..26eff56b4a99 100755..100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c diff --git 
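Aside on the clock-gating rework above: both gfx_v8_0.c and vi.c stop sending one blanket PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS message per block. Instead they derive the supported-feature mask and the requested state from adev->cg_flags, zero the requested state on ungate while still advertising support, and skip the SMU message entirely for blocks with no supported feature. A compact, compilable model of that decision logic follows; the flag values are illustrative, not the driver's real PP_* or AMD_CG_* definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CG_SUPPORT_BLOCK_CG  (1u << 0)   /* coarse clock gating supported */
#define CG_SUPPORT_BLOCK_LS  (1u << 1)   /* light sleep supported */

#define PP_STATE_CG          (1u << 0)
#define PP_STATE_LS          (1u << 1)

/* Returns false when the block supports neither feature, i.e. no
 * message should be sent to the SMU at all. */
static bool build_cg_message(uint32_t cg_flags, bool ungate,
                             uint32_t *support, uint32_t *state)
{
        *support = 0;
        *state = 0;

        if (cg_flags & CG_SUPPORT_BLOCK_LS) {
                *support |= PP_STATE_LS; /* real code uses PP_STATE_SUPPORT_LS */
                *state |= PP_STATE_LS;
        }
        if (cg_flags & CG_SUPPORT_BLOCK_CG) {
                *support |= PP_STATE_CG;
                *state |= PP_STATE_CG;
        }
        if (ungate)
                *state = 0;     /* still advertise support, request nothing */

        return *support != 0;
}

int main(void)
{
        uint32_t support, state;

        if (build_cg_message(CG_SUPPORT_BLOCK_LS, false, &support, &state))
                printf("gate:   support=0x%x state=0x%x\n",
                       (unsigned)support, (unsigned)state);
        if (build_cg_message(CG_SUPPORT_BLOCK_LS, true, &support, &state))
                printf("ungate: support=0x%x state=0x%x\n",
                       (unsigned)support, (unsigned)state);
        return 0;
}

The flattened diff continues below with drivers/gpu/drm/armada/armada_gem.c.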
a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 768087ddb046..a293c8be232c 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -17,12 +17,11 @@ static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data); - unsigned long addr = (unsigned long)vmf->virtual_address; unsigned long pfn = obj->phys_addr >> PAGE_SHIFT; int ret; - pfn += (addr - vma->vm_start) >> PAGE_SHIFT; - ret = vm_insert_pfn(vma, addr, pfn); + pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT; + ret = vm_insert_pfn(vma, vmf->address, pfn); switch (ret) { case 0: diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index d85af0ff2653..dcdd59d505bd 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -223,7 +223,8 @@ static int ast_get_dram_info(struct drm_device *dev) ast_write32(ast, 0x10000, 0xfc600309); do { - ; + if (pci_channel_offline(dev->pdev)) + return -EIO; } while (ast_read32(ast, 0x10000) != 0x01); data = ast_read32(ast, 0x10004); @@ -428,7 +429,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags) ast_detect_chip(dev, &need_post); if (ast->chip != AST1180) { - ast_get_dram_info(dev); + ret = ast_get_dram_info(dev); + if (ret) + goto out_free; ast->vram_size = ast_get_vram_info(dev); DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size); } diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index caa4e4ca616d..bd311c77c254 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -124,8 +124,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) * Using vm_pgoff as a selector forces us to use this unusual * addressing scheme. 
*/ - resource_size_t offset = (unsigned long)vmf->virtual_address - - vma->vm_start; + resource_size_t offset = vmf->address - vma->vm_start; resource_size_t baddr = map->offset + offset; struct drm_agp_mem *agpmem; struct page *page; @@ -195,7 +194,7 @@ static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!map) return VM_FAULT_SIGBUS; /* Nothing allocated */ - offset = (unsigned long)vmf->virtual_address - vma->vm_start; + offset = vmf->address - vma->vm_start; i = (unsigned long)map->handle + offset; page = vmalloc_to_page((void *)i); if (!page) @@ -301,7 +300,8 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!dma->pagelist) return VM_FAULT_SIGBUS; /* Nothing allocated */ - offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ + offset = vmf->address - vma->vm_start; + /* vm_[pg]off[set] should be 0 */ page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */ page = virt_to_page((void *)dma->pagelist[page_nr]); @@ -337,7 +337,7 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!entry->pagelist) return VM_FAULT_SIGBUS; /* Nothing allocated */ - offset = (unsigned long)vmf->virtual_address - vma->vm_start; + offset = vmf->address - vma->vm_start; map_offset = map->offset - (unsigned long)dev->sg->virtual; page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); page = entry->pagelist[page_offset]; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 7d066a91d778..114dddbd297b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -202,15 +202,14 @@ int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = ((unsigned long)vmf->virtual_address - - vma->vm_start) >> PAGE_SHIFT; + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; page = pages[pgoff]; - VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, + VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT); - ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); + ret = vm_insert_page(vma, vmf->address, page); out: switch (ret) { @@ -759,7 +758,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages( down_read(&mm->mmap_sem); while (pinned < npages) { ret = get_user_pages_remote(task, mm, ptr, npages - pinned, - flags, pvec + pinned, NULL); + flags, pvec + pinned, NULL, NULL); if (ret < 0) break; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index ea7a18230888..57b81460fec8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -455,8 +455,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) pgoff_t page_offset; int ret; - page_offset = ((unsigned long)vmf->virtual_address - - vma->vm_start) >> PAGE_SHIFT; + page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) { DRM_ERROR("invalid page offset\n"); @@ -465,8 +464,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } pfn = page_to_pfn(exynos_gem->pages[page_offset]); - ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, - __pfn_to_pfn_t(pfn, PFN_DEV)); + ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); out: switch (ret) { 
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 6bf33ba055b3..fd1488bf5189 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -125,7 +125,7 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) psbfb->gtt->offset; page_num = vma_pages(vma); - address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT); + address = vmf->address - (vmf->pgoff << PAGE_SHIFT); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index 6d1cb6b370b1..527c62917660 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -197,15 +197,14 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Page relative to the VMA start - we must calculate this ourselves because vmf->pgoff is the fake GEM offset */ - page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start) - >> PAGE_SHIFT; + page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; /* CPU view of the page, don't go via the GART for CPU writes */ if (r->stolen) pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT; else pfn = page_to_pfn(r->pages[page_offset]); - ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); + ret = vm_insert_pfn(vma, vmf->address, pfn); fail: mutex_unlock(&dev_priv->mmap_mutex); diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 5ddde7349fbd..183f5dc1c3f2 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -116,6 +116,7 @@ config DRM_I915_GVT_KVMGT tristate "Enable KVM/VFIO support for Intel GVT-g" depends on DRM_I915_GVT depends on KVM + depends on VFIO_MDEV && VFIO_MDEV_DEVICE default n help Choose this option if you want to enable KVMGT support for diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index 8a46a7f31d53..b123c20e2097 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -5,6 +5,4 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \ ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) - -CFLAGS_kvmgt.o := -Wno-unused-function obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index db516382a4d4..711c31c8d8b4 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, u8 changed = old ^ new; int ret; + memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); if (!(changed & PCI_COMMAND_MEMORY)) return 0; @@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu, return ret; } - memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); return 0; } @@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, if (WARN_ON(bytes > 4)) return -EINVAL; - if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ)) + if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ)) return -EINVAL; /* First check if it's PCI_COMMAND */ diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 7eaaf1c9ed2b..6c5fdf5b2ce2 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1998,6 +1998,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) 
INIT_LIST_HEAD(&gtt->oos_page_list_head); INIT_LIST_HEAD(&gtt->post_shadow_list_head); + intel_vgpu_reset_ggtt(vgpu); + ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, NULL, 1, 0); if (IS_ERR(ggtt_mm)) { @@ -2206,6 +2208,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, int intel_gvt_init_gtt(struct intel_gvt *gvt) { int ret; + void *page_addr; gvt_dbg_core("init gtt\n"); @@ -2218,6 +2221,23 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) return -ENODEV; } + gvt->gtt.scratch_ggtt_page = + alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); + if (!gvt->gtt.scratch_ggtt_page) { + gvt_err("fail to allocate scratch ggtt page\n"); + return -ENOMEM; + } + + page_addr = page_address(gvt->gtt.scratch_ggtt_page); + + gvt->gtt.scratch_ggtt_mfn = + intel_gvt_hypervisor_virt_to_mfn(page_addr); + if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { + gvt_err("fail to translate scratch ggtt page\n"); + __free_page(gvt->gtt.scratch_ggtt_page); + return -EFAULT; + } + if (enable_out_of_sync) { ret = setup_spt_oos(gvt); if (ret) { @@ -2239,6 +2259,41 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) */ void intel_gvt_clean_gtt(struct intel_gvt *gvt) { + __free_page(gvt->gtt.scratch_ggtt_page); + if (enable_out_of_sync) clean_spt_oos(gvt); } + +/** + * intel_vgpu_reset_ggtt - reset the GGTT entry + * @vgpu: a vGPU + * + * This function is called at the vGPU create stage + * to reset all the GGTT entries. + * + */ +void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) +{ + struct intel_gvt *gvt = vgpu->gvt; + struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + u32 index; + u32 offset; + u32 num_entries; + struct intel_gvt_gtt_entry e; + + memset(&e, 0, sizeof(struct intel_gvt_gtt_entry)); + e.type = GTT_TYPE_GGTT_PTE; + ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn); + e.val64 |= _PAGE_PRESENT; + + index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; + num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; + for (offset = 0; offset < num_entries; offset++) + ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); + + index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; + num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; + for (offset = 0; offset < num_entries; offset++) + ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); +} diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index d250013bc37b..b315ab3593ec 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -81,6 +81,9 @@ struct intel_gvt_gtt { struct list_head oos_page_use_list_head; struct list_head oos_page_free_list_head; struct list_head mm_lru_list_head; + + struct page *scratch_ggtt_page; + unsigned long scratch_ggtt_mfn; }; enum { @@ -202,6 +205,7 @@ struct intel_vgpu_gtt { extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); +void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); extern int intel_gvt_init_gtt(struct intel_gvt *gvt); extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index b1a7c8dd4b5f..0af17016f33f 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -164,15 +164,18 @@ struct intel_vgpu { #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) struct { - struct device *mdev; + struct mdev_device *mdev; struct vfio_region *region; int num_regions; struct eventfd_ctx *intx_trigger; struct eventfd_ctx *msi_trigger; struct rb_root cache; struct mutex cache_lock; - void *vfio_group;
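Aside on the gtt.c hunk above: intel_gvt_init_gtt() now allocates one zeroed scratch page, and intel_vgpu_reset_ggtt() stamps a single "present" PTE pointing at it across the whole aperture and hidden ranges, so a freshly created vGPU never walks stale GGTT entries. Below is a minimal model of that fill-a-range-with-one-template-entry step; the PTE layout and the present bit are illustrative, not the hardware's real entry format.

#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT   0x1ull   /* illustrative stand-in for _PAGE_PRESENT */
#define NUM_ENTRIES   8

static void reset_range(uint64_t *ggtt, uint32_t index,
                        uint32_t num_entries, uint64_t scratch_pfn)
{
        /* Build one template entry pointing at the scratch page... */
        uint64_t e = (scratch_pfn << 12) | PTE_PRESENT;
        uint32_t offset;

        /* ...and stamp it across the whole range, as the reset helper
         * does for the aperture and hidden GGTT ranges. */
        for (offset = 0; offset < num_entries; offset++)
                ggtt[index + offset] = e;
}

int main(void)
{
        uint64_t ggtt[NUM_ENTRIES] = { 0 };

        reset_range(ggtt, 0, NUM_ENTRIES, 0xabcd);
        printf("entry 0 = 0x%llx\n", (unsigned long long)ggtt[0]);
        return 0;
}

The gvt.h struct hunk continues below.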
struct notifier_block iommu_notifier; + struct notifier_block group_notifier; + struct kvm *kvm; + struct work_struct release_work; + atomic_t released; } vdev; #endif }; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index dc0365033157..faaae07ae487 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -31,6 +31,7 @@ #include <linux/init.h> #include <linux/device.h> #include <linux/mm.h> +#include <linux/mmu_context.h> #include <linux/types.h> #include <linux/list.h> #include <linux/rbtree.h> @@ -39,24 +40,13 @@ #include <linux/uuid.h> #include <linux/kvm_host.h> #include <linux/vfio.h> +#include <linux/mdev.h> #include "i915_drv.h" #include "gvt.h" -static inline long kvmgt_pin_pages(struct device *dev, unsigned long *user_pfn, - long npage, int prot, unsigned long *phys_pfn) -{ - return 0; -} -static inline long kvmgt_unpin_pages(struct device *dev, unsigned long *pfn, - long npage) -{ - return 0; -} - static const struct intel_gvt_ops *intel_gvt_ops; - /* helper macros copied from vfio-pci */ #define VFIO_PCI_OFFSET_SHIFT 40 #define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT) @@ -90,6 +80,15 @@ struct gvt_dma { kvm_pfn_t pfn; }; +static inline bool handle_valid(unsigned long handle) +{ + return !!(handle & ~0xff); +} + +static int kvmgt_guest_init(struct mdev_device *mdev); +static void intel_vgpu_release_work(struct work_struct *work); +static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); + static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) { struct rb_node *node = vgpu->vdev.cache.rb_node; @@ -115,12 +114,15 @@ out: static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) { struct gvt_dma *entry; + kvm_pfn_t pfn; mutex_lock(&vgpu->vdev.cache_lock); + entry = __gvt_cache_find(vgpu, gfn); - mutex_unlock(&vgpu->vdev.cache_lock); + pfn = (entry == NULL) ? 0 : entry->pfn; - return entry == NULL ? 
0 : entry->pfn; + mutex_unlock(&vgpu->vdev.cache_lock); + return pfn; } static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn) @@ -167,9 +169,10 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu, static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn) { - struct device *dev = vgpu->vdev.mdev; + struct device *dev = mdev_dev(vgpu->vdev.mdev); struct gvt_dma *this; - unsigned long pfn; + unsigned long g1; + int rc; mutex_lock(&vgpu->vdev.cache_lock); this = __gvt_cache_find(vgpu, gfn); @@ -178,8 +181,9 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn) return; } - pfn = this->pfn; - WARN_ON((kvmgt_unpin_pages(dev, &pfn, 1) != 1)); + g1 = gfn; + rc = vfio_unpin_pages(dev, &g1, 1); + WARN_ON(rc != 1); __gvt_cache_remove_entry(vgpu, this); mutex_unlock(&vgpu->vdev.cache_lock); } @@ -194,15 +198,15 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu) { struct gvt_dma *dma; struct rb_node *node = NULL; - struct device *dev = vgpu->vdev.mdev; - unsigned long pfn; + struct device *dev = mdev_dev(vgpu->vdev.mdev); + unsigned long gfn; mutex_lock(&vgpu->vdev.cache_lock); while ((node = rb_first(&vgpu->vdev.cache))) { dma = rb_entry(node, struct gvt_dma, node); - pfn = dma->pfn; + gfn = dma->gfn; - kvmgt_unpin_pages(dev, &pfn, 1); + vfio_unpin_pages(dev, &gfn, 1); __gvt_cache_remove_entry(vgpu, dma); } mutex_unlock(&vgpu->vdev.cache_lock); @@ -226,7 +230,53 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, return NULL; } +static ssize_t available_instance_show(struct kobject *kobj, struct device *dev, + char *buf) +{ + struct intel_vgpu_type *type; + unsigned int num = 0; + void *gvt = kdev_to_i915(dev)->gvt; + + type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); + if (!type) + num = 0; + else + num = type->avail_instance; + + return sprintf(buf, "%u\n", num); +} + +static ssize_t device_api_show(struct kobject *kobj, struct device *dev, + char *buf) +{ + return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING); +} + +static ssize_t description_show(struct kobject *kobj, struct device *dev, + char *buf) +{ + struct intel_vgpu_type *type; + void *gvt = kdev_to_i915(dev)->gvt; + + type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); + if (!type) + return 0; + + return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n" + "fence: %d\n", + BYTES_TO_MB(type->low_gm_size), + BYTES_TO_MB(type->high_gm_size), + type->fence); +} + +static MDEV_TYPE_ATTR_RO(available_instance); +static MDEV_TYPE_ATTR_RO(device_api); +static MDEV_TYPE_ATTR_RO(description); + static struct attribute *type_attrs[] = { + &mdev_type_attr_available_instance.attr, + &mdev_type_attr_device_api.attr, + &mdev_type_attr_description.attr, NULL, }; @@ -322,7 +372,7 @@ static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn) if (kvmgt_gfn_is_write_protected(info, gfn)) return; - p = kmalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC); + p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC); if (WARN(!p, "gfn: 0x%llx\n", gfn)) return; @@ -342,6 +392,739 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info, } } +static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) +{ + struct intel_vgpu *vgpu; + struct intel_vgpu_type *type; + struct device *pdev; + void *gvt; + + pdev = mdev_parent_dev(mdev); + gvt = kdev_to_i915(pdev)->gvt; + + type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); + if (!type) { + gvt_err("failed to find type %s to create\n", + kobject_name(kobj)); + return 
-EINVAL; + } + + vgpu = intel_gvt_ops->vgpu_create(gvt, type); + if (IS_ERR_OR_NULL(vgpu)) { + gvt_err("create intel vgpu failed\n"); + return -EINVAL; + } + + INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); + + vgpu->vdev.mdev = mdev; + mdev_set_drvdata(mdev, vgpu); + + gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", + dev_name(mdev_dev(mdev))); + return 0; +} + +static int intel_vgpu_remove(struct mdev_device *mdev) +{ + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + + if (handle_valid(vgpu->handle)) + return -EBUSY; + + intel_gvt_ops->vgpu_destroy(vgpu); + return 0; +} + +static int intel_vgpu_iommu_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct intel_vgpu *vgpu = container_of(nb, + struct intel_vgpu, + vdev.iommu_notifier); + + if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) { + struct vfio_iommu_type1_dma_unmap *unmap = data; + unsigned long gfn, end_gfn; + + gfn = unmap->iova >> PAGE_SHIFT; + end_gfn = gfn + unmap->size / PAGE_SIZE; + + while (gfn < end_gfn) + gvt_cache_remove(vgpu, gfn++); + } + + return NOTIFY_OK; +} + +static int intel_vgpu_group_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct intel_vgpu *vgpu = container_of(nb, + struct intel_vgpu, + vdev.group_notifier); + + /* the only action we care about */ + if (action == VFIO_GROUP_NOTIFY_SET_KVM) { + vgpu->vdev.kvm = data; + + if (!data) + schedule_work(&vgpu->vdev.release_work); + } + + return NOTIFY_OK; +} + +static int intel_vgpu_open(struct mdev_device *mdev) +{ + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + unsigned long events; + int ret; + + vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier; + vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier; + + events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; + ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, + &vgpu->vdev.iommu_notifier); + if (ret != 0) { + gvt_err("vfio_register_notifier for iommu failed: %d\n", ret); + goto out; + } + + events = VFIO_GROUP_NOTIFY_SET_KVM; + ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, + &vgpu->vdev.group_notifier); + if (ret != 0) { + gvt_err("vfio_register_notifier for group failed: %d\n", ret); + goto undo_iommu; + } + + ret = kvmgt_guest_init(mdev); + if (ret) + goto undo_group; + + atomic_set(&vgpu->vdev.released, 0); + return ret; + +undo_group: + vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, + &vgpu->vdev.group_notifier); + +undo_iommu: + vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, + &vgpu->vdev.iommu_notifier); +out: + return ret; +} + +static void __intel_vgpu_release(struct intel_vgpu *vgpu) +{ + struct kvmgt_guest_info *info; + int ret; + + if (!handle_valid(vgpu->handle)) + return; + + if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1)) + return; + + ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY, + &vgpu->vdev.iommu_notifier); + WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); + + ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY, + &vgpu->vdev.group_notifier); + WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret); + + info = (struct kvmgt_guest_info *)vgpu->handle; + kvmgt_guest_exit(info); + + vgpu->vdev.kvm = NULL; + vgpu->handle = 0; +} + +static void intel_vgpu_release(struct mdev_device *mdev) +{ + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + + __intel_vgpu_release(vgpu); +} + +static void intel_vgpu_release_work(struct 
work_struct *work) +{ + struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu, + vdev.release_work); + + __intel_vgpu_release(vgpu); +} + +static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu) +{ + u32 start_lo, start_hi; + u32 mem_type; + int pos = PCI_BASE_ADDRESS_0; + + start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) & + PCI_BASE_ADDRESS_MEM_MASK; + mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) & + PCI_BASE_ADDRESS_MEM_TYPE_MASK; + + switch (mem_type) { + case PCI_BASE_ADDRESS_MEM_TYPE_64: + start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + + pos + 4)); + break; + case PCI_BASE_ADDRESS_MEM_TYPE_32: + case PCI_BASE_ADDRESS_MEM_TYPE_1M: + /* 1M mem BAR treated as 32-bit BAR */ + default: + /* mem unknown type treated as 32-bit BAR */ + start_hi = 0; + break; + } + + return ((u64)start_hi << 32) | start_lo; +} + +static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, + size_t count, loff_t *ppos, bool is_write) +{ + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + int ret = -EINVAL; + + + if (index >= VFIO_PCI_NUM_REGIONS) { + gvt_err("invalid index: %u\n", index); + return -EINVAL; + } + + switch (index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + if (is_write) + ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos, + buf, count); + else + ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos, + buf, count); + break; + case VFIO_PCI_BAR0_REGION_INDEX: + case VFIO_PCI_BAR1_REGION_INDEX: + if (is_write) { + uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu); + + ret = intel_gvt_ops->emulate_mmio_write(vgpu, + bar0_start + pos, buf, count); + } else { + uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu); + + ret = intel_gvt_ops->emulate_mmio_read(vgpu, + bar0_start + pos, buf, count); + } + break; + case VFIO_PCI_BAR2_REGION_INDEX: + case VFIO_PCI_BAR3_REGION_INDEX: + case VFIO_PCI_BAR4_REGION_INDEX: + case VFIO_PCI_BAR5_REGION_INDEX: + case VFIO_PCI_VGA_REGION_INDEX: + case VFIO_PCI_ROM_REGION_INDEX: + default: + gvt_err("unsupported region: %u\n", index); + } + + return ret == 0 ? 
count : ret; +} + +static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned int done = 0; + int ret; + + while (count) { + size_t filled; + + if (count >= 4 && !(*ppos % 4)) { + u32 val; + + ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ppos, false); + if (ret <= 0) + goto read_err; + + if (copy_to_user(buf, &val, sizeof(val))) + goto read_err; + + filled = 4; + } else if (count >= 2 && !(*ppos % 2)) { + u16 val; + + ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ppos, false); + if (ret <= 0) + goto read_err; + + if (copy_to_user(buf, &val, sizeof(val))) + goto read_err; + + filled = 2; + } else { + u8 val; + + ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos, + false); + if (ret <= 0) + goto read_err; + + if (copy_to_user(buf, &val, sizeof(val))) + goto read_err; + + filled = 1; + } + + count -= filled; + done += filled; + *ppos += filled; + buf += filled; + } + + return done; + +read_err: + return -EFAULT; +} + +static ssize_t intel_vgpu_write(struct mdev_device *mdev, + const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned int done = 0; + int ret; + + while (count) { + size_t filled; + + if (count >= 4 && !(*ppos % 4)) { + u32 val; + + if (copy_from_user(&val, buf, sizeof(val))) + goto write_err; + + ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ppos, true); + if (ret <= 0) + goto write_err; + + filled = 4; + } else if (count >= 2 && !(*ppos % 2)) { + u16 val; + + if (copy_from_user(&val, buf, sizeof(val))) + goto write_err; + + ret = intel_vgpu_rw(mdev, (char *)&val, + sizeof(val), ppos, true); + if (ret <= 0) + goto write_err; + + filled = 2; + } else { + u8 val; + + if (copy_from_user(&val, buf, sizeof(val))) + goto write_err; + + ret = intel_vgpu_rw(mdev, &val, sizeof(val), + ppos, true); + if (ret <= 0) + goto write_err; + + filled = 1; + } + + count -= filled; + done += filled; + *ppos += filled; + buf += filled; + } + + return done; +write_err: + return -EFAULT; +} + +static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) +{ + unsigned int index; + u64 virtaddr; + unsigned long req_size, pgoff = 0; + pgprot_t pg_prot; + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + + index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); + if (index >= VFIO_PCI_ROM_REGION_INDEX) + return -EINVAL; + + if (vma->vm_end < vma->vm_start) + return -EINVAL; + if ((vma->vm_flags & VM_SHARED) == 0) + return -EINVAL; + if (index != VFIO_PCI_BAR2_REGION_INDEX) + return -EINVAL; + + pg_prot = vma->vm_page_prot; + virtaddr = vma->vm_start; + req_size = vma->vm_end - vma->vm_start; + pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT; + + return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); +} + +static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type) +{ + if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX) + return 1; + + return 0; +} + +static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu, + unsigned int index, unsigned int start, + unsigned int count, uint32_t flags, + void *data) +{ + return 0; +} + +static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu, + unsigned int index, unsigned int start, + unsigned int count, uint32_t flags, void *data) +{ + return 0; +} + +static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu, + unsigned int index, unsigned int start, unsigned int count, + uint32_t flags, void *data) +{ + return 0; +} + +static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu, + 
unsigned int index, unsigned int start, unsigned int count, + uint32_t flags, void *data) +{ + struct eventfd_ctx *trigger; + + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int fd = *(int *)data; + + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) { + gvt_err("eventfd_ctx_fdget failed\n"); + return PTR_ERR(trigger); + } + vgpu->vdev.msi_trigger = trigger; + } + + return 0; +} + +static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags, + unsigned int index, unsigned int start, unsigned int count, + void *data) +{ + int (*func)(struct intel_vgpu *vgpu, unsigned int index, + unsigned int start, unsigned int count, uint32_t flags, + void *data) = NULL; + + switch (index) { + case VFIO_PCI_INTX_IRQ_INDEX: + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_MASK: + func = intel_vgpu_set_intx_mask; + break; + case VFIO_IRQ_SET_ACTION_UNMASK: + func = intel_vgpu_set_intx_unmask; + break; + case VFIO_IRQ_SET_ACTION_TRIGGER: + func = intel_vgpu_set_intx_trigger; + break; + } + break; + case VFIO_PCI_MSI_IRQ_INDEX: + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_MASK: + case VFIO_IRQ_SET_ACTION_UNMASK: + /* XXX Need masking support exported */ + break; + case VFIO_IRQ_SET_ACTION_TRIGGER: + func = intel_vgpu_set_msi_trigger; + break; + } + break; + } + + if (!func) + return -ENOTTY; + + return func(vgpu, index, start, count, flags, data); +} + +static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, + unsigned long arg) +{ + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + unsigned long minsz; + + gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd); + + if (cmd == VFIO_DEVICE_GET_INFO) { + struct vfio_device_info info; + + minsz = offsetofend(struct vfio_device_info, num_irqs); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + info.flags = VFIO_DEVICE_FLAGS_PCI; + info.flags |= VFIO_DEVICE_FLAGS_RESET; + info.num_regions = VFIO_PCI_NUM_REGIONS; + info.num_irqs = VFIO_PCI_NUM_IRQS; + + return copy_to_user((void __user *)arg, &info, minsz) ? 
+ -EFAULT : 0; + + } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { + struct vfio_region_info info; + struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; + int i, ret; + struct vfio_region_info_cap_sparse_mmap *sparse = NULL; + size_t size; + int nr_areas = 1; + int cap_type_id; + + minsz = offsetofend(struct vfio_region_info, offset); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + switch (info.index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = INTEL_GVT_MAX_CFG_SPACE_SZ; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + break; + case VFIO_PCI_BAR0_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = vgpu->cfg_space.bar[info.index].size; + if (!info.size) { + info.flags = 0; + break; + } + + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + break; + case VFIO_PCI_BAR1_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = 0; + info.flags = 0; + break; + case VFIO_PCI_BAR2_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.flags = VFIO_REGION_INFO_FLAG_CAPS | + VFIO_REGION_INFO_FLAG_MMAP | + VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + info.size = gvt_aperture_sz(vgpu->gvt); + + size = sizeof(*sparse) + + (nr_areas * sizeof(*sparse->areas)); + sparse = kzalloc(size, GFP_KERNEL); + if (!sparse) + return -ENOMEM; + + sparse->nr_areas = nr_areas; + cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP; + sparse->areas[0].offset = + PAGE_ALIGN(vgpu_aperture_offset(vgpu)); + sparse->areas[0].size = vgpu_aperture_sz(vgpu); + if (!caps.buf) { + kfree(caps.buf); + caps.buf = NULL; + caps.size = 0; + } + break; + + case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = 0; + + info.flags = 0; + gvt_dbg_core("get region info bar:%d\n", info.index); + break; + + case VFIO_PCI_ROM_REGION_INDEX: + case VFIO_PCI_VGA_REGION_INDEX: + gvt_dbg_core("get region info index:%d\n", info.index); + break; + default: + { + struct vfio_region_info_cap_type cap_type; + + if (info.index >= VFIO_PCI_NUM_REGIONS + + vgpu->vdev.num_regions) + return -EINVAL; + + i = info.index - VFIO_PCI_NUM_REGIONS; + + info.offset = + VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = vgpu->vdev.region[i].size; + info.flags = vgpu->vdev.region[i].flags; + + cap_type.type = vgpu->vdev.region[i].type; + cap_type.subtype = vgpu->vdev.region[i].subtype; + + ret = vfio_info_add_capability(&caps, + VFIO_REGION_INFO_CAP_TYPE, + &cap_type); + if (ret) + return ret; + } + } + + if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) { + switch (cap_type_id) { + case VFIO_REGION_INFO_CAP_SPARSE_MMAP: + ret = vfio_info_add_capability(&caps, + VFIO_REGION_INFO_CAP_SPARSE_MMAP, + sparse); + kfree(sparse); + if (ret) + return ret; + break; + default: + return -EINVAL; + } + } + + if (caps.size) { + if (info.argsz < sizeof(info) + caps.size) { + info.argsz = sizeof(info) + caps.size; + info.cap_offset = 0; + } else { + vfio_info_cap_shift(&caps, sizeof(info)); + if (copy_to_user((void __user *)arg + + sizeof(info), caps.buf, + caps.size)) { + kfree(caps.buf); + return -EFAULT; + } + info.cap_offset = sizeof(info); + } + + kfree(caps.buf); + } + + return copy_to_user((void __user *)arg, &info, minsz) ? 
+ -EFAULT : 0; + } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { + struct vfio_irq_info info; + + minsz = offsetofend(struct vfio_irq_info, count); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS) + return -EINVAL; + + switch (info.index) { + case VFIO_PCI_INTX_IRQ_INDEX: + case VFIO_PCI_MSI_IRQ_INDEX: + break; + default: + return -EINVAL; + } + + info.flags = VFIO_IRQ_INFO_EVENTFD; + + info.count = intel_vgpu_get_irq_count(vgpu, info.index); + + if (info.index == VFIO_PCI_INTX_IRQ_INDEX) + info.flags |= (VFIO_IRQ_INFO_MASKABLE | + VFIO_IRQ_INFO_AUTOMASKED); + else + info.flags |= VFIO_IRQ_INFO_NORESIZE; + + return copy_to_user((void __user *)arg, &info, minsz) ? + -EFAULT : 0; + } else if (cmd == VFIO_DEVICE_SET_IRQS) { + struct vfio_irq_set hdr; + u8 *data = NULL; + int ret = 0; + size_t data_size = 0; + + minsz = offsetofend(struct vfio_irq_set, count); + + if (copy_from_user(&hdr, (void __user *)arg, minsz)) + return -EFAULT; + + if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { + int max = intel_vgpu_get_irq_count(vgpu, hdr.index); + + ret = vfio_set_irqs_validate_and_prepare(&hdr, max, + VFIO_PCI_NUM_IRQS, &data_size); + if (ret) { + gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); + return -EINVAL; + } + if (data_size) { + data = memdup_user((void __user *)(arg + minsz), + data_size); + if (IS_ERR(data)) + return PTR_ERR(data); + } + } + + ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index, + hdr.start, hdr.count, data); + kfree(data); + + return ret; + } else if (cmd == VFIO_DEVICE_RESET) { + intel_gvt_ops->vgpu_reset(vgpu); + return 0; + } + + return 0; +} + +static const struct mdev_parent_ops intel_vgpu_ops = { + .supported_type_groups = intel_vgpu_type_groups, + .create = intel_vgpu_create, + .remove = intel_vgpu_remove, + + .open = intel_vgpu_open, + .release = intel_vgpu_release, + + .read = intel_vgpu_read, + .write = intel_vgpu_write, + .mmap = intel_vgpu_mmap, + .ioctl = intel_vgpu_ioctl, +}; + static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) { if (!intel_gvt_init_vgpu_type_groups(gvt)) @@ -349,24 +1132,34 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) intel_gvt_ops = ops; - /* MDEV is not yet available */ - return -ENODEV; + return mdev_register_device(dev, &intel_vgpu_ops); } static void kvmgt_host_exit(struct device *dev, void *gvt) { intel_gvt_cleanup_vgpu_type_groups(gvt); + mdev_unregister_device(dev); } static int kvmgt_write_protect_add(unsigned long handle, u64 gfn) { - struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle; - struct kvm *kvm = info->kvm; + struct kvmgt_guest_info *info; + struct kvm *kvm; struct kvm_memory_slot *slot; int idx; + if (!handle_valid(handle)) + return -ESRCH; + + info = (struct kvmgt_guest_info *)handle; + kvm = info->kvm; + idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); + if (!slot) { + srcu_read_unlock(&kvm->srcu, idx); + return -EINVAL; + } spin_lock(&kvm->mmu_lock); @@ -384,13 +1177,23 @@ out: static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn) { - struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle; - struct kvm *kvm = info->kvm; + struct kvmgt_guest_info *info; + struct kvm *kvm; struct kvm_memory_slot *slot; int idx; + if (!handle_valid(handle)) + return 0; + + info = (struct kvmgt_guest_info *)handle; + kvm = info->kvm; + idx = srcu_read_lock(&kvm->srcu); slot = gfn_to_memslot(kvm, gfn); + if (!slot) { + 
srcu_read_unlock(&kvm->srcu, idx); + return -EINVAL; + } spin_lock(&kvm->mmu_lock); @@ -476,6 +1279,81 @@ static int kvmgt_detect_host(void) return kvmgt_check_guest() ? -ENODEV : 0; } +static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) +{ + struct intel_vgpu *itr; + struct kvmgt_guest_info *info; + int id; + bool ret = false; + + mutex_lock(&vgpu->gvt->lock); + for_each_active_vgpu(vgpu->gvt, itr, id) { + if (!handle_valid(itr->handle)) + continue; + + info = (struct kvmgt_guest_info *)itr->handle; + if (kvm && kvm == info->kvm) { + ret = true; + goto out; + } + } +out: + mutex_unlock(&vgpu->gvt->lock); + return ret; +} + +static int kvmgt_guest_init(struct mdev_device *mdev) +{ + struct kvmgt_guest_info *info; + struct intel_vgpu *vgpu; + struct kvm *kvm; + + vgpu = mdev_get_drvdata(mdev); + if (handle_valid(vgpu->handle)) + return -EEXIST; + + kvm = vgpu->vdev.kvm; + if (!kvm || kvm->mm != current->mm) { + gvt_err("KVM is required to use Intel vGPU\n"); + return -ESRCH; + } + + if (__kvmgt_vgpu_exist(vgpu, kvm)) + return -EEXIST; + + info = vzalloc(sizeof(struct kvmgt_guest_info)); + if (!info) + return -ENOMEM; + + vgpu->handle = (unsigned long)info; + info->vgpu = vgpu; + info->kvm = kvm; + + kvmgt_protect_table_init(info); + gvt_cache_init(vgpu); + + info->track_node.track_write = kvmgt_page_track_write; + info->track_node.track_flush_slot = kvmgt_page_track_flush_slot; + kvm_page_track_register_notifier(kvm, &info->track_node); + + return 0; +} + +static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) +{ + if (!info) { + gvt_err("kvmgt_guest_info invalid\n"); + return false; + } + + kvm_page_track_unregister_notifier(info->kvm, &info->track_node); + kvmgt_protect_table_destroy(info); + gvt_cache_destroy(info->vgpu); + vfree(info); + + return true; +} + static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle) { /* nothing to do here */ @@ -489,63 +1367,72 @@ static void kvmgt_detach_vgpu(unsigned long handle) static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) { - struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle; - struct intel_vgpu *vgpu = info->vgpu; + struct kvmgt_guest_info *info; + struct intel_vgpu *vgpu; - if (vgpu->vdev.msi_trigger) - return eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1; + if (!handle_valid(handle)) + return -ESRCH; - return false; + info = (struct kvmgt_guest_info *)handle; + vgpu = info->vgpu; + + if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1) + return 0; + + return -EFAULT; } static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) { unsigned long pfn; - struct kvmgt_guest_info *info = (struct kvmgt_guest_info *)handle; + struct kvmgt_guest_info *info; + struct device *dev; int rc; + if (!handle_valid(handle)) + return INTEL_GVT_INVALID_ADDR; + + info = (struct kvmgt_guest_info *)handle; pfn = gvt_cache_find(info->vgpu, gfn); if (pfn != 0) return pfn; - rc = kvmgt_pin_pages(info->vgpu->vdev.mdev, &gfn, 1, - IOMMU_READ | IOMMU_WRITE, &pfn); + pfn = INTEL_GVT_INVALID_ADDR; + dev = mdev_dev(info->vgpu->vdev.mdev); + rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); if (rc != 1) { - gvt_err("vfio_pin_pages failed for gfn: 0x%lx\n", gfn); - return 0; + gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); + return INTEL_GVT_INVALID_ADDR; } gvt_cache_add(info->vgpu, gfn, pfn); return pfn; } -static void *kvmgt_gpa_to_hva(unsigned long handle, unsigned long gpa) +static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, + void *buf, 
unsigned long len, bool write) { - unsigned long pfn; - gfn_t gfn = gpa_to_gfn(gpa); + struct kvmgt_guest_info *info; + struct kvm *kvm; + int ret; + bool kthread = current->mm == NULL; - pfn = kvmgt_gfn_to_pfn(handle, gfn); - if (!pfn) - return NULL; + if (!handle_valid(handle)) + return -ESRCH; - return (char *)pfn_to_kaddr(pfn) + offset_in_page(gpa); -} + info = (struct kvmgt_guest_info *)handle; + kvm = info->kvm; -static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, - void *buf, unsigned long len, bool write) -{ - void *hva = NULL; + if (kthread) + use_mm(kvm->mm); - hva = kvmgt_gpa_to_hva(handle, gpa); - if (!hva) - return -EFAULT; + ret = write ? kvm_write_guest(kvm, gpa, buf, len) : + kvm_read_guest(kvm, gpa, buf, len); - if (write) - memcpy(hva, buf, len); - else - memcpy(buf, hva, len); + if (kthread) + unuse_mm(kvm->mm); - return 0; + return ret; } static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index d2a0fbc896c3..81cd921770c6 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) int i, ret; for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) { - mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu) + mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va + i * PAGE_SIZE); if (mfn == INTEL_GVT_INVALID_ADDR) { gvt_err("fail to get MFN from VA\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 89dbe55077f5..c3c1d32b65a3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3509,6 +3509,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val); +int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, + u32 reply_mask, u32 reply, int timeout_base_ms); /* intel_sideband.c */ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d0dcaf35b429..3dd7fc662859 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -174,21 +174,35 @@ static struct sg_table * i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) { struct address_space *mapping = obj->base.filp->f_mapping; - char *vaddr = obj->phys_handle->vaddr; + drm_dma_handle_t *phys; struct sg_table *st; struct scatterlist *sg; + char *vaddr; int i; if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) return ERR_PTR(-EINVAL); + /* Always aligning to the object size allows a single allocation + * to handle all possible callers, and given typical object sizes, + * the alignment of the buddy allocation will naturally match.
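+ * (For example, a 16 KiB object gets a 16 KiB-aligned buffer, and a
+ * 12 KiB object is aligned to roundup_pow_of_two(12 KiB) == 16 KiB.)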
+ */ + phys = drm_pci_alloc(obj->base.dev, + obj->base.size, + roundup_pow_of_two(obj->base.size)); + if (!phys) + return ERR_PTR(-ENOMEM); + + vaddr = phys->vaddr; for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { struct page *page; char *src; page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) - return ERR_CAST(page); + if (IS_ERR(page)) { + st = ERR_CAST(page); + goto err_phys; + } src = kmap_atomic(page); memcpy(vaddr, src, PAGE_SIZE); @@ -202,34 +216,44 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) i915_gem_chipset_flush(to_i915(obj->base.dev)); st = kmalloc(sizeof(*st), GFP_KERNEL); - if (st == NULL) - return ERR_PTR(-ENOMEM); + if (!st) { + st = ERR_PTR(-ENOMEM); + goto err_phys; + } if (sg_alloc_table(st, 1, GFP_KERNEL)) { kfree(st); - return ERR_PTR(-ENOMEM); + st = ERR_PTR(-ENOMEM); + goto err_phys; } sg = st->sgl; sg->offset = 0; sg->length = obj->base.size; - sg_dma_address(sg) = obj->phys_handle->busaddr; + sg_dma_address(sg) = phys->busaddr; sg_dma_len(sg) = obj->base.size; + obj->phys_handle = phys; + return st; + +err_phys: + drm_pci_free(obj->base.dev, phys); return st; } static void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, - struct sg_table *pages) + struct sg_table *pages, + bool needs_clflush) { GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); if (obj->mm.madv == I915_MADV_DONTNEED) obj->mm.dirty = false; - if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 && + if (needs_clflush && + (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 && !cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) drm_clflush_sg(pages); @@ -241,7 +265,7 @@ static void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, struct sg_table *pages) { - __i915_gem_object_release_shmem(obj, pages); + __i915_gem_object_release_shmem(obj, pages, false); if (obj->mm.dirty) { struct address_space *mapping = obj->base.filp->f_mapping; @@ -272,12 +296,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, sg_free_table(pages); kfree(pages); + + drm_pci_free(obj->base.dev, obj->phys_handle); } static void i915_gem_object_release_phys(struct drm_i915_gem_object *obj) { - drm_pci_free(obj->base.dev, obj->phys_handle); i915_gem_object_unpin_pages(obj); } @@ -538,15 +563,13 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) { - drm_dma_handle_t *phys; int ret; - if (obj->phys_handle) { - if ((unsigned long)obj->phys_handle->vaddr & (align -1)) - return -EBUSY; + if (align > obj->base.size) + return -EINVAL; + if (obj->ops == &i915_gem_phys_ops) return 0; - } if (obj->mm.madv != I915_MADV_WILLNEED) return -EFAULT; @@ -562,12 +585,6 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, if (obj->mm.pages) return -EBUSY; - /* create a new object */ - phys = drm_pci_alloc(obj->base.dev, obj->base.size, align); - if (!phys) - return -ENOMEM; - - obj->phys_handle = phys; obj->ops = &i915_gem_phys_ops; return i915_gem_object_pin_pages(obj); @@ -1796,8 +1813,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf) int ret; /* We don't use vmf->pgoff since that has the fake offset */ - page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >> - PAGE_SHIFT; + page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; trace_i915_gem_object_fault(obj, page_offset, true, write); @@ -2217,7 +2233,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, struct sgt_iter sgt_iter; struct page *page; - __i915_gem_object_release_shmem(obj, pages); + 
__i915_gem_object_release_shmem(obj, pages, true); i915_gem_gtt_finish_pages(obj, pages); @@ -2290,15 +2306,6 @@ unlock: mutex_unlock(&obj->mm.lock); } -static unsigned int swiotlb_max_size(void) -{ -#if IS_ENABLED(CONFIG_SWIOTLB) - return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE); -#else - return 0; -#endif -} - static void i915_sg_trim(struct sg_table *orig_st) { struct sg_table new_st; @@ -2308,7 +2315,7 @@ static void i915_sg_trim(struct sg_table *orig_st) if (orig_st->nents == orig_st->orig_nents) return; - if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL)) + if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN)) return; new_sg = new_st.sgl; @@ -2327,7 +2334,8 @@ static struct sg_table * i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - int page_count, i; + const unsigned long page_count = obj->base.size / PAGE_SIZE; + unsigned long i; struct address_space *mapping; struct sg_table *st; struct scatterlist *sg; @@ -2345,7 +2353,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); - max_segment = swiotlb_max_size(); + max_segment = swiotlb_max_segment(); if (!max_segment) max_segment = rounddown(UINT_MAX, PAGE_SIZE); @@ -2353,7 +2361,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) if (st == NULL) return ERR_PTR(-ENOMEM); - page_count = obj->base.size / PAGE_SIZE; +rebuild_st: if (sg_alloc_table(st, page_count, GFP_KERNEL)) { kfree(st); return ERR_PTR(-ENOMEM); @@ -2412,8 +2420,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) i915_sg_trim(st); ret = i915_gem_gtt_prepare_pages(obj, st); - if (ret) - goto err_pages; + if (ret) { + /* DMA remapping failed? One possible cause is that + * it could not reserve enough large entries; asking + * for PAGE_SIZE chunks instead may be helpful.
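+ * (The code below puts the pages back, clamps max_segment to
+ * PAGE_SIZE and jumps back to the rebuild_st label to retry.)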
+ */ + if (max_segment > PAGE_SIZE) { + for_each_sgt_page(page, sgt_iter, st) + put_page(page); + sg_free_table(st); + + max_segment = PAGE_SIZE; + goto rebuild_st; + } else { + dev_warn(&dev_priv->drm.pdev->dev, + "Failed to DMA remap %lu pages\n", + page_count); + goto err_pages; + } + } if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_do_bit_17_swizzle(obj, st); @@ -2696,6 +2721,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) struct drm_i915_gem_request *request; struct i915_gem_context *incomplete_ctx; struct intel_timeline *timeline; + unsigned long flags; bool ring_hung; if (engine->irq_seqno_barrier) @@ -2731,13 +2757,20 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) if (i915_gem_context_is_default(incomplete_ctx)) return; + timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine); + + spin_lock_irqsave(&engine->timeline->lock, flags); + spin_lock(&timeline->lock); + list_for_each_entry_continue(request, &engine->timeline->requests, link) if (request->ctx == incomplete_ctx) reset_request(request); - timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine); list_for_each_entry(request, &timeline->requests, link) reset_request(request); + + spin_unlock(&timeline->lock); + spin_unlock_irqrestore(&engine->timeline->lock, flags); } void i915_gem_reset(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 57194471f8cd..b8f403faadbb 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -972,7 +972,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req, if (busywait_stop(timeout_us, cpu)) break; - cpu_relax_lowlatency(); + cpu_relax(); } while (!need_resched()); return false; diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index e2b077df2da0..d229f47d1028 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -413,6 +413,25 @@ i915_gem_active_set(struct i915_gem_active *active, rcu_assign_pointer(active->request, request); } +/** + * i915_gem_active_set_retire_fn - updates the retirement callback + * @active - the active tracker + * @fn - the routine called when the request is retired + * @mutex - struct_mutex used to guard retirements + * + * i915_gem_active_set_retire_fn() updates the function pointer that + * is called when the final request associated with the @active tracker + * is retired. 
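+ *
+ * Passing a NULL @fn resets the callback to i915_gem_retire_noop.
+ * For example (see intel_overlay_submit_request() in intel_overlay.c
+ * below):
+ *
+ * i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
+ * &overlay->i915->drm.struct_mutex);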
+ */ +static inline void +i915_gem_active_set_retire_fn(struct i915_gem_active *active, + i915_gem_retire_fn fn, + struct mutex *mutex) +{ + lockdep_assert_held(mutex); + active->retire = fn ?: i915_gem_retire_noop; +} + static inline struct drm_i915_gem_request * __i915_gem_active_peek(const struct i915_gem_active *active) { diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index a6fc1bdc48af..401006b4c6a3 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -35,31 +35,22 @@ #include "i915_drv.h" #include "i915_trace.h" -static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) +static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) { - if (!mutex_is_locked(mutex)) + switch (mutex_trylock_recursive(&dev->struct_mutex)) { + case MUTEX_TRYLOCK_FAILED: return false; -#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER) - return mutex->owner == task; -#else - /* Since UP may be pre-empted, we cannot assume that we own the lock */ - return false; -#endif -} - -static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) -{ - if (!mutex_trylock(&dev->struct_mutex)) { - if (!mutex_is_locked_by(&dev->struct_mutex, current)) - return false; + case MUTEX_TRYLOCK_SUCCESS: + *unlock = true; + return true; + case MUTEX_TRYLOCK_RECURSIVE: *unlock = false; - } else { - *unlock = true; + return true; } - return true; + BUG(); } static bool any_vma_pinned(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index ebaa941c83af..abc78bbfc1dc 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -55,10 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, return -ENODEV; /* See the comment at the drm_mm_init() call for more about this check. 
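 * In short, keep allocations out of the first 4 KiB of stolen memory
 * on the affected platforms.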
- * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete) + * WaSkipStolenMemoryFirstPage:bdw+ (incomplete) */ - if (start < 4096 && (IS_GEN8(dev_priv) || - IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0))) + if (start < 4096 && INTEL_GEN(dev_priv) >= 8) start = 4096; mutex_lock(&dev_priv->mm.stolen_lock); diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 107ddf51065e..d068af2ec3a3 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -515,7 +515,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) obj->userptr.ptr + pinned * PAGE_SIZE, npages - pinned, flags, - pvec + pinned, NULL); + pvec + pinned, NULL, NULL); if (ret < 0) break; diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 47590ab08d7e..3df8d3dd31cd 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -460,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL); static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL); -static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store); +static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store); static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store); static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store); diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 8405b5a367d7..7e3545f65257 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -46,14 +46,20 @@ struct edp_power_seq { u16 t11_t12; } __packed; -/* MIPI Sequence Block definitions */ +/* + * MIPI Sequence Block definitions + * + * Note the VBT spec has AssertReset / DeassertReset swapped from their + * usual naming, we use the proper names here to avoid confusion when + * reading the code. 
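+ *
+ * That is, the sequence the VBT spec labels "MipiAssertResetPin"
+ * actually deasserts the reset pin, so it is exposed here as
+ * MIPI_SEQ_DEASSERT_RESET, and vice versa.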
+ */ enum mipi_seq { MIPI_SEQ_END = 0, - MIPI_SEQ_ASSERT_RESET, + MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */ MIPI_SEQ_INIT_OTP, MIPI_SEQ_DISPLAY_ON, MIPI_SEQ_DISPLAY_OFF, - MIPI_SEQ_DEASSERT_RESET, + MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */ MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */ MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */ MIPI_SEQ_TEAR_ON, /* sequence block v2+ */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1c3a20ee0709..252aaabc7eef 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6243,35 +6243,24 @@ skl_dpll0_disable(struct drm_i915_private *dev_priv) dev_priv->cdclk_pll.vco = 0; } -static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) -{ - int ret; - u32 val; - - /* inform PCU we want to change CDCLK */ - val = SKL_CDCLK_PREPARE_FOR_CHANGE; - mutex_lock(&dev_priv->rps.hw_lock); - ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val); - mutex_unlock(&dev_priv->rps.hw_lock); - - return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE); -} - -static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) -{ - return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0; -} - static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) { u32 freq_select, pcu_ack; + int ret; WARN_ON((cdclk == 24000) != (vco == 0)); DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); - if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) { - DRM_ERROR("failed to inform PCU about cdclk change\n"); + mutex_lock(&dev_priv->rps.hw_lock); + ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, + SKL_CDCLK_PREPARE_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, 3); + mutex_unlock(&dev_priv->rps.hw_lock); + if (ret) { + DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", + ret); return; } @@ -16934,7 +16923,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = crtc->config; - int pixclk = 0; __drm_atomic_helper_crtc_destroy_state(&crtc_state->base); memset(crtc_state, 0, sizeof(*crtc_state)); @@ -16946,23 +16934,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->base.enabled = crtc_state->base.enable; crtc->active = crtc_state->base.active; - if (crtc_state->base.active) { + if (crtc_state->base.active) dev_priv->active_crtcs |= 1 << crtc->pipe; - if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - pixclk = ilk_pipe_pixel_rate(crtc_state); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - pixclk = crtc_state->base.adjusted_mode.crtc_clock; - else - WARN_ON(dev_priv->display.modeset_calc_cdclk); - - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) - pixclk = DIV_ROUND_UP(pixclk * 100, 95); - } - - dev_priv->min_pixclk[crtc->pipe] = pixclk; - readout_plane_state(crtc); DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", @@ -17035,6 +17009,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) } for_each_intel_crtc(dev, crtc) { + int pixclk = 0; + crtc->base.hwmode = crtc->config->base.adjusted_mode; memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); @@ -17062,10 +17038,23 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) */ crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; + if (INTEL_GEN(dev_priv) >= 9 || 
IS_BROADWELL(dev_priv)) + pixclk = ilk_pipe_pixel_rate(crtc->config); + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + pixclk = crtc->config->base.adjusted_mode.crtc_clock; + else + WARN_ON(dev_priv->display.modeset_calc_cdclk); + + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ + if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled) + pixclk = DIV_ROUND_UP(pixclk * 100, 95); + drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); update_scanline_offset(crtc); } + dev_priv->min_pixclk[crtc->pipe] = pixclk; + intel_pipe_config_sanity_check(dev_priv, crtc->config); } } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 90283edcafba..0b8e8eb85c19 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -355,7 +355,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, struct intel_dp *intel_dp); static void intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, - struct intel_dp *intel_dp); + struct intel_dp *intel_dp, + bool force_disable_vdd); static void intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp); @@ -516,7 +517,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) /* init power sequencer on this pipe and port */ intel_dp_init_panel_power_sequencer(dev, intel_dp); - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true); /* * Even vdd force doesn't work until we've made @@ -553,7 +554,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) * Only the HW needs to be reprogrammed, the SW state is fixed and * has been setup during connector init. */ - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false); return 0; } @@ -636,7 +637,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) port_name(port), pipe_name(intel_dp->pps_pipe)); intel_dp_init_panel_power_sequencer(dev, intel_dp); - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false); } void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) @@ -2912,7 +2913,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp) /* init power sequencer on this pipe and port */ intel_dp_init_panel_power_sequencer(dev, intel_dp); - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true); } static void vlv_pre_enable_dp(struct intel_encoder *encoder, @@ -4014,8 +4015,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) return; /* FIXME: we need to synchronize this sort of stuff with hardware - * readout */ - if (WARN_ON_ONCE(!intel_dp->lane_count)) + * readout. Currently fast link training doesn't work on boot-up. 
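+ * Bail out silently instead of WARNing until that is sorted out.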
*/ + if (!intel_dp->lane_count) return; /* if link training is requested we should perform it always */
@@ -5055,7 +5056,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, static void intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, - struct intel_dp *intel_dp) + struct intel_dp *intel_dp, + bool force_disable_vdd) { struct drm_i915_private *dev_priv = to_i915(dev); u32 pp_on, pp_off, pp_div, port_sel = 0;
@@ -5068,6 +5070,31 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, intel_pps_get_registers(dev_priv, intel_dp, &regs); + /* + * On some VLV machines the BIOS can leave the VDD + * enabled even on power sequencers which aren't + * hooked up to any port. This would mess up the + * power domain tracking the first time we pick + * one of these power sequencers for use since + * edp_panel_vdd_on() would notice that the VDD was + * already on and therefore wouldn't grab the power + * domain reference. Disable VDD first to avoid this. + * This also avoids spuriously turning the VDD on as + * soon as the new power sequencer gets initialized. + */ + if (force_disable_vdd) { + u32 pp = ironlake_get_pp_control(intel_dp); + + WARN(pp & PANEL_POWER_ON, "Panel power already on\n"); + + if (pp & EDP_FORCE_VDD) + DRM_DEBUG_KMS("VDD already on, disabling first\n"); + + pp &= ~EDP_FORCE_VDD; + + I915_WRITE(regs.pp_ctrl, pp); + } + pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
@@ -5122,7 +5149,7 @@ static void intel_dp_pps_init(struct drm_device *dev, vlv_initial_power_sequencer_setup(intel_dp); } else { intel_dp_init_panel_power_sequencer(dev, intel_dp); - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false); } }
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index 0d8ff0034b88..47cd1b20fb3e 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -300,7 +300,8 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv, mutex_lock(&dev_priv->sb_lock); vlv_iosf_sb_write(dev_priv, port, cfg1, 0); vlv_iosf_sb_write(dev_priv, port, cfg0, - CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value)); + CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO | + CHV_GPIO_GPIOTXSTATE(value)); mutex_unlock(&dev_priv->sb_lock); }
@@ -376,11 +377,11 @@ static const fn_mipi_elem_exec exec_elem[] = { */ static const char * const seq_name[] = { - [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET", + [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET", [MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP", [MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON", [MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF", - [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET", + [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET", [MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON", [MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF", [MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 0a09024d6ca3..d4961fa20c73 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1968,12 +1968,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine) ret); } - ret = logical_ring_init(engine); - if (ret) { - lrc_destroy_wa_ctx_obj(engine); - } - - return ret; + return logical_ring_init(engine); } int logical_xcs_ring_init(struct
intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 568d194435fd..10610b4077d1 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -216,7 +216,8 @@ static void intel_overlay_submit_request(struct intel_overlay *overlay, { GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip, &overlay->i915->drm.struct_mutex)); - overlay->last_flip.retire = retire; + i915_gem_active_set_retire_fn(&overlay->last_flip, retire, + &overlay->i915->drm.struct_mutex); i915_gem_active_set(&overlay->last_flip, req); i915_add_request(req); } @@ -837,8 +838,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret) goto out_unpin; - i915_gem_track_fb(overlay->vma->obj, new_bo, - INTEL_FRONTBUFFER_OVERLAY(pipe)); + i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL, + vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe)); overlay->old_vma = overlay->vma; overlay->vma = vma; @@ -1428,6 +1429,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) overlay->contrast = 75; overlay->saturation = 146; + init_request_active(&overlay->last_flip, NULL); + regs = intel_overlay_map_regs(overlay); if (!regs) goto out_unpin_bo; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ce03d9d5aca6..9f3b78dfa997 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2967,24 +2967,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv) return 0; } -static int -intel_do_sagv_disable(struct drm_i915_private *dev_priv) -{ - int ret; - uint32_t temp = GEN9_SAGV_DISABLE; - - ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL, - &temp); - if (ret) - return ret; - else - return temp & GEN9_SAGV_IS_DISABLED; -} - int intel_disable_sagv(struct drm_i915_private *dev_priv) { - int ret, result; + int ret; if (!intel_has_sagv(dev_priv)) return 0; @@ -2996,25 +2982,23 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->rps.hw_lock); /* bspec says to keep retrying for at least 1 ms */ - ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1); + ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL, + GEN9_SAGV_DISABLE, + GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, + 1); mutex_unlock(&dev_priv->rps.hw_lock); - if (ret == -ETIMEDOUT) { - DRM_ERROR("Request to disable SAGV timed out\n"); - return -ETIMEDOUT; - } - /* * Some skl systems, pre-release machines in particular, * don't actually have an SAGV. 
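 * On those machines the PCODE request fails with -ENXIO, which is
 * treated below as "SAGV not controlled" rather than as an error.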
*/ - if (IS_SKYLAKE(dev_priv) && result == -ENXIO) { + if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; return 0; - } else if (result < 0) { - DRM_ERROR("Failed to disable the SAGV\n"); - return result; + } else if (ret < 0) { + DRM_ERROR("Failed to disable the SAGV (%d)\n", ret); + return ret; } dev_priv->sagv_status = I915_SAGV_DISABLED;
@@ -7897,6 +7881,81 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, return 0; } +static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox, + u32 request, u32 reply_mask, u32 reply, + u32 *status) +{ + u32 val = request; + + *status = sandybridge_pcode_read(dev_priv, mbox, &val); + + return *status || ((val & reply_mask) == reply); +} + +/** + * skl_pcode_request - send PCODE request until acknowledgment + * @dev_priv: device private + * @mbox: PCODE mailbox ID the request is targeted for + * @request: request ID + * @reply_mask: mask used to check for request acknowledgment + * @reply: value used to check for request acknowledgment + * @timeout_base_ms: timeout for polling with preemption enabled + * + * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE + * reports an error, or an overall timeout of @timeout_base_ms+10 ms expires. + * The request is acknowledged once the PCODE reply dword equals @reply after + * applying @reply_mask. Polling is first attempted with preemption enabled + * for @timeout_base_ms and, if this times out, for another 10 ms with + * preemption disabled. + * + * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some + * other error as reported by PCODE. + */ +int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, + u32 reply_mask, u32 reply, int timeout_base_ms) +{ + u32 status; + int ret; + + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + +#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \ + &status) + + /* + * Prime the PCODE by doing a request first. Normally it guarantees + * that a subsequent request, at most @timeout_base_ms later, succeeds. + * _wait_for() doesn't guarantee when its passed condition is evaluated + * first, so send the first request explicitly. + */ + if (COND) { + ret = 0; + goto out; + } + ret = _wait_for(COND, timeout_base_ms * 1000, 10); + if (!ret) + goto out; + + /* + * The above can time out if the number of requests was low (2 in the + * worst case) _and_ PCODE was busy for some reason even after a + * (queued) request and @timeout_base_ms delay. As a workaround, retry + * the poll with preemption disabled to maximize the number of + * requests. Increase the timeout from @timeout_base_ms to 10 ms to + * account for interrupts that could reduce the number of these + * requests. + */ + DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n"); + WARN_ON_ONCE(timeout_base_ms > 3); + preempt_disable(); + ret = wait_for_atomic(COND, 10); + preempt_enable(); + +out: + return ret ? ret : status; +#undef COND +} + static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) { /*
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 7b488e2793d9..c6be70686b4a 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -825,13 +825,9 @@ void intel_psr_init(struct drm_device *dev) dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; - /* Per platform default */ - if (i915.enable_psr == -1) { - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - i915.enable_psr = 1; - else - i915.enable_psr = 0; - } + /* Per platform default: all disabled. */ + if (i915.enable_psr == -1) + i915.enable_psr = 0; /* Set link_standby x link_off defaults */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 356c662ad453..87b4af092d54 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -1039,7 +1039,18 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) { - I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); + u32 val; + + /* + * On driver load, a pipe may be active and driving a DSI display. + * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck + * (and never recovering) in this case. intel_dsi_post_disable() will + * clear it when we turn off the display. + */ + val = I915_READ(DSPCLK_GATE_D); + val &= DPOUNIT_CLOCK_GATE_DISABLE; + val |= VRHUNIT_CLOCK_GATE_DISABLE; + I915_WRITE(DSPCLK_GATE_D, val); /* * Disable trickle feed and enable pnd deadline calculation diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index d7be0d94ba4d..0bffd3f0c15d 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -62,7 +62,7 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d) { d->wake_count++; hrtimer_start_range_ns(&d->timer, - ktime_set(0, NSEC_PER_MSEC), + NSEC_PER_MSEC, NSEC_PER_MSEC, HRTIMER_MODE_REL); } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index cd06cfd94687..d8bc59c7e261 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -225,16 +225,14 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = ((unsigned long)vmf->virtual_address - - vma->vm_start) >> PAGE_SHIFT; + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; pfn = page_to_pfn(pages[pgoff]); - VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, + VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, pfn, pfn << PAGE_SHIFT); - ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, - __pfn_to_pfn_t(pfn, PFN_DEV)); + ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); out_unlock: mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 192b2d3a79cb..ab1dd020eb04 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -18,33 +18,24 @@ #include "msm_drv.h" #include "msm_gem.h" -static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) +static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock) { - if (!mutex_is_locked(mutex)) + switch (mutex_trylock_recursive(&dev->struct_mutex)) { + case MUTEX_TRYLOCK_FAILED: return false; -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) - return mutex->owner == task; -#else - /* Since UP may be pre-empted, we cannot assume that we own the lock */ - return false; -#endif -} + case MUTEX_TRYLOCK_SUCCESS: + *unlock = true; + return true; -static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock) -{ - if 
(!mutex_trylock(&dev->struct_mutex)) { - if (!mutex_is_locked_by(&dev->struct_mutex, current)) - return false; + case MUTEX_TRYLOCK_RECURSIVE: *unlock = false; - } else { - *unlock = true; + return true; } - return true; + BUG(); } - static unsigned long msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) { diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index f2f348f0160c..a6126c93f215 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -330,7 +330,7 @@ nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait) __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); - kt = ktime_set(0, sleep_time); + kt = sleep_time; schedule_hrtimeout(&kt, HRTIMER_MODE_REL); sleep_time *= 2; if (sleep_time > NSEC_PER_MSEC) diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index d4e1e11466f8..4a90c690f09e 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -398,8 +398,7 @@ static int fault_1d(struct drm_gem_object *obj, pgoff_t pgoff; /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = ((unsigned long)vmf->virtual_address - - vma->vm_start) >> PAGE_SHIFT; + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; if (omap_obj->pages) { omap_gem_cpu_sync(obj, pgoff); @@ -409,11 +408,10 @@ static int fault_1d(struct drm_gem_object *obj, pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff; } - VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, + VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, pfn, pfn << PAGE_SHIFT); - return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, - __pfn_to_pfn_t(pfn, PFN_DEV)); + return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); } /* Special handling for the case of faulting in 2d tiled buffers */ @@ -427,7 +425,7 @@ static int fault_2d(struct drm_gem_object *obj, struct page *pages[64]; /* XXX is this too much to have on stack? 
*/ unsigned long pfn; pgoff_t pgoff, base_pgoff; - void __user *vaddr; + unsigned long vaddr; int i, ret, slots; /* @@ -447,8 +445,7 @@ static int fault_2d(struct drm_gem_object *obj, const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = ((unsigned long)vmf->virtual_address - - vma->vm_start) >> PAGE_SHIFT; + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; /* * Actual address we start mapping at is rounded down to previous slot @@ -459,7 +456,7 @@ static int fault_2d(struct drm_gem_object *obj, /* figure out buffer width in slots */ slots = omap_obj->width >> priv->usergart[fmt].slot_shift; - vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT); + vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT); entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last]; @@ -503,12 +500,11 @@ static int fault_2d(struct drm_gem_object *obj, pfn = entry->paddr >> PAGE_SHIFT; - VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address, + VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, pfn, pfn << PAGE_SHIFT); for (i = n; i > 0; i--) { - vm_insert_mixed(vma, (unsigned long)vaddr, - __pfn_to_pfn_t(pfn, PFN_DEV)); + vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV)); pfn += priv->usergart[fmt].stride_pfn; vaddr += PAGE_SIZE * m; } diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 21b6732425c5..c829cfb02fc4 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -603,8 +603,9 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) GOP_VBIOS_CONTENT *vbios; VFCT_IMAGE_HEADER *vhdr; - if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size))) + if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr))) return false; + tbl_size = hdr->length; if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); goto out_unmap; diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index c08e5279eeac..7d853e6b5ff0 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -452,10 +452,10 @@ static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!bo->pages) return VM_FAULT_SIGBUS; - offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT; + offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; page = bo->pages[offset]; - err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); + err = vm_insert_page(vma, vmf->address, page); switch (err) { case -EAGAIN: case 0: diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 47c2fe659fe3..3a763f7cb743 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -539,7 +539,7 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown) } drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); - tilcdc_crtc->last_vblank = ktime_set(0, 0); + tilcdc_crtc->last_vblank = 0; tilcdc_crtc->enabled = false; mutex_unlock(&tilcdc_crtc->enable_lock); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 4748aedc933a..68ef993ab431 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -101,7 +101,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) struct page *page; int ret; int i; - unsigned long address = (unsigned long)vmf->virtual_address; + unsigned long 
address = vmf->address; int retval = VM_FAULT_NOPAGE; struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 818e70712b18..3c0c4bd3f750 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -107,14 +107,13 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) unsigned int page_offset; int ret = 0; - page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> - PAGE_SHIFT; + page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; if (!obj->pages) return VM_FAULT_SIGBUS; page = obj->pages[page_offset]; - ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); + ret = vm_insert_page(vma, vmf->address, page); switch (ret) { case -EAGAIN: case 0: diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index f36c14729b55..477e07f0ecb6 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -54,7 +54,7 @@ static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_vgem_gem_object *obj = vma->vm_private_data; /* We don't use vmf->pgoff since that has the fake offset */ - unsigned long vaddr = (unsigned long)vmf->virtual_address; + unsigned long vaddr = vmf->address; struct page *page; page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping, diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index cb75f0663ba0..11288ffa4af6 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -88,8 +88,8 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane, (vgdev, handle, 0, cpu_to_le32(plane->state->src_w >> 16), cpu_to_le32(plane->state->src_h >> 16), - plane->state->src_x >> 16, - plane->state->src_y >> 16, NULL); + cpu_to_le32(plane->state->src_x >> 16), + cpu_to_le32(plane->state->src_y >> 16), NULL); } } else { handle = 0; diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 974f9410474b..43ea0dc957d2 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -109,8 +109,10 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev) spin_lock(&vgdev->free_vbufs_lock); for (i = 0; i < count; i++) { - if (WARN_ON(list_empty(&vgdev->free_vbufs))) + if (WARN_ON(list_empty(&vgdev->free_vbufs))) { + spin_unlock(&vgdev->free_vbufs_lock); return; + } vbuf = list_first_entry(&vgdev->free_vbufs, struct virtio_gpu_vbuffer, list); list_del(&vbuf->list); @@ -295,6 +297,8 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work) static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, struct virtio_gpu_vbuffer *vbuf) + __releases(&vgdev->ctrlq.qlock) + __acquires(&vgdev->ctrlq.qlock) { struct virtqueue *vq = vgdev->ctrlq.vq; struct scatterlist *sgs[3], vcmd, vout, vresp; |