 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 952 +++++++++++++++------
 1 file changed, 744 insertions(+), 208 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 86fd4420f128..2855bb918535 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -34,6 +34,7 @@
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
+#include "amdgpu_dm_trace.h"
#include "vid.h"
#include "amdgpu.h"
@@ -94,12 +95,16 @@
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
-#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
-#endif
+#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
+#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
+#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -209,6 +214,9 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
+static const struct drm_format_info *
+amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+
/*
* dm_vblank_get_counter
*
@@ -583,7 +591,7 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
- struct dm_comressor_info *compressor = &adev->dm.compressor;
+ struct dm_compressor_info *compressor = &adev->dm.compressor;
struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
struct drm_display_mode *mode;
unsigned long max_size = 0;
@@ -880,44 +888,60 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
}
-static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
- struct drm_atomic_state *state)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
- struct drm_connector *connector;
- struct drm_crtc *crtc;
- struct amdgpu_dm_connector *amdgpu_dm_connector;
- struct drm_connector_state *conn_state;
- struct dm_crtc_state *acrtc_state;
- struct drm_crtc_state *crtc_state;
- struct dc_stream_state *stream;
- struct drm_device *dev = adev_to_drm(adev);
+ uint64_t pt_base;
+ uint32_t logical_addr_low;
+ uint32_t logical_addr_high;
+ uint32_t agp_base, agp_bot, agp_top;
+ PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
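+ /*
+ * Unit note (inferred from the shifts used in this function): the
+ * system aperture registers take 256KiB-granular addresses (>> 18),
+ * the AGP window registers take 16MiB-granular addresses (>> 24),
+ * and the GART page table addresses are in 4KiB pages (>> 12).
+ */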
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
- amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
- conn_state = connector->state;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+ * Raven2 has a HW issue that prevents it from using VRAM beyond
+ * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, raise the
+ * system aperture high address by 1 to avoid the VM fault and
+ * hardware hang.
+ */
+ logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
+ else
+ logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
- if (!(conn_state && conn_state->crtc))
- continue;
+ agp_base = 0;
+ agp_bot = adev->gmc.agp_start >> 24;
+ agp_top = adev->gmc.agp_end >> 24;
- crtc = conn_state->crtc;
- acrtc_state = to_dm_crtc_state(crtc->state);
- if (!(acrtc_state && acrtc_state->stream))
- continue;
+ page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
+ page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
+ page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
+ page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
+ page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
+ page_table_base.low_part = lower_32_bits(pt_base);
- stream = acrtc_state->stream;
+ pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
+ pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
+
+ pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
+ pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
+ pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
+
+ pa_config->system_aperture.fb_base = adev->gmc.fb_start;
+ pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
+ pa_config->system_aperture.fb_top = adev->gmc.fb_end;
+
+ pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
+ pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
+ pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
+
+ pa_config->is_hvm_enabled = 0;
- if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
- amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
- amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
- amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
- conn_state = drm_atomic_get_connector_state(state, connector);
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- crtc_state->mode_changed = true;
- }
- }
}
+#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
@@ -973,6 +997,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
case CHIP_RAVEN:
case CHIP_RENOIR:
init_data.flags.gpu_vm_support = true;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ init_data.flags.disable_dmcu = true;
break;
default:
break;
@@ -1026,6 +1052,17 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (adev->asic_type == CHIP_RENOIR) {
+ struct dc_phy_addr_space_config pa_config;
+
+ mmhub_read_system_context(adev, &pa_config);
+
+ /* Call the DC init_memory func */
+ dc_setup_system_context(adev->dm.dc, &pa_config);
+ }
+#endif
+
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
DRM_ERROR(
@@ -1072,6 +1109,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
+
DRM_DEBUG_DRIVER("KMS initialized.\n");
return 0;
@@ -1168,10 +1206,10 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_RENOIR:
-#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
-#endif
+ case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_VANGOGH:
return 0;
case CHIP_NAVI12:
fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
@@ -1267,8 +1305,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
case CHIP_RENOIR:
dmub_asic = DMUB_ASIC_DCN21;
fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
break;
-#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
case CHIP_SIENNA_CICHLID:
dmub_asic = DMUB_ASIC_DCN30;
fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
@@ -1277,7 +1316,14 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_asic = DMUB_ASIC_DCN30;
fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
break;
-#endif
+ case CHIP_VANGOGH:
+ dmub_asic = DMUB_ASIC_DCN301;
+ fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
+ break;
+ case CHIP_DIMGREY_CAVEFISH:
+ dmub_asic = DMUB_ASIC_DCN302;
+ fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
+ break;
default:
/* ASIC doesn't support DMUB. */
@@ -2095,6 +2141,7 @@ const struct amdgpu_ip_block_version dm_ip_block =
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
+ .get_format_info = amd_get_format_info,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = amdgpu_dm_atomic_check,
.atomic_commit = amdgpu_dm_atomic_commit,
@@ -3396,10 +3443,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_RENOIR:
-#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
-#endif
+ case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_VANGOGH:
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -3558,31 +3605,27 @@ static int dm_early_init(void *handle)
break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ case CHIP_VANGOGH:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
break;
-#endif
case CHIP_NAVI10:
case CHIP_NAVI12:
-#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
-#endif
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_NAVI14:
+ case CHIP_DIMGREY_CAVEFISH:
adev->mode_info.num_crtc = 5;
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
- case CHIP_RENOIR:
- adev->mode_info.num_crtc = 4;
- adev->mode_info.num_hpd = 4;
- adev->mode_info.num_dig = 4;
- break;
+#endif
default:
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
@@ -3686,78 +3729,84 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
return 0;
}
-static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
- uint64_t *tiling_flags, bool *tmz_surface)
+static void
+fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
+ uint64_t tiling_flags)
{
- struct amdgpu_bo *rbo;
- int r;
-
- if (!amdgpu_fb) {
- *tiling_flags = 0;
- *tmz_surface = false;
- return 0;
- }
+ /* Fill GFX8 params */
+ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
+ unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
- rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
- r = amdgpu_bo_reserve(rbo, false);
+ bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+ bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+ mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+ tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+ num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
- if (unlikely(r)) {
- /* Don't show error message when returning -ERESTARTSYS */
- if (r != -ERESTARTSYS)
- DRM_ERROR("Unable to reserve buffer: %d\n", r);
- return r;
+ /* XXX fix me for VI */
+ tiling_info->gfx8.num_banks = num_banks;
+ tiling_info->gfx8.array_mode =
+ DC_ARRAY_2D_TILED_THIN1;
+ tiling_info->gfx8.tile_split = tile_split;
+ tiling_info->gfx8.bank_width = bankw;
+ tiling_info->gfx8.bank_height = bankh;
+ tiling_info->gfx8.tile_aspect = mtaspect;
+ tiling_info->gfx8.tile_mode =
+ DC_ADDR_SURF_MICRO_TILING_DISPLAY;
+ } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
+ == DC_ARRAY_1D_TILED_THIN1) {
+ tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
}
- if (tiling_flags)
- amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
-
- if (tmz_surface)
- *tmz_surface = amdgpu_bo_encrypted(rbo);
-
- amdgpu_bo_unreserve(rbo);
-
- return r;
+ tiling_info->gfx8.pipe_config =
+ AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}
-static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
-{
- uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
-
- return offset ? (address + offset * 256) : 0;
+static void
+fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
+ union dc_tiling_info *tiling_info)
+{
+ tiling_info->gfx9.num_pipes =
+ adev->gfx.config.gb_addr_config_fields.num_pipes;
+ tiling_info->gfx9.num_banks =
+ adev->gfx.config.gb_addr_config_fields.num_banks;
+ tiling_info->gfx9.pipe_interleave =
+ adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
+ tiling_info->gfx9.num_shader_engines =
+ adev->gfx.config.gb_addr_config_fields.num_se;
+ tiling_info->gfx9.max_compressed_frags =
+ adev->gfx.config.gb_addr_config_fields.max_compress_frags;
+ tiling_info->gfx9.num_rb_per_se =
+ adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
+ tiling_info->gfx9.shaderEnable = 1;
+ if (adev->asic_type == CHIP_SIENNA_CICHLID ||
+ adev->asic_type == CHIP_NAVY_FLOUNDER ||
+ adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
+ adev->asic_type == CHIP_VANGOGH)
+ tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}
static int
-fill_plane_dcc_attributes(struct amdgpu_device *adev,
- const struct amdgpu_framebuffer *afb,
- const enum surface_pixel_format format,
- const enum dc_rotation_angle rotation,
- const struct plane_size *plane_size,
- const union dc_tiling_info *tiling_info,
- const uint64_t info,
- struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address,
- bool force_disable_dcc)
+validate_dcc(struct amdgpu_device *adev,
+ const enum surface_pixel_format format,
+ const enum dc_rotation_angle rotation,
+ const union dc_tiling_info *tiling_info,
+ const struct dc_plane_dcc_param *dcc,
+ const struct dc_plane_address *address,
+ const struct plane_size *plane_size)
{
struct dc *dc = adev->dm.dc;
struct dc_dcc_surface_param input;
struct dc_surface_dcc_cap output;
- uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
- uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
- uint64_t dcc_address;
memset(&input, 0, sizeof(input));
memset(&output, 0, sizeof(output));
- if (force_disable_dcc)
+ if (!dcc->enable)
return 0;
- if (!offset)
- return 0;
-
- if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
- return 0;
-
- if (!dc->cap_funcs.get_dcc_compression_cap)
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
+ !dc->cap_funcs.get_dcc_compression_cap)
return -EINVAL;
input.format = format;
@@ -3776,17 +3825,508 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
if (!output.capable)
return -EINVAL;
- if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
+ if (dcc->independent_64b_blks == 0 &&
+ output.grph.rgb.independent_64b_blks != 0)
return -EINVAL;
- dcc->enable = 1;
- dcc->meta_pitch =
- AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
- dcc->independent_64b_blks = i64b;
+ return 0;
+}
+
+static bool
+modifier_has_dcc(uint64_t modifier)
+{
+ return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
+}
+
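+/*
+ * GFX9+ swizzle mode 0 is linear, so DRM_FORMAT_MOD_LINEAR can map to
+ * swizzle mode 0 below (assumption: ADDR_SW_LINEAR == 0 on all GFX9+
+ * parts covered here).
+ */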
+static unsigned
+modifier_gfx9_swizzle_mode(uint64_t modifier)
+{
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return 0;
+
+ return AMD_FMT_MOD_GET(TILE, modifier);
+}
+
+static const struct drm_format_info dcc_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
+ .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+};
+
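+/*
+ * Note: the extra planes in these tables carry DCC metadata. Plain DCC
+ * surfaces expose one metadata plane (num_planes = 2) and DCC_RETILE
+ * surfaces expose two (num_planes = 3); this reading is inferred from
+ * the num_planes/cpp values rather than stated explicitly.
+ */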
+static const struct drm_format_info dcc_retile_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
+ .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+};
+
+static const struct drm_format_info *
+lookup_format_info(const struct drm_format_info formats[],
+ int num_formats, u32 format)
+{
+ int i;
+
+ for (i = 0; i < num_formats; i++) {
+ if (formats[i].format == format)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+static const struct drm_format_info *
+amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+{
+ uint64_t modifier = cmd->modifier[0];
+
+ if (!IS_AMD_FMT_MOD(modifier))
+ return NULL;
+
+ if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
+ return lookup_format_info(dcc_retile_formats,
+ ARRAY_SIZE(dcc_retile_formats),
+ cmd->pixel_format);
- dcc_address = get_dcc_address(afb->address, info);
- address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
- address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
+ if (AMD_FMT_MOD_GET(DCC, modifier))
+ return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
+ cmd->pixel_format);
+
+ /* returning NULL will cause the default format structs to be used. */
+ return NULL;
+}
+
+static void
+fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
+ union dc_tiling_info *tiling_info,
+ uint64_t modifier)
+{
+ unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
+ unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
+ unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
+ unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
+
+ fill_gfx9_tiling_info_from_device(adev, tiling_info);
+
+ if (!IS_AMD_FMT_MOD(modifier))
+ return;
+
+ tiling_info->gfx9.num_pipes = 1u << pipes_log2;
+ tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
+
+ if (adev->family >= AMDGPU_FAMILY_NV) {
+ tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
+ } else {
+ tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
+
+ /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
+ }
+}
+
+enum dm_micro_swizzle {
+ MICRO_SWIZZLE_Z = 0,
+ MICRO_SWIZZLE_S = 1,
+ MICRO_SWIZZLE_D = 2,
+ MICRO_SWIZZLE_R = 3
+};
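+/*
+ * Assumption: the micro-tile class occupies the low two bits of the
+ * GFX9 swizzle mode, hence the "& 3" when deriving this enum from a
+ * modifier below.
+ */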
+
+static bool dm_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ const struct drm_format_info *info = drm_format_info(format);
+
+ enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
+
+ if (!info)
+ return false;
+
+ /*
+ * We always have to allow this modifier, because core DRM still
+ * checks LINEAR support if userspace does not provide modifiers.
+ */
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ /*
+ * The arbitrary tiling support for multiplane formats has not been hooked
+ * up.
+ */
+ if (info->num_planes > 1)
+ return false;
+
+ /*
+ * For D swizzle the canonical modifier depends on the bpp, so check
+ * it here.
+ */
+ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
+ adev->family >= AMDGPU_FAMILY_NV) {
+ if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
+ return false;
+ }
+
+ if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
+ info->cpp[0] < 8)
+ return false;
+
+ if (modifier_has_dcc(modifier)) {
+ /* Per radeonsi comments, 16/64 bpp are more complicated. */
+ if (info->cpp[0] != 4)
+ return false;
+ }
+
+ return true;
+}
+
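+/*
+ * Grows the modifier array by doubling; on allocation failure the list
+ * is freed and set to NULL so later add_modifier() calls become no-ops
+ * and get_plane_modifiers() can fail with -ENOMEM (behavior read from
+ * the code, not separately documented).
+ */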
+static void
+add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
+{
+ if (!*mods)
+ return;
+
+ if (*cap - *size < 1) {
+ uint64_t new_cap = *cap * 2;
+ uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
+
+ if (!new_mods) {
+ kfree(*mods);
+ *mods = NULL;
+ return;
+ }
+
+ memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
+ kfree(*mods);
+ *mods = new_mods;
+ *cap = new_cap;
+ }
+
+ (*mods)[*size] = mod;
+ *size += 1;
+}
+
+static void
+add_gfx9_modifiers(const struct amdgpu_device *adev,
+ uint64_t **mods, uint64_t *size, uint64_t *capacity)
+{
+ int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
+ int pipe_xor_bits = min(8, pipes +
+ ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
+ int bank_xor_bits = min(8 - pipe_xor_bits,
+ ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
+ int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
+ ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
+
+ if (adev->family == AMDGPU_FAMILY_RV) {
+ /* Raven2 and later */
+ bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
+
+ /*
+ * No _D DCC swizzles yet because we only allow 32bpp, which
+ * doesn't support _D on DCN
+ */
+
+ if (has_constant_encode) {
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
+ AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
+ }
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
+ AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
+
+ if (has_constant_encode) {
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
+ AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_RETILE, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ AMD_FMT_MOD_SET(RB, rb) |
+ AMD_FMT_MOD_SET(PIPE, pipes));
+ }
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
+ AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_RETILE, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
+ AMD_FMT_MOD_SET(RB, rb) |
+ AMD_FMT_MOD_SET(PIPE, pipes));
+ }
+
+ /*
+ * Only supported for 64bpp on Raven, will be filtered on format in
+ * dm_plane_format_mod_supported.
+ */
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
+
+ if (adev->family == AMDGPU_FAMILY_RV) {
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
+ }
+
+ /*
+ * Only supported for 64bpp on Raven, will be filtered on format in
+ * dm_plane_format_mod_supported.
+ */
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
+
+ if (adev->family == AMDGPU_FAMILY_RV) {
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
+ }
+}
+
+static void
+add_gfx10_1_modifiers(const struct amdgpu_device *adev,
+ uint64_t **mods, uint64_t *size, uint64_t *capacity)
+{
+ int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_RETILE, 1) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
+
+ /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
+}
+
+static void
+add_gfx10_3_modifiers(const struct amdgpu_device *adev,
+ uint64_t **mods, uint64_t *size, uint64_t *capacity)
+{
+ int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
+ int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(PACKERS, pkrs) |
+ AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(PACKERS, pkrs) |
+ AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_RETILE, 1) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(PACKERS, pkrs));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(PACKERS, pkrs));
+
+ /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
+}
+
+static int
+get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
+{
+ uint64_t size = 0, capacity = 128;
+ *mods = NULL;
+
+ /* We have not hooked up any pre-GFX9 modifiers. */
+ if (adev->family < AMDGPU_FAMILY_AI)
+ return 0;
+
+ *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
+
+ if (plane_type == DRM_PLANE_TYPE_CURSOR) {
+ add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
+ add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
+ return *mods ? 0 : -ENOMEM;
+ }
+
+ switch (adev->family) {
+ case AMDGPU_FAMILY_AI:
+ case AMDGPU_FAMILY_RV:
+ add_gfx9_modifiers(adev, mods, &size, &capacity);
+ break;
+ case AMDGPU_FAMILY_NV:
+ case AMDGPU_FAMILY_VGH:
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ add_gfx10_3_modifiers(adev, mods, &size, &capacity);
+ else
+ add_gfx10_1_modifiers(adev, mods, &size, &capacity);
+ break;
+ }
+
+ add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
+
+ /* INVALID marks the end of the list. */
+ add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
+
+ if (!*mods)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int
+fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
+ const struct amdgpu_framebuffer *afb,
+ const enum surface_pixel_format format,
+ const enum dc_rotation_angle rotation,
+ const struct plane_size *plane_size,
+ union dc_tiling_info *tiling_info,
+ struct dc_plane_dcc_param *dcc,
+ struct dc_plane_address *address,
+ const bool force_disable_dcc)
+{
+ const uint64_t modifier = afb->base.modifier;
+ int ret;
+
+ fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
+ tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
+
+ if (modifier_has_dcc(modifier) && !force_disable_dcc) {
+ uint64_t dcc_address = afb->address + afb->base.offsets[1];
+
+ dcc->enable = 1;
+ dcc->meta_pitch = afb->base.pitches[1];
+ dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
+
+ address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
+ address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
+ }
+
+ ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
+ if (ret)
+ return ret;
return 0;
}
@@ -3815,6 +4355,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
address->tmz_surface = tmz_surface;
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ uint64_t addr = afb->address + fb->offsets[0];
+
plane_size->surface_size.x = 0;
plane_size->surface_size.y = 0;
plane_size->surface_size.width = fb->width;
@@ -3823,9 +4365,10 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
fb->pitches[0] / fb->format->cpp[0];
address->type = PLN_ADDR_TYPE_GRAPHICS;
- address->grph.addr.low_part = lower_32_bits(afb->address);
- address->grph.addr.high_part = upper_32_bits(afb->address);
+ address->grph.addr.low_part = lower_32_bits(addr);
+ address->grph.addr.high_part = upper_32_bits(addr);
} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
+ uint64_t luma_addr = afb->address + fb->offsets[0];
uint64_t chroma_addr = afb->address + fb->offsets[1];
plane_size->surface_size.x = 0;
@@ -3846,83 +4389,25 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
address->video_progressive.luma_addr.low_part =
- lower_32_bits(afb->address);
+ lower_32_bits(luma_addr);
address->video_progressive.luma_addr.high_part =
- upper_32_bits(afb->address);
+ upper_32_bits(luma_addr);
address->video_progressive.chroma_addr.low_part =
lower_32_bits(chroma_addr);
address->video_progressive.chroma_addr.high_part =
upper_32_bits(chroma_addr);
}
- /* Fill GFX8 params */
- if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
- unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
-
- bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
- bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
- mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
- tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
- num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
-
- /* XXX fix me for VI */
- tiling_info->gfx8.num_banks = num_banks;
- tiling_info->gfx8.array_mode =
- DC_ARRAY_2D_TILED_THIN1;
- tiling_info->gfx8.tile_split = tile_split;
- tiling_info->gfx8.bank_width = bankw;
- tiling_info->gfx8.bank_height = bankh;
- tiling_info->gfx8.tile_aspect = mtaspect;
- tiling_info->gfx8.tile_mode =
- DC_ADDR_SURF_MICRO_TILING_DISPLAY;
- } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
- == DC_ARRAY_1D_TILED_THIN1) {
- tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
- }
-
- tiling_info->gfx8.pipe_config =
- AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
-
- if (adev->asic_type == CHIP_VEGA10 ||
- adev->asic_type == CHIP_VEGA12 ||
- adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_NAVI10 ||
- adev->asic_type == CHIP_NAVI14 ||
- adev->asic_type == CHIP_NAVI12 ||
-#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
- adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER ||
-#endif
- adev->asic_type == CHIP_RENOIR ||
- adev->asic_type == CHIP_RAVEN) {
- /* Fill GFX9 params */
- tiling_info->gfx9.num_pipes =
- adev->gfx.config.gb_addr_config_fields.num_pipes;
- tiling_info->gfx9.num_banks =
- adev->gfx.config.gb_addr_config_fields.num_banks;
- tiling_info->gfx9.pipe_interleave =
- adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
- tiling_info->gfx9.num_shader_engines =
- adev->gfx.config.gb_addr_config_fields.num_se;
- tiling_info->gfx9.max_compressed_frags =
- adev->gfx.config.gb_addr_config_fields.max_compress_frags;
- tiling_info->gfx9.num_rb_per_se =
- adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
- tiling_info->gfx9.swizzle =
- AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
- tiling_info->gfx9.shaderEnable = 1;
-
-#ifdef CONFIG_DRM_AMD_DC_DCN3_0
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
- tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
-#endif
- ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
- plane_size, tiling_info,
- tiling_flags, dcc, address,
- force_disable_dcc);
+ if (adev->family >= AMDGPU_FAMILY_AI) {
+ ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
+ rotation, plane_size,
+ tiling_info, dcc,
+ address,
+ force_disable_dcc);
if (ret)
return ret;
+ } else {
+ fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
}
return 0;
@@ -4122,7 +4607,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
struct drm_crtc_state *crtc_state)
{
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
- struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+ struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
struct dc_scaling_info scaling_info;
struct dc_plane_info plane_info;
int ret;
@@ -4139,10 +4624,10 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
ret = fill_dc_plane_info_and_addr(adev, plane_state,
- dm_plane_state->tiling_flags,
+ afb->tiling_flags,
&plane_info,
&dc_plane_state->address,
- dm_plane_state->tmz_surface,
+ afb->tmz_surface,
force_disable_dcc);
if (ret)
return ret;
@@ -4732,6 +5217,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
&dsc_caps,
aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
+ 0,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg))
@@ -5310,7 +5796,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
- if (dc_sink == NULL) {
+ if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
+ aconnector->base.force != DRM_FORCE_ON) {
DRM_ERROR("dc_sink is NULL!\n");
goto fail;
}
@@ -5416,6 +5903,8 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
struct drm_crtc_state *new_crtc_state;
int ret;
+ trace_amdgpu_dm_connector_atomic_check(new_con_state);
+
if (!crtc)
return 0;
@@ -5523,6 +6012,8 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
int ret = -EINVAL;
+ trace_amdgpu_dm_crtc_atomic_check(crtc_state);
+
dm_update_crtc_active_planes(crtc, crtc_state);
if (unlikely(!dm_crtc_state->stream &&
@@ -5739,10 +6230,6 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane)
dc_plane_state_retain(dm_plane_state->dc_state);
}
- /* Framebuffer hasn't been updated yet, so retain old flags. */
- dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
- dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
-
return &dm_plane_state->base;
}
@@ -5764,6 +6251,7 @@ static const struct drm_plane_funcs dm_plane_funcs = {
.reset = dm_drm_plane_reset,
.atomic_duplicate_state = dm_drm_plane_duplicate_state,
.atomic_destroy_state = dm_drm_plane_destroy_state,
+ .format_mod_supported = dm_plane_format_mod_supported,
};
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
@@ -5847,10 +6335,10 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation,
- dm_plane_state_new->tiling_flags,
+ afb->tiling_flags,
&plane_state->tiling_info, &plane_state->plane_size,
&plane_state->dcc, &plane_state->address,
- dm_plane_state_new->tmz_surface, force_disable_dcc);
+ afb->tmz_surface, force_disable_dcc);
}
return 0;
@@ -5898,6 +6386,8 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *new_crtc_state;
int ret;
+ trace_amdgpu_dm_plane_atomic_check(state);
+
dm_plane_state = to_dm_plane_state(state);
if (!dm_plane_state->dc_state)
@@ -5938,6 +6428,8 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *old_state =
drm_atomic_get_old_plane_state(new_state->state, plane);
+ trace_amdgpu_dm_atomic_update_cursor(new_state);
+
swap(plane->state->fb, new_state->fb);
plane->state->src_x = new_state->src_x;
@@ -6056,13 +6548,19 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
int num_formats;
int res = -EPERM;
unsigned int supported_rotations;
+ uint64_t *modifiers = NULL;
num_formats = get_plane_formats(plane, plane_cap, formats,
ARRAY_SIZE(formats));
+ res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
+ if (res)
+ return res;
+
res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
&dm_plane_funcs, formats, num_formats,
- NULL, plane->type, NULL);
+ modifiers, plane->type, NULL);
+ kfree(modifiers);
if (res)
return res;
@@ -7109,6 +7607,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_framebuffer *fb = new_plane_state->fb;
+ struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
bool plane_needs_flip;
struct dc_plane_state *dc_plane;
struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
@@ -7163,10 +7662,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
fill_dc_plane_info_and_addr(
dm->adev, new_plane_state,
- dm_new_plane_state->tiling_flags,
+ afb->tiling_flags,
&bundle->plane_infos[planes_count],
&bundle->flip_addrs[planes_count].address,
- dm_new_plane_state->tmz_surface, false);
+ afb->tmz_surface, false);
DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
new_plane_state->plane->index,
@@ -7501,6 +8000,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
int crtc_disable_count = 0;
bool mode_set_reset_required = false;
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_atomic_helper_calc_timestamping_constants(state);
@@ -8306,8 +8807,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
* TODO: Come up with a more elegant solution for this.
*/
for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
- struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
-
+ struct amdgpu_framebuffer *old_afb, *new_afb;
if (other->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -8351,12 +8851,12 @@ static bool should_reset_plane(struct drm_atomic_state *state,
if (old_other_state->fb->format != new_other_state->fb->format)
return true;
- old_dm_plane_state = to_dm_plane_state(old_other_state);
- new_dm_plane_state = to_dm_plane_state(new_other_state);
+ old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
+ new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
/* Tiling and DCC changes also require bandwidth updates. */
- if (old_dm_plane_state->tiling_flags !=
- new_dm_plane_state->tiling_flags)
+ if (old_afb->tiling_flags != new_afb->tiling_flags ||
+ old_afb->base.modifier != new_afb->base.modifier)
return true;
}
@@ -8587,8 +9087,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
enum dc_status status;
int ret, i;
bool lock_and_validation_needed = false;
+ struct dm_crtc_state *dm_old_crtc_state;
- amdgpu_check_debugfs_connector_property_change(adev, state);
+ trace_amdgpu_dm_atomic_check_begin(state);
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
@@ -8629,9 +9130,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed &&
- old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
+ old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
+ dm_old_crtc_state->dsc_force_changed == false)
continue;
if (!new_crtc_state->enable)
@@ -8682,17 +9186,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
- /* Prepass for updating tiling flags on new planes. */
- for_each_new_plane_in_state(state, plane, new_plane_state, i) {
- struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
- struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
-
- ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
- &new_dm_plane_state->tmz_surface);
- if (ret)
- goto fail;
- }
-
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
@@ -8886,6 +9379,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/* Must be success */
WARN_ON(ret);
+
+ trace_amdgpu_dm_atomic_check_finish(state, ret);
+
return ret;
fail:
@@ -8896,6 +9392,8 @@ fail:
else
DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+ trace_amdgpu_dm_atomic_check_finish(state, ret);
+
return ret;
}
@@ -9160,3 +9658,41 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
}
mutex_unlock(&adev->dm.dc_lock);
}
+
+void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
+ uint32_t value, const char *func_name)
+{
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ DC_ERR("invalid register write. address = 0");
+ return;
+ }
+#endif
+ cgs_write_register(ctx->cgs_device, address, value);
+ trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
+}
+
+uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
+ const char *func_name)
+{
+ uint32_t value;
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ DC_ERR("invalid register read; address = 0\n");
+ return 0;
+ }
+#endif
+
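+	/*
+	 * Reads are invalid while the DMUB register helper is gathering
+	 * writes for offload (unless burst writes are in use), hence the
+	 * ASSERT below (an inference from the condition being checked).
+	 */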
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
+ !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
+ ASSERT(false);
+ return 0;
+ }
+
+ value = cgs_read_register(ctx->cgs_device, address);
+
+ trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
+
+ return value;
+}