author      Maarten Lankhorst <maarten.lankhorst@linux.intel.com>   2022-02-07 19:01:37 +0300
committer   Maarten Lankhorst <maarten.lankhorst@linux.intel.com>   2022-02-07 19:03:24 +0300
commit      542898c5aa5c6a3179dffb1d1606884a63f75fed (patch)
tree        ba096d8eab6a4230782333a036b5de578456eec0 /drivers/gpu/drm
parent      ccbeca4ca04302d129602093c8d611065e3f7958 (diff)
parent      53dbee4926d3706ca9e03f3928fa85b5ec3bc0cc (diff)
download    linux-542898c5aa5c6a3179dffb1d1606884a63f75fed.tar.xz
Merge remote-tracking branch 'drm/drm-next' into drm-misc-next
First backmerge into drm-misc-next. Required to pick up more backmerged
helpers and to pull in 5.17-rc2.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm')
576 files changed, 13009 insertions, 9723 deletions
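
One recurring pattern in the amdgpu_fence.c rework below is worth calling out before the diff: instead of tagging job-embedded fences with the AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT flag, the driver now gives them a separate dma_fence_ops table and tells the two variants apart by comparing ops pointers. A minimal kernel-style sketch of that dispatch idea, simplified from the hunks below (the struct body is elided; amdgpu_fence_is_job_fence() is an illustrative wrapper, not from the patch):

    #include <linux/dma-fence.h>

    struct amdgpu_fence {
    	struct dma_fence base;
    	/* ... driver fields ... */
    };

    static const struct dma_fence_ops amdgpu_fence_ops;     /* standalone fences */
    static const struct dma_fence_ops amdgpu_job_fence_ops; /* job-embedded fences */

    static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
    {
    	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

    	/* The ops pointer alone identifies the variant; no flag bit needed. */
    	if (__f->base.ops == &amdgpu_fence_ops ||
    	    __f->base.ops == &amdgpu_job_fence_ops)
    		return __f;

    	return NULL;
    }

    /* Illustrative helper: the reset path below applies the same test
     * (old->ops == &amdgpu_job_fence_ops) when clearing job fences. */
    static inline bool amdgpu_fence_is_job_fence(struct dma_fence *f)
    {
    	return f->ops == &amdgpu_job_fence_ops;
    }

The payoff of the split is that each variant gets its own get_timeline_name/enable_signaling/release callbacks, removing the per-call flag test from every fence path.
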
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9f017663ac50..d8b854fcbffa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -812,6 +812,7 @@ struct amd_powerplay { #define AMDGPU_RESET_MAGIC_NUM 64 #define AMDGPU_MAX_DF_PERFMONS 4 +#define AMDGPU_PRODUCT_NAME_LEN 64 struct amdgpu_device { struct device *dev; struct pci_dev *pdev; @@ -1076,13 +1077,14 @@ struct amdgpu_device { bool runpm; bool in_runpm; bool has_pr3; + bool is_fw_fb; bool pm_sysfs_en; bool ucode_sysfs_en; /* Chip product information */ char product_number[16]; - char product_name[32]; + char product_name[AMDGPU_PRODUCT_NAME_LEN]; char serial[20]; atomic_t throttling_logging_enabled; @@ -1095,7 +1097,9 @@ struct amdgpu_device { pci_channel_state_t pci_channel_state; struct amdgpu_reset_control *reset_cntl; - uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE]; + uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE]; + + bool ram_is_direct_mapped; }; static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) @@ -1316,6 +1320,8 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev, void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring); +void amdgpu_device_halt(struct amdgpu_device *adev); + /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) void amdgpu_register_atpx_handler(void); @@ -1359,8 +1365,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon); u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc); int amdgpu_enable_vblank_kms(struct drm_crtc *crtc); void amdgpu_disable_vblank_kms(struct drm_crtc *crtc); -long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, - unsigned long arg); int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 46cf48b3904a..6ca1db3c243f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -514,13 +514,6 @@ out_put: return r; } -uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev) -{ - struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); - - return amdgpu_vram_mgr_usage(vram_man); -} - uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst, struct amdgpu_device *src) { @@ -721,13 +714,13 @@ bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev) return adev->have_atomics_support; } -void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev) +void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset) { struct ras_err_data err_data = {0, 0, 0, NULL}; /* CPU MCA will handle page retirement if connected_to_cpu is 1 */ if (!adev->gmc.xgmi.connected_to_cpu) - amdgpu_umc_process_ras_data_cb(adev, &err_data, NULL); - else + amdgpu_umc_poison_handler(adev, &err_data, reset); + else if (reset) amdgpu_amdkfd_gpu_reset(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index fcbc8a9c9e06..ac841ae8f5cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -223,7 +223,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd, uint64_t *bo_size, void *metadata_buffer, size_t buffer_size, uint32_t *metadata_size, uint32_t *flags); -uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev); uint8_t 
amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst, struct amdgpu_device *src); int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst, @@ -296,7 +295,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, uint64_t *mmap_offset); int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config); -void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev); +void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, + bool reset); #if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index ddfe7aff919d..1abf662a0e91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -166,7 +166,7 @@ int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) lock_srbm(adev, mec, pipe, 0, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), + WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); @@ -279,7 +279,7 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, lower_32_bits((uintptr_t)wptr)); WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), upper_32_bits((uintptr_t)wptr)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1), + WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1, (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); } @@ -488,13 +488,13 @@ bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, uint32_t low, high; acquire_queue(adev, pipe_id, queue_id); - act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); + act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); high = upper_32_bits(queue_address >> 8); - if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) && - high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI))) + if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) && + high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI)) retval = true; } release_queue(adev); @@ -556,7 +556,7 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, end_jiffies = (utimeout * HZ / 1000) + jiffies; while (true) { - temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); + temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) break; if (time_after(jiffies, end_jiffies)) { @@ -645,7 +645,7 @@ int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd); + WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd); data = REG_SET_FIELD(data, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); @@ -722,7 +722,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe; queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe; soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0); - reg_val = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) + + reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) + queue_slot); *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK; if (*wave_cnt != 0) @@ -809,8 +809,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, 
for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) { gfx_v9_0_select_se_sh(adev, se_idx, sh_idx, 0xffffffff); - queue_map = RREG32(SOC15_REG_OFFSET(GC, 0, - mmSPI_CSQ_WF_ACTIVE_STATUS)); + queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS); /* * Assumption: queue map encodes following schema: four @@ -860,17 +859,17 @@ void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, /* * Program TBA registers */ - WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO), + WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_LO, lower_32_bits(tba_addr >> 8)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI), + WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_HI, upper_32_bits(tba_addr >> 8)); /* * Program TMA registers */ - WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO), + WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_LO, lower_32_bits(tma_addr >> 8)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI), + WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_HI, upper_32_bits(tma_addr >> 8)); unlock_srbm(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 5df89a295177..f9bab963a948 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -708,10 +708,12 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, va + bo_size, vm); - if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM && - amdgpu_xgmi_same_hive(adev, bo_adev))) { - /* Mappings on the local GPU and VRAM mappings in the - * local hive share the original BO + if (adev == bo_adev || + (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) || + (mem->domain == AMDGPU_GEM_DOMAIN_VRAM && amdgpu_xgmi_same_hive(adev, bo_adev))) { + /* Mappings on the local GPU, or VRAM mappings in the + * local hive, or userptr mapping IOMMU direct map mode + * share the original BO */ attachment[i]->type = KFD_MEM_ATT_SHARED; bo[i] = mem->bo; @@ -1559,13 +1561,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( ret = init_user_pages(*mem, user_addr); if (ret) goto allocate_init_user_pages_failed; - } - - if (offset) - *offset = amdgpu_bo_mmap_offset(bo); - - if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | - KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { + } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT); if (ret) { pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n"); @@ -1575,11 +1572,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; } + if (offset) + *offset = amdgpu_bo_mmap_offset(bo); + return 0; allocate_init_user_pages_failed: - remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); err_pin_bo: + remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); drm_vma_node_revoke(&gobj->vma_node, drm_priv); err_node_allow: /* Don't unreserve system mem limit twice */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 97178b307ed6..4d4ddf026faf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -470,8 +470,8 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade /** * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS - * adev: amdgpu_device pointer - * i2c_address: pointer to u8; if not NULL, will contain + * @adev: 
amdgpu_device pointer + * @i2c_address: pointer to u8; if not NULL, will contain * the RAS EEPROM address if the function returns true * * Return true if VBIOS supports RAS EEPROM address reporting, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 7abe9500c0c6..d6d986be906a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -11,6 +11,7 @@ #include <linux/pci.h> #include <linux/delay.h> +#include "amdgpu.h" #include "amd_acpi.h" #define AMDGPU_PX_QUIRK_FORCE_ATPX (1 << 0) @@ -165,7 +166,7 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas } /** - * amdgpu_atpx_validate_functions - validate ATPX functions + * amdgpu_atpx_validate - validate ATPX functions * * @atpx: amdgpu atpx struct * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 82d847e4e7d8..fa20261aa928 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -108,7 +108,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_HDMIB: if (amdgpu_connector->use_digital) { - if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { if (connector->display_info.bpc) bpc = connector->display_info.bpc; } @@ -116,7 +116,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) break; case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_HDMIA: - if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { if (connector->display_info.bpc) bpc = connector->display_info.bpc; } @@ -125,7 +125,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) dig_connector = amdgpu_connector->con_priv; if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) || - drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + connector->display_info.is_hdmi) { if (connector->display_info.bpc) bpc = connector->display_info.bpc; } @@ -149,7 +149,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) break; } - if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { /* * Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make * much sense without support for > 12 bpc framebuffers. 
RGB 4:4:4 at @@ -315,8 +315,10 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector) if (!amdgpu_connector->edid) { /* some laptops provide a hardcoded edid in rom for LCDs */ if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || - (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) + (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) { amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev); + drm_connector_update_edid_property(connector, amdgpu_connector->edid); + } } } @@ -326,6 +328,7 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector) kfree(amdgpu_connector->edid); amdgpu_connector->edid = NULL; + drm_connector_update_edid_property(connector, NULL); } static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector) @@ -387,6 +390,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder) native_mode->vdisplay != 0 && native_mode->clock != 0) { mode = drm_mode_duplicate(dev, native_mode); + if (!mode) + return NULL; + mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; drm_mode_set_name(mode); @@ -401,6 +407,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder) * simpler. */ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false); + if (!mode) + return NULL; + mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name); } @@ -1171,7 +1180,7 @@ static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) { return MODE_OK; - } else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + } else if (connector->display_info.is_hdmi) { /* HDMI 1.3+ supports max clock of 340 Mhz */ if (mode->clock > 340000) return MODE_CLOCK_HIGH; @@ -1463,7 +1472,7 @@ static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector (amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { return amdgpu_atombios_dp_mode_valid_helper(connector, mode); } else { - if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { /* HDMI 1.3+ supports max clock of 340 Mhz */ if (mode->clock > 340000) return MODE_CLOCK_HIGH; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 53e407ea4c89..e8440d306496 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -298,7 +298,6 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, { s64 time_us, increment_us; u64 free_vram, total_vram, used_vram; - struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); /* Allow a maximum of 200 accumulated ms. This is basically per-IB * throttling. * @@ -315,7 +314,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, } total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size); - used_vram = amdgpu_vram_mgr_usage(vram_man); + used_vram = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr); free_vram = used_vram >= total_vram ? 
0 : total_vram - used_vram; spin_lock(&adev->mm_stats.lock); @@ -362,7 +361,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) { u64 total_vis_vram = adev->gmc.visible_vram_size; u64 used_vis_vram = - amdgpu_vram_mgr_vis_usage(vram_man); + amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); if (used_vis_vram < total_vis_vram) { u64 free_vis_vram = total_vis_vram - used_vis_vram; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 164d6a9e9fbb..25e2e5bf90eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1618,6 +1618,9 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) if (!debugfs_initialized()) return 0; + debugfs_create_x32("amdgpu_smu_debug", 0600, root, + &adev->pm.smu_debug_mask); + ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev, &fops_ib_preempt); if (IS_ERR(ent)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 90d22a376632..ed077de426d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -30,6 +30,7 @@ #include <linux/module.h> #include <linux/console.h> #include <linux/slab.h> +#include <linux/iommu.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_probe_helper.h> @@ -331,7 +332,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos, } /** - * amdgpu_device_vram_access - access vram by vram aperature + * amdgpu_device_aper_access - access vram by vram aperature * * @adev: amdgpu_device pointer * @pos: offset of the buffer in vram @@ -550,11 +551,11 @@ void amdgpu_device_wreg(struct amdgpu_device *adev, trace_amdgpu_device_wreg(adev->pdev->device, reg, v); } -/* - * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range +/** + * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range * * this function is invoked only the debugfs register access - * */ + */ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v) { @@ -566,6 +567,8 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, adev->gfx.rlc.funcs->is_rlcg_access_range) { if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0); + } else if ((reg * 4) >= adev->rmmio_size) { + adev->pcie_wreg(adev, reg * 4, v); } else { writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); } @@ -1100,7 +1103,7 @@ static void amdgpu_device_wb_fini(struct amdgpu_device *adev) } /** - * amdgpu_device_wb_init- Init Writeback driver info and allocate memory + * amdgpu_device_wb_init - Init Writeback driver info and allocate memory * * @adev: amdgpu_device pointer * @@ -1447,7 +1450,7 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev) adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2; break; default: - return -EINVAL; + break; } return 0; @@ -2316,6 +2319,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) /* need to do gmc hw init early so we can allocate gpu mem */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { + /* Try to reserve bad pages early */ + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_exchange_data(adev); + r = amdgpu_device_vram_scratch_init(adev); if (r) { DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); @@ -2614,11 +2621,10 @@ static int amdgpu_device_ip_late_init(struct 
amdgpu_device *adev) if (r) DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); - /* For XGMI + passthrough configuration on arcturus, enable light SBR */ - if (adev->asic_type == CHIP_ARCTURUS && - amdgpu_passthrough(adev) && - adev->gmc.xgmi.num_physical_nodes > 1) - smu_set_light_sbr(&adev->smu, true); + /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */ + if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)|| + adev->asic_type == CHIP_ALDEBARAN )) + smu_handle_passthrough_sbr(&adev->smu, true); if (adev->gmc.xgmi.num_physical_nodes > 1) { mutex_lock(&mgpu_info.mutex); @@ -2657,6 +2663,36 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_device_smu_fini_early - smu hw_fini wrapper + * + * @adev: amdgpu_device pointer + * + * For ASICs need to disable SMC first + */ +static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev) +{ + int i, r; + + if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)) + return; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.hw) + continue; + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { + r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); + /* XXX handle errors */ + if (r) { + DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + } + adev->ip_blocks[i].status.hw = false; + break; + } + } +} + static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) { int i, r; @@ -2677,21 +2713,8 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); - /* need to disable SMC first */ - for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.hw) - continue; - if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { - r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); - /* XXX handle errors */ - if (r) { - DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - } - adev->ip_blocks[i].status.hw = false; - break; - } - } + /* Workaroud for ASICs need to disable SMC first */ + amdgpu_device_smu_fini_early(adev); for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.hw) @@ -2733,8 +2756,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) amdgpu_virt_release_ras_err_handler_data(adev); - amdgpu_ras_pre_fini(adev); - if (adev->gmc.xgmi.num_physical_nodes > 1) amdgpu_xgmi_remove_device(adev); @@ -3166,6 +3187,12 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) { switch (asic_type) { +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_HAINAN: +#endif + case CHIP_TOPAZ: + /* chips with no display hardware */ + return false; #if defined(CONFIG_DRM_AMD_DC) case CHIP_TAHITI: case CHIP_PITCAIRN: @@ -3367,6 +3394,22 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) return ret; } +/** + * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU + * + * @adev: amdgpu_device pointer + * + * RAM direct mapped to GPU if IOMMU is not enabled or is pass through mode + */ +static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev) +{ + struct iommu_domain *domain; + + domain = 
iommu_get_domain_for_dev(adev->dev); + if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY) + adev->ram_is_direct_mapped = true; +} + static const struct attribute *amdgpu_dev_attributes[] = { &dev_attr_product_name.attr, &dev_attr_product_number.attr, @@ -3455,9 +3498,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->psp.mutex); mutex_init(&adev->notifier_lock); - r = amdgpu_device_init_apu_flags(adev); - if (r) - return r; + amdgpu_device_init_apu_flags(adev); r = amdgpu_device_check_arguments(adev); if (r) @@ -3541,6 +3582,13 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) return r; + /* Need to get xgmi info early to decide the reset behavior*/ + if (adev->gmc.xgmi.supported) { + r = adev->gfxhub.funcs->get_xgmi_info(adev); + if (r) + return r; + } + /* enable PCIE atomic ops */ if (amdgpu_sriov_vf(adev)) adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) @@ -3770,6 +3818,8 @@ fence_driver_init: queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work, msecs_to_jiffies(AMDGPU_RESUME_MS)); + amdgpu_device_check_iommu_direct_map(adev); + return 0; release_ras_con: @@ -3783,6 +3833,7 @@ failed: static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) { + /* Clear all CPU mappings pointing to this device */ unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); @@ -3803,7 +3854,7 @@ static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) } /** - * amdgpu_device_fini - tear down the driver + * amdgpu_device_fini_hw - tear down the driver * * @adev: amdgpu_device pointer * @@ -3844,19 +3895,27 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) amdgpu_ucode_sysfs_fini(adev); sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); + /* disable ras feature must before hw fini */ + amdgpu_ras_pre_fini(adev); + amdgpu_device_ip_fini_early(adev); amdgpu_irq_fini_hw(adev); - ttm_device_clear_dma_mappings(&adev->mman.bdev); + if (adev->mman.initialized) + ttm_device_clear_dma_mappings(&adev->mman.bdev); amdgpu_gart_dummy_page_fini(adev); - amdgpu_device_unmap_mmio(adev); + if (drm_dev_is_unplugged(adev_to_drm(adev))) + amdgpu_device_unmap_mmio(adev); + } void amdgpu_device_fini_sw(struct amdgpu_device *adev) { + int idx; + amdgpu_fence_driver_sw_fini(adev); amdgpu_device_ip_fini(adev); release_firmware(adev->firmware.gpu_info_fw); @@ -3881,6 +3940,14 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) vga_client_unregister(adev->pdev); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + + iounmap(adev->rmmio); + adev->rmmio = NULL; + amdgpu_device_doorbell_fini(adev); + drm_dev_exit(idx); + } + if (IS_ENABLED(CONFIG_PERF_EVENTS)) amdgpu_pmu_fini(adev); if (adev->mman.discovery_bin) @@ -3901,8 +3968,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) */ static void amdgpu_device_evict_resources(struct amdgpu_device *adev) { - /* No need to evict vram on APUs for suspend to ram */ - if (adev->in_s3 && (adev->flags & AMD_IS_APU)) + /* No need to evict vram on APUs for suspend to ram or s2idle */ + if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU)) return; if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM)) @@ -3949,16 +4016,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) if (!adev->in_s0ix) amdgpu_amdkfd_suspend(adev, adev->in_runpm); - /* First evict vram memory */ amdgpu_device_evict_resources(adev); amdgpu_fence_driver_hw_fini(adev); amdgpu_device_ip_suspend_phase2(adev); - /* This second call to evict 
device resources is to evict - * the gart page table using the CPU. - */ - amdgpu_device_evict_resources(adev); return 0; } @@ -4284,6 +4346,9 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, bool from_hypervisor) { int r; + struct amdgpu_hive_info *hive = NULL; + + amdgpu_amdkfd_pre_reset(adev); amdgpu_amdkfd_pre_reset(adev); @@ -4300,8 +4365,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, goto error; amdgpu_virt_init_data_exchange(adev); - /* we need recover gart prior to run SMC/CP/SDMA resume */ - amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)); r = amdgpu_device_fw_loading(adev); if (r) @@ -4312,8 +4375,19 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) goto error; - amdgpu_irq_gpu_reset_resume_helper(adev); - r = amdgpu_ib_ring_tests(adev); + hive = amdgpu_get_xgmi_hive(adev); + /* Update PSP FW topology after reset */ + if (hive && adev->gmc.xgmi.num_physical_nodes > 1) + r = amdgpu_xgmi_update_topology(hive, adev); + + if (hive) + amdgpu_put_xgmi_hive(hive); + + if (!r) { + amdgpu_irq_gpu_reset_resume_helper(adev); + r = amdgpu_ib_ring_tests(adev); + amdgpu_amdkfd_post_reset(adev); + } error: if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { @@ -4376,33 +4450,24 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) if (amdgpu_gpu_recovery == -1) { switch (adev->asic_type) { - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_TOPAZ: - case CHIP_TONGA: - case CHIP_FIJI: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_POLARIS12: - case CHIP_VEGAM: - case CHIP_VEGA20: - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_RAVEN: - case CHIP_ARCTURUS: - case CHIP_RENOIR: - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_VANGOGH: - case CHIP_ALDEBARAN: - break; - default: +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_VERDE: + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_OLAND: + case CHIP_HAINAN: +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: +#endif + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_CYAN_SKILLFISH: goto disabled; + default: + break; } } @@ -4456,7 +4521,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev) int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, struct amdgpu_reset_context *reset_context) { - int i, j, r = 0; + int i, r = 0; struct amdgpu_job *job = NULL; bool need_full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); @@ -4478,15 +4543,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, /*clear job fence from fence drv to avoid force_completion *leave NULL and vm flush fence in fence drv */ - for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) { - struct dma_fence *old, **ptr; + amdgpu_fence_driver_clear_job_fences(ring); - ptr = &ring->fence_drv.fences[j]; - old = rcu_dereference_protected(*ptr, 1); - if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) { - RCU_INIT_POINTER(*ptr, NULL); - } - } /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(ring); } @@ -4617,10 +4675,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, amdgpu_inc_vram_lost(tmp_adev); } - r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT)); - if (r) - goto out; - r = amdgpu_device_fw_loading(tmp_adev); 
if (r) return r; @@ -4745,7 +4799,7 @@ static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgp { struct amdgpu_device *tmp_adev = NULL; - if (adev->gmc.xgmi.num_physical_nodes > 1) { + if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) { if (!hive) { dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes"); return -ENODEV; @@ -4957,7 +5011,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * We always reset all schedulers for device and all devices for XGMI * hive so that should take care of them too. */ - hive = amdgpu_get_xgmi_hive(adev); + if (!amdgpu_sriov_vf(adev)) + hive = amdgpu_get_xgmi_hive(adev); if (hive) { if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) { DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress", @@ -4998,7 +5053,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * to put adev in the 1st position. */ INIT_LIST_HEAD(&device_list); - if (adev->gmc.xgmi.num_physical_nodes > 1) { + if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) { list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) list_add_tail(&tmp_adev->reset_list, &device_list); if (!list_is_first(&adev->reset_list, &device_list)) @@ -5632,3 +5687,42 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, amdgpu_asic_invalidate_hdp(adev, ring); } + +/** + * amdgpu_device_halt() - bring hardware to some kind of halt state + * + * @adev: amdgpu_device pointer + * + * Bring hardware to some kind of halt state so that no one can touch it + * any more. It will help to maintain error context when error occurred. + * Compare to a simple hang, the system will keep stable at least for SSH + * access. Then it should be trivial to inspect the hardware state and + * see what's going on. Implemented as following: + * + * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc), + * clears all CPU mappings to device, disallows remappings through page faults + * 2. amdgpu_irq_disable_all() disables all interrupts + * 3. amdgpu_fence_driver_hw_fini() signals all HW fences + * 4. set adev->no_hw_access to avoid potential crashes after setp 5 + * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings + * 6. 
pci_disable_device() and pci_wait_for_pending_transaction() + * flush any in flight DMA operations + */ +void amdgpu_device_halt(struct amdgpu_device *adev) +{ + struct pci_dev *pdev = adev->pdev; + struct drm_device *ddev = adev_to_drm(adev); + + drm_dev_unplug(ddev); + + amdgpu_irq_disable_all(adev); + + amdgpu_fence_driver_hw_fini(adev); + + adev->no_hw_access = true; + + amdgpu_device_unmap_mmio(adev); + + pci_disable_device(pdev); + pci_wait_for_pending_transaction(pdev); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index ea00090b3fb3..81bfee978b74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -67,7 +67,8 @@ #include "smuio_v11_0_6.h" #include "smuio_v13_0.h" -MODULE_FIRMWARE("amdgpu/ip_discovery.bin"); +#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin" +MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY); #define mmRCC_CONFIG_MEMSIZE 0xde3 #define mmMM_INDEX 0x0 @@ -179,7 +180,7 @@ static int hw_id_map[MAX_HWIP] = { [DCI_HWIP] = DCI_HWID, }; -static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary) +static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary) { uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; @@ -189,6 +190,34 @@ static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *bin return 0; } +static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary) +{ + const struct firmware *fw; + const char *fw_name; + int r; + + switch (amdgpu_discovery) { + case 2: + fw_name = FIRMWARE_IP_DISCOVERY; + break; + default: + dev_warn(adev->dev, "amdgpu_discovery is not set properly\n"); + return -EINVAL; + } + + r = request_firmware(&fw, fw_name, adev->dev); + if (r) { + dev_err(adev->dev, "can't load firmware \"%s\"\n", + fw_name); + return r; + } + + memcpy((u8 *)binary, (u8 *)fw->data, adev->mman.discovery_tmr_size); + release_firmware(fw); + + return 0; +} + static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size) { uint16_t checksum = 0; @@ -206,13 +235,44 @@ static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size return !!(amdgpu_discovery_calculate_checksum(data, size) == expected); } +static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary) +{ + struct binary_header *bhdr; + bhdr = (struct binary_header *)binary; + + return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE); +} + +static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev) +{ + /* + * So far, apply this quirk only on those Navy Flounder boards which + * have a bad harvest table of VCN config. 
+ */ + if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) && + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) { + switch (adev->pdev->revision) { + case 0xC1: + case 0xC2: + case 0xC3: + case 0xC5: + case 0xC7: + case 0xCF: + case 0xDF: + adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; + break; + default: + break; + } + } +} + static int amdgpu_discovery_init(struct amdgpu_device *adev) { struct table_info *info; struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct gpu_info_header *ghdr; - const struct firmware *fw; uint16_t offset; uint16_t size; uint16_t checksum; @@ -223,31 +283,32 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (!adev->mman.discovery_bin) return -ENOMEM; - if (amdgpu_discovery == 2) { - r = request_firmware(&fw, "amdgpu/ip_discovery.bin", adev->dev); - if (r) - goto get_from_vram; - dev_info(adev->dev, "Using IP discovery from file\n"); - memcpy((u8 *)adev->mman.discovery_bin, (u8 *)fw->data, - adev->mman.discovery_tmr_size); - release_firmware(fw); - } else { -get_from_vram: - r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin); + r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin); + if (r) { + dev_err(adev->dev, "failed to read ip discovery binary from vram\n"); + r = -EINVAL; + goto out; + } + + if(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { + dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n"); + /* retry read ip discovery binary from file */ + r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin); if (r) { - DRM_ERROR("failed to read ip discovery binary\n"); + dev_err(adev->dev, "failed to read ip discovery binary from file\n"); + r = -EINVAL; + goto out; + } + /* check the ip discovery binary signature */ + if(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { + dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n"); + r = -EINVAL; goto out; } } bhdr = (struct binary_header *)adev->mman.discovery_bin; - if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) { - DRM_ERROR("invalid ip discovery binary signature\n"); - r = -EINVAL; - goto out; - } - offset = offsetof(struct binary_header, binary_checksum) + sizeof(bhdr->binary_checksum); size = le16_to_cpu(bhdr->binary_size) - offset; @@ -255,7 +316,7 @@ get_from_vram: if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, size, checksum)) { - DRM_ERROR("invalid ip discovery binary checksum\n"); + dev_err(adev->dev, "invalid ip discovery binary checksum\n"); r = -EINVAL; goto out; } @@ -266,14 +327,14 @@ get_from_vram: ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + offset); if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) { - DRM_ERROR("invalid ip discovery data table signature\n"); + dev_err(adev->dev, "invalid ip discovery data table signature\n"); r = -EINVAL; goto out; } if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, le16_to_cpu(ihdr->size), checksum)) { - DRM_ERROR("invalid ip discovery data table checksum\n"); + dev_err(adev->dev, "invalid ip discovery data table checksum\n"); r = -EINVAL; goto out; } @@ -285,7 +346,7 @@ get_from_vram: if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, le32_to_cpu(ghdr->size), checksum)) { - DRM_ERROR("invalid gc data table checksum\n"); + dev_err(adev->dev, "invalid gc data table checksum\n"); r = -EINVAL; goto out; } @@ -379,8 +440,18 @@ int 
amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) ip->major, ip->minor, ip->revision); - if (le16_to_cpu(ip->hw_id) == VCN_HWID) + if (le16_to_cpu(ip->hw_id) == VCN_HWID) { + /* Bit [5:0]: original revision value + * Bit [7:6]: en/decode capability: + * 0b00 : VCN function normally + * 0b10 : encode is disabled + * 0b01 : decode is disabled + */ + adev->vcn.vcn_config[adev->vcn.num_vcn_inst] = + ip->revision & 0xc0; + ip->revision &= ~0xc0; adev->vcn.num_vcn_inst++; + } if (le16_to_cpu(ip->hw_id) == SDMA0_HWID || le16_to_cpu(ip->hw_id) == SDMA1_HWID || le16_to_cpu(ip->hw_id) == SDMA2_HWID || @@ -472,14 +543,6 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int n return -EINVAL; } - -int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance, - int *major, int *minor, int *revision) -{ - return amdgpu_discovery_get_ip_version(adev, VCN_HWID, - vcn_instance, major, minor, revision); -} - void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) { struct binary_header *bhdr; @@ -509,10 +572,9 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) break; } } - /* some IP discovery tables on Navy Flounder don't have this set correctly */ - if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) && - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) - adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; + + amdgpu_discovery_harvest_config_quirk(adev); + if (vcn_harvest_count == adev->vcn.num_vcn_inst) { adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; @@ -526,10 +588,15 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) } } +union gc_info { + struct gc_info_v1_0 v1; + struct gc_info_v2_0 v2; +}; + int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) { struct binary_header *bhdr; - struct gc_info_v1_0 *gc_info; + union gc_info *gc_info; if (!adev->mman.discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); @@ -537,28 +604,55 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) } bhdr = (struct binary_header *)adev->mman.discovery_bin; - gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin + + gc_info = (union gc_info *)(adev->mman.discovery_bin + le16_to_cpu(bhdr->table_list[GC].offset)); - - adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se); - adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->gc_num_wgp0_per_sa) + - le32_to_cpu(gc_info->gc_num_wgp1_per_sa)); - adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->gc_num_sa_per_se); - adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->gc_num_rb_per_se); - adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->gc_num_gl2c); - adev->gfx.config.max_gprs = le32_to_cpu(gc_info->gc_num_gprs); - adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->gc_num_max_gs_thds); - adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->gc_gs_table_depth); - adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->gc_gsprim_buff_depth); - adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->gc_double_offchip_lds_buffer); - adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size); - adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd); - adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu); - adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size); - adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->gc_num_sc_per_se) / - 
le32_to_cpu(gc_info->gc_num_sa_per_se); - adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->gc_num_packer_per_sc); - + switch (gc_info->v1.header.version_major) { + case 1: + adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se); + adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) + + le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa)); + adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se); + adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se); + adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c); + adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs); + adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds); + adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth); + adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth); + adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer); + adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size); + adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd); + adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu); + adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size); + adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) / + le32_to_cpu(gc_info->v1.gc_num_sa_per_se); + adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc); + break; + case 2: + adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se); + adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh); + adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se); + adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se); + adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs); + adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs); + adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds); + adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth); + adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth); + adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer); + adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size); + adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd); + adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu); + adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size); + adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) / + le32_to_cpu(gc_info->v2.gc_num_sh_per_se); + adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc); + break; + default: + dev_err(adev->dev, + "Unhandled GC info table %d.%d\n", + gc_info->v1.header.version_major, + gc_info->v1.header.version_minor); + return -EINVAL; + } return 0; } @@ -917,7 +1011,6 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) break; case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 16): - case IP_VERSION(3, 0, 64): case IP_VERSION(3, 1, 1): case IP_VERSION(3, 0, 2): case IP_VERSION(3, 0, 192): @@ -954,7 +1047,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) 
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); break; default: - break;; + break; } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index 0ea029e3b850..14537cec19db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -33,8 +33,6 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev); int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance, int *major, int *minor, int *revision); -int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance, - int *major, int *minor, int *revision); int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev); int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 4c847f48f276..e76b96d55551 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1362,7 +1362,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) || ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) && - drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && + connector->display_info.is_hdmi && amdgpu_display_is_hdtv_mode(mode)))) { if (amdgpu_encoder->underscan_hborder != 0) amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 4896c876ffec..579adfafe4d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -381,7 +381,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) struct amdgpu_vm_bo_base *bo_base; int r; - if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM) + if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM) return; r = ttm_bo_validate(&bo->tbo, &placement, &ctx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 3a6f125c6dc9..4c83f1db8a24 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -38,6 +38,7 @@ #include <linux/mmu_notifier.h> #include <linux/suspend.h> #include <linux/cc_platform.h> +#include <linux/fb.h> #include "amdgpu.h" #include "amdgpu_irq.h" @@ -313,9 +314,12 @@ module_param_named(dpm, amdgpu_dpm, int, 0444); /** * DOC: fw_load_type (int) - * Set different firmware loading type for debugging (0 = direct, 1 = SMU, 2 = PSP). The default is -1 (auto). + * Set different firmware loading type for debugging, if supported. + * Set to 0 to force direct loading if supported by the ASIC. Set + * to -1 to select the default loading mode for the ASIC, as defined + * by the driver. The default is -1 (auto). */ -MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)"); +MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = force direct if supported, -1 = auto)"); module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444); /** @@ -327,10 +331,11 @@ module_param_named(aspm, amdgpu_aspm, int, 0444); /** * DOC: runpm (int) - * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down - * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). 
Setting the value to 0 disables this functionality. + * Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down + * the dGPUs when they are idle if supported. The default is -1 (auto enable). + * Setting the value to 0 disables this functionality. */ -MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = PX only default)"); +MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto)"); module_param_named(runpm, amdgpu_runtime_pm, int, 0444); /** @@ -1520,6 +1525,87 @@ static const u16 amdgpu_unsupported_pciidlist[] = { 0x99A0, 0x99A2, 0x99A4, + /* radeon secondary ids */ + 0x3171, + 0x3e70, + 0x4164, + 0x4165, + 0x4166, + 0x4168, + 0x4170, + 0x4171, + 0x4172, + 0x4173, + 0x496e, + 0x4a69, + 0x4a6a, + 0x4a6b, + 0x4a70, + 0x4a74, + 0x4b69, + 0x4b6b, + 0x4b6c, + 0x4c6e, + 0x4e64, + 0x4e65, + 0x4e66, + 0x4e67, + 0x4e68, + 0x4e69, + 0x4e6a, + 0x4e71, + 0x4f73, + 0x5569, + 0x556b, + 0x556d, + 0x556f, + 0x5571, + 0x5854, + 0x5874, + 0x5940, + 0x5941, + 0x5b72, + 0x5b73, + 0x5b74, + 0x5b75, + 0x5d44, + 0x5d45, + 0x5d6d, + 0x5d6f, + 0x5d72, + 0x5d77, + 0x5e6b, + 0x5e6d, + 0x7120, + 0x7124, + 0x7129, + 0x712e, + 0x712f, + 0x7162, + 0x7163, + 0x7166, + 0x7167, + 0x7172, + 0x7173, + 0x71a0, + 0x71a1, + 0x71a3, + 0x71a7, + 0x71bb, + 0x71e0, + 0x71e1, + 0x71e2, + 0x71e6, + 0x71e7, + 0x71f2, + 0x7269, + 0x726b, + 0x726e, + 0x72a0, + 0x72a8, + 0x72b1, + 0x72b3, + 0x793f, }; static const struct pci_device_id pciidlist[] = { @@ -1888,6 +1974,26 @@ MODULE_DEVICE_TABLE(pci, pciidlist); static const struct drm_driver amdgpu_kms_driver; +static bool amdgpu_is_fw_framebuffer(resource_size_t base, + resource_size_t size) +{ + bool found = false; +#if IS_REACHABLE(CONFIG_FB) + struct apertures_struct *a; + + a = alloc_apertures(1); + if (!a) + return false; + + a->ranges[0].base = base; + a->ranges[0].size = size; + + found = is_firmware_framebuffer(a); + kfree(a); +#endif + return found; +} + static int amdgpu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1896,6 +2002,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, unsigned long flags = ent->driver_data; int ret, retry = 0, i; bool supports_atomic = false; + bool is_fw_fb; + resource_size_t base, size; /* skip devices which are owned by radeon */ for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) { @@ -1903,11 +2011,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENODEV; } - if (flags == 0) { - DRM_INFO("Unsupported asic. 
Remove me when IP discovery init is in place.\n"); - return -ENODEV; - } - if (amdgpu_virtual_display || amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) supports_atomic = true; @@ -1964,6 +2067,10 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, } #endif + base = pci_resource_start(pdev, 0); + size = pci_resource_len(pdev, 0); + is_fw_fb = amdgpu_is_fw_framebuffer(base, size); + /* Get rid of things like offb */ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver); if (ret) @@ -1976,6 +2083,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, adev->dev = &pdev->dev; adev->pdev = pdev; ddev = adev_to_drm(adev); + adev->is_fw_fb = is_fw_fb; if (!supports_atomic) ddev->driver_features &= ~DRIVER_ATOMIC; @@ -2162,10 +2270,13 @@ static int amdgpu_pmops_suspend(struct device *dev) if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = true; - adev->in_s3 = true; + else + adev->in_s3 = true; r = amdgpu_device_suspend(drm_dev, true); - adev->in_s3 = false; - + if (r) + return r; + if (!adev->in_s0ix) + r = amdgpu_asic_reset(adev); return r; } @@ -2182,6 +2293,8 @@ static int amdgpu_pmops_resume(struct device *dev) r = amdgpu_device_resume(drm_dev, true); if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = false; + else + adev->in_s3 = false; return r; } @@ -2246,12 +2359,27 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) if (amdgpu_device_supports_px(drm_dev)) drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + /* + * By setting mp1_state as PP_MP1_STATE_UNLOAD, MP1 will do some + * proper cleanups and put itself into a state ready for PNP. That + * can address some random resuming failure observed on BOCO capable + * platforms. + * TODO: this may be also needed for PX capable platform. + */ + if (amdgpu_device_supports_boco(drm_dev)) + adev->mp1_state = PP_MP1_STATE_UNLOAD; + ret = amdgpu_device_suspend(drm_dev, false); if (ret) { adev->in_runpm = false; + if (amdgpu_device_supports_boco(drm_dev)) + adev->mp1_state = PP_MP1_STATE_NONE; return ret; } + if (amdgpu_device_supports_boco(drm_dev)) + adev->mp1_state = PP_MP1_STATE_NONE; + if (amdgpu_device_supports_px(drm_dev)) { /* Only need to handle PCI state in the driver for ATPX * PCI core handles it for _PR3. 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h index e3a4f7048042..8178323e4bef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h @@ -45,4 +45,7 @@ long amdgpu_drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +long amdgpu_kms_compat_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c index af4ef84e27a7..c96e458ed088 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c @@ -222,7 +222,7 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder, case DRM_MODE_CONNECTOR_HDMIB: if (amdgpu_connector->use_digital) { /* HDMI 1.3 supports up to 340 Mhz over single link */ - if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { if (pixel_clock > 340000) return true; else @@ -244,7 +244,7 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder, return false; else { /* HDMI 1.3 supports up to 340 Mhz over single link */ - if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) { + if (connector->display_info.is_hdmi) { if (pixel_clock > 340000) return true; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 3b7e86ea7167..45977a72b5dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -77,11 +77,13 @@ void amdgpu_fence_slab_fini(void) * Cast helper */ static const struct dma_fence_ops amdgpu_fence_ops; +static const struct dma_fence_ops amdgpu_job_fence_ops; static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f) { struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base); - if (__f->base.ops == &amdgpu_fence_ops) + if (__f->base.ops == &amdgpu_fence_ops || + __f->base.ops == &amdgpu_job_fence_ops) return __f; return NULL; @@ -158,19 +160,18 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd } seq = ++ring->fence_drv.sync_seq; - if (job != NULL && job->job_run_counter) { + if (job && job->job_run_counter) { /* reinit seq for resubmitted jobs */ fence->seqno = seq; } else { - dma_fence_init(fence, &amdgpu_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, - seq); - } - - if (job != NULL) { - /* mark this fence has a parent job */ - set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags); + if (job) + dma_fence_init(fence, &amdgpu_job_fence_ops, + &ring->fence_drv.lock, + adev->fence_context + ring->idx, seq); + else + dma_fence_init(fence, &amdgpu_fence_ops, + &ring->fence_drv.lock, + adev->fence_context + ring->idx, seq); } amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, @@ -546,9 +547,6 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) if (!ring || !ring->fence_drv.initialized) continue; - if (!ring->no_scheduler) - drm_sched_stop(&ring->sched, NULL); - /* You can't wait for HW to signal if it's gone */ if (!drm_dev_is_unplugged(adev_to_drm(adev))) r = amdgpu_fence_wait_empty(ring); @@ -608,11 +606,6 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev) if (!ring || !ring->fence_drv.initialized) continue; - if (!ring->no_scheduler) { - drm_sched_resubmit_jobs(&ring->sched); - drm_sched_start(&ring->sched, true); - } - /* enable the interrupt */ if (ring->fence_drv.irq_src) amdgpu_irq_get(adev, 
ring->fence_drv.irq_src, @@ -621,6 +614,25 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev) } /** + * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring + * + * @ring: ring whose job embedded fences are to be cleared + * + */ +void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring) +{ + int i; + struct dma_fence *old, **ptr; + + for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) { + ptr = &ring->fence_drv.fences[i]; + old = rcu_dereference_protected(*ptr, 1); + if (old && old->ops == &amdgpu_job_fence_ops) + RCU_INIT_POINTER(*ptr, NULL); + } +} + +/** * amdgpu_fence_driver_force_completion - force signal latest fence of ring * * @ring: fence of the ring to signal @@ -643,16 +655,14 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence) static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f) { - struct amdgpu_ring *ring; + return (const char *)to_amdgpu_fence(f)->ring->name; +} - if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) { - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence); +static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f) +{ + struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence); - ring = to_amdgpu_ring(job->base.sched); - } else { - ring = to_amdgpu_fence(f)->ring; - } - return (const char *)ring->name; + return (const char *)to_amdgpu_ring(job->base.sched)->name; } /** @@ -665,18 +675,25 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f) */ static bool amdgpu_fence_enable_signaling(struct dma_fence *f) { - struct amdgpu_ring *ring; + if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer)) + amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring); - if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) { - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence); + return true; +} - ring = to_amdgpu_ring(job->base.sched); - } else { - ring = to_amdgpu_fence(f)->ring; - } +/** + * amdgpu_job_fence_enable_signaling - enable signalling on job fence + * @f: fence + * + * This is similar to amdgpu_fence_enable_signaling above; it + * only handles the job embedded fence. + */ +static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f) +{ + struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence); - if (!timer_pending(&ring->fence_drv.fallback_timer)) - amdgpu_fence_schedule_fallback(ring); + if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer)) + amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched)); return true; } @@ -692,19 +709,23 @@ static void amdgpu_fence_free(struct rcu_head *rcu) { struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); - if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) { - /* free job if fence has a parent job */ - struct amdgpu_job *job; - - job = container_of(f, struct amdgpu_job, hw_fence); - kfree(job); - } else { /* free fence_slab if it's separated fence*/ - struct amdgpu_fence *fence; + kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f)); +} - fence = to_amdgpu_fence(f); - kmem_cache_free(amdgpu_fence_slab, fence); - } +/** + * amdgpu_job_fence_free - free up the job with embedded fence + * + * @rcu: RCU callback head + * + * Free up the job with embedded fence after the RCU grace period.
+ */ +static void amdgpu_job_fence_free(struct rcu_head *rcu) +{ + struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); + + /* free job if fence has a parent job */ + kfree(container_of(f, struct amdgpu_job, hw_fence)); } /** @@ -720,6 +741,19 @@ static void amdgpu_fence_release(struct dma_fence *f) call_rcu(&f->rcu, amdgpu_fence_free); } +/** + * amdgpu_job_fence_release - callback that job embedded fence can be freed + * + * @f: fence + * + * This is similar to amdgpu_fence_release above; it + * only handles the job embedded fence. + */ +static void amdgpu_job_fence_release(struct dma_fence *f) +{ + call_rcu(&f->rcu, amdgpu_job_fence_free); +} + static const struct dma_fence_ops amdgpu_fence_ops = { .get_driver_name = amdgpu_fence_get_driver_name, .get_timeline_name = amdgpu_fence_get_timeline_name, @@ -727,6 +761,12 @@ static const struct dma_fence_ops amdgpu_fence_ops = { .release = amdgpu_fence_release, }; +static const struct dma_fence_ops amdgpu_job_fence_ops = { + .get_driver_name = amdgpu_fence_get_driver_name, + .get_timeline_name = amdgpu_job_fence_get_timeline_name, + .enable_signaling = amdgpu_job_fence_enable_signaling, + .release = amdgpu_job_fence_release, +}; /* * Fence debugfs diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index 7709caeb233d..2a786e788627 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -56,6 +56,9 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev) return true; else return false; + case CHIP_ALDEBARAN: + /* All Aldebaran SKUs have the FRU */ + return true; default: return false; } @@ -88,13 +91,17 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr, int amdgpu_fru_get_product_info(struct amdgpu_device *adev) { - unsigned char buff[34]; + unsigned char buff[AMDGPU_PRODUCT_NAME_LEN+2]; u32 addrptr; int size, len; + int offset = 2; if (!is_fru_eeprom_supported(adev)) return 0; + if (adev->asic_type == CHIP_ALDEBARAN) + offset = 0; + /* If algo exists, it means that the i2c_adapter's initialized */ if (!adev->pm.smu_i2c.algo) { DRM_WARN("Cannot access FRU, EEPROM accessor not initialized"); @@ -131,15 +138,13 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) } len = size; - /* Product name should only be 32 characters. Any more, - * and something could be wrong. Cap it at 32 to be safe - */ - if (len >= sizeof(adev->product_name)) { - DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake"); - len = sizeof(adev->product_name) - 1; + if (len >= AMDGPU_PRODUCT_NAME_LEN) { + DRM_WARN("FRU Product Name is larger than %d characters. This is likely a mistake", + AMDGPU_PRODUCT_NAME_LEN); + len = AMDGPU_PRODUCT_NAME_LEN - 1; } /* Start at 2 due to buff using fields 0 and 1 for the address */ - memcpy(adev->product_name, &buff[2], len); + memcpy(adev->product_name, &buff[offset], len); adev->product_name[len] = '\0'; addrptr += size + 1; @@ -157,7 +162,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake"); len = sizeof(adev->product_number) - 1; } - memcpy(adev->product_number, &buff[2], len); + memcpy(adev->product_number, &buff[offset], len); adev->product_number[len] = '\0'; addrptr += size + 1; @@ -184,7 +189,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) DRM_WARN("FRU Serial Number is larger than 16 characters.
This is likely a mistake"); len = sizeof(adev->serial) - 1; } - memcpy(adev->serial, &buff[2], len); + memcpy(adev->serial, &buff[offset], len); adev->serial[len] = '\0'; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index d3e4203f6217..645950a653a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -114,80 +114,12 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev) */ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) { - int r; - - if (adev->gart.bo == NULL) { - struct amdgpu_bo_param bp; - - memset(&bp, 0, sizeof(bp)); - bp.size = adev->gart.table_size; - bp.byte_align = PAGE_SIZE; - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; - bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - bp.type = ttm_bo_type_kernel; - bp.resv = NULL; - bp.bo_ptr_size = sizeof(struct amdgpu_bo); - - r = amdgpu_bo_create(adev, &bp, &adev->gart.bo); - if (r) { - return r; - } - } - return 0; -} - -/** - * amdgpu_gart_table_vram_pin - pin gart page table in vram - * - * @adev: amdgpu_device pointer - * - * Pin the GART page table in vram so it will not be moved - * by the memory manager (pcie r4xx, r5xx+). These asics require the - * gart table to be in video memory. - * Returns 0 for success, error for failure. - */ -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gart.bo, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(adev->gart.bo, AMDGPU_GEM_DOMAIN_VRAM); - if (r) { - amdgpu_bo_unreserve(adev->gart.bo); - return r; - } - r = amdgpu_bo_kmap(adev->gart.bo, &adev->gart.ptr); - if (r) - amdgpu_bo_unpin(adev->gart.bo); - amdgpu_bo_unreserve(adev->gart.bo); - return r; -} - -/** - * amdgpu_gart_table_vram_unpin - unpin gart page table in vram - * - * @adev: amdgpu_device pointer - * - * Unpin the GART page table in vram (pcie r4xx, r5xx+). - * These asics require the gart table to be in video memory. 
- */ -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev) -{ - int r; + if (adev->gart.bo != NULL) + return 0; - if (adev->gart.bo == NULL) { - return; - } - r = amdgpu_bo_reserve(adev->gart.bo, true); - if (likely(r == 0)) { - amdgpu_bo_kunmap(adev->gart.bo); - amdgpu_bo_unpin(adev->gart.bo); - amdgpu_bo_unreserve(adev->gart.bo); - adev->gart.ptr = NULL; - } + return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo, + NULL, (void *)&adev->gart.ptr); } /** @@ -201,11 +133,7 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev) */ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev) { - if (adev->gart.bo == NULL) { - return; - } - amdgpu_bo_unref(&adev->gart.bo); - adev->gart.ptr = NULL; + amdgpu_bo_free_kernel(&adev->gart.bo, NULL, (void *)&adev->gart.ptr); } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 08478fce00f2..2430d6223c2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -350,6 +350,7 @@ static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid) * amdgpu_gmc_filter_faults - filter VM faults * * @adev: amdgpu device structure + * @ih: interrupt ring that the fault was received from * @addr: address of the VM fault * @pasid: PASID of the process causing the fault * @timestamp: timestamp of the fault * @@ -358,7 +359,8 @@ static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid) * True if the fault was filtered and should not be processed further. * False if the fault is a new one and needs to be handled. */ -bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, +bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, uint64_t addr, uint16_t pasid, uint64_t timestamp) { struct amdgpu_gmc *gmc = &adev->gmc; @@ -366,6 +368,10 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, struct amdgpu_gmc_fault *fault; uint32_t hash; + /* Stale retry fault if timestamp goes backward */ + if (amdgpu_ih_ts_after(timestamp, ih->processed_timestamp)) + return true; + /* If we don't have space left in the ring buffer return immediately */ stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) - AMDGPU_GMC_FAULT_TIMEOUT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index e55201134a01..8458cebc6d5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -316,7 +316,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc); void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc); -bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, +bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, uint64_t addr, uint16_t pasid, uint64_t timestamp); void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, uint16_t pasid); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 9e7685a4878c..e0c7fbe01d93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -77,10 +77,8 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - struct ttm_resource_manager *man; - man =
ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); - return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man)); + return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr)); } static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO, @@ -208,30 +206,27 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, /** * amdgpu_gtt_mgr_usage - return usage of GTT domain * - * @man: TTM memory type manager + * @mgr: amdgpu_gtt_mgr pointer * * Return how many bytes are used in the GTT domain */ -uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man) +uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr) { - struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); - return atomic64_read(&mgr->used) * PAGE_SIZE; } /** * amdgpu_gtt_mgr_recover - re-init gart * - * @man: TTM memory type manager + * @mgr: amdgpu_gtt_mgr pointer * * Re-init the gart for each known BO in the GTT. */ -int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man) +int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr) { - struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); - struct amdgpu_device *adev; struct amdgpu_gtt_node *node; struct drm_mm_node *mm_node; + struct amdgpu_device *adev; int r = 0; adev = container_of(mgr, typeof(*adev), mman.gtt_mgr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 0c7963dfacad..3df146579ad9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -164,52 +164,32 @@ void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv, } } -/* Waiter helper that checks current rptr matches or passes checkpoint wptr */ -static bool amdgpu_ih_has_checkpoint_processed(struct amdgpu_device *adev, - struct amdgpu_ih_ring *ih, - uint32_t checkpoint_wptr, - uint32_t *prev_rptr) -{ - uint32_t cur_rptr = ih->rptr | (*prev_rptr & ~ih->ptr_mask); - - /* rptr has wrapped. */ - if (cur_rptr < *prev_rptr) - cur_rptr += ih->ptr_mask + 1; - *prev_rptr = cur_rptr; - - /* check ring is empty to workaround missing wptr overflow flag */ - return cur_rptr >= checkpoint_wptr || - (cur_rptr & ih->ptr_mask) == amdgpu_ih_get_wptr(adev, ih); -} - /** - * amdgpu_ih_wait_on_checkpoint_process - wait to process IVs up to checkpoint + * amdgpu_ih_wait_on_checkpoint_process_ts - wait to process IVs up to checkpoint * * @adev: amdgpu_device pointer * @ih: ih ring to process * * Used to ensure ring has processed IVs up to the checkpoint write pointer. */ -int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev, +int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - uint32_t checkpoint_wptr, rptr; + uint32_t checkpoint_wptr; + uint64_t checkpoint_ts; + long timeout = HZ; if (!ih->enabled || adev->shutdown) return -ENODEV; checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih); - /* Order wptr with rptr. */ + /* Order wptr with ring data. */ rmb(); - rptr = READ_ONCE(ih->rptr); - - /* wptr has wrapped. */ - if (rptr > checkpoint_wptr) - checkpoint_wptr += ih->ptr_mask + 1; + checkpoint_ts = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1); - return wait_event_interruptible(ih->wait_process, - amdgpu_ih_has_checkpoint_processed(adev, ih, - checkpoint_wptr, &rptr)); + return wait_event_interruptible_timeout(ih->wait_process, + amdgpu_ih_ts_after(checkpoint_ts, ih->processed_timestamp) || + ih->rptr == amdgpu_ih_get_wptr(adev, ih), timeout); } /** @@ -299,3 +279,18 @@ void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev, /* wptr/rptr are in bytes! 
*/ ih->rptr += 32; } + +uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr, + signed int offset) +{ + uint32_t iv_size = 32; + uint32_t ring_index; + uint32_t dw1, dw2; + + rptr += iv_size * offset; + ring_index = (rptr & ih->ptr_mask) >> 2; + + dw1 = le32_to_cpu(ih->ring[ring_index + 1]); + dw2 = le32_to_cpu(ih->ring[ring_index + 2]); + return dw1 | ((u64)(dw2 & 0xffff) << 32); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index 0649b59830a5..dd1c2eded6b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -68,20 +68,30 @@ struct amdgpu_ih_ring { /* For waiting on IH processing at checkpoint. */ wait_queue_head_t wait_process; + uint64_t processed_timestamp; }; +/* return true if time stamp t2 is after t1 with 48bit wrap around */ +#define amdgpu_ih_ts_after(t1, t2) \ + (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL) + /* provided by the ih block */ struct amdgpu_ih_funcs { /* ring read/write ptr handling, called from interrupt context */ u32 (*get_wptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); void (*decode_iv)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, struct amdgpu_iv_entry *entry); + uint64_t (*decode_iv_ts)(struct amdgpu_ih_ring *ih, u32 rptr, + signed int offset); void (*set_rptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); }; #define amdgpu_ih_get_wptr(adev, ih) (adev)->irq.ih_funcs->get_wptr((adev), (ih)) #define amdgpu_ih_decode_iv(adev, iv) \ (adev)->irq.ih_funcs->decode_iv((adev), (ih), (iv)) +#define amdgpu_ih_decode_iv_ts(adev, ih, rptr, offset) \ + (WARN_ON_ONCE(!(adev)->irq.ih_funcs->decode_iv_ts) ? 0 : \ + (adev)->irq.ih_funcs->decode_iv_ts((ih), (rptr), (offset))) #define amdgpu_ih_set_rptr(adev, ih) (adev)->irq.ih_funcs->set_rptr((adev), (ih)) int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, @@ -89,10 +99,12 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv, unsigned int num_dw); -int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev, - struct amdgpu_ih_ring *ih); +int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih); int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, struct amdgpu_iv_entry *entry); +uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr, + signed int offset); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c index 5cf142e849bb..a1cbd7c3deb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c @@ -1,4 +1,4 @@ -/** +/* * \file amdgpu_ioc32.c * * 32-bit ioctl compatibility routines for the AMDGPU DRM. 
@@ -37,12 +37,9 @@ long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int nr = DRM_IOCTL_NR(cmd); - int ret; if (nr < DRM_COMMAND_BASE) return drm_compat_ioctl(filp, cmd, arg); - ret = amdgpu_drm_ioctl(filp, cmd, arg); - - return ret; + return amdgpu_drm_ioctl(filp, cmd, arg); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 4f3c62adccbd..f5cbc2747ac6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -390,7 +390,7 @@ void amdgpu_irq_fini_hw(struct amdgpu_device *adev) } /** - * amdgpu_irq_fini - shut down interrupt handling + * amdgpu_irq_fini_sw - shut down interrupt handling * * @adev: amdgpu device pointer * @@ -528,6 +528,9 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, /* Send it to amdkfd as well if it isn't already handled */ if (!handled) amdgpu_amdkfd_interrupt(adev, entry.iv_entry); + + if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp)) + ih->processed_timestamp = entry.timestamp; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 651c7abfde03..1ebb91db2274 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -206,6 +206,12 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) adev->runpm = true; break; } + /* XXX: disable runtime pm if we are the primary adapter + * to avoid displays being re-enabled after DPMS. + * This needs to be sorted out and fixed properly. + */ + if (adev->is_fw_fb) + adev->runpm = false; if (adev->runpm) dev_info(adev->dev, "Using BACO for runtime pm\n"); } @@ -672,13 +678,13 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ui64 = atomic64_read(&adev->num_vram_cpu_page_faults); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VRAM_USAGE: - ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM)); + ui64 = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VIS_VRAM_USAGE: - ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM)); + ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_GTT_USAGE: - ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)); + ui64 = amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr); return copy_to_user(out, &ui64, min(size, 8u)) ? 
-EFAULT : 0; case AMDGPU_INFO_GDS_CONFIG: { struct drm_amdgpu_info_gds gds_info; @@ -709,8 +715,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) } case AMDGPU_INFO_MEMORY: { struct drm_amdgpu_memory_info mem; - struct ttm_resource_manager *vram_man = - ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); struct ttm_resource_manager *gtt_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); memset(&mem, 0, sizeof(mem)); @@ -719,7 +723,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) atomic64_read(&adev->vram_pin_size) - AMDGPU_VM_RESERVED_VRAM; mem.vram.heap_usage = - amdgpu_vram_mgr_usage(vram_man); + amdgpu_vram_mgr_usage(&adev->mman.vram_mgr); mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; mem.cpu_accessible_vram.total_heap_size = @@ -729,7 +733,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) atomic64_read(&adev->visible_pin_size), mem.vram.usable_heap_size); mem.cpu_accessible_vram.heap_usage = - amdgpu_vram_mgr_vis_usage(vram_man); + amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); mem.cpu_accessible_vram.max_allocation = mem.cpu_accessible_vram.usable_heap_size * 3 / 4; @@ -738,7 +742,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) mem.gtt.usable_heap_size = mem.gtt.total_heap_size - atomic64_read(&adev->gart_pin_size); mem.gtt.heap_usage = - amdgpu_gtt_mgr_usage(gtt_man); + amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr); mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4; return copy_to_user(out, &mem, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 3a7b56e57cec..5661b82d84d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <linux/dma-buf.h> +#include <drm/drm_drv.h> #include <drm/amdgpu_drm.h> #include <drm/drm_cache.h> #include "amdgpu.h" @@ -1061,7 +1062,18 @@ int amdgpu_bo_init(struct amdgpu_device *adev) */ void amdgpu_bo_fini(struct amdgpu_device *adev) { + int idx; + amdgpu_ttm_fini(adev); + + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + + if (!adev->gmc.xgmi.connected_to_cpu) { + arch_phys_wc_del(adev->gmc.vram_mtrr); + arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); + } + drm_dev_exit(idx); + } } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c index 4eaec446b49d..0bb2466d539a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c @@ -69,6 +69,7 @@ static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den, /** * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation * + * @adev: amdgpu_device pointer * @nom: numerator * @den: denominator * @post_div: post divider @@ -106,6 +107,7 @@ static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int n /** * amdgpu_pll_compute - compute PLL parameters * + * @adev: amdgpu_device pointer * @pll: information about the PLL * @freq: requested frequency * @dot_clock_p: resulting pixel clock diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c index 82e9ecf84352..71ee361d0972 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c @@ -233,6 +233,10 @@ static void amdgpu_perf_start(struct perf_event *event, int flags) if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) return; + if
((!pe->adev->df.funcs) || + (!pe->adev->df.funcs->pmc_start)) + return; + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); hwc->state = 0; @@ -268,6 +272,10 @@ static void amdgpu_perf_read(struct perf_event *event) pmu); u64 count, prev; + if ((!pe->adev->df.funcs) || + (!pe->adev->df.funcs->pmc_get_count)) + return; + do { prev = local64_read(&hwc->prev_count); @@ -297,6 +305,10 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags) if (hwc->state & PERF_HES_UPTODATE) return; + if ((!pe->adev->df.funcs) || + (!pe->adev->df.funcs->pmc_stop)) + return; + switch (hwc->config_base) { case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF: case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI: @@ -326,6 +338,10 @@ static int amdgpu_perf_add(struct perf_event *event, int flags) struct amdgpu_pmu_entry, pmu); + if ((!pe->adev->df.funcs) || + (!pe->adev->df.funcs->pmc_start)) + return -EINVAL; + switch (pe->pmu_perf_type) { case AMDGPU_PMU_PERF_TYPE_DF: hwc->config_base = AMDGPU_PMU_EVENT_CONFIG_TYPE_DF; @@ -371,6 +387,9 @@ static void amdgpu_perf_del(struct perf_event *event, int flags) struct amdgpu_pmu_entry *pe = container_of(event->pmu, struct amdgpu_pmu_entry, pmu); + if ((!pe->adev->df.funcs) || + (!pe->adev->df.funcs->pmc_stop)) + return; amdgpu_perf_stop(event, PERF_EF_UPDATE); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c index 6f7189d32f0a..0d85c2096ab5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c @@ -59,7 +59,7 @@ static DEVICE_ATTR_RO(mem_info_preempt_used); * @man: TTM memory type manager * @tbo: TTM BO we need this range for * @place: placement flags and restrictions - * @mem: the resulting mem object + * @res: TTM memory object * * Dummy, just count the space used without allocating resources or any limit. */ @@ -85,7 +85,7 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man, * amdgpu_preempt_mgr_del - free ranges * * @man: TTM memory type manager - * @mem: TTM memory object + * @res: TTM memory object * * Free the allocated GTT again. 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index c641f84649d6..dee17a0e1187 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -518,7 +518,7 @@ static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp) return cmd; } -void release_psp_cmd_buf(struct psp_context *psp) +static void release_psp_cmd_buf(struct psp_context *psp) { mutex_unlock(&psp->mutex); } @@ -2017,12 +2017,16 @@ static int psp_hw_start(struct psp_context *psp) return ret; } + if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) + goto skip_pin_bo; + ret = psp_tmr_init(psp); if (ret) { DRM_ERROR("PSP tmr init failed!\n"); return ret; } +skip_pin_bo: /* * For ASICs with DF Cstate management centralized * to PMFW, TMR setup should be performed after PMFW @@ -2452,6 +2456,18 @@ skip_memalloc: return ret; } + if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { + if (adev->gmc.xgmi.num_physical_nodes > 1) { + ret = psp_xgmi_initialize(psp, false, true); + /* Warn on XGMI session initialization failure + * instead of stopping driver initialization + */ + if (ret) + dev_err(psp->adev->dev, + "XGMI: Failed to initialize XGMI session\n"); + } + } + if (psp->ta_fw) { ret = psp_ras_initialize(psp); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 46910e7b2927..8f47c14ecbc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -867,9 +867,9 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, /* feature ctl end */ -void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev, - struct ras_common_if *ras_block, - struct ras_err_data *err_data) +static void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_err_data *err_data) { switch (ras_block->sub_block_index) { case AMDGPU_RAS_MCA_BLOCK__MP0: @@ -1161,9 +1161,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, /** * amdgpu_ras_query_error_count -- Get error counts of all IPs - * adev: pointer to AMD GPU device - * ce_count: pointer to an integer to be set to the count of correctible errors. - * ue_count: pointer to an integer to be set to the count of uncorrectible + * @adev: pointer to AMD GPU device + * @ce_count: pointer to an integer to be set to the count of correctable errors. + * @ue_count: pointer to an integer to be set to the count of uncorrectable * errors. * * If set, @ce_count or @ue_count, count and return the corresponding @@ -1592,6 +1592,7 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) /* Let IP handle its data, maybe we need to get the output * from the callback to update the error type/count, etc */ + memset(&err_data, 0, sizeof(err_data)); ret = data->cb(obj->adev, &err_data, &entry); /* ue will trigger an interrupt, and in that case * we need do a reset to recovery the whole system. @@ -1747,6 +1748,16 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF) continue; + /* + * This is a workaround for Aldebaran: skip sending the message to + * SMU to get the ecc_info table, because SMU currently fails to + * handle that request. + * Should be removed once SMU handles the ecc_info table properly.
+ */ + if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) && + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2))) + continue; + amdgpu_ras_query_error_status(adev, &info); } } @@ -1828,8 +1839,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, .size = AMDGPU_GPU_PAGE_SIZE, .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, }; - status = amdgpu_vram_mgr_query_page_status( - ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM), + status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, data->bps[i].retired_page); if (status == -EBUSY) (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; @@ -1930,8 +1940,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, goto out; } - amdgpu_vram_mgr_reserve_range( - ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM), + amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr, bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT, AMDGPU_GPU_PAGE_SIZE); @@ -2362,7 +2371,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev) } /* Init poison supported flag, the default value is false */ - if (adev->df.funcs && + if (adev->gmc.xgmi.connected_to_cpu) { + /* enabled by default when GPU is connected to CPU */ + con->poison_supported = true; + } + else if (adev->df.funcs && adev->df.funcs->query_ras_poison_mode && adev->umc.ras_funcs && adev->umc.ras_funcs->query_ras_poison_mode) { @@ -2503,7 +2516,6 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev, amdgpu_ras_sysfs_remove(adev, ras_block); if (ih_info->cb) amdgpu_ras_interrupt_remove_handler(adev, ih_info); - amdgpu_ras_feature_enable(adev, ras_block, 0); } /* do some init work after IP late init as dependence. @@ -2673,7 +2685,7 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb, * and error occurred in DramECC (Extended error code = 0) then only * process the error, else bail out. 
*/ - if (!m || !((smca_get_bank_type(m->bank) == SMCA_UMC_V2) && + if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) && (XEC(m->status, 0x3f) == 0x0))) return NOTIFY_DONE; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 4d380e79752c..fae7d185ad0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -53,9 +53,6 @@ enum amdgpu_ring_priority_level { #define AMDGPU_FENCE_FLAG_INT (1 << 1) #define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2) -/* fence flag bit to indicate the face is embedded in job*/ -#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (DMA_FENCE_FLAG_USER_BITS + 1) - #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched) #define AMDGPU_IB_POOL_SIZE (1024 * 1024) @@ -114,6 +111,7 @@ struct amdgpu_fence_driver { struct dma_fence **fences; }; +void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring); void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index eac2ff4647e3..d178fbec7048 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -43,6 +43,7 @@ #include <linux/sizes.h> #include <linux/module.h> +#include <drm/drm_drv.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> @@ -1804,6 +1805,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) */ void amdgpu_ttm_fini(struct amdgpu_device *adev) { + int idx; if (!adev->mman.initialized) return; @@ -1818,6 +1820,15 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) NULL, NULL); amdgpu_ttm_fw_reserve_vram_fini(adev); + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + + if (adev->mman.aper_base_kaddr) + iounmap(adev->mman.aper_base_kaddr); + adev->mman.aper_base_kaddr = NULL; + + drm_dev_exit(idx); + } + amdgpu_vram_mgr_fini(adev); amdgpu_gtt_mgr_fini(adev); amdgpu_preempt_mgr_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 7346ecff4438..f8f48be16d80 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -114,8 +114,8 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev); void amdgpu_vram_mgr_fini(struct amdgpu_device *adev); bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem); -uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man); -int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man); +uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr); +int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr); uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man); @@ -129,11 +129,11 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, void amdgpu_vram_mgr_free_sgt(struct device *dev, enum dma_data_direction dir, struct sg_table *sgt); -uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man); -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man); -int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man, +uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr); +uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr); +int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, uint64_t start, uint64_t size); -int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man, +int amdgpu_vram_mgr_query_page_status(struct 
amdgpu_vram_mgr *mgr, uint64_t start); int amdgpu_ttm_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 6e4bea012ea4..46264a4002f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -23,74 +23,10 @@ #include "amdgpu_ras.h" -int amdgpu_umc_ras_late_init(struct amdgpu_device *adev) -{ - int r; - struct ras_fs_if fs_info = { - .sysfs_name = "umc_err_count", - }; - struct ras_ih_if ih_info = { - .cb = amdgpu_umc_process_ras_data_cb, - }; - - if (!adev->umc.ras_if) { - adev->umc.ras_if = - kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); - if (!adev->umc.ras_if) - return -ENOMEM; - adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC; - adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - adev->umc.ras_if->sub_block_index = 0; - } - ih_info.head = fs_info.head = *adev->umc.ras_if; - - r = amdgpu_ras_late_init(adev, adev->umc.ras_if, - &fs_info, &ih_info); - if (r) - goto free; - - if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) { - r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0); - if (r) - goto late_fini; - } else { - r = 0; - goto free; - } - - /* ras init of specific umc version */ - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->err_cnt_init) - adev->umc.ras_funcs->err_cnt_init(adev); - - return 0; - -late_fini: - amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info); -free: - kfree(adev->umc.ras_if); - adev->umc.ras_if = NULL; - return r; -} - -void amdgpu_umc_ras_fini(struct amdgpu_device *adev) -{ - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) && - adev->umc.ras_if) { - struct ras_common_if *ras_if = adev->umc.ras_if; - struct ras_ih_if ih_info = { - .head = *ras_if, - .cb = amdgpu_umc_process_ras_data_cb, - }; - - amdgpu_ras_late_fini(adev, ras_if, &ih_info); - kfree(ras_if); - } -} - -int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, +static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, void *ras_error_status, - struct amdgpu_iv_entry *entry) + struct amdgpu_iv_entry *entry, + bool reset) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -164,13 +100,108 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs); } - amdgpu_ras_reset_gpu(adev); + if (reset) + amdgpu_ras_reset_gpu(adev); } kfree(err_data->err_addr); return AMDGPU_RAS_SUCCESS; } +int amdgpu_umc_poison_handler(struct amdgpu_device *adev, + void *ras_error_status, + bool reset) +{ + int ret; + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + struct ras_common_if head = { + .block = AMDGPU_RAS_BLOCK__UMC, + }; + struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head); + + ret = + amdgpu_umc_do_page_retirement(adev, ras_error_status, NULL, reset); + + if (ret == AMDGPU_RAS_SUCCESS && obj) { + obj->err_data.ue_count += err_data->ue_count; + obj->err_data.ce_count += err_data->ce_count; + } + + return ret; +} + +static int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, + void *ras_error_status, + struct amdgpu_iv_entry *entry) +{ + return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true); +} + +int amdgpu_umc_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_fs_if fs_info = { + .sysfs_name = "umc_err_count", + }; + struct ras_ih_if ih_info = { + .cb = 
amdgpu_umc_process_ras_data_cb, + }; + + if (!adev->umc.ras_if) { + adev->umc.ras_if = + kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->umc.ras_if) + return -ENOMEM; + adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC; + adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->umc.ras_if->sub_block_index = 0; + } + ih_info.head = fs_info.head = *adev->umc.ras_if; + + r = amdgpu_ras_late_init(adev, adev->umc.ras_if, + &fs_info, &ih_info); + if (r) + goto free; + + if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) { + r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0); + if (r) + goto late_fini; + } else { + r = 0; + goto free; + } + + /* ras init of specific umc version */ + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->err_cnt_init) + adev->umc.ras_funcs->err_cnt_init(adev); + + return 0; + +late_fini: + amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info); +free: + kfree(adev->umc.ras_if); + adev->umc.ras_if = NULL; + return r; +} + +void amdgpu_umc_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) && + adev->umc.ras_if) { + struct ras_common_if *ras_if = adev->umc.ras_if; + struct ras_ih_if ih_info = { + .head = *ras_if, + .cb = amdgpu_umc_process_ras_data_cb, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} + int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 9e40bade0a68..b72194e8bfe5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -78,9 +78,9 @@ struct amdgpu_umc { int amdgpu_umc_ras_late_init(struct amdgpu_device *adev); void amdgpu_umc_ras_fini(struct amdgpu_device *adev); -int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, +int amdgpu_umc_poison_handler(struct amdgpu_device *adev, void *ras_error_status, - struct amdgpu_iv_entry *entry); + bool reset); int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 688bef1649b5..344f711ad144 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -434,7 +434,6 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) * * @ring: ring we should submit the msg to * @handle: VCE session handle to use - * @bo: amdgpu object for which we query the offset * @fence: optional fence to return * * Open up a stream for HW test diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 585961c2f5f2..9a19a6a57b23 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -286,20 +286,13 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance) { bool ret = false; + int vcn_config = adev->vcn.vcn_config[vcn_instance]; - int major; - int minor; - int revision; - - /* if cannot find IP data, then this VCN does not exist */ - if (amdgpu_discovery_get_vcn_version(adev, vcn_instance, &major, &minor, &revision) != 0) - return true; - - if ((type == VCN_ENCODE_RING) && (revision & VCN_BLOCK_ENCODE_DISABLE_MASK)) { + if ((type == VCN_ENCODE_RING) && (vcn_config & 
VCN_BLOCK_ENCODE_DISABLE_MASK)) { ret = true; - } else if ((type == VCN_DECODE_RING) && (revision & VCN_BLOCK_DECODE_DISABLE_MASK)) { + } else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) { ret = true; - } else if ((type == VCN_UNIFIED_RING) && (revision & VCN_BLOCK_QUEUE_DISABLE_MASK)) { + } else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) { ret = true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index bfa27ea94804..5d3728b027d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -235,6 +235,7 @@ struct amdgpu_vcn { uint8_t num_vcn_inst; struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES]; + uint8_t vcn_config[AMDGPU_MAX_VCN_INSTANCES]; struct amdgpu_vcn_reg internal; struct mutex vcn_pg_lock; struct mutex vcn1_jpeg1_workaround; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 3fc49823f527..07bc0f504713 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -553,7 +553,6 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev) static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) { struct amd_sriov_msg_vf2pf_info *vf2pf_info; - struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf; @@ -576,8 +575,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) vf2pf_info->driver_cert = 0; vf2pf_info->os_info.all = 0; - vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20; - vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20; + vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr) >> 20; + vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20; vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20; vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20; @@ -622,16 +621,34 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) { - uint64_t bp_block_offset = 0; - uint32_t bp_block_size = 0; - struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; - adev->virt.fw_reserve.p_pf2vf = NULL; adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0; if (adev->mman.fw_vram_usage_va != NULL) { - adev->virt.vf2pf_update_interval_ms = 2000; + /* go through this logic in ip_init and reset to init the workqueue */ + amdgpu_virt_exchange_data(adev); + + INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); + schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms)); + } else if (adev->bios != NULL) { + /* go through this logic in the early init stage to get necessary flags, e.g.
rlcg_acc related*/ + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + + amdgpu_virt_read_pf2vf_data(adev); + } +} + + +void amdgpu_virt_exchange_data(struct amdgpu_device *adev) +{ + uint64_t bp_block_offset = 0; + uint32_t bp_block_size = 0; + struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; + + if (adev->mman.fw_vram_usage_va != NULL) { adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *) @@ -657,22 +674,10 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) if (adev->virt.ras_init_done) amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size); } - } else if (adev->bios != NULL) { - adev->virt.fw_reserve.p_pf2vf = - (struct amd_sriov_msg_pf2vf_info_header *) - (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); - - amdgpu_virt_read_pf2vf_data(adev); - - return; - } - - if (adev->virt.vf2pf_update_interval_ms != 0) { - INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); - schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms); } } + void amdgpu_detect_virtualization(struct amdgpu_device *adev) { uint32_t reg; @@ -715,6 +720,10 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) vi_set_virt_ops(adev); break; case CHIP_VEGA10: + soc15_set_virt_ops(adev); + /* send a dummy GPU_INIT_DATA request to host on vega10 */ + amdgpu_virt_request_init_data(adev); + break; case CHIP_VEGA20: case CHIP_ARCTURUS: case CHIP_ALDEBARAN: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 8d4c20bb71c5..9adfb8d63280 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -308,6 +308,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); +void amdgpu_virt_exchange_data(struct amdgpu_device *adev); void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev); void amdgpu_detect_virtualization(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index ac9a8cd21c4b..d99c8779b51e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -16,6 +16,8 @@ #include "ivsrcid/ivsrcid_vislands30.h" #include "amdgpu_vkms.h" #include "amdgpu_display.h" +#include "atom.h" +#include "amdgpu_irq.h" /** * DOC: amdgpu_vkms @@ -41,16 +43,16 @@ static const u32 amdgpu_vkms_formats[] = { static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer) { - struct amdgpu_vkms_output *output = container_of(timer, - struct amdgpu_vkms_output, - vblank_hrtimer); - struct drm_crtc *crtc = &output->crtc; + struct amdgpu_crtc *amdgpu_crtc = container_of(timer, struct amdgpu_crtc, vblank_timer); + struct drm_crtc *crtc = &amdgpu_crtc->base; + struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc); u64 ret_overrun; bool ret; - ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, + ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer, output->period_ns); - WARN_ON(ret_overrun != 1); + if (ret_overrun != 1) + DRM_WARN("%s: vblank timer overrun\n", __func__); ret = drm_crtc_handle_vblank(crtc); if (!ret) @@ -65,22 +67,21 @@ static int amdgpu_vkms_enable_vblank(struct drm_crtc 
*crtc) unsigned int pipe = drm_crtc_index(crtc); struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); drm_calc_timestamping_constants(crtc, &crtc->mode); - hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - out->vblank_hrtimer.function = &amdgpu_vkms_vblank_simulate; out->period_ns = ktime_set(0, vblank->framedur_ns); - hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL); + hrtimer_start(&amdgpu_crtc->vblank_timer, out->period_ns, HRTIMER_MODE_REL); return 0; } static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc) { - struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - hrtimer_cancel(&out->vblank_hrtimer); + hrtimer_cancel(&amdgpu_crtc->vblank_timer); } static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc, @@ -92,13 +93,14 @@ static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc, unsigned int pipe = crtc->index; struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc); struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); if (!READ_ONCE(vblank->enabled)) { *vblank_time = ktime_get(); return true; } - *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires); + *vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires); if (WARN_ON(*vblank_time == vblank->time)) return true; @@ -142,15 +144,16 @@ static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc, static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { + unsigned long flags; if (crtc->state->event) { - spin_lock(&crtc->dev->event_lock); + spin_lock_irqsave(&crtc->dev->event_lock, flags); if (drm_crtc_vblank_get(crtc) != 0) drm_crtc_send_vblank_event(crtc, crtc->state->event); else drm_crtc_arm_vblank_event(crtc, crtc->state->event); - spin_unlock(&crtc->dev->event_lock); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); crtc->state->event = NULL; } @@ -165,6 +168,8 @@ static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = { static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor) { + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); int ret; ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor, @@ -176,6 +181,17 @@ static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs); + amdgpu_crtc->crtc_id = drm_crtc_index(crtc); + adev->mode_info.crtcs[drm_crtc_index(crtc)] = amdgpu_crtc; + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; + + hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate; + return ret; } @@ -375,6 +391,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev, int index) { struct drm_plane *plane; + uint64_t modifiers[] = {DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID}; int ret; plane = kzalloc(sizeof(*plane), GFP_KERNEL); @@ -385,7 +402,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev, &amdgpu_vkms_plane_funcs, 
amdgpu_vkms_formats, ARRAY_SIZE(amdgpu_vkms_formats), - NULL, type, NULL); + modifiers, type, NULL); if (ret) { kfree(plane); return ERR_PTR(ret); } @@ -396,12 +413,12 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev, return plane; } -int amdgpu_vkms_output_init(struct drm_device *dev, - struct amdgpu_vkms_output *output, int index) +static int amdgpu_vkms_output_init(struct drm_device *dev, struct + amdgpu_vkms_output *output, int index) { struct drm_connector *connector = &output->connector; struct drm_encoder *encoder = &output->encoder; - struct drm_crtc *crtc = &output->crtc; + struct drm_crtc *crtc = &output->crtc.base; struct drm_plane *primary, *cursor = NULL; int ret; @@ -465,6 +482,11 @@ static int amdgpu_vkms_sw_init(void *handle) int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc, + sizeof(struct amdgpu_vkms_output), GFP_KERNEL); + if (!adev->amdgpu_vkms_output) + return -ENOMEM; + adev_to_drm(adev)->max_vblank_count = 0; adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs; @@ -481,10 +503,6 @@ static int amdgpu_vkms_sw_init(void *handle) if (r) return r; - adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc, sizeof(struct amdgpu_vkms_output), GFP_KERNEL); - if (!adev->amdgpu_vkms_output) - return -ENOMEM; - /* allocate crtcs, encoders, connectors */ for (i = 0; i < adev->mode_info.num_crtc; i++) { r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i); @@ -504,15 +522,16 @@ static int amdgpu_vkms_sw_fini(void *handle) int i = 0; for (i = 0; i < adev->mode_info.num_crtc; i++) - if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function) - hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer); - - kfree(adev->mode_info.bios_hardcoded_edid); - kfree(adev->amdgpu_vkms_output); + if (adev->mode_info.crtcs[i]) + hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer); drm_kms_helper_poll_fini(adev_to_drm(adev)); + drm_mode_config_cleanup(adev_to_drm(adev)); adev->mode_info.mode_config_initialized = false; + + kfree(adev->mode_info.bios_hardcoded_edid); + kfree(adev->amdgpu_vkms_output); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h index 97f1b79c0724..4f8722ff37c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h @@ -10,15 +10,14 @@ #define YRES_MAX 16384 #define drm_crtc_to_amdgpu_vkms_output(target) \ - container_of(target, struct amdgpu_vkms_output, crtc) + container_of(target, struct amdgpu_vkms_output, crtc.base) extern const struct amdgpu_ip_block_version amdgpu_vkms_ip_block; struct amdgpu_vkms_output { - struct drm_crtc crtc; + struct amdgpu_crtc crtc; struct drm_encoder encoder; struct drm_connector connector; - struct hrtimer vblank_hrtimer; ktime_t period_ns; struct drm_pending_vblank_event *event; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index a96ae4c0e040..b37fc7d7d2c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -53,7 +53,7 @@ * can be mapped as snooped (cached system pages) or unsnooped * (uncached system pages). * Each VM has an ID associated with it and there is a page table - * associated with each VMID. When execting a command buffer, + * associated with each VMID. When executing a command buffer, * the kernel tells the ring what VMID to use for that command * buffer.
VMIDs are allocated dynamically as commands are submitted. * The userspace drivers maintain their own address space and the kernel diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index ddd0b6d74070..7442095f089c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -96,10 +96,9 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - struct ttm_resource_manager *man; - man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); - return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man)); + return sysfs_emit(buf, "%llu\n", + amdgpu_vram_mgr_usage(&adev->mman.vram_mgr)); } /** @@ -116,10 +115,9 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - struct ttm_resource_manager *man; - man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); - return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man)); + return sysfs_emit(buf, "%llu\n", + amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr)); } /** @@ -263,16 +261,15 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man) /** * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM * - * @man: TTM memory type manager + * @mgr: amdgpu_vram_mgr pointer * @start: start address of the range in VRAM * @size: size of the range * - * Reserve memory from start addess with the specified size in VRAM + * Reserve memory from start address with the specified size in VRAM */ -int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man, +int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, uint64_t start, uint64_t size) { - struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_vram_reservation *rsv; rsv = kzalloc(sizeof(*rsv), GFP_KERNEL); @@ -285,7 +282,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man, spin_lock(&mgr->lock); list_add_tail(&mgr->reservations_pending, &rsv->node); - amdgpu_vram_mgr_do_reserve(man); + amdgpu_vram_mgr_do_reserve(&mgr->manager); spin_unlock(&mgr->lock); return 0; @@ -294,7 +291,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man, /** * amdgpu_vram_mgr_query_page_status - query the reservation status * - * @man: TTM memory type manager + * @mgr: amdgpu_vram_mgr pointer * @start: start address of a page in VRAM * * Returns: @@ -302,10 +299,9 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man, * 0: the page has been reserved * -ENOENT: the input page is not a reservation */ -int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man, +int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, uint64_t start) { - struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_vram_reservation *rsv; int ret; @@ -634,28 +630,24 @@ void amdgpu_vram_mgr_free_sgt(struct device *dev, /** * amdgpu_vram_mgr_usage - how many bytes are used in this domain * - * @man: TTM memory type manager + * @mgr: amdgpu_vram_mgr pointer * * Returns how many bytes are used in this domain. 
*/ -uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man) +uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr) { - struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); - return atomic64_read(&mgr->usage); } /** * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part * - * @man: TTM memory type manager + * @mgr: amdgpu_vram_mgr pointer * * Returns how many bytes are used in the visible part of VRAM */ -uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man) +uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr) { - struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); - return atomic64_read(&mgr->vis_usage); } @@ -677,8 +669,8 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, spin_unlock(&mgr->lock); drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n", - man->size, amdgpu_vram_mgr_usage(man) >> 20, - amdgpu_vram_mgr_vis_usage(man) >> 20); + man->size, amdgpu_vram_mgr_usage(mgr) >> 20, + amdgpu_vram_mgr_vis_usage(mgr) >> 20); } static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 567df2db23ac..e8b8f28c2f72 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -208,6 +208,7 @@ static struct attribute *amdgpu_xgmi_hive_attrs[] = { &amdgpu_xgmi_hive_id, NULL }; +ATTRIBUTE_GROUPS(amdgpu_xgmi_hive); static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj, struct attribute *attr, char *buf) @@ -237,7 +238,7 @@ static const struct sysfs_ops amdgpu_xgmi_hive_ops = { struct kobj_type amdgpu_xgmi_hive_type = { .release = amdgpu_xgmi_hive_release, .sysfs_ops = &amdgpu_xgmi_hive_ops, - .default_attrs = amdgpu_xgmi_hive_attrs, + .default_groups = amdgpu_xgmi_hive_groups, }; static ssize_t amdgpu_xgmi_show_device_id(struct device *dev, @@ -265,6 +266,11 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev, ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200); ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208); + if ((!adev->df.funcs) || + (!adev->df.funcs->get_fica) || + (!adev->df.funcs->set_fica)) + return -EINVAL; + fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in); if (fica_out != 0x1f) pr_err("xGMI error counters not enabled!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 6134ed964027..a92d86e12718 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -469,7 +469,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder) if (amdgpu_connector->use_digital && (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)) return ATOM_ENCODER_MODE_HDMI; - else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && + else if (connector->display_info.is_hdmi && (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO)) return ATOM_ENCODER_MODE_HDMI; else if (amdgpu_connector->use_digital) @@ -488,7 +488,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder) if (amdgpu_audio != 0) { if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE) return ATOM_ENCODER_MODE_HDMI; - else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && + else if (connector->display_info.is_hdmi && (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO)) return ATOM_ENCODER_MODE_HDMI; else @@ -506,7 +506,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder) } else if 
(amdgpu_audio != 0) { if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE) return ATOM_ENCODER_MODE_HDMI; - else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) && + else if (connector->display_info.is_hdmi && (amdgpu_connector->audio == AMDGPU_AUDIO_AUTO)) return ATOM_ENCODER_MODE_HDMI; else diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 54f28c075f21..f10ce740a29c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1428,6 +1428,10 @@ static int cik_asic_reset(struct amdgpu_device *adev) { int r; + /* APUs don't have full asic reset */ + if (adev->flags & AMD_IS_APU) + return 0; + if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { dev_info(adev->dev, "BACO reset\n"); r = amdgpu_dpm_baco_reset(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index b305fd39874f..9189fb85a4dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -63,6 +63,13 @@ #define mmGCEA_PROBE_MAP 0x070c #define mmGCEA_PROBE_MAP_BASE_IDX 0 +#define GFX9_RLCG_GC_WRITE_OLD (0x8 << 28) +#define GFX9_RLCG_GC_WRITE (0x0 << 28) +#define GFX9_RLCG_GC_READ (0x1 << 28) +#define GFX9_RLCG_VFGATE_DISABLED 0x4000000 +#define GFX9_RLCG_WRONG_OPERATION_TYPE 0x2000000 +#define GFX9_RLCG_NOT_IN_RANGE 0x1000000 + MODULE_FIRMWARE("amdgpu/vega10_ce.bin"); MODULE_FIRMWARE("amdgpu/vega10_pfp.bin"); MODULE_FIRMWARE("amdgpu/vega10_me.bin"); @@ -739,7 +746,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] = mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0, }; -static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag) +static u32 gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag) { static void *scratch_reg0; static void *scratch_reg1; @@ -748,21 +755,20 @@ static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 f static void *spare_int; static uint32_t grbm_cntl; static uint32_t grbm_idx; + uint32_t i = 0; + uint32_t retries = 50000; + u32 ret = 0; + u32 tmp; scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4; scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4; - scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4; - scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4; + scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG2_BASE_IDX] + mmSCRATCH_REG2)*4; + scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG3_BASE_IDX] + mmSCRATCH_REG3)*4; spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4; grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX; - if (amdgpu_sriov_runtime(adev)) { - pr_err("shouldn't call rlcg write register during runtime\n"); - return; - } - if (offset == grbm_cntl || offset == grbm_idx) { if (offset == grbm_cntl) writel(v, scratch_reg2); @@ -771,41 +777,95 @@ static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 f writel(v, ((void __iomem *)adev->rmmio) + (offset * 4)); } else { - uint32_t i = 0; - uint32_t retries = 50000; - + /* + * SCRATCH_REG0 = read/write value + * SCRATCH_REG1[30:28] = command + * 
SCRATCH_REG1[19:0] = address in dword + * SCRATCH_REG1[26:24] = Error reporting + */ writel(v, scratch_reg0); - writel(offset | 0x80000000, scratch_reg1); + writel(offset | flag, scratch_reg1); writel(1, spare_int); - for (i = 0; i < retries; i++) { - u32 tmp; + for (i = 0; i < retries; i++) { tmp = readl(scratch_reg1); - if (!(tmp & 0x80000000)) + if (!(tmp & flag)) break; udelay(10); } - if (i >= retries) - pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset); + + if (i >= retries) { + if (amdgpu_sriov_reg_indirect_gc(adev)) { + if (tmp & GFX9_RLCG_VFGATE_DISABLED) + pr_err("The vfgate is disabled, program reg:0x%05x failed!\n", offset); + else if (tmp & GFX9_RLCG_WRONG_OPERATION_TYPE) + pr_err("Wrong operation type, program reg:0x%05x failed!\n", offset); + else if (tmp & GFX9_RLCG_NOT_IN_RANGE) + pr_err("The register is not in range, program reg:0x%05x failed!\n", offset); + else + pr_err("Unknown error type, program reg:0x%05x failed!\n", offset); + } else + pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset); + } } + ret = readl(scratch_reg0); + + return ret; +} + +static bool gfx_v9_0_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip, + int write, u32 *rlcg_flag) +{ + + switch (hwip) { + case GC_HWIP: + if (amdgpu_sriov_reg_indirect_gc(adev)) { + *rlcg_flag = write ? GFX9_RLCG_GC_WRITE : GFX9_RLCG_GC_READ; + + return true; + /* only in new version, AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */ + } else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) { + *rlcg_flag = GFX9_RLCG_GC_WRITE_OLD; + return true; + } + + break; + default: + return false; + } + + return false; +} + +static u32 gfx_v9_0_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip) +{ + u32 rlcg_flag; + + if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag)) + return gfx_v9_0_rlcg_rw(adev, offset, 0, rlcg_flag); + + if (acc_flags & AMDGPU_REGS_NO_KIQ) + return RREG32_NO_KIQ(offset); + else + return RREG32(offset); } static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset, - u32 v, u32 acc_flags, u32 hwip) + u32 value, u32 acc_flags, u32 hwip) { - if ((acc_flags & AMDGPU_REGS_RLC) && - amdgpu_sriov_fullaccess(adev)) { - gfx_v9_0_rlcg_w(adev, offset, v, acc_flags); + u32 rlcg_flag; + if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) { + gfx_v9_0_rlcg_rw(adev, offset, value, rlcg_flag); return; } if (acc_flags & AMDGPU_REGS_NO_KIQ) - WREG32_NO_KIQ(offset, v); + WREG32_NO_KIQ(offset, value); else - WREG32(offset, v); + WREG32(offset, value); } #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042 @@ -3070,8 +3130,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_GDS | AMD_PG_SUPPORT_RLC_SMU_HS)) { - WREG32(mmRLC_JUMP_TABLE_RESTORE, - adev->gfx.rlc.cp_table_gpu_addr >> 8); + WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE, + adev->gfx.rlc.cp_table_gpu_addr >> 8); gfx_v9_0_init_gfx_power_gating(adev); } } @@ -5135,7 +5195,7 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) if (amdgpu_sriov_is_pp_one_vf(adev)) data = RREG32_NO_KIQ(reg); else - data = RREG32(reg); + data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL); data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; @@ -5191,6 +5251,7 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { .start = 
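The reworked gfx_v9_0_rlcg_rw() above funnels both reads and writes through a scratch-register mailbox: the value goes in SCRATCH_REG0, the dword address plus a command flag in SCRATCH_REG1, and the driver polls until the RLC clears the flag. A self-contained sketch of that handshake, with plain memory standing in for MMIO and a fake service routine in place of the RLC firmware (flag values follow the GFX9_RLCG_* defines above; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define RLCG_GC_WRITE_OLD	(0x8u << 28)
#define RLCG_GC_READ		(0x1u << 28)

static volatile uint32_t scratch_reg0;	/* value in / result out */
static volatile uint32_t scratch_reg1;	/* command + dword address */
static uint32_t fake_reg;		/* the register behind the mailbox */

/* Stand-in for the RLC: perform the access, then clear the command. */
static void fake_rlc_service(void)
{
	uint32_t cmd = scratch_reg1 & 0xf0000000u;

	if (cmd == RLCG_GC_WRITE_OLD)
		fake_reg = scratch_reg0;
	else if (cmd == RLCG_GC_READ)
		scratch_reg0 = fake_reg;
	scratch_reg1 &= 0x000fffffu;	/* ack: clear command bits */
}

static uint32_t rlcg_rw(uint32_t offset, uint32_t v, uint32_t flag)
{
	int i, retries = 50000;

	scratch_reg0 = v;
	scratch_reg1 = offset | flag;	/* post the request */
	fake_rlc_service();		/* real code kicks RLC_SPARE_INT */

	for (i = 0; i < retries; i++) {
		if (!(scratch_reg1 & flag))
			break;		/* firmware acknowledged */
		/* real code: udelay(10); */
	}
	if (i >= retries)
		fprintf(stderr, "timeout: reg 0x%05x\n", offset);

	return scratch_reg0;
}

int main(void)
{
	rlcg_rw(0x1234, 0xdead, RLCG_GC_WRITE_OLD);
	printf("read back 0x%x\n", rlcg_rw(0x1234, 0, RLCG_GC_READ));
	return 0;
}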
gfx_v9_0_rlc_start, .update_spm_vmid = gfx_v9_0_update_spm_vmid, .sriov_wreg = gfx_v9_0_sriov_wreg, + .sriov_rreg = gfx_v9_0_sriov_rreg, .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range, }; @@ -5796,16 +5857,16 @@ static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl = RREG32_SOC15_IP(GC,mec_int_cntl_reg); mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, 0); - WREG32(mec_int_cntl_reg, mec_int_cntl); + WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); break; case AMDGPU_IRQ_STATE_ENABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, 1); - WREG32(mec_int_cntl_reg, mec_int_cntl); + WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 480e41847d7c..ec4d5e15b766 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -162,7 +162,6 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC);/* XXX for emulation. */ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index 14c1c1a297dd..6e0ace2fbfab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -196,7 +196,6 @@ static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC); /* UC, uncached */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index e80d1dc43079..b4eddf6e98a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -197,7 +197,6 @@ static void gfxhub_v2_1_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC); /* UC, uncached */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 3ec5ff5a6dbe..38bb42727715 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -107,7 +107,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, /* Process it onyl if it's the first fault for this address */ if (entry->ih != &adev->irq.ih_soft && - amdgpu_gmc_filter_faults(adev, addr, entry->pasid, + amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, entry->timestamp)) return 1; @@ -914,12 +914,6 @@ static int gmc_v10_0_sw_init(void *handle) return r; } - if (adev->gmc.xgmi.supported) { - r = adev->gfxhub.funcs->get_xgmi_info(adev); - if (r) - return r; - } - r = 
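The interrupt-state hunk above is the classic read-modify-write shape: fetch the control register, rewrite a single named field, write the register back. A compilable sketch using a simplified variant of the kernel's REG_SET_FIELD() (the real macro also takes the register name, and the mask/shift values here are invented):

#include <stdint.h>
#include <stdio.h>

#define TIME_STAMP_INT_ENABLE_MASK	0x00000001u
#define TIME_STAMP_INT_ENABLE_SHIFT	0

#define REG_SET_FIELD(reg, field, val) \
	(((reg) & ~field##_MASK) | (((uint32_t)(val) << field##_SHIFT) & field##_MASK))

static uint32_t mec_int_cntl = 0xffff0000u;	/* fake MMIO latch */

static void set_eop_interrupt(int enable)
{
	uint32_t v = mec_int_cntl;	/* RREG32_SOC15_IP(GC, ...) */

	v = REG_SET_FIELD(v, TIME_STAMP_INT_ENABLE, enable ? 1 : 0);
	mec_int_cntl = v;		/* WREG32_SOC15_IP(GC, ...) */
}

int main(void)
{
	set_eop_interrupt(1);
	printf("cntl=0x%08x\n", mec_int_cntl);	/* 0xffff0001 */
	set_eop_interrupt(0);
	printf("cntl=0x%08x\n", mec_int_cntl);	/* 0xffff0000 */
	return 0;
}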
gmc_v10_0_mc_init(adev); if (r) return r; @@ -992,10 +986,14 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) return -EINVAL; } - r = amdgpu_gart_table_vram_pin(adev); + if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) + goto skip_pin_bo; + + r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); if (r) return r; +skip_pin_bo: r = adev->gfxhub.funcs->gart_enable(adev); if (r) return r; @@ -1062,7 +1060,6 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev) { adev->gfxhub.funcs->gart_disable(adev); adev->mmhub.funcs->gart_disable(adev); - amdgpu_gart_table_vram_unpin(adev); } static int gmc_v10_0_hw_fini(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 0fe714f54cca..cd6c38e083d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -476,7 +476,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } - r = amdgpu_gart_table_vram_pin(adev); + r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); if (r) return r; @@ -608,7 +608,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) WREG32(mmVM_L2_CNTL3, VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK | (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT)); - amdgpu_gart_table_vram_unpin(adev); } static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0a50fdaced7e..ab8adbff9e2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -620,7 +620,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } - r = amdgpu_gart_table_vram_pin(adev); + r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); if (r) return r; @@ -758,7 +758,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); WREG32(mmVM_L2_CNTL, tmp); WREG32(mmVM_L2_CNTL2, 0); - amdgpu_gart_table_vram_unpin(adev); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 492ebed2915b..054733838292 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -515,10 +515,10 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) static int gmc_v8_0_mc_init(struct amdgpu_device *adev) { int r; + u32 tmp; adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev); if (!adev->gmc.vram_width) { - u32 tmp; int chansize, numchan; /* Get VRAM informations */ @@ -562,8 +562,15 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) adev->gmc.vram_width = numchan * chansize; } /* size in MB on si */ - adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; - adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; + tmp = RREG32(mmCONFIG_MEMSIZE); + /* some boards may have garbage in the upper 16 bits */ + if (tmp & 0xffff0000) { + DRM_INFO("Probable bad vram size: 0x%08x\n", tmp); + if (tmp & 0xffff) + tmp &= 0xffff; + } + adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL; + adev->gmc.real_vram_size = adev->gmc.mc_vram_size; if (!(adev->flags & AMD_IS_APU)) { r = amdgpu_device_resize_fb_bar(adev); @@ -837,7 +844,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } - r = 
amdgpu_gart_table_vram_pin(adev); + r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); if (r) return r; @@ -992,7 +999,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); WREG32(mmVM_L2_CNTL, tmp); WREG32(mmVM_L2_CNTL2, 0); - amdgpu_gart_table_vram_unpin(adev); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index cb82404df534..88c1eb9ad068 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -72,6 +72,9 @@ #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2 +#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea +#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2 + static const char *gfxhub_client_ids[] = { "CB", @@ -478,9 +481,18 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, hub = &adev->vmhub[j]; for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; - tmp = RREG32(reg); + + if (j == AMDGPU_GFXHUB_0) + tmp = RREG32_SOC15_IP(GC, reg); + else + tmp = RREG32_SOC15_IP(MMHUB, reg); + tmp &= ~bits; - WREG32(reg, tmp); + + if (j == AMDGPU_GFXHUB_0) + WREG32_SOC15_IP(GC, reg, tmp); + else + WREG32_SOC15_IP(MMHUB, reg, tmp); } } break; @@ -489,9 +501,18 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, hub = &adev->vmhub[j]; for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; - tmp = RREG32(reg); + + if (j == AMDGPU_GFXHUB_0) + tmp = RREG32_SOC15_IP(GC, reg); + else + tmp = RREG32_SOC15_IP(MMHUB, reg); + tmp |= bits; - WREG32(reg, tmp); + + if (j == AMDGPU_GFXHUB_0) + WREG32_SOC15_IP(GC, reg, tmp); + else + WREG32_SOC15_IP(MMHUB, reg, tmp); } } break; @@ -523,7 +544,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, /* Process it onyl if it's the first fault for this address */ if (entry->ih != &adev->irq.ih_soft && - amdgpu_gmc_filter_faults(adev, addr, entry->pasid, + amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, entry->timestamp)) return 1; @@ -788,9 +809,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. 
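The gmc_v8_0_mc_init() hunk above works around boards that report garbage in the upper 16 bits of CONFIG_MEMSIZE (which holds the VRAM size in MB): the low half is kept only when it is non-zero. A self-contained sketch of that salvage logic, with the register value faked:

#include <stdint.h>
#include <stdio.h>

static uint64_t vram_bytes_from_memsize(uint32_t tmp)
{
	if (tmp & 0xffff0000u) {	/* upper half should be zero */
		printf("Probable bad vram size: 0x%08x\n", tmp);
		if (tmp & 0xffffu)
			tmp &= 0xffffu;	/* keep the plausible low half */
	}
	return (uint64_t)tmp * 1024ULL * 1024ULL;	/* MB to bytes */
}

int main(void)
{
	/* e.g. 0xdead0200: keep 0x200 MB = 512 MiB */
	printf("%llu bytes\n",
	       (unsigned long long)vram_bytes_from_memsize(0xdead0200u));
	return 0;
}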
*/ if (use_semaphore) { for (j = 0; j < adev->usec_timeout; j++) { - /* a read return value of 1 means semaphore acuqire */ - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng); + /* a read return value of 1 means semaphore acquire */ + if (vmhub == AMDGPU_GFXHUB_0) + tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng); + else + tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng); + if (tmp & 0x1) break; udelay(1); @@ -801,8 +825,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, } do { - WREG32_NO_KIQ(hub->vm_inv_eng0_req + - hub->eng_distance * eng, inv_req); + if (vmhub == AMDGPU_GFXHUB_0) + WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); + else + WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); /* * Issue a dummy read to wait for the ACK register to @@ -815,8 +841,11 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, hub->eng_distance * eng); for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + - hub->eng_distance * eng); + if (vmhub == AMDGPU_GFXHUB_0) + tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng); + else + tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng); + if (tmp & (1 << vmid)) break; udelay(1); @@ -827,13 +856,16 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, } while (inv_req); /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ - if (use_semaphore) + if (use_semaphore) { /* * add semaphore release after invalidation, * write with 0 means semaphore release */ - WREG32_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng, 0); + if (vmhub == AMDGPU_GFXHUB_0) + WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); + else + WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); + } spin_unlock(&adev->gmc.invalidate_lock); @@ -1105,6 +1137,8 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); unsigned size; + /* TODO move to DC so GMC doesn't need to hard-code DCN registers */ + if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { size = AMDGPU_VBIOS_VGA_ALLOCATION; } else { @@ -1113,7 +1147,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): - case IP_VERSION(2, 1, 0): viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); size = (REG_GET_FIELD(viewport, HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * @@ -1121,6 +1154,14 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * 4); break; + case IP_VERSION(2, 1, 0): + viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2); + size = (REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * + REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * + 4); + break; default: viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE); size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) * @@ -1294,7 +1335,8 @@ static int gmc_v9_0_late_init(void *handle) if (!amdgpu_sriov_vf(adev) && 
(adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) { if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) { - if (adev->df.funcs->enable_ecc_force_par_wr_rmw) + if (adev->df.funcs && + adev->df.funcs->enable_ecc_force_par_wr_rmw) adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); } } @@ -1505,9 +1547,11 @@ static int gmc_v9_0_sw_init(void *handle) chansize = 64; else chansize = 128; - - numchan = adev->df.funcs->get_hbm_channel_number(adev); - adev->gmc.vram_width = numchan * chansize; + if (adev->df.funcs && + adev->df.funcs->get_hbm_channel_number) { + numchan = adev->df.funcs->get_hbm_channel_number(adev); + adev->gmc.vram_width = numchan * chansize; + } } adev->gmc.vram_type = vram_type; @@ -1596,12 +1640,6 @@ static int gmc_v9_0_sw_init(void *handle) } adev->need_swiotlb = drm_need_swiotlb(44); - if (adev->gmc.xgmi.supported) { - r = adev->gfxhub.funcs->get_xgmi_info(adev); - if (r) - return r; - } - r = gmc_v9_0_mc_init(adev); if (r) return r; @@ -1714,10 +1752,14 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) return -EINVAL; } - r = amdgpu_gart_table_vram_pin(adev); + if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) + goto skip_pin_bo; + + r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); if (r) return r; +skip_pin_bo: r = adev->gfxhub.funcs->gart_enable(adev); if (r) return r; @@ -1742,7 +1784,7 @@ static int gmc_v9_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool value; - int r, i; + int i; /* The sequence of these two function calls matters.*/ gmc_v9_0_init_golden_registers(adev); @@ -1777,9 +1819,7 @@ static int gmc_v9_0_hw_init(void *handle) if (adev->umc.funcs && adev->umc.funcs->init_registers) adev->umc.funcs->init_registers(adev); - r = gmc_v9_0_gart_enable(adev); - - return r; + return gmc_v9_0_gart_enable(adev); } /** @@ -1793,7 +1833,6 @@ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) { adev->gfxhub.funcs->gart_disable(adev); adev->mmhub.funcs->gart_disable(adev); - amdgpu_gart_table_vram_unpin(adev); } static int gmc_v9_0_hw_fini(void *handle) @@ -1808,6 +1847,14 @@ static int gmc_v9_0_hw_fini(void *handle) return 0; } + /* + * Pair the operations did in gmc_v9_0_hw_init and thus maintain + * a correct cached state for GMC. Otherwise, the "gate" again + * operation on S3 resuming will fail due to wrong cached state. + */ + if (adev->mmhub.funcs->update_power_gating) + adev->mmhub.funcs->update_power_gating(adev, false); + amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index a99953833820..1da2ec692057 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -145,7 +145,6 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC);/* XXX for emulation. 
*/ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); @@ -302,10 +301,10 @@ static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return; - if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) { - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true); - - } + if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB) + amdgpu_dpm_set_powergating_by_smu(adev, + AMD_IP_BLOCK_TYPE_GMC, + enable); } static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c index f80a14a1b82d..f5f7181f9af5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c @@ -165,7 +165,6 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC);/* XXX for emulation. */ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 25f8e93e5ec3..3718ff610ab2 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -267,7 +267,6 @@ static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC); /* UC, uncached */ diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index a11d60ec6321..9e16da28505a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -194,7 +194,6 @@ static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC); /* UC, uncached */ diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index c4ef822bbe8c..ff49eeaf7882 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -190,8 +190,6 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid) tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, - ECO_BITS, 0); - tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC);/* XXX for emulation. 
*/ tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 23b066bcffb2..56da5ab82987 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -180,6 +180,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2)); } + } else if (req == IDH_REQ_GPU_INIT_DATA){ + /* Dummy REQ_GPU_INIT_DATA handling */ + r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY); + /* version set to 0 since dummy */ + adev->virt.req_init_data_ver = 0; } return 0; @@ -252,11 +257,12 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. */ - if (!down_write_trylock(&adev->reset_sem)) + if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0) return; + down_write(&adev->reset_sem); + amdgpu_virt_fini_data_exchange(adev); - atomic_set(&adev->in_gpu_reset, 1); xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0); @@ -380,10 +386,16 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); } +static int xgpu_ai_request_init_data(struct amdgpu_device *adev) +{ + return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA); +} + const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .req_full_gpu = xgpu_ai_request_full_gpu_access, .rel_full_gpu = xgpu_ai_release_full_gpu_access, .reset_gpu = xgpu_ai_request_reset, .wait_reset = NULL, .trans_msg = xgpu_ai_mailbox_trans_msg, + .req_init_data = xgpu_ai_request_init_data, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index bd3b23171579..fa7e13e0459e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -26,7 +26,7 @@ #define AI_MAILBOX_POLL_ACK_TIMEDOUT 500 #define AI_MAILBOX_POLL_MSG_TIMEDOUT 6000 -#define AI_MAILBOX_POLL_FLR_TIMEDOUT 5000 +#define AI_MAILBOX_POLL_FLR_TIMEDOUT 10000 #define AI_MAILBOX_POLL_MSG_REP_MAX 11 enum idh_request { @@ -35,6 +35,7 @@ enum idh_request { IDH_REQ_GPU_FINI_ACCESS, IDH_REL_GPU_FINI_ACCESS, IDH_REQ_GPU_RESET_ACCESS, + IDH_REQ_GPU_INIT_DATA, IDH_LOG_VF_ERROR = 200, IDH_READY_TO_RESET = 201, @@ -48,6 +49,7 @@ enum idh_event { IDH_SUCCESS, IDH_FAIL, IDH_QUERY_ALIVE, + IDH_REQ_GPU_INIT_DATA_READY, IDH_TEXT_MESSAGE = 255, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index a35e6d87e537..477d0dde19c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -281,11 +281,12 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. 
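Both FLR workers in this series switch from down_write_trylock() to an atomic claim on in_gpu_reset followed by an unconditional down_write(), so losing the lock race no longer silently skips the reset handshake. A sketch of the claim-then-lock shape in plain C11 atomics (the kernel uses atomic_cmpxchg() and an rw_semaphore; names here are stand-ins):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int in_gpu_reset;

static int flr_work(void)
{
	int expected = 0;

	/* Claim reset ownership first; bail out if someone else owns it. */
	if (!atomic_compare_exchange_strong(&in_gpu_reset, &expected, 1))
		return -1;

	/* Winner: now it is safe to block on the reset lock. */
	/* down_write(&adev->reset_sem); ... recovery ... up_write(); */
	printf("doing FLR recovery\n");

	atomic_store(&in_gpu_reset, 0);
	return 0;
}

int main(void)
{
	flr_work();			/* claims and runs */
	atomic_store(&in_gpu_reset, 1);	/* pretend a reset is in flight */
	flr_work();			/* returns -1, does nothing */
	return 0;
}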
*/ - if (!down_write_trylock(&adev->reset_sem)) + if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0) return; + down_write(&adev->reset_sem); + amdgpu_virt_fini_data_exchange(adev); - atomic_set(&adev->in_gpu_reset, 1); xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 38241cf0e1f1..8ce5b8ca1fd7 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -716,6 +716,7 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = { static const struct amdgpu_ih_funcs navi10_ih_funcs = { .get_wptr = navi10_ih_get_wptr, .decode_iv = amdgpu_ih_decode_iv_helper, + .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper, .set_rptr = navi10_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 2176ef85f137..d0e76b36d4ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -277,13 +277,15 @@ static bool psp_v11_0_is_sos_alive(struct psp_context *psp) return sol_reg != 0x0; } -static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) +static int psp_v11_0_bootloader_load_component(struct psp_context *psp, + struct psp_bin_desc *bin_desc, + enum psp_bootloader_cmd bl_cmd) { int ret; uint32_t psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - /* Check tOS sign of life register to confirm sys driver and sOS + /* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded. */ if (psp_v11_0_is_sos_alive(psp)) @@ -293,13 +295,13 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) if (ret) return ret; - /* Copy PSP KDB binary to memory */ - psp_copy_fw(psp, psp->kdb.start_addr, psp->kdb.size_bytes); + /* Copy PSP System Driver binary to memory */ + psp_copy_fw(psp, bin_desc->start_addr, bin_desc->size_bytes); - /* Provide the PSP KDB to bootloader */ + /* Provide the sys driver to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (uint32_t)(psp->fw_pri_mc_addr >> 20)); - psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE; + psp_gfxdrv_command_reg = bl_cmd; WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, psp_gfxdrv_command_reg); @@ -308,69 +310,19 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) return ret; } -static int psp_v11_0_bootloader_load_spl(struct psp_context *psp) +static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) { - int ret; - uint32_t psp_gfxdrv_command_reg = 0; - struct amdgpu_device *adev = psp->adev; - - /* Check tOS sign of life register to confirm sys driver and sOS - * are already been loaded. 
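The psp_v11_0 hunks here and just below collapse three loaders that differed only in which binary they copied and which bootloader command they issued into one psp_v11_0_bootloader_load_component() plus one-line wrappers. A sketch of that deduplication pattern with stand-in types (not the real PSP structures):

#include <stdio.h>

struct psp_bin_desc { const char *name; };
struct psp_context { struct psp_bin_desc kdb, spl, sys; };

enum bl_cmd { LOAD_KEY_DATABASE, LOAD_TOS_SPL_TABLE, LOAD_SYSDRV };

static int load_component(struct psp_context *psp,
			  struct psp_bin_desc *bin, enum bl_cmd cmd)
{
	/* copy bin to PSP memory, issue cmd, wait for the bootloader */
	printf("loading %s (cmd %d)\n", bin->name, (int)cmd);
	return 0;
}

/* Each former copy-paste body shrinks to a trivial wrapper. */
static int load_kdb(struct psp_context *psp)
{ return load_component(psp, &psp->kdb, LOAD_KEY_DATABASE); }

static int load_spl(struct psp_context *psp)
{ return load_component(psp, &psp->spl, LOAD_TOS_SPL_TABLE); }

static int load_sysdrv(struct psp_context *psp)
{ return load_component(psp, &psp->sys, LOAD_SYSDRV); }

int main(void)
{
	struct psp_context psp = { { "kdb" }, { "spl" }, { "sysdrv" } };

	return load_kdb(&psp) | load_spl(&psp) | load_sysdrv(&psp);
}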
- */ - if (psp_v11_0_is_sos_alive(psp)) - return 0; - - ret = psp_v11_0_wait_for_bootloader(psp); - if (ret) - return ret; - - /* Copy PSP SPL binary to memory */ - psp_copy_fw(psp, psp->spl.start_addr, psp->spl.size_bytes); - - /* Provide the PSP SPL to bootloader */ - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, - (uint32_t)(psp->fw_pri_mc_addr >> 20)); - psp_gfxdrv_command_reg = PSP_BL__LOAD_TOS_SPL_TABLE; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, - psp_gfxdrv_command_reg); - - ret = psp_v11_0_wait_for_bootloader(psp); + return psp_v11_0_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_KEY_DATABASE); +} - return ret; +static int psp_v11_0_bootloader_load_spl(struct psp_context *psp) +{ + return psp_v11_0_bootloader_load_component(psp, &psp->spl, PSP_BL__LOAD_TOS_SPL_TABLE); } static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) { - int ret; - uint32_t psp_gfxdrv_command_reg = 0; - struct amdgpu_device *adev = psp->adev; - - /* Check sOS sign of life register to confirm sys driver and sOS - * are already been loaded. - */ - if (psp_v11_0_is_sos_alive(psp)) - return 0; - - ret = psp_v11_0_wait_for_bootloader(psp); - if (ret) - return ret; - - /* Copy PSP System Driver binary to memory */ - psp_copy_fw(psp, psp->sys.start_addr, psp->sys.size_bytes); - - /* Provide the sys driver to bootloader */ - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, - (uint32_t)(psp->fw_pri_mc_addr >> 20)); - psp_gfxdrv_command_reg = PSP_BL__LOAD_SYSDRV; - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, - psp_gfxdrv_command_reg); - - /* there might be handshake issue with hardware which needs delay */ - mdelay(20); - - ret = psp_v11_0_wait_for_bootloader(psp); - - return ret; + return psp_v11_0_bootloader_load_component(psp, &psp->sys, PSP_BL__LOAD_SYSDRV); } static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 853d1511b889..81e033549dda 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -481,8 +481,6 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring, * sdma_v5_0_ring_emit_mem_sync - flush the IB by graphics cache rinse * * @ring: amdgpu ring pointer - * @job: job to retrieve vmid from - * @ib: IB object to schedule * * flush the IB by graphics cache rinse. */ diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 4d4d1aa51b8a..d3d6d5b045b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -368,8 +368,6 @@ static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring, * sdma_v5_2_ring_emit_mem_sync - flush the IB by graphics cache rinse * * @ring: amdgpu ring pointer - * @job: job to retrieve vmid from - * @ib: IB object to schedule * * flush the IB by graphics cache rinse. */ @@ -544,9 +542,6 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable) } for (i = 0; i < adev->sdma.num_instances; i++) { - f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); - f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, - AUTO_CTXSW_ENABLE, enable ? 
1 : 0); if (enable && amdgpu_sdma_phase_quantum) { WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), phase_quantum); @@ -555,7 +550,13 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable) WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), phase_quantum); } - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); + + if (!amdgpu_sriov_vf(adev)) { + f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, enable ? 1 : 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); + } } } @@ -578,10 +579,12 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable) sdma_v5_2_rlc_stop(adev); } - for (i = 0; i < adev->sdma.num_instances; i++) { - f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); - f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); + if (!amdgpu_sriov_vf(adev)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); + } } } @@ -610,7 +613,8 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) ring = &adev->sdma.instance[i].ring; wb_offset = (ring->rptr_offs * 4); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + if (!amdgpu_sriov_vf(adev)) + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); /* Set ring buffer size in dwords */ rb_bufsz = order_base_2(ring->ring_size / 4); @@ -685,32 +689,34 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) sdma_v5_2_ring_set_wptr(ring); /* set minor_ptr_update to 0 after wptr programed */ - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); - /* set utc l1 enable flag always to 1 */ - temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); - - /* enable MCBP */ - temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); - - /* Set up RESP_MODE to non-copy addresses */ - temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); - temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); - - /* program default cache read and write policy */ - temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); - /* clean read policy and write policy bits */ - temp &= 0xFF0FFF; - temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | - (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | - SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); - WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); + /* SRIOV VF has no control of any of registers below */ if (!amdgpu_sriov_vf(adev)) { + /* set utc l1 enable flag always to 1 */ + temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); + + /* enable 
MCBP */ + temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); + + /* Set up RESP_MODE to non-copy addresses */ + temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); + + /* program default cache read and write policy */ + temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); + /* clean read policy and write policy bits */ + temp &= 0xFF0FFF; + temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | + (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | + SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); + /* unhalt engine */ temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); @@ -1438,13 +1444,14 @@ static int sdma_v5_2_set_trap_irq_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { u32 sdma_cntl; - u32 reg_offset = sdma_v5_2_get_reg_offset(adev, type, mmSDMA0_CNTL); - sdma_cntl = RREG32(reg_offset); - sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, - state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); - WREG32(reg_offset, sdma_cntl); + if (!amdgpu_sriov_vf(adev)) { + sdma_cntl = RREG32(reg_offset); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32(reg_offset, sdma_cntl); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index de9b55383e9f..0fc1747e4a70 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -744,7 +744,7 @@ static void soc15_reg_base_init(struct amdgpu_device *adev) vega10_reg_base_init(adev); break; case CHIP_RENOIR: - /* It's safe to do ip discovery here for Renior, + /* It's safe to do ip discovery here for Renoir, * it doesn't support SRIOV. 
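The sdma_v5_2 hunks above all add the same guard: under SRIOV the virtual function must not touch registers the host owns, so the read-modify-write is skipped outright rather than attempted and trapped. A minimal sketch of the guard (flag, register, and HALT bit are all faked for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool sriov_vf;			/* amdgpu_sriov_vf(adev) stand-in */
static uint32_t sdma0_f32_cntl = 0x1;	/* bit 0 = HALT */

static void sdma_enable(bool enable)
{
	uint32_t v;

	if (sriov_vf)
		return;		/* host programs this on the VF's behalf */

	v = sdma0_f32_cntl;
	v = (v & ~0x1u) | (enable ? 0u : 1u);	/* HALT = !enable */
	sdma0_f32_cntl = v;
}

int main(void)
{
	sdma_enable(true);
	printf("bare metal: 0x%x\n", sdma0_f32_cntl);	/* 0x0, unhalted */

	sriov_vf = true;
	sdma_enable(false);
	printf("sriov vf:   0x%x\n", sdma0_f32_cntl);	/* unchanged */
	return 0;
}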
*/ if (amdgpu_discovery) { r = amdgpu_discovery_reg_base_init(adev); @@ -1238,7 +1238,9 @@ static int soc15_common_sw_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_add_irq_id(adev); - adev->df.funcs->sw_init(adev); + if (adev->df.funcs && + adev->df.funcs->sw_init) + adev->df.funcs->sw_init(adev); return 0; } @@ -1250,7 +1252,10 @@ static int soc15_common_sw_fini(void *handle) if (adev->nbio.ras_funcs && adev->nbio.ras_funcs->ras_fini) adev->nbio.ras_funcs->ras_fini(adev); - adev->df.funcs->sw_fini(adev); + + if (adev->df.funcs && + adev->df.funcs->sw_fini) + adev->df.funcs->sw_fini(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 8a9ca87d8663..473767e03676 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -51,6 +51,8 @@ #define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP) +#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP) + #define RREG32_SOC15_NO_KIQ(ip, inst, reg) \ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ AMDGPU_REGS_NO_KIQ, ip##_HWIP) @@ -65,6 +67,9 @@ #define WREG32_SOC15_IP(ip, reg, value) \ __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP) +#define WREG32_SOC15_IP_NO_KIQ(ip, reg, value) \ + __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP) + #define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \ __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ value, AMDGPU_REGS_NO_KIQ, ip##_HWIP) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index d54d720b3cf6..3799226defc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -246,6 +246,13 @@ static int vcn_v1_0_suspend(void *handle) { int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool idle_work_unexecuted; + + idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work); + if (idle_work_unexecuted) { + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, false); + } r = vcn_v1_0_hw_fini(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index a9ca6988009e..3070466f54e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -640,6 +640,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = { static const struct amdgpu_ih_funcs vega10_ih_funcs = { .get_wptr = vega10_ih_get_wptr, .decode_iv = amdgpu_ih_decode_iv_helper, + .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper, .set_rptr = vega10_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index f51dfc38ac65..3b4eb8285943 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -688,6 +688,7 @@ const struct amd_ip_funcs vega20_ih_ip_funcs = { static const struct amdgpu_ih_funcs vega20_ih_funcs = { .get_wptr = vega20_ih_get_wptr, .decode_iv = amdgpu_ih_decode_iv_helper, + .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper, .set_rptr = vega20_ih_set_rptr }; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index fe9a7cc8d9eb..6645ebbd2696 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -956,6 +956,10 @@ static int vi_asic_reset(struct amdgpu_device *adev) { int r; + /* APUs don't have full asic reset */ + if (adev->flags & AMD_IS_APU) + 
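The vcn_v1_0_suspend() hunk above keys off the return value of cancel_delayed_work_sync(): if the idle worker was still pending, the power-down it would eventually have performed must happen explicitly before hw_fini, or the block suspends in the wrong power state. A sketch of that shape with the workqueue machinery faked (cancel_pending() stands in for cancel_delayed_work_sync(), and the dpm calls are reduced to a flag):

#include <stdbool.h>
#include <stdio.h>

static bool idle_work_pending = true;	/* queued but not yet run */
static bool uvd_powered = true;

/* Returns true if the work was cancelled before it ever ran. */
static bool cancel_pending(void)
{
	bool was_pending = idle_work_pending;

	idle_work_pending = false;
	return was_pending;
}

static void vcn_suspend(void)
{
	if (cancel_pending() && uvd_powered) {
		uvd_powered = false;	/* amdgpu_dpm_enable_uvd(.., false) */
		printf("powered VCN down before suspend\n");
	}
	/* ... hw_fini ... */
}

int main(void)
{
	vcn_suspend();
	printf("uvd_powered=%d\n", uvd_powered);	/* 0 */
	return 0;
}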
return 0; + if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { dev_info(adev->dev, "BACO reset\n"); r = amdgpu_dpm_baco_reset(adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index f187596faf66..9624bbe8b501 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1060,6 +1060,9 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, return -ENODEV; /* same everything but the other direction */ props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL); + if (!props2) + return -ENOMEM; + props2->node_from = id_to; props2->node_to = id_from; props2->kobj = NULL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index e1294fba0c26..2b65d0acae2c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -53,680 +53,304 @@ extern const struct kfd2kgd_calls aldebaran_kfd2kgd; extern const struct kfd2kgd_calls gfx_v10_kfd2kgd; extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd; -#ifdef KFD_SUPPORT_IOMMU_V2 -static const struct kfd_device_info kaveri_device_info = { - .asic_name = "kaveri", - .gfx_target_version = 70000, - .max_pasid_bits = 16, - /* max num of queues for KV.TODO should be a dynamic value */ - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = false, - .needs_iommu_device = true, - .needs_pci_atomics = false, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info carrizo_device_info = { - .asic_name = "carrizo", - .gfx_target_version = 80001, - .max_pasid_bits = 16, - /* max num of queues for CZ.TODO should be a dynamic value */ - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = true, - .needs_pci_atomics = false, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info raven_device_info = { - .asic_name = "raven", - .gfx_target_version = 90002, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = true, - .needs_pci_atomics = true, - .num_sdma_queues_per_engine = 2, -}; -#endif - -#ifdef CONFIG_DRM_AMDGPU_CIK -static const struct kfd_device_info hawaii_device_info = { - .asic_name = "hawaii", - .gfx_target_version = 70001, - .max_pasid_bits = 16, - /* max num of queues for KV.TODO should be a dynamic value */ - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = false, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_queues_per_engine = 2, -}; -#endif - -static const struct kfd_device_info tonga_device_info = { - .asic_name = "tonga", - .gfx_target_version = 80002, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = 
&event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = false, - .needs_iommu_device = false, - .needs_pci_atomics = true, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info fiji_device_info = { - .asic_name = "fiji", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = true, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info fiji_vf_device_info = { - .asic_name = "fiji", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_queues_per_engine = 2, -}; - - -static const struct kfd_device_info polaris10_device_info = { - .asic_name = "polaris10", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = true, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info polaris10_vf_device_info = { - .asic_name = "polaris10", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info polaris11_device_info = { - .asic_name = "polaris11", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = true, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info polaris12_device_info = { - .asic_name = "polaris12", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = true, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info vegam_device_info = { - .asic_name = "vegam", - .gfx_target_version = 80003, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - 
.needs_pci_atomics = true, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info vega10_device_info = { - .asic_name = "vega10", - .gfx_target_version = 90000, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info vega10_vf_device_info = { - .asic_name = "vega10", - .gfx_target_version = 90000, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info vega12_device_info = { - .asic_name = "vega12", - .gfx_target_version = 90004, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info vega20_device_info = { - .asic_name = "vega20", - .gfx_target_version = 90006, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info arcturus_device_info = { - .asic_name = "arcturus", - .gfx_target_version = 90008, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info aldebaran_device_info = { - .asic_name = "aldebaran", - .gfx_target_version = 90010, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info renoir_device_info = { - .asic_name = "renoir", - .gfx_target_version = 90012, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = true, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info navi10_device_info = { - .asic_name = "navi10", - .gfx_target_version = 
100100, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 145, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info navi12_device_info = { - .asic_name = "navi12", - .gfx_target_version = 100101, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 145, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info navi14_device_info = { - .asic_name = "navi14", - .gfx_target_version = 100102, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 145, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info sienna_cichlid_device_info = { - .asic_name = "sienna_cichlid", - .gfx_target_version = 100300, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 92, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info navy_flounder_device_info = { - .asic_name = "navy_flounder", - .gfx_target_version = 100301, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 92, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info vangogh_device_info = { - .asic_name = "vangogh", - .gfx_target_version = 100303, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 92, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info dimgrey_cavefish_device_info = { - .asic_name = "dimgrey_cavefish", - .gfx_target_version = 100302, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 92, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info 
beige_goby_device_info = { - .asic_name = "beige_goby", - .gfx_target_version = 100304, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 92, - .num_sdma_queues_per_engine = 8, -}; - -static const struct kfd_device_info yellow_carp_device_info = { - .asic_name = "yellow_carp", - .gfx_target_version = 100305, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .no_atomic_fw_version = 92, - .num_sdma_queues_per_engine = 2, -}; - -static const struct kfd_device_info cyan_skillfish_device_info = { - .asic_name = "cyan_skillfish", - .gfx_target_version = 100103, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 8, - .ih_ring_entry_size = 8 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_v9, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .needs_iommu_device = false, - .supports_cwsr = true, - .needs_pci_atomics = true, - .num_sdma_queues_per_engine = 8, -}; - static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, unsigned int chunk_size); static void kfd_gtt_sa_fini(struct kfd_dev *kfd); static int kfd_resume(struct kfd_dev *kfd); +static void kfd_device_info_set_sdma_queue_num(struct kfd_dev *kfd) +{ + uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0]; + + switch (sdma_version) { + case IP_VERSION(4, 0, 0):/* VEGA10 */ + case IP_VERSION(4, 0, 1):/* VEGA12 */ + case IP_VERSION(4, 1, 0):/* RAVEN */ + case IP_VERSION(4, 1, 1):/* RAVEN */ + case IP_VERSION(4, 1, 2):/* RENOIR */ + case IP_VERSION(5, 2, 1):/* VANGOGH */ + case IP_VERSION(5, 2, 3):/* YELLOW_CARP */ + kfd->device_info.num_sdma_queues_per_engine = 2; + break; + case IP_VERSION(4, 2, 0):/* VEGA20 */ + case IP_VERSION(4, 2, 2):/* ARCTURUS */ + case IP_VERSION(4, 4, 0):/* ALDEBARAN */ + case IP_VERSION(5, 0, 0):/* NAVI10 */ + case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */ + case IP_VERSION(5, 0, 2):/* NAVI14 */ + case IP_VERSION(5, 0, 5):/* NAVI12 */ + case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */ + case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */ + case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */ + case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */ + kfd->device_info.num_sdma_queues_per_engine = 8; + break; + default: + dev_warn(kfd_device, + "Default sdma queue per engine(8) is set due to " + "mismatch of sdma ip block(SDMA_HWIP:0x%x).\n", + sdma_version); + kfd->device_info.num_sdma_queues_per_engine = 8; + } +} + +static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) +{ + uint32_t gc_version = KFD_GC_VERSION(kfd); + + switch (gc_version) { + case IP_VERSION(9, 0, 1): /* VEGA10 */ + case IP_VERSION(9, 1, 0): /* RAVEN */ + case IP_VERSION(9, 2, 1): /* VEGA12 */ + case IP_VERSION(9, 2, 2): /* RAVEN */ + case IP_VERSION(9, 3, 0): /* RENOIR */ + case IP_VERSION(9, 4, 0): /* VEGA20 */ + case IP_VERSION(9, 4, 1): /* ARCTURUS */ + case IP_VERSION(9, 4, 2): /* ALDEBARAN */ + case IP_VERSION(10, 3, 1): /* VANGOGH */ + case IP_VERSION(10, 3, 3): /* YELLOW_CARP */ + case IP_VERSION(10, 1, 3): /* 
CYAN_SKILLFISH */ + case IP_VERSION(10, 1, 10): /* NAVI10 */ + case IP_VERSION(10, 1, 2): /* NAVI12 */ + case IP_VERSION(10, 1, 1): /* NAVI14 */ + case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */ + case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */ + case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */ + case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */ + kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; + break; + default: + dev_warn(kfd_device, "v9 event interrupt handler is set due to " + "mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version); + kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; + } +} + +static void kfd_device_info_init(struct kfd_dev *kfd, + bool vf, uint32_t gfx_target_version) +{ + uint32_t gc_version = KFD_GC_VERSION(kfd); + uint32_t asic_type = kfd->adev->asic_type; + + kfd->device_info.max_pasid_bits = 16; + kfd->device_info.max_no_of_hqd = 24; + kfd->device_info.num_of_watch_points = 4; + kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED; + kfd->device_info.gfx_target_version = gfx_target_version; + + if (KFD_IS_SOC15(kfd)) { + kfd->device_info.doorbell_size = 8; + kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t); + kfd->device_info.supports_cwsr = true; + + kfd_device_info_set_sdma_queue_num(kfd); + + kfd_device_info_set_event_interrupt_class(kfd); + + /* Raven */ + if (gc_version == IP_VERSION(9, 1, 0) || + gc_version == IP_VERSION(9, 2, 2)) + kfd->device_info.needs_iommu_device = true; + + if (gc_version < IP_VERSION(11, 0, 0)) { + /* Navi2x+, Navi1x+ */ + if (gc_version >= IP_VERSION(10, 3, 0)) + kfd->device_info.no_atomic_fw_version = 92; + else if (gc_version >= IP_VERSION(10, 1, 1)) + kfd->device_info.no_atomic_fw_version = 145; + + /* Navi1x+ */ + if (gc_version >= IP_VERSION(10, 1, 1)) + kfd->device_info.needs_pci_atomics = true; + } + } else { + kfd->device_info.doorbell_size = 4; + kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t); + kfd->device_info.event_interrupt_class = &event_interrupt_class_cik; + kfd->device_info.num_sdma_queues_per_engine = 2; + + if (asic_type != CHIP_KAVERI && + asic_type != CHIP_HAWAII && + asic_type != CHIP_TONGA) + kfd->device_info.supports_cwsr = true; + + if (asic_type == CHIP_KAVERI || + asic_type == CHIP_CARRIZO) + kfd->device_info.needs_iommu_device = true; + + if (asic_type != CHIP_HAWAII && !vf) + kfd->device_info.needs_pci_atomics = true; + } +} + struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) { - struct kfd_dev *kfd; - const struct kfd_device_info *device_info; - const struct kfd2kgd_calls *f2g; + struct kfd_dev *kfd = NULL; + const struct kfd2kgd_calls *f2g = NULL; struct pci_dev *pdev = adev->pdev; + uint32_t gfx_target_version = 0; switch (adev->asic_type) { #ifdef KFD_SUPPORT_IOMMU_V2 #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_KAVERI: - if (vf) - device_info = NULL; - else - device_info = &kaveri_device_info; - f2g = &gfx_v7_kfd2kgd; + gfx_target_version = 70000; + if (!vf) + f2g = &gfx_v7_kfd2kgd; break; #endif case CHIP_CARRIZO: - if (vf) - device_info = NULL; - else - device_info = &carrizo_device_info; - f2g = &gfx_v8_kfd2kgd; + gfx_target_version = 80001; + if (!vf) + f2g = &gfx_v8_kfd2kgd; break; #endif #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_HAWAII: - if (vf) - device_info = NULL; - else - device_info = &hawaii_device_info; - f2g = &gfx_v7_kfd2kgd; + gfx_target_version = 70001; + if (!amdgpu_exp_hw_support) + pr_info( + "KFD support on Hawaii is experimental. 
See modparam exp_hw_support\n" + ); + else if (!vf) + f2g = &gfx_v7_kfd2kgd; break; #endif case CHIP_TONGA: - if (vf) - device_info = NULL; - else - device_info = &tonga_device_info; - f2g = &gfx_v8_kfd2kgd; + gfx_target_version = 80002; + if (!vf) + f2g = &gfx_v8_kfd2kgd; break; case CHIP_FIJI: - if (vf) - device_info = &fiji_vf_device_info; - else - device_info = &fiji_device_info; + gfx_target_version = 80003; f2g = &gfx_v8_kfd2kgd; break; case CHIP_POLARIS10: - if (vf) - device_info = &polaris10_vf_device_info; - else - device_info = &polaris10_device_info; + gfx_target_version = 80003; f2g = &gfx_v8_kfd2kgd; break; case CHIP_POLARIS11: - if (vf) - device_info = NULL; - else - device_info = &polaris11_device_info; - f2g = &gfx_v8_kfd2kgd; + gfx_target_version = 80003; + if (!vf) + f2g = &gfx_v8_kfd2kgd; break; case CHIP_POLARIS12: - if (vf) - device_info = NULL; - else - device_info = &polaris12_device_info; - f2g = &gfx_v8_kfd2kgd; + gfx_target_version = 80003; + if (!vf) + f2g = &gfx_v8_kfd2kgd; break; case CHIP_VEGAM: - if (vf) - device_info = NULL; - else - device_info = &vegam_device_info; - f2g = &gfx_v8_kfd2kgd; + gfx_target_version = 80003; + if (!vf) + f2g = &gfx_v8_kfd2kgd; break; default: switch (adev->ip_versions[GC_HWIP][0]) { + /* Vega 10 */ case IP_VERSION(9, 0, 1): - if (vf) - device_info = &vega10_vf_device_info; - else - device_info = &vega10_device_info; + gfx_target_version = 90000; f2g = &gfx_v9_kfd2kgd; break; #ifdef KFD_SUPPORT_IOMMU_V2 + /* Raven */ case IP_VERSION(9, 1, 0): case IP_VERSION(9, 2, 2): - if (vf) - device_info = NULL; - else - device_info = &raven_device_info; - f2g = &gfx_v9_kfd2kgd; + gfx_target_version = 90002; + if (!vf) + f2g = &gfx_v9_kfd2kgd; break; #endif + /* Vega12 */ case IP_VERSION(9, 2, 1): - if (vf) - device_info = NULL; - else - device_info = &vega12_device_info; - f2g = &gfx_v9_kfd2kgd; + gfx_target_version = 90004; + if (!vf) + f2g = &gfx_v9_kfd2kgd; break; + /* Renoir */ case IP_VERSION(9, 3, 0): - if (vf) - device_info = NULL; - else - device_info = &renoir_device_info; - f2g = &gfx_v9_kfd2kgd; + gfx_target_version = 90012; + if (!vf) + f2g = &gfx_v9_kfd2kgd; break; + /* Vega20 */ case IP_VERSION(9, 4, 0): - if (vf) - device_info = NULL; - else - device_info = &vega20_device_info; - f2g = &gfx_v9_kfd2kgd; + gfx_target_version = 90006; + if (!vf) + f2g = &gfx_v9_kfd2kgd; break; + /* Arcturus */ case IP_VERSION(9, 4, 1): - device_info = &arcturus_device_info; + gfx_target_version = 90008; f2g = &arcturus_kfd2kgd; break; + /* Aldebaran */ case IP_VERSION(9, 4, 2): - device_info = &aldebaran_device_info; + gfx_target_version = 90010; f2g = &aldebaran_kfd2kgd; break; + /* Navi10 */ case IP_VERSION(10, 1, 10): - if (vf) - device_info = NULL; - else - device_info = &navi10_device_info; - f2g = &gfx_v10_kfd2kgd; + gfx_target_version = 100100; + if (!vf) + f2g = &gfx_v10_kfd2kgd; break; + /* Navi12 */ case IP_VERSION(10, 1, 2): - device_info = &navi12_device_info; + gfx_target_version = 100101; f2g = &gfx_v10_kfd2kgd; break; + /* Navi14 */ case IP_VERSION(10, 1, 1): - if (vf) - device_info = NULL; - else - device_info = &navi14_device_info; - f2g = &gfx_v10_kfd2kgd; + gfx_target_version = 100102; + if (!vf) + f2g = &gfx_v10_kfd2kgd; break; + /* Cyan Skillfish */ case IP_VERSION(10, 1, 3): - if (vf) - device_info = NULL; - else - device_info = &cyan_skillfish_device_info; - f2g = &gfx_v10_kfd2kgd; + gfx_target_version = 100103; + if (!vf) + f2g = &gfx_v10_kfd2kgd; break; + /* Sienna Cichlid */ case IP_VERSION(10, 3, 0): - device_info = 
&sienna_cichlid_device_info; + gfx_target_version = 100300; f2g = &gfx_v10_3_kfd2kgd; break; + /* Navy Flounder */ case IP_VERSION(10, 3, 2): - device_info = &navy_flounder_device_info; + gfx_target_version = 100301; f2g = &gfx_v10_3_kfd2kgd; break; + /* Van Gogh */ case IP_VERSION(10, 3, 1): - if (vf) - device_info = NULL; - else - device_info = &vangogh_device_info; - f2g = &gfx_v10_3_kfd2kgd; + gfx_target_version = 100303; + if (!vf) + f2g = &gfx_v10_3_kfd2kgd; break; + /* Dimgrey Cavefish */ case IP_VERSION(10, 3, 4): - device_info = &dimgrey_cavefish_device_info; + gfx_target_version = 100302; f2g = &gfx_v10_3_kfd2kgd; break; + /* Beige Goby */ case IP_VERSION(10, 3, 5): - device_info = &beige_goby_device_info; + gfx_target_version = 100304; f2g = &gfx_v10_3_kfd2kgd; break; + /* Yellow Carp */ case IP_VERSION(10, 3, 3): - if (vf) - device_info = NULL; - else - device_info = &yellow_carp_device_info; - f2g = &gfx_v10_3_kfd2kgd; + gfx_target_version = 100305; + if (!vf) + f2g = &gfx_v10_3_kfd2kgd; break; default: - return NULL; + break; } break; } - if (!device_info || !f2g) { + if (!f2g) { if (adev->ip_versions[GC_HWIP][0]) dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n", adev->ip_versions[GC_HWIP][0], vf ? "VF" : ""); @@ -741,7 +365,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) return NULL; kfd->adev = adev; - kfd->device_info = device_info; + kfd_device_info_init(kfd, vf, gfx_target_version); kfd->pdev = pdev; kfd->init_complete = false; kfd->kfd2kgd = f2g; @@ -760,7 +384,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) static void kfd_cwsr_init(struct kfd_dev *kfd) { - if (cwsr_enable && kfd->device_info->supports_cwsr) { + if (cwsr_enable && kfd->device_info.supports_cwsr) { if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) { BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_gfx8_hex; @@ -844,14 +468,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, */ kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev); if (!kfd->pci_atomic_requested && - kfd->device_info->needs_pci_atomics && - (!kfd->device_info->no_atomic_fw_version || - kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) { + kfd->device_info.needs_pci_atomics && + (!kfd->device_info.no_atomic_fw_version || + kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) { dev_info(kfd_device, "skipped device %x:%x, PCI rejects atomics %d<%d\n", kfd->pdev->vendor, kfd->pdev->device, kfd->mec_fw_version, - kfd->device_info->no_atomic_fw_version); + kfd->device_info.no_atomic_fw_version); return false; } @@ -868,7 +492,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, /* calculate max size of mqds needed for queues */ size = max_num_of_queues_per_device * - kfd->device_info->mqd_size_aligned; + kfd->device_info.mqd_size_aligned; /* * calculate max size of runlist packet. 
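The deleted per-ASIC tables above all encode the same handful of fields; the replacement kfd_device_info_set_sdma_queue_num() and kfd_device_info_init() derive those fields from the discovered IP versions instead. Below is a minimal stand-alone C sketch of that pattern, assuming a simplified IP_VERSION() packing and a trimmed stand-in for the kfd_device_info fields (not the kernel structures themselves):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's IP_VERSION(major, minor, revision) packing. */
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

struct device_info {
    uint32_t num_sdma_queues_per_engine;
    bool needs_pci_atomics;
    uint32_t no_atomic_fw_version;
};

/* Derive per-device capabilities from versioned IP blocks instead of
 * keeping one static table per ASIC, mirroring the shape of
 * kfd_device_info_init() above. */
static void device_info_init(struct device_info *info, uint32_t sdma_ver,
                             uint32_t gc_ver)
{
    switch (sdma_ver) {
    case IP_VERSION(4, 0, 0): /* Vega10 */
    case IP_VERSION(4, 0, 1): /* Vega12 */
    case IP_VERSION(4, 1, 0): /* Raven */
    case IP_VERSION(4, 1, 1): /* Raven */
    case IP_VERSION(4, 1, 2): /* Renoir */
    case IP_VERSION(5, 2, 1): /* Van Gogh */
    case IP_VERSION(5, 2, 3): /* Yellow Carp */
        info->num_sdma_queues_per_engine = 2;
        break;
    default: /* Vega20, Arcturus, Aldebaran, Navi1x, Navi2x, ... */
        info->num_sdma_queues_per_engine = 8;
        break;
    }

    /* PCIe atomics are required from Navi1x on, unless the MEC firmware
     * is new enough to cope without them; the cutoff differs per
     * generation (145 for Navi1x, 92 for Navi2x). */
    info->needs_pci_atomics = gc_ver >= IP_VERSION(10, 1, 1);
    if (gc_ver >= IP_VERSION(10, 3, 0))
        info->no_atomic_fw_version = 92;
    else if (gc_ver >= IP_VERSION(10, 1, 1))
        info->no_atomic_fw_version = 145;
}

int main(void)
{
    struct device_info info = { 0 };

    device_info_init(&info, IP_VERSION(5, 2, 0), IP_VERSION(10, 3, 0));
    printf("SDMA queues per engine: %u, PCI atomics needed below MEC fw %u\n",
           (unsigned int)info.num_sdma_queues_per_engine,
           (unsigned int)info.no_atomic_fw_version);
    return 0;
}

The payoff of keying on IP versions is that a new ASIC reusing an existing SDMA or GC block needs no new table, only (at most) a new case label.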
@@ -1143,7 +767,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) if (!kfd->init_complete) return; - if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) { + if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) { dev_err_once(kfd_device, "Ring entry too small\n"); return; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 2af2b3268171..4b6814949aad 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -47,7 +47,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, uint32_t filter_param); static int unmap_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param); + uint32_t filter_param, bool reset); static int map_queues_cpsch(struct device_queue_manager *dqm); @@ -108,13 +108,13 @@ static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) unsigned int get_num_sdma_queues(struct device_queue_manager *dqm) { return kfd_get_num_sdma_engines(dqm->dev) * - dqm->dev->device_info->num_sdma_queues_per_engine; + dqm->dev->device_info.num_sdma_queues_per_engine; } unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) { return kfd_get_num_xgmi_sdma_engines(dqm->dev) * - dqm->dev->device_info->num_sdma_queues_per_engine; + dqm->dev->device_info.num_sdma_queues_per_engine; } void program_sh_mem_settings(struct device_queue_manager *dqm, @@ -570,7 +570,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, /* Make sure the queue is unmapped before updating the MQD */ if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { retval = unmap_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false); if (retval) { pr_err("unmap queue failed\n"); goto out_unlock; @@ -1004,14 +1004,17 @@ static void uninitialize(struct device_queue_manager *dqm) static int start_nocpsch(struct device_queue_manager *dqm) { + int r = 0; + pr_info("SW scheduler is used"); init_interrupts(dqm); if (dqm->dev->adev->asic_type == CHIP_HAWAII) - return pm_init(&dqm->packet_mgr, dqm); - dqm->sched_running = true; + r = pm_init(&dqm->packet_mgr, dqm); + if (!r) + dqm->sched_running = true; - return 0; + return r; } static int stop_nocpsch(struct device_queue_manager *dqm) @@ -1223,7 +1226,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) } if (!dqm->is_hws_hang) - unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); + unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false); hanging = dqm->is_hws_hang || dqm->is_resetting; dqm->sched_running = false; @@ -1419,7 +1422,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm) /* dqm->lock mutex has to be locked before calling this function */ static int unmap_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param) + uint32_t filter_param, bool reset) { int retval = 0; struct mqd_manager *mqd_mgr; @@ -1432,7 +1435,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, return retval; retval = pm_send_unmap_queue(&dqm->packet_mgr, KFD_QUEUE_TYPE_COMPUTE, - filter, filter_param, false, 0); + filter, filter_param, reset, 0); if (retval) return retval; @@ -1476,6 +1479,21 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, return retval; } +/* only for compute queue */ +static int 
reset_queues_cpsch(struct device_queue_manager *dqm, + uint16_t pasid) +{ + int retval; + + dqm_lock(dqm); + + retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID, + pasid, true); + + dqm_unlock(dqm); + return retval; +} + /* dqm->lock mutex has to be locked before calling this function */ static int execute_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, @@ -1485,7 +1503,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, if (dqm->is_hws_hang) return -EIO; - retval = unmap_queues_cpsch(dqm, filter, filter_param); + retval = unmap_queues_cpsch(dqm, filter, filter_param, false); if (retval) return retval; @@ -1838,7 +1856,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * get_num_all_sdma_engines(dqm) * - dev->device_info->num_sdma_queues_per_engine + + dev->device_info.num_sdma_queues_per_engine + dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, @@ -1896,6 +1914,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) dqm->ops.evict_process_queues = evict_process_queues_cpsch; dqm->ops.restore_process_queues = restore_process_queues_cpsch; dqm->ops.get_wave_state = get_wave_state; + dqm->ops.reset_queues = reset_queues_cpsch; break; case KFD_SCHED_POLICY_NO_HWS: /* initialize dqm for no cp scheduling */ @@ -2082,7 +2101,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) { for (queue = 0; - queue < dqm->dev->device_info->num_sdma_queues_per_engine; + queue < dqm->dev->device_info.num_sdma_queues_per_engine; queue++) { r = dqm->dev->kfd2kgd->hqd_sdma_dump( dqm->dev->adev, pipe, queue, &dump, &n_regs); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 499fc0ea387f..e145e4deb53a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -81,6 +81,8 @@ struct device_process_node { * * @get_wave_state: Retrieves context save state and optionally copies the * control stack, if kept in the MQD, to the given userspace address. + * + * @reset_queues: reset queues which have consumed RAS poison */ struct device_queue_manager_ops { @@ -134,6 +136,9 @@ struct device_queue_manager_ops { void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size); + + int (*reset_queues)(struct device_queue_manager *dqm, + uint16_t pasid); }; struct device_queue_manager_asic_ops {
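The reset_queues hook wired up in device_queue_manager_init() above is consumed by the new RAS poison-consumption path in kfd_int_process_v9.c below: the poisoned process is marked exactly once, its compute queues are unmapped and reset by PASID, and only a failed reset escalates to a full GPU reset. Here is a rough user-space sketch of that control flow; the function names are hypothetical stand-ins for reset_queues_cpsch(), kfd_signal_poison_consumed_event() and amdgpu_amdkfd_ras_poison_consumption_handler():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the KFD/amdgpu entry points involved. */
static int reset_queues_by_pasid(unsigned int pasid)
{
    printf("unmap and reset all queues of pasid %u\n", pasid);
    return 0; /* 0 on success, negative errno on failure */
}

static void signal_poison_consumed_event(unsigned int pasid)
{
    printf("signal poison-consumed event to pasid %u\n", pasid);
}

static void ras_poison_consumption_handler(bool reset)
{
    /* reset == false: retire the poisoned pages only;
     * reset == true: fall back to a full GPU reset. */
    printf("page retirement%s\n", reset ? " plus gpu reset" : " only");
}

/* Mirrors event_interrupt_poison_consumption(): try the cheap per-process
 * queue reset first and let its result pick the recovery severity. */
static void handle_poison_consumption(unsigned int pasid)
{
    int ret = reset_queues_by_pasid(pasid);

    signal_poison_consumed_event(pasid);
    ras_poison_consumption_handler(ret != 0);
}

int main(void)
{
    handle_poison_consumption(42);
    return 0;
}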
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index 768d153acff4..0dbcf54657ed 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -48,7 +48,7 @@ /* # of doorbell bytes allocated for each process. */ size_t kfd_doorbell_process_slice(struct kfd_dev *kfd) { - return roundup(kfd->device_info->doorbell_size * + return roundup(kfd->device_info.doorbell_size * KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, PAGE_SIZE); } @@ -180,7 +180,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) return NULL; - inx *= kfd->device_info->doorbell_size / sizeof(u32); + inx *= kfd->device_info.doorbell_size / sizeof(u32); /* * Calculating the kernel doorbell offset using the first @@ -201,7 +201,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr) unsigned int inx; inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr) - * sizeof(u32) / kfd->device_info->doorbell_size; + * sizeof(u32) / kfd->device_info.doorbell_size; mutex_lock(&kfd->doorbell_mutex); __clear_bit(inx, kfd->doorbell_available_index); @@ -239,7 +239,7 @@ unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd, return kfd->doorbell_base_dw_offset + pdd->doorbell_index * kfd_doorbell_process_slice(kfd) / sizeof(u32) + - doorbell_id * kfd->device_info->doorbell_size / sizeof(u32); + doorbell_id * kfd->device_info.doorbell_size / sizeof(u32); } uint64_t kfd_get_number_elems(struct kfd_dev *kfd) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 20512a4e9a91..e8bc28009c22 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -89,6 +89,44 @@ enum SQ_INTERRUPT_ERROR_TYPE { #define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000 #define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20 +static void event_interrupt_poison_consumption(struct kfd_dev *dev, + uint16_t pasid, uint16_t source_id) +{ + int ret = -EINVAL; + struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); + + if (!p) + return; + + /* all queues of a process will be unmapped at one time */ + if (atomic_read(&p->poison)) { + kfd_unref_process(p); + return; + } + + atomic_set(&p->poison, 1); + kfd_unref_process(p); + + switch (source_id) { + case SOC15_INTSRC_SQ_INTERRUPT_MSG: + if (dev->dqm->ops.reset_queues) + ret = dev->dqm->ops.reset_queues(dev->dqm, pasid); + break; + case SOC15_INTSRC_SDMA_ECC: + default: + break; + } + + kfd_signal_poison_consumed_event(dev, pasid); + + /* if resetting the queues succeeds, do page retirement without gpu reset; + if resetting the queues fails, fall back to a full gpu reset */ + if (!ret) + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false); + else + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true); +} + static bool event_interrupt_isr_v9(struct kfd_dev *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, @@ -135,7 +173,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, *patched_flag = true; memcpy(patched_ihre, ih_ring_entry, - dev->device_info->ih_ring_entry_size); + dev->device_info.ih_ring_entry_size); pasid = dev->dqm->vmid_pasid[vmid]; @@ -159,6 +197,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, */ return source_id == SOC15_INTSRC_CP_END_OF_PIPE || source_id == SOC15_INTSRC_SDMA_TRAP || + source_id == SOC15_INTSRC_SDMA_ECC || source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || source_id == SOC15_INTSRC_CP_BAD_OPCODE || ((client_id == SOC15_IH_CLIENTID_VMC || @@ -230,8 +269,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev, sq_intr_err); if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST && sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) { - kfd_signal_poison_consumed_event(dev, pasid); -
amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev); + event_interrupt_poison_consumption(dev, pasid, source_id); return; } break; @@ -252,8 +290,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev, if (source_id == SOC15_INTSRC_SDMA_TRAP) { kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28); } else if (source_id == SOC15_INTSRC_SDMA_ECC) { - kfd_signal_poison_consumed_event(dev, pasid); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev); + event_interrupt_poison_consumption(dev, pasid, source_id); return; } } else if (client_id == SOC15_IH_CLIENTID_VMC || diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index bc47f6a44456..81887c2013c9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -54,7 +54,7 @@ int kfd_interrupt_init(struct kfd_dev *kfd) int r; r = kfifo_alloc(&kfd->ih_fifo, - KFD_IH_NUM_ENTRIES * kfd->device_info->ih_ring_entry_size, + KFD_IH_NUM_ENTRIES * kfd->device_info.ih_ring_entry_size, GFP_KERNEL); if (r) { dev_err(kfd_chardev(), "Failed to allocate IH fifo\n"); @@ -114,8 +114,8 @@ bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry) int count; count = kfifo_in(&kfd->ih_fifo, ih_ring_entry, - kfd->device_info->ih_ring_entry_size); - if (count != kfd->device_info->ih_ring_entry_size) { + kfd->device_info.ih_ring_entry_size); + if (count != kfd->device_info.ih_ring_entry_size) { dev_err_ratelimited(kfd_chardev(), "Interrupt ring overflow, dropping interrupt %d\n", count); @@ -133,11 +133,11 @@ static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry) int count; count = kfifo_out(&kfd->ih_fifo, ih_ring_entry, - kfd->device_info->ih_ring_entry_size); + kfd->device_info.ih_ring_entry_size); - WARN_ON(count && count != kfd->device_info->ih_ring_entry_size); + WARN_ON(count && count != kfd->device_info.ih_ring_entry_size); - return count == kfd->device_info->ih_ring_entry_size; + return count == kfd->device_info.ih_ring_entry_size; } static void interrupt_wq(struct work_struct *work) @@ -146,13 +146,13 @@ static void interrupt_wq(struct work_struct *work) interrupt_work); uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE]; - if (dev->device_info->ih_ring_entry_size > sizeof(ih_ring_entry)) { + if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) { dev_err_once(kfd_chardev(), "Ring entry too small\n"); return; } while (dequeue_ih_ring_entry(dev, ih_ring_entry)) - dev->device_info->event_interrupt_class->interrupt_wq(dev, + dev->device_info.event_interrupt_class->interrupt_wq(dev, ih_ring_entry); } @@ -163,7 +163,7 @@ bool interrupt_is_wanted(struct kfd_dev *dev, /* integer and bitwise OR so there is no boolean short-circuiting */ unsigned int wanted = 0; - wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev, + wanted |= dev->device_info.event_interrupt_class->interrupt_isr(dev, ih_ring_entry, patched_ihre, flag); return wanted != 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 73f2257acc23..66ad8d0b8f7f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -89,7 +89,7 @@ int kfd_iommu_device_init(struct kfd_dev *kfd) } pasid_limit = min_t(unsigned int, - (unsigned int)(1 << kfd->device_info->max_pasid_bits), + (unsigned int)(1 << kfd->device_info.max_pasid_bits), iommu_info.max_pasids); if (!kfd_set_pasid_limit(pasid_limit)) { diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 406479a369a9..16f8bc4ca7f6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -111,7 +111,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, kq->rptr_kernel = kq->rptr_mem->cpu_ptr; kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr; - retval = kfd_gtt_sa_allocate(dev, dev->device_info->doorbell_size, + retval = kfd_gtt_sa_allocate(dev, dev->device_info.doorbell_size, &kq->wptr_mem); if (retval != 0) @@ -297,7 +297,7 @@ void kq_submit_packet(struct kernel_queue *kq) } pr_debug("\n"); #endif - if (kq->dev->device_info->doorbell_size == 8) { + if (kq->dev->device_info.doorbell_size == 8) { *kq->wptr64_kernel = kq->pending_wptr64; write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, kq->pending_wptr64); @@ -310,7 +310,7 @@ void kq_submit_packet(struct kernel_queue *kq) void kq_rollback_packet(struct kernel_queue *kq) { - if (kq->dev->device_info->doorbell_size == 8) { + if (kq->dev->device_info.doorbell_size == 8) { kq->pending_wptr64 = *kq->wptr64_kernel; kq->pending_wptr = *kq->wptr_kernel % (kq->queue->properties.queue_size / 4); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index d84cec0022b1..ed5385137f48 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -108,8 +108,8 @@ error_free: * svm_migrate_copy_memory_gart - sdma copy data between ram and vram * * @adev: amdgpu device the sdma ring running - * @src: source page address array - * @dst: destination page address array + * @sys: system DMA pointer to be copied + * @vram: vram destination DMA pointer * @npages: number of pages to copy * @direction: enum MIGRATION_COPY_DIR * @mfence: output, sdma fence to signal after sdma is done @@ -549,7 +549,7 @@ static void svm_migrate_page_free(struct page *page) if (svm_bo) { pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref)); - svm_range_bo_unref(svm_bo); + svm_range_bo_unref_async(svm_bo); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 7b4118915bf6..e2825ad4d699 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -71,7 +71,7 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev, return NULL; offset = (q->sdma_engine_id * - dev->device_info->num_sdma_queues_per_engine + + dev->device_info.num_sdma_queues_per_engine + q->sdma_queue_id) * dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c index 08442e7d9944..3c0658e32e93 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c @@ -110,8 +110,8 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer, return 0; } -int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, - struct scheduling_resources *res) +static int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, + struct scheduling_resources *res) { struct pm4_mes_set_resources *packet; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 7ea528941951..ea68f3b3a4e9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -195,7 +195,6 @@ struct 
kfd_event_interrupt_class { }; struct kfd_device_info { - const char *asic_name; uint32_t gfx_target_version; const struct kfd_event_interrupt_class *event_interrupt_class; unsigned int max_pasid_bits; @@ -231,7 +230,7 @@ struct kfd_vmid_info { struct kfd_dev { struct amdgpu_device *adev; - const struct kfd_device_info *device_info; + struct kfd_device_info device_info; struct pci_dev *pdev; struct drm_device *ddev; @@ -857,6 +856,8 @@ struct kfd_process { struct svm_range_list svms; bool xnack_enabled; + + atomic_t poison; }; #define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index d4c8a6948a9f..d1145da5348f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -251,14 +251,13 @@ cleanup: } /** - * @kfd_get_cu_occupancy - Collect number of waves in-flight on this device + * kfd_get_cu_occupancy - Collect number of waves in-flight on this device * by current process. Translates acquired wave count into number of compute units * that are occupied. * - * @atr: Handle of attribute that allows reporting of wave count. The attribute + * @attr: Handle of attribute that allows reporting of wave count. The attribute * handle encapsulates GPU device it is associated with, thereby allowing collection * of waves in flight, etc - * * @buffer: Handle of user provided buffer updated with wave count * * Return: Number of bytes written to user buffer or an error value @@ -462,6 +461,7 @@ static struct attribute *procfs_queue_attrs[] = { &attr_queue_gpuid, NULL }; +ATTRIBUTE_GROUPS(procfs_queue); static const struct sysfs_ops procfs_queue_ops = { .show = kfd_procfs_queue_show, @@ -469,7 +469,7 @@ static const struct sysfs_ops procfs_queue_ops = { static struct kobj_type procfs_queue_type = { .sysfs_ops = &procfs_queue_ops, - .default_attrs = procfs_queue_attrs, + .default_groups = procfs_queue_groups, }; static const struct sysfs_ops procfs_stats_ops = { @@ -1011,7 +1011,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) free_pages((unsigned long)pdd->qpd.cwsr_kaddr, get_order(KFD_CWSR_TBA_TMA_SIZE)); - kfree(pdd->qpd.doorbell_bitmap); + bitmap_free(pdd->qpd.doorbell_bitmap); idr_destroy(&pdd->alloc_idr); kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index); @@ -1433,9 +1433,8 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, if (!KFD_IS_SOC15(dev)) return 0; - qpd->doorbell_bitmap = - kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, - BITS_PER_BYTE), GFP_KERNEL); + qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, + GFP_KERNEL); if (!qpd->doorbell_bitmap) return -ENOMEM; @@ -1447,9 +1446,9 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) { if (i >= range_start && i <= range_end) { - set_bit(i, qpd->doorbell_bitmap); - set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, - qpd->doorbell_bitmap); + __set_bit(i, qpd->doorbell_bitmap); + __set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, + qpd->doorbell_bitmap); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 4f8464658daf..5e5c84a8e1ef 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -135,9 +135,8 @@ void kfd_process_dequeue_from_all_devices(struct kfd_process *p) int pqm_init(struct process_queue_manager *pqm, 
struct kfd_process *p) { INIT_LIST_HEAD(&pqm->queues); - pqm->queue_slot_bitmap = - kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, - BITS_PER_BYTE), GFP_KERNEL); + pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, + GFP_KERNEL); if (!pqm->queue_slot_bitmap) return -ENOMEM; pqm->process = p; @@ -159,7 +158,7 @@ void pqm_uninit(struct process_queue_manager *pqm) kfree(pqn); } - kfree(pqm->queue_slot_bitmap); + bitmap_free(pqm->queue_slot_bitmap); pqm->queue_slot_bitmap = NULL; } @@ -220,7 +219,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, * Hence we also check the type as well */ if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ)) - max_queues = dev->device_info->max_no_of_hqd/2; + max_queues = dev->device_info.max_no_of_hqd/2; if (pdd->qpd.queue_count >= max_queues) return -ENOSPC; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 755265f6c53b..f2805ba74c80 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -107,7 +107,7 @@ static void svm_range_add_to_svms(struct svm_range *prange) pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange, prange->start, prange->last); - list_add_tail(&prange->list, &prange->svms->list); + list_move_tail(&prange->list, &prange->svms->list); prange->it_node.start = prange->start; prange->it_node.last = prange->last; interval_tree_insert(&prange->it_node, &prange->svms->objects); @@ -295,8 +295,6 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, prange->last = last; INIT_LIST_HEAD(&prange->list); INIT_LIST_HEAD(&prange->update_list); - INIT_LIST_HEAD(&prange->remove_list); - INIT_LIST_HEAD(&prange->insert_list); INIT_LIST_HEAD(&prange->svm_bo_list); INIT_LIST_HEAD(&prange->deferred_list); INIT_LIST_HEAD(&prange->child_list); @@ -332,6 +330,8 @@ static void svm_range_bo_release(struct kref *kref) struct svm_range_bo *svm_bo; svm_bo = container_of(kref, struct svm_range_bo, kref); + pr_debug("svm_bo 0x%p\n", svm_bo); + spin_lock(&svm_bo->list_lock); while (!list_empty(&svm_bo->range_list)) { struct svm_range *prange = @@ -365,12 +365,33 @@ static void svm_range_bo_release(struct kref *kref) kfree(svm_bo); } -void svm_range_bo_unref(struct svm_range_bo *svm_bo) +static void svm_range_bo_wq_release(struct work_struct *work) { - if (!svm_bo) - return; + struct svm_range_bo *svm_bo; - kref_put(&svm_bo->kref, svm_range_bo_release); + svm_bo = container_of(work, struct svm_range_bo, release_work); + svm_range_bo_release(&svm_bo->kref); +} + +static void svm_range_bo_release_async(struct kref *kref) +{ + struct svm_range_bo *svm_bo; + + svm_bo = container_of(kref, struct svm_range_bo, kref); + pr_debug("svm_bo 0x%p\n", svm_bo); + INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release); + schedule_work(&svm_bo->release_work); +} + +void svm_range_bo_unref_async(struct svm_range_bo *svm_bo) +{ + kref_put(&svm_bo->kref, svm_range_bo_release_async); +} + +static void svm_range_bo_unref(struct svm_range_bo *svm_bo) +{ + if (svm_bo) + kref_put(&svm_bo->kref, svm_range_bo_release); } static bool @@ -704,6 +725,61 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange, } } +static bool +svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange, + uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) +{ + uint32_t i; + int gpuidx; + + for (i = 0; i < nattr; i++) { + switch (attrs[i].type) { + case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: + if (prange->preferred_loc != 
attrs[i].value) + return false; + break; + case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: + /* Prefetch should always trigger a migration even + * if the value of the attribute didn't change. + */ + return false; + case KFD_IOCTL_SVM_ATTR_ACCESS: + case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: + case KFD_IOCTL_SVM_ATTR_NO_ACCESS: + gpuidx = kfd_process_gpuidx_from_gpuid(p, + attrs[i].value); + if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) { + if (test_bit(gpuidx, prange->bitmap_access) || + test_bit(gpuidx, prange->bitmap_aip)) + return false; + } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) { + if (!test_bit(gpuidx, prange->bitmap_access)) + return false; + } else { + if (!test_bit(gpuidx, prange->bitmap_aip)) + return false; + } + break; + case KFD_IOCTL_SVM_ATTR_SET_FLAGS: + if ((prange->flags & attrs[i].value) != attrs[i].value) + return false; + break; + case KFD_IOCTL_SVM_ATTR_CLR_FLAGS: + if ((prange->flags & attrs[i].value) != 0) + return false; + break; + case KFD_IOCTL_SVM_ATTR_GRANULARITY: + if (prange->granularity != attrs[i].value) + return false; + break; + default: + WARN_ONCE(1, "svm_range_check_attrs wasn't called?"); + } + } + + return true; +} + /** * svm_range_debug_dump - print all range information from svms * @svms: svm range list header @@ -741,14 +817,6 @@ static void svm_range_debug_dump(struct svm_range_list *svms) } } -static bool -svm_range_is_same_attrs(struct svm_range *old, struct svm_range *new) -{ - return (old->prefetch_loc == new->prefetch_loc && - old->flags == new->flags && - old->granularity == new->granularity); -} - static int svm_range_split_array(void *ppnew, void *ppold, size_t size, uint64_t old_start, uint64_t old_n, @@ -941,26 +1009,26 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last, } static int -svm_range_split_tail(struct svm_range *prange, struct svm_range *new, +svm_range_split_tail(struct svm_range *prange, uint64_t new_last, struct list_head *insert_list) { struct svm_range *tail; int r = svm_range_split(prange, prange->start, new_last, &tail); if (!r) - list_add(&tail->insert_list, insert_list); + list_add(&tail->list, insert_list); return r; } static int -svm_range_split_head(struct svm_range *prange, struct svm_range *new, +svm_range_split_head(struct svm_range *prange, uint64_t new_start, struct list_head *insert_list) { struct svm_range *head; int r = svm_range_split(prange, new_start, prange->last, &head); if (!r) - list_add(&head->insert_list, insert_list); + list_add(&head->list, insert_list); return r; } @@ -1169,7 +1237,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned long npages, bool readonly, dma_addr_t *dma_addr, struct amdgpu_device *bo_adev, struct dma_fence **fence) { - struct amdgpu_bo_va bo_va; bool table_freed = false; uint64_t pte_flags; unsigned long last_start; @@ -1182,9 +1249,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms, last_start, last_start + npages - 1, readonly); - if (prange->svm_bo && prange->ttm_res) - bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev); - for (i = offset; i < offset + npages; i++) { last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN; dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN; @@ -1650,6 +1714,10 @@ out_reschedule: /** * svm_range_evict - evict svm range + * @prange: svm range structure + * @mm: current process mm_struct + * @start: starting process queue number + * @last: last process queue number * * Stop all queues of the process to 
ensure GPU doesn't access the memory, then * return to let CPU evict the buffer and proceed CPU pagetable update. @@ -1754,46 +1822,49 @@ static struct svm_range *svm_range_clone(struct svm_range *old) } /** - * svm_range_handle_overlap - split overlap ranges - * @svms: svm range list header - * @new: range added with this attributes - * @start: range added start address, in pages - * @last: range last address, in pages - * @update_list: output, the ranges attributes are updated. For set_attr, this - * will do validation and map to GPUs. For unmap, this will be - * removed and unmap from GPUs - * @insert_list: output, the ranges will be inserted into svms, attributes are - * not changes. For set_attr, this will add into svms. - * @remove_list:output, the ranges will be removed from svms - * @left: the remaining range after overlap, For set_attr, this will be added - * as new range. + * svm_range_add - add svm range and handle overlap + * @p: the process to add this range to + * @start: range start address, page size aligned + * @size: range size, page size aligned + * @nattr: number of attributes + * @attrs: array of attributes + * @update_list: output, the ranges that need validation and GPU mapping updates + * @insert_list: output, the ranges to insert into svms + * @remove_list: output, the ranges that are replaced and need removing from svms * - * Total have 5 overlap cases. + * Check if the virtual address range has overlap with any existing ranges, + * split partly overlapping ranges and add new ranges in the gaps. All changes + * should be applied to the range_list and interval tree transactionally. If + * any range split or allocation fails, the entire update fails. Therefore any + * existing overlapping svm_ranges are cloned and the original svm_ranges left + * unchanged. * - * This function handles overlap of an address interval with existing - * struct svm_ranges for applying new attributes. This may require - * splitting existing struct svm_ranges. All changes should be applied to - * the range_list and interval tree transactionally. If any split operation - * fails, the entire update fails. Therefore the existing overlapping - * svm_ranges are cloned and the original svm_ranges left unchanged. If the - * transaction succeeds, the modified clones are added and the originals - * freed. Otherwise the clones are removed and the old svm_ranges remain. + * If the transaction succeeds, the caller can update and insert clones and + * new ranges, then free the originals. * - * Context: The caller must hold svms->lock + * Otherwise the caller can free the clones and new ranges, while the old + * svm_ranges remain unchanged. + * + * Context: Process context, caller must hold svms->lock + * + * Return: + * 0 - OK, otherwise error code */ static int -svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new, - unsigned long start, unsigned long last, - struct list_head *update_list, - struct list_head *insert_list, - struct list_head *remove_list, - unsigned long *left) +svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, + uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs, + struct list_head *update_list, struct list_head *insert_list, + struct list_head *remove_list) { + unsigned long last = start + size - 1UL; + struct svm_range_list *svms = &p->svms; struct interval_tree_node *node; struct svm_range *prange; struct svm_range *tmp; int r = 0; + pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last); + INIT_LIST_HEAD(update_list); INIT_LIST_HEAD(insert_list); INIT_LIST_HEAD(remove_list);
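The transaction described in the kernel-doc above hinges on interval splitting: a clone of an existing range that partly overlaps the update is cut into a head and/or tail that keep the old attributes, while the overlapping middle receives the new ones. A small self-contained sketch of that split arithmetic on plain inclusive intervals (simplified stand-ins, not the kfd interval-tree types):

#include <stdint.h>
#include <stdio.h>

struct range {
    uint64_t start, last; /* inclusive bounds, in pages */
};

/* Split an existing range against an update [start, last] the way
 * svm_range_add() does: an optional head before the update, the
 * overlapping middle, and an optional tail after it. Returns how
 * many entries of out[0..2] were produced. */
static int split_overlap(struct range old, uint64_t start, uint64_t last,
                         struct range out[3])
{
    int n = 0;

    if (old.start < start) /* head keeps the old attributes */
        out[n++] = (struct range){ old.start, start - 1 };
    out[n++] = (struct range){ /* middle gets the new attributes */
        old.start > start ? old.start : start,
        old.last < last ? old.last : last,
    };
    if (old.last > last) /* tail keeps the old attributes */
        out[n++] = (struct range){ last + 1, old.last };
    return n;
}

int main(void)
{
    struct range out[3];
    /* Existing [100, 400] updated over [200, 300] yields three pieces. */
    int n = split_overlap((struct range){ 100, 400 }, 200, 300, out);

    for (int i = 0; i < n; i++)
        printf("[%llu, %llu]\n", (unsigned long long)out[i].start,
               (unsigned long long)out[i].last);
    return 0;
}

On old = [100, 400] and an update over [200, 300] this prints [100, 199], [200, 300] and [301, 400], matching the cases handled by svm_range_split_head() and svm_range_split_tail() in the hunks that follow.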
@@ -1801,37 +1872,44 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new, node = interval_tree_iter_first(&svms->objects, start, last); while (node) { struct interval_tree_node *next; - struct svm_range *old; unsigned long next_start; pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start, node->last); - old = container_of(node, struct svm_range, it_node); + prange = container_of(node, struct svm_range, it_node); next = interval_tree_iter_next(node, start, last); next_start = min(node->last, last) + 1; - if (node->start < start || node->last > last) { - /* node intersects the updated range, clone+split it */ + if (svm_range_is_same_attrs(p, prange, nattr, attrs)) { + /* nothing to do */ + } else if (node->start < start || node->last > last) { + /* node intersects the update range and its attributes + * will change.
Clone and split it, apply updates only + * to the overlapping part + */ + struct svm_range *old = prange; + prange = svm_range_clone(old); if (!prange) { r = -ENOMEM; goto out; } - list_add(&old->remove_list, remove_list); - list_add(&prange->insert_list, insert_list); + list_add(&old->update_list, remove_list); + list_add(&prange->list, insert_list); + list_add(&prange->update_list, update_list); if (node->start < start) { pr_debug("change old range start\n"); - r = svm_range_split_head(prange, new, start, + r = svm_range_split_head(prange, start, insert_list); if (r) goto out; } if (node->last > last) { pr_debug("change old range last\n"); - r = svm_range_split_tail(prange, new, last, + r = svm_range_split_tail(prange, last, insert_list); if (r) goto out; @@ -1840,22 +1918,18 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new, /* The node is contained within start..last, * just update it */ - prange = old; - } - - if (!svm_range_is_same_attrs(prange, new)) list_add(&prange->update_list, update_list); + } /* insert a new node if needed */ if (node->start > start) { - prange = svm_range_new(prange->svms, start, - node->start - 1); + prange = svm_range_new(svms, start, node->start - 1); if (!prange) { r = -ENOMEM; goto out; } - list_add(&prange->insert_list, insert_list); + list_add(&prange->list, insert_list); list_add(&prange->update_list, update_list); } @@ -1863,12 +1937,20 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new, start = next_start; } - if (left && start <= last) - *left = last - start + 1; + /* add a final range at the end if needed */ + if (start <= last) { + prange = svm_range_new(svms, start, last); + if (!prange) { + r = -ENOMEM; + goto out; + } + list_add(&prange->list, insert_list); + list_add(&prange->update_list, update_list); + } out: if (r) - list_for_each_entry_safe(prange, tmp, insert_list, insert_list) + list_for_each_entry_safe(prange, tmp, insert_list, list) svm_range_free(prange); return r; @@ -1970,7 +2052,7 @@ restart: pr_debug("drain retry fault gpu %d svms %p\n", i, svms); - amdgpu_ih_wait_on_checkpoint_process(pdd->dev->adev, + amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev, &pdd->dev->adev->irq.ih1); pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms); } @@ -2161,6 +2243,9 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, /** * svm_range_cpu_invalidate_pagetables - interval notifier callback + * @mni: mmu_interval_notifier struct + * @range: mmu_notifier_range struct + * @cur_seq: value to pass to mmu_interval_set_seq() * * If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it * is from migration, or CPU page invalidation callback. @@ -2190,8 +2275,8 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, start = mni->interval_tree.start; last = mni->interval_tree.last; - start = (start > range->start ? start : range->start) >> PAGE_SHIFT; - last = (last < (range->end - 1) ? 
last : range->end - 1) >> PAGE_SHIFT; + start = max(start, range->start) >> PAGE_SHIFT; + last = min(last, range->end - 1) >> PAGE_SHIFT; pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n", start, last, range->start >> PAGE_SHIFT, (range->end - 1) >> PAGE_SHIFT, @@ -2884,59 +2969,6 @@ svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size) } /** - * svm_range_add - add svm range and handle overlap - * @p: the range add to this process svms - * @start: page size aligned - * @size: page size aligned - * @nattr: number of attributes - * @attrs: array of attributes - * @update_list: output, the ranges need validate and update GPU mapping - * @insert_list: output, the ranges need insert to svms - * @remove_list: output, the ranges are replaced and need remove from svms - * - * Check if the virtual address range has overlap with the registered ranges, - * split the overlapped range, copy and adjust pages address and vram nodes in - * old and new ranges. - * - * Context: Process context, caller must hold svms->lock - * - * Return: - * 0 - OK, otherwise error code - */ -static int -svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, - uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs, - struct list_head *update_list, struct list_head *insert_list, - struct list_head *remove_list) -{ - uint64_t last = start + size - 1UL; - struct svm_range_list *svms; - struct svm_range new = {0}; - struct svm_range *prange; - unsigned long left = 0; - int r = 0; - - pr_debug("svms 0x%p [0x%llx 0x%llx]\n", &p->svms, start, last); - - svm_range_apply_attrs(p, &new, nattr, attrs); - - svms = &p->svms; - - r = svm_range_handle_overlap(svms, &new, start, last, update_list, - insert_list, remove_list, &left); - if (r) - return r; - - if (left) { - prange = svm_range_new(svms, last - left + 1, last); - list_add(&prange->insert_list, insert_list); - list_add(&prange->update_list, update_list); - } - - return 0; -} - -/** * svm_range_best_prefetch_location - decide the best prefetch location * @prange: svm range structure * @@ -3202,7 +3234,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, goto out; } /* Apply changes as a transaction */ - list_for_each_entry_safe(prange, next, &insert_list, insert_list) { + list_for_each_entry_safe(prange, next, &insert_list, list) { svm_range_add_to_svms(prange); svm_range_add_notifier_locked(mm, prange); } @@ -3210,8 +3242,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, svm_range_apply_attrs(p, prange, nattr, attrs); /* TODO: unmap ranges from GPU that lost access */ } - list_for_each_entry_safe(prange, next, &remove_list, - remove_list) { + list_for_each_entry_safe(prange, next, &remove_list, update_list) { pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange, prange->start, prange->last); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 6dc91c33e80f..949b477e2f4c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -48,6 +48,7 @@ struct svm_range_bo { struct work_struct eviction_work; struct svm_range_list *svms; uint32_t evicting; + struct work_struct release_work; }; enum svm_work_list_ops { @@ -75,8 +76,6 @@ struct svm_work_list_item { * aligned, page size is (last - start + 1) * @list: link list node, used to scan all ranges of svms * @update_list:link list node used to add to update_list - * @remove_list:link list node used to add to remove list - * 
@insert_list:link list node used to add to insert list * @mapping: bo_va mapping structure to create and update GPU page table * @npages: number of pages * @dma_addr: dma mapping address on each GPU for system memory physical page @@ -112,8 +111,6 @@ struct svm_range { struct interval_tree_node it_node; struct list_head list; struct list_head update_list; - struct list_head remove_list; - struct list_head insert_list; uint64_t npages; dma_addr_t *dma_addr[MAX_GPU_INSTANCE]; struct ttm_resource *ttm_res; @@ -195,7 +192,7 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s */ #define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0) -void svm_range_bo_unref(struct svm_range_bo *svm_bo); +void svm_range_bo_unref_async(struct svm_range_bo *svm_bo); #else struct kfd_process; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 2d44b26b6657..948fbb39336e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -503,7 +503,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, if (dev->gpu) { log_max_watch_addr = - __ilog2_u32(dev->gpu->device_info->num_of_watch_points); + __ilog2_u32(dev->gpu->device_info.num_of_watch_points); if (log_max_watch_addr) { dev->node_props.capability |= @@ -1285,6 +1285,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu) void *crat_image = NULL; size_t image_size = 0; int proximity_domain; + int i; + const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type]; INIT_LIST_HEAD(&temp_topology_device_list); @@ -1370,13 +1372,17 @@ int kfd_topology_add_device(struct kfd_dev *gpu) amdgpu_amdkfd_get_cu_info(dev->gpu->adev, &cu_info); - strncpy(dev->node_props.name, gpu->device_info->asic_name, - KFD_TOPOLOGY_PUBLIC_NAME_SIZE); + for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1; i++) { + dev->node_props.name[i] = __tolower(asic_name[i]); + if (asic_name[i] == '\0') + break; + } + dev->node_props.name[i] = '\0'; dev->node_props.simd_arrays_per_engine = cu_info.num_shader_arrays_per_engine; - dev->node_props.gfx_target_version = gpu->device_info->gfx_target_version; + dev->node_props.gfx_target_version = gpu->device_info.gfx_target_version; dev->node_props.vendor_id = gpu->pdev->vendor; dev->node_props.device_id = gpu->pdev->device; dev->node_props.capability |= @@ -1396,7 +1402,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.num_sdma_xgmi_engines = kfd_get_num_xgmi_sdma_engines(gpu); dev->node_props.num_sdma_queues_per_engine = - gpu->device_info->num_sdma_queues_per_engine; + gpu->device_info.num_sdma_queues_per_engine; dev->node_props.num_gws = (dev->gpu->gws && dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? dev->gpu->adev->gds.gws_size : 0; @@ -1572,7 +1578,7 @@ void kfd_double_confirm_iommu_support(struct kfd_dev *gpu) gpu->use_iommu_v2 = false; - if (!gpu->device_info->needs_iommu_device) + if (!gpu->device_info.needs_iommu_device) return; down_read(&topology_lock); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e8a994559b65..0eeb394e949c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -624,7 +624,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ /** - * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command. 
+ * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command. * @adev: amdgpu_device pointer * @notify: dmub notification structure * @@ -632,7 +632,8 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) * Copies dmub notification to DM which is to be read by AUX command. * issuing thread and also signals the event to wake up the thread. */ -void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify) +static void dmub_aux_setconfig_callback(struct amdgpu_device *adev, + struct dmub_notification *notify) { if (adev->dm.dmub_notify) memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); @@ -648,7 +649,8 @@ void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notific * Dmub Hpd interrupt processing callback. Gets displayindex through the * ink index and calls helper to do the processing. */ -void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify) +static void dmub_hpd_callback(struct amdgpu_device *adev, + struct dmub_notification *notify) { struct amdgpu_dm_connector *aconnector; struct amdgpu_dm_connector *hpd_aconnector = NULL; @@ -656,7 +658,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not struct drm_connector_list_iter iter; struct dc_link *link; uint8_t link_index = 0; - struct drm_device *dev = adev->dm.ddev; + struct drm_device *dev; if (adev == NULL) return; @@ -673,6 +675,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not link_index = notify->link_index; link = adev->dm.dc->links[link_index]; + dev = adev->dm.ddev; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { @@ -705,8 +708,10 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not * to dmub interrupt handling thread * Return: true if successfully registered, false if there is existing registration */ -bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type, -dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload) +static bool register_dmub_notify_callback(struct amdgpu_device *adev, + enum dmub_notification_type type, + dmub_notify_interrupt_callback_t callback, + bool dmub_int_thread_offload) { if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) { adev->dm.dmub_callback[type] = callback; @@ -1050,6 +1055,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) return 0; } + /* Reset DMCUB if it was previously running - before we overwrite its memory. */ + status = dmub_srv_hw_reset(dmub_srv); + if (status != DMUB_STATUS_OK) + DRM_WARN("Error resetting DMUB HW: %d\n", status); + hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; fw_inst_const = dmub_fw->data + @@ -1152,6 +1162,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) return 0; } +static void dm_dmub_hw_resume(struct amdgpu_device *adev) +{ + struct dmub_srv *dmub_srv = adev->dm.dmub_srv; + enum dmub_status status; + bool init; + + if (!dmub_srv) { + /* DMUB isn't supported on the ASIC. */ + return; + } + + status = dmub_srv_is_hw_init(dmub_srv, &init); + if (status != DMUB_STATUS_OK) + DRM_WARN("DMUB hardware init check failed: %d\n", status); + + if (status == DMUB_STATUS_OK && init) { + /* Wait for firmware load to finish. 
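dm_dmub_hw_resume, introduced above, separates a warm resume (hardware reports initialized, so only wait for firmware auto-load) from a cold start (run the full init), and falls back to the cold path when the init query itself fails. A condensed sketch of the decision with hypothetical helper names standing in for the dmub_srv calls:

struct fw_ctx;
int fw_query_hw_init(struct fw_ctx *ctx, bool *initialized);
int fw_wait_autoload(struct fw_ctx *ctx, unsigned int timeout_us);
int fw_full_init(struct fw_ctx *ctx);

static int fw_resume(struct fw_ctx *ctx)
{
        bool initialized = false;
        int err;

        err = fw_query_hw_init(ctx, &initialized);

        if (!err && initialized)
                return fw_wait_autoload(ctx, 100000);   /* warm path */

        return fw_full_init(ctx);       /* cold path, also taken on query failure */
}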
*/ + status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); + if (status != DMUB_STATUS_OK) + DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); + } else { + /* Perform the full hardware initialization. */ + dm_dmub_hw_init(adev); + } +} + #if defined(CONFIG_DRM_AMD_DC_DCN) static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) { @@ -1453,6 +1489,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING) init_data.flags.edp_no_power_sequencing = true; +#ifdef CONFIG_DRM_AMD_DC_DCN + if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A) + init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true; + if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0) + init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; +#endif + init_data.flags.power_down_display_on_boot = true; if (check_seamless_boot_capability(adev)) { @@ -2621,9 +2664,7 @@ static int dm_resume(void *handle) amdgpu_dm_outbox_init(adev); /* Before powering on DC we need to re-initialize DMUB. */ - r = dm_dmub_hw_init(adev); - if (r) - DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + dm_dmub_hw_resume(adev); /* power on hardware */ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); @@ -2950,13 +2991,12 @@ void amdgpu_dm_update_connector_after_detect( aconnector->edid = (struct edid *)sink->dc_edid.raw_edid; - drm_connector_update_edid_property(connector, - aconnector->edid); if (aconnector->dc_link->aux_mode) drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, aconnector->edid); } + drm_connector_update_edid_property(connector, aconnector->edid); amdgpu_dm_update_freesync_caps(connector, aconnector->edid); update_connector_ext_caps(aconnector); } else { @@ -6058,13 +6098,16 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, struct dsc_dec_dpcd_caps *dsc_caps) { stream->timing.flags.DSC = 0; + dsc_caps->is_dsc_supported = false; if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || sink->sink_signal == SIGNAL_TYPE_EDP)) { - dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, - aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, - aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, - dsc_caps); + if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || + sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) + dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, + aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, + aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, + dsc_caps); } } @@ -6134,6 +6177,8 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, uint32_t link_bandwidth_kbps; uint32_t max_dsc_target_bpp_limit_override = 0; struct dc *dc = sink->ctx->dc; + uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps; + uint32_t dsc_max_supported_bw_in_kbps; link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, dc_link_get_link_cap(aconnector->dc_link)); @@ -6152,16 +6197,37 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { - - if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], + if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { + if 
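The update_dsc_caps hunk above now initializes is_dsc_supported to false and parses the DSC DPCD blobs only when the sink sits behind no dongle or a DP-to-HDMI converter (PCON); every other dongle topology leaves DSC off. The gate reduces to a two-case predicate, sketched here with a hypothetical enum:

enum dongle_type {
        DONGLE_NONE,
        DONGLE_DP_HDMI_CONVERTER,
        DONGLE_OTHER,
};

static bool dsc_dpcd_parse_allowed(enum dongle_type type)
{
        /* end-to-end DSC is only meaningful for these two topologies */
        return type == DONGLE_NONE || type == DONGLE_DP_HDMI_CONVERTER;
}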
(dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], dsc_caps, aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, max_dsc_target_bpp_limit_override, link_bandwidth_kbps, &stream->timing, &stream->timing.dsc_cfg)) { - stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); + stream->timing.flags.DSC = 1; + DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", + __func__, drm_connector->name); + } + } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { + timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); + max_supported_bw_in_kbps = link_bandwidth_kbps; + dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; + + if (timing_bw_in_kbps > max_supported_bw_in_kbps && + max_supported_bw_in_kbps > 0 && + dsc_max_supported_bw_in_kbps > 0) + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], + dsc_caps, + aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, + max_dsc_target_bpp_limit_override, + dsc_max_supported_bw_in_kbps, + &stream->timing, + &stream->timing.dsc_cfg)) { + stream->timing.flags.DSC = 1; + DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", + __func__, drm_connector->name); + } } } @@ -8308,15 +8374,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, break; case DRM_MODE_CONNECTOR_DisplayPort: aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) { - link->link_enc = - link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - if (!link->link_enc) - link->link_enc = - link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); - } - + link->link_enc = dp_get_link_enc(link); + ASSERT(link->link_enc); if (link->link_enc) aconnector->base.ycbcr_420_allowed = link->link_enc->features.dp_ycbcr420_supported ? true : false; @@ -10707,6 +10766,8 @@ static int dm_update_plane_state(struct dc *dc, dm_new_plane_state->dc_state = dc_new_plane_state; + dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY); + /* Tell DC to do a full surface update every time there * is a plane change. Inefficient, but works for now. */ @@ -10719,6 +10780,24 @@ static int dm_update_plane_state(struct dc *dc, return ret; } +static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, + int *src_w, int *src_h) +{ + switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { + case DRM_MODE_ROTATE_90: + case DRM_MODE_ROTATE_270: + *src_w = plane_state->src_h >> 16; + *src_h = plane_state->src_w >> 16; + break; + case DRM_MODE_ROTATE_0: + case DRM_MODE_ROTATE_180: + default: + *src_w = plane_state->src_w >> 16; + *src_h = plane_state->src_h >> 16; + break; + } +} + static int dm_check_crtc_cursor(struct drm_atomic_state *state, struct drm_crtc *crtc, struct drm_crtc_state *new_crtc_state) @@ -10727,6 +10806,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, struct drm_plane_state *new_cursor_state, *new_underlying_state; int i; int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h; + int cursor_src_w, cursor_src_h; + int underlying_src_w, underlying_src_h; /* On DCE and DCN there is no dedicated hardware cursor plane. 
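dm_get_oriented_plane_size above swaps width and height for 90/270-degree rotations because DRM stores src_w/src_h in 16.16 fixed point, pre-rotation; the cursor check then compares per-mille scale factors computed from the oriented size. A standalone worked example with made-up numbers:

#include <assert.h>

int main(void)
{
        unsigned int src_w_fp = 64u << 16;      /* 64 px source width, 16.16 fixed point */
        unsigned int crtc_w = 128;              /* 128 px on screen */
        unsigned int scale_w = crtc_w * 1000 / (src_w_fp >> 16);

        assert(scale_w == 2000);                /* 2.0x; must match the underlying plane */
        return 0;
}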
We get a * cursor per pipe but it's going to inherit the scaling and @@ -10738,10 +10819,9 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, return 0; } - cursor_scale_w = new_cursor_state->crtc_w * 1000 / - (new_cursor_state->src_w >> 16); - cursor_scale_h = new_cursor_state->crtc_h * 1000 / - (new_cursor_state->src_h >> 16); + dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h); + cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w; + cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h; for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { /* Narrow down to non-cursor planes on the same CRTC as the cursor */ @@ -10752,10 +10832,10 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, if (!new_underlying_state->fb) continue; - underlying_scale_w = new_underlying_state->crtc_w * 1000 / - (new_underlying_state->src_w >> 16); - underlying_scale_h = new_underlying_state->crtc_h * 1000 / - (new_underlying_state->src_h >> 16); + dm_get_oriented_plane_size(new_underlying_state, + &underlying_src_w, &underlying_src_h); + underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w; + underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h; if (cursor_scale_w != underlying_scale_w || cursor_scale_h != underlying_scale_h) { @@ -10840,7 +10920,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, enum dc_status status; int ret, i; bool lock_and_validation_needed = false; - struct dm_crtc_state *dm_old_crtc_state; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; #if defined(CONFIG_DRM_AMD_DC_DCN) struct dsc_mst_fairness_vars vars[MAX_PIPES]; struct drm_dp_mst_topology_state *mst_state; @@ -11022,6 +11102,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + if (dm_new_crtc_state->mpo_requested) + DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc); + } + /* Check cursor planes scaling */ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); @@ -11274,7 +11360,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header); input->offset = offset; input->length = length; - input->total_length = total_length; + input->cea_total_length = total_length; memcpy(input->payload, data, length); res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd); @@ -11581,8 +11667,10 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, return value; } -int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx, - uint8_t status_type, uint32_t *operation_result) +static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, + struct dc_context *ctx, + uint8_t status_type, + uint32_t *operation_result) { struct amdgpu_device *adev = ctx->driver_context; int return_status = -1; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index bec1de86e2ef..bee806ae3e52 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -50,9 +50,9 @@ #define AMDGPU_DMUB_NOTIFICATION_MAX 5 -/** +/* * DMUB Async to Sync Mechanism Status - **/ + */ #define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1 #define 
DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2 #define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3 @@ -626,6 +626,8 @@ struct dm_crtc_state { bool cm_has_degamma; bool cm_is_degamma_srgb; + bool mpo_requested; + int update_type; int active_planes; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index a022e5bb30a5..a71177305bcd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -285,8 +285,12 @@ static int __set_input_tf(struct dc_transfer_func *func, } /** + * amdgpu_dm_verify_lut_sizes + * @crtc_state: the DRM CRTC state + * * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of * the expected size. + * * Returns 0 on success. */ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 0277685864c5..26719efa5396 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -824,6 +824,48 @@ static int dmub_fw_state_show(struct seq_file *m, void *data) return seq_write(m, state_base, state_size); } +/* psr_capability_show() - show eDP panel PSR capability + * + * The read function: sink_psr_capability_show + * Shows if sink has PSR capability or not. + * If yes - the PSR version is appended + * + * cat /sys/kernel/debug/dri/0/eDP-X/psr_capability + * + * Expected output: + * "Sink support: no\n" - if panel doesn't support PSR + * "Sink support: yes [0x01]\n" - if panel supports PSR1 + * "Driver support: no\n" - if driver doesn't support PSR + * "Driver support: yes [0x01]\n" - if driver supports PSR1 + */ +static int psr_capability_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dc_link *link = aconnector->dc_link; + + if (!link) + return -ENODEV; + + if (link->type == dc_connection_none) + return -ENODEV; + + if (!(link->connector_signal & SIGNAL_TYPE_EDP)) + return -ENODEV; + + seq_printf(m, "Sink support: %s", yesno(link->dpcd_caps.psr_caps.psr_version != 0)); + if (link->dpcd_caps.psr_caps.psr_version) + seq_printf(m, " [0x%02x]", link->dpcd_caps.psr_caps.psr_version); + seq_puts(m, "\n"); + + seq_printf(m, "Driver support: %s", yesno(link->psr_settings.psr_feature_enabled)); + if (link->psr_settings.psr_version) + seq_printf(m, " [0x%02x]", link->psr_settings.psr_version); + seq_puts(m, "\n"); + + return 0; +} + /* * Returns the current and maximum output bpc for the connector. * Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc @@ -2467,6 +2509,7 @@ DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status); DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability); #endif DEFINE_SHOW_ATTRIBUTE(internal_display); +DEFINE_SHOW_ATTRIBUTE(psr_capability); static const struct file_operations dp_dsc_clock_en_debugfs_fops = { .owner = THIS_MODULE, @@ -2712,6 +2755,138 @@ static const struct { {"internal_display", &internal_display_fops} }; +/* + * Returns supported customized link rates by this eDP panel. 
+ * Example usage: cat /sys/kernel/debug/dri/0/eDP-x/ilr_setting + */ +static int edp_ilr_show(struct seq_file *m, void *unused) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private); + struct dc_link *link = aconnector->dc_link; + uint8_t supported_link_rates[16]; + uint32_t link_rate_in_khz; + uint32_t entry = 0; + uint8_t dpcd_rev; + + memset(supported_link_rates, 0, sizeof(supported_link_rates)); + dm_helpers_dp_read_dpcd(link->ctx, link, DP_SUPPORTED_LINK_RATES, + supported_link_rates, sizeof(supported_link_rates)); + + dpcd_rev = link->dpcd_caps.dpcd_rev.raw; + + if (dpcd_rev >= DP_DPCD_REV_13 && + (supported_link_rates[entry+1] != 0 || supported_link_rates[entry] != 0)) { + + for (entry = 0; entry < 16; entry += 2) { + link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + + supported_link_rates[entry]) * 200; + seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz); + } + } else { + seq_printf(m, "ILR is not supported by this eDP panel.\n"); + } + + return 0; +} + +/* + * Set supported customized link rate to eDP panel. + * + * echo <lane_count> <link_rate option> > ilr_setting + * + * for example, supported ILR : [0] 1620000 kHz [1] 2160000 kHz [2] 2430000 kHz ... + * echo 4 1 > /sys/kernel/debug/dri/0/eDP-x/ilr_setting + * to set 4 lanes and 2.16 GHz + */ +static ssize_t edp_ilr_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *connector = file_inode(f)->i_private; + struct dc_link *link = connector->dc_link; + struct amdgpu_device *adev = drm_to_adev(connector->base.dev); + struct dc *dc = (struct dc *)link->dc; + struct dc_link_settings prefer_link_settings; + char *wr_buf = NULL; + const uint32_t wr_buf_size = 40; + /* 0: lane_count; 1: link_rate */ + int max_param_num = 2; + uint8_t param_nums = 0; + long param[2]; + bool valid_input = true; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + if (!wr_buf) + return -ENOMEM; + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + ¶m_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + kfree(wr_buf); + return -EINVAL; + } + + switch (param[0]) { + case LANE_COUNT_ONE: + case LANE_COUNT_TWO: + case LANE_COUNT_FOUR: + break; + default: + valid_input = false; + break; + } + + if (param[1] >= link->dpcd_caps.edp_supported_link_rates_count) + valid_input = false; + + if (!valid_input) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("Invalid Input value. 
No HW will be programmed\n"); + prefer_link_settings.use_link_rate_set = false; + dc_link_set_preferred_training_settings(dc, NULL, NULL, link, true); + return size; + } + + /* save user force lane_count, link_rate to preferred settings + * spread spectrum will not be changed + */ + prefer_link_settings.link_spread = link->cur_link_settings.link_spread; + prefer_link_settings.lane_count = param[0]; + prefer_link_settings.use_link_rate_set = true; + prefer_link_settings.link_rate_set = param[1]; + prefer_link_settings.link_rate = link->dpcd_caps.edp_supported_link_rates[param[1]]; + + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, &prefer_link_settings, + NULL, link, false); + mutex_unlock(&adev->dm.dc_lock); + + kfree(wr_buf); + return size; +} + +static int edp_ilr_open(struct inode *inode, struct file *file) +{ + return single_open(file, edp_ilr_show, inode->i_private); +} + +static const struct file_operations edp_ilr_debugfs_fops = { + .owner = THIS_MODULE, + .open = edp_ilr_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = edp_ilr_write +}; + void connector_debugfs_init(struct amdgpu_dm_connector *connector) { int i; @@ -2726,11 +2901,14 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) } } if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) { + debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops); debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops); debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector, ¤t_backlight_fops); debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector, &target_backlight_fops); + debugfs_create_file("ilr_setting", 0644, dir, connector, + &edp_ilr_debugfs_fops); } for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) { @@ -2909,10 +3087,13 @@ static int crc_win_update_set(void *data, u64 val) struct amdgpu_device *adev = drm_to_adev(new_crtc->dev); struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk; + if (!crc_rd_wrk) + return 0; + if (val) { spin_lock_irq(&adev_to_drm(adev)->event_lock); spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); - if (crc_rd_wrk && crc_rd_wrk->crtc) { + if (crc_rd_wrk->crtc) { old_crtc = crc_rd_wrk->crtc; old_acrtc = to_amdgpu_crtc(old_crtc); } @@ -3190,6 +3371,32 @@ static int disable_hpd_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get, disable_hpd_set, "%llu\n"); +#if defined(CONFIG_DRM_AMD_DC_DCN) +/* + * Temporary w/a to force sst sequence in M42D DP2 mst receiver + * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst + */ +static int dp_force_sst_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + adev->dm.dc->debug.set_mst_en_for_sst = val; + + return 0; +} + +static int dp_force_sst_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.dc->debug.set_mst_en_for_sst; + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(dp_set_mst_en_for_sst_ops, dp_force_sst_get, + dp_force_sst_set, "%llu\n"); +#endif + /* * Sets the DC visual confirm debug option from the given string. 
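The dp_force_sst pair at the end of the hunk above is the stock debugfs recipe for a u64 knob: a get callback, a set callback, and DEFINE_DEBUGFS_ATTRIBUTE to generate the fops, registered afterwards with debugfs_create_file. A generic, self-contained sketch (the knob name and storage are invented):

#include <linux/debugfs.h>

static u64 knob_value;

static int knob_set(void *data, u64 val)
{
        knob_value = val;
        return 0;
}

static int knob_get(void *data, u64 *val)
{
        *val = knob_value;
        return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(knob_fops, knob_get, knob_set, "%llu\n");

/* registration: debugfs_create_file_unsafe("knob", 0644, parent, NULL, &knob_fops); */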
* Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm @@ -3299,6 +3506,10 @@ void dtn_debugfs_init(struct amdgpu_device *adev) adev, &mst_topo_fops); debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev, &dtn_log_fops); +#if defined(CONFIG_DRM_AMD_DC_DCN) + debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev, + &dp_set_mst_en_for_sst_ops); +#endif debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev, &visual_confirm_fops); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 72a2e84645df..29f07c26d080 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -83,16 +83,17 @@ static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps) * void * */ enum dc_edid_status dm_helpers_parse_edid_caps( - struct dc_context *ctx, + struct dc_link *link, const struct dc_edid *edid, struct dc_edid_caps *edid_caps) { + struct amdgpu_dm_connector *aconnector = link->priv; + struct drm_connector *connector = &aconnector->base; struct edid *edid_buf = (struct edid *) edid->raw_edid; struct cea_sad *sads; int sad_count = -1; int sadb_count = -1; int i = 0; - int j = 0; uint8_t *sadb = NULL; enum dc_edid_status result = EDID_OK; @@ -111,23 +112,11 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->manufacture_week = edid_buf->mfg_week; edid_caps->manufacture_year = edid_buf->mfg_year; - /* One of the four detailed_timings stores the monitor name. It's - * stored in an array of length 13. */ - for (i = 0; i < 4; i++) { - if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) { - while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) { - if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n') - break; - - edid_caps->display_name[j] = - edid_buf->detailed_timings[i].data.other_data.data.str.str[j]; - j++; - } - } - } + drm_edid_get_monitor_name(edid_buf, + edid_caps->display_name, + AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); - edid_caps->edid_hdmi = drm_detect_hdmi_monitor( - (struct edid *) edid->raw_edid); + edid_caps->edid_hdmi = connector->display_info.is_hdmi; sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); if (sad_count <= 0) @@ -585,8 +574,17 @@ bool dm_helpers_dp_write_dsc_enable( } if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { - ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); - DC_LOG_DC("Send DSC %s to sst display\n", enable_dsc ? "enable" : "disable"); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { +#endif + ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); + DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable"); +#if defined(CONFIG_DRM_AMD_DC_DCN) + } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { + ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); + DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? 
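The dm_helpers_parse_edid_caps hunk above retires a hand-rolled scan of the four detailed timing descriptors in favor of the core drm_edid_get_monitor_name() helper, which performs the same descriptor walk in one call. A minimal usage sketch (the buffer size here is the caller's choice; EDID name descriptors carry at most 13 characters):

#include <drm/drm_edid.h>

static void sketch_read_name(struct edid *edid_buf)
{
        char name[14];

        drm_edid_get_monitor_name(edid_buf, name, sizeof(name));
}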
"enable" : "disable"); + } +#endif } return (ret > 0); @@ -650,14 +648,8 @@ enum dc_edid_status dm_helpers_read_local_edid( /* We don't need the original edid anymore */ kfree(edid); - /* connector->display_info will be parsed from EDID and saved - * into drm_connector->display_info from edid by call stack - * below: - * drm_parse_ycbcr420_deep_color_info - * drm_parse_hdmi_forum_vsdb - * drm_parse_cea_ext - * drm_add_display_info - * drm_connector_update_edid_property + /* connector->display_info is parsed from EDID and saved + * into drm_connector->display_info * * drm_connector->display_info will be used by amdgpu_dm funcs, * like fill_stream_properties_from_drm_display_mode @@ -665,7 +657,7 @@ enum dc_edid_status dm_helpers_read_local_edid( amdgpu_dm_update_connector_after_detect(aconnector); edid_status = dm_helpers_parse_edid_caps( - ctx, + link, &sink->dc_edid, &sink->edid_caps); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index c022e56f9459..c510638b4f99 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -26,6 +26,73 @@ #include "amdgpu_dm_psr.h" #include "dc.h" #include "dm_helpers.h" +#include "amdgpu_dm.h" + +static bool link_get_psr_caps(struct dc_link *link) +{ + uint8_t psr_dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE]; + uint8_t edp_rev_dpcd_data; + + + + if (!dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, + psr_dpcd_data, sizeof(psr_dpcd_data))) + return false; + + if (!dm_helpers_dp_read_dpcd(NULL, link, DP_EDP_DPCD_REV, + &edp_rev_dpcd_data, sizeof(edp_rev_dpcd_data))) + return false; + + link->dpcd_caps.psr_caps.psr_version = psr_dpcd_data[0]; + link->dpcd_caps.psr_caps.edp_revision = edp_rev_dpcd_data; + +#ifdef CONFIG_DRM_AMD_DC_DCN + if (link->dpcd_caps.psr_caps.psr_version > 0x1) { + uint8_t alpm_dpcd_data; + uint8_t su_granularity_dpcd_data; + + if (!dm_helpers_dp_read_dpcd(NULL, link, DP_RECEIVER_ALPM_CAP, + &alpm_dpcd_data, sizeof(alpm_dpcd_data))) + return false; + + if (!dm_helpers_dp_read_dpcd(NULL, link, DP_PSR2_SU_Y_GRANULARITY, + &su_granularity_dpcd_data, sizeof(su_granularity_dpcd_data))) + return false; + + link->dpcd_caps.psr_caps.y_coordinate_required = psr_dpcd_data[1] & DP_PSR2_SU_Y_COORDINATE_REQUIRED; + link->dpcd_caps.psr_caps.su_granularity_required = psr_dpcd_data[1] & DP_PSR2_SU_GRANULARITY_REQUIRED; + + link->dpcd_caps.psr_caps.alpm_cap = alpm_dpcd_data & DP_ALPM_CAP; + link->dpcd_caps.psr_caps.standby_support = alpm_dpcd_data & (1 << 1); + + link->dpcd_caps.psr_caps.su_y_granularity = su_granularity_dpcd_data; + } +#endif + return true; +} + +#ifdef CONFIG_DRM_AMD_DC_DCN +static bool link_supports_psrsu(struct dc_link *link) +{ + struct dc *dc = link->ctx->dc; + + if (!dc->caps.dmcub_support) + return false; + + if (dc->ctx->dce_version < DCN_VERSION_3_1) + return false; + + if (!link->dpcd_caps.psr_caps.alpm_cap || + !link->dpcd_caps.psr_caps.y_coordinate_required) + return false; + + if (link->dpcd_caps.psr_caps.su_granularity_required && + !link->dpcd_caps.psr_caps.su_y_granularity) + return false; + + return true; +} +#endif /* * amdgpu_dm_set_psr_caps() - set link psr capabilities @@ -34,26 +101,34 @@ */ void amdgpu_dm_set_psr_caps(struct dc_link *link) { - uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE]; - if (!(link->connector_signal & SIGNAL_TYPE_EDP)) return; + if (link->type == dc_connection_none) return; - if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, - dpcd_data, 
sizeof(dpcd_data))) { - link->dpcd_caps.psr_caps.psr_version = dpcd_data[0]; - - if (dpcd_data[0] == 0) { - link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; - link->psr_settings.psr_feature_enabled = false; - } else { + + if (!link_get_psr_caps(link)) { + DRM_ERROR("amdgpu: Failed to read PSR Caps!\n"); + return; + } + + if (link->dpcd_caps.psr_caps.psr_version == 0) { + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + link->psr_settings.psr_feature_enabled = false; + + } else { +#ifdef CONFIG_DRM_AMD_DC_DCN + if (link_supports_psrsu(link)) + link->psr_settings.psr_version = DC_PSR_VERSION_SU_1; + else +#endif link->psr_settings.psr_version = DC_PSR_VERSION_1; - link->psr_settings.psr_feature_enabled = true; - } - DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled); + link->psr_settings.psr_feature_enabled = true; } + + DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled); + } /* diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index ff5bb152ef49..e6ef36de0825 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -2033,10 +2033,10 @@ static void calculate_bandwidth( kfree(surface_type); free_tiling_mode: kfree(tiling_mode); -free_yclk: - kfree(yclk); free_sclk: kfree(sclk); +free_yclk: + kfree(yclk); } /******************************************************************************* diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index c8b0a2f05b4d..e447c74be713 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -503,7 +503,6 @@ static void dcn_bw_calc_rq_dlg_ttu( //input[in_idx].dout.output_standard; /*todo: soc->sr_enter_plus_exit_time??*/ - dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep; dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src); dml1_extract_rq_regs(dml, rq_regs, rq_param); @@ -739,7 +738,9 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v, hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz); } -unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, uint32_t hw_internal_rev, uint32_t pci_revision_id) +static unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, + uint32_t hw_internal_rev, + uint32_t pci_revision_id) { /* for low power RV2 variants, the highest voltage level we want is 0 */ if ((chip_family == FAMILY_RV) && diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 76ec8ec92efd..60761ff3cbf1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -34,7 +34,7 @@ #include "rv1_clk_mgr_vbios_smu.h" #include "rv1_clk_mgr_clk.h" -void rv1_init_clocks(struct clk_mgr *clk_mgr) +static void rv1_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index fe18bb9e19aa..06bab24d8e27 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -28,6 +28,8 @@ #include "reg_helper.h" #include 
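The small dce_calcs hunk above swaps the free_sclk and free_yclk labels so the unwind frees in reverse allocation order again: with the old ordering, a goto free_sclk taken after both buffers were allocated fell past kfree(yclk) and leaked it. The canonical shape, with placeholder buffers (assuming yclk is allocated before sclk, as in that function):

#include <linux/errno.h>
#include <linux/slab.h>

static int sketch_alloc(void)
{
        int *yclk, *sclk, *tiling;
        int ret = -ENOMEM;

        yclk = kcalloc(4, sizeof(*yclk), GFP_KERNEL);
        if (!yclk)
                return ret;
        sclk = kcalloc(4, sizeof(*sclk), GFP_KERNEL);
        if (!sclk)
                goto free_yclk;
        tiling = kcalloc(4, sizeof(*tiling), GFP_KERNEL);
        if (!tiling)
                goto free_sclk;

        ret = 0;                /* ... use the buffers ... */
        kfree(tiling);
free_sclk:
        kfree(sclk);            /* falls through: later allocations freed first */
free_yclk:
        kfree(yclk);
        return ret;
}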
<linux/delay.h> +#include "rv1_clk_mgr_vbios_smu.h" + #define MAX_INSTANCE 5 #define MAX_SEGMENT 5 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 2108bff49d4e..cac80ba69072 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -38,7 +38,6 @@ #include "clk/clk_11_0_0_offset.h" #include "clk/clk_11_0_0_sh_mask.h" -#include "irq/dcn20/irq_service_dcn20.h" #undef FN #define FN(reg_name, field_name) \ @@ -223,8 +222,6 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, bool force_reset = false; bool p_state_change_support; int total_plane_count; - int irq_src; - uint32_t hpd_state; if (dc->work_arounds.skip_clock_update) return; @@ -242,13 +239,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, if (dc->res_pool->pp_smu) pp_smu = &dc->res_pool->pp_smu->nv_funcs; - for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) { - hpd_state = dc_get_hpd_state_dcn20(dc->res_pool->irqs, irq_src); - if (hpd_state) - break; - } - - if (display_count == 0 && !hpd_state) + if (display_count == 0) enter_display_off = true; if (enter_display_off == safe_to_lower) { @@ -409,7 +400,7 @@ void dcn2_init_clocks(struct clk_mgr *clk_mgr) clk_mgr->clks.prev_p_state_change_support = true; } -void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) +static void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct pp_smu_funcs_nv *pp_smu = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c index db9950244c7b..fbdd0a92d146 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c @@ -74,42 +74,6 @@ static const struct clk_mgr_mask clk_mgr_mask = { CLK_COMMON_MASK_SH_LIST_DCN201_BASE(_MASK) }; -void dcn201_update_clocks_vbios(struct clk_mgr *clk_mgr, - struct dc_state *context, - bool safe_to_lower) -{ - struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; - - bool update_dppclk = false; - bool update_dispclk = false; - - if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) { - clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz; - update_dppclk = true; - } - - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) { - clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; - update_dispclk = true; - } - - if (update_dppclk || update_dispclk) { - struct bp_set_dce_clock_parameters dce_clk_params; - struct dc_bios *bp = clk_mgr->ctx->dc_bios; - - if (update_dispclk) { - memset(&dce_clk_params, 0, sizeof(dce_clk_params)); - dce_clk_params.target_clock_frequency = new_clocks->dispclk_khz; - dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; - dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK; - bp->funcs->set_dce_clock(bp, &dce_clk_params); - } - /* currently there is no DCECLOCK_TYPE_DPPCLK type defined in VBIOS interface. 
- * vbios program DPPCLK to the same DispCLK limitation - */ - } -} - static void dcn201_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); @@ -126,10 +90,8 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base, struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; - int display_count; bool update_dppclk = false; bool update_dispclk = false; - bool enter_display_off = false; bool dpp_clock_lowered = false; bool force_reset = false; bool p_state_change_support; @@ -145,10 +107,7 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base, dcn2_read_clocks_from_hw_dentist(clk_mgr_base); } - display_count = clk_mgr_helper_get_active_display_cnt(dc, context); - - if (display_count == 0) - enter_display_off = true; + clk_mgr_helper_get_active_display_cnt(dc, context); if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index ac2d4c4f04e4..f4dee0e48a67 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -42,7 +42,6 @@ #include "clk/clk_10_0_2_sh_mask.h" #include "renoir_ip_offset.h" -#include "irq/dcn21/irq_service_dcn21.h" /* Constants */ @@ -56,9 +55,7 @@ /* TODO: evaluate how to lower or disable all dcn clocks in screen off case */ -int rn_get_active_display_cnt_wa( - struct dc *dc, - struct dc_state *context) +static int rn_get_active_display_cnt_wa(struct dc *dc, struct dc_state *context) { int i, display_count; bool tmds_present = false; @@ -77,7 +74,8 @@ int rn_get_active_display_cnt_wa( const struct dc_link *link = dc->links[i]; /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ - if (link->link_enc->funcs->is_dig_enabled(link->link_enc)) + if (link->link_enc->funcs->is_dig_enabled && + link->link_enc->funcs->is_dig_enabled(link->link_enc)) display_count++; } @@ -88,7 +86,7 @@ int rn_get_active_display_cnt_wa( return display_count; } -void rn_set_low_power_state(struct clk_mgr *clk_mgr_base) +static void rn_set_low_power_state(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -122,7 +120,7 @@ static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, } -void rn_update_clocks(struct clk_mgr *clk_mgr_base, +static void rn_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) { @@ -130,11 +128,9 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; int display_count; - int irq_src; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; - uint32_t hpd_state; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; @@ -151,14 +147,8 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, display_count = rn_get_active_display_cnt_wa(dc, context); - for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD5; irq_src++) { - hpd_state = dc_get_hpd_state_dcn21(dc->res_pool->irqs, irq_src); - if (hpd_state) - break; - } - /* if we can go lower, go lower */ - if (display_count == 0 && !hpd_state) { + if (display_count == 0) { 
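The rn_get_active_display_cnt_wa hunk above adds the standard guard for optional ops hooks: test that the function pointer is non-NULL before calling through it, since not every link encoder implementation fills in is_dig_enabled. The pattern in isolation, with a hypothetical ops table:

struct enc_ops {
        bool (*is_dig_enabled)(void *enc);      /* optional: may be NULL */
};

static bool enc_is_enabled(const struct enc_ops *ops, void *enc)
{
        /* a missing hook means "cannot claim enabled", not a NULL dereference */
        return ops->is_dig_enabled && ops->is_dig_enabled(enc);
}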
rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER); /* update power state */ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; @@ -437,25 +427,14 @@ static void rn_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an } } -/* This function produce translated logical clk state values*/ -void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s) -{ - struct clk_state_registers_and_bypass sb = { 0 }; - struct clk_log_info log_info = { 0 }; - - rn_dump_clk_registers(&sb, clk_mgr_base, &log_info); - - s->dprefclk_khz = sb.dprefclk * 1000; -} - -void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base) +static void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); rn_vbios_smu_enable_pme_wa(clk_mgr); } -void rn_init_clocks(struct clk_mgr *clk_mgr) +static void rn_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); // Assumption is that boot state always supports pstate diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 9f7eed6688c4..8161a6ae410d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -33,6 +33,8 @@ #include "mp/mp_12_0_0_offset.h" #include "mp/mp_12_0_0_sh_mask.h" +#include "rn_clk_mgr_vbios_smu.h" + #define REG(reg_name) \ (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) @@ -86,7 +88,9 @@ static uint32_t rn_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsig } -int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned int msg_id, unsigned int param) +static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, + unsigned int msg_id, + unsigned int param) { uint32_t result; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 1861a147a7fa..f977f29907df 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -252,6 +252,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, bool update_dispclk = false; bool enter_display_off = false; bool dpp_clock_lowered = false; + bool update_pstate_unsupported_clk = false; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; bool force_reset = false; bool update_uclk = false; @@ -299,13 +300,28 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context); p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0); - if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { + + // invalidate the current P-State forced min in certain dc_mode_softmax situations + if (dc->clk_mgr->dc_mode_softmax_enabled && safe_to_lower && !p_state_change_support) { + if ((new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) != + (clk_mgr_base->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)) + update_pstate_unsupported_clk = true; + } + + if (should_update_pstate_support(safe_to_lower, p_state_change_support, 
clk_mgr_base->clks.p_state_change_support) || + update_pstate_unsupported_clk) { clk_mgr_base->clks.p_state_change_support = p_state_change_support; /* to disable P-State switching, set UCLK min = max */ - if (!clk_mgr_base->clks.p_state_change_support) - dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, + if (!clk_mgr_base->clks.p_state_change_support) { + if (dc->clk_mgr->dc_mode_softmax_enabled && + new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) + dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, + dc->clk_mgr->bw_params->dc_mode_softmax_memclk); + else + dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz); + } } /* Always update saved value, even if new value not set due to P-State switching unsupported */ @@ -421,6 +437,24 @@ static void dcn3_set_hard_max_memclk(struct clk_mgr *clk_mgr_base) clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz); } +static void dcn3_set_max_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + if (!clk_mgr->smu_present) + return; + + dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz); +} +static void dcn3_set_min_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + if (!clk_mgr->smu_present) + return; + dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz); +} + /* Get current memclk states, update bounding box */ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) { @@ -436,6 +470,8 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) &num_levels); clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? 
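The dcn3_update_clocks hunk above keeps the long-standing trick of disabling P-state switching by pinning the UCLK hard minimum to a maximum value, but now picks the DC-mode soft cap instead of the top DPM entry whenever the soft cap already covers the requested dramclk. Reduced to a chooser over MHz inputs (a simplification of the branch above, hypothetical names):

static unsigned int uclk_pin_mhz(bool softmax_enabled, unsigned int req_khz,
                                 unsigned int softmax_mhz, unsigned int top_dpm_mhz)
{
        /* the hard minimum is forced to this value; min == max stops P-state switching */
        if (softmax_enabled && req_khz <= softmax_mhz * 1000)
                return softmax_mhz;
        return top_dpm_mhz;
}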
num_levels : 1; + clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); + /* Refresh bounding box */ clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box( clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); @@ -505,6 +541,8 @@ static struct clk_mgr_funcs dcn3_funcs = { .notify_wm_ranges = dcn3_notify_wm_ranges, .set_hard_min_memclk = dcn3_set_hard_min_memclk, .set_hard_max_memclk = dcn3_set_hard_max_memclk, + .set_max_memclk = dcn3_set_max_memclk, + .set_min_memclk = dcn3_set_min_memclk, .get_memclk_states_from_smu = dcn3_get_memclk_states_from_smu, .are_clock_states_equal = dcn3_are_clock_states_equal, .enable_pme_wa = dcn3_enable_pme_wa, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c index 6ea642615854..d9920d91838d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c @@ -88,9 +88,9 @@ static uint32_t dcn301_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, u return res_val; } -int dcn301_smu_send_msg_with_param( - struct clk_mgr_internal *clk_mgr, - unsigned int msg_id, unsigned int param) +static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, + unsigned int msg_id, + unsigned int param) { uint32_t result; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index 3eee32faa208..48005def1164 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -89,9 +89,9 @@ static int vg_get_active_display_cnt_wa( return display_count; } -void vg_update_clocks(struct clk_mgr *clk_mgr_base, - struct dc_state *context, - bool safe_to_lower) +static void vg_update_clocks(struct clk_mgr *clk_mgr_base, + struct dc_state *context, + bool safe_to_lower) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; @@ -367,18 +367,6 @@ static void vg_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an } } -/* This function produce translated logical clk state values*/ -void vg_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s) -{ - - struct clk_state_registers_and_bypass sb = { 0 }; - struct clk_log_info log_info = { 0 }; - - vg_dump_clk_registers(&sb, clk_mgr_base, &log_info); - - s->dprefclk_khz = sb.dprefclk * 1000; -} - static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -386,7 +374,7 @@ static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base) dcn301_smu_enable_pme_wa(clk_mgr); } -void vg_init_clocks(struct clk_mgr *clk_mgr) +static void vg_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); // Assumption is that boot state always supports pstate @@ -753,7 +741,7 @@ void vg_clk_mgr_construct( sizeof(struct watermarks), &clk_mgr->smu_wm_set.mc_address.quad_part); - if (clk_mgr->smu_wm_set.wm_set == 0) { + if (!clk_mgr->smu_wm_set.wm_set) { clk_mgr->smu_wm_set.wm_set = &dummy_wms; clk_mgr->smu_wm_set.mc_address.quad_part = 0; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index a13ff1783b9b..4162ce40089b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -158,6 +158,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, union display_idle_optimization_u idle_info = { 0 }; idle_info.idle_info.df_request_disabled = 1; idle_info.idle_info.phy_ref_clk_off = 1; + idle_info.idle_info.s0i2_rdy = 1; dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data); /* update power state */ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; @@ -540,10 +541,9 @@ static unsigned int find_clk_for_voltage( return clock; } -void dcn31_clk_mgr_helper_populate_bw_params( - struct clk_mgr_internal *clk_mgr, - struct integrated_info *bios_info, - const DpmClocks_t *clock_table) +static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr, + struct integrated_info *bios_info, + const DpmClocks_t *clock_table) { int i, j; struct clk_bw_params *bw_params = clk_mgr->base.bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 8c2b77eb9459..a1011f3273f3 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -95,9 +95,9 @@ static uint32_t dcn31_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un return res_val; } -int dcn31_smu_send_msg_with_param( - struct clk_mgr_internal *clk_mgr, - unsigned int msg_id, unsigned int param) +static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, + unsigned int msg_id, + unsigned int param) { uint32_t result; @@ -119,6 +119,12 @@ int dcn31_smu_send_msg_with_param( result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000); + if (result == VBIOSSMC_Result_Failed) { + ASSERT(0); + REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); + return -1; + } + if (IS_SMU_TIMEOUT(result)) { ASSERT(0); dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 17b7408d84b7..6f5528d34093 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -274,24 +274,6 @@ static bool create_links( goto failed_alloc; } -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && - dc->caps.dp_hpo && - link->dc->res_pool->res_cap->num_hpo_dp_link_encoder > 0) { - /* FPGA case - Allocate HPO DP link encoder */ - if (i < link->dc->res_pool->res_cap->num_hpo_dp_link_encoder) { - link->hpo_dp_link_enc = link->dc->res_pool->hpo_dp_link_enc[i]; - - if (link->hpo_dp_link_enc == NULL) { - BREAK_TO_DEBUGGER(); - goto failed_alloc; - } - link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source; - link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter; - } - } -#endif - link->link_status.dpcd_caps = &link->dpcd_caps; enc_init.ctx = dc->ctx; @@ -809,7 +791,7 @@ void dc_stream_set_static_screen_params(struct dc *dc, static void dc_destruct(struct dc *dc) { // reset link encoder assignment table on destruct - if (dc->res_pool->funcs->link_encs_assign) + if (dc->res_pool && dc->res_pool->funcs->link_encs_assign) link_enc_cfg_init(dc, dc->current_state); if (dc->current_state) { @@ -1422,20 +1404,34 @@ static void program_timing_sync( status->timing_sync_info.master = false; } - /* remove any other unblanked pipes as they have already been synced */ - for (j = j + 1; j < group_size; j++) { - bool is_blanked; - if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) - is_blanked = - 
pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); - else - is_blanked = - pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); - if (!is_blanked) { - group_size--; - pipe_set[j] = pipe_set[group_size]; - j--; + /* remove any other pipes that are already been synced */ + if (dc->config.use_pipe_ctx_sync_logic) { + /* check pipe's syncd to decide which pipe to be removed */ + for (j = 1; j < group_size; j++) { + if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) { + group_size--; + pipe_set[j] = pipe_set[group_size]; + j--; + } else + /* link slave pipe's syncd with master pipe */ + pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; + } + } else { + for (j = j + 1; j < group_size; j++) { + bool is_blanked; + + if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) + is_blanked = + pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); + else + is_blanked = + pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); + if (!is_blanked) { + group_size--; + pipe_set[j] = pipe_set[group_size]; + j--; + } } } @@ -2994,12 +2990,12 @@ static void commit_planes_for_stream(struct dc *dc, #ifdef CONFIG_DRM_AMD_DC_DCN if (dc->debug.validate_dml_output) { for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i]; - if (cur_pipe.stream == NULL) + struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; + if (cur_pipe->stream == NULL) continue; - cur_pipe.plane_res.hubp->funcs->validate_dml_output( - cur_pipe.plane_res.hubp, dc->ctx, + cur_pipe->plane_res.hubp->funcs->validate_dml_output( + cur_pipe->plane_res.hubp, dc->ctx, &context->res_ctx.pipe_ctx[i].rq_regs, &context->res_ctx.pipe_ctx[i].dlg_regs, &context->res_ctx.pipe_ctx[i].ttu_regs); @@ -3441,7 +3437,7 @@ struct dc_sink *dc_link_add_remote_sink( goto fail_add_sink; edid_status = dm_helpers_parse_edid_caps( - link->ctx, + link, &dc_sink->dc_edid, &dc_sink->edid_caps); @@ -3598,6 +3594,98 @@ void dc_lock_memory_clock_frequency(struct dc *dc) core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); } +static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz) +{ + struct dc_state *context = dc->current_state; + struct hubp *hubp; + struct pipe_ctx *pipe; + int i; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream != NULL) { + dc->hwss.disable_pixel_data(dc, pipe, true); + + // wait for double buffer + pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); + pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); + pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); + + hubp = pipe->plane_res.hubp; + hubp->funcs->set_blank_regs(hubp, true); + } + } + + dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); + dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream != NULL) { + dc->hwss.disable_pixel_data(dc, pipe, false); + + hubp = pipe->plane_res.hubp; + hubp->funcs->set_blank_regs(hubp, false); + } + } +} + + +/** + * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode + * @dc: pointer to dc of the dm calling this + * @enable: True = transition to DC mode, false = transition back to AC mode + * + * Some SoCs define additional clock limits 
when in DC mode, DM should + * invoke this function when the platform undergoes a power source transition + * so DC can apply/unapply the limit. This interface may be disruptive to + * the onscreen content. + * + * Context: Triggered by OS through DM interface, or manually by escape calls. + * Need to hold a dclock when doing so. + * + * Return: none (void function) + * + */ +void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) +{ + uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; + unsigned int softMax, maxDPM, funcMin; + bool p_state_change_support; + + if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev)) + return; + + softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; + maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz; + funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; + p_state_change_support = dc->clk_mgr->clks.p_state_change_support; + + if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) { + if (p_state_change_support) { + if (funcMin <= softMax) + dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax); + // else: No-Op + } else { + if (funcMin <= softMax) + blank_and_force_memclk(dc, true, softMax); + // else: No-Op + } + } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) { + if (p_state_change_support) { + if (funcMin <= softMax) + dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM); + // else: No-Op + } else { + if (funcMin <= softMax) + blank_and_force_memclk(dc, true, maxDPM); + // else: No-Op + } + } + dc->clk_mgr->dc_mode_softmax_enabled = enable; +} bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 21be2a684393..643762542e4d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -422,6 +422,8 @@ char *dc_status_to_str(enum dc_status status) return "The operation is not supported."; case DC_UNSUPPORTED_VALUE: return "The value specified is not supported."; + case DC_NO_LINK_ENC_RESOURCE: + return "No link encoder resource"; case DC_ERROR_UNEXPECTED: return "Unexpected error"; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index faa0bc308fc8..b5e570d33ca9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -66,31 +66,6 @@ /******************************************************************************* * Private functions ******************************************************************************/ -#if defined(CONFIG_DRM_AMD_DC_DCN) -static bool add_dp_hpo_link_encoder_to_link(struct dc_link *link) -{ - struct hpo_dp_link_encoder *enc = resource_get_unused_hpo_dp_link_encoder( - link->dc->res_pool); - - if (!link->hpo_dp_link_enc && enc) { - link->hpo_dp_link_enc = enc; - link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter; - link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source; - } - - return (link->hpo_dp_link_enc != NULL); -} - -static void remove_dp_hpo_link_encoder_from_link(struct dc_link *link) -{ - if (link->hpo_dp_link_enc) { - link->hpo_dp_link_enc->hpd_source = HPD_SOURCEID_UNKNOWN; - link->hpo_dp_link_enc->transmitter = TRANSMITTER_UNKNOWN; - link->hpo_dp_link_enc = NULL; - } -} -#endif - static void dc_link_destruct(struct dc_link *link) { int i; @@ 
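dc_enable_dcmode_clk_limit above weighs three numbers: the DC-mode soft cap (softMax), the top DPM memory clock (maxDPM), and the current demand rounded up to MHz (funcMin). With made-up values softMax = 800 and maxDPM = 1000: at funcMin = 600, entering DC mode caps UCLK at 800 MHz and leaving it restores 1000; at funcMin = 900 the demand exceeds the soft cap and the function deliberately does nothing. A simplified decision sketch (the blank-and-force path for the no-P-state case is omitted):

/* returns the new UCLK cap in MHz, or 0 to leave the current cap untouched */
static unsigned int pick_memclk_cap(bool enter_dc, unsigned int soft_max,
                                    unsigned int max_dpm, unsigned int func_min)
{
        if (func_min > soft_max)
                return 0;               /* capping would underfeed the display: no-op */
        return enter_dc ? soft_max : max_dpm;
}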
-118,12 +93,6 @@ static void dc_link_destruct(struct dc_link *link) link->link_enc->funcs->destroy(&link->link_enc); } -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (link->hpo_dp_link_enc) { - remove_dp_hpo_link_encoder_from_link(link); - } -#endif - if (link->local_sink) dc_sink_release(link->local_sink); @@ -881,6 +850,7 @@ static bool dc_link_detect_helper(struct dc_link *link, enum dc_connection_type pre_connection_type = dc_connection_none; bool perform_dp_seamless_boot = false; const uint32_t post_oui_delay = 30; // 30ms + struct link_resource link_res = { 0 }; DC_LOGGER_INIT(link->ctx->logger); @@ -975,7 +945,10 @@ static bool dc_link_detect_helper(struct dc_link *link, #if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING) - add_dp_hpo_link_encoder_to_link(link); + link_res.hpo_dp_link_enc = resource_get_hpo_dp_link_enc_for_det_lt( + &link->dc->current_state->res_ctx, + link->dc->res_pool, + link); #endif if (link->type == dc_connection_mst_branch) { @@ -986,7 +959,7 @@ static bool dc_link_detect_helper(struct dc_link *link, * empty which leads to allocate_mst_payload() has "0" * pbn_per_slot value leading to exception on dc_fixpt_div() */ - dp_verify_mst_link_cap(link); + dp_verify_mst_link_cap(link, &link_res); /* * This call will initiate MST topology discovery. Which @@ -1150,6 +1123,7 @@ static bool dc_link_detect_helper(struct dc_link *link, // verify link cap for SST non-seamless boot if (!perform_dp_seamless_boot) dp_verify_link_cap_with_retries(link, + &link_res, &link->reported_link_cap, LINK_TRAINING_MAX_VERIFY_RETRY); } else { @@ -1844,6 +1818,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx) union down_spread_ctrl old_downspread; union down_spread_ctrl new_downspread; + memset(&old_downspread, 0, sizeof(old_downspread)); + core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL, &old_downspread.raw, sizeof(old_downspread)); @@ -2503,7 +2479,8 @@ static void write_i2c_redriver_setting( DC_LOG_DEBUG("Set redriver failed"); } -static void disable_link(struct dc_link *link, enum signal_type signal) +static void disable_link(struct dc_link *link, const struct link_resource *link_res, + enum signal_type signal) { /* * TODO: implement call for dp_set_hw_test_pattern @@ -2522,20 +2499,20 @@ static void disable_link(struct dc_link *link, enum signal_type signal) struct dc_link_settings link_settings = link->cur_link_settings; #endif if (dc_is_dp_sst_signal(signal)) - dp_disable_link_phy(link, signal); + dp_disable_link_phy(link, link_res, signal); else - dp_disable_link_phy_mst(link, signal); + dp_disable_link_phy_mst(link, link_res, signal); if (dc_is_dp_sst_signal(signal) || link->mst_stream_alloc_table.stream_count == 0) { #if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) { dp_set_fec_enable(link, false); - dp_set_fec_ready(link, false); + dp_set_fec_ready(link, link_res, false); } #else dp_set_fec_enable(link, false); - dp_set_fec_ready(link, false); + dp_set_fec_ready(link, link_res, false); #endif } } else { @@ -2646,7 +2623,7 @@ static enum dc_status enable_link( * new link settings. 
*/ if (link->link_status.link_active) { - disable_link(link, pipe_ctx->stream->signal); + disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal); } switch (pipe_ctx->stream->signal) { @@ -2766,8 +2743,23 @@ static bool dp_active_dongle_validate_timing( return false; } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter + struct dc_crtc_timing outputTiming = *timing; + + if (timing->flags.DSC && !timing->dsc_cfg.is_frl) + /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ + outputTiming.flags.DSC = 0; + if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) + return false; + } else { // DP to HDMI TMDS converter + if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) + return false; + } +#else if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) return false; +#endif #if defined(CONFIG_DRM_AMD_DC_DCN) } @@ -3401,11 +3393,12 @@ static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_tim /* * Payload allocation/deallocation for SST introduced in DP2.0 */ -enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, bool allocate) +static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, + bool allocate) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; - struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; + struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc; struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; struct link_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp; @@ -3487,7 +3480,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) struct link_encoder *link_encoder = NULL; struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; #if defined(CONFIG_DRM_AMD_DC_DCN) - struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; + struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc; struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; #endif struct dp_mst_stream_allocation_table proposed_table = {0}; @@ -3817,7 +3810,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) struct link_encoder *link_encoder = NULL; struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; #if defined(CONFIG_DRM_AMD_DC_DCN) - struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; + struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc; struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; #endif struct dp_mst_stream_allocation_table proposed_table = {0}; @@ -3978,104 +3971,73 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) { struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct link_encoder *link_enc = NULL; -#endif + struct cp_psp_stream_config config = {0}; + enum dp_panel_mode panel_mode = + dp_get_panel_mode(pipe_ctx->stream->link); - if (cp_psp && cp_psp->funcs.update_stream_config) { - struct cp_psp_stream_config config = {0}; - enum dp_panel_mode 
panel_mode = - dp_get_panel_mode(pipe_ctx->stream->link); + if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL) + return; - config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; - /*stream_enc_inst*/ - config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst; - config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; -#if defined(CONFIG_DRM_AMD_DC_DCN) - config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; - - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY || - pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { - link_enc = pipe_ctx->stream->link->link_enc; - config.dio_output_type = pipe_ctx->stream->link->ep_type; - config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) - link_enc = pipe_ctx->stream->link->link_enc; - else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) - if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_stream( - pipe_ctx->stream->ctx->dc, - pipe_ctx->stream); - } - ASSERT(link_enc); + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) + link_enc = pipe_ctx->stream->link->link_enc; + else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) + link_enc = link_enc_cfg_get_link_enc_used_by_stream( + pipe_ctx->stream->ctx->dc, + pipe_ctx->stream); + ASSERT(link_enc); + if (link_enc == NULL) + return; - // Initialize PHY ID with ABCDE - 01234 mapping except when it is B0 - config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + /* otg instance */ + config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; - // Add flag to guard new A0 DIG mapping - if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && - pipe_ctx->stream->link->dc->ctx->dce_version == DCN_VERSION_3_1) { - config.dig_be = link_enc->preferred_engine; - config.dio_output_type = pipe_ctx->stream->link->ep_type; - config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; - } else { - config.dio_output_type = 0; - config.dio_output_idx = 0; - } + /* dig front end */ + config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst; - // Add flag to guard B0 implementation - if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && - link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { - if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { - // enum ID 1-4 maps to DPIA PHY ID 0-3 - config.phy_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1; - } else { // for non DPIA mode over B0, ABCDE maps to 01564 - - switch (link_enc->transmitter) { - case TRANSMITTER_UNIPHY_A: - config.phy_idx = 0; - break; - case TRANSMITTER_UNIPHY_B: - config.phy_idx = 1; - break; - case TRANSMITTER_UNIPHY_C: - config.phy_idx = 5; - break; - case TRANSMITTER_UNIPHY_D: - config.phy_idx = 6; - break; - case TRANSMITTER_UNIPHY_E: - config.phy_idx = 4; - break; - default: - config.phy_idx = 0; - break; - } + /* stream encoder index */ + config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + config.stream_enc_idx = + pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; +#endif - } - } - } else if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_stream( - 
pipe_ctx->stream->ctx->dc, - pipe_ctx->stream); - config.phy_idx = 0; /* Clear phy_idx for non-physical display endpoints. */ - } - ASSERT(link_enc); - if (link_enc) - config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; - if (is_dp_128b_132b_signal(pipe_ctx)) { - config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; - config.link_enc_idx = pipe_ctx->stream->link->hpo_dp_link_enc->inst; - config.dp2_enabled = 1; - } + /* dig back end */ + config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; + + /* link encoder index */ + config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst; #endif - config.dpms_off = dpms_off; - config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; - config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP); - config.mst_enabled = (pipe_ctx->stream->signal == - SIGNAL_TYPE_DISPLAY_PORT_MST); - cp_psp->funcs.update_stream_config(cp_psp->handle, &config); - } + /* dio output index */ + config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + + /* phy index */ + config.phy_idx = resource_transmitter_to_phy_idx( + pipe_ctx->stream->link->dc, link_enc->transmitter); + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + /* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */ + config.phy_idx = 0; + + /* stream properties */ + config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0; + config.mst_enabled = (pipe_ctx->stream->signal == + SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0; +#if defined(CONFIG_DRM_AMD_DC_DCN) + config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0; +#endif + config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? 
+ 1 : 0; + config.dpms_off = dpms_off; + + /* dm stream context */ + config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; + + cp_psp->funcs.update_stream_config(cp_psp->handle, &config); } #endif @@ -4096,7 +4058,7 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi stream->link->cur_link_settings = link_settings; /* Enable clock, Configure lane count, and Enable Link Encoder*/ - enable_dp_hpo_output(stream->link, &stream->link->cur_link_settings); + enable_dp_hpo_output(stream->link, &pipe_ctx->link_res, &stream->link->cur_link_settings); #ifdef DIAGS_BUILD /* Workaround for FPGA HPO capture DP link data: @@ -4146,12 +4108,12 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; } - stream->link->hpo_dp_link_enc->funcs->update_stream_allocation_table( - stream->link->hpo_dp_link_enc, + pipe_ctx->link_res.hpo_dp_link_enc->funcs->update_stream_allocation_table( + pipe_ctx->link_res.hpo_dp_link_enc, &proposed_table); - stream->link->hpo_dp_link_enc->funcs->set_throttled_vcp_size( - stream->link->hpo_dp_link_enc, + pipe_ctx->link_res.hpo_dp_link_enc->funcs->set_throttled_vcp_size( + pipe_ctx->link_res.hpo_dp_link_enc, pipe_ctx->stream_res.hpo_dp_stream_enc->inst, avg_time_slots_per_mtp); @@ -4340,7 +4302,8 @@ void core_link_enable_stream( if (status != DC_FAIL_DP_LINK_TRAINING || pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { if (false == stream->link->link_status.link_active) - disable_link(stream->link, pipe_ctx->stream->signal); + disable_link(stream->link, &pipe_ctx->link_res, + pipe_ctx->stream->signal); BREAK_TO_DEBUGGER(); return; } @@ -4489,14 +4452,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) * state machine. 
* In DP2 or MST mode, our encoder will stay video active */ - disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); + disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); dc->hwss.disable_stream(pipe_ctx); } else { dc->hwss.disable_stream(pipe_ctx); - disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); + disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); } #else - disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); + disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); dc->hwss.disable_stream(pipe_ctx); #endif @@ -4579,16 +4542,22 @@ void dc_link_set_drive_settings(struct dc *dc, { int i; + struct pipe_ctx *pipe = NULL; + const struct link_resource *link_res; - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i] == link) - break; - } + link_res = dc_link_get_cur_link_res(link); - if (i >= dc->link_count) + for (i = 0; i < MAX_PIPES; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe->stream && pipe->stream->link) { + if (pipe->stream->link == link) + break; + } + } + if (pipe && link_res) + dc_link_dp_set_drive_settings(pipe->stream->link, link_res, lt_settings); + else ASSERT_CRITICAL(false); - - dc_link_dp_set_drive_settings(dc->links[i], lt_settings); } void dc_link_set_preferred_link_settings(struct dc *dc, @@ -4649,11 +4618,9 @@ void dc_link_set_preferred_training_settings(struct dc *dc, if (link_setting != NULL) { link->preferred_link_setting = *link_setting; #if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(link_setting) == - DP_128b_132b_ENCODING && !link->hpo_dp_link_enc) { - if (!add_dp_hpo_link_encoder_to_link(link)) - memset(&link->preferred_link_setting, 0, sizeof(link->preferred_link_setting)); - } + if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING) + /* TODO: add dc update for acquiring link res */ + skip_immediate_retrain = true; #endif } else { link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN; @@ -4780,6 +4747,9 @@ void dc_link_overwrite_extended_receiver_cap( bool dc_link_is_fec_supported(const struct dc_link *link) { + /* TODO - use asic cap instead of link_enc->features + * we no longer know which link enc to use for this link before commit + */ struct link_encoder *link_enc = NULL; /* Links supporting dynamically assigned link encoder will be assigned next @@ -4874,3 +4844,125 @@ uint32_t dc_bandwidth_in_kbps_from_timing( return kbps; } + +const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link) +{ + int i; + struct pipe_ctx *pipe = NULL; + const struct link_resource *link_res = NULL; + + for (i = 0; i < MAX_PIPES; i++) { + pipe = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) { + if (pipe->stream->link == link) { + link_res = &pipe->link_res; + break; + } + } + } + + return link_res; +} +
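For illustration (hypothetical, not part of the patch): dc_link_get_cur_link_res() returns NULL when no head pipe in the current state is driving the link, so callers must handle that case. A minimal sketch, assuming CONFIG_DRM_AMD_DC_DCN so that struct link_resource carries the hpo_dp_link_enc member:

/* Hypothetical query: does this active link currently own an HPO DP
 * link encoder (i.e. is it running a 128b/132b link)?
 */
static bool example_link_has_hpo_dp_link_enc(const struct dc_link *link)
{
	const struct link_resource *link_res = dc_link_get_cur_link_res(link);

	return link_res != NULL && link_res->hpo_dp_link_enc != NULL;
}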
+/** + * dc_get_cur_link_res_map() - take a snapshot of current link resource allocation state + * @dc: pointer to dc of the dm calling this + * @map: a dc link resource snapshot defined internally to dc. + * + * DM needs to capture a snapshot of the current link resource allocation mapping + * and store it in its persistent storage. + * + * Some of the link resources use a first come, first served policy. + * The allocation mapping depends on the original hotplug order, and this + * information is lost the next time the driver is loaded. The snapshot is + * used to restore link resources to their previous state so the user gets a + * consistent link capability allocation across reboots. + * + * Return: none (void function) + * + */ +void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) +{ +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct dc_link *link; + uint8_t i; + uint32_t hpo_dp_recycle_map = 0; + + *map = 0; + + if (dc->caps.dp_hpo) { + for (i = 0; i < dc->caps.max_links; i++) { + link = dc->links[i]; + if (link->link_status.link_active && + dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING && + dp_get_link_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING) + /* hpo dp link encoder is considered recycled when RX reports 128b/132b encoding capability + * but the current link doesn't use it. + */ + hpo_dp_recycle_map |= (1 << i); + } + *map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT); + } +#endif +} + +/** + * dc_restore_link_res_map() - restore link resource allocation state from a snapshot + * @dc: pointer to dc of the dm calling this + * @map: a dc link resource snapshot defined internally to dc. + * + * DM needs to call this function after initial link detection on boot and + * before committing streams for the first time, to restore the link resource + * allocation state from the previous boot session. + * + * Some of the link resources use a first come, first served policy. + * The allocation mapping depends on the original hotplug order, and this + * information is lost the next time the driver is loaded. The snapshot is + * used to restore link resources to their previous state so the user gets a + * consistent link capability allocation across reboots. + * + * Return: none (void function) + * + */ +void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) +{ +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct dc_link *link; + uint8_t i; + unsigned int available_hpo_dp_count; + uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK) + >> LINK_RES_HPO_DP_REC_MAP__SHIFT; + + if (dc->caps.dp_hpo) { + available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count; + /* remove excess 128b/132b encoding support for non-recycled links */ + for (i = 0; i < dc->caps.max_links; i++) { + if ((hpo_dp_recycle_map & (1 << i)) == 0) { + link = dc->links[i]; + if (link->type != dc_connection_none && + dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { + if (available_hpo_dp_count > 0) + available_hpo_dp_count--; + else + /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ + link->verified_link_cap.link_rate = LINK_RATE_HIGH3; + } + } + } + /* remove excess 128b/132b encoding support for recycled links */ + for (i = 0; i < dc->caps.max_links; i++) { + if ((hpo_dp_recycle_map & (1 << i)) != 0) { + link = dc->links[i]; + if (link->type != dc_connection_none && + dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { + if (available_hpo_dp_count > 0) + available_hpo_dp_count--; + else + /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ + link->verified_link_cap.link_rate = LINK_RATE_HIGH3; + } + } + } + } +#endif +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 297553074bfd..4c3ab2575e4b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -100,6 +100,7 @@ static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = { #endif
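As a usage sketch (hypothetical, not part of the patch): per the kernel-doc of the two dc_*_link_res_map() helpers added above, DM pairs the calls across a boot cycle and persists the 32-bit map itself. The example_dm_store_u32()/example_dm_load_u32() persistence helpers below are assumptions; only the dc_*_link_res_map() calls come from the change.

/* At shutdown or suspend: snapshot the current allocation state. */
static void example_dm_save_link_res_state(struct dc *dc)
{
	uint32_t map;

	dc_get_cur_link_res_map(dc, &map);
	example_dm_store_u32("link_res_map", map);	/* hypothetical persistence */
}

/* On next boot: after initial link detection, before the first stream
 * commit, restore the previous allocation.
 */
static void example_dm_restore_link_res_state(struct dc *dc)
{
	uint32_t map = example_dm_load_u32("link_res_map");	/* hypothetical persistence */

	dc_restore_link_res_map(dc, &map);
}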
static bool decide_fallback_link_setting( + struct dc_link *link, struct dc_link_settings initial_link_settings, struct dc_link_settings *current_link_setting, enum link_training_result training_result); @@ -201,7 +202,7 @@ void dp_wait_for_training_aux_rd_interval( uint32_t wait_in_micro_secs) { #if defined(CONFIG_DRM_AMD_DC_DCN) - if (wait_in_micro_secs > 16000) + if (wait_in_micro_secs > 1000) msleep(wait_in_micro_secs/1000); else udelay(wait_in_micro_secs); @@ -398,6 +399,223 @@ static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) } #endif +static void vendor_specific_lttpr_wa_one_start(struct dc_link *link) +{ + const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0xff}; + const uint8_t offset = dp_convert_to_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + uint32_t vendor_lttpr_write_address = 0xF004F; + + if (offset != 0xFF) + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + /* W/A for certain LTTPR to reset their lane settings, part one of two */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data[0], + sizeof(vendor_lttpr_write_data)); +} + +static void vendor_specific_lttpr_wa_one_end( + struct dc_link *link, + uint8_t retry_count) +{ + const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0x0}; + const uint8_t offset = dp_convert_to_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + uint32_t vendor_lttpr_write_address = 0xF004F; + + if (!retry_count) { + if (offset != 0xFF) + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + /* W/A for certain LTTPR to reset their lane settings, part two of two */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data[0], + sizeof(vendor_lttpr_write_data)); + } +} + +static void vendor_specific_lttpr_wa_one_two( + struct dc_link *link, + const uint8_t rate) +{ + if (link->apply_vendor_specific_lttpr_link_rate_wa) { + uint8_t toggle_rate = 0x0; + + if (rate == 0x6) + toggle_rate = 0xA; + else + toggle_rate = 0x6; + + if (link->vendor_specific_lttpr_link_rate_wa == rate) { + /* W/A for certain LTTPR to reset internal state for link training */ + core_link_write_dpcd( + link, + DP_LINK_BW_SET, + &toggle_rate, + 1); + } + + /* Store the last attempted link rate for this link */ + link->vendor_specific_lttpr_link_rate_wa = rate; + } +} + +static void vendor_specific_lttpr_wa_three( + struct dc_link *link, + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX]) +{ + const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; + const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; + const uint8_t offset = dp_convert_to_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + uint32_t vendor_lttpr_write_address = 0xF004F; + uint32_t vendor_lttpr_read_address = 0xF0053; + uint8_t dprx_vs = 0; + uint8_t dprx_pe = 0; + uint8_t lane; + + if (offset != 0xFF) { + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + vendor_lttpr_read_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + } + + /* W/A to read lane settings requested by DPRX */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_read_dpcd( + link, + vendor_lttpr_read_address, + &dprx_vs, + 1); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + 
sizeof(vendor_lttpr_write_data_pe)); + core_link_read_dpcd( + link, + vendor_lttpr_read_address, + &dprx_pe, + 1); + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE = (dprx_vs >> (2 * lane)) & 0x3; + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE = (dprx_pe >> (2 * lane)) & 0x3; + } +} + +static void vendor_specific_lttpr_wa_three_dpcd( + struct dc_link *link, + union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]) +{ + union lane_adjust lane_adjust[LANE_COUNT_DP_MAX]; + uint8_t lane = 0; + + vendor_specific_lttpr_wa_three(link, lane_adjust); + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = lane_adjust[lane].bits.VOLTAGE_SWING_LANE; + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = lane_adjust[lane].bits.PRE_EMPHASIS_LANE; + } +} + +static void vendor_specific_lttpr_wa_four( + struct dc_link *link, + bool apply_wa) +{ + const uint8_t vendor_lttpr_write_data_one[4] = {0x1, 0x55, 0x63, 0x8}; + const uint8_t vendor_lttpr_write_data_two[4] = {0x1, 0x55, 0x63, 0x0}; + const uint8_t offset = dp_convert_to_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + uint32_t vendor_lttpr_write_address = 0xF004F; +#if defined(CONFIG_DRM_AMD_DC_DP2_0) + uint8_t sink_status = 0; + uint8_t i; +#endif + + if (offset != 0xFF) + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + /* W/A to pass through DPCD write of TPS=0 to DPRX */ + if (apply_wa) { + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_one[0], + sizeof(vendor_lttpr_write_data_one)); + } + + /* clear training pattern set */ + dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + + if (apply_wa) { + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_two[0], + sizeof(vendor_lttpr_write_data_two)); + } + +#if defined(CONFIG_DRM_AMD_DC_DP2_0) + /* poll for intra-hop disable */ + for (i = 0; i < 10; i++) { + if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && + (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) + break; + udelay(1000); + } +#endif +} + +static void vendor_specific_lttpr_wa_five( + struct dc_link *link, + const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], + uint8_t lane_count) +{ + const uint32_t vendor_lttpr_write_address = 0xF004F; + const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; + uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; + uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; + uint8_t lane = 0; + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Force LTTPR to output desired VS and PE */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_reset[0], + sizeof(vendor_lttpr_write_data_reset)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); +} + enum dc_status dpcd_set_link_settings( struct dc_link *link, const struct link_training_settings *lt_settings) @@ -452,6 +670,15 @@ enum dc_status dpcd_set_link_settings( #else rate = 
(uint8_t) (lt_settings->link_settings.link_rate); #endif + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) + vendor_specific_lttpr_wa_one_start(link); + + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) + vendor_specific_lttpr_wa_one_two(link, rate); + status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); } @@ -1024,6 +1251,7 @@ bool dp_is_max_vs_reached( static bool perform_post_lt_adj_req_sequence( struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings) { enum dc_lane_count lane_count = @@ -1087,6 +1315,7 @@ static bool perform_post_lt_adj_req_sequence( lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); dc_link_dp_set_drive_settings(link, + link_res, lt_settings); break; } @@ -1161,6 +1390,7 @@ enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, static enum link_training_result perform_channel_equalization_sequence( struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings, uint32_t offset) { @@ -1183,12 +1413,12 @@ static enum link_training_result perform_channel_equalization_sequence( tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; #endif - dp_set_hw_training_pattern(link, tr_pattern, offset); + dp_set_hw_training_pattern(link, link_res, tr_pattern, offset); for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; retries_ch_eq++) { - dp_set_hw_lane_settings(link, lt_settings, offset); + dp_set_hw_lane_settings(link, link_res, lt_settings, offset); /* 2. update DPCD*/ if (!retries_ch_eq) @@ -1211,6 +1441,12 @@ static enum link_training_result perform_channel_equalization_sequence( dp_translate_training_aux_read_interval( link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + wait_time_microsec = 16000; + } + dp_wait_for_training_aux_rd_interval( link, wait_time_microsec); @@ -1246,18 +1482,20 @@ static enum link_training_result perform_channel_equalization_sequence( } static void start_clock_recovery_pattern_early(struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings, uint32_t offset) { DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n", __func__); - dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset); - dp_set_hw_lane_settings(link, lt_settings, offset); + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); + dp_set_hw_lane_settings(link, link_res, lt_settings, offset); udelay(400); } static enum link_training_result perform_clock_recovery_sequence( struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings, uint32_t offset) { @@ -1273,7 +1511,7 @@ static enum link_training_result perform_clock_recovery_sequence( retry_count = 0; if (!link->ctx->dc->work_arounds.lt_early_cr_pattern) - dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset); + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); /* najeeb - The synaptics MST hub can put the LT in * infinite loop by switching the VS @@ -1290,6 +1528,7 @@ static enum link_training_result perform_clock_recovery_sequence( /* 1. 
call HWSS to set lane settings*/ dp_set_hw_lane_settings( link, + link_res, lt_settings, offset); @@ -1311,8 +1550,10 @@ static enum link_training_result perform_clock_recovery_sequence( /* 3. wait receiver to lock-on*/ wait_time_microsec = lt_settings->cr_pattern_time; - if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) - wait_time_microsec = TRAINING_AUX_RD_INTERVAL; + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) { + wait_time_microsec = 16000; + } dp_wait_for_training_aux_rd_interval( link, @@ -1329,6 +1570,13 @@ static enum link_training_result perform_clock_recovery_sequence( dpcd_lane_adjust, offset); + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + vendor_specific_lttpr_wa_one_end(link, retry_count); + vendor_specific_lttpr_wa_three(link, dpcd_lane_adjust); + } + /* 5. check CR done*/ if (dp_is_cr_done(lane_count, dpcd_lane_status)) return LINK_TRAINING_SUCCESS; @@ -1379,13 +1627,14 @@ static enum link_training_result perform_clock_recovery_sequence( static inline enum link_training_result dp_transition_to_video_idle( struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings, enum link_training_result status) { union lane_count_set lane_count_set = {0}; /* 4. mainlink output idle pattern*/ - dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); + dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); /* * 5. post training adjust if required @@ -1409,7 +1658,7 @@ static inline enum link_training_result dp_transition_to_video_idle( } if (status == LINK_TRAINING_SUCCESS && - perform_post_lt_adj_req_sequence(link, lt_settings) == false) + perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false) status = LINK_TRAINING_LQA_FAIL; lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; @@ -1852,10 +2101,11 @@ static void print_status_message( void dc_link_dp_set_drive_settings( struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings) { /* program ASIC PHY settings*/ - dp_set_hw_lane_settings(link, lt_settings, DPRX); + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); @@ -1866,6 +2116,7 @@ void dc_link_dp_set_drive_settings( bool dc_link_dp_perform_link_training_skip_aux( struct dc_link *link, + const struct link_resource *link_res, const struct dc_link_settings *link_setting) { struct link_training_settings lt_settings = {0}; @@ -1882,10 +2133,10 @@ bool dc_link_dp_perform_link_training_skip_aux( /* 1. Perform_clock_recovery_sequence. */ /* transmit training pattern for clock recovery */ - dp_set_hw_training_pattern(link, lt_settings.pattern_for_cr, DPRX); + dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX); /* call HWSS to set lane settings*/ - dp_set_hw_lane_settings(link, <_settings, DPRX); + dp_set_hw_lane_settings(link, link_res, <_settings, DPRX); /* wait receiver to lock-on*/ dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time); @@ -1893,10 +2144,10 @@ bool dc_link_dp_perform_link_training_skip_aux( /* 2. Perform_channel_equalization_sequence. */ /* transmit training pattern for channel equalization. 
*/ - dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq, DPRX); + dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX); /* call HWSS to set lane settings*/ - dp_set_hw_lane_settings(link, <_settings, DPRX); + dp_set_hw_lane_settings(link, link_res, <_settings, DPRX); /* wait receiver to lock-on. */ dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time); @@ -1904,7 +2155,7 @@ bool dc_link_dp_perform_link_training_skip_aux( /* 3. Perform_link_training_int. */ /* Mainlink output idle pattern. */ - dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); + dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); print_status_message(link, <_settings, LINK_TRAINING_SUCCESS); @@ -1985,6 +2236,7 @@ static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link, static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings) { uint8_t loop_count; @@ -1996,7 +2248,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; /* Transmit 128b/132b_TPS1 over Main-Link */ - dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, DPRX); + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX); /* Set TRAINING_PATTERN_SET to 01h */ dpcd_set_training_pattern(link, lt_settings->pattern_for_cr); @@ -2006,8 +2258,8 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); - dp_set_hw_lane_settings(link, lt_settings, DPRX); - dp_set_hw_training_pattern(link, lt_settings->pattern_for_eq, DPRX); + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX); /* Set loop counter to start from 1 */ loop_count = 1; @@ -2034,7 +2286,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { status = DP_128b_132b_LT_FAILED; } else { - dp_set_hw_lane_settings(link, lt_settings, DPRX); + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); dpcd_set_lane_settings(link, lt_settings, DPRX); } loop_count++; @@ -2063,6 +2315,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( static enum link_training_result dp_perform_128b_132b_cds_done_sequence( struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings) { /* Assumption: assume hardware has transmitted eq pattern */ @@ -2099,6 +2352,7 @@ static enum link_training_result dp_perform_128b_132b_cds_done_sequence( static enum link_training_result dp_perform_8b_10b_link_training( struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings) { enum link_training_result status = LINK_TRAINING_SUCCESS; @@ -2108,7 +2362,7 @@ static enum link_training_result dp_perform_8b_10b_link_training( uint8_t lane = 0; if (link->ctx->dc->work_arounds.lt_early_cr_pattern) - start_clock_recovery_pattern_early(link, lt_settings, DPRX); + start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); /* 1. set link rate, lane count and spread. 
*/ dpcd_set_link_settings(link, lt_settings); @@ -2122,12 +2376,13 @@ static enum link_training_result dp_perform_8b_10b_link_training( for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); repeater_id--) { - status = perform_clock_recovery_sequence(link, lt_settings, repeater_id); + status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); if (status != LINK_TRAINING_SUCCESS) break; status = perform_channel_equalization_sequence(link, + link_res, lt_settings, repeater_id); @@ -2142,9 +2397,10 @@ static enum link_training_result dp_perform_8b_10b_link_training( } if (status == LINK_TRAINING_SUCCESS) { - status = perform_clock_recovery_sequence(link, lt_settings, DPRX); + status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX); if (status == LINK_TRAINING_SUCCESS) { status = perform_channel_equalization_sequence(link, + link_res, lt_settings, DPRX); } @@ -2156,6 +2412,7 @@ static enum link_training_result dp_perform_8b_10b_link_training( #if defined(CONFIG_DRM_AMD_DC_DCN) static enum link_training_result dp_perform_128b_132b_link_training( struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings) { enum link_training_result result = LINK_TRAINING_SUCCESS; @@ -2167,23 +2424,358 @@ static enum link_training_result dp_perform_128b_132b_link_training( decide_8b_10b_training_settings(link, <_settings->link_settings, &legacy_settings); - return dp_perform_8b_10b_link_training(link, &legacy_settings); + return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings); } dpcd_set_link_settings(link, lt_settings); if (result == LINK_TRAINING_SUCCESS) - result = dp_perform_128b_132b_channel_eq_done_sequence(link, lt_settings); + result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings); if (result == LINK_TRAINING_SUCCESS) - result = dp_perform_128b_132b_cds_done_sequence(link, lt_settings); + result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings); return result; } #endif +static enum link_training_result dc_link_dp_perform_fixed_vs_pe_training_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; + const uint8_t offset = dp_convert_to_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; + const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; + uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; + uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; + uint32_t vendor_lttpr_write_address = 0xF004F; + enum link_training_result status = LINK_TRAINING_SUCCESS; + uint8_t lane = 0; + union down_spread_ctrl downspread = {0}; + union lane_count_set lane_count_set = {0}; + uint8_t toggle_rate; + uint8_t rate; + + /* Only 8b/10b is supported */ + ASSERT(dp_get_link_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING); + + if (offset != 0xFF) { + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + } + + /* Vendor specific: Reset lane settings */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_reset[0], + sizeof(vendor_lttpr_write_data_reset)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + 
sizeof(vendor_lttpr_write_data_vs)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); + + /* Vendor specific: Enable intercept */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_intercept_en[0], + sizeof(vendor_lttpr_write_data_intercept_en)); + + /* 1. set link rate, lane count and spread. */ + + downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); + + lane_count_set.bits.LANE_COUNT_SET = + lt_settings->link_settings.lane_count; + + lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + + if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = + link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; + } + + core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, + &downspread.raw, sizeof(downspread)); + + core_link_write_dpcd(link, DP_LANE_COUNT_SET, + &lane_count_set.raw, 1); + +#if defined(CONFIG_DRM_AMD_DC_DCN) + rate = get_dpcd_link_rate(<_settings->link_settings); +#else + rate = (uint8_t) (lt_settings->link_settings.link_rate); +#endif + + /* Vendor specific: Toggle link rate */ + toggle_rate = (rate == 0x6) ? 0xA : 0x6; + + if (link->vendor_specific_lttpr_link_rate_wa == rate) { + core_link_write_dpcd( + link, + DP_LINK_BW_SET, + &toggle_rate, + 1); + } + + link->vendor_specific_lttpr_link_rate_wa = rate; + + core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + + DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", + __func__, + DP_LINK_BW_SET, + lt_settings->link_settings.link_rate, + DP_LANE_COUNT_SET, + lt_settings->link_settings.lane_count, + lt_settings->enhanced_framing, + DP_DOWNSPREAD_CTRL, + lt_settings->link_settings.link_spread); + + /* 2. Perform link training */ + + /* Perform Clock Recovery Sequence */ + if (status == LINK_TRAINING_SUCCESS) { + uint32_t retries_cr; + uint32_t retry_count; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; + union lane_align_status_updated dpcd_lane_status_updated; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + retries_cr = 0; + retry_count = 0; + + while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + + memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); + memset(&dpcd_lane_status_updated, '\0', + sizeof(dpcd_lane_status_updated)); + + /* 1. call HWSS to set lane settings */ + dp_set_hw_lane_settings( + link, + link_res, + lt_settings, + 0); + + /* 2. update DPCD of the receiver */ + if (!retry_count) { + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration. 
+ */ + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + lt_settings->pattern_for_cr, + 0); + /* Vendor specific: Disable intercept */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_intercept_dis[0], + sizeof(vendor_lttpr_write_data_intercept_dis)); + } else { + vendor_lttpr_write_data_vs[3] = 0; + vendor_lttpr_write_data_pe[3] = 0; + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Vendor specific: Update VS and PE to DPRX requested value */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); + + dpcd_set_lane_settings( + link, + lt_settings, + 0); + } + + /* 3. wait receiver to lock-on*/ + wait_time_microsec = lt_settings->cr_pattern_time; + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4. Read lane status and requested drive + * settings as set by the sink + */ + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + 0); + + /* 5. check CR done*/ + if (dp_is_cr_done(lane_count, dpcd_lane_status)) { + status = LINK_TRAINING_SUCCESS; + break; + } + + /* 6. max VS reached*/ + if (dp_is_max_vs_reached(lt_settings)) + break; + + /* 7. same lane settings */ + /* Note: settings are the same for all lanes, + * so comparing first lane is sufficient + */ + if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + retries_cr++; + else + retries_cr = 0; + + /* 8. update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + retry_count++; + } + + if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { + ASSERT(0); + DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. 
Possibly voltage swing issue", + __func__, + LINK_TRAINING_MAX_CR_RETRY); + + } + + status = dp_get_cr_failure(lane_count, dpcd_lane_status); + } + + /* Perform Channel EQ Sequence */ + if (status == LINK_TRAINING_SUCCESS) { + enum dc_dp_training_pattern tr_pattern; + uint32_t retries_ch_eq; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + /* Note: also check that TPS4 is a supported feature*/ + tr_pattern = lt_settings->pattern_for_eq; + + dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); + + status = LINK_TRAINING_EQ_FAIL_EQ; + + for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; + retries_ch_eq++) { + + dp_set_hw_lane_settings(link, link_res, lt_settings, 0); + + vendor_lttpr_write_data_vs[3] = 0; + vendor_lttpr_write_data_pe[3] = 0; + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Vendor specific: Update VS and PE to DPRX requested value */ + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_vs[0], + sizeof(vendor_lttpr_write_data_vs)); + core_link_write_dpcd( + link, + vendor_lttpr_write_address, + &vendor_lttpr_write_data_pe[0], + sizeof(vendor_lttpr_write_data_pe)); + + /* 2. update DPCD*/ + if (!retries_ch_eq) + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration + */ + + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + tr_pattern, 0); + else + dpcd_set_lane_settings(link, lt_settings, 0); + + /* 3. wait for receiver to lock-on*/ + wait_time_microsec = lt_settings->eq_pattern_time; + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4. Read lane status and requested + * drive settings as set by the sink + */ + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + 0); + + /* 5. check CR done*/ + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { + status = LINK_TRAINING_EQ_FAIL_CR; + break; + } + + /* 6. check CHEQ done*/ + if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && + dp_is_symbol_locked(lane_count, dpcd_lane_status) && + dp_is_interlane_aligned(dpcd_lane_status_updated)) { + status = LINK_TRAINING_SUCCESS; + break; + } + + /* 7. 
update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + } + } + + return status; +} + + enum link_training_result dc_link_dp_perform_link_training( struct dc_link *link, + const struct link_resource *link_res, const struct dc_link_settings *link_settings, bool skip_video_pattern) { @@ -2203,30 +2795,51 @@ enum link_training_result dc_link_dp_perform_link_training( <_settings); /* reset previous training states */ - dpcd_exit_training_mode(link); + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + link->apply_vendor_specific_lttpr_link_rate_wa = true; + vendor_specific_lttpr_wa_four(link, true); + } else { + dpcd_exit_training_mode(link); + } /* configure link prior to entering training mode */ dpcd_configure_lttpr_mode(link, <_settings); - dp_set_fec_ready(link, lt_settings.should_set_fec_ready); + dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready); dpcd_configure_channel_coding(link, <_settings); /* enter training mode: * Per DP specs starting from here, DPTX device shall not issue * Non-LT AUX transactions inside training mode. */ - if (encoding == DP_8b_10b_ENCODING) - status = dp_perform_8b_10b_link_training(link, <_settings); + if (!link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) + status = dc_link_dp_perform_fixed_vs_pe_training_sequence(link, link_res, <_settings); + else if (encoding == DP_8b_10b_ENCODING) + status = dp_perform_8b_10b_link_training(link, link_res, <_settings); #if defined(CONFIG_DRM_AMD_DC_DCN) else if (encoding == DP_128b_132b_ENCODING) - status = dp_perform_128b_132b_link_training(link, <_settings); + status = dp_perform_128b_132b_link_training(link, link_res, <_settings); #endif else ASSERT(0); - /* exit training mode and switch to video idle */ - dpcd_exit_training_mode(link); + /* exit training mode */ + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + link->apply_vendor_specific_lttpr_link_rate_wa = false; + vendor_specific_lttpr_wa_four(link, (status != LINK_TRAINING_SUCCESS)); + } else { + dpcd_exit_training_mode(link); + } + + /* switch to video idle */ if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) status = dp_transition_to_video_idle(link, + link_res, <_settings, status); @@ -2278,6 +2891,7 @@ bool perform_link_training_with_retries( dp_enable_link_phy( link, + &pipe_ctx->link_res, signal, pipe_ctx->clock_source->id, ¤t_setting); @@ -2305,23 +2919,24 @@ bool perform_link_training_with_retries( dp_set_panel_mode(link, panel_mode); if (link->aux_access_disabled) { - dc_link_dp_perform_link_training_skip_aux(link, ¤t_setting); + dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, ¤t_setting); return true; } else { /** @todo Consolidate USB4 DP and DPx.x training. */ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { status = dc_link_dpia_perform_link_training(link, - ¤t_setting, - skip_video_pattern); + &pipe_ctx->link_res, + ¤t_setting, + skip_video_pattern); /* Transmit idle pattern once training successful. 
*/ if (status == LINK_TRAINING_SUCCESS) - dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, - NULL, 0); + dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); } else { status = dc_link_dp_perform_link_training(link, - ¤t_setting, - skip_video_pattern); + &pipe_ctx->link_res, + ¤t_setting, + skip_video_pattern); } if (status == LINK_TRAINING_SUCCESS) @@ -2336,7 +2951,7 @@ bool perform_link_training_with_retries( DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n", __func__, (unsigned int)j + 1, attempts); - dp_disable_link_phy(link, signal); + dp_disable_link_phy(link, &pipe_ctx->link_res, signal); /* Abort link training if failure due to sink being unplugged. */ if (status == LINK_TRAINING_ABORT) { @@ -2349,7 +2964,7 @@ bool perform_link_training_with_retries( uint32_t req_bw; uint32_t link_bw; - decide_fallback_link_setting(*link_setting, ¤t_setting, status); + decide_fallback_link_setting(link, *link_setting, ¤t_setting, status); /* Fail link training if reduced link bandwidth no longer meets * stream requirements. */ @@ -2385,12 +3000,13 @@ static enum clock_source_id get_clock_source_id(struct dc_link *link) return dp_cs_id; } -static void set_dp_mst_mode(struct dc_link *link, bool mst_enable) +static void set_dp_mst_mode(struct dc_link *link, const struct link_resource *link_res, + bool mst_enable) { if (mst_enable == false && link->type == dc_connection_mst_branch) { /* Disable MST on link. Use only local sink. */ - dp_disable_link_phy_mst(link, link->connector_signal); + dp_disable_link_phy_mst(link, link_res, link->connector_signal); link->type = dc_connection_single; link->local_sink = link->remote_sinks[0]; @@ -2401,7 +3017,7 @@ static void set_dp_mst_mode(struct dc_link *link, bool mst_enable) link->type == dc_connection_single && link->remote_sinks[0] != NULL) { /* Re-enable MST on link. 
*/ - dp_disable_link_phy(link, link->connector_signal); + dp_disable_link_phy(link, link_res, link->connector_signal); dp_enable_mst_on_sink(link, true); link->type = dc_connection_mst_branch; @@ -2427,6 +3043,7 @@ bool dc_link_dp_sync_lt_begin(struct dc_link *link) enum link_training_result dc_link_dp_sync_lt_attempt( struct dc_link *link, + const struct link_resource *link_res, struct dc_link_settings *link_settings, struct dc_link_training_overrides *lt_overrides) { @@ -2446,14 +3063,14 @@ enum link_training_result dc_link_dp_sync_lt_attempt( <_settings); /* Setup MST Mode */ if (lt_overrides->mst_enable) - set_dp_mst_mode(link, *lt_overrides->mst_enable); + set_dp_mst_mode(link, link_res, *lt_overrides->mst_enable); /* Disable link */ - dp_disable_link_phy(link, link->connector_signal); + dp_disable_link_phy(link, link_res, link->connector_signal); /* Enable link */ dp_cs_id = get_clock_source_id(link); - dp_enable_link_phy(link, link->connector_signal, + dp_enable_link_phy(link, link_res, link->connector_signal, dp_cs_id, link_settings); /* Set FEC enable */ @@ -2461,7 +3078,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt( if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { #endif fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable; - dp_set_fec_ready(link, fec_enable); + dp_set_fec_ready(link, NULL, fec_enable); #if defined(CONFIG_DRM_AMD_DC_DCN) } #endif @@ -2478,7 +3095,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt( /* Attempt to train with given link training settings */ if (link->ctx->dc->work_arounds.lt_early_cr_pattern) - start_clock_recovery_pattern_early(link, <_settings, DPRX); + start_clock_recovery_pattern_early(link, link_res, <_settings, DPRX); /* Set link rate, lane count and spread. */ dpcd_set_link_settings(link, <_settings); @@ -2486,9 +3103,10 @@ enum link_training_result dc_link_dp_sync_lt_attempt( /* 2. 
perform link training (set link training done * to false is done as well) */ - lt_status = perform_clock_recovery_sequence(link, &lt_settings, DPRX); + lt_status = perform_clock_recovery_sequence(link, link_res, &lt_settings, DPRX); if (lt_status == LINK_TRAINING_SUCCESS) { lt_status = perform_channel_equalization_sequence(link, + link_res, &lt_settings, DPRX); } @@ -2509,11 +3127,11 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) #if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_link_settings link_settings = link->cur_link_settings; #endif - dp_disable_link_phy(link, link->connector_signal); + dp_disable_link_phy(link, NULL, link->connector_signal); #if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) #endif - dp_set_fec_ready(link, false); + dp_set_fec_ready(link, NULL, false); } link->sync_lt_in_progress = false; @@ -2568,7 +3186,8 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_ return false; } -static struct dc_link_settings get_max_link_cap(struct dc_link *link) +static struct dc_link_settings get_max_link_cap(struct dc_link *link, + const struct link_resource *link_res) { struct dc_link_settings max_link_cap = {0}; #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -2592,9 +3211,11 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) if (link_enc) link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap); #if defined(CONFIG_DRM_AMD_DC_DCN) - if (max_link_cap.link_rate >= LINK_RATE_UHBR10 && - !link->hpo_dp_link_enc) - max_link_cap.link_rate = LINK_RATE_HIGH3; + if (max_link_cap.link_rate >= LINK_RATE_UHBR10) { + if (!link_res->hpo_dp_link_enc || + link->dc->debug.disable_uhbr) + max_link_cap.link_rate = LINK_RATE_HIGH3; + } #endif /* Lower link settings based on sink's link cap */ @@ -2612,7 +3233,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) * account for lttpr repeaters cap * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3). */ - if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + if (link->lttpr_mode != LTTPR_MODE_NON_LTTPR) { if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; @@ -2751,6 +3372,7 @@ bool hpd_rx_irq_check_link_loss_status( bool dp_verify_link_cap( struct dc_link *link, + const struct link_resource *link_res, struct dc_link_settings *known_limit_link_setting, int *fail_count) { @@ -2768,7 +3390,7 @@ bool dp_verify_link_cap( /* link training starts with the maximum common settings * supported by both sink and ASIC.
*/ - max_link_cap = get_max_link_cap(link); + max_link_cap = get_max_link_cap(link, link_res); initial_link_settings = get_common_supported_link_settings( *known_limit_link_setting, max_link_cap); @@ -2808,7 +3430,7 @@ bool dp_verify_link_cap( * find the physical link capability */ /* disable PHY done possible by BIOS, will be done by driver itself */ - dp_disable_link_phy(link, link->connector_signal); + dp_disable_link_phy(link, link_res, link->connector_signal); dp_cs_id = get_clock_source_id(link); @@ -2820,8 +3442,8 @@ bool dp_verify_link_cap( */ if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C && link->dc->debug.usbc_combo_phy_reset_wa) { - dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur); - dp_disable_link_phy(link, link->connector_signal); + dp_enable_link_phy(link, link_res, link->connector_signal, dp_cs_id, cur); + dp_disable_link_phy(link, link_res, link->connector_signal); } do { @@ -2832,6 +3454,7 @@ bool dp_verify_link_cap( dp_enable_link_phy( link, + link_res, link->connector_signal, dp_cs_id, cur); @@ -2842,6 +3465,7 @@ bool dp_verify_link_cap( else { status = dc_link_dp_perform_link_training( link, + link_res, cur, skip_video_pattern); if (status == LINK_TRAINING_SUCCESS) @@ -2863,8 +3487,8 @@ bool dp_verify_link_cap( * setting or before returning we'll enable it later * based on the actual mode we're driving */ - dp_disable_link_phy(link, link->connector_signal); - } while (!success && decide_fallback_link_setting( + dp_disable_link_phy(link, link_res, link->connector_signal); + } while (!success && decide_fallback_link_setting(link, initial_link_settings, cur, status)); /* Link Training failed for all Link Settings @@ -2887,6 +3511,7 @@ bool dp_verify_link_cap( bool dp_verify_link_cap_with_retries( struct dc_link *link, + const struct link_resource *link_res, struct dc_link_settings *known_limit_link_setting, int attempts) { @@ -2904,7 +3529,7 @@ bool dp_verify_link_cap_with_retries( link->verified_link_cap.link_rate = LINK_RATE_LOW; link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED; break; - } else if (dp_verify_link_cap(link, + } else if (dp_verify_link_cap(link, link_res, known_limit_link_setting, &fail_count) && fail_count == 0) { success = true; @@ -2916,13 +3541,13 @@ bool dp_verify_link_cap_with_retries( } bool dp_verify_mst_link_cap( - struct dc_link *link) + struct dc_link *link, const struct link_resource *link_res) { struct dc_link_settings max_link_cap = {0}; if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_8b_10b_ENCODING) { - max_link_cap = get_max_link_cap(link); + max_link_cap = get_max_link_cap(link, link_res); link->verified_link_cap = get_common_supported_link_settings( link->reported_link_cap, max_link_cap); @@ -2931,6 +3556,7 @@ bool dp_verify_mst_link_cap( else if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING) { dp_verify_link_cap_with_retries(link, + link_res, &link->reported_link_cap, LINK_TRAINING_MAX_VERIFY_RETRY); } @@ -3116,6 +3742,7 @@ static bool decide_fallback_link_setting_max_bw_policy( * and no further fallback could be done */ static bool decide_fallback_link_setting( + struct dc_link *link, struct dc_link_settings initial_link_settings, struct dc_link_settings *current_link_setting, enum link_training_result training_result) @@ -3123,7 +3750,8 @@ static bool decide_fallback_link_setting( if (!current_link_setting) return false; #if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(&initial_link_settings) == 
DP_128b_132b_ENCODING) + if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING || + link->dc->debug.force_dp2_lt_fallback_method) return decide_fallback_link_setting_max_bw_policy(&initial_link_settings, current_link_setting); #endif @@ -3581,7 +4209,6 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link) &psr_configuration.raw, sizeof(psr_configuration.raw)); - if (psr_configuration.bits.ENABLE) { unsigned char dpcdbuf[3] = {0}; union psr_error_status psr_error_status; @@ -3613,10 +4240,12 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link) sizeof(psr_error_status.raw)); /* PSR error, disable and re-enable PSR */ - allow_active = false; - dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); - allow_active = true; - dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); + if (link->psr_settings.psr_allow_active) { + allow_active = false; + dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); + allow_active = true; + dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); + } return true; } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS == @@ -3694,6 +4323,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) &dpcd_lane_adjustment[0].raw, sizeof(dpcd_lane_adjustment)); + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) + vendor_specific_lttpr_wa_three_dpcd( + link, + link_training_settings.dpcd_lane_settings); + /* get post cursor 2 parameters * For DP 1.1a or earlier, this DPCD register's value is 0 * For DP 1.2 or later: @@ -4313,6 +4949,56 @@ static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc) return -1; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw) +{ + switch (bw) { + case 0b001: + return 9000000; + case 0b010: + return 18000000; + case 0b011: + return 24000000; + case 0b100: + return 32000000; + case 0b101: + return 40000000; + case 0b110: + return 48000000; + } + + return 0; +} + +/** + * Return PCON's post FRL link training supported BW if it's non-zero, otherwise return max_supported_frl_bw.
+ */ +static uint32_t intersect_frl_link_bw_support( + const uint32_t max_supported_frl_bw_in_kbps, + const union hdmi_encoded_link_bw hdmi_encoded_link_bw) +{ + uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps; + + // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode) + if (hdmi_encoded_link_bw.bits.FRL_MODE) { + if (hdmi_encoded_link_bw.bits.BW_48Gbps) + supported_bw_in_kbps = 48000000; + else if (hdmi_encoded_link_bw.bits.BW_40Gbps) + supported_bw_in_kbps = 40000000; + else if (hdmi_encoded_link_bw.bits.BW_32Gbps) + supported_bw_in_kbps = 32000000; + else if (hdmi_encoded_link_bw.bits.BW_24Gbps) + supported_bw_in_kbps = 24000000; + else if (hdmi_encoded_link_bw.bits.BW_18Gbps) + supported_bw_in_kbps = 18000000; + else if (hdmi_encoded_link_bw.bits.BW_9Gbps) + supported_bw_in_kbps = 9000000; + } + + return supported_bw_in_kbps; +} +#endif + static void read_dp_device_vendor_id(struct dc_link *link) { struct dp_device_vendor_id dp_id; @@ -4424,6 +5110,27 @@ static void get_active_converter_info( translate_dpcd_max_bpc( hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (link->dc->caps.hdmi_frl_pcon_support) { + union hdmi_encoded_link_bw hdmi_encoded_link_bw; + + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = + dc_link_bw_kbps_from_raw_frl_link_rate_data( + hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT); + + // Intersect reported max link bw support with the supported link rate post FRL link training + if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS, + &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) { + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support( + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps, + hdmi_encoded_link_bw); + } + + if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0) + link->dpcd_caps.dongle_caps.extendedCapValid = true; + } +#endif + if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0) link->dpcd_caps.dongle_caps.extendedCapValid = true; } @@ -5378,7 +6085,7 @@ bool dc_link_dp_set_test_pattern( DP_TEST_PATTERN_VIDEO_MODE) { /* Set CRTC Test Pattern */ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); - dp_set_hw_test_pattern(link, test_pattern, + dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern, (uint8_t *)p_custom_pattern, (uint32_t)cust_pattern_size); @@ -5400,8 +6107,18 @@ bool dc_link_dp_set_test_pattern( if (is_dp_phy_pattern(test_pattern)) { /* Set DPCD Lane Settings before running test pattern */ if (p_link_settings != NULL) { - dp_set_hw_lane_settings(link, p_link_settings, DPRX); - dpcd_set_lane_settings(link, p_link_settings, DPRX); + if (link->dc->debug.apply_vendor_specific_lttpr_wa && + (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + dpcd_set_lane_settings(link, p_link_settings, DPRX); + vendor_specific_lttpr_wa_five( + link, + p_link_settings->dpcd_lane_settings, + p_link_settings->link_settings.lane_count); + } else { + dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX); + dpcd_set_lane_settings(link, p_link_settings, DPRX); + } } /* Blank stream if running test pattern */ @@ -5414,7 +6131,7 @@ bool dc_link_dp_set_test_pattern( pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc); } - dp_set_hw_test_pattern(link, test_pattern, + dp_set_hw_test_pattern(link, 
&pipe_ctx->link_res, test_pattern, (uint8_t *)p_custom_pattern, (uint32_t)cust_pattern_size); @@ -5734,7 +6451,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link) return DP_PANEL_MODE_DEFAULT; } -enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready) +enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready) { /* FEC has to be "set ready" before the link training. * The policy is to always train with FEC @@ -5825,6 +6542,23 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) } } +struct link_encoder *dp_get_link_enc(struct dc_link *link) +{ + struct link_encoder *link_enc; + + link_enc = link->link_enc; + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) { + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, + link); + if (!link->link_enc) + link_enc = link_enc_cfg_get_next_avail_link_enc( + link->ctx->dc); + } + + return link_enc; +} + void dpcd_set_source_specific_data(struct dc_link *link) { if (!link->dc->vendor_signature.is_valid) { @@ -6201,7 +6935,7 @@ bool dpcd_write_128b_132b_sst_payload_allocation_table( } } retries++; - udelay(5000); + msleep(5); } if (!result && retries == max_retries) { @@ -6253,7 +6987,7 @@ bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link) break; } - udelay(5000); + msleep(5); } if (result == ACT_FAILED) { @@ -6284,8 +7018,21 @@ struct fixed31_32 calculate_sst_avg_time_slots_per_mtp( bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) { + /* If this assert is hit then we have a link encoder dynamic management issue */ + ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true); return (pipe_ctx->stream_res.hpo_dp_stream_enc && - pipe_ctx->stream->link->hpo_dp_link_enc && + pipe_ctx->link_res.hpo_dp_link_enc && dc_is_dp_signal(pipe_ctx->stream->signal)); } #endif + +void edp_panel_backlight_power_on(struct dc_link *link) +{ + if (link->connector_signal != SIGNAL_TYPE_EDP) + return; + + link->dc->hwss.edp_power_control(link, true); + link->dc->hwss.edp_wait_for_hpd_ready(link, true); + if (link->dc->hwss.edp_backlight_control) + link->dc->hwss.edp_backlight_control(link, true); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c index d72122593959..0e95bc5df4e7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c @@ -77,7 +77,9 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) * @param[in] link_setting Lane count, link rate and downspread control. * @param[out] lt_settings Link settings and drive settings (voltage swing and pre-emphasis). 
*/ -static enum link_training_result dpia_configure_link(struct dc_link *link, +static enum link_training_result dpia_configure_link( + struct dc_link *link, + const struct link_resource *link_res, const struct dc_link_settings *link_setting, struct link_training_settings *lt_settings) { @@ -111,7 +113,7 @@ static enum link_training_result dpia_configure_link(struct dc_link *link, fec_enable = *link->preferred_training_settings.fec_enable; else fec_enable = true; - status = dp_set_fec_ready(link, fec_enable); + status = dp_set_fec_ready(link, link_res, fec_enable); if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; @@ -252,7 +254,9 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link, * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). * @param hop The Hop in display path. DPRX = 0. */ -static enum link_training_result dpia_training_cr_non_transparent(struct dc_link *link, +static enum link_training_result dpia_training_cr_non_transparent( + struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings, uint32_t hop) { @@ -411,7 +415,9 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link * @param link DPIA link being trained. * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). */ -static enum link_training_result dpia_training_cr_transparent(struct dc_link *link, +static enum link_training_result dpia_training_cr_transparent( + struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings) { enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0; @@ -511,16 +517,18 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). * @param hop The Hop in display path. DPRX = 0. */ -static enum link_training_result dpia_training_cr_phase(struct dc_link *link, +static enum link_training_result dpia_training_cr_phase( + struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings, uint32_t hop) { enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0; if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) - result = dpia_training_cr_non_transparent(link, lt_settings, hop); + result = dpia_training_cr_non_transparent(link, link_res, lt_settings, hop); else - result = dpia_training_cr_transparent(link, lt_settings); + result = dpia_training_cr_transparent(link, link_res, lt_settings); return result; } @@ -561,7 +569,9 @@ static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link, * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). * @param hop The Hop in display path. DPRX = 0. */ -static enum link_training_result dpia_training_eq_non_transparent(struct dc_link *link, +static enum link_training_result dpia_training_eq_non_transparent( + struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings, uint32_t hop) { @@ -700,7 +710,9 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). * @param hop The Hop in display path. DPRX = 0. 
*/ -static enum link_training_result dpia_training_eq_transparent(struct dc_link *link, +static enum link_training_result dpia_training_eq_transparent( + struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings) { enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ; @@ -779,16 +791,18 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). * @param hop The Hop in display path. DPRX = 0. */ -static enum link_training_result dpia_training_eq_phase(struct dc_link *link, +static enum link_training_result dpia_training_eq_phase( + struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings, uint32_t hop) { enum link_training_result result; if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) - result = dpia_training_eq_non_transparent(link, lt_settings, hop); + result = dpia_training_eq_non_transparent(link, link_res, lt_settings, hop); else - result = dpia_training_eq_transparent(link, lt_settings); + result = dpia_training_eq_transparent(link, link_res, lt_settings); return result; } @@ -908,7 +922,9 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop) core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data); } -enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *link, +enum link_training_result dc_link_dpia_perform_link_training( + struct dc_link *link, + const struct link_resource *link_res, const struct dc_link_settings *link_setting, bool skip_video_pattern) { @@ -918,7 +934,7 @@ enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *lin int8_t repeater_id; /* Current hop. */ /* Configure link as prescribed in link_setting and set LTTPR mode. */ - result = dpia_configure_link(link, link_setting, &lt_settings); + result = dpia_configure_link(link, link_res, link_setting, &lt_settings); if (result != LINK_TRAINING_SUCCESS) return result; @@ -930,12 +946,12 @@ enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *lin */ for (repeater_id = repeater_cnt; repeater_id >= 0; repeater_id--) { /* Clock recovery. */ - result = dpia_training_cr_phase(link, &lt_settings, repeater_id); + result = dpia_training_cr_phase(link, link_res, &lt_settings, repeater_id); if (result != LINK_TRAINING_SUCCESS) break; /* Equalization.
*/ - result = dpia_training_eq_phase(link, &lt_settings, repeater_id); + result = dpia_training_eq_phase(link, link_res, &lt_settings, repeater_id); if (result != LINK_TRAINING_SUCCESS) break; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 368e834c6809..45d03d3a95c3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -71,6 +71,7 @@ void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode) void dp_enable_link_phy( struct dc_link *link, + const struct link_resource *link_res, enum signal_type signal, enum clock_source_id clock_source, const struct dc_link_settings *link_settings) @@ -135,7 +136,7 @@ void dp_enable_link_phy( #if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { - enable_dp_hpo_output(link, link_settings); + enable_dp_hpo_output(link, link_res, link_settings); } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { if (dc_is_dp_sst_signal(signal)) { link_enc->funcs->enable_dp_output( @@ -236,12 +237,12 @@ bool edp_receiver_ready_T7(struct dc_link *link) return result; } -void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) +void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, + enum signal_type signal) { struct dc *dc = link->ctx->dc; struct dmcu *dmcu = dc->res_pool->dmcu; #if defined(CONFIG_DRM_AMD_DC_DCN) - struct hpo_dp_link_encoder *hpo_link_enc = link->hpo_dp_link_enc; + struct hpo_dp_link_encoder *hpo_link_enc = link_res->hpo_dp_link_enc; #endif struct link_encoder *link_enc; @@ -260,7 +262,7 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) link->dc->hwss.edp_backlight_control(link, false); #if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) - disable_dp_hpo_output(link, signal); + disable_dp_hpo_output(link, link_res, signal); else link_enc->funcs->disable_output(link_enc, signal); #else @@ -274,7 +276,7 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) #if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING && hpo_link_enc) - disable_dp_hpo_output(link, signal); + disable_dp_hpo_output(link, link_res, signal); else link_enc->funcs->disable_output(link_enc, signal); #else @@ -294,13 +296,14 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); } -void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal) +void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, + enum signal_type signal) { /* MST disable link only when no stream uses the link */ if (link->mst_stream_alloc_table.stream_count > 0) return; - dp_disable_link_phy(link, signal); + dp_disable_link_phy(link, link_res, signal); /* set the sink to SST mode after disabling the link */ dp_enable_mst_on_sink(link, false); @@ -308,6 +311,7 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal) bool dp_set_hw_training_pattern( struct dc_link *link, + const struct link_resource *link_res, enum dc_dp_training_pattern pattern, uint32_t offset) { @@ -338,7 +342,7 @@ bool dp_set_hw_training_pattern( break; } - dp_set_hw_test_pattern(link, test_pattern, NULL, 0); + dp_set_hw_test_pattern(link, link_res,
test_pattern, NULL, 0); return true; } @@ -349,6 +353,7 @@ bool dp_set_hw_training_pattern( #endif void dp_set_hw_lane_settings( struct dc_link *link, + const struct link_resource *link_res, const struct link_training_settings *link_settings, uint32_t offset) { @@ -361,8 +366,8 @@ void dp_set_hw_lane_settings( #if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_settings->link_settings) == DP_128b_132b_ENCODING) { - link->hpo_dp_link_enc->funcs->set_ffe( - link->hpo_dp_link_enc, + link_res->hpo_dp_link_enc->funcs->set_ffe( + link_res->hpo_dp_link_enc, &link_settings->link_settings, link_settings->lane_settings[0].FFE_PRESET.raw); } else if (dp_get_link_encoding_format(&link_settings->link_settings) @@ -379,6 +384,7 @@ void dp_set_hw_lane_settings( void dp_set_hw_test_pattern( struct dc_link *link, + const struct link_resource *link_res, enum dp_test_pattern test_pattern, uint8_t *custom_pattern, uint32_t custom_pattern_size) @@ -406,8 +412,8 @@ void dp_set_hw_test_pattern( #if defined(CONFIG_DRM_AMD_DC_DCN) switch (link_encoding_format) { case DP_128b_132b_ENCODING: - link->hpo_dp_link_enc->funcs->set_link_test_pattern( - link->hpo_dp_link_enc, &pattern_param); + link_res->hpo_dp_link_enc->funcs->set_link_test_pattern( + link_res->hpo_dp_link_enc, &pattern_param); break; case DP_8b_10b_ENCODING: ASSERT(encoder); @@ -446,7 +452,7 @@ void dp_retrain_link_dp_test(struct dc_link *link, pipes[i].stream_res.stream_enc); /* disable any test pattern that might be active */ - dp_set_hw_test_pattern(link, + dp_set_hw_test_pattern(link, &pipes[i].link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); dp_receiver_power_ctrl(link, false); @@ -763,7 +769,9 @@ static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) } } -void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings) +void enable_dp_hpo_output(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings) { const struct dc *dc = link->dc; enum phyd32clk_clock_source phyd32clk; @@ -789,10 +797,11 @@ void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *l } } else { /* DP2.0 HW: call transmitter control to enable PHY */ - link->hpo_dp_link_enc->funcs->enable_link_phy( - link->hpo_dp_link_enc, + link_res->hpo_dp_link_enc->funcs->enable_link_phy( + link_res->hpo_dp_link_enc, link_settings, - link->link_enc->transmitter); + link->link_enc->transmitter, + link->link_enc->hpd_source); } /* DCCG muxing and DTBCLK DTO */ @@ -806,24 +815,26 @@ void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *l phyd32clk = get_phyd32clk_src(link); dc->res_pool->dccg->funcs->enable_symclk32_le( dc->res_pool->dccg, - link->hpo_dp_link_enc->inst, + link_res->hpo_dp_link_enc->inst, phyd32clk); - link->hpo_dp_link_enc->funcs->link_enable( - link->hpo_dp_link_enc, - link_settings->lane_count); + link_res->hpo_dp_link_enc->funcs->link_enable( + link_res->hpo_dp_link_enc, + link_settings->lane_count); } } -void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal) +void disable_dp_hpo_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) { const struct dc *dc = link->dc; - link->hpo_dp_link_enc->funcs->link_disable(link->hpo_dp_link_enc); + link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { dc->res_pool->dccg->funcs->disable_symclk32_le( dc->res_pool->dccg, - 
link->hpo_dp_link_enc->inst); + link_res->hpo_dp_link_enc->inst); dc->res_pool->dccg->funcs->set_physymclk( dc->res_pool->dccg, @@ -834,8 +845,8 @@ void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal) dm_set_phyd32clk(dc->ctx, 0); } else { /* DP2.0 HW: call transmitter control to disable PHY */ - link->hpo_dp_link_enc->funcs->disable_link_phy( - link->hpo_dp_link_enc, + link_res->hpo_dp_link_enc->funcs->disable_link_phy( + link_res->hpo_dp_link_enc, signal); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index ce8f7f4fa2b7..b3912ff9dc91 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1724,6 +1724,94 @@ static void update_hpo_dp_stream_engine_usage( res_ctx->is_hpo_dp_stream_enc_acquired[i] = acquired; } } + +static inline int find_acquired_hpo_dp_link_enc_for_link( + const struct resource_context *res_ctx, + const struct dc_link *link) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_to_link_idx); i++) + if (res_ctx->hpo_dp_link_enc_ref_cnts[i] > 0 && + res_ctx->hpo_dp_link_enc_to_link_idx[i] == link->link_index) + return i; + + return -1; +} + +static inline int find_free_hpo_dp_link_enc(const struct resource_context *res_ctx, + const struct resource_pool *pool) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts); i++) + if (res_ctx->hpo_dp_link_enc_ref_cnts[i] == 0) + break; + + return (i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts) && + i < pool->hpo_dp_link_enc_count) ? i : -1; +} + +static inline void acquire_hpo_dp_link_enc( + struct resource_context *res_ctx, + unsigned int link_index, + int enc_index) +{ + res_ctx->hpo_dp_link_enc_to_link_idx[enc_index] = link_index; + res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] = 1; +} + +static inline void retain_hpo_dp_link_enc( + struct resource_context *res_ctx, + int enc_index) +{ + res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]++; +} + +static inline void release_hpo_dp_link_enc( + struct resource_context *res_ctx, + int enc_index) +{ + ASSERT(res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] > 0); + res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]--; +} + +static bool add_hpo_dp_link_enc_to_ctx(struct resource_context *res_ctx, + const struct resource_pool *pool, + struct pipe_ctx *pipe_ctx, + struct dc_stream_state *stream) +{ + int enc_index; + + enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link); + + if (enc_index >= 0) { + retain_hpo_dp_link_enc(res_ctx, enc_index); + } else { + enc_index = find_free_hpo_dp_link_enc(res_ctx, pool); + if (enc_index >= 0) + acquire_hpo_dp_link_enc(res_ctx, stream->link->link_index, enc_index); + } + + if (enc_index >= 0) + pipe_ctx->link_res.hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index]; + + return pipe_ctx->link_res.hpo_dp_link_enc != NULL; +} + +static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx, + struct pipe_ctx *pipe_ctx, + struct dc_stream_state *stream) +{ + int enc_index; + + enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link); + + if (enc_index >= 0) { + release_hpo_dp_link_enc(res_ctx, enc_index); + pipe_ctx->link_res.hpo_dp_link_enc = NULL; + } +} #endif /* TODO: release audio object */ @@ -1886,6 +1974,7 @@ enum dc_status dc_remove_stream_from_ctx( &new_ctx->res_ctx, dc->res_pool, del_pipe->stream_res.hpo_dp_stream_enc, false); + remove_hpo_dp_link_enc_from_ctx(&new_ctx->res_ctx, del_pipe, del_pipe->stream); } #endif 
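The dc_resource.c hunks above replace the old "grab any encoder whose transmitter is unknown" lookup with a small reference-counted pool: an HPO DP link encoder stays bound to a link while any stream on that link holds a reference, and falls back to the free pool when the last reference is released. A minimal standalone sketch of the same scheme follows; the pool size, struct and function names, and the main() harness are illustrative simplifications, not the driver's real types.

#include <assert.h>
#include <stdio.h>

#define SKETCH_NUM_HPO_ENCS 2 /* illustrative pool size */

struct sketch_res_ctx {
	unsigned int enc_to_link[SKETCH_NUM_HPO_ENCS]; /* link index per encoder */
	int enc_ref_cnt[SKETCH_NUM_HPO_ENCS];          /* 0 means free */
};

/* Return the encoder already bound to this link, or -1. */
static int find_acquired(const struct sketch_res_ctx *ctx, unsigned int link)
{
	int i;

	for (i = 0; i < SKETCH_NUM_HPO_ENCS; i++)
		if (ctx->enc_ref_cnt[i] > 0 && ctx->enc_to_link[i] == link)
			return i;
	return -1;
}

/* Return any unreferenced encoder, or -1 if the pool is exhausted. */
static int find_free(const struct sketch_res_ctx *ctx)
{
	int i;

	for (i = 0; i < SKETCH_NUM_HPO_ENCS; i++)
		if (ctx->enc_ref_cnt[i] == 0)
			return i;
	return -1;
}

/* A stream starts using a link: reuse the link's encoder or bind a free one. */
static int sketch_acquire(struct sketch_res_ctx *ctx, unsigned int link)
{
	int i = find_acquired(ctx, link);

	if (i >= 0) {
		ctx->enc_ref_cnt[i]++; /* retain: another stream on the same link */
		return i;
	}
	i = find_free(ctx);
	if (i >= 0) {
		ctx->enc_to_link[i] = link;
		ctx->enc_ref_cnt[i] = 1; /* first acquisition binds the encoder */
	}
	return i;
}

/* A stream stops using a link; the encoder frees once the count reaches 0. */
static void sketch_release(struct sketch_res_ctx *ctx, unsigned int link)
{
	int i = find_acquired(ctx, link);

	assert(i >= 0);
	ctx->enc_ref_cnt[i]--;
}

int main(void)
{
	struct sketch_res_ctx ctx = {0};
	int a = sketch_acquire(&ctx, 0); /* first stream on link 0 */
	int b = sketch_acquire(&ctx, 0); /* MST sibling shares the same encoder */
	int c = sketch_acquire(&ctx, 1); /* second link gets the other encoder */

	printf("link0 -> enc %d/%d, link1 -> enc %d\n", a, b, c);
	sketch_release(&ctx, 0);
	sketch_release(&ctx, 0); /* encoder for link 0 is free again */
	sketch_release(&ctx, 1);
	return 0;
}

With this shape, resource_map_pool_resources can hand every pipe of an MST topology the same encoder while dc_remove_stream_from_ctx drops references one stream at a time; in the real code, pipe_ctx->link_res.hpo_dp_link_enc simply caches pool->hpo_dp_link_enc[i] for the index returned here.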
@@ -2161,6 +2250,8 @@ enum dc_status resource_map_pool_resources( &context->res_ctx, pool, pipe_ctx->stream_res.hpo_dp_stream_enc, true); + if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, pool, pipe_ctx, stream)) + return DC_NO_LINK_ENC_RESOURCE; } } #endif @@ -2836,6 +2927,8 @@ bool pipe_need_reprogram( #if defined(CONFIG_DRM_AMD_DC_DCN) if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc) return true; + if (pipe_ctx_old->link_res.hpo_dp_link_enc != pipe_ctx->link_res.hpo_dp_link_enc) + return true; #endif /* DIG link encoder resource assignment for stream changed. */ @@ -3104,21 +3197,109 @@ void get_audio_check(struct audio_info *aud_modes, } #if defined(CONFIG_DRM_AMD_DC_DCN) -struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder( - const struct resource_pool *pool) +struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( + const struct resource_context *res_ctx, + const struct resource_pool *pool, + const struct dc_link *link) { - uint8_t i; - struct hpo_dp_link_encoder *enc = NULL; + struct hpo_dp_link_encoder *hpo_dp_link_enc = NULL; + int enc_index; - ASSERT(pool->hpo_dp_link_enc_count <= MAX_HPO_DP2_LINK_ENCODERS); + enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, link); - for (i = 0; i < pool->hpo_dp_link_enc_count; i++) { - if (pool->hpo_dp_link_enc[i]->transmitter == TRANSMITTER_UNKNOWN) { - enc = pool->hpo_dp_link_enc[i]; - break; + if (enc_index < 0) + enc_index = find_free_hpo_dp_link_enc(res_ctx, pool); + + if (enc_index >= 0) + hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index]; + + return hpo_dp_link_enc; +} +#endif + +void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, + struct dc_state *context) +{ + int i, j; + struct pipe_ctx *pipe_ctx_old, *pipe_ctx, *pipe_ctx_syncd; + + /* If pipe backend is reset, need to reset pipe syncd status */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i]; + pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx_old->stream) + continue; + + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + continue; + + if (!pipe_ctx->stream || + pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { + + /* Reset all the syncd pipes from the disabled pipe */ + for (j = 0; j < dc->res_pool->pipe_count; j++) { + pipe_ctx_syncd = &context->res_ctx.pipe_ctx[j]; + if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_syncd) == pipe_ctx_old->pipe_idx) || + !IS_PIPE_SYNCD_VALID(pipe_ctx_syncd)) + SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_syncd, j); + } } } +} + +void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc, + struct dc_state *context, + uint8_t disabled_master_pipe_idx) +{ + int i; + struct pipe_ctx *pipe_ctx, *pipe_ctx_check; + + pipe_ctx = &context->res_ctx.pipe_ctx[disabled_master_pipe_idx]; + if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx) != disabled_master_pipe_idx) || + !IS_PIPE_SYNCD_VALID(pipe_ctx)) + SET_PIPE_SYNCD_TO_PIPE(pipe_ctx, disabled_master_pipe_idx); + + /* for the pipe disabled, check if any slave pipe exists and assert */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe_ctx_check = &context->res_ctx.pipe_ctx[i]; - return enc; + if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) && + IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) + DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n", + i, disabled_master_pipe_idx); + } } + +uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter) +{ + /* TODO - 
get transmitter to phy idx mapping from DMUB */ + uint8_t phy_idx = transmitter - TRANSMITTER_UNIPHY_A; + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dc->ctx->dce_version == DCN_VERSION_3_1 && + dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + phy_idx = 0; + break; + case TRANSMITTER_UNIPHY_B: + phy_idx = 1; + break; + case TRANSMITTER_UNIPHY_C: + phy_idx = 5; + break; + case TRANSMITTER_UNIPHY_D: + phy_idx = 6; + break; + case TRANSMITTER_UNIPHY_E: + phy_idx = 4; + break; + default: + phy_idx = 0; + break; + } + } #endif + return phy_idx; +} diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index bf2878235dba..288e7b01f561 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.163" +#define DC_VER "3.2.167" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -75,6 +75,16 @@ enum dc_plane_type { DC_PLANE_TYPE_DCN_UNIVERSAL, }; +// Sizes defined as multiples of 64KB +enum det_size { + DET_SIZE_DEFAULT = 0, + DET_SIZE_192KB = 3, + DET_SIZE_256KB = 4, + DET_SIZE_320KB = 5, + DET_SIZE_384KB = 6 +}; + + struct dc_plane_cap { enum dc_plane_type type; uint32_t blends_with_above : 1; @@ -187,6 +197,7 @@ struct dc_caps { struct dc_color_caps color; #if defined(CONFIG_DRM_AMD_DC_DCN) bool dp_hpo; + bool hdmi_frl_pcon_support; #endif bool edp_dsc_support; bool vbios_lttpr_aware; @@ -333,6 +344,7 @@ struct dc_config { uint8_t vblank_alignment_max_frame_time_diff; bool is_asymmetric_memory; bool is_single_rank_dimm; + bool use_pipe_ctx_sync_logic; }; enum visual_confirm { @@ -510,7 +522,8 @@ union dpia_debug_options { uint32_t force_non_lttpr:1; uint32_t extend_aux_rd_interval:1; uint32_t disable_mst_dsc_work_around:1; - uint32_t reserved:28; + uint32_t hpd_delay_in_ms:12; + uint32_t reserved:16; } bits; uint32_t raw; }; @@ -678,6 +691,8 @@ struct dc_debug_options { /* TODO - remove once tested */ bool legacy_dp2_lt; bool set_mst_en_for_sst; + bool disable_uhbr; + bool force_dp2_lt_fallback_method; #endif union mem_low_power_enable_options enable_mem_low_power; union root_clock_optimization_options root_clock_optimization; @@ -690,11 +705,14 @@ struct dc_debug_options { /* FEC/PSR1 sequence enable delay in 100us */ uint8_t fec_enable_delay_in100us; bool enable_driver_sequence_debug; + enum det_size crb_alloc_policy; + int crb_alloc_policy_min_disp_count; #if defined(CONFIG_DRM_AMD_DC_DCN) bool disable_z10; bool enable_sw_cntl_psr; union dpia_debug_options dpia_debug; #endif + bool apply_vendor_specific_lttpr_wa; }; struct gpu_info_soc_bounding_box_v1_0; @@ -1295,6 +1313,11 @@ struct dc_sink_dsc_caps { // 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology), // 'false' if they are sink's DSC caps bool is_virtual_dpcd_dsc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + // 'true' if MST topology supports DSC passthrough for sink + // 'false' if MST topology does not support DSC passthrough + bool is_dsc_passthrough_supported; +#endif struct dsc_dec_dpcd_caps dsc_dec_caps; }; @@ -1410,6 +1433,9 @@ void dc_unlock_memory_clock_frequency(struct dc *dc); */ void dc_lock_memory_clock_frequency(struct dc *dc); +/* set soft max for memclk, to be used for AC/DC switching clock limitations */ +void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable); + /* cleanup on driver unload */ void dc_hardware_release(struct dc *dc); diff --git 
a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index e68e9a86a4d9..353dac420f34 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -378,7 +378,14 @@ enum dpcd_downstream_port_detailed_type { union dwnstream_port_caps_byte2 { struct { uint8_t MAX_BITS_PER_COLOR_COMPONENT:2; +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint8_t MAX_ENCODED_LINK_BW_SUPPORT:3; + uint8_t SOURCE_CONTROL_MODE_SUPPORT:1; + uint8_t CONCURRENT_LINK_BRING_UP_SEQ_SUPPORT:1; + uint8_t RESERVED:1; +#else uint8_t RESERVED:6; +#endif } bits; uint8_t raw; }; @@ -416,6 +423,30 @@ union dwnstream_port_caps_byte3_hdmi { uint8_t raw; }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +union hdmi_sink_encoded_link_bw_support { + struct { + uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3; + uint8_t RESERVED:5; + } bits; + uint8_t raw; +}; + +union hdmi_encoded_link_bw { + struct { + uint8_t FRL_MODE:1; // Bit 0 + uint8_t BW_9Gbps:1; + uint8_t BW_18Gbps:1; + uint8_t BW_24Gbps:1; + uint8_t BW_32Gbps:1; + uint8_t BW_40Gbps:1; + uint8_t BW_48Gbps:1; + uint8_t RESERVED:1; // Bit 7 + } bits; + uint8_t raw; +}; +#endif + /*4-byte structure for detailed capabilities of a down-stream port (DP-to-TMDS converter).*/ union dwnstream_portxcaps { @@ -852,6 +883,15 @@ struct psr_caps { unsigned char psr_version; unsigned int psr_rfb_setup_time; bool psr_exit_link_training_required; + unsigned char edp_revision; + unsigned char support_ver; + bool su_granularity_required; + bool y_coordinate_required; + uint8_t su_y_granularity; + bool alpm_cap; + bool standby_support; + uint8_t rate_control_caps; + unsigned int psr_power_opt_flag; }; /* Length of router topology ID read from DPCD in bytes. */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 52355fe6994c..eac34f591a3f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -741,6 +741,9 @@ struct dc_dsc_config { uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */ bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */ int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */ +#endif bool is_dp; /* indicate if DSC is applied based on DP's capability */ }; struct dc_crtc_timing { diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 37af564c4b33..c0e37ad0e26c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -30,6 +30,8 @@ #include "dc_types.h" #include "grph_object_defs.h" +struct link_resource; + enum dc_link_fec_state { dc_link_fec_not_ready, dc_link_fec_ready, @@ -160,9 +162,6 @@ struct dc_link { struct panel_cntl *panel_cntl; struct link_encoder *link_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) - struct hpo_dp_link_encoder *hpo_dp_link_enc; -#endif struct graphics_object_id link_id; /* Endpoint type distinguishes display endpoints which do not have entries * in the BIOS connector table from those that do. 
Helps when tracking link @@ -186,6 +185,10 @@ struct dc_link { /* Drive settings read from integrated info table */ struct dc_lane_settings bios_forced_drive_settings; + /* Vendor specific LTTPR workaround variables */ + uint8_t vendor_specific_lttpr_link_rate_wa; + bool apply_vendor_specific_lttpr_link_rate_wa; + /* MST record stream using this link */ struct link_flags { bool dp_keep_receiver_powered; @@ -355,14 +358,17 @@ void dc_link_remove_remote_sink( void dc_link_dp_set_drive_settings( struct dc_link *link, + const struct link_resource *link_res, struct link_training_settings *lt_settings); bool dc_link_dp_perform_link_training_skip_aux( struct dc_link *link, + const struct link_resource *link_res, const struct dc_link_settings *link_setting); enum link_training_result dc_link_dp_perform_link_training( struct dc_link *link, + const struct link_resource *link_res, const struct dc_link_settings *link_settings, bool skip_video_pattern); @@ -370,6 +376,7 @@ bool dc_link_dp_sync_lt_begin(struct dc_link *link); enum link_training_result dc_link_dp_sync_lt_attempt( struct dc_link *link, + const struct link_resource *link_res, struct dc_link_settings *link_setting, struct dc_link_training_overrides *lt_settings); @@ -447,6 +454,13 @@ bool dc_link_is_fec_supported(const struct dc_link *link); bool dc_link_should_enable_fec(const struct dc_link *link); #if defined(CONFIG_DRM_AMD_DC_DCN) +uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw); enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link); #endif + +const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link); +/* take a snapshot of current link resource allocation state */ +void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map); +/* restore link resource allocation state from a snapshot */ +void dc_restore_link_res_map(const struct dc *dc, uint32_t *map); #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 388457ffc0a8..0285a4b38d05 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -430,6 +430,7 @@ struct dc_dongle_caps { uint32_t dp_hdmi_max_bpc; uint32_t dp_hdmi_max_pixel_clk_in_khz; #if defined(CONFIG_DRM_AMD_DC_DCN) + uint32_t dp_hdmi_frl_max_link_bw_in_kbps; struct dc_dongle_dfp_cap_ext dfp_cap_ext; #endif }; @@ -950,6 +951,7 @@ enum dc_gpu_mem_alloc_type { enum dc_psr_version { DC_PSR_VERSION_1 = 0, + DC_PSR_VERSION_SU_1 = 1, DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF, }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 1e77ffee71b3..f1c61d5aee6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -788,8 +788,9 @@ static bool dce110_link_encoder_validate_hdmi_output( crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) return false; - if (!enc110->base.features.flags.bits.HDMI_6GB_EN && - adjusted_pix_clk_khz >= 300000) + if ((!enc110->base.features.flags.bits.HDMI_6GB_EN || + enc110->base.ctx->dc->debug.hdmi20_disable) && + adjusted_pix_clk_khz >= 300000) return false; if (enc110->base.ctx->dc->debug.hdmi20_disable && crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 3d421583e9ca..f3ff141b706a 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -69,6 +69,8 @@ #include "dcn10/dcn10_hw_sequencer.h" +#include "dce110_hw_sequencer.h" + #define GAMMA_HW_POINTS_NUM 256 /* @@ -1564,6 +1566,10 @@ static enum dc_status apply_single_controller_ctx_to_hw( &pipe_ctx->stream->audio_info); } + /* make sure no pipes syncd to the pipe being enabled */ + if (!pipe_ctx->stream->apply_seamless_boot_optimization && dc->config.use_pipe_ctx_sync_logic) + check_syncd_pipes_for_disabled_master_pipe(dc, context, pipe_ctx->pipe_idx); + #if defined(CONFIG_DRM_AMD_DC_DCN) /* DCN3.1 FPGA Workaround * Need to enable HPO DP Stream Encoder before setting OTG master enable. @@ -1602,7 +1608,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst); - if (dc_is_dp_signal(pipe_ctx->stream->signal) && + if (dc_is_embedded_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc->funcs->reset_fifo) pipe_ctx->stream_res.stream_enc->funcs->reset_fifo( pipe_ctx->stream_res.stream_enc); @@ -1792,7 +1798,6 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) struct dc_stream_state *edp_streams[MAX_NUM_EDP]; struct dc_link *edp_link_with_sink = NULL; struct dc_link *edp_link = NULL; - struct dc_stream_state *edp_stream = NULL; struct dce_hwseq *hws = dc->hwseq; int edp_with_sink_num; int edp_num; @@ -1813,27 +1818,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) get_edp_streams(context, edp_streams, &edp_stream_num); // Check fastboot support, disable on DCE8 because of blank screens - if (edp_num && dc->ctx->dce_version != DCE_VERSION_8_0 && + if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 && dc->ctx->dce_version != DCE_VERSION_8_1 && dc->ctx->dce_version != DCE_VERSION_8_3) { for (i = 0; i < edp_num; i++) { edp_link = edp_links[i]; + if (edp_link != edp_streams[0]->link) + continue; // enable fastboot if backend is enabled on eDP - if (edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc)) { - /* Set optimization flag on eDP stream*/ - if (edp_stream_num && edp_link->link_status.link_active) { - edp_stream = edp_streams[0]; - can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing); - edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot; - if (can_apply_edp_fast_boot) - DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n"); - - break; - } + if (edp_link->link_enc->funcs->is_dig_enabled && + edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && + edp_link->link_status.link_active) { + struct dc_stream_state *edp_stream = edp_streams[0]; + + can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing); + edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot; + if (can_apply_edp_fast_boot) + DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n"); + + break; } } // We are trying to enable eDP, don't power down VDD - if (edp_stream_num && can_apply_edp_fast_boot) + if (can_apply_edp_fast_boot) keep_edp_vdd_on = true; } @@ -2294,6 +2301,10 @@ enum dc_status dce110_apply_ctx_to_hw( enum dc_status status; int i; + /* reset syncd pipes from disabled pipes */ + if (dc->config.use_pipe_ctx_sync_logic) + reset_syncd_pipes_from_disabled_pipes(dc, context); + /* Reset old context */ /* look up the targets that have 
been removed since last commit */ hws->funcs.reset_hw_ctx_wrap(dc, context); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 91fdfcd8a14e..db7ca4b0cdb9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -119,14 +119,6 @@ void dpp_read_state(struct dpp *dpp_base, } } -/* Program gamut remap in bypass mode */ -void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp) -{ - REG_SET(CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, 0); - /* Gamut remap in bypass */ -} - #define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) bool dpp1_get_optimal_number_of_taps( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index e31a6f1516bb..f607a0e28f14 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -89,51 +89,6 @@ enum dscl_mode_sel { DSCL_MODE_DSCL_BYPASS = 6 }; -static void dpp1_dscl_set_overscan( - struct dcn10_dpp *dpp, - const struct scaler_data *data) -{ - uint32_t left = data->recout.x; - uint32_t top = data->recout.y; - - int right = data->h_active - data->recout.x - data->recout.width; - int bottom = data->v_active - data->recout.y - data->recout.height; - - if (right < 0) { - BREAK_TO_DEBUGGER(); - right = 0; - } - if (bottom < 0) { - BREAK_TO_DEBUGGER(); - bottom = 0; - } - - REG_SET_2(DSCL_EXT_OVERSCAN_LEFT_RIGHT, 0, - EXT_OVERSCAN_LEFT, left, - EXT_OVERSCAN_RIGHT, right); - - REG_SET_2(DSCL_EXT_OVERSCAN_TOP_BOTTOM, 0, - EXT_OVERSCAN_BOTTOM, bottom, - EXT_OVERSCAN_TOP, top); -} - -static void dpp1_dscl_set_otg_blank( - struct dcn10_dpp *dpp, const struct scaler_data *data) -{ - uint32_t h_blank_start = data->h_active; - uint32_t h_blank_end = 0; - uint32_t v_blank_start = data->v_active; - uint32_t v_blank_end = 0; - - REG_SET_2(OTG_H_BLANK, 0, - OTG_H_BLANK_START, h_blank_start, - OTG_H_BLANK_END, h_blank_end); - - REG_SET_2(OTG_V_BLANK, 0, - OTG_V_BLANK_START, v_blank_start, - OTG_V_BLANK_END, v_blank_end); -} - static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth) { if (depth == LB_PIXEL_DEPTH_30BPP) @@ -555,58 +510,6 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d return LB_MEMORY_CONFIG_0; } -void dpp1_dscl_set_scaler_auto_scale( - struct dpp *dpp_base, - const struct scaler_data *scl_data) -{ - enum lb_memory_config lb_config; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode( - dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale); - bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN - && scl_data->format <= PIXEL_FORMAT_VIDEO_END; - - dpp1_dscl_set_overscan(dpp, scl_data); - - dpp1_dscl_set_otg_blank(dpp, scl_data); - - REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode); - - if (dscl_mode == DSCL_MODE_DSCL_BYPASS) - return; - - lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data); - dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config); - - if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) - return; - - /* TODO: v_min */ - REG_SET_3(DSCL_AUTOCAL, 0, - AUTOCAL_MODE, AUTOCAL_MODE_AUTOSCALE, - AUTOCAL_NUM_PIPE, 0, - AUTOCAL_PIPE_ID, 0); - - /* Black offsets */ - if (ycbcr) - REG_SET_2(SCL_BLACK_OFFSET, 0, - SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, - SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR); - else - - REG_SET_2(SCL_BLACK_OFFSET, 0, - SCL_BLACK_OFFSET_RGB_Y, 
BLACK_OFFSET_RGB_Y, - SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y); - REG_SET_4(SCL_TAP_CONTROL, 0, - SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1, - SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1, - SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1, - SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1); - - dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr); -} - static void dpp1_dscl_set_manual_ratio_init( struct dcn10_dpp *dpp, const struct scaler_data *data) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a2b925cc4132..530a72e3eefe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -77,9 +77,9 @@ #define PGFSM_POWER_ON 0 #define PGFSM_POWER_OFF 2 -void print_microsec(struct dc_context *dc_ctx, - struct dc_log_buffer_ctx *log_ctx, - uint32_t ref_cycle) +static void print_microsec(struct dc_context *dc_ctx, + struct dc_log_buffer_ctx *log_ctx, + uint32_t ref_cycle) { const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; static const unsigned int frac = 1000; @@ -132,7 +132,8 @@ static void log_mpc_crc(struct dc *dc, REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G)); } -void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx) +static void dcn10_log_hubbub_state(struct dc *dc, + struct dc_log_buffer_ctx *log_ctx) { struct dc_context *dc_ctx = dc->ctx; struct dcn_hubbub_wm wm; @@ -467,8 +468,6 @@ void dcn10_log_hw_state(struct dc *dc, log_mpc_crc(dc, log_ctx); { - int hpo_dp_link_enc_count = 0; - if (pool->hpo_dp_stream_enc_count > 0) { DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n"); for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) { @@ -499,18 +498,14 @@ void dcn10_log_hw_state(struct dc *dc, } /* log DP HPO L_ENC section if any hpo_dp_link_enc exists */ - for (i = 0; i < dc->link_count; i++) - if (dc->links[i]->hpo_dp_link_enc) - hpo_dp_link_enc_count++; - - if (hpo_dp_link_enc_count) { + if (pool->hpo_dp_link_enc_count) { DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n"); - for (i = 0; i < dc->link_count; i++) { - struct hpo_dp_link_encoder *hpo_dp_link_enc = dc->links[i]->hpo_dp_link_enc; + for (i = 0; i < pool->hpo_dp_link_enc_count; i++) { + struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i]; struct hpo_dp_link_enc_state hpo_dp_le_state = {0}; - if (hpo_dp_link_enc && hpo_dp_link_enc->funcs->read_state) { + if (hpo_dp_link_enc->funcs->read_state) { hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state); DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n", hpo_dp_link_enc->inst, @@ -1370,7 +1365,12 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) uint32_t opp_id_src1 = OPP_ID_INVALID; // Step 1: To find out which OPTC is running & OPTC DSC is ON - for (i = 0; i < dc->res_pool->res_cap->num_timing_generator; i++) { + // We can't use res_pool->res_cap->num_timing_generator to check, + // because it records the default number of display pipes built into the driver, + // not the number of display pipes on the current chip. + // Some ASICs have fewer display pipes fused than the default setting. + // In the dcnxx_resource_construct function, the driver obtains the real information.
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) { uint32_t optc_dsc_state = 0; struct timing_generator *tg = dc->res_pool->timing_generators[i]; @@ -1972,10 +1972,9 @@ static bool wait_for_reset_trigger_to_occur( return rc; } -uint64_t reduceSizeAndFraction( - uint64_t *numerator, - uint64_t *denominator, - bool checkUint32Bounary) +static uint64_t reduceSizeAndFraction(uint64_t *numerator, + uint64_t *denominator, + bool checkUint32Bounary) { int i; bool ret = checkUint32Bounary == false; @@ -2023,7 +2022,7 @@ uint64_t reduceSizeAndFraction( return ret; } -bool is_low_refresh_rate(struct pipe_ctx *pipe) +static bool is_low_refresh_rate(struct pipe_ctx *pipe) { uint32_t master_pipe_refresh_rate = pipe->stream->timing.pix_clk_100hz * 100 / @@ -2032,7 +2031,8 @@ bool is_low_refresh_rate(struct pipe_ctx *pipe) return master_pipe_refresh_rate <= 30; } -uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate) +static uint8_t get_clock_divider(struct pipe_ctx *pipe, + bool account_low_refresh_rate) { uint32_t clock_divider = 1; uint32_t numpipes = 1; @@ -2052,10 +2052,8 @@ uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate) return clock_divider; } -int dcn10_align_pixel_clocks( - struct dc *dc, - int group_size, - struct pipe_ctx *grouped_pipes[]) +static int dcn10_align_pixel_clocks(struct dc *dc, int group_size, + struct pipe_ctx *grouped_pipes[]) { struct dc_context *dc_ctx = dc->ctx; int i, master = -1, embedded = -1; @@ -2344,7 +2342,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1, } -void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp) +static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); struct vm_system_aperture_param apt = {0}; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c index 34001a30d449..10e613ec7d24 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -78,6 +78,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .get_clock = dcn10_get_clock, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, + .power_down = dce110_power_down, .set_backlight_level = dce110_set_backlight_level, .set_abm_immediate_disable = dce110_set_abm_immediate_disable, .set_pipe = dce110_set_pipe, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index 2dc4b4e4ba02..f4b34c110eae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -646,8 +646,9 @@ static bool dcn10_link_encoder_validate_hdmi_output( crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) return false; - if (!enc10->base.features.flags.bits.HDMI_6GB_EN && - adjusted_pix_clk_100hz >= 3000000) + if ((!enc10->base.features.flags.bits.HDMI_6GB_EN || + enc10->base.ctx->dc->debug.hdmi20_disable) && + adjusted_pix_clk_100hz >= 3000000) return false; if (enc10->base.ctx->dc->debug.hdmi20_disable && crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index d54d731415d7..2c409356f512 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -348,36 +348,6 @@ void opp1_program_stereo( */ } -void opp1_program_oppbuf( - struct output_pixel_processor *opp, - struct oppbuf_params *oppbuf) -{ - struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); - - /* Program the oppbuf active width to be the frame width from mpc */ - REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, oppbuf->active_width); - - /* Specifies the number of segments in multi-segment mode (DP-MSO operation) - * description "In 1/2/4 segment mode, specifies the horizontal active width in pixels of the display panel. - * In 4 segment split left/right mode, specifies the horizontal 1/2 active width in pixels of the display panel. - * Used to determine segment boundaries in multi-segment mode. Used to determine the width of the vertical active space in 3D frame packed modes. - * OPPBUF_ACTIVE_WIDTH must be integer divisible by the total number of segments." - */ - REG_UPDATE(OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, oppbuf->mso_segmentation); - - /* description "Specifies the number of overlap pixels (1-8 overlapping pixels supported), used in multi-segment mode (DP-MSO operation)" */ - REG_UPDATE(OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, oppbuf->mso_overlap_pixel_num); - - /* description "Specifies the number of times a pixel is replicated (0-15 pixel replications supported). - * A value of 0 disables replication. The total number of times a pixel is output is OPPBUF_PIXEL_REPETITION + 1." - */ - REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition); - - /* Controls the number of padded pixels at the end of a segment */ - if (REG(OPPBUF_CONTROL1)) - REG_UPDATE(OPPBUF_CONTROL1, OPPBUF_NUM_SEGMENT_PADDED_PIXELS, oppbuf->num_segment_padded_pixels); -} - void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable) { struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 3d2a2848857a..b1671b00ce40 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -132,22 +132,6 @@ void optc1_setup_vertical_interrupt2( } /** - * Vupdate keepout can be set to a window to block the update lock for that pipe from changing. - * Start offset begins with vstartup and goes for x number of clocks, - * end offset starts from end of vupdate to x number of clocks. - */ -void optc1_set_vupdate_keepout(struct timing_generator *optc, - struct vupdate_keepout_params *params) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, - MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, params->start_offset, - MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, params->end_offset, - OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, params->enable); -} - -/** * program_timing_generator used by mode timing set * Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition. * Including SYNC. Call BIOS command table to program Timings. 
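/* Editor's aside (illustrative, not part of the patch): the removed
 * optc1_set_vupdate_keepout() above was driven by a vupdate_keepout_params
 * whose fields appear in the deleted body. A hypothetical caller would have
 * looked roughly like this; the offset values here are invented:
 *
 *	struct vupdate_keepout_params params = {
 *		.start_offset = 16,	// clocks counted from vstartup
 *		.end_offset = 16,	// clocks counted from end of vupdate
 *		.enable = 1,
 *	};
 *	optc1_set_vupdate_keepout(optc, &params);
 */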
@@ -876,7 +860,7 @@ void optc1_set_static_screen_control( OTG_STATIC_SCREEN_FRAME_COUNT, num_frames); } -void optc1_setup_manual_trigger(struct timing_generator *optc) +static void optc1_setup_manual_trigger(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -894,7 +878,7 @@ void optc1_setup_manual_trigger(struct timing_generator *optc) OTG_TRIGA_CLEAR, 1); } -void optc1_program_manual_trigger(struct timing_generator *optc) +static void optc1_program_manual_trigger(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 19a2dd619ec7..858b72149897 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -686,9 +686,8 @@ static struct output_pixel_processor *dcn10_opp_create( return &opp->base; } -struct dce_aux *dcn10_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_aux *dcn10_aux_engine_create(struct dc_context *ctx, + uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); @@ -724,9 +723,8 @@ static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) }; -struct dce_i2c_hw *dcn10_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_i2c_hw *dcn10_i2c_hw_create(struct dc_context *ctx, + uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); @@ -805,7 +803,7 @@ static const struct encoder_feature_support link_enc_feature = { .flags.bits.IS_TPS4_CAPABLE = true }; -struct link_encoder *dcn10_link_encoder_create( +static struct link_encoder *dcn10_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn10_link_encoder *enc10 = @@ -847,7 +845,7 @@ static struct panel_cntl *dcn10_panel_cntl_create(const struct panel_cntl_init_d return &panel_cntl->base; } -struct clock_source *dcn10_clock_source_create( +static struct clock_source *dcn10_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, @@ -945,7 +943,7 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .create_hwseq = dcn10_hwseq_create, }; -void dcn10_clock_source_destroy(struct clock_source **clk_src) +static void dcn10_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; @@ -1122,7 +1120,7 @@ static enum dc_status build_mapped_resource( return DC_OK; } -enum dc_status dcn10_add_stream_to_ctx( +static enum dc_status dcn10_add_stream_to_ctx( struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index a9e420c7d75a..970b65efeac1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -251,20 +251,6 @@ static void dpp2_cnv_setup ( } -void dpp2_cnv_set_bias_scale( - struct dpp *dpp_base, - struct dc_bias_and_scale *bias_and_scale) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, bias_and_scale->bias_red); - REG_UPDATE(FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, bias_and_scale->bias_green); - REG_UPDATE(FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, bias_and_scale->bias_blue); - REG_UPDATE(FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, bias_and_scale->scale_red); - 
REG_UPDATE(FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, bias_and_scale->scale_green); - REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue); -} - /*compute the maximum number of lines that we can fit in the line buffer*/ void dscl2_calc_lb_num_partitions( const struct scaler_data *scl_data, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c index 880954ac0b02..994fb732a7cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c @@ -527,7 +527,7 @@ static const uint16_t filter_12tap_16p_183[108] = { 0, 84, 16328, 16032, 416, 1944, 1944, 416, 16032, 16328, 84, 0, }; -const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio) +static const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio) { if (ratio.value < dc_fixpt_one.value) return filter_3tap_16p_upscale; @@ -539,7 +539,7 @@ const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio) return filter_3tap_16p_183; } -const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio) +static const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio) { if (ratio.value < dc_fixpt_one.value) return filter_4tap_16p_upscale; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 5adf42a7cc27..dc1752e9f461 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -192,9 +192,8 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp, REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value); } -void hubp2_program_requestor( - struct hubp *hubp, - struct _vcs_dpi_display_rq_regs_st *rq_regs) +static void hubp2_program_requestor(struct hubp *hubp, + struct _vcs_dpi_display_rq_regs_st *rq_regs) { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); @@ -930,6 +929,16 @@ bool hubp2_is_flip_pending(struct hubp *hubp) void hubp2_set_blank(struct hubp *hubp, bool blank) { + hubp2_set_blank_regs(hubp, blank); + + if (blank) { + hubp->mpcc_id = 0xf; + hubp->opp_id = OPP_ID_INVALID; + } +} + +void hubp2_set_blank_regs(struct hubp *hubp, bool blank) +{ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); uint32_t blank_en = blank ? 
1 : 0; @@ -951,9 +960,6 @@ void hubp2_set_blank(struct hubp *hubp, bool blank) HUBP_NO_OUTSTANDING_REQ, 1, 1, 200); } - - hubp->mpcc_id = 0xf; - hubp->opp_id = OPP_ID_INVALID; } } @@ -1285,7 +1291,7 @@ void hubp2_read_state(struct hubp *hubp) } -void hubp2_validate_dml_output(struct hubp *hubp, +static void hubp2_validate_dml_output(struct hubp *hubp, struct dc_context *ctx, struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, @@ -1603,6 +1609,7 @@ static struct hubp_funcs dcn20_hubp_funcs = { .hubp_setup_interdependent = hubp2_setup_interdependent, .hubp_set_vm_system_aperture_settings = hubp2_set_vm_system_aperture_settings, .set_blank = hubp2_set_blank, + .set_blank_regs = hubp2_set_blank_regs, .dcc_control = hubp2_dcc_control, .mem_program_viewport = min_set_viewport, .set_cursor_attributes = hubp2_cursor_set_attributes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index eea2254b15e4..9204c3ef323b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -330,6 +330,7 @@ void hubp2_program_surface_config( bool hubp2_is_flip_pending(struct hubp *hubp); void hubp2_set_blank(struct hubp *hubp, bool blank); +void hubp2_set_blank_regs(struct hubp *hubp, bool blank); void hubp2_cursor_set_position( struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index e6af99ae3d9f..4991e93e5308 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -615,6 +615,11 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->pipe_idx); } +void dcn20_disable_pixel_data(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank) +{ + dcn20_blank_pixel_data(dc, pipe_ctx, blank); +} + static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream, int opp_cnt) { @@ -1080,10 +1085,8 @@ static void dcn20_power_on_plane( } } -void dcn20_enable_plane( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context) +static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct dc_state *context) { //if (dc->debug.sanity_checks) { // dcn10_verify_allow_pstate_change_high(dc); @@ -1842,6 +1845,11 @@ void dcn20_optimize_bandwidth( dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, true); + if (dc->clk_mgr->dc_mode_softmax_enabled) + if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && + context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) + dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk); + dc->clk_mgr->funcs->update_clocks( dc->clk_mgr, context, @@ -2406,7 +2414,7 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->map_stream_to_link( pipe_ctx->stream_res.hpo_dp_stream_enc, pipe_ctx->stream_res.hpo_dp_stream_enc->inst, - link->hpo_dp_link_enc->inst); + pipe_ctx->link_res.hpo_dp_link_enc->inst); } if (!is_dp_128b_132b_signal(pipe_ctx) && link_enc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 6bba191cd33e..33a36c02b2f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -53,6 +53,10 @@ void 
dcn20_enable_stream(struct pipe_ctx *pipe_ctx); void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_disable_pixel_data( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank); void dcn20_blank_pixel_data( struct dc *dc, struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 5cfd4b0afea5..91e4885b743e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -27,6 +27,8 @@ #include "dcn10/dcn10_hw_sequencer.h" #include "dcn20_hwseq.h" +#include "dcn20_init.h" + static const struct hw_sequencer_funcs dcn20_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn10_init_hw, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 947eb0df3f12..15734db0cdea 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -400,10 +400,9 @@ static void mpc20_program_ogam_pwl( } -void apply_DEDCN20_305_wa( - struct mpc *mpc, - int mpcc_id, enum dc_lut_mode current_mode, - enum dc_lut_mode next_mode) +static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id, + enum dc_lut_mode current_mode, + enum dc_lut_mode next_mode) { struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); @@ -525,7 +524,7 @@ static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst) mpcc->sm_cfg.enable = false; } -struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) +static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) { struct mpcc *tmp_mpcc = tree->opp_list; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 8c34751b0e58..0340fdd3f5fb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -73,21 +73,6 @@ bool optc2_enable_crtc(struct timing_generator *optc) } /** - * DRR double buffering control to select buffer point - * for V_TOTAL, H_TOTAL, VTOTAL_MIN, VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers - * Options: anytime, start of frame, dp start of frame (range timing) - */ -void optc2_set_timing_db_mode(struct timing_generator *optc, bool enable) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - uint32_t blank_data_double_buffer_enable = enable ? 
1 : 0; - - REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL, - OTG_RANGE_TIMING_DBUF_UPDATE_MODE, blank_data_double_buffer_enable); -} - -/** *For the below, I'm not sure how your GSL parameters are stored in your env, * so I will assume a gsl_params struct for now */ @@ -110,30 +95,6 @@ void optc2_set_gsl(struct timing_generator *optc, } -/* Use the gsl allow flip as the master update lock */ -void optc2_use_gsl_as_master_update_lock(struct timing_generator *optc, - const struct gsl_params *params) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE(OTG_GSL_CONTROL, - OTG_MASTER_UPDATE_LOCK_GSL_EN, params->master_update_lock_gsl_en); -} - -/* You can control the GSL timing by limiting GSL to a window (X,Y) */ -void optc2_set_gsl_window(struct timing_generator *optc, - const struct gsl_params *params) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET_2(OTG_GSL_WINDOW_X, 0, - OTG_GSL_WINDOW_START_X, params->gsl_window_start_x, - OTG_GSL_WINDOW_END_X, params->gsl_window_end_x); - REG_SET_2(OTG_GSL_WINDOW_Y, 0, - OTG_GSL_WINDOW_START_Y, params->gsl_window_start_y, - OTG_GSL_WINDOW_END_Y, params->gsl_window_end_y); -} - void optc2_set_gsl_source_select( struct timing_generator *optc, int group_idx, @@ -156,18 +117,6 @@ void optc2_set_gsl_source_select( } } -/* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */ -void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc, - int x_position, - int line_num) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET_2(OTG_DSC_START_POSITION, 0, - OTG_DSC_START_POSITION_X, x_position, - OTG_DSC_START_POSITION_LINE_NUM, line_num); -} - /* Set DSC-related configuration. * dsc_mode: 0 disables DSC, other values enable DSC in specified format * sc_bytes_per_pixel: Bytes per pixel in u3.28 format @@ -293,8 +242,8 @@ void optc2_get_optc_source(struct timing_generator *optc, *num_of_src_opp = 1; } -void optc2_set_dwb_source(struct timing_generator *optc, - uint32_t dwb_pipe_inst) +static void optc2_set_dwb_source(struct timing_generator *optc, + uint32_t dwb_pipe_inst) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -306,7 +255,7 @@ void optc2_set_dwb_source(struct timing_generator *optc, OPTC_DWB1_SOURCE_SELECT, optc->inst); } -void optc2_align_vblanks( +static void optc2_align_vblanks( struct timing_generator *optc_master, struct timing_generator *optc_slave, uint32_t master_pixel_clock_100Hz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 3883f918b3bb..2bc93df023ad 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, @@ -3093,8 +3093,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { struct dc_link *link = context->streams[0]->sink->link; - if ((link->link_index == 0 && link->psr_settings.psr_feature_enabled) - || context->bw_ctx.dml.vba.StutterPeriod > 5000.0) + if (link->link_index == 0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0) return DCN_ZSTATE_SUPPORT_ALLOW; else return 
DCN_ZSTATE_SUPPORT_DISALLOW; @@ -3796,6 +3795,8 @@ static bool dcn20_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.hdmi_frl_pcon_support = true; + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) { dc->debug = debug_defaults_drv; } else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c index f5bf04f7da25..9a3402148fde 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c @@ -44,7 +44,8 @@ #define DC_LOGGER \ dccg->ctx->logger -void dccg201_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) +static void dccg201_update_dpp_dto(struct dccg *dccg, int dpp_inst, + int req_dppclk) { /* vbios handles it */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c index 6b6f74d4afd1..35dd4bac242a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c @@ -55,7 +55,7 @@ static void hubp201_program_surface_config( hubp1_program_pixel_format(hubp, format); } -void hubp201_program_deadline( +static void hubp201_program_deadline( struct hubp *hubp, struct _vcs_dpi_display_dlg_regs_st *dlg_attr, struct _vcs_dpi_display_ttu_regs_st *ttu_attr) @@ -63,9 +63,8 @@ void hubp201_program_deadline( hubp1_program_deadline(hubp, dlg_attr, ttu_attr); } -void hubp201_program_requestor( - struct hubp *hubp, - struct _vcs_dpi_display_rq_regs_st *rq_regs) +static void hubp201_program_requestor(struct hubp *hubp, + struct _vcs_dpi_display_rq_regs_st *rq_regs) { struct dcn201_hubp *hubp201 = TO_DCN201_HUBP(hubp); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c index cfd09b3f705e..fe22530242d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c @@ -134,11 +134,12 @@ void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) PHYSICAL_ADDRESS_LOC addr; struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dce_hwseq *hws = dc->hwseq; - struct dc_plane_address uma = plane_state->address; + struct dc_plane_address uma; if (plane_state == NULL) return; + uma = plane_state->address; addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr); plane_address_in_gpu_space_to_uma(hws, &uma); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c index a65e8f7801db..7f9ec59ef443 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c @@ -50,8 +50,8 @@ #define IND_REG(index) \ (enc10->link_regs->index) -void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc, - struct dc_link_settings *link_settings) +static void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings) { uint32_t value1, value2; struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); @@ -66,7 +66,7 @@ void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc, } } -bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc) +static bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc) { uint32_t value; struct dcn10_link_encoder *enc10 = 
TO_DCN10_LINK_ENC(enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c index 0fa381088d1d..0bb7d3dd53fa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -603,7 +603,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_AVOID, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, @@ -672,9 +672,8 @@ static struct output_pixel_processor *dcn201_opp_create( return &opp->base; } -struct dce_aux *dcn201_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx, + uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); @@ -706,9 +705,8 @@ static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) }; -struct dce_i2c_hw *dcn201_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx, + uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); @@ -789,7 +787,7 @@ static const struct encoder_feature_support link_enc_feature = { .flags.bits.IS_TPS4_CAPABLE = true }; -struct link_encoder *dcn201_link_encoder_create( +static struct link_encoder *dcn201_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = @@ -811,7 +809,7 @@ struct link_encoder *dcn201_link_encoder_create( return &enc10->base; } -struct clock_source *dcn201_clock_source_create( +static struct clock_source *dcn201_clock_source_create( struct dc_context *ctx, struct dc_bios *bios, enum clock_source_id id, @@ -906,7 +904,7 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .create_hwseq = dcn201_hwseq_create, }; -void dcn201_clock_source_destroy(struct clock_source **clk_src) +static void dcn201_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); *clk_src = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index 36044cb8ec83..c5e200d09038 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -680,7 +680,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage); } -void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub) +static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); uint32_t prog_wm_value; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 3de1bcf9b3d8..58e459c7e7d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -183,7 +183,7 @@ static void hubp21_setup( } -void hubp21_set_viewport( +static void hubp21_set_viewport( struct hubp *hubp, const struct rect *viewport, const struct rect *viewport_c) @@ -225,8 +225,8 @@ void hubp21_set_viewport( SEC_VIEWPORT_Y_START_C, viewport_c->y); } -void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, - struct vm_system_aperture_param 
*apt) +static void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, + struct vm_system_aperture_param *apt) { struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); @@ -248,7 +248,7 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, SYSTEM_ACCESS_MODE, 0x3); } -void hubp21_validate_dml_output(struct hubp *hubp, +static void hubp21_validate_dml_output(struct hubp *hubp, struct dc_context *ctx, struct _vcs_dpi_display_rq_regs_st *dml_rq_regs, struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, @@ -664,7 +664,8 @@ static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS); } -void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs) +static void dmcub_PLAT_54186_wa(struct hubp *hubp, + struct surface_flip_registers *flip_regs) { struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv; struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); @@ -697,7 +698,7 @@ void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_ PERF_TRACE(); // TODO: remove after performance is stable. } -bool hubp21_program_surface_flip_and_addr( +static bool hubp21_program_surface_flip_and_addr( struct hubp *hubp, const struct dc_plane_address *address, bool flip_immediate) @@ -805,7 +806,7 @@ bool hubp21_program_surface_flip_and_addr( return true; } -void hubp21_init(struct hubp *hubp) +static void hubp21_init(struct hubp *hubp) { // DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta // This is a chicken bit to enable the ECO fix. diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index 54c11ba550ae..b270f0b194dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -28,6 +28,8 @@ #include "dcn20/dcn20_hwseq.h" #include "dcn21_hwseq.h" +#include "dcn21_init.h" + static const struct hw_sequencer_funcs dcn21_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn10_init_hw, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c index aa46c35b05a2..0a1ba6e7081c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -203,7 +203,7 @@ static bool update_cfg_data( return true; } -bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc) +static bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); int value; @@ -277,7 +277,7 @@ void dcn21_link_encoder_enable_dp_output( } -void dcn21_link_encoder_enable_dp_mst_output( +static void dcn21_link_encoder_enable_dp_mst_output( struct link_encoder *enc, const struct dc_link_settings *link_settings, enum clock_source_id clock_source) @@ -288,9 +288,8 @@ void dcn21_link_encoder_enable_dp_mst_output( dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source); } -void dcn21_link_encoder_disable_output( - struct link_encoder *enc, - enum signal_type signal) +static void dcn21_link_encoder_disable_output(struct link_encoder *enc, + enum signal_type signal) { dcn10_link_encoder_disable_output(enc, signal); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index d452a0d1777e..e5cc6bf45743 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -784,9 +784,8 @@ static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) }; -struct dce_i2c_hw *dcn21_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_i2c_hw *dcn21_i2c_hw_create(struct dc_context *ctx, + uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); @@ -874,7 +873,7 @@ static const struct dc_debug_options debug_defaults_drv = { .clock_trace = true, .disable_pplib_clock_request = true, .min_disp_clk_khz = 100000, - .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, @@ -1093,7 +1092,7 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s } } -void dcn21_calculate_wm( +static void dcn21_calculate_wm( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *out_pipe_cnt, @@ -1390,7 +1389,7 @@ validate_out: * with DC_FP_START()/DC_FP_END(). Use the same approach as for * dcn20_validate_bandwidth in dcn20_resource.c. */ -bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, +static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate) { bool voltage_supported; @@ -1480,8 +1479,8 @@ static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx) return &hubbub->base; } -struct output_pixel_processor *dcn21_opp_create( - struct dc_context *ctx, uint32_t inst) +static struct output_pixel_processor *dcn21_opp_create(struct dc_context *ctx, + uint32_t inst) { struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); @@ -1496,9 +1495,8 @@ struct output_pixel_processor *dcn21_opp_create( return &opp->base; } -struct timing_generator *dcn21_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) +static struct timing_generator *dcn21_timing_generator_create(struct dc_context *ctx, + uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); @@ -1518,7 +1516,7 @@ struct timing_generator *dcn21_timing_generator_create( return &tgn10->base; } -struct mpc *dcn21_mpc_create(struct dc_context *ctx) +static struct mpc *dcn21_mpc_create(struct dc_context *ctx) { struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), GFP_KERNEL); @@ -1545,8 +1543,8 @@ static void read_dce_straps( } -struct display_stream_compressor *dcn21_dsc_create( - struct dc_context *ctx, uint32_t inst) +static struct display_stream_compressor *dcn21_dsc_create(struct dc_context *ctx, + uint32_t inst) { struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); @@ -1683,9 +1681,8 @@ static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; -struct stream_encoder *dcn21_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) +static struct stream_encoder *dcn21_stream_encoder_create(enum engine_id eng_id, + struct dc_context *ctx) { struct dcn10_stream_encoder *enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); @@ -1917,7 +1914,7 @@ static int dcn21_populate_dml_pipes_from_context( return pipe_cnt; } -enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state) +static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; @@ -2028,6 +2025,8 @@ static bool dcn21_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; 
dc->caps.color.mpc.ocsc = 1; + dc->caps.hdmi_frl_pcon_support = true; + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 7aa9aaf5db4c..8daa12730bc1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -50,22 +50,6 @@ enc1->base.ctx -void convert_dc_info_packet_to_128( - const struct dc_info_packet *info_packet, - struct dc_info_packet_128 *info_packet_128) -{ - unsigned int i; - - info_packet_128->hb0 = info_packet->hb0; - info_packet_128->hb1 = info_packet->hb1; - info_packet_128->hb2 = info_packet->hb2; - info_packet_128->hb3 = info_packet->hb3; - - for (i = 0; i < 32; i++) { - info_packet_128->sb[i] = info_packet->sb[i]; - } - -} static void enc3_update_hdmi_info_packet( struct dcn10_stream_encoder *enc1, uint32_t packet_index, @@ -489,7 +473,7 @@ static void enc3_dp_set_odm_combine( } /* setup stream encoder in dvi mode */ -void enc3_stream_encoder_dvi_set_stream_attribute( +static void enc3_stream_encoder_dvi_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, bool is_dual_link) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index c1d967ed6551..ab3918c0a15b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -41,8 +41,7 @@ dpp->tf_shift->field_name, dpp->tf_mask->field_name -void dpp30_read_state(struct dpp *dpp_base, - struct dcn_dpp_state *s) +static void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s) { struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); @@ -373,7 +372,7 @@ void dpp3_set_cursor_attributes( } -bool dpp3_get_optimal_number_of_taps( +static bool dpp3_get_optimal_number_of_taps( struct dpp *dpp, struct scaler_data *scl_data, const struct scaling_taps *in_taps) @@ -474,22 +473,7 @@ bool dpp3_get_optimal_number_of_taps( return true; } -void dpp3_cnv_set_bias_scale( - struct dpp *dpp_base, - struct dc_bias_and_scale *bias_and_scale) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE(FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, bias_and_scale->bias_red); - REG_UPDATE(FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, bias_and_scale->bias_green); - REG_UPDATE(FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, bias_and_scale->bias_blue); - REG_UPDATE(FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, bias_and_scale->scale_red); - REG_UPDATE(FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, bias_and_scale->scale_green); - REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue); -} - -void dpp3_deferred_update( - struct dpp *dpp_base) +static void dpp3_deferred_update(struct dpp *dpp_base) { int bypass_state; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); @@ -751,8 +735,8 @@ static enum dc_lut_mode dpp3_get_blndgam_current(struct dpp *dpp_base) return mode; } -bool dpp3_program_blnd_lut( - struct dpp *dpp_base, const struct pwl_params *params) +static bool dpp3_program_blnd_lut(struct dpp *dpp_base, + const struct pwl_params *params) { enum dc_lut_mode current_mode; enum dc_lut_mode next_mode; @@ -1164,9 +1148,8 @@ static void dpp3_program_shaper_lutb_settings( } -bool dpp3_program_shaper( - struct dpp *dpp_base, - const struct pwl_params *params) +static bool dpp3_program_shaper(struct dpp *dpp_base, + const 
struct pwl_params *params) { enum dc_lut_mode current_mode; enum dc_lut_mode next_mode; @@ -1355,9 +1338,8 @@ static void dpp3_select_3dlut_ram_mask( REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0); } -bool dpp3_program_3dlut( - struct dpp *dpp_base, - struct tetrahedral_params *params) +static bool dpp3_program_3dlut(struct dpp *dpp_base, + struct tetrahedral_params *params) { enum dc_lut_mode mode; bool is_17x17x17; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index eac08926b574..6a4dcafb9bba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -490,6 +490,7 @@ static struct hubp_funcs dcn30_hubp_funcs = { .hubp_setup_interdependent = hubp2_setup_interdependent, .hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings, .set_blank = hubp2_set_blank, + .set_blank_regs = hubp2_set_blank_regs, .dcc_control = hubp3_dcc_control, .mem_program_viewport = min_set_viewport, .set_cursor_attributes = hubp2_cursor_set_attributes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 3e99bb9c70ab..1db1ca19411d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -344,6 +344,17 @@ void dcn30_enable_writeback( dwb->funcs->enable(dwb, &wb_info->dwb_params); } +void dcn30_prepare_bandwidth(struct dc *dc, + struct dc_state *context) +{ + if (dc->clk_mgr->dc_mode_softmax_enabled) + if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && + context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) + dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); + + dcn20_prepare_bandwidth(dc, context); +} + void dcn30_disable_writeback( struct dc *dc, unsigned int dwb_pipe_inst) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h index e9a0005288d3..73e7b690e82c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h @@ -27,7 +27,7 @@ #define __DC_HWSS_DCN30_H__ #include "hw_sequencer_private.h" - +#include "dcn20/dcn20_hwseq.h" struct dc; void dcn30_init_hw(struct dc *dc); @@ -47,6 +47,9 @@ void dcn30_disable_writeback( struct dc *dc, unsigned int dwb_pipe_inst); +void dcn30_prepare_bandwidth(struct dc *dc, + struct dc_state *context); + bool dcn30_mmhubbub_warmup( struct dc *dc, unsigned int num_dwb, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 93f32a312fee..bb347319de83 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -29,6 +29,8 @@ #include "dcn21/dcn21_hwseq.h" #include "dcn30_hwseq.h" +#include "dcn30_init.h" + static const struct hw_sequencer_funcs dcn30_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn30_init_hw, @@ -53,6 +55,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = 
dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c index 1c4b171c68ad..7a93eff183d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c @@ -100,7 +100,7 @@ static void mmhubbub3_warmup_mcif(struct mcif_wb *mcif_wb, REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, false); } -void mmhubbub3_config_mcif_buf(struct mcif_wb *mcif_wb, +static void mmhubbub3_config_mcif_buf(struct mcif_wb *mcif_wb, struct mcif_buf_params *params, unsigned int dest_height) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index 95149734378b..0ce0d6165f43 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -1362,7 +1362,7 @@ uint32_t mpcc3_acquire_rmu(struct mpc *mpc, int mpcc_id, int rmu_idx) return -1; } -int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id) +static int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id) { struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); int rmu_idx; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 79a66e0c4303..8ca26383b568 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -816,7 +816,7 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = true, .nv12 = true, .fp16 = true, - .p010 = false, + .p010 = true, .ayuv = false, }, @@ -840,7 +840,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, @@ -875,7 +875,7 @@ static const struct dc_debug_options debug_defaults_diags = { .use_max_lb = true }; -void dcn30_dpp_destroy(struct dpp **dpp) +static void dcn30_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); *dpp = NULL; @@ -992,7 +992,7 @@ static struct mpc *dcn30_mpc_create( return &mpc30->base; } -struct hubbub *dcn30_hubbub_create(struct dc_context *ctx) +static struct hubbub *dcn30_hubbub_create(struct dc_context *ctx) { int i; @@ -1143,9 +1143,8 @@ static struct afmt *dcn30_afmt_create( return &afmt3->base; } -struct stream_encoder *dcn30_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) +static struct stream_encoder *dcn30_stream_encoder_create(enum engine_id eng_id, + struct dc_context *ctx) { struct dcn10_stream_encoder *enc1; struct vpg *vpg; @@ -1179,8 +1178,7 @@ struct stream_encoder *dcn30_stream_encoder_create( return &enc1->base; } -struct dce_hwseq *dcn30_hwseq_create( - struct dc_context *ctx) +static struct dce_hwseq *dcn30_hwseq_create(struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); @@ -1880,7 +1878,6 @@ noinline bool dcn30_internal_validate_bw( dc->res_pool->funcs->update_soc_for_wm_a(dc, context); pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); - DC_FP_START(); if (!pipe_cnt) { out = true; goto validate_out; @@ -2106,7 +2103,6 @@ validate_fail: out = false; validate_out: - DC_FP_END(); return out; } @@ -2308,7 +2304,9 @@ bool dcn30_validate_bandwidth(struct dc *dc, 
BW_VAL_TRACE_COUNT(); + DC_FP_START(); out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + DC_FP_END(); if (pipe_cnt == 0) goto validate_out; @@ -2639,6 +2637,8 @@ static bool dcn30_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.hdmi_frl_pcon_support = true; + /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c index e85b695f2351..3d42a1a337ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c @@ -30,6 +30,8 @@ #include "dcn30/dcn30_hwseq.h" #include "dcn301_hwseq.h" +#include "dcn301_init.h" + static const struct hw_sequencer_funcs dcn301_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn10_init_hw, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c index 736bda30abc3..ad0df1a72a90 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c @@ -93,7 +93,7 @@ static unsigned int dcn301_get_16_bit_backlight_from_pwm(struct panel_cntl *pane return (uint32_t)(current_backlight); } -uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl) +static uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); uint32_t value; @@ -147,7 +147,7 @@ uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl) return current_backlight; } -void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl) +static void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(*panel_cntl); @@ -155,7 +155,7 @@ void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl) *panel_cntl = NULL; } -bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl) +static bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); uint32_t value; @@ -165,7 +165,7 @@ bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl) return value; } -bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl) +static bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); uint32_t pwr_seq_state, dig_on, dig_on_ovrd; @@ -177,7 +177,7 @@ bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl) return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1); } -void dcn301_store_backlight_level(struct panel_cntl *panel_cntl) +static void dcn301_store_backlight_level(struct panel_cntl *panel_cntl) { struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 7abc36a4ff76..5d9637b07429 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -656,7 +656,7 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = true, .nv12 = true, .fp16 = true, - .p010 = false, + .p010 = true, .ayuv = false, }, @@ -717,15 +717,13 @@ static const struct dc_debug_options 
debug_defaults_diags = { .use_max_lb = false, }; -void dcn301_dpp_destroy(struct dpp **dpp) +static void dcn301_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); *dpp = NULL; } -struct dpp *dcn301_dpp_create( - struct dc_context *ctx, - uint32_t inst) +static struct dpp *dcn301_dpp_create(struct dc_context *ctx, uint32_t inst) { struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); @@ -741,8 +739,8 @@ struct dpp *dcn301_dpp_create( kfree(dpp); return NULL; } -struct output_pixel_processor *dcn301_opp_create( - struct dc_context *ctx, uint32_t inst) +static struct output_pixel_processor *dcn301_opp_create(struct dc_context *ctx, + uint32_t inst) { struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); @@ -757,9 +755,7 @@ struct output_pixel_processor *dcn301_opp_create( return &opp->base; } -struct dce_aux *dcn301_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_aux *dcn301_aux_engine_create(struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); @@ -793,9 +789,7 @@ static const struct dce_i2c_mask i2c_masks = { I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) }; -struct dce_i2c_hw *dcn301_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) +static struct dce_i2c_hw *dcn301_i2c_hw_create(struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); @@ -829,7 +823,7 @@ static struct mpc *dcn301_mpc_create( return &mpc30->base; } -struct hubbub *dcn301_hubbub_create(struct dc_context *ctx) +static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx) { int i; @@ -860,9 +854,8 @@ struct hubbub *dcn301_hubbub_create(struct dc_context *ctx) return &hubbub3->base; } -struct timing_generator *dcn301_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) +static struct timing_generator *dcn301_timing_generator_create( + struct dc_context *ctx, uint32_t instance) { struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); @@ -894,7 +887,7 @@ static const struct encoder_feature_support link_enc_feature = { .flags.bits.IS_TPS4_CAPABLE = true }; -struct link_encoder *dcn301_link_encoder_create( +static struct link_encoder *dcn301_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = @@ -915,7 +908,7 @@ struct link_encoder *dcn301_link_encoder_create( return &enc20->enc10.base; } -struct panel_cntl *dcn301_panel_cntl_create(const struct panel_cntl_init_data *init_data) +static struct panel_cntl *dcn301_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dcn301_panel_cntl *panel_cntl = kzalloc(sizeof(struct dcn301_panel_cntl), GFP_KERNEL); @@ -997,9 +990,8 @@ static struct afmt *dcn301_afmt_create( return &afmt3->base; } -struct stream_encoder *dcn301_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) +static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id, + struct dc_context *ctx) { struct dcn10_stream_encoder *enc1; struct vpg *vpg; @@ -1033,8 +1025,7 @@ struct stream_encoder *dcn301_stream_encoder_create( return &enc1->base; } -struct dce_hwseq *dcn301_hwseq_create( - struct dc_context *ctx) +static struct dce_hwseq *dcn301_hwseq_create(struct dc_context *ctx) { struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); @@ -1182,9 +1173,7 @@ static void dcn301_destruct(struct dcn301_resource_pool *pool) dcn_dccg_destroy(&pool->base.dccg); } 
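/* Editor's aside (illustrative sketch, not part of the patch): the
 * DC_FP_START()/DC_FP_END() movement in the dcn30_resource.c hunks above
 * hoists the kernel-FPU guard out of dcn30_internal_validate_bw() and into
 * its caller, so the whole floating-point region is entered and left exactly
 * once. The shape, with run_dml_calculations() as a hypothetical FP-heavy
 * helper:
 *
 *	static bool validate_with_fp(struct dc *dc, struct dc_state *context)
 *	{
 *		bool out;
 *
 *		DC_FP_START();	// enter the protected kernel-FPU context
 *		out = run_dml_calculations(dc, context);
 *		DC_FP_END();	// leave it; no FP use past this point
 *
 *		return out;
 *	}
 */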
-struct hubp *dcn301_hubp_create( - struct dc_context *ctx, - uint32_t inst) +static struct hubp *dcn301_hubp_create(struct dc_context *ctx, uint32_t inst) { struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); @@ -1201,7 +1190,7 @@ struct hubp *dcn301_hubp_create( return NULL; } -bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +static bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; @@ -1226,7 +1215,7 @@ bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) return true; } -bool dcn301_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +static bool dcn301_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) { int i; uint32_t pipe_count = pool->res_cap->num_dwb; @@ -1391,6 +1380,17 @@ static void set_wm_ranges( pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges); } +static void dcn301_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + DC_FP_START(); + dcn301_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel); + DC_FP_END(); +} + static struct resource_funcs dcn301_res_pool_funcs = { .destroy = dcn301_destroy_resource_pool, .link_enc_create = dcn301_link_encoder_create, diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c index d88b9011c502..eb375f30f5bc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c @@ -29,6 +29,8 @@ #include "dc.h" +#include "dcn302_init.h" + void dcn302_hw_sequencer_construct(struct dc *dc) { dcn30_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 058f5d71e037..2e9cbfa7663b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -211,7 +211,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, @@ -276,7 +276,7 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = true, .nv12 = true, .fp16 = true, - .p010 = false, + .p010 = true, .ayuv = false, }, .max_upscale_factor = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h index a79c54bbc899..294bd757bcb5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h @@ -15,7 +15,11 @@ SR(DPPCLK_DTO_CTRL),\ DCCG_SRII(DTO_PARAM, DPPCLK, 0),\ DCCG_SRII(DTO_PARAM, DPPCLK, 1),\ - SR(REFCLK_CNTL) + SR(REFCLK_CNTL),\ + SR(DISPCLK_FREQ_CHANGE_CNTL),\ + DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\ + DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1) + #define DCCG_MASK_SH_LIST_DCN3_03(mask_sh) \ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\ @@ -25,6 +29,18 @@ DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\ DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\ DCCG_SF(REFCLK_CNTL, REFCLK_CLOCK_EN, mask_sh),\ - DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh) + DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh),\ + 
DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\ + DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, mask_sh),\ + DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 1, mask_sh),\ + DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 0, mask_sh),\ + DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh) #endif //__DCN303_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c index aa5dbbade2bd..f499f8ab5e47 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c @@ -9,6 +9,8 @@ #include "dcn30/dcn30_init.h" #include "dc.h" +#include "dcn303_init.h" + void dcn303_hw_sequencer_construct(struct dc *dc) { dcn30_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 7024aeb0884c..2de687f64cf6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -193,7 +193,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, @@ -254,7 +254,7 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = true, .nv12 = true, .fp16 = true, - .p010 = false, + .p010 = true, .ayuv = false, }, .max_upscale_factor = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 815481a3ef54..ea4f8e06b07c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -462,7 +462,7 @@ void dccg31_set_physymclk( } /* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */ -void dccg31_set_dtbclk_dto( +static void dccg31_set_dtbclk_dto( struct dccg *dccg, int dtbclk_inst, int req_dtbclk_khz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index ee6f13bef377..8b9b1a5309ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -67,6 +67,68 @@ #define MIN(X, Y) ((X) < (Y) ? 
(X) : (Y)) #endif +static uint8_t phy_id_from_transmitter(enum transmitter t) +{ + uint8_t phy_id; + + switch (t) { + case TRANSMITTER_UNIPHY_A: + phy_id = 0; + break; + case TRANSMITTER_UNIPHY_B: + phy_id = 1; + break; + case TRANSMITTER_UNIPHY_C: + phy_id = 2; + break; + case TRANSMITTER_UNIPHY_D: + phy_id = 3; + break; + case TRANSMITTER_UNIPHY_E: + phy_id = 4; + break; + case TRANSMITTER_UNIPHY_F: + phy_id = 5; + break; + case TRANSMITTER_UNIPHY_G: + phy_id = 6; + break; + default: + phy_id = 0; + break; + } + return phy_id; +} + +static bool has_query_dp_alt(struct link_encoder *enc) +{ + struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; + + /* Supports development firmware and firmware >= 4.0.11 */ + return dc_dmub_srv && + !(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) && + dc_dmub_srv->dmub->fw_version <= DMUB_FW_VERSION(4, 0, 10)); +} + +static bool query_dp_alt_from_dmub(struct link_encoder *enc, + union dmub_rb_cmd *cmd) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; + + memset(cmd, 0, sizeof(*cmd)); + cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS; + cmd->query_dp_alt.header.sub_type = + DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; + cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data); + cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); + + if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd)) + return false; + + return true; +} + void dcn31_link_encoder_set_dio_phy_mux( struct link_encoder *enc, enum encoder_type_select sel, @@ -141,7 +203,7 @@ void dcn31_link_encoder_set_dio_phy_mux( } } -void enc31_hw_init(struct link_encoder *enc) +static void enc31_hw_init(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); @@ -536,57 +598,90 @@ void dcn31_link_encoder_disable_output( bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + union dmub_rb_cmd cmd; uint32_t dp_alt_mode_disable; - bool is_usb_c_alt_mode = false; - if (enc->features.flags.bits.DP_IS_USB_C) { - if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { - // [Note] no need to check hw_internal_rev once phy mux selection is ready - REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); - } else { + /* Only applicable to USB-C PHY. */ + if (!enc->features.flags.bits.DP_IS_USB_C) + return false; + + /* + * Use the new interface from DMCUB if available. + * Avoids hanging the RDCPSPIPE if DMCUB wasn't already running. + */ + if (has_query_dp_alt(enc)) { + if (!query_dp_alt_from_dmub(enc, &cmd)) + return false; + + return (cmd.query_dp_alt.data.is_dp_alt_disable == 0); + } + + /* Legacy path, avoid if possible. */ + if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, + &dp_alt_mode_disable); + } else { /* * B0 phys use a new set of registers to check whether alt mode is disabled. * if value == 1 alt mode is disabled, otherwise it is enabled. 
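 * For illustration (a sketch, not authoritative): on a B0 part driving UNIPHY_C the branch below reduces to REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable) followed by return (dp_alt_mode_disable == 0), i.e. alt mode is reported active only while the field reads back as zero.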
*/ - if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) - || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) - || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { - REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); - } else { - // [Note] need to change TRANSMITTER_UNIPHY_C/D to F/G once phy mux selection is ready - REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); - } + if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) || + (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) || + (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, + &dp_alt_mode_disable); + } else { + REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, + &dp_alt_mode_disable); } - - is_usb_c_alt_mode = (dp_alt_mode_disable == 0); } - return is_usb_c_alt_mode; + return (dp_alt_mode_disable == 0); } -void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, - struct dc_link_settings *link_settings) +void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + union dmub_rb_cmd cmd; uint32_t is_in_usb_c_dp4_mode = 0; dcn10_link_encoder_get_max_link_cap(enc, link_settings); - /* in usb c dp2 mode, max lane count is 2 */ - if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) { - if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { - // [Note] no need to check hw_internal_rev once phy mux selection is ready - REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); + /* Take the link cap directly if not USB */ + if (!enc->features.flags.bits.DP_IS_USB_C) + return; + + /* + * Use the new interface from DMCUB if available. + * Avoids hanging the RDCPSPIPE if DMCUB wasn't already running. + */ + if (has_query_dp_alt(enc)) { + if (!query_dp_alt_from_dmub(enc, &cmd)) + return; + + if (cmd.query_dp_alt.data.is_usb && + cmd.query_dp_alt.data.is_dp4 == 0) + link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); + + return; + } + + /* Legacy path, avoid if possible. 
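 * This reads RDPCS_PHY_DPALT_DP4 straight from the PHY; when the result indicates a USB-C DP2 rather than DP4 pin assignment, the lane count is capped to two at the bottom of the function.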
*/ + if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, + &is_in_usb_c_dp4_mode); + } else { + if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) || + (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) || + (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, + &is_in_usb_c_dp4_mode); } else { - if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) - || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) - || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { - REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); - } else { - REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); - } + REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, + &is_in_usb_c_dp4_mode); } - if (!is_in_usb_c_dp4_mode) - link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); } + + if (!is_in_usb_c_dp4_mode) + link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c index 6c08e21bb708..80dfaa4d4d81 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c @@ -499,7 +499,8 @@ static enum bp_result link_transmitter_control( void dcn31_hpo_dp_link_enc_enable_dp_output( struct hpo_dp_link_encoder *enc, const struct dc_link_settings *link_settings, - enum transmitter transmitter) + enum transmitter transmitter, + enum hpd_source_id hpd_source) { struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); struct bp_transmitter_control cntl = { 0 }; @@ -508,6 +509,9 @@ void dcn31_hpo_dp_link_enc_enable_dp_output( /* Set the transmitter */ enc3->base.transmitter = transmitter; + /* Set the hpd source */ + enc3->base.hpd_source = hpd_source; + /* Enable the PHY */ cntl.action = TRANSMITTER_CONTROL_ENABLE; cntl.engine_id = ENGINE_ID_UNKNOWN; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h index 0706ccaf6fec..e324e9b83136 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h @@ -184,7 +184,8 @@ void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31, void dcn31_hpo_dp_link_enc_enable_dp_output( struct hpo_dp_link_encoder *enc, const struct dc_link_settings *link_settings, - enum transmitter transmitter); + enum transmitter transmitter, + enum hpd_source_id hpd_source); void dcn31_hpo_dp_link_enc_disable_output( struct hpo_dp_link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index e175b6cc0125..d7559e5a99ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -31,6 +31,8 @@ #include "dcn301/dcn301_hwseq.h" #include "dcn31/dcn31_hwseq.h" +#include "dcn31_init.h" + static const struct hw_sequencer_funcs dcn31_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn31_init_hw, @@ -101,6 +103,8 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .z10_restore = dcn31_z10_restore, .z10_save_init = dcn31_z10_save_init, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, + .optimize_pwr_state = 
dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .update_visual_confirm_color = dcn20_update_visual_confirm_color, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c index 3b3721386571..83ece02380a8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c @@ -65,7 +65,7 @@ static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cnt return cmd.panel_cntl.data.current_backlight; } -uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) +static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) { struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(panel_cntl); struct dc_dmub_srv *dc_dmub_srv = panel_cntl->ctx->dmub_srv; @@ -96,7 +96,7 @@ uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) return cmd.panel_cntl.data.current_backlight; } -void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl) +static void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl) { struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(*panel_cntl); @@ -104,7 +104,7 @@ void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl) *panel_cntl = NULL; } -bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl) +static bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl) { union dmub_rb_cmd cmd; @@ -114,7 +114,7 @@ bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl) return cmd.panel_cntl.data.is_backlight_on; } -bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl) +static bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl) { union dmub_rb_cmd cmd; @@ -124,7 +124,7 @@ bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl) return cmd.panel_cntl.data.is_powered_on; } -void dcn31_store_backlight_level(struct panel_cntl *panel_cntl) +static void dcn31_store_backlight_level(struct panel_cntl *panel_cntl) { union dmub_rb_cmd cmd; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 88e040687940..8d64187478e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -355,6 +355,14 @@ static const struct dce110_clk_src_regs clk_src_regs[] = { clk_src_regs(3, D), clk_src_regs(4, E) }; +/* pll_id is remapped in DMUB; in the driver it is a logical instance */ +static const struct dce110_clk_src_regs clk_src_regs_b0[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, F), + clk_src_regs(3, G), + clk_src_regs(4, E) +}; static const struct dce110_clk_src_shift cs_shift = { CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) @@ -485,7 +493,8 @@ static const struct dcn31_apg_mask apg_mask = { SE_DCN3_REG_LIST(id)\ } -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { +/* Some encoders won't be initialized here - but they're logical, not physical. 
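 * Sizing the array as stream_enc_regs[ENGINE_ID_COUNT] rather than letting the initializer list determine its length keeps lookups by engine id in bounds; C zero-fills the entries that have no explicit initializer.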
*/ +static const struct dcn10_stream_enc_registers stream_enc_regs[ENGINE_ID_COUNT] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), @@ -968,7 +977,7 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = true, .nv12 = true, .fp16 = true, - .p010 = false, + .p010 = true, .ayuv = false, }, @@ -994,7 +1003,7 @@ static const struct dc_debug_options debug_defaults_drv = { .timing_trace = false, .clock_trace = true, .disable_pplib_clock_request = false, - .pipe_split_policy = MPC_SPLIT_AVOID, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, @@ -1023,6 +1032,7 @@ static const struct dc_debug_options debug_defaults_drv = { }, .optimize_edp_link_rate = true, .enable_sw_cntl_psr = true, + .apply_vendor_specific_lttpr_wa = true, }; static const struct dc_debug_options debug_defaults_diags = { @@ -1270,7 +1280,7 @@ static struct link_encoder *dcn31_link_enc_create_minimal( return &enc20->enc10.base; } -struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) +static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) { struct dcn31_panel_cntl *panel_cntl = kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); @@ -1774,6 +1784,7 @@ static int dcn31_populate_dml_pipes_from_context( int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; struct pipe_ctx *pipe; + bool upscaled = false; dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); @@ -1785,6 +1796,11 @@ static int dcn31_populate_dml_pipes_from_context( pipe = &res_ctx->pipe_ctx[i]; timing = &pipe->stream->timing; + if (pipe->plane_state && + (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height || + pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width)) + upscaled = true; + /* * Immediate flip can be set dynamically after enabling the plane. 
* We need to require support for immediate flip or underflow can be @@ -1829,6 +1845,11 @@ static int dcn31_populate_dml_pipes_from_context( context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; pipes[0].pipe.src.unbounded_req_mode = true; } + } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count + && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) { + context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64; + } else if (context->stream_count >= 3 && upscaled) { + context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; } return pipe_cnt; @@ -1963,7 +1984,7 @@ static void dcn31_calculate_wm_and_dlg_fp( pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - if (dc->config.forced_clocks) { + if (dc->config.forced_clocks || dc->debug.max_disp_clk) { pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; } @@ -2199,6 +2220,7 @@ static bool dcn31_resource_construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.dp_hpo = true; + dc->caps.hdmi_frl_pcon_support = true; dc->caps.edp_dsc_support = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; @@ -2238,6 +2260,9 @@ static bool dcn31_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + /* Use pipe context based otg sync logic */ + dc->config.use_pipe_ctx_sync_logic = true; + /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { @@ -2277,14 +2302,27 @@ static bool dcn31_resource_construct( dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL1, &clk_src_regs[1], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = + /*move phypllx_pixclk_resync to dmub next*/ + if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs_b0[2], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs_b0[3], false); + } else { + pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = + pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL3, &clk_src_regs[3], false); + } + pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = dcn30_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL4, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h index 416fe7a721d8..a513363b3326 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h @@ -49,4 +49,35 @@ struct resource_pool *dcn31_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc); +/*temp: B0 specific before switch to dcn313 headers*/ +#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL +#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e +#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1 +#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f +#define 
regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1 + +//PHYPLLF_PIXCLK_RESYNC_CNTL +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L + +//PHYPLLG_PIXCLK_RESYNC_CNTL +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT 0x8 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK 0x00000100L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L +#endif #endif /* _DCN31_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h index 511f9e1159c7..4229369c57f4 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -34,12 +34,12 @@ struct cp_psp_stream_config { uint8_t dig_fe; uint8_t link_enc_idx; uint8_t stream_enc_idx; - uint8_t phy_idx; uint8_t dio_output_idx; - uint8_t dio_output_type; + uint8_t phy_idx; uint8_t assr_enabled; uint8_t mst_enabled; uint8_t dp2_enabled; + uint8_t usb4_enabled; void *dm_stream_ctx; bool dpms_off; }; diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 0fe66b080a03..7f94e3f70d7f 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -59,7 +59,7 @@ void dm_helpers_free_gpu_mem( void *pvMem); enum dc_edid_status dm_helpers_parse_edid_caps( - struct dc_context *ctx, + struct dc_link *link, const struct dc_edid *edid, struct dc_edid_caps *edid_caps); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 246071c72f6b..548cdef8a8ad 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -1576,8 +1576,6 @@ void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib, e2e_pipe_param, num_pipes); - dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency - / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated print__dlg_sys_params_st(mode_lib, &dlg_sys_param); diff --git 
a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index 015e7f2c0b16..0fc9f3e3ffae 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -1577,8 +1577,6 @@ void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib, e2e_pipe_param, num_pipes); - dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency - / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated print__dlg_sys_params_st(mode_lib, &dlg_sys_param); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index 46c433c0bcb0..618f4b682ab1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -1688,8 +1688,6 @@ void dml21_rq_dlg_get_dlg_reg( mode_lib, e2e_pipe_param, num_pipes); - dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency - / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated print__dlg_sys_params_st(mode_lib, &dlg_sys_param); @@ -1711,14 +1709,6 @@ void dml21_rq_dlg_get_dlg_reg( dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx); } -void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param) -{ - memset(arb_param, 0, sizeof(*arb_param)); - arb_param->max_req_outstanding = 256; - arb_param->min_req_outstanding = 68; - arb_param->sat_level_us = 60; -} - static void calculate_ttu_cursor( struct display_mode_lib *mode_lib, double *refcyc_per_req_delivery_pre_cur, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c index aef854270054..747167083dea 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c @@ -1858,8 +1858,6 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib, e2e_pipe_param, num_pipes); - dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency - / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated print__dlg_sys_params_st(mode_lib, &dlg_sys_param); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c index 94c32832a0e7..0a7a33864973 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c @@ -327,7 +327,7 @@ void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info) dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10; } -void dcn301_calculate_wm_and_dlg(struct dc *dc, +void dcn301_calculate_wm_and_dlg_fp(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h index fc7065d17842..774b0fdfc80b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h @@ -34,7 +34,7 @@ void dcn301_fpu_set_wm_ranges(int i, void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info); -void 
dcn301_calculate_wm_and_dlg(struct dc *dc, +void dcn301_calculate_wm_and_dlg_fp(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 7e937bdcea00..6feb23432f8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -422,62 +422,8 @@ static void CalculateUrgentBurstFactor( static void UseMinimumDCFCLK( struct display_mode_lib *mode_lib, - int MaxInterDCNTileRepeaters, int MaxPrefetchMode, - double FinalDRAMClockChangeLatency, - double SREnterPlusExitTime, - int ReturnBusWidth, - int RoundTripPingLatencyCycles, - int ReorderingBytes, - int PixelChunkSizeInKByte, - int MetaChunkSize, - bool GPUVMEnable, - int GPUVMMaxPageTableLevels, - bool HostVMEnable, - int NumberOfActivePlanes, - double HostVMMinPageSize, - int HostVMMaxNonCachedPageTableLevels, - bool DynamicMetadataVMEnabled, - enum immediate_flip_requirement ImmediateFlipRequirement, - bool ProgressiveToInterlaceUnitInOPP, - double MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation, - double PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency, - int VTotal[], - int VActive[], - int DynamicMetadataTransmittedBytes[], - int DynamicMetadataLinesBeforeActiveRequired[], - bool Interlace[], - double RequiredDPPCLK[][2][DC__NUM_DPP__MAX], - double RequiredDISPCLK[][2], - double UrgLatency[], - unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX], - double ProjectedDCFCLKDeepSleep[][2], - double MaximumVStartup[][2][DC__NUM_DPP__MAX], - double TotalVActivePixelBandwidth[][2], - double TotalVActiveCursorBandwidth[][2], - double TotalMetaRowBandwidth[][2], - double TotalDPTERowBandwidth[][2], - unsigned int TotalNumberOfActiveDPP[][2], - unsigned int TotalNumberOfDCCActiveDPP[][2], - int dpte_group_bytes[], - double PrefetchLinesY[][2][DC__NUM_DPP__MAX], - double PrefetchLinesC[][2][DC__NUM_DPP__MAX], - int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX], - int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX], - int BytePerPixelY[], - int BytePerPixelC[], - int HTotal[], - double PixelClock[], - double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX], - double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX], - double MetaRowBytes[][2][DC__NUM_DPP__MAX], - bool DynamicMetadataEnable[], - double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX], - double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX], - double ReadBandwidthLuma[], - double ReadBandwidthChroma[], - double DCFCLKPerState[], - double DCFCLKState[][2]); + int ReorderingBytes); static void CalculatePixelDeliveryTimes( unsigned int NumberOfActivePlanes, @@ -3949,6 +3895,102 @@ static double TruncToValidBPP( return BPP_INVALID; } +static noinline void CalculatePrefetchSchedulePerPlane( + struct display_mode_lib *mode_lib, + double HostVMInefficiencyFactor, + int i, + unsigned j, + unsigned k) +{ + struct vba_vars_st *v = &mode_lib->vba; + Pipe myPipe; + + myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k]; + myPipe.DISPCLK = v->RequiredDISPCLK[i][j]; + myPipe.PixelClock = v->PixelClock[k]; + myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j]; + myPipe.DPPPerPlane = v->NoOfDPP[i][j][k]; + myPipe.ScalerEnabled = v->ScalerEnabled[k]; + myPipe.SourceScan = v->SourceScan[k]; + myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k]; + myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k]; + 
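/* Chroma counterparts of the luma block parameters above. The whole per-plane snapshot lives in this noinline helper, presumably so the large Pipe temporary stays out of the main mode-support loop's stack frame. */ + 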
myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k]; + myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k]; + myPipe.InterlaceEnable = v->Interlace[k]; + myPipe.NumberOfCursors = v->NumberOfCursors[k]; + myPipe.VBlank = v->VTotal[k] - v->VActive[k]; + myPipe.HTotal = v->HTotal[k]; + myPipe.DCCEnable = v->DCCEnable[k]; + myPipe.ODMCombineIsEnabled = v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1 + || v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1; + myPipe.SourcePixelFormat = v->SourcePixelFormat[k]; + myPipe.BytePerPixelY = v->BytePerPixelY[k]; + myPipe.BytePerPixelC = v->BytePerPixelC[k]; + myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP; + v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule( + mode_lib, + HostVMInefficiencyFactor, + &myPipe, + v->DSCDelayPerState[i][k], + v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater, + v->DPPCLKDelaySCL, + v->DPPCLKDelaySCLLBOnly, + v->DPPCLKDelayCNVCCursor, + v->DISPCLKDelaySubtotal, + v->SwathWidthYThisState[k] / v->HRatio[k], + v->OutputFormat[k], + v->MaxInterDCNTileRepeaters, + dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]), + v->MaximumVStartup[i][j][k], + v->GPUVMMaxPageTableLevels, + v->GPUVMEnable, + v->HostVMEnable, + v->HostVMMaxNonCachedPageTableLevels, + v->HostVMMinPageSize, + v->DynamicMetadataEnable[k], + v->DynamicMetadataVMEnabled, + v->DynamicMetadataLinesBeforeActiveRequired[k], + v->DynamicMetadataTransmittedBytes[k], + v->UrgLatency[i], + v->ExtraLatency, + v->TimeCalc, + v->PDEAndMetaPTEBytesPerFrame[i][j][k], + v->MetaRowBytes[i][j][k], + v->DPTEBytesPerRow[i][j][k], + v->PrefetchLinesY[i][j][k], + v->SwathWidthYThisState[k], + v->PrefillY[k], + v->MaxNumSwY[k], + v->PrefetchLinesC[i][j][k], + v->SwathWidthCThisState[k], + v->PrefillC[k], + v->MaxNumSwC[k], + v->swath_width_luma_ub_this_state[k], + v->swath_width_chroma_ub_this_state[k], + v->SwathHeightYThisState[k], + v->SwathHeightCThisState[k], + v->TWait, + &v->DSTXAfterScaler[k], + &v->DSTYAfterScaler[k], + &v->LineTimesForPrefetch[k], + &v->PrefetchBW[k], + &v->LinesForMetaPTE[k], + &v->LinesForMetaAndDPTERow[k], + &v->VRatioPreY[i][j][k], + &v->VRatioPreC[i][j][k], + &v->RequiredPrefetchPixelDataBWLuma[i][j][k], + &v->RequiredPrefetchPixelDataBWChroma[i][j][k], + &v->NoTimeForDynamicMetadata[i][j][k], + &v->Tno_bw[k], + &v->prefetch_vmrow_bw[k], + &v->dummy7[k], + &v->dummy8[k], + &v->dummy13[k], + &v->VUpdateOffsetPix[k], + &v->VUpdateWidthPix[k], + &v->VReadyOffsetPix[k]); +} + void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) { struct vba_vars_st *v = &mode_lib->vba; @@ -5079,66 +5121,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - if (v->UseMinimumRequiredDCFCLK == true) { - UseMinimumDCFCLK( - mode_lib, - v->MaxInterDCNTileRepeaters, - MaxPrefetchMode, - v->DRAMClockChangeLatency, - v->SREnterPlusExitTime, - v->ReturnBusWidth, - v->RoundTripPingLatencyCycles, - ReorderingBytes, - v->PixelChunkSizeInKByte, - v->MetaChunkSize, - v->GPUVMEnable, - v->GPUVMMaxPageTableLevels, - v->HostVMEnable, - v->NumberOfActivePlanes, - v->HostVMMinPageSize, - v->HostVMMaxNonCachedPageTableLevels, - v->DynamicMetadataVMEnabled, - v->ImmediateFlipRequirement[0], - v->ProgressiveToInterlaceUnitInOPP, - v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation, - v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency, - v->VTotal, - v->VActive, - v->DynamicMetadataTransmittedBytes, - 
v->DynamicMetadataLinesBeforeActiveRequired, - v->Interlace, - v->RequiredDPPCLK, - v->RequiredDISPCLK, - v->UrgLatency, - v->NoOfDPP, - v->ProjectedDCFCLKDeepSleep, - v->MaximumVStartup, - v->TotalVActivePixelBandwidth, - v->TotalVActiveCursorBandwidth, - v->TotalMetaRowBandwidth, - v->TotalDPTERowBandwidth, - v->TotalNumberOfActiveDPP, - v->TotalNumberOfDCCActiveDPP, - v->dpte_group_bytes, - v->PrefetchLinesY, - v->PrefetchLinesC, - v->swath_width_luma_ub_all_states, - v->swath_width_chroma_ub_all_states, - v->BytePerPixelY, - v->BytePerPixelC, - v->HTotal, - v->PixelClock, - v->PDEAndMetaPTEBytesPerFrame, - v->DPTEBytesPerRow, - v->MetaRowBytes, - v->DynamicMetadataEnable, - v->VActivePixelBandwidth, - v->VActiveCursorBandwidth, - v->ReadBandwidthLuma, - v->ReadBandwidthChroma, - v->DCFCLKPerState, - v->DCFCLKState); - } + if (v->UseMinimumRequiredDCFCLK == true) + UseMinimumDCFCLK(mode_lib, MaxPrefetchMode, ReorderingBytes); for (i = 0; i < v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { @@ -5276,92 +5260,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->SREnterPlusExitTime); for (k = 0; k < v->NumberOfActivePlanes; k++) { - Pipe myPipe; - - myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k]; - myPipe.DISPCLK = v->RequiredDISPCLK[i][j]; - myPipe.PixelClock = v->PixelClock[k]; - myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j]; - myPipe.DPPPerPlane = v->NoOfDPP[i][j][k]; - myPipe.ScalerEnabled = v->ScalerEnabled[k]; - myPipe.SourceScan = v->SourceScan[k]; - myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k]; - myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k]; - myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k]; - myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k]; - myPipe.InterlaceEnable = v->Interlace[k]; - myPipe.NumberOfCursors = v->NumberOfCursors[k]; - myPipe.VBlank = v->VTotal[k] - v->VActive[k]; - myPipe.HTotal = v->HTotal[k]; - myPipe.DCCEnable = v->DCCEnable[k]; - myPipe.ODMCombineIsEnabled = v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1 - || v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1; - myPipe.SourcePixelFormat = v->SourcePixelFormat[k]; - myPipe.BytePerPixelY = v->BytePerPixelY[k]; - myPipe.BytePerPixelC = v->BytePerPixelC[k]; - myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP; - v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule( - mode_lib, - HostVMInefficiencyFactor, - &myPipe, - v->DSCDelayPerState[i][k], - v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater, - v->DPPCLKDelaySCL, - v->DPPCLKDelaySCLLBOnly, - v->DPPCLKDelayCNVCCursor, - v->DISPCLKDelaySubtotal, - v->SwathWidthYThisState[k] / v->HRatio[k], - v->OutputFormat[k], - v->MaxInterDCNTileRepeaters, - dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]), - v->MaximumVStartup[i][j][k], - v->GPUVMMaxPageTableLevels, - v->GPUVMEnable, - v->HostVMEnable, - v->HostVMMaxNonCachedPageTableLevels, - v->HostVMMinPageSize, - v->DynamicMetadataEnable[k], - v->DynamicMetadataVMEnabled, - v->DynamicMetadataLinesBeforeActiveRequired[k], - v->DynamicMetadataTransmittedBytes[k], - v->UrgLatency[i], - v->ExtraLatency, - v->TimeCalc, - v->PDEAndMetaPTEBytesPerFrame[i][j][k], - v->MetaRowBytes[i][j][k], - v->DPTEBytesPerRow[i][j][k], - v->PrefetchLinesY[i][j][k], - v->SwathWidthYThisState[k], - v->PrefillY[k], - v->MaxNumSwY[k], - v->PrefetchLinesC[i][j][k], - v->SwathWidthCThisState[k], - v->PrefillC[k], - v->MaxNumSwC[k], - v->swath_width_luma_ub_this_state[k], - 
v->swath_width_chroma_ub_this_state[k], - v->SwathHeightYThisState[k], - v->SwathHeightCThisState[k], - v->TWait, - &v->DSTXAfterScaler[k], - &v->DSTYAfterScaler[k], - &v->LineTimesForPrefetch[k], - &v->PrefetchBW[k], - &v->LinesForMetaPTE[k], - &v->LinesForMetaAndDPTERow[k], - &v->VRatioPreY[i][j][k], - &v->VRatioPreC[i][j][k], - &v->RequiredPrefetchPixelDataBWLuma[i][j][k], - &v->RequiredPrefetchPixelDataBWChroma[i][j][k], - &v->NoTimeForDynamicMetadata[i][j][k], - &v->Tno_bw[k], - &v->prefetch_vmrow_bw[k], - &v->dummy7[k], - &v->dummy8[k], - &v->dummy13[k], - &v->VUpdateOffsetPix[k], - &v->VUpdateWidthPix[k], - &v->VReadyOffsetPix[k]); + CalculatePrefetchSchedulePerPlane(mode_lib, + HostVMInefficiencyFactor, + i, j, k); } for (k = 0; k < v->NumberOfActivePlanes; k++) { @@ -7249,69 +7150,15 @@ static double CalculateUrgentLatency( static void UseMinimumDCFCLK( struct display_mode_lib *mode_lib, - int MaxInterDCNTileRepeaters, int MaxPrefetchMode, - double FinalDRAMClockChangeLatency, - double SREnterPlusExitTime, - int ReturnBusWidth, - int RoundTripPingLatencyCycles, - int ReorderingBytes, - int PixelChunkSizeInKByte, - int MetaChunkSize, - bool GPUVMEnable, - int GPUVMMaxPageTableLevels, - bool HostVMEnable, - int NumberOfActivePlanes, - double HostVMMinPageSize, - int HostVMMaxNonCachedPageTableLevels, - bool DynamicMetadataVMEnabled, - enum immediate_flip_requirement ImmediateFlipRequirement, - bool ProgressiveToInterlaceUnitInOPP, - double MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation, - double PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency, - int VTotal[], - int VActive[], - int DynamicMetadataTransmittedBytes[], - int DynamicMetadataLinesBeforeActiveRequired[], - bool Interlace[], - double RequiredDPPCLK[][2][DC__NUM_DPP__MAX], - double RequiredDISPCLK[][2], - double UrgLatency[], - unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX], - double ProjectedDCFCLKDeepSleep[][2], - double MaximumVStartup[][2][DC__NUM_DPP__MAX], - double TotalVActivePixelBandwidth[][2], - double TotalVActiveCursorBandwidth[][2], - double TotalMetaRowBandwidth[][2], - double TotalDPTERowBandwidth[][2], - unsigned int TotalNumberOfActiveDPP[][2], - unsigned int TotalNumberOfDCCActiveDPP[][2], - int dpte_group_bytes[], - double PrefetchLinesY[][2][DC__NUM_DPP__MAX], - double PrefetchLinesC[][2][DC__NUM_DPP__MAX], - int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX], - int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX], - int BytePerPixelY[], - int BytePerPixelC[], - int HTotal[], - double PixelClock[], - double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX], - double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX], - double MetaRowBytes[][2][DC__NUM_DPP__MAX], - bool DynamicMetadataEnable[], - double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX], - double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX], - double ReadBandwidthLuma[], - double ReadBandwidthChroma[], - double DCFCLKPerState[], - double DCFCLKState[][2]) + int ReorderingBytes) { struct vba_vars_st *v = &mode_lib->vba; int dummy1, i, j, k; double NormalEfficiency, dummy2, dummy3; double TotalMaxPrefetchFlipDPTERowBandwidth[DC__VOLTAGE_STATES][2]; - NormalEfficiency = PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0; + NormalEfficiency = v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0; for (i = 0; i < v->soc.num_states; ++i) { for (j = 0; j <= 1; ++j) { double PixelDCFCLKCyclesRequiredInPrefetch[DC__NUM_DPP__MAX]; @@ -7329,61 +7176,61 @@ static void UseMinimumDCFCLK( 
double MinimumTvmPlus2Tr0; TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = 0; - for (k = 0; k < NumberOfActivePlanes; ++k) { + for (k = 0; k < v->NumberOfActivePlanes; ++k) { TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = TotalMaxPrefetchFlipDPTERowBandwidth[i][j] - + NoOfDPP[i][j][k] * DPTEBytesPerRow[i][j][k] / (15.75 * HTotal[k] / PixelClock[k]); + + v->NoOfDPP[i][j][k] * v->DPTEBytesPerRow[i][j][k] / (15.75 * v->HTotal[k] / v->PixelClock[k]); } - for (k = 0; k <= NumberOfActivePlanes - 1; ++k) { - NoOfDPPState[k] = NoOfDPP[i][j][k]; + for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) { + NoOfDPPState[k] = v->NoOfDPP[i][j][k]; } - MinimumTWait = CalculateTWait(MaxPrefetchMode, FinalDRAMClockChangeLatency, UrgLatency[i], SREnterPlusExitTime); - NonDPTEBandwidth = TotalVActivePixelBandwidth[i][j] + TotalVActiveCursorBandwidth[i][j] + TotalMetaRowBandwidth[i][j]; - DPTEBandwidth = (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) ? - TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : TotalDPTERowBandwidth[i][j]; + MinimumTWait = CalculateTWait(MaxPrefetchMode, v->FinalDRAMClockChangeLatency, v->UrgLatency[i], v->SREnterPlusExitTime); + NonDPTEBandwidth = v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j]; + DPTEBandwidth = (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) ? + TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : v->TotalDPTERowBandwidth[i][j]; DCFCLKRequiredForAverageBandwidth = dml_max3( - ProjectedDCFCLKDeepSleep[i][j], - (NonDPTEBandwidth + TotalDPTERowBandwidth[i][j]) / ReturnBusWidth - / (MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100), - (NonDPTEBandwidth + DPTEBandwidth / NormalEfficiency) / NormalEfficiency / ReturnBusWidth); + v->ProjectedDCFCLKDeepSleep[i][j], + (NonDPTEBandwidth + v->TotalDPTERowBandwidth[i][j]) / v->ReturnBusWidth + / (v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100), + (NonDPTEBandwidth + DPTEBandwidth / NormalEfficiency) / NormalEfficiency / v->ReturnBusWidth); ExtraLatencyBytes = CalculateExtraLatencyBytes( ReorderingBytes, - TotalNumberOfActiveDPP[i][j], - PixelChunkSizeInKByte, - TotalNumberOfDCCActiveDPP[i][j], - MetaChunkSize, - GPUVMEnable, - HostVMEnable, - NumberOfActivePlanes, + v->TotalNumberOfActiveDPP[i][j], + v->PixelChunkSizeInKByte, + v->TotalNumberOfDCCActiveDPP[i][j], + v->MetaChunkSize, + v->GPUVMEnable, + v->HostVMEnable, + v->NumberOfActivePlanes, NoOfDPPState, - dpte_group_bytes, + v->dpte_group_bytes, 1, - HostVMMinPageSize, - HostVMMaxNonCachedPageTableLevels); - ExtraLatencyCycles = RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__ + ExtraLatencyBytes / NormalEfficiency / ReturnBusWidth; - for (k = 0; k < NumberOfActivePlanes; ++k) { + v->HostVMMinPageSize, + v->HostVMMaxNonCachedPageTableLevels); + ExtraLatencyCycles = v->RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__ + ExtraLatencyBytes / NormalEfficiency / v->ReturnBusWidth; + for (k = 0; k < v->NumberOfActivePlanes; ++k) { double DCFCLKCyclesRequiredInPrefetch; double ExpectedPrefetchBWAcceleration; double PrefetchTime; - PixelDCFCLKCyclesRequiredInPrefetch[k] = (PrefetchLinesY[i][j][k] * swath_width_luma_ub_all_states[i][j][k] * BytePerPixelY[k] - + PrefetchLinesC[i][j][k] * swath_width_chroma_ub_all_states[i][j][k] * BytePerPixelC[k]) / NormalEfficiency / ReturnBusWidth; + PixelDCFCLKCyclesRequiredInPrefetch[k] = (v->PrefetchLinesY[i][j][k] * 
v->swath_width_luma_ub_all_states[i][j][k] * v->BytePerPixelY[k] + + v->PrefetchLinesC[i][j][k] * v->swath_width_chroma_ub_all_states[i][j][k] * v->BytePerPixelC[k]) / NormalEfficiency / v->ReturnBusWidth; DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k] - + PDEAndMetaPTEBytesPerFrame[i][j][k] / NormalEfficiency / NormalEfficiency / ReturnBusWidth * (GPUVMMaxPageTableLevels > 2 ? 1 : 0) - + 2 * DPTEBytesPerRow[i][j][k] / NormalEfficiency / NormalEfficiency / ReturnBusWidth - + 2 * MetaRowBytes[i][j][k] / NormalEfficiency / ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k]; - PrefetchPixelLinesTime[k] = dml_max(PrefetchLinesY[i][j][k], PrefetchLinesC[i][j][k]) * HTotal[k] / PixelClock[k]; - ExpectedPrefetchBWAcceleration = (VActivePixelBandwidth[i][j][k] + VActiveCursorBandwidth[i][j][k]) - / (ReadBandwidthLuma[k] + ReadBandwidthChroma[k]); + + v->PDEAndMetaPTEBytesPerFrame[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth * (v->GPUVMMaxPageTableLevels > 2 ? 1 : 0) + + 2 * v->DPTEBytesPerRow[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth + + 2 * v->MetaRowBytes[i][j][k] / NormalEfficiency / v->ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k]; + PrefetchPixelLinesTime[k] = dml_max(v->PrefetchLinesY[i][j][k], v->PrefetchLinesC[i][j][k]) * v->HTotal[k] / v->PixelClock[k]; + ExpectedPrefetchBWAcceleration = (v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k]) + / (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k]); DynamicMetadataVMExtraLatency[k] = - (GPUVMEnable == true && DynamicMetadataEnable[k] == true && DynamicMetadataVMEnabled == true) ? - UrgLatency[i] * GPUVMMaxPageTableLevels * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0; - PrefetchTime = (MaximumVStartup[i][j][k] - 1) * HTotal[k] / PixelClock[k] - MinimumTWait - - UrgLatency[i] - * ((GPUVMMaxPageTableLevels <= 2 ? GPUVMMaxPageTableLevels : GPUVMMaxPageTableLevels - 2) - * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) + (v->GPUVMEnable == true && v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true) ? + v->UrgLatency[i] * v->GPUVMMaxPageTableLevels * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0; + PrefetchTime = (v->MaximumVStartup[i][j][k] - 1) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait + - v->UrgLatency[i] + * ((v->GPUVMMaxPageTableLevels <= 2 ? v->GPUVMMaxPageTableLevels : v->GPUVMMaxPageTableLevels - 2) + * (v->HostVMEnable == true ? 
v->HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) - DynamicMetadataVMExtraLatency[k]; if (PrefetchTime > 0) { @@ -7392,14 +7239,14 @@ static void UseMinimumDCFCLK( / (PrefetchTime * PixelDCFCLKCyclesRequiredInPrefetch[k] / DCFCLKCyclesRequiredInPrefetch); DCFCLKRequiredForPeakBandwidthPerPlane[k] = NoOfDPPState[k] * PixelDCFCLKCyclesRequiredInPrefetch[k] / PrefetchPixelLinesTime[k] * dml_max(1.0, ExpectedVRatioPrefetch) * dml_max(1.0, ExpectedVRatioPrefetch / 4) * ExpectedPrefetchBWAcceleration; - if (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) { + if (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) { DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKRequiredForPeakBandwidthPerPlane[k] - + NoOfDPPState[k] * DPTEBandwidth / NormalEfficiency / NormalEfficiency / ReturnBusWidth; + + NoOfDPPState[k] * DPTEBandwidth / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth; } } else { - DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i]; + DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i]; } - if (DynamicMetadataEnable[k] == true) { + if (v->DynamicMetadataEnable[k] == true) { double TSetupPipe; double TdmbfPipe; double TdmsksPipe; @@ -7407,17 +7254,17 @@ static void UseMinimumDCFCLK( double AllowedTimeForUrgentExtraLatency; CalculateVupdateAndDynamicMetadataParameters( - MaxInterDCNTileRepeaters, - RequiredDPPCLK[i][j][k], - RequiredDISPCLK[i][j], - ProjectedDCFCLKDeepSleep[i][j], - PixelClock[k], - HTotal[k], - VTotal[k] - VActive[k], - DynamicMetadataTransmittedBytes[k], - DynamicMetadataLinesBeforeActiveRequired[k], - Interlace[k], - ProgressiveToInterlaceUnitInOPP, + v->MaxInterDCNTileRepeaters, + v->RequiredDPPCLK[i][j][k], + v->RequiredDISPCLK[i][j], + v->ProjectedDCFCLKDeepSleep[i][j], + v->PixelClock[k], + v->HTotal[k], + v->VTotal[k] - v->VActive[k], + v->DynamicMetadataTransmittedBytes[k], + v->DynamicMetadataLinesBeforeActiveRequired[k], + v->Interlace[k], + v->ProgressiveToInterlaceUnitInOPP, &TSetupPipe, &TdmbfPipe, &TdmecPipe, @@ -7425,31 +7272,31 @@ static void UseMinimumDCFCLK( &dummy1, &dummy2, &dummy3); - AllowedTimeForUrgentExtraLatency = MaximumVStartup[i][j][k] * HTotal[k] / PixelClock[k] - MinimumTWait - TSetupPipe - TdmbfPipe - TdmecPipe + AllowedTimeForUrgentExtraLatency = v->MaximumVStartup[i][j][k] * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - TSetupPipe - TdmbfPipe - TdmecPipe - TdmsksPipe - DynamicMetadataVMExtraLatency[k]; if (AllowedTimeForUrgentExtraLatency > 0) { DCFCLKRequiredForPeakBandwidthPerPlane[k] = dml_max( DCFCLKRequiredForPeakBandwidthPerPlane[k], ExtraLatencyCycles / AllowedTimeForUrgentExtraLatency); } else { - DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i]; + DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i]; } } } DCFCLKRequiredForPeakBandwidth = 0; - for (k = 0; k <= NumberOfActivePlanes - 1; ++k) { + for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) { DCFCLKRequiredForPeakBandwidth = DCFCLKRequiredForPeakBandwidth + DCFCLKRequiredForPeakBandwidthPerPlane[k]; } - MinimumTvmPlus2Tr0 = UrgLatency[i] - * (GPUVMEnable == true ? - (HostVMEnable == true ? - (GPUVMMaxPageTableLevels + 2) * (HostVMMaxNonCachedPageTableLevels + 1) - 1 : GPUVMMaxPageTableLevels + 1) : + MinimumTvmPlus2Tr0 = v->UrgLatency[i] + * (v->GPUVMEnable == true ? + (v->HostVMEnable == true ? 
+ (v->GPUVMMaxPageTableLevels + 2) * (v->HostVMMaxNonCachedPageTableLevels + 1) - 1 : v->GPUVMMaxPageTableLevels + 1) : 0); - for (k = 0; k < NumberOfActivePlanes; ++k) { + for (k = 0; k < v->NumberOfActivePlanes; ++k) { double MaximumTvmPlus2Tr0PlusTsw; - MaximumTvmPlus2Tr0PlusTsw = (MaximumVStartup[i][j][k] - 2) * HTotal[k] / PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k]; + MaximumTvmPlus2Tr0PlusTsw = (v->MaximumVStartup[i][j][k] - 2) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k]; if (MaximumTvmPlus2Tr0PlusTsw <= MinimumTvmPlus2Tr0 + PrefetchPixelLinesTime[k] / 4) { - DCFCLKRequiredForPeakBandwidth = DCFCLKPerState[i]; + DCFCLKRequiredForPeakBandwidth = v->DCFCLKPerState[i]; } else { DCFCLKRequiredForPeakBandwidth = dml_max3( DCFCLKRequiredForPeakBandwidth, @@ -7457,7 +7304,7 @@ static void UseMinimumDCFCLK( (2 * ExtraLatencyCycles + PixelDCFCLKCyclesRequiredInPrefetch[k]) / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0)); } } - DCFCLKState[i][j] = dml_min(DCFCLKPerState[i], 1.05 * dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth)); + v->DCFCLKState[i][j] = dml_min(v->DCFCLKPerState[i], 1.05 * dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth)); } } } diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index d46a2733024c..8f9f1d607f7c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -546,7 +546,6 @@ struct _vcs_dpi_display_dlg_sys_params_st { double t_sr_wm_us; double t_extra_us; double mem_trip_us; - double t_srx_delay_us; double deepsleep_dcfclk_mhz; double total_flip_bw; unsigned int total_flip_bytes; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c index 71ea503cb32f..412e75eb4704 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c @@ -142,9 +142,6 @@ void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _v dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param->t_sr_wm_us); dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param->t_extra_us); dml_print( - "DML_RQ_DLG_CALC: t_srx_delay_us = %3.2f\n", - dlg_sys_param->t_srx_delay_us); - dml_print( "DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n", dlg_sys_param->deepsleep_dcfclk_mhz); dml_print( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index 59dc2c5b58dd..3df559c591f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -1331,10 +1331,6 @@ void dml1_rq_dlg_get_dlg_params( if (dual_plane) DTRACE("DLG: %s: swath_height_c = %d", __func__, swath_height_c); - DTRACE( - "DLG: %s: t_srx_delay_us = %3.2f", - __func__, - (double) dlg_sys_param->t_srx_delay_us); DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us); DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset); DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c index ece34b0b8a46..789f7562cdc7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c +++ 
b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c @@ -1223,8 +1223,8 @@ static void dml_full_validate_bw_helper(struct dc *dc, *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false); *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); if (*vlevel < context->bw_ctx.dml.soc.num_states) { - memset(split, 0, sizeof(split)); - memset(merge, 0, sizeof(merge)); + memset(split, 0, MAX_PIPES * sizeof(*split)); + memset(merge, 0, MAX_PIPES * sizeof(*merge)); *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge); } @@ -1274,7 +1274,7 @@ static void dcn20_adjust_adaptive_sync_v_startup( static bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) { return (pipe_ctx->stream_res.hpo_dp_stream_enc && - pipe_ctx->stream->link->hpo_dp_link_enc && + pipe_ctx->link_res.hpo_dp_link_enc && dc_is_dp_signal(pipe_ctx->stream->signal)); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c index 122ba291a7ef..ec636d06e18c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c @@ -93,7 +93,7 @@ static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, TABLE_CASE(420, 12, min); } - if (table == 0) + if (!table) return; index = (bpp - table[0].bpp) * 2; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index d34b0b0eea65..444182a97e6e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -53,6 +53,8 @@ enum dc_status { DC_NOT_SUPPORTED = 24, DC_UNSUPPORTED_VALUE = 25, + DC_NO_LINK_ENC_RESOURCE = 26, + DC_ERROR_UNEXPECTED = -1 }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 6fc6488c54c0..943240e2809e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -334,6 +334,20 @@ struct plane_resource { struct dcn_fe_bandwidth bw; }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +#define LINK_RES_HPO_DP_REC_MAP__MASK 0xFFFF +#define LINK_RES_HPO_DP_REC_MAP__SHIFT 0 +#endif + +/* all mappable hardware resources used to enable a link */ +struct link_resource { +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_link_encoder *hpo_dp_link_enc; +#else + void *dummy; +#endif +}; + union pipe_update_flags { struct { uint32_t enable : 1; @@ -361,12 +375,14 @@ struct pipe_ctx { struct plane_resource plane_res; struct stream_resource stream_res; + struct link_resource link_res; struct clock_source *clock_source; struct pll_settings pll_settings; uint8_t pipe_idx; + uint8_t pipe_idx_syncd; struct pipe_ctx *top_pipe; struct pipe_ctx *bottom_pipe; @@ -411,6 +427,8 @@ struct resource_context { struct link_enc_cfg_context link_enc_cfg_ctx; #if defined(CONFIG_DRM_AMD_DC_DCN) bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS]; + unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS]; + int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS]; #endif #if defined(CONFIG_DRM_AMD_DC_DCN) bool is_mpc_3dlut_acquired[MAX_PIPES]; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index a6d3d859754a..cd52813a8432 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -56,16 +56,19 @@ enum { bool dp_verify_link_cap( struct dc_link *link, 
+ const struct link_resource *link_res, struct dc_link_settings *known_limit_link_setting, int *fail_count); bool dp_verify_link_cap_with_retries( struct dc_link *link, + const struct link_resource *link_res, struct dc_link_settings *known_limit_link_setting, int attempts); bool dp_verify_mst_link_cap( - struct dc_link *link); + struct dc_link *link, + const struct link_resource *link_res); bool dp_validate_mode_timing( struct dc_link *link, @@ -168,8 +171,9 @@ uint8_t dc_dp_initialize_scrambling_data_symbols( struct dc_link *link, enum dc_dp_training_pattern pattern); -enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready); +enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready); void dp_set_fec_enable(struct dc_link *link, bool enable); +struct link_encoder *dp_get_link_enc(struct dc_link *link); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update); void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); @@ -210,11 +214,16 @@ bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link); struct fixed31_32 calculate_sst_avg_time_slots_per_mtp( const struct dc_stream_state *stream, const struct dc_link *link); -void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings); -void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal); +void enable_dp_hpo_output(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings); +void disable_dp_hpo_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable); bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx); void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link); bool dp_retrieve_lttpr_cap(struct dc_link *link); +void edp_panel_backlight_power_on(struct dc_link *link); #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h index 974d703e3771..74dafd0f9d3d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h @@ -91,8 +91,9 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link); * DPIA equivalent of dc_link_dp_perfrorm_link_training. * Aborts link training upon detection of sink unplug. 
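 * The link_res pointer threaded through this and the other prototypes in these headers carries the mappable hardware used to enable the link for this session (currently the HPO DP link encoder; see the new struct link_resource in core_types.h).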
*/ -enum link_training_result -dc_link_dpia_perform_link_training(struct dc_link *link, +enum link_training_result dc_link_dpia_perform_link_training( + struct dc_link *link, + const struct link_resource *link_res, const struct dc_link_settings *link_setting, bool skip_video_pattern); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index a17e5de3b100..c920c4b6077d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -211,6 +211,8 @@ struct dummy_pstate_entry { struct clk_bw_params { unsigned int vram_type; unsigned int num_channels; + unsigned int dispclk_vco_khz; + unsigned int dc_mode_softmax_memclk; struct clk_limit_table clk_table; struct wm_table wm_table; struct dummy_pstate_entry dummy_pstate_table[4]; @@ -261,6 +263,10 @@ struct clk_mgr_funcs { /* Send message to PMFW to set hard max memclk frequency to highest DPM */ void (*set_hard_max_memclk)(struct clk_mgr *clk_mgr); + /* Custom set a memclk freq range*/ + void (*set_max_memclk)(struct clk_mgr *clk_mgr, unsigned int memclk_mhz); + void (*set_min_memclk)(struct clk_mgr *clk_mgr, unsigned int memclk_mhz); + /* Get current memclk states from PMFW, update relevant structures */ void (*get_memclk_states_from_smu)(struct clk_mgr *clk_mgr); @@ -274,6 +280,7 @@ struct clk_mgr { struct dc_clocks clks; bool psr_allow_active_cache; bool force_smu_not_present; + bool dc_mode_softmax_enabled; int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes int dentist_vco_freq_khz; struct clk_state_registers_and_bypass boot_snapshot; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 80e1a32bc63d..2c031586f4e6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -139,6 +139,7 @@ struct hubp_funcs { bool (*hubp_is_flip_pending)(struct hubp *hubp); void (*set_blank)(struct hubp *hubp, bool blank); + void (*set_blank_regs)(struct hubp *hubp, bool blank); void (*set_hubp_blank_en)(struct hubp *hubp, bool blank); void (*set_cursor_attributes)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index bb0e91756ddd..2ce15cd10d80 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -268,7 +268,8 @@ struct hpo_dp_link_encoder_funcs { void (*enable_link_phy)(struct hpo_dp_link_encoder *enc, const struct dc_link_settings *link_settings, - enum transmitter transmitter); + enum transmitter transmitter, + enum hpd_source_id hpd_source); void (*disable_link_phy)(struct hpo_dp_link_encoder *link_enc, enum signal_type signal); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index d50f4bd06b5d..05053f3b4ab7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -64,6 +64,7 @@ struct hw_sequencer_funcs { enum dc_status (*apply_ctx_to_hw)(struct dc *dc, struct dc_state *context); void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank); void (*apply_ctx_for_surface)(struct dc *dc, const struct dc_stream_state *stream, int num_planes, struct dc_state *context); diff --git 
a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index ba664bc49595..69d63763a10e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -32,6 +32,7 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb, void dp_enable_link_phy( struct dc_link *link, + const struct link_resource *link_res, enum signal_type signal, enum clock_source_id clock_source, const struct dc_link_settings *link_settings); @@ -42,22 +43,27 @@ void edp_add_delay_for_T9(struct dc_link *link); bool edp_receiver_ready_T9(struct dc_link *link); bool edp_receiver_ready_T7(struct dc_link *link); -void dp_disable_link_phy(struct dc_link *link, enum signal_type signal); +void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, + enum signal_type signal); -void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal); +void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, + enum signal_type signal); bool dp_set_hw_training_pattern( struct dc_link *link, + const struct link_resource *link_res, enum dc_dp_training_pattern pattern, uint32_t offset); void dp_set_hw_lane_settings( struct dc_link *link, + const struct link_resource *link_res, const struct link_training_settings *link_settings, uint32_t offset); void dp_set_hw_test_pattern( struct dc_link *link, + const struct link_resource *link_res, enum dp_test_pattern test_pattern, uint8_t *custom_pattern, uint32_t custom_pattern_size); diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 372c0898facd..dbfe6690ded8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -34,6 +34,10 @@ #define MEMORY_TYPE_HBM 2 +#define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0) +#define GET_PIPE_SYNCD_FROM_PIPE(pipe) ((pipe)->pipe_idx_syncd & 0x7F) +#define SET_PIPE_SYNCD_TO_PIPE(pipe, pipe_syncd) ((pipe)->pipe_idx_syncd = (0x80 | pipe_syncd)) + enum dce_version resource_parse_asic_id( struct hw_asic_id asic_id); @@ -202,8 +206,19 @@ int get_num_mpc_splits(struct pipe_ctx *pipe); int get_num_odm_splits(struct pipe_ctx *pipe); #if defined(CONFIG_DRM_AMD_DC_DCN) -struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder( - const struct resource_pool *pool); +struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( + const struct resource_context *res_ctx, + const struct resource_pool *pool, + const struct dc_link *link); #endif +void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, + struct dc_state *context); + +void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc, + struct dc_state *context, + uint8_t disabled_master_pipe_idx); + +uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter); + #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c index 378cc11aa047..6b5fedd9ace0 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c @@ -185,16 +185,18 @@ bool dal_irq_service_dummy_set(struct irq_service *irq_service, const struct irq_source_info *info, bool enable) { - DC_LOG_ERROR("%s: called for non-implemented irq source\n", - __func__); + DC_LOG_ERROR("%s: called for 
non-implemented irq source, src_id=%u, ext_id=%u\n", + __func__, info->src_id, info->ext_id); + return false; } bool dal_irq_service_dummy_ack(struct irq_service *irq_service, const struct irq_source_info *info) { - DC_LOG_ERROR("%s: called for non-implemented irq source\n", - __func__); + DC_LOG_ERROR("%s: called for non-implemented irq source, src_id=%u, ext_id=%u\n", + __func__, info->src_id, info->ext_id); + return false; } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c index 34f43cb650f8..cf072e2347d3 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c @@ -40,10 +40,9 @@ #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn10( - struct irq_service *irq_service, - uint32_t src_id, - uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) { switch (src_id) { case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index 9ccafe007b23..c4b067d01895 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -132,31 +132,6 @@ enum dc_irq_source to_dal_irq_source_dcn20( } } -uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source) -{ - const struct irq_source_info *info; - uint32_t addr; - uint32_t value; - uint32_t current_status; - - info = find_irq_source_info(irq_service, source); - if (!info) - return 0; - - addr = info->status_reg; - if (!addr) - return 0; - - value = dm_read_reg(irq_service->ctx, addr); - current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE); - - return current_status; -} - static bool hpd_ack( struct irq_service *irq_service, const struct irq_source_info *info) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h index 4d69ab24ca25..aee4b37999f1 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h @@ -31,6 +31,4 @@ struct irq_service *dal_irq_service_dcn20_create( struct irq_service_init_data *init_data); -uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source); - #endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c index a47f68634fc3..aa708b61142f 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c @@ -39,10 +39,9 @@ #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn201( - struct irq_service *irq_service, - uint32_t src_id, - uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn201(struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) { switch (src_id) { case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 78940cb20e10..0f15bcada4e9 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ 
b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -40,10 +40,9 @@ #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn21( - struct irq_service *irq_service, - uint32_t src_id, - uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) { switch (src_id) { case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: @@ -135,31 +134,6 @@ enum dc_irq_source to_dal_irq_source_dcn21( return DC_IRQ_SOURCE_INVALID; } -uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source) -{ - const struct irq_source_info *info; - uint32_t addr; - uint32_t value; - uint32_t current_status; - - info = find_irq_source_info(irq_service, source); - if (!info) - return 0; - - addr = info->status_reg; - if (!addr) - return 0; - - value = dm_read_reg(irq_service->ctx, addr); - current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE); - - return current_status; -} - static bool hpd_ack( struct irq_service *irq_service, const struct irq_source_info *info) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h index 616470e32380..da2bd0e93d7a 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h @@ -31,6 +31,4 @@ struct irq_service *dal_irq_service_dcn21_create( struct irq_service_init_data *init_data); -uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source); - #endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c index 38e0ade60c7b..1b88e4e627fd 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c @@ -36,10 +36,9 @@ #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn31( - struct irq_service *irq_service, - uint32_t src_id, - uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) { switch (src_id) { case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index 4db1133e4466..a2a4fbeb83f8 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service) *irq_service = NULL; } -const struct irq_source_info *find_irq_source_info( +static const struct irq_source_info *find_irq_source_info( struct irq_service *irq_service, enum dc_irq_source source) { diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h index e60b82480093..dbfcb096eedd 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h @@ -69,10 +69,6 @@ struct irq_service { const struct irq_service_funcs *funcs; }; -const struct irq_source_info *find_irq_source_info( - struct irq_service *irq_service, - enum dc_irq_source source); - void dal_irq_service_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data); diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 
7eec65090862..873ecd04e01d 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -46,10 +46,10 @@ /* Firmware versioning. */ #ifdef DMUB_EXPOSE_VERSION -#define DMUB_FW_VERSION_GIT_HASH 0x465e619a +#define DMUB_FW_VERSION_GIT_HASH 0xbaf06b95 #define DMUB_FW_VERSION_MAJOR 0 #define DMUB_FW_VERSION_MINOR 0 -#define DMUB_FW_VERSION_REVISION 94 +#define DMUB_FW_VERSION_REVISION 98 #define DMUB_FW_VERSION_TEST 0 #define DMUB_FW_VERSION_VBIOS 0 #define DMUB_FW_VERSION_HOTFIX 0 @@ -408,7 +408,14 @@ enum dmub_cmd_vbios_type { * Enables or disables power gating. */ DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3, + /** + * Controls embedded panels. + */ DMUB_CMD__VBIOS_LVTMA_CONTROL = 15, + /** + * Query DP alt status on a transmitter. + */ + DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26, }; //============================================================================== @@ -2394,6 +2401,24 @@ struct dmub_rb_cmd_lvtma_control { }; /** + * Data passed in/out in a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command. + */ +struct dmub_rb_cmd_transmitter_query_dp_alt_data { + uint8_t phy_id; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */ + uint8_t is_usb; /**< is phy is usb */ + uint8_t is_dp_alt_disable; /**< is dp alt disable */ + uint8_t is_dp4; /**< is dp in 4 lane */ +}; + +/** + * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command. + */ +struct dmub_rb_cmd_transmitter_query_dp_alt { + struct dmub_cmd_header header; /**< header */ + struct dmub_rb_cmd_transmitter_query_dp_alt_data data; /**< payload */ +}; + +/** * Maximum number of bytes a chunk sent to DMUB for parsing */ #define DMUB_EDID_CEA_DATA_CHUNK_BYTES 8 @@ -2404,7 +2429,7 @@ struct dmub_rb_cmd_lvtma_control { struct dmub_cmd_send_edid_cea { uint16_t offset; /**< offset into the CEA block */ uint8_t length; /**< number of bytes in payload to copy as part of CEA block */ - uint16_t total_length; /**< total length of the CEA block */ + uint16_t cea_total_length; /**< total length of the CEA block */ uint8_t payload[DMUB_EDID_CEA_DATA_CHUNK_BYTES]; /**< data chunk of the CEA block */ uint8_t pad[3]; /**< padding and for future expansion */ }; @@ -2601,6 +2626,10 @@ union dmub_rb_cmd { */ struct dmub_rb_cmd_lvtma_control lvtma_control; /** + * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command. + */ + struct dmub_rb_cmd_transmitter_query_dp_alt query_dp_alt; + /** * Definition of a DMUB_CMD__DPIA_DIG1_CONTROL command. 
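For orientation, a driver-side consumer of the new transmitter query would fill the matching member of union dmub_rb_cmd and round-trip it through the DMUB service. A sketch under stated assumptions: the reply-capable helper dc_dmub_srv_cmd_with_reply_data() and the dc_dmub_srv pointer come from the DC callers of this interface, not from this hunk.

    union dmub_rb_cmd cmd;
    uint8_t phy_id = 0; /* UNIPHYA, for example */

    memset(&cmd, 0, sizeof(cmd));
    cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS;
    cmd.query_dp_alt.header.sub_type =
            DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
    cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.query_dp_alt.data);
    cmd.query_dp_alt.data.phy_id = phy_id; /* 0 = UNIPHYA ... 5 = UNIPHYF */

    /* assumed helper: sends the command, reads the reply back in place */
    if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd))
            return false;

    bool dp_alt_active = !cmd.query_dp_alt.data.is_dp_alt_disable;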
*/ struct dmub_rb_cmd_dig1_dpia_control dig1_dpia_control; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index f673a1c1777a..9280f2abd973 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -852,7 +852,7 @@ bool dmub_srv_should_detect(struct dmub_srv *dmub) enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub) { - if (!dmub->hw_init || dmub->hw_funcs.clear_inbox0_ack_register) + if (!dmub->hw_init || !dmub->hw_funcs.clear_inbox0_ack_register) return DMUB_STATUS_INVALID; dmub->hw_funcs.clear_inbox0_ack_register(dmub); @@ -878,7 +878,7 @@ enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t ti enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data) { - if (!dmub->hw_init || dmub->hw_funcs.send_inbox0_cmd) + if (!dmub->hw_init || !dmub->hw_funcs.send_inbox0_cmd) return DMUB_STATUS_INVALID; dmub->hw_funcs.send_inbox0_cmd(dmub, data); diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index 6d648c889866..f7420c3f5672 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -104,6 +104,7 @@ struct mod_hdcp_displayport { uint8_t rev; uint8_t assr_enabled; uint8_t mst_enabled; + uint8_t usb4_enabled; }; struct mod_hdcp_hdmi { @@ -249,7 +250,6 @@ struct mod_hdcp_link { uint8_t ddc_line; uint8_t link_enc_idx; uint8_t phy_idx; - uint8_t dio_output_type; uint8_t dio_output_id; uint8_t hdcp_supported_informational; union { diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 4b9e68a79f06..f57a1478f0fe 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -231,6 +231,8 @@ enum DC_FEATURE_MASK { DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1 DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default + DC_DISABLE_LTTPR_DP1_4A = (1 << 5), //0x20, disabled by default + DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default }; enum DC_DEBUG_MASK { diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h index 79eae0256dbd..8072b0a6376d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h @@ -20753,8 +20753,6 @@ // addressBlock: nbio_nbif0_gdc_GDCDEC // base address: 0xd0000000 -#define regGDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL 0x2ffc0eda -#define regGDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL_BASE_IDX 5 #define regGDC1_NGDC_SDP_PORT_CTRL 0x2ffc0ee2 #define regGDC1_NGDC_SDP_PORT_CTRL_BASE_IDX 5 #define regGDC1_SHUB_REGS_IF_CTL 0x2ffc0ee3 diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h index e27fdc0c643c..54b0e4623971 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h @@ -1,4 +1,3 @@ - /* * Copyright (C) 2020 Advanced Micro Devices, Inc. 
* @@ -108541,17 +108540,6 @@ // addressBlock: nbio_nbif0_gdc_GDCDEC -//GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL -#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN0_FAST_WRITE_RESPONSE_EN__SHIFT 0x0 -#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN1_FAST_WRITE_RESPONSE_EN__SHIFT 0x1 -#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN2_FAST_WRITE_RESPONSE_EN__SHIFT 0x2 -#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN3_FAST_WRITE_RESPONSE_EN__SHIFT 0x3 -#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__FWR_NORMAL_ARB_MODE__SHIFT 0x10 -#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN0_FAST_WRITE_RESPONSE_EN_MASK 0x00000001L -#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN1_FAST_WRITE_RESPONSE_EN_MASK 0x00000002L -#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN2_FAST_WRITE_RESPONSE_EN_MASK 0x00000004L -#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__LOGAN3_FAST_WRITE_RESPONSE_EN_MASK 0x00000008L -#define GDC1_LOGAN_FAST_WRITE_RESPONSE_CNTL__FWR_NORMAL_ARB_MODE_MASK 0x00010000L //GDC1_NGDC_SDP_PORT_CTRL #define GDC1_NGDC_SDP_PORT_CTRL__SDP_DISCON_HYSTERESIS__SHIFT 0x0 #define GDC1_NGDC_SDP_PORT_CTRL__NGDC_OBFF_HW_URGENT_EARLY_WAKEUP_EN__SHIFT 0xf diff --git a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h index 9cb5f3631c60..ce79e5de8ce3 100644 --- a/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h +++ b/drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h @@ -25,15 +25,15 @@ #define MAX_SEGMENT 5 -struct IP_BASE_INSTANCE +struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; -}; - -struct IP_BASE +} __maybe_unused; + +struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; -}; +} __maybe_unused; static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C00, 0, 0, 0, 0 } }, diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h index 7ec4331e67f2..a486769b66c6 100644 --- a/drivers/gpu/drm/amd/include/discovery.h +++ b/drivers/gpu/drm/amd/include/discovery.h @@ -143,6 +143,55 @@ struct gc_info_v1_0 { uint32_t gc_num_gl2a; }; +struct gc_info_v1_1 { + struct gpu_info_header header; + + uint32_t gc_num_se; + uint32_t gc_num_wgp0_per_sa; + uint32_t gc_num_wgp1_per_sa; + uint32_t gc_num_rb_per_se; + uint32_t gc_num_gl2c; + uint32_t gc_num_gprs; + uint32_t gc_num_max_gs_thds; + uint32_t gc_gs_table_depth; + uint32_t gc_gsprim_buff_depth; + uint32_t gc_parameter_cache_depth; + uint32_t gc_double_offchip_lds_buffer; + uint32_t gc_wave_size; + uint32_t gc_max_waves_per_simd; + uint32_t gc_max_scratch_slots_per_cu; + uint32_t gc_lds_size; + uint32_t gc_num_sc_per_se; + uint32_t gc_num_sa_per_se; + uint32_t gc_num_packer_per_sc; + uint32_t gc_num_gl2a; + uint32_t gc_num_tcp_per_sa; + uint32_t gc_num_sdp_interface; + uint32_t gc_num_tcps; +}; + +struct gc_info_v2_0 { + struct gpu_info_header header; + + uint32_t gc_num_se; + uint32_t gc_num_cu_per_sh; + uint32_t gc_num_sh_per_se; + uint32_t gc_num_rb_per_se; + uint32_t gc_num_tccs; + uint32_t gc_num_gprs; + uint32_t gc_num_max_gs_thds; + uint32_t gc_gs_table_depth; + uint32_t gc_gsprim_buff_depth; + uint32_t gc_parameter_cache_depth; + uint32_t gc_double_offchip_lds_buffer; + uint32_t gc_wave_size; + uint32_t gc_max_waves_per_simd; + uint32_t gc_max_scratch_slots_per_cu; + uint32_t gc_lds_size; + uint32_t gc_num_sc_per_se; + uint32_t gc_num_packer_per_sc; +}; + typedef struct harvest_info_header { uint32_t signature; /* Table Signature */ uint32_t version; /* Table Version */ diff --git 
a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index bac15c466733..5c0867ebcfce 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -153,6 +153,10 @@ enum PP_SMC_POWER_PROFILE { PP_SMC_POWER_PROFILE_COUNT, }; +extern const char * const amdgpu_pp_profile_name[PP_SMC_POWER_PROFILE_COUNT]; + + + enum { PP_GROUP_UNKNOWN = 0, PP_GROUP_GFX = 1, diff --git a/drivers/gpu/drm/amd/include/yellow_carp_offset.h b/drivers/gpu/drm/amd/include/yellow_carp_offset.h index 76b9eb3f441d..28a56b56bcaf 100644 --- a/drivers/gpu/drm/amd/include/yellow_carp_offset.h +++ b/drivers/gpu/drm/amd/include/yellow_carp_offset.h @@ -9,12 +9,12 @@ struct IP_BASE_INSTANCE { unsigned int segment[MAX_SEGMENT]; -}; +} __maybe_unused; struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; -}; +} __maybe_unused; static const struct IP_BASE ACP_BASE = { { { { 0x02403800, 0x00480000, 0, 0, 0, 0 } }, diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 49df4c20f09e..e2cae97f4ff1 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -82,6 +82,16 @@ static const struct hwmon_temp_label { {PP_TEMP_MEM, "mem"}, }; +const char * const amdgpu_pp_profile_name[] = { + "BOOTUP_DEFAULT", + "3D_FULL_SCREEN", + "POWER_SAVING", + "VIDEO", + "VR", + "COMPUTE", + "CUSTOM" +}; + /** * DOC: power_dpm_state * @@ -2080,7 +2090,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } else if (DEVICE_ATTR_IS(unique_id)) { if (asic_type != CHIP_VEGA10 && asic_type != CHIP_VEGA20 && - asic_type != CHIP_ARCTURUS) + asic_type != CHIP_ARCTURUS && + asic_type != CHIP_ALDEBARAN) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_features)) { if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10) @@ -2123,6 +2134,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } } + /* setting should not be allowed from VF */ + if (amdgpu_sriov_vf(adev)) { + dev_attr->attr.mode &= ~S_IWUGO; + dev_attr->store = NULL; + } + #undef DEVICE_ATTR_IS return 0; diff --git a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h index 35fa0d8e92dd..ab66a4b9e438 100644 --- a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h @@ -102,7 +102,9 @@ #define PPSMC_MSG_GfxDriverResetRecovery 0x42 #define PPSMC_MSG_BoardPowerCalibration 0x43 -#define PPSMC_Message_Count 0x44 +#define PPSMC_MSG_HeavySBR 0x45 +#define PPSMC_Message_Count 0x46 + //PPSMC Reset Types #define PPSMC_RESET_TYPE_WARM_RESET 0x00 diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 16e3f72d31b9..c464a045000d 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -423,6 +423,9 @@ enum ip_power_state { POWER_STATE_OFF, }; +/* Used to mask smu debug modes */ +#define SMU_DEBUG_HALT_ON_ERROR 0x1 + struct amdgpu_pm { struct mutex mutex; u32 current_sclk; @@ -460,6 +463,11 @@ struct amdgpu_pm { struct list_head pm_attr_list; atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM]; + + /* + * 0 = disabled (default), otherwise enable corresponding debug mode + */ + uint32_t smu_debug_mask; }; #define R600_SSTU_DFLT 0 diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h index f738f7dc20c9..ba7565bc8104 100644 --- 
a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h @@ -481,6 +481,7 @@ struct stb_context { }; #define WORKLOAD_POLICY_MAX 7 + struct smu_context { struct amdgpu_device *adev; @@ -1256,9 +1257,9 @@ struct pptable_funcs { int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu); /** - * @set_light_sbr: Set light sbr mode for the SMU. + * @smu_handle_passthrough_sbr: Send message to SMU about special handling for SBR. */ - int (*set_light_sbr)(struct smu_context *smu, bool enable); + int (*smu_handle_passthrough_sbr)(struct smu_context *smu, bool enable); /** * @wait_for_event: Wait for events from SMU. @@ -1414,7 +1415,7 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en); int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value); -int smu_set_light_sbr(struct smu_context *smu, bool enable); +int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable); int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, uint64_t event_arg); diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h index 18b862a90fbe..ff8a0bcbd290 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h @@ -229,7 +229,8 @@ __SMU_DUMMY_MAP(BoardPowerCalibration), \ __SMU_DUMMY_MAP(RequestGfxclk), \ __SMU_DUMMY_MAP(ForceGfxVid), \ - __SMU_DUMMY_MAP(UnforceGfxVid), + __SMU_DUMMY_MAP(UnforceGfxVid), \ + __SMU_DUMMY_MAP(HeavySBR), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h index 2d422e6a9feb..acb3be292096 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h @@ -312,7 +312,7 @@ int smu_v11_0_deep_sleep_control(struct smu_context *smu, void smu_v11_0_interrupt_work(struct smu_context *smu); -int smu_v11_0_set_light_sbr(struct smu_context *smu, bool enable); +int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable); int smu_v11_0_restore_user_od_settings(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 20cb234d5061..3ab67b232cd4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1328,7 +1328,12 @@ static int pp_set_powergating_by_smu(void *handle, pp_dpm_powergate_vce(handle, gate); break; case AMD_IP_BLOCK_TYPE_GMC: - pp_dpm_powergate_mmhub(handle); + /* + * For now, this is only used on PICASSO. + * And only "gate" operation is supported. 
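A note on the amdgpu_pp_profile_name[] table introduced above: it deduplicates the per-ASIC profile_name arrays that the later hunks in this series delete, and every get_power_profile_mode() implementation now indexes the one shared table. A condensed sketch of the consumer pattern (the function is hypothetical; the loop mirrors the hwmgr hunks below):

    static int dump_profile_names(struct pp_hwmgr *hwmgr, char *buf)
    {
            int size = 0;
            unsigned int i;

            for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++)
                    size += sysfs_emit_at(buf, size, "%2d %14s%s\n", i,
                                          amdgpu_pp_profile_name[i],
                                          i == hwmgr->power_profile_mode ?
                                          "*" : " ");
            return size;
    }

Declaring the extern with an explicit PP_SMC_POWER_PROFILE_COUNT bound also lets the compiler catch a definition that drifts out of sync with the enum, provided the defining file includes this header.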
+ */ + if (gate) + pp_dpm_powergate_mmhub(handle); break; case AMD_IP_BLOCK_TYPE_GFX: ret = pp_dpm_powergate_gfx(handle, gate); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 1f406f21b452..9ddd8491ff00 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -1439,13 +1439,6 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) {70, 90, 0, 0,}, {30, 60, 0, 6,}, }; - static const char *profile_name[6] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE"}; static const char *title[6] = {"NUM", "MODE_NAME", "BUSY_SET_POINT", @@ -1463,7 +1456,7 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++) size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n", - i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ", + i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ", profile_mode_setting[i][0], profile_mode_setting[i][1], profile_mode_setting[i][2], profile_mode_setting[i][3]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 611969bf4520..cd99db0dc2be 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -5498,14 +5498,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) uint32_t i, size = 0; uint32_t len; - static const char *profile_name[7] = {"BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; - static const char *title[8] = {"NUM", "MODE_NAME", "SCLK_UP_HYST", @@ -5529,7 +5521,7 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) for (i = 0; i < len; i++) { if (i == hwmgr->power_profile_mode) { size += sysfs_emit_at(buf, size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n", - i, profile_name[i], "*", + i, amdgpu_pp_profile_name[i], "*", data->current_profile_setting.sclk_up_hyst, data->current_profile_setting.sclk_down_hyst, data->current_profile_setting.sclk_activity, @@ -5540,12 +5532,12 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) } if (smu7_profiling[i].bupdate_sclk) size += sysfs_emit_at(buf, size, "%3d %16s: %8d %16d %16d ", - i, profile_name[i], smu7_profiling[i].sclk_up_hyst, + i, amdgpu_pp_profile_name[i], smu7_profiling[i].sclk_up_hyst, smu7_profiling[i].sclk_down_hyst, smu7_profiling[i].sclk_activity); else size += sysfs_emit_at(buf, size, "%3d %16s: %8s %16s %16s ", - i, profile_name[i], "-", "-", "-"); + i, amdgpu_pp_profile_name[i], "-", "-", "-"); if (smu7_profiling[i].bupdate_mclk) size += sysfs_emit_at(buf, size, "%16d %16d %16d\n", diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index e6336654c565..3f040be0d158 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -5097,13 +5097,6 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) {70, 90, 0, 0,}, {30, 60, 0, 6,}, }; - static const char *profile_name[7] = {"BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; static const char *title[6] = {"NUM", "MODE_NAME", "BUSY_SET_POINT", @@ -5121,11 +5114,12 
@@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++) size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n", - i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ", + i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ", profile_mode_setting[i][0], profile_mode_setting[i][1], profile_mode_setting[i][2], profile_mode_setting[i][3]); + size += sysfs_emit_at(buf, size, "%3d %14s%s: %14d %3d %10d %14d\n", i, - profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ", + amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ", data->custom_profile_mode[0], data->custom_profile_mode[1], data->custom_profile_mode[2], data->custom_profile_mode[3]); return size; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 85d55ab4e369..97b3ad369046 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -3980,14 +3980,6 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) DpmActivityMonitorCoeffInt_t activity_monitor; uint32_t i, size = 0; uint16_t workload_type = 0; - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; static const char *title[] = { "PROFILE_INDEX(NAME)", "CLOCK_TYPE(NAME)", @@ -4021,7 +4013,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) return result); size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", - i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " "); + i, amdgpu_pp_profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " "); size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", " ", diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index edcf2738748a..d93d28c1af95 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -277,8 +277,12 @@ static int smu_dpm_set_power_gate(void *handle, struct smu_context *smu = handle; int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) { + dev_WARN(smu->adev->dev, + "SMU uninitialized but power %s requested for %u!\n", + gate ? "gate" : "ungate", block_type); return -EOPNOTSUPP; + } switch (block_type) { /* @@ -1346,7 +1350,6 @@ static int smu_hw_init(void *handle) } if (smu->is_apu) { - smu_powergate_sdma(&adev->smu, false); smu_dpm_set_vcn_enable(smu, true); smu_dpm_set_jpeg_enable(smu, true); smu_set_gfx_cgpg(&adev->smu, true); @@ -1402,8 +1405,14 @@ static int smu_disable_dpms(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; int ret = 0; + /* + * TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair + * the workaround which always reset the asic in suspend. + * It's likely that workaround will be dropped in the future. + * Then the change here should be dropped together. 
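The use_baco expression this hunk produces is dense enough to be easy to misread; restated as a standalone predicate it is equivalent to the following (a plain restatement of the committed logic, with a hypothetical function name, not code from the tree):

    static bool smu_wants_baco(struct amdgpu_device *adev, bool is_apu)
    {
            /* suspend outside S0ix is temporarily treated like a reset,
             * per the TODO above */
            bool reset_like = amdgpu_in_reset(adev) ||
                              (adev->in_suspend && !adev->in_s0ix);

            return !is_apu &&
                   ((reset_like &&
                     amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ||
                    ((adev->in_runpm || adev->in_s4) &&
                     amdgpu_asic_supports_baco(adev)));
    }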
+ */ bool use_baco = !smu->is_apu && - ((amdgpu_in_reset(adev) && + (((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) && (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev))); @@ -1508,10 +1517,6 @@ static int smu_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) return 0; - if (smu->is_apu) { - smu_powergate_sdma(&adev->smu, true); - } - smu_dpm_set_vcn_enable(smu, false); smu_dpm_set_jpeg_enable(smu, false); @@ -1570,9 +1575,7 @@ static int smu_suspend(void *handle) smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); - /* skip CGPG when in S0ix */ - if (smu->is_apu && !adev->in_s0ix) - smu_set_gfx_cgpg(&adev->smu, false); + smu_set_gfx_cgpg(&adev->smu, false); return 0; } @@ -1603,8 +1606,7 @@ static int smu_resume(void *handle) return ret; } - if (smu->is_apu) - smu_set_gfx_cgpg(&adev->smu, true); + smu_set_gfx_cgpg(&adev->smu, true); smu->disable_uclk_switch = 0; @@ -3062,13 +3064,13 @@ static int smu_gfx_state_change_set(void *handle, return ret; } -int smu_set_light_sbr(struct smu_context *smu, bool enable) +int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable) { int ret = 0; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->set_light_sbr) - ret = smu->ppt_funcs->set_light_sbr(smu, enable); + if (smu->ppt_funcs->smu_handle_passthrough_sbr) + ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable); mutex_unlock(&smu->mutex); return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index fd1d30a93db5..505d2fb94fd9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -295,16 +295,6 @@ static int arcturus_allocate_dpm_context(struct smu_context *smu) return -ENOMEM; smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context); - smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state), - GFP_KERNEL); - if (!smu_dpm->dpm_current_power_state) - return -ENOMEM; - - smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state), - GFP_KERNEL); - if (!smu_dpm->dpm_request_power_state) - return -ENOMEM; - return 0; } @@ -1389,14 +1379,6 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, char *buf) { DpmActivityMonitorCoeffInt_t activity_monitor; - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; static const char *title[] = { "PROFILE_INDEX(NAME)", "CLOCK_TYPE(NAME)", @@ -1453,7 +1435,7 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, } size += sysfs_emit_at(buf, size, "%2d %14s%s\n", - i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? 
"*" : " "); if (smu_version >= 0x360d00) { size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", @@ -2490,7 +2472,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .deep_sleep_control = smu_v11_0_deep_sleep_control, .get_fan_parameters = arcturus_get_fan_parameters, .interrupt_work = smu_v11_0_interrupt_work, - .set_light_sbr = smu_v11_0_set_light_sbr, + .smu_handle_passthrough_sbr = smu_v11_0_handle_passthrough_sbr, .set_mp1_state = smu_cmn_set_mp1_state, }; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 60a557068ea4..2bb7816b245a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1713,14 +1713,6 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) DpmActivityMonitorCoeffInt_t activity_monitor; uint32_t i, size = 0; int16_t workload_type = 0; - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; static const char *title[] = { "PROFILE_INDEX(NAME)", "CLOCK_TYPE(NAME)", @@ -1759,7 +1751,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) } size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", - i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", " ", diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index a673e05853fe..777f717c37ae 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1349,14 +1349,6 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char * &(activity_monitor_external.DpmActivityMonitorCoeffInt); uint32_t i, size = 0; int16_t workload_type = 0; - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; static const char *title[] = { "PROFILE_INDEX(NAME)", "CLOCK_TYPE(NAME)", @@ -1395,7 +1387,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char * } size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", - i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? 
"*" : " "); size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", " ", diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 28b7c0562b99..4e9e2cf39859 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1724,7 +1724,7 @@ int smu_v11_0_mode1_reset(struct smu_context *smu) return ret; } -int smu_v11_0_set_light_sbr(struct smu_context *smu, bool enable) +int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable) { int ret = 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index c02ed65ffa38..5cb07ed227fb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -1039,14 +1039,6 @@ failed: static int vangogh_get_power_profile_mode(struct smu_context *smu, char *buf) { - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; uint32_t i, size = 0; int16_t workload_type = 0; @@ -1066,7 +1058,7 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu, continue; size += sysfs_emit_at(buf, size, "%2d %14s%s\n", - i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); } return size; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 145f13b8c977..25c4b135f830 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -1095,14 +1095,6 @@ static int renoir_set_watermarks_table( static int renoir_get_power_profile_mode(struct smu_context *smu, char *buf) { - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; uint32_t i, size = 0; int16_t workload_type = 0; @@ -1121,7 +1113,7 @@ static int renoir_get_power_profile_mode(struct smu_context *smu, continue; size += sysfs_emit_at(buf, size, "%2d %14s%s\n", - i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); } return size; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index d60b8c5e8715..9c91e79c955f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -120,7 +120,8 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) { - if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) + /* Until now the SMU12 only implemented for Renoir series so here neen't do APU check. 
*/ + if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) || smu->adev->in_s0ix) return 0; return smu_cmn_send_smc_msg_with_param(smu, @@ -191,6 +192,9 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu) kfree(smu_table->watermarks_table); smu_table->watermarks_table = NULL; + kfree(smu_table->gpu_metrics_table); + smu_table->gpu_metrics_table = NULL; + return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 6e781cee8bb6..4885c4ae78b7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -141,6 +141,7 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT MSG_MAP(SetUclkDpmMode, PPSMC_MSG_SetUclkDpmMode, 0), MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0), MSG_MAP(BoardPowerCalibration, PPSMC_MSG_BoardPowerCalibration, 0), + MSG_MAP(HeavySBR, PPSMC_MSG_HeavySBR, 0), }; static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = { @@ -262,16 +263,6 @@ static int aldebaran_allocate_dpm_context(struct smu_context *smu) return -ENOMEM; smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); - smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state), - GFP_KERNEL); - if (!smu_dpm->dpm_current_power_state) - return -ENOMEM; - - smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state), - GFP_KERNEL); - if (!smu_dpm->dpm_request_power_state) - return -ENOMEM; - return 0; } @@ -1615,7 +1606,8 @@ out_unlock: mutex_unlock(&smu->metrics_lock); adev->unique_id = ((uint64_t)upper32 << 32) | lower32; - sprintf(adev->serial, "%016llx", adev->unique_id); + if (adev->serial[0] == '\0') + sprintf(adev->serial, "%016llx", adev->unique_id); } static bool aldebaran_is_baco_supported(struct smu_context *smu) @@ -1633,10 +1625,18 @@ static int aldebaran_set_df_cstate(struct smu_context *smu, static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en) { - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, - en ? 1 : 0, - NULL); + struct amdgpu_device *adev = smu->adev; + + /* The message only works on master die and NACK will be sent + back for other dies, only send it on master die */ + if (!adev->smuio.funcs->get_socket_id(adev) && + !adev->smuio.funcs->get_die_id(adev)) + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GmiPwrDnControl, + en ? 0 : 1, + NULL); + else + return 0; } static const struct throttling_logging_label { @@ -1922,6 +1922,14 @@ out: return ret; } +static int aldebaran_smu_handle_passthrough_sbr(struct smu_context *smu, bool enable) +{ + int ret = 0; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_HeavySBR, enable ? 
1 : 0, NULL); + + return ret; +} + static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu) { #if 0 @@ -2031,6 +2039,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = { .get_gpu_metrics = aldebaran_get_gpu_metrics, .mode1_reset_is_support = aldebaran_is_mode1_reset_supported, .mode2_reset_is_support = aldebaran_is_mode2_reset_supported, + .smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr, .mode1_reset = aldebaran_mode1_reset, .set_mp1_state = aldebaran_set_mp1_state, .mode2_reset = aldebaran_mode2_reset, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 55421ea622fb..b54790d3483e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -196,6 +196,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu) int smu_v13_0_check_fw_version(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; uint32_t if_version = 0xff, smu_version = 0xff; uint16_t smu_major; uint8_t smu_minor, smu_debug; @@ -208,8 +209,10 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) smu_major = (smu_version >> 16) & 0xffff; smu_minor = (smu_version >> 8) & 0xff; smu_debug = (smu_version >> 0) & 0xff; + if (smu->is_apu) + adev->pm.fw_version = smu_version; - switch (smu->adev->ip_versions[MP1_HWIP][0]) { + switch (adev->ip_versions[MP1_HWIP][0]) { case IP_VERSION(13, 0, 2): smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE; break; @@ -218,13 +221,15 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP; break; default: - dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n", - smu->adev->ip_versions[MP1_HWIP][0]); + dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", + adev->ip_versions[MP1_HWIP][0]); smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV; break; } - dev_info(smu->adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n", + /* only for dGPU w/ SMU13*/ + if (adev->pm.fw) + dev_dbg(adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n", smu_version, smu_major, smu_minor, smu_debug); /* @@ -236,11 +241,11 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) * of halt driver loading. 
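The smu_cmn.c hunks below add the same debug hook on three exit paths; factored out, it reads as follows (a condensed restatement, since the committed code is open-coded at each site). Timeouts (-ETIME) are deliberately excluded from the halt.

    static void smu_debug_maybe_halt(struct amdgpu_device *adev, int res)
    {
            if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
                res && res != -ETIME) {
                    amdgpu_device_halt(adev); /* freeze the device for a dump */
                    WARN_ON(1);               /* leave a backtrace in dmesg */
            }
    }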
*/ if (if_version != smu->smc_driver_if_version) { - dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " + dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " "smu fw version = 0x%08x (%d.%d.%d)\n", smu->smc_driver_if_version, if_version, smu_version, smu_major, smu_minor, smu_debug); - dev_warn(smu->adev->dev, "SMU driver if version not matched\n"); + dev_warn(adev->dev, "SMU driver if version not matched\n"); } return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 500af6f8adcb..ee1a312fd497 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -94,7 +94,7 @@ static void smu_cmn_read_arg(struct smu_context *smu, /** * __smu_cmn_poll_stat -- poll for a status from the SMU - * smu: a pointer to SMU context + * @smu: a pointer to SMU context * * Returns the status of the SMU, which could be, * 0, the SMU is busy with your command; @@ -257,10 +257,11 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu, uint16_t msg_index, uint32_t param) { + struct amdgpu_device *adev = smu->adev; u32 reg; int res; - if (smu->adev->no_hw_access) + if (adev->no_hw_access) return 0; reg = __smu_cmn_poll_stat(smu); @@ -272,6 +273,12 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu, __smu_cmn_send_msg(smu, msg_index, param); res = 0; Out: + if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && + res && (res != -ETIME)) { + amdgpu_device_halt(adev); + WARN_ON(1); + } + return res; } @@ -288,9 +295,18 @@ Out: int smu_cmn_wait_for_response(struct smu_context *smu) { u32 reg; + int res; reg = __smu_cmn_poll_stat(smu); - return __smu_cmn_reg2errno(smu, reg); + res = __smu_cmn_reg2errno(smu, reg); + + if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && + res && (res != -ETIME)) { + amdgpu_device_halt(smu->adev); + WARN_ON(1); + } + + return res; } /** @@ -328,10 +344,11 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, uint32_t param, uint32_t *read_arg) { + struct amdgpu_device *adev = smu->adev; int res, index; u32 reg; - if (smu->adev->no_hw_access) + if (adev->no_hw_access) return 0; index = smu_cmn_to_asic_specific_index(smu, @@ -357,6 +374,11 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, if (read_arg) smu_cmn_read_arg(smu, read_arg); Out: + if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) { + amdgpu_device_halt(adev); + WARN_ON(1); + } + mutex_unlock(&smu->message_lock); return res; } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 6fa8042a0dfd..ab52efb15670 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -1121,7 +1121,10 @@ static void ast_crtc_reset(struct drm_crtc *crtc) if (crtc->state) crtc->funcs->atomic_destroy_state(crtc, crtc->state); - __drm_atomic_helper_crtc_reset(crtc, &ast_state->base); + if (ast_state) + __drm_atomic_helper_crtc_reset(crtc, &ast_state->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); } static struct drm_crtc_state * diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index d9eb353a4bf0..dbe1cc620f6e 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -282,8 +282,6 @@ static const struct ast_vbios_enhtable res_1360x768[] = { }; static const struct ast_vbios_enhtable res_1600x900[] = { - {1800, 1600, 24, 80, 1000, 900, 1, 3, VCLK108, /* 60Hz */ - (SyncPP | 
Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 3, 0x3A }, {1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | AST2500PreCatchCRT), 60, 1, 0x3A }, diff --git a/drivers/gpu/drm/dp/drm_dp_mst_topology.c b/drivers/gpu/drm/dp/drm_dp_mst_topology.c index ddb9aa051288..11300b53d24f 100644 --- a/drivers/gpu/drm/dp/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/dp/drm_dp_mst_topology.c @@ -5511,6 +5511,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, mutex_init(&mgr->probe_lock); #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) mutex_init(&mgr->topology_ref_history_lock); + stack_depot_init(); #endif INIT_LIST_HEAD(&mgr->tx_msg_downq); INIT_LIST_HEAD(&mgr->destroy_port_list); diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 21174efd91be..88cd992df356 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1327,8 +1327,10 @@ int drm_atomic_check_only(struct drm_atomic_state *state) drm_dbg_atomic(dev, "checking %p\n", state); - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) - requested_crtc |= drm_crtc_mask(crtc); + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + if (new_crtc_state->enable) + requested_crtc |= drm_crtc_mask(crtc); + } for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { ret = drm_atomic_plane_check(old_plane_state, new_plane_state); @@ -1377,8 +1379,10 @@ int drm_atomic_check_only(struct drm_atomic_state *state) } } - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) - affected_crtc |= drm_crtc_mask(crtc); + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + if (new_crtc_state->enable) + affected_crtc |= drm_crtc_mask(crtc); + } /* * For commits that allow modesets drivers can add other CRTCs to the diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index a7a05e1e26bb..9603193d2fa1 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1016,7 +1016,7 @@ crtc_needs_disable(struct drm_crtc_state *old_state, * it's in self refresh mode and needs to be fully disabled. */ return old_state->active || - (old_state->self_refresh_active && !new_state->enable) || + (old_state->self_refresh_active && !new_state->active) || new_state->self_refresh_active; } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 805c5a666490..f15127a32f7a 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1743,7 +1743,13 @@ void drm_fb_helper_fill_info(struct fb_info *info, sizes->fb_width, sizes->fb_height); info->par = fb_helper; - snprintf(info->fix.id, sizeof(info->fix.id), "%s", + /* + * The DRM drivers fbdev emulation device name can be confusing if the + * driver name also has a "drm" suffix on it, leading to names such as + * "simpledrmdrmfb" in /proc/fb. Unfortunately, it's an uAPI and can't + * be changed due to user-space tools (e.g. pm-utils) matching against it.
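The subtlety in this drm_fb_helper.c hunk is that the "drmfb" suffix is now appended by the format string rather than expected to be part of the driver name, so the historical /proc/fb entries that user space matches on are reproduced exactly; for example (illustrative driver names only):

    char id[16]; /* same size as info->fix.id */

    snprintf(id, sizeof(id), "%sdrmfb", "amdgpu");    /* -> "amdgpudrmfb" */
    snprintf(id, sizeof(id), "%sdrmfb", "simpledrm"); /* -> "simpledrmdrmfb" */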
+ */ + snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb", fb_helper->dev->driver->name); } diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 7d1c578388d3..8257f9d4f619 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -980,6 +980,10 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size) add_hole(&mm->head_node); mm->scan_active = 0; + +#ifdef CONFIG_DRM_DEBUG_MM + stack_depot_init(); +#endif } EXPORT_SYMBOL(drm_mm_init); diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index c97323365675..918065982db4 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -107,6 +107,11 @@ static void __drm_stack_depot_print(depot_stack_handle_t stack_depot) kfree(buf); } + +static void __drm_stack_depot_init(void) +{ + stack_depot_init(); +} #else /* CONFIG_DRM_DEBUG_MODESET_LOCK */ static depot_stack_handle_t __drm_stack_depot_save(void) { @@ -115,6 +120,9 @@ static depot_stack_handle_t __drm_stack_depot_save(void) static void __drm_stack_depot_print(depot_stack_handle_t stack_depot) { } +static void __drm_stack_depot_init(void) +{ +} #endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */ /** @@ -359,6 +367,7 @@ void drm_modeset_lock_init(struct drm_modeset_lock *lock) { ww_mutex_init(&lock->mutex, &crtc_ww_class); INIT_LIST_HEAD(&lock->head); + __drm_stack_depot_init(); } EXPORT_SYMBOL(drm_modeset_lock_init); diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 042bb80383c9..b910978d3e48 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -115,6 +115,12 @@ static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data lcd1600x2560_leftside_up = { + .width = 1600, + .height = 2560, + .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP, +}; + static const struct dmi_system_id orientation_data[] = { { /* Acer One 10 (S1003) */ .matches = { @@ -275,6 +281,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"), }, .driver_data = (void *)&onegx1_pro, + }, { /* OneXPlayer */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"), + }, + .driver_data = (void *)&lcd1600x2560_leftside_up, }, { /* Samsung GalaxyBook 10.6 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), diff --git a/drivers/gpu/drm/drm_privacy_screen_x86.c b/drivers/gpu/drm/drm_privacy_screen_x86.c index 88802cd7a1ee..72ed40e4997e 100644 --- a/drivers/gpu/drm/drm_privacy_screen_x86.c +++ b/drivers/gpu/drm/drm_privacy_screen_x86.c @@ -33,6 +33,9 @@ static bool __init detect_thinkpad_privacy_screen(void) unsigned long long output; acpi_status status; + if (acpi_disabled) + return false; + /* Get embedded-controller handle */ status = acpi_get_devices("PNP0C09", acpi_set_handle, NULL, &ec_handle); if (ACPI_FAILURE(status) || !ec_handle) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 7dcc6392792d..0b756ecb1bc2 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -589,6 +589,7 @@ static int compare_str(struct device *dev, void *data) static int etnaviv_pdev_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + 
struct device_node *first_node = NULL; struct component_match *match = NULL; if (!dev->platform_data) { @@ -598,6 +599,9 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) if (!of_device_is_available(core_node)) continue; + if (!first_node) + first_node = core_node; + drm_of_component_match_add(&pdev->dev, &match, compare_of, core_node); } @@ -609,6 +613,32 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) component_match_add(dev, &match, compare_str, names[i]); } + /* + * PTA and MTLB can have 40 bit base addresses, but + * unfortunately, an entry in the MTLB can only point to a + * 32 bit base address of a STLB. Moreover, to initialize the + * MMU we need a command buffer with a 32 bit address because + * without an MMU there is only an identity mapping between + * the internal 32 bit addresses and the bus addresses. + * + * To make things easy, we set the dma_coherent_mask to 32 + * bit to make sure we are allocating the command buffers and + * TLBs in the lower 4 GiB address space. + */ + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) || + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { + dev_dbg(&pdev->dev, "No suitable DMA available\n"); + return -ENODEV; + } + + /* + * Apply the same DMA configuration to the virtual etnaviv + * device as the GPU we found. This assumes that all Vivante + * GPUs in the system share the same DMA constraints. + */ + if (first_node) + of_dma_configure(&pdev->dev, first_node, true); + return component_master_add_with_match(dev, &etnaviv_master_ops, match); } @@ -653,21 +683,12 @@ static int __init etnaviv_init(void) if (!of_device_is_available(np)) continue; - pdev = platform_device_alloc("etnaviv", -1); + pdev = platform_device_alloc("etnaviv", PLATFORM_DEVID_NONE); if (!pdev) { ret = -ENOMEM; of_node_put(np); goto unregister_platform_driver; } - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40); - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - - /* - * Apply the same DMA configuration to the virtual etnaviv - * device as the GPU we found. This assumes that all Vivante - * GPUs in the system share the same DMA constraints. - */ - of_dma_configure(&pdev->dev, np, true); ret = platform_device_add(pdev); if (ret) { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 64c90ff348f2..4eb00a0cb650 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -468,6 +468,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, return -EINVAL; } + if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K || + args->nr_bos > SZ_128K || args->nr_pmrs > 128) { + DRM_ERROR("submit arguments out of size limits\n"); + return -EINVAL; + } + /* * Copy the command submission and bo array to kernel space in * one go, and do this outside of any locks.
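The etnaviv_ioctl_gem_submit hunk just above hard-bounds every user-controlled count (stream_size, nr_relocs, nr_bos, nr_pmrs) before anything is allocated or copied. A minimal sketch of that pattern follows; my_submit_bo and copy_user_array() are illustrative names rather than etnaviv's actual helpers, and the SZ_128K cap simply mirrors the limit chosen in the hunk.

	#include <linux/kernel.h>  /* u64_to_user_ptr() */
	#include <linux/mm.h>      /* kvmalloc_array(), kvfree() */
	#include <linux/sizes.h>   /* SZ_128K */
	#include <linux/uaccess.h> /* copy_from_user() */

	struct my_submit_bo {      /* hypothetical element type */
		u32 handle;
		u32 flags;
	};

	static int copy_user_array(u64 user_ptr, u32 nr,
				   struct my_submit_bo **out)
	{
		struct my_submit_bo *arr;

		if (nr > SZ_128K)  /* fail fast on absurd counts, as above */
			return -EINVAL;

		/* kvmalloc_array() additionally guards nr * size overflow */
		arr = kvmalloc_array(nr, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			return -ENOMEM;

		if (copy_from_user(arr, u64_to_user_ptr(user_ptr),
				   nr * sizeof(*arr))) {
			kvfree(arr);
			return -EFAULT;
		}

		*out = arr;
		return 0;
	}

The explicit cap is what keeps allocation sizes bounded regardless of what userspace passes in; the overflow check in kvmalloc_array() alone would reject only the most extreme values.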
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 242a5fd8b932..37018bc55810 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -1047,7 +1047,7 @@ pm_put: void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) { - unsigned int i = 0; + unsigned int i; dev_err(gpu->dev, "recover hung GPU!\n"); @@ -1060,7 +1060,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) /* complete all events, the GPU won't do it after the reset */ spin_lock(&gpu->event_spinlock); - for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS) + for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS) complete(&gpu->event_free); bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS); spin_unlock(&gpu->event_spinlock); @@ -1658,7 +1658,7 @@ etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev, return 0; } -static struct thermal_cooling_device_ops cooling_ops = { +static const struct thermal_cooling_device_ops cooling_ops = { .get_max_state = etnaviv_gpu_cooling_get_max_state, .get_cur_state = etnaviv_gpu_cooling_get_cur_state, .set_cur_state = etnaviv_gpu_cooling_set_cur_state, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 1c75c8ed5bce..85eddd492774 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -130,6 +130,7 @@ struct etnaviv_gpu { /* hang detection */ u32 hangcheck_dma_addr; + u32 hangcheck_fence; void __iomem *mmio; int irq; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 180bb633d5c5..58f593b278c1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -107,8 +107,10 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job */ dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); change = dma_addr - gpu->hangcheck_dma_addr; - if (change < 0 || change > 16) { + if (gpu->completed_fence != gpu->hangcheck_fence || + change < 0 || change > 16) { gpu->hangcheck_dma_addr = dma_addr; + gpu->hangcheck_fence = gpu->completed_fence; goto out_no_timeout; } diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 6a251e3aa779..f27cfd2a9726 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -66,6 +66,7 @@ config DRM_EXYNOS_DP bool "Exynos specific extensions for Analogix DP driver" depends on DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON select DRM_ANALOGIX_DP + select DRM_DP_HELPER default DRM_EXYNOS select DRM_PANEL help diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index f9f10413a4f2..c68498497c0b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -102,16 +102,7 @@ static const struct drm_ioctl_desc exynos_ioctls[] = { DRM_RENDER_ALLOW), }; -static const struct file_operations exynos_drm_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .mmap = exynos_drm_gem_mmap, - .poll = drm_poll, - .read = drm_read, - .unlocked_ioctl = drm_ioctl, - .compat_ioctl = drm_compat_ioctl, - .release = drm_release, -}; +DEFINE_DRM_GEM_FOPS(exynos_drm_driver_fops); static const struct drm_driver exynos_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM @@ -124,7 +115,7 @@ static const struct drm_driver exynos_drm_driver = { .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = exynos_drm_gem_prime_import, .gem_prime_import_sg_table = 
exynos_drm_gem_prime_import_sg_table, - .gem_prime_mmap = exynos_drm_gem_prime_mmap, + .gem_prime_mmap = drm_gem_prime_mmap, .ioctls = exynos_ioctls, .num_ioctls = ARRAY_SIZE(exynos_ioctls), .fops = &exynos_drm_driver_fops, diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index bce5331ed1e6..b7d0a4aead0a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -13,7 +13,6 @@ #include <linux/gpio/consumer.h> #include <linux/irq.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/of_graph.h> #include <linux/phy/phy.h> #include <linux/regulator/consumer.h> @@ -266,7 +265,7 @@ struct exynos_dsi { struct clk **clks; struct regulator_bulk_data supplies[2]; int irq; - int te_gpio; + struct gpio_desc *te_gpio; u32 pll_clk_rate; u32 burst_clk_rate; @@ -1299,14 +1298,14 @@ static void exynos_dsi_enable_irq(struct exynos_dsi *dsi) { enable_irq(dsi->irq); - if (gpio_is_valid(dsi->te_gpio)) - enable_irq(gpio_to_irq(dsi->te_gpio)); + if (dsi->te_gpio) + enable_irq(gpiod_to_irq(dsi->te_gpio)); } static void exynos_dsi_disable_irq(struct exynos_dsi *dsi) { - if (gpio_is_valid(dsi->te_gpio)) - disable_irq(gpio_to_irq(dsi->te_gpio)); + if (dsi->te_gpio) + disable_irq(gpiod_to_irq(dsi->te_gpio)); disable_irq(dsi->irq); } @@ -1336,42 +1335,31 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi, int ret; int te_gpio_irq; - dsi->te_gpio = of_get_named_gpio(panel->of_node, "te-gpios", 0); - if (dsi->te_gpio == -ENOENT) - return 0; - - if (!gpio_is_valid(dsi->te_gpio)) { - ret = dsi->te_gpio; - dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret); - goto out; - } - - ret = gpio_request(dsi->te_gpio, "te_gpio"); - if (ret) { - dev_err(dsi->dev, "gpio request failed with %d\n", ret); - goto out; + dsi->te_gpio = devm_gpiod_get_optional(dsi->dev, "te", GPIOD_IN); + if (IS_ERR(dsi->te_gpio)) { + dev_err(dsi->dev, "gpio request failed with %ld\n", + PTR_ERR(dsi->te_gpio)); + return PTR_ERR(dsi->te_gpio); } - te_gpio_irq = gpio_to_irq(dsi->te_gpio); + te_gpio_irq = gpiod_to_irq(dsi->te_gpio); ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL, IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi); if (ret) { dev_err(dsi->dev, "request interrupt failed with %d\n", ret); - gpio_free(dsi->te_gpio); - goto out; + gpiod_put(dsi->te_gpio); + return ret; } -out: - return ret; + return 0; } static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi) { - if (gpio_is_valid(dsi->te_gpio)) { - free_irq(gpio_to_irq(dsi->te_gpio), dsi); - gpio_free(dsi->te_gpio); - dsi->te_gpio = -ENOENT; + if (dsi->te_gpio) { + free_irq(gpiod_to_irq(dsi->te_gpio), dsi); + gpiod_put(dsi->te_gpio); } } @@ -1756,9 +1744,6 @@ static int exynos_dsi_probe(struct platform_device *pdev) if (!dsi) return -ENOMEM; - /* To be checked as invalid one */ - dsi->te_gpio = -ENOENT; - init_completion(&dsi->completed); spin_lock_init(&dsi->transfer_lock); INIT_LIST_HEAD(&dsi->transfer_list); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 5147f5929be7..02c97b9ca926 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -15,6 +15,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_prime.h> #include <drm/drm_probe_helper.h> #include <drm/exynos_drm.h> @@ -39,25 +40,8 @@ static int exynos_drm_fb_mmap(struct fb_info *info, struct drm_fb_helper *helper = info->par; 
struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper); struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem; - unsigned long vm_size; - int ret; - - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - - vm_size = vma->vm_end - vma->vm_start; - - if (vm_size > exynos_gem->size) - return -EINVAL; - ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie, - exynos_gem->dma_addr, exynos_gem->size, - exynos_gem->dma_attrs); - if (ret < 0) { - DRM_DEV_ERROR(to_dma_dev(helper->dev), "failed to mmap.\n"); - return ret; - } - - return 0; + return drm_gem_prime_mmap(&exynos_gem->base, vma); } static const struct fb_ops exynos_drm_fb_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index ecfd82d0afb7..023f54ee61a8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -782,8 +782,8 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc, sc->hratio = (src_w << 14) / (dst_w << hfactor); sc->vratio = (src_h << 14) / (dst_h << vfactor); - sc->up_h = (dst_w >= src_w) ? true : false; - sc->up_v = (dst_h >= src_h) ? true : false; + sc->up_h = (dst_w >= src_w); + sc->up_v = (dst_h >= src_h); DRM_DEV_DEBUG_KMS(ctx->dev, "hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n", sc->hratio, sc->vratio, sc->up_h, sc->up_v); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 0a0c042a3155..3e493f48e0d4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -20,6 +20,8 @@ MODULE_IMPORT_NS(DMA_BUF); +static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); + static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap) { struct drm_device *dev = exynos_gem->base.dev; @@ -138,6 +140,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = { static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = { .free = exynos_drm_gem_free_object, .get_sg_table = exynos_drm_gem_prime_get_sg_table, + .mmap = exynos_drm_gem_mmap, .vm_ops = &exynos_drm_gem_vm_ops, }; @@ -357,12 +360,16 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, return 0; } -static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj, - struct vm_area_struct *vma) +static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj); int ret; + if (obj->import_attach) + return dma_buf_mmap(obj->dma_buf, vma, 0); + + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; + DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n", exynos_gem->flags); @@ -388,26 +395,6 @@ err_close_vm: return ret; } -int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_gem_object *obj; - int ret; - - /* set vm_area_struct. 
*/ - ret = drm_gem_mmap(filp, vma); - if (ret < 0) { - DRM_ERROR("failed to mmap.\n"); - return ret; - } - - obj = vma->vm_private_data; - - if (obj->import_attach) - return dma_buf_mmap(obj->dma_buf, vma, 0); - - return exynos_drm_gem_mmap_obj(obj, vma); -} - /* low-level interface prime helpers */ struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) @@ -469,15 +456,3 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, exynos_gem->sgt = sgt; return &exynos_gem->base; } - -int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma) -{ - int ret; - - ret = drm_gem_mmap_obj(obj, obj->size, vma); - if (ret < 0) - return ret; - - return exynos_drm_gem_mmap_obj(obj, vma); -} diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index a23272fb96fb..79d7e1a87419 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -96,9 +96,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -/* set vm_flags and we can change the vm attribute to other one at here. */ -int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); - /* low-level interface prime helpers */ struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); @@ -107,7 +104,5 @@ struct drm_gem_object * exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma); #endif diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 72c2e9c5e0b3..b136234120c4 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -162,6 +162,7 @@ i915-y += \ $(gem-y) \ i915_active.o \ i915_cmd_parser.o \ + i915_deps.o \ i915_gem_evict.o \ i915_gem_gtt.o \ i915_gem_ww.o \ @@ -258,6 +259,7 @@ i915-y += \ display/intel_crt.o \ display/intel_ddi.o \ display/intel_ddi_buf_trans.o \ + display/intel_display_trace.o \ display/intel_dp.o \ display/intel_dp_aux.o \ display/intel_dp_aux_backlight.o \ diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index dc41868d01ef..f37677df6ebf 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -9,6 +9,7 @@ #include "intel_audio.h" #include "intel_backlight.h" #include "intel_connector.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp.h" diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index f5b4dd5b4275..06e00b1eaa7c 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -8,6 +8,7 @@ #include "g4x_hdmi.h" #include "intel_audio.h" #include "intel_connector.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dpio_phy.h" diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 2194f74101ae..85950ff67609 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -13,6 +13,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_fb.h" +#include "intel_fbc.h" #include "intel_sprite.h" #include "i9xx_plane.h" @@ -120,6 +121,15 @@ static bool i9xx_plane_has_fbc(struct 
drm_i915_private *dev_priv, return i9xx_plane == PLANE_A; } +static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv, + enum i9xx_plane_id i9xx_plane) +{ + if (i9xx_plane_has_fbc(dev_priv, i9xx_plane)) + return dev_priv->fbc; + else + return NULL; +} + static bool i9xx_plane_has_windowing(struct intel_plane *plane) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -807,10 +817,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->id = PLANE_PRIMARY; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); - if (i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane)) - plane->fbc = &dev_priv->fbc; - if (plane->fbc) - plane->fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; + intel_fbc_add_plane(i9xx_plane_fbc(dev_priv, plane->i9xx_plane), plane); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { formats = vlv_primary_formats; diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index b4e7ac51aa31..a62550711e98 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -139,6 +139,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio || new_conn_state->base.content_type != old_conn_state->base.content_type || new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode || + new_conn_state->base.privacy_screen_sw_state != old_conn_state->base.privacy_screen_sw_state || !drm_connector_atomic_hdr_metadata_equal(old_state, new_state)) crtc_state->mode_changed = true; diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index e3a0bfb7be84..c2c512cd8ec0 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -35,15 +35,16 @@ #include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> -#include "i915_trace.h" +#include "gt/intel_rps.h" + #include "intel_atomic_plane.h" #include "intel_cdclk.h" +#include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fb_pin.h" #include "intel_pm.h" #include "intel_sprite.h" -#include "gt/intel_rps.h" static void intel_plane_state_reset(struct intel_plane_state *plane_state, struct intel_plane *plane) @@ -395,7 +396,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state, const struct intel_plane_state *old_plane_state = intel_atomic_get_old_plane_state(state, plane); const struct intel_plane_state *new_master_plane_state; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, plane->pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(i915, plane->pipe); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = @@ -818,7 +819,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane, * maximum clocks following a vblank miss (see do_rps_boost()). 
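 * The mark is paired: set here in intel_prepare_plane_fb() and cleared
 * again in intel_cleanup_plane_fb() below, so the GT only runs with the
 * interactive RPS thresholds while a flip is actually outstanding.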
*/ if (!state->rps_interactive) { - intel_rps_mark_interactive(&dev_priv->gt.rps, true); + intel_rps_mark_interactive(&to_gt(dev_priv)->rps, true); state->rps_interactive = true; } @@ -852,7 +853,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane, return; if (state->rps_interactive) { - intel_rps_mark_interactive(&dev_priv->gt.rps, false); + intel_rps_mark_interactive(&to_gt(dev_priv)->rps, false); state->rps_interactive = false; } diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 03c3111ebdf0..3bdca0fe2cee 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -31,6 +31,7 @@ #include "intel_atomic.h" #include "intel_audio.h" #include "intel_cdclk.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_lpe_audio.h" @@ -1019,7 +1020,7 @@ static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv, struct intel_crtc *crtc; int ret; - crtc = intel_get_first_crtc(dev_priv); + crtc = intel_first_crtc(dev_priv); if (!crtc) return; diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 41d37a8c5c8f..1ff1f1c426b2 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1555,12 +1555,24 @@ static const u8 gen9bc_tgp_ddc_pin_map[] = { [DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP, }; +static const u8 adlp_ddc_pin_map[] = { + [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, + [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, + [ADLP_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP, + [ADLP_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP, + [ADLP_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP, + [ADLP_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP, +}; + static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) { const u8 *ddc_pin_map; int n_entries; - if (IS_ALDERLAKE_S(i915)) { + if (IS_ALDERLAKE_P(i915)) { + ddc_pin_map = adlp_ddc_pin_map; + n_entries = ARRAY_SIZE(adlp_ddc_pin_map); + } else if (IS_ALDERLAKE_S(i915)) { ddc_pin_map = adls_ddc_pin_map; n_entries = ARRAY_SIZE(adls_ddc_pin_map); } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) { diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 91c19e0a98d7..c30cf8d2b835 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -24,9 +24,11 @@ #include <linux/time.h> #include "intel_atomic.h" +#include "intel_atomic_plane.h" #include "intel_audio.h" #include "intel_bw.h" #include "intel_cdclk.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_pcode.h" @@ -67,7 +69,7 @@ void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config); } -int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state) +static int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); return dev_priv->cdclk_funcs->bw_calc_min_cdclk(state); @@ -1212,6 +1214,19 @@ static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv) skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); } +static bool has_cdclk_squasher(struct drm_i915_private *i915) +{ + return IS_DG2(i915); +} + +struct intel_cdclk_vals { + u32 cdclk; + u16 refclk; + u16 waveform; + u8 divider; /* CD2X divider * 2 */ + u8 ratio; +}; + static const struct intel_cdclk_vals bxt_cdclk_table[] = { { .refclk = 19200, .cdclk = 
144000, .divider = 8, .ratio = 60 }, { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 }, @@ -1313,12 +1328,19 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = { }; static const struct intel_cdclk_vals dg2_cdclk_table[] = { - { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 }, - { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 }, - { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 }, - { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 }, - { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 }, - { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 }, + { .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 }, + { .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 }, + { .refclk = 38400, .cdclk = 244800, .divider = 2, .ratio = 34, .waveform = 0xa4a4 }, + { .refclk = 38400, .cdclk = 285600, .divider = 2, .ratio = 34, .waveform = 0xa54a }, + { .refclk = 38400, .cdclk = 326400, .divider = 2, .ratio = 34, .waveform = 0xaaaa }, + { .refclk = 38400, .cdclk = 367200, .divider = 2, .ratio = 34, .waveform = 0xad5a }, + { .refclk = 38400, .cdclk = 408000, .divider = 2, .ratio = 34, .waveform = 0xb6b6 }, + { .refclk = 38400, .cdclk = 448800, .divider = 2, .ratio = 34, .waveform = 0xdbb6 }, + { .refclk = 38400, .cdclk = 489600, .divider = 2, .ratio = 34, .waveform = 0xeeee }, + { .refclk = 38400, .cdclk = 530400, .divider = 2, .ratio = 34, .waveform = 0xf7de }, + { .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe }, + { .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe }, + { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff }, {} }; @@ -1454,6 +1476,7 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv, static void bxt_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { + u32 squash_ctl = 0; u32 divider; int div; @@ -1491,7 +1514,21 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, return; } - cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div); + if (has_cdclk_squasher(dev_priv)) + squash_ctl = intel_de_read(dev_priv, CDCLK_SQUASH_CTL); + + if (squash_ctl & CDCLK_SQUASH_ENABLE) { + u16 waveform; + int size; + + size = REG_FIELD_GET(CDCLK_SQUASH_WINDOW_SIZE_MASK, squash_ctl) + 1; + waveform = REG_FIELD_GET(CDCLK_SQUASH_WAVEFORM_MASK, squash_ctl) >> (16 - size); + + cdclk_config->cdclk = DIV_ROUND_CLOSEST(hweight16(waveform) * + cdclk_config->vco, size * div); + } else { + cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div); + } out: /* @@ -1626,6 +1663,26 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, } } +static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv, + int cdclk) +{ + const struct intel_cdclk_vals *table = dev_priv->cdclk.table; + int i; + + if (cdclk == dev_priv->cdclk.hw.bypass) + return 0; + + for (i = 0; table[i].refclk; i++) + if (table[i].refclk == dev_priv->cdclk.hw.ref && + table[i].cdclk == cdclk) + return table[i].waveform; + + drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", + cdclk, dev_priv->cdclk.hw.ref); + + return 0xffff; +} + static void bxt_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) @@ -1633,6 +1690,8 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; u32 val; 
+ u16 waveform; + int clock; int ret; /* Inform power controller of upcoming frequency change. */ @@ -1676,7 +1735,24 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, bxt_de_pll_enable(dev_priv, vco); } - val = bxt_cdclk_cd2x_div_sel(dev_priv, cdclk, vco) | + waveform = cdclk_squash_waveform(dev_priv, cdclk); + + if (waveform) + clock = vco / 2; + else + clock = cdclk; + + if (has_cdclk_squasher(dev_priv)) { + u32 squash_ctl = 0; + + if (waveform) + squash_ctl = CDCLK_SQUASH_ENABLE | + CDCLK_SQUASH_WINDOW_SIZE(0xf) | waveform; + + intel_de_write(dev_priv, CDCLK_SQUASH_CTL, squash_ctl); + } + + val = bxt_cdclk_cd2x_div_sel(dev_priv, clock, vco) | bxt_cdclk_cd2x_pipe(dev_priv, pipe) | skl_cdclk_decimal(cdclk); @@ -1690,7 +1766,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, CDCLK_CTL, val); if (pipe != INVALID_PIPE) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); if (DISPLAY_VER(dev_priv) >= 11) { ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, @@ -1728,7 +1804,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) { u32 cdctl, expected; - int cdclk, vco; + int cdclk, clock, vco; intel_update_cdclk(dev_priv); intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); @@ -1764,8 +1840,12 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) expected = skl_cdclk_decimal(cdclk); /* Figure out what CD2X divider we should be using for this cdclk */ - expected |= bxt_cdclk_cd2x_div_sel(dev_priv, - dev_priv->cdclk.hw.cdclk, + if (has_cdclk_squasher(dev_priv)) + clock = dev_priv->cdclk.hw.vco / 2; + else + clock = dev_priv->cdclk.hw.cdclk; + + expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock, dev_priv->cdclk.hw.vco); /* @@ -1881,6 +1961,25 @@ static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv, a->ref == b->ref; } +static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv, + const struct intel_cdclk_config *a, + const struct intel_cdclk_config *b) +{ + /* + * FIXME should store a bit more state in intel_cdclk_config + * to differentiate squasher vs. cd2x divider properly. For + * the moment all platforms with squasher use a fixed cd2x + * divider. + */ + if (!has_cdclk_squasher(dev_priv)) + return false; + + return a->cdclk != b->cdclk && + a->vco != 0 && + a->vco == b->vco && + a->ref == b->ref; +} + /** * intel_cdclk_needs_modeset - Determine if changing between the CDCLK * configurations requires a modeset on all pipes @@ -1918,7 +2017,17 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv, if (DISPLAY_VER(dev_priv) < 10 && !IS_BROXTON(dev_priv)) return false; + /* + * FIXME should store a bit more state in intel_cdclk_config + * to differentiate squasher vs. cd2x divider properly. For + * the moment all platforms with squasher use a fixed cd2x + * divider.
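+ *
+ * For reference, bxt_get_cdclk() above derives the squashed
+ * frequency as hweight16(waveform) * vco / (size * div). Checked
+ * against the first dg2 table entry: vco = 38400 * 34 = 1305600 kHz,
+ * waveform 0x8888 enables 4 of the 16 window slots, div = 2, so the
+ * result is 4 * 1305600 / 32 = 163200 kHz.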
+ */ + if (has_cdclk_squasher(dev_priv)) + return false; + return a->cdclk != b->cdclk && + a->vco != 0 && a->vco == b->vco && a->ref == b->ref; } @@ -2529,6 +2638,58 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state) return to_intel_cdclk_state(cdclk_state); } +int intel_cdclk_atomic_check(struct intel_atomic_state *state, + bool *need_cdclk_calc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_cdclk_state *old_cdclk_state; + const struct intel_cdclk_state *new_cdclk_state; + struct intel_plane_state *plane_state; + struct intel_bw_state *new_bw_state; + struct intel_plane *plane; + int min_cdclk = 0; + enum pipe pipe; + int ret; + int i; + + /* + * active_planes bitmask has been updated, and potentially affected + * planes are part of the state. We can now compute the minimum cdclk + * for each plane. + */ + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc); + if (ret) + return ret; + } + + old_cdclk_state = intel_atomic_get_old_cdclk_state(state); + new_cdclk_state = intel_atomic_get_new_cdclk_state(state); + + if (new_cdclk_state && + old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) + *need_cdclk_calc = true; + + ret = intel_cdclk_bw_calc_min_cdclk(state); + if (ret) + return ret; + + new_bw_state = intel_atomic_get_new_bw_state(state); + + if (!new_cdclk_state || !new_bw_state) + return 0; + + for_each_pipe(i915, pipe) { + min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk); + + /* Currently do this change only if we need to increase */ + if (new_bw_state->min_cdclk > min_cdclk) + *need_cdclk_calc = true; + } + + return 0; +} + int intel_cdclk_init(struct drm_i915_private *dev_priv) { struct intel_cdclk_state *cdclk_state; @@ -2592,7 +2753,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) struct intel_crtc_state *crtc_state; pipe = ilog2(new_cdclk_state->active_pipes); - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) @@ -2602,9 +2763,14 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) pipe = INVALID_PIPE; } - if (intel_cdclk_can_crawl(dev_priv, - &old_cdclk_state->actual, - &new_cdclk_state->actual)) { + if (intel_cdclk_can_squash(dev_priv, + &old_cdclk_state->actual, + &new_cdclk_state->actual)) { + drm_dbg_kms(&dev_priv->drm, + "Can change cdclk via squasher\n"); + } else if (intel_cdclk_can_crawl(dev_priv, + &old_cdclk_state->actual, + &new_cdclk_state->actual)) { drm_dbg_kms(&dev_priv->drm, "Can change cdclk via crawl\n"); } else if (pipe != INVALID_PIPE) { diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index 309b3f394e24..fc638522e445 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -16,13 +16,6 @@ struct drm_i915_private; struct intel_atomic_state; struct intel_crtc_state; -struct intel_cdclk_vals { - u32 cdclk; - u16 refclk; - u8 divider; /* CD2X divider * 2 */ - u8 ratio; -}; - struct intel_cdclk_state { struct intel_global_state base; @@ -70,7 +63,8 @@ void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config, int intel_modeset_calc_cdclk(struct intel_atomic_state *state); void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config); -int intel_cdclk_bw_calc_min_cdclk(struct 
intel_atomic_state *state); +int intel_cdclk_atomic_check(struct intel_atomic_state *state, + bool *need_cdclk_calc); struct intel_cdclk_state * intel_atomic_get_cdclk_state(struct intel_atomic_state *state); diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 840f13b75492..de3ded1e327a 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -808,6 +808,14 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state) } } +static int glk_degamma_lut_size(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 13) + return 131; + else + return 35; +} + static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -827,8 +835,8 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) for (i = 0; i < lut_size; i++) { /* - * First 33 entries represent range from 0 to 1.0 - * 34th and 35th entry will represent extended range + * First lut_size entries represent range from 0 to 1.0 + * 3 additional lut entries will represent extended range * inputs 3.0 and 7.0 respectively, currently clamped * at 1.0. Since the precision is 16bit, the user * value can be directly filled to register. @@ -844,7 +852,7 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) } /* Clamp values > 1.0. */ - while (i++ < 35) + while (i++ < glk_degamma_lut_size(dev_priv)) intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); @@ -1574,6 +1582,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state) static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); u32 gamma_mode = 0; if (crtc_state->hw.degamma_lut) @@ -1586,6 +1596,13 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state) if (!crtc_state->hw.gamma_lut || crtc_state_is_legacy_gamma(crtc_state)) gamma_mode |= GAMMA_MODE_MODE_8BIT; + /* + * Enable 10bit gamma for D13 + * ToDo: Extend to Logarithmic Gamma once the new UAPI + * is accepted and implemented by a userspace consumer + */ + else if (DISPLAY_VER(i915) >= 13) + gamma_mode |= GAMMA_MODE_MODE_10BIT; else gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED; diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index f0f28572dfdc..6a3893c8ff22 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -321,8 +321,8 @@ static void hsw_enable_crt(struct intel_atomic_state *state, intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON); - intel_wait_for_vblank(dev_priv, pipe); - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); + intel_crtc_wait_for_next_vblank(crtc); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } @@ -721,7 +721,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) intel_uncore_posting_read(uncore, pipeconf_reg); /* Wait for next Vblank to substitute * border color for Color info */ - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE); status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected : diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index 243d5cc29734..16c3ca66d9f0 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -12,8 +12,8 @@ #include <drm/drm_plane_helper.h> #include <drm/drm_vblank_work.h> -#include "i915_trace.h" #include "i915_vgpu.h" +#include "i9xx_plane.h" #include "icl_dsi.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" @@ -21,13 +21,13 @@ #include "intel_crtc.h" #include "intel_cursor.h" #include "intel_display_debugfs.h" +#include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_dsi.h" #include "intel_pipe_crc.h" #include "intel_psr.h" #include "intel_sprite.h" #include "intel_vrr.h" -#include "i9xx_plane.h" #include "skl_universal_plane.h" static void assert_vblank_disabled(struct drm_crtc *crtc) @@ -36,6 +36,38 @@ static void assert_vblank_disabled(struct drm_crtc *crtc) drm_crtc_vblank_put(crtc); } +struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915) +{ + return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0)); +} + +struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915, + enum pipe pipe) +{ + struct intel_crtc *crtc; + + for_each_intel_crtc(&i915->drm, crtc) { + if (crtc->pipe == pipe) + return crtc; + } + + return NULL; +} + +void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc) +{ + drm_crtc_wait_one_vblank(&crtc->base); +} + +void intel_wait_for_vblank_if_active(struct drm_i915_private *i915, + enum pipe pipe) +{ + struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); + + if (crtc->active) + intel_crtc_wait_for_next_vblank(crtc); +} + u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -327,18 +359,6 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) if (ret) goto fail; - BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) || - dev_priv->pipe_to_crtc_mapping[pipe] != NULL); - dev_priv->pipe_to_crtc_mapping[pipe] = crtc; - - if (DISPLAY_VER(dev_priv) < 9) { - enum i9xx_plane_id i9xx_plane = primary->i9xx_plane; - - BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || - dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL); - dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc; - } - if (DISPLAY_VER(dev_priv) >= 11) drm_crtc_create_scaling_filter_property(&crtc->base, BIT(DRM_SCALING_FILTER_DEFAULT) | diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h index a0039fdb1eb0..73077137fb99 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.h +++ b/drivers/gpu/drm/i915/display/intel_crtc.h @@ -8,6 +8,7 @@ #include <linux/types.h> +enum i9xx_plane_id; enum pipe; struct drm_display_mode; struct drm_i915_private; @@ -28,5 +29,11 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state); void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state); void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); void intel_wait_for_vblank_workers(struct intel_atomic_state *state); +struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915); +struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915, + enum pipe pipe); +void intel_wait_for_vblank_if_active(struct drm_i915_private *i915, + enum pipe pipe); +void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc); #endif diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c 
b/drivers/gpu/drm/i915/display/intel_ddi.c index f9e7e3d1c7d0..cab505277595 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -25,6 +25,7 @@ * */ +#include <drm/drm_privacy_screen_consumer.h> #include <drm/drm_scdc_helper.h> #include "i915_drv.h" @@ -1297,6 +1298,28 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), DKL_TX_DP20BITMODE, 0); + + if (IS_ALDERLAKE_P(dev_priv)) { + u32 val; + + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { + if (ln == 0) { + val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0); + val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(2); + } else { + val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(3); + val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(3); + } + } else { + val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0); + val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0); + } + + intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), + DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK | + DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, + val); + } } } @@ -2905,6 +2928,7 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state, if (port == PORT_A && DISPLAY_VER(dev_priv) < 9) intel_dp_stop_link_train(intel_dp, crtc_state); + drm_connector_update_privacy_screen(conn_state); intel_edp_backlight_on(crtc_state, conn_state); if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink) @@ -3111,6 +3135,7 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state, intel_drrs_update(intel_dp, crtc_state); intel_backlight_update(state, encoder, crtc_state, conn_state); + drm_connector_update_privacy_screen(conn_state); } void intel_ddi_update_pipe(struct intel_atomic_state *state, @@ -3922,6 +3947,19 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port) return NULL; } + if (dig_port->base.type == INTEL_OUTPUT_EDP) { + struct drm_device *dev = dig_port->base.base.dev; + struct drm_privacy_screen *privacy_screen; + + privacy_screen = drm_privacy_screen_get(dev->dev, NULL); + if (!IS_ERR(privacy_screen)) { + drm_connector_attach_privacy_screen_provider(&connector->base, + privacy_screen); + } else if (PTR_ERR(privacy_screen) != -ENODEV) { + drm_warn(dev, "Error getting privacy-screen\n"); + } + } + return connector; } diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index 78cd8f77b49d..e2dfb93a82bd 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -477,14 +477,14 @@ static const struct intel_ddi_buf_trans icl_combo_phy_trans_hdmi = { static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_dp[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ - { .icl = { 0xA, 0x47, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */ - { .icl = { 0xC, 0x64, 0x34, 0x00, 0x0B } }, /* 350 700 6.0 */ - { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 350 900 8.2 */ + { .icl = { 0xA, 0x47, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */ + { .icl = { 0xC, 0x64, 0x33, 0x00, 0x0C } }, /* 350 700 6.0 */ + { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x46, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ - { .icl = { 0xC, 0x64, 0x38, 0x00, 0x07 } }, /* 500 700 2.9 */ + { .icl = { 0xC, 0x64, 0x37, 0x00, 0x08 } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x61, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ - { .icl = { 0x6, 0x7F, 0x38, 0x00, 
0x07 } }, /* 600 900 3.5 */ + { .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; @@ -1032,6 +1032,21 @@ bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table) return table == &tgl_combo_phy_trans_edp_hbr2_hobl; } +static bool use_edp_hobl(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + return i915->vbt.edp.hobl && !intel_dp->hobl_failed; +} + +static bool use_edp_low_vswing(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + return i915->vbt.edp.low_vswing; +} + static const struct intel_ddi_buf_trans * intel_get_buf_trans(const struct intel_ddi_buf_trans *trans, int *num_entries) { @@ -1057,14 +1072,12 @@ bdw_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) return intel_get_buf_trans(&bdw_trans_fdi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&bdw_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return intel_get_buf_trans(&bdw_trans_edp, n_entries); else return intel_get_buf_trans(&bdw_trans_dp, n_entries); @@ -1094,12 +1107,10 @@ skl_y_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &skl_y_trans_dp, n_entries); @@ -1110,12 +1121,10 @@ skl_u_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &skl_u_trans_dp, n_entries); @@ -1126,12 +1135,10 @@ skl_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &skl_trans_dp, n_entries); @@ -1142,12 +1149,10 @@ kbl_y_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries); else if 
(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &kbl_y_trans_dp, n_entries); @@ -1158,12 +1163,10 @@ kbl_u_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &kbl_u_trans_dp, n_entries); @@ -1174,12 +1177,10 @@ kbl_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries); else return _skl_get_buf_trans_dp(encoder, &kbl_trans_dp, n_entries); @@ -1190,12 +1191,10 @@ bxt_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&bxt_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return intel_get_buf_trans(&bxt_trans_edp, n_entries); else return intel_get_buf_trans(&bxt_trans_dp, n_entries); @@ -1215,12 +1214,10 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (crtc_state->port_clock > 540000) { return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); - } else if (dev_priv->vbt.edp.low_vswing) { + } else if (use_edp_low_vswing(encoder)) { return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } @@ -1282,12 +1279,10 @@ ehl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - dev_priv->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return intel_get_buf_trans(&ehl_combo_phy_trans_dp, n_entries); @@ -1309,12 +1304,10 @@ jsl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - dev_priv->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else return 
intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); @@ -1346,16 +1339,13 @@ tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) { return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); - } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) { + } else if (use_edp_hobl(encoder)) { return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - } else if (dev_priv->vbt.edp.low_vswing) { + } else if (use_edp_low_vswing(encoder)) { return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } @@ -1394,16 +1384,13 @@ dg1_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); - else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) + else if (use_edp_hobl(encoder)) return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - else if (dev_priv->vbt.edp.low_vswing) + else if (use_edp_low_vswing(encoder)) return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); else @@ -1439,16 +1426,13 @@ rkl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) { return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); - } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) { + } else if (use_edp_hobl(encoder)) { return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - } else if (dev_priv->vbt.edp.low_vswing) { + } else if (use_edp_low_vswing(encoder)) { return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } @@ -1485,14 +1469,11 @@ adls_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr3, n_entries); - else if (i915->vbt.edp.hobl && !intel_dp->hobl_failed) + else if (use_edp_hobl(encoder)) return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - else if (i915->vbt.edp.low_vswing) + else if (use_edp_low_vswing(encoder)) return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr2, n_entries); else return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); @@ -1527,16 +1508,13 @@ adlp_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) { return intel_get_buf_trans(&adlp_combo_phy_trans_edp_hbr3, n_entries); - } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) { + } else if (use_edp_hobl(encoder)) { return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - } else 
if (dev_priv->vbt.edp.low_vswing) { + } else if (use_edp_low_vswing(encoder)) { return intel_get_buf_trans(&adlp_combo_phy_trans_edp_up_to_hbr2, n_entries); } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index df4c1a17ba5f..7d558217ca16 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -32,6 +32,7 @@ #include <linux/module.h> #include <linux/dma-resv.h> #include <linux/slab.h> +#include <linux/vga_switcheroo.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -41,6 +42,7 @@ #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_privacy_screen_consumer.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> @@ -697,7 +699,7 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, * the highest stride limits of them all, * if in case pipe A is disabled, use the first pipe from pipe_mask. */ - crtc = intel_get_first_crtc(dev_priv); + crtc = intel_first_crtc(dev_priv); if (!crtc) return 0; @@ -775,7 +777,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc, */ if (HAS_GMCH(dev_priv) && intel_set_memory_cxsr(dev_priv, false)) - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); /* * Gen2 reports pipe underruns whenever all planes are disabled. @@ -785,7 +787,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc, intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); intel_plane_disable_arm(plane, crtc_state); - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); } unsigned int @@ -841,7 +843,7 @@ __intel_display_resume(struct drm_device *dev, static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) { return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && - intel_has_gpu_reset(&dev_priv->gt)); + intel_has_gpu_reset(to_gt(dev_priv))); } void intel_display_prepare_reset(struct drm_i915_private *dev_priv) @@ -860,14 +862,14 @@ void intel_display_prepare_reset(struct drm_i915_private *dev_priv) return; /* We have a modeset vs reset deadlock, defensively unbreak it. 
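 * set_bit() is atomic but unordered, hence the smp_mb__after_atomic()
 * below: the flag update must be visible before wake_up_bit() wakes
 * anyone waiting on it.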
*/ - set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags); + set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags); smp_mb__after_atomic(); - wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET); + wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET); if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { drm_dbg_kms(&dev_priv->drm, "Modeset potentially stuck, unbreaking through wedging\n"); - intel_gt_set_wedged(&dev_priv->gt); + intel_gt_set_wedged(to_gt(dev_priv)); } /* @@ -918,7 +920,7 @@ void intel_display_finish_reset(struct drm_i915_private *dev_priv) return; /* reset doesn't touch the display */ - if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) + if (!test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags)) return; state = fetch_and_zero(&dev_priv->modeset_restore_state); @@ -956,7 +958,7 @@ unlock: drm_modeset_acquire_fini(ctx); mutex_unlock(&dev->mode_config.mutex); - clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags); + clear_bit_unlock(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags); } static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state) @@ -991,6 +993,10 @@ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state) else if (DISPLAY_VER(dev_priv) >= 13) tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP; + /* Wa_14010547955:dg2 */ + if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER)) + tmp |= DG2_RENDER_CCSTAG_4_3_EN; + intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp); } @@ -1011,7 +1017,7 @@ bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) if (cleanup_done) continue; - drm_crtc_wait_one_vblank(crtc); + intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc)); return true; } @@ -1158,7 +1164,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state) } /* We need to wait for a vblank before we can disable the plane. 
*/ - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); } static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc) @@ -1389,7 +1395,6 @@ static void intel_crtc_disable_flip_done(struct intel_atomic_state *state, static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = @@ -1415,7 +1420,7 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, } if (need_vbl_wait) - intel_wait_for_vblank(i915, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); } static void intel_pre_plane_update(struct intel_atomic_state *state, @@ -1434,7 +1439,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, hsw_disable_ips(old_crtc_state); if (intel_fbc_pre_update(state, crtc)) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); if (!needs_async_flip_vtd_wa(old_crtc_state) && needs_async_flip_vtd_wa(new_crtc_state)) @@ -1466,7 +1471,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, */ if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active && new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); /* * IVB workaround: must disable low power watermarks for at least @@ -1477,7 +1482,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, */ if (old_crtc_state->hw.active && new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv)) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); /* * If we're doing a modeset we don't need to do any @@ -1893,8 +1898,8 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, * in case there are more corner cases we don't know about. */ if (new_crtc_state->has_pch_encoder) { - intel_wait_for_vblank(dev_priv, pipe); - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); + intel_crtc_wait_for_next_vblank(crtc); } intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); @@ -2094,7 +2099,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, intel_encoders_enable(state, crtc); if (psl_clkgate_wa) { - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); } @@ -2102,8 +2107,12 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, * to change the workaround. 
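 * The two back-to-back vblank waits give the workaround pipe at least
 * one full frame of settling time: a single wait can complete almost
 * immediately if a vblank is just about to fire, so only the second
 * wait guarantees a complete frame interval has elapsed.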
*/ hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe; if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { - intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); - intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); + struct intel_crtc *wa_crtc; + + wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe); + + intel_crtc_wait_for_next_vblank(wa_crtc); + intel_crtc_wait_for_next_vblank(wa_crtc); } } @@ -2529,7 +2538,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, /* prevents spurious underruns */ if (DISPLAY_VER(dev_priv) == 2) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); } static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) @@ -2560,7 +2569,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state, * wait for planes to fully turn off before disabling the pipe. */ if (DISPLAY_VER(dev_priv) == 2) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); intel_encoders_disable(state, crtc); @@ -4645,7 +4654,8 @@ found: drm_atomic_state_put(state); /* let the connector get through one full cycle before testing */ - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); + return true; fail: @@ -4830,7 +4840,7 @@ intel_encoder_current_mode(struct intel_encoder *encoder) if (!encoder->get_hw_state(encoder, &pipe)) return NULL; - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) @@ -5159,13 +5169,13 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) if (icl_is_hdr_plane(dev_priv, plane->id)) { if (linked->id == PLANE_SPRITE5) - plane_state->cus_ctl |= PLANE_CUS_PLANE_7; + plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL; else if (linked->id == PLANE_SPRITE4) - plane_state->cus_ctl |= PLANE_CUS_PLANE_6; + plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL; else if (linked->id == PLANE_SPRITE3) - plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL; + plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL; else if (linked->id == PLANE_SPRITE2) - plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL; + plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL; else MISSING_CASE(linked->id); } @@ -7546,59 +7556,6 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state) return 0; } -static int intel_atomic_check_cdclk(struct intel_atomic_state *state, - bool *need_cdclk_calc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_cdclk_state *old_cdclk_state; - const struct intel_cdclk_state *new_cdclk_state; - struct intel_plane_state *plane_state; - struct intel_bw_state *new_bw_state; - struct intel_plane *plane; - int min_cdclk = 0; - enum pipe pipe; - int ret; - int i; - /* - * active_planes bitmask has been updated, and potentially - * affected planes are part of the state. We can now - * compute the minimum cdclk for each plane. 
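 * (This check does not simply disappear: intel_atomic_check() below
 * switches from intel_atomic_check_cdclk() to intel_cdclk_atomic_check(),
 * which takes over the decision of whether cdclk must be recomputed.)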
- */ - for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc); - if (ret) - return ret; - } - - old_cdclk_state = intel_atomic_get_old_cdclk_state(state); - new_cdclk_state = intel_atomic_get_new_cdclk_state(state); - - if (new_cdclk_state && - old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) - *need_cdclk_calc = true; - - ret = intel_cdclk_bw_calc_min_cdclk(state); - if (ret) - return ret; - - new_bw_state = intel_atomic_get_new_bw_state(state); - - if (!new_cdclk_state || !new_bw_state) - return 0; - - for_each_pipe(dev_priv, pipe) { - min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk); - - /* - * Currently do this change only if we need to increase - */ - if (new_bw_state->min_cdclk > min_cdclk) - *need_cdclk_calc = true; - } - - return 0; -} - static int intel_atomic_check_crtcs(struct intel_atomic_state *state) { struct intel_crtc_state *crtc_state; @@ -8039,7 +7996,6 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; - intel_fbc_choose_crtc(dev_priv, state); ret = intel_compute_global_watermarks(state); if (ret) goto fail; @@ -8048,7 +8004,7 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; - ret = intel_atomic_check_cdclk(state, &any_ms); + ret = intel_cdclk_atomic_check(state, &any_ms); if (ret) goto fail; @@ -8071,6 +8027,10 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; + ret = intel_fbc_atomic_check(state); + if (ret) + goto fail; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (new_crtc_state->uapi.async_flip) { @@ -8462,7 +8422,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, &old_crtc_state->wm.skl.ddb) && (update_pipes | modeset_pipes)) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); } } @@ -8553,19 +8513,19 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat for (;;) { prepare_to_wait(&intel_state->commit_ready.wait, &wait_fence, TASK_UNINTERRUPTIBLE); - prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags, + prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET), &wait_reset, TASK_UNINTERRUPTIBLE); if (i915_sw_fence_done(&intel_state->commit_ready) || - test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) + test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags)) break; schedule(); } finish_wait(&intel_state->commit_ready.wait, &wait_fence); - finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags, + finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET), &wait_reset); } @@ -8962,8 +8922,8 @@ static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv) struct intel_plane *plane; for_each_intel_plane(&dev_priv->drm, plane) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, - plane->pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, + plane->pipe); plane->base.possible_crtcs = drm_crtc_mask(&crtc->base); } @@ -9956,7 +9916,7 @@ int intel_modeset_init(struct drm_i915_private *i915) void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); /* 640x480@60Hz, ~25175 kHz */ struct dpll clock = { .m1 = 18, @@ -10029,7 +9989,7 @@ void i830_enable_pipe(struct 
drm_i915_private *dev_priv, enum pipe pipe) void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n", pipe_name(pipe)); @@ -10081,7 +10041,7 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", plane->base.base.id, plane->base.name); - plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + plane_crtc = intel_crtc_for_pipe(dev_priv, pipe); intel_plane_disable_noatomic(plane_crtc, plane); } } @@ -10334,7 +10294,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv) visible = plane->get_hw_state(plane, &pipe); - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); intel_set_plane_visible(crtc_state, plane_state, visible); @@ -10401,7 +10361,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) pipe = 0; if (encoder->get_hw_state(encoder, &pipe)) { - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); encoder->base.crtc = &crtc->base; @@ -10854,6 +10814,27 @@ void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915) intel_bios_driver_remove(i915); } +bool intel_modeset_probe_defer(struct pci_dev *pdev) +{ + struct drm_privacy_screen *privacy_screen; + + /* + * apple-gmux is needed on dual GPU MacBook Pro + * to probe the panel if we're the inactive GPU. + */ + if (vga_switcheroo_client_probe_defer(pdev)) + return true; + + /* If the LCD panel has a privacy-screen, wait for it */ + privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL); + if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER) + return true; + + drm_privacy_screen_put(privacy_screen); + + return false; +} + void intel_display_driver_register(struct drm_i915_private *i915) { if (!HAS_DISPLAY(i915)) diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 38c15ec30ee7..b61b75248ded 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -57,6 +57,7 @@ struct intel_plane; struct intel_plane_state; struct intel_remapped_info; struct intel_rotation_info; +struct pci_dev; enum i915_gpio { GPIOA, @@ -346,9 +347,33 @@ enum phy_fia { FIA3, }; +enum hpd_pin { + HPD_NONE = 0, + HPD_TV = HPD_NONE, /* TV is known to be unreliable */ + HPD_CRT, + HPD_SDVO_B, + HPD_SDVO_C, + HPD_PORT_A, + HPD_PORT_B, + HPD_PORT_C, + HPD_PORT_D, + HPD_PORT_E, + HPD_PORT_TC1, + HPD_PORT_TC2, + HPD_PORT_TC3, + HPD_PORT_TC4, + HPD_PORT_TC5, + HPD_PORT_TC6, + + HPD_NUM_PINS +}; + +#define for_each_hpd_pin(__pin) \ + for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) + #define for_each_pipe(__dev_priv, __p) \ for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \ - for_each_if(INTEL_INFO(__dev_priv)->pipe_mask & BIT(__p)) + for_each_if(INTEL_INFO(__dev_priv)->display.pipe_mask & BIT(__p)) #define for_each_pipe_masked(__dev_priv, __p, __mask) \ for_each_pipe(__dev_priv, __p) \ @@ -356,7 +381,7 @@ enum phy_fia { #define for_each_cpu_transcoder(__dev_priv, __t) \ for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \ - for_each_if (INTEL_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t)) + for_each_if 
(INTEL_INFO(__dev_priv)->display.cpu_transcoder_mask & BIT(__t)) #define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \ for_each_cpu_transcoder(__dev_priv, __t) \ @@ -616,6 +641,7 @@ void intel_display_driver_register(struct drm_i915_private *i915); void intel_display_driver_unregister(struct drm_i915_private *i915); /* modesetting */ +bool intel_modeset_probe_defer(struct pci_dev *pdev); void intel_modeset_init_hw(struct drm_i915_private *i915); int intel_modeset_init_noirq(struct drm_i915_private *i915); int intel_modeset_init_nogem(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index acf70ae66a29..572445299b04 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -40,52 +40,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) return 0; } -static int i915_fbc_status(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_fbc *fbc = &dev_priv->fbc; - intel_wakeref_t wakeref; - - if (!HAS_FBC(dev_priv)) - return -ENODEV; - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - mutex_lock(&fbc->lock); - - if (intel_fbc_is_active(fbc)) { - seq_puts(m, "FBC enabled\n"); - seq_printf(m, "Compressing: %s\n", - yesno(intel_fbc_is_compressing(fbc))); - } else { - seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); - } - - mutex_unlock(&fbc->lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - -static int i915_fbc_false_color_get(void *data, u64 *val) -{ - struct drm_i915_private *dev_priv = data; - - *val = dev_priv->fbc.false_color; - - return 0; -} - -static int i915_fbc_false_color_set(void *data, u64 val) -{ - struct drm_i915_private *dev_priv = data; - - return intel_fbc_set_false_color(&dev_priv->fbc, val); -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops, - i915_fbc_false_color_get, i915_fbc_false_color_set, - "%llu\n"); - static int i915_ips_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -2044,9 +1998,7 @@ i915_fifo_underrun_reset_write(struct file *filp, return ret; } - ret = intel_fbc_reset_underrun(&dev_priv->fbc); - if (ret) - return ret; + intel_fbc_reset_underrun(dev_priv); return cnt; } @@ -2060,7 +2012,6 @@ static const struct file_operations i915_fifo_underrun_reset_ops = { static const struct drm_info_list intel_display_debugfs_list[] = { {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, - {"i915_fbc_status", i915_fbc_status, 0}, {"i915_ips_status", i915_ips_status, 0}, {"i915_sr_status", i915_sr_status, 0}, {"i915_opregion", i915_opregion, 0}, @@ -2085,7 +2036,6 @@ static const struct { {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, - {"i915_fbc_false_color", &i915_fbc_false_color_fops}, {"i915_dp_test_data", &i915_displayport_test_data_fops}, {"i915_dp_test_type", &i915_displayport_test_type_fops}, {"i915_dp_test_active", &i915_displayport_test_active_fops}, @@ -2112,6 +2062,8 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) drm_debugfs_create_files(intel_display_debugfs_list, ARRAY_SIZE(intel_display_debugfs_list), minor->debugfs_root, minor); + + intel_fbc_debugfs_register(i915); } static int i915_panel_show(struct seq_file *m, void *data) diff --git 
a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 229b4c127c6c..05babdcf5f2e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -5370,7 +5370,7 @@ static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv) static void icl_mbus_init(struct drm_i915_private *dev_priv) { - unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask; + unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask; u32 mask, val, i; if (IS_ALDERLAKE_P(dev_priv)) @@ -5830,7 +5830,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) enum intel_dram_type type = dev_priv->dram_info.type; u8 num_channels = dev_priv->dram_info.num_channels; const struct buddy_page_mask *table; - unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask; + unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask; int config, i; /* BW_BUDDY registers are not used on dgpu's beyond DG1 */ diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.c b/drivers/gpu/drm/i915/display/intel_display_trace.c new file mode 100644 index 000000000000..737979ada869 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_trace.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "intel_display_trace.h" +#endif diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h new file mode 100644 index 000000000000..4043e1276383 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_trace.h @@ -0,0 +1,587 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright © 2021 Intel Corporation + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i915 + +#if !defined(__INTEL_DISPLAY_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ) +#define __INTEL_DISPLAY_TRACE_H__ + +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include "i915_drv.h" +#include "intel_crtc.h" +#include "intel_display_types.h" + +TRACE_EVENT(intel_pipe_enable, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __array(u32, frame, 3) + __array(u32, scanline, 3) + __field(enum pipe, pipe) + ), + TP_fast_assign( + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); + } + __entry->pipe = crtc->pipe; + ), + + TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame[PIPE_A], __entry->scanline[PIPE_A], + __entry->frame[PIPE_B], __entry->scanline[PIPE_B], + __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) +); + +TRACE_EVENT(intel_pipe_disable, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __array(u32, frame, 3) + __array(u32, scanline, 3) + __field(enum pipe, pipe) + ), + + TP_fast_assign( + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); + } + __entry->pipe = crtc->pipe; + ), + + TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: 
frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame[PIPE_A], __entry->scanline[PIPE_A], + __entry->frame[PIPE_B], __entry->scanline[PIPE_B], + __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) +); + +TRACE_EVENT(intel_pipe_crc, + TP_PROTO(struct intel_crtc *crtc, const u32 *crcs), + TP_ARGS(crtc, crcs), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __array(u32, crcs, 5) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline, + __entry->crcs[0], __entry->crcs[1], __entry->crcs[2], + __entry->crcs[3], __entry->crcs[4]) +); + +TRACE_EVENT(intel_cpu_fifo_underrun, + TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), + TP_ARGS(dev_priv, pipe), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + __entry->pipe = pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_pch_fifo_underrun, + TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder), + TP_ARGS(dev_priv, pch_transcoder), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + enum pipe pipe = pch_transcoder; + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + __entry->pipe = pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pch transcoder %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_memory_cxsr, + TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new), + TP_ARGS(dev_priv, old, new), + + TP_STRUCT__entry( + __array(u32, frame, 3) + __array(u32, scanline, 3) + __field(bool, old) + __field(bool, new) + ), + + TP_fast_assign( + struct intel_crtc *crtc; + for_each_intel_crtc(&dev_priv->drm, crtc) { + __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); + __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); + } + __entry->old = old; + __entry->new = new; + ), + + TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + onoff(__entry->old), onoff(__entry->new), + __entry->frame[PIPE_A], __entry->scanline[PIPE_A], + __entry->frame[PIPE_B], __entry->scanline[PIPE_B], + __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) +); + +TRACE_EVENT(g4x_wm, + TP_PROTO(struct intel_crtc *crtc, const struct g4x_wm_values *wm), + TP_ARGS(crtc, wm), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u16, primary) + __field(u16, sprite) + __field(u16, cursor) + __field(u16, sr_plane) + __field(u16, sr_cursor) + __field(u16, sr_fbc) + __field(u16, hpll_plane) + __field(u16, hpll_cursor) + __field(u16, hpll_fbc) + __field(bool, cxsr) + __field(bool, hpll) + __field(bool, fbc) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + 
__entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; + __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; + __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR]; + __entry->sr_plane = wm->sr.plane; + __entry->sr_cursor = wm->sr.cursor; + __entry->sr_fbc = wm->sr.fbc; + __entry->hpll_plane = wm->hpll.plane; + __entry->hpll_cursor = wm->hpll.cursor; + __entry->hpll_fbc = wm->hpll.fbc; + __entry->cxsr = wm->cxsr; + __entry->hpll = wm->hpll_en; + __entry->fbc = wm->fbc_en; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline, + __entry->primary, __entry->sprite, __entry->cursor, + yesno(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc, + yesno(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc, + yesno(__entry->fbc)) +); + +TRACE_EVENT(vlv_wm, + TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm), + TP_ARGS(crtc, wm), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u32, level) + __field(u32, cxsr) + __field(u32, primary) + __field(u32, sprite0) + __field(u32, sprite1) + __field(u32, cursor) + __field(u32, sr_plane) + __field(u32, sr_cursor) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + __entry->level = wm->level; + __entry->cxsr = wm->cxsr; + __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; + __entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; + __entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1]; + __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR]; + __entry->sr_plane = wm->sr.plane; + __entry->sr_cursor = wm->sr.cursor; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline, __entry->level, __entry->cxsr, + __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor, + __entry->sr_plane, __entry->sr_cursor) +); + +TRACE_EVENT(vlv_fifo_size, + TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size), + TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u32, sprite0_start) + __field(u32, sprite1_start) + __field(u32, fifo_size) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + __entry->sprite0_start = sprite0_start; + __entry->sprite1_start = sprite1_start; + __entry->fifo_size = fifo_size; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline, __entry->sprite0_start, + __entry->sprite1_start, __entry->fifo_size) +); + +TRACE_EVENT(intel_plane_update_noarm, + TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), + TP_ARGS(plane, crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __array(int, src, 4) + __array(int, dst, 4) + __string(name, plane->name) + ), + + TP_fast_assign( + __assign_str(name, plane->name); + __entry->pipe = crtc->pipe; + __entry->frame = 
intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); + memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); + ), + + TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, + pipe_name(__entry->pipe), __get_str(name), + __entry->frame, __entry->scanline, + DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), + DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) +); + +TRACE_EVENT(intel_plane_update_arm, + TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), + TP_ARGS(plane, crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __array(int, src, 4) + __array(int, dst, 4) + __string(name, plane->name) + ), + + TP_fast_assign( + __assign_str(name, plane->name); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); + memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); + ), + + TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, + pipe_name(__entry->pipe), __get_str(name), + __entry->frame, __entry->scanline, + DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), + DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) +); + +TRACE_EVENT(intel_plane_disable_arm, + TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), + TP_ARGS(plane, crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __string(name, plane->name) + ), + + TP_fast_assign( + __assign_str(name, plane->name); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, plane %s, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __get_str(name), + __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_fbc_activate, + TP_PROTO(struct intel_plane *plane), + TP_ARGS(plane), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + plane->pipe); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_fbc_deactivate, + TP_PROTO(struct intel_plane *plane), + TP_ARGS(plane), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + plane->pipe); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_fbc_nuke, + TP_PROTO(struct intel_plane *plane), + TP_ARGS(plane), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + plane->pipe); + __entry->pipe = crtc->pipe; + __entry->frame = 
intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_crtc_vblank_work_start, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline) +); + +TRACE_EVENT(intel_crtc_vblank_work_end, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline) +); + +TRACE_EVENT(intel_pipe_update_start, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u32, min) + __field(u32, max) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + __entry->min = crtc->debug.min_vbl; + __entry->max = crtc->debug.max_vbl; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline, __entry->min, __entry->max) +); + +TRACE_EVENT(intel_pipe_update_vblank_evaded, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u32, min) + __field(u32, max) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = crtc->debug.start_vbl_count; + __entry->scanline = crtc->debug.scanline_start; + __entry->min = crtc->debug.min_vbl; + __entry->max = crtc->debug.max_vbl; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline, __entry->min, __entry->max) +); + +TRACE_EVENT(intel_pipe_update_end, + TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end), + TP_ARGS(crtc, frame, scanline_end), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = frame; + __entry->scanline = scanline_end; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline) +); + +TRACE_EVENT(intel_frontbuffer_invalidate, + TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin), + TP_ARGS(frontbuffer_bits, origin), + + TP_STRUCT__entry( + __field(unsigned int, frontbuffer_bits) + __field(unsigned int, origin) + ), + + TP_fast_assign( + __entry->frontbuffer_bits = frontbuffer_bits; + __entry->origin = origin; + ), + + TP_printk("frontbuffer_bits=0x%08x, origin=%u", + __entry->frontbuffer_bits, __entry->origin) +); + +TRACE_EVENT(intel_frontbuffer_flush, + TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin), + TP_ARGS(frontbuffer_bits, origin), + + TP_STRUCT__entry( + __field(unsigned int, 
frontbuffer_bits) + __field(unsigned int, origin) + ), + + TP_fast_assign( + __entry->frontbuffer_bits = frontbuffer_bits; + __entry->origin = origin; + ), + + TP_printk("frontbuffer_bits=0x%08x, origin=%u", + __entry->frontbuffer_bits, __entry->origin) +); + +#endif /* __INTEL_DISPLAY_TRACE_H__ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/display +#define TRACE_INCLUDE_FILE intel_display_trace +#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 0d68dec6ac83..41e3dd25a78f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -36,6 +36,7 @@ #include <drm/dp/drm_dp_mst_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_crtc.h> +#include <drm/drm_dsc.h> #include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> @@ -46,12 +47,19 @@ #include <drm/i915_mei_hdcp_interface.h> #include <media/cec-notifier.h> -#include "i915_drv.h" +#include "i915_vma.h" +#include "i915_vma_types.h" +#include "intel_bios.h" +#include "intel_display.h" +#include "intel_display_power.h" +#include "intel_dpll_mgr.h" +#include "intel_pm_types.h" struct drm_printer; struct __intel_global_objs_state; struct intel_ddi_buf_trans; struct intel_fbc; +struct intel_connector; /* * Display related stuff @@ -687,6 +695,8 @@ struct intel_plane_state { /* Clear Color Value */ u64 ccval; + + const char *no_fbc_reason; }; struct intel_initial_plane_config { @@ -1117,8 +1127,6 @@ struct intel_crtc_state { bool crc_enabled; - bool enable_fbc; - bool double_wide; int pbn; @@ -1773,35 +1781,6 @@ vlv_pipe_to_channel(enum pipe pipe) } } -static inline bool intel_pipe_valid(struct drm_i915_private *i915, enum pipe pipe) -{ - return (pipe >= 0 && - pipe < ARRAY_SIZE(i915->pipe_to_crtc_mapping) && - INTEL_INFO(i915)->pipe_mask & BIT(pipe) && - i915->pipe_to_crtc_mapping[pipe]); -} - -static inline struct intel_crtc * -intel_get_first_crtc(struct drm_i915_private *dev_priv) -{ - return to_intel_crtc(drm_crtc_from_index(&dev_priv->drm, 0)); -} - -static inline struct intel_crtc * -intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - /* pipe_to_crtc_mapping may have hole on any of 3 display pipe system */ - drm_WARN_ON(&dev_priv->drm, - !(INTEL_INFO(dev_priv)->pipe_mask & BIT(pipe))); - return dev_priv->pipe_to_crtc_mapping[pipe]; -} - -static inline struct intel_crtc * -intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum i9xx_plane_id plane) -{ - return dev_priv->plane_to_crtc_mapping[plane]; -} - struct intel_load_detect_pipe { struct drm_atomic_state *restore_state; }; @@ -1911,11 +1890,7 @@ dp_to_lspcon(struct intel_dp *intel_dp) return &dp_to_dig_port(intel_dp)->lspcon; } -static inline struct drm_i915_private * -dp_to_i915(struct intel_dp *intel_dp) -{ - return to_i915(dp_to_dig_port(intel_dp)->base.base.dev); -} +#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev) #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \ (intel_dp)->psr.source_support) @@ -2019,33 +1994,6 @@ intel_crtc_needs_modeset(const struct intel_crtc_state *crtc_state) return drm_atomic_crtc_needs_modeset(&crtc_state->uapi); } -static inline void -intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - struct intel_crtc *crtc = 
intel_get_crtc_for_pipe(dev_priv, pipe); - - drm_crtc_wait_one_vblank(&crtc->base); -} - -static inline void -intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - - if (crtc->active) - intel_wait_for_vblank(dev_priv, pipe); -} - -static inline bool intel_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier) -{ - return DISPLAY_VER(i915) >= 13 && modifier != DRM_FORMAT_MOD_LINEAR; -} - -static inline bool intel_fb_uses_dpt(const struct drm_framebuffer *fb) -{ - return fb && intel_modifier_uses_dpt(to_i915(fb->dev), fb->modifier); -} - static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state) { return i915_ggtt_offset(plane_state->ggtt_vma); diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 2dc9d632969d..a69b28d65a9b 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -45,8 +45,10 @@ #define GEN12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE -#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 12) -#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 12) +#define GEN13_DMC_MAX_FW_SIZE 0x20000 + +#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 14) +#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 14) MODULE_FIRMWARE(ADLP_DMC_PATH); #define ADLS_DMC_PATH DMC_PATH(adls, 2, 01) @@ -596,7 +598,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv, continue; offset = readcount + dmc->dmc_info[id].dmc_offset * 4; - if (fw->size - offset < 0) { + if (offset > fw->size) { drm_err(&dev_priv->drm, "Reading beyond the fw_size\n"); continue; } @@ -682,7 +684,7 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) if (IS_ALDERLAKE_P(dev_priv)) { dmc->fw_path = ADLP_DMC_PATH; dmc->required_version = ADLP_DMC_VERSION_REQUIRED; - dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE; + dmc->max_fw_size = GEN13_DMC_MAX_FW_SIZE; } else if (IS_ALDERLAKE_S(dev_priv)) { dmc->fw_path = ADLS_DMC_PATH; dmc->required_version = ADLS_DMC_VERSION_REQUIRED; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index d8a0f7a2b939..36ab58c25b64 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -47,6 +47,7 @@ #include "intel_audio.h" #include "intel_backlight.h" #include "intel_connector.h" +#include "intel_crtc.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" @@ -3905,7 +3906,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, to_intel_crtc_state(crtc->base.state); /* Keep underrun reporting disabled until things are stable */ - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); if (crtc_state->has_pch_encoder) diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 62c112daacf2..97cf3cac0105 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -34,6 +34,7 @@ * for some reason. 
*/ +#include "i915_drv.h" #include "intel_backlight.h" #include "intel_display_types.h" #include "intel_dp.h" diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index e264467de8ed..9451f336f28f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -21,11 +21,11 @@ * IN THE SOFTWARE. */ +#include "i915_drv.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_link_training.h" - static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp) { memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps)); diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 04a7af8340ca..1ce0c171f4fb 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -1823,7 +1823,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state) int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, const struct dpll *dpll) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); struct intel_crtc_state *crtc_state; crtc_state = intel_crtc_state_alloc(crtc); diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c index 963ca7155b06..8f674745e7e0 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.c +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -264,7 +264,7 @@ intel_dpt_create(struct intel_framebuffer *fb) vm = &dpt->vm; - vm->gt = &i915->gt; + vm->gt = to_gt(i915); vm->i915 = i915; vm->dma = i915->drm.dev; vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; @@ -279,8 +279,6 @@ intel_dpt_create(struct intel_framebuffer *fb) vm->vma_ops.bind_vma = dpt_bind_vma; vm->vma_ops.unbind_vma = dpt_unbind_vma; - vm->vma_ops.set_pages = ggtt_set_pages; - vm->vma_ops.clear_pages = clear_pages; vm->pte_encode = gen8_ggtt_pte_encode; diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c index 6b0301ba046e..a50422e03a7e 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.c +++ b/drivers/gpu/drm/i915/display/intel_dsi.c @@ -4,6 +4,8 @@ */ #include <drm/drm_mipi_dsi.h> + +#include "i915_drv.h" #include "intel_dsi.h" #include "intel_panel.h" diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index c4a743d0913f..23cfe2e5ce2a 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -6,6 +6,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_modeset_helper.h> +#include "i915_drv.h" #include "intel_display.h" #include "intel_display_types.h" #include "intel_dpt.h" @@ -658,6 +659,16 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) } } +static bool intel_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier) +{ + return DISPLAY_VER(i915) >= 13 && modifier != DRM_FORMAT_MOD_LINEAR; +} + +bool intel_fb_uses_dpt(const struct drm_framebuffer *fb) +{ + return fb && intel_modifier_uses_dpt(to_i915(fb->dev), fb->modifier); +} + unsigned int intel_cursor_alignment(const struct drm_i915_private *i915) { if (IS_I830(i915)) diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h index b54997175d6d..ba9df8986c1e 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fb.h @@ -90,4 
+90,6 @@ intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, const struct drm_mode_fb_cmd2 *user_mode_cmd); +bool intel_fb_uses_dpt(const struct drm_framebuffer *fb); + #endif /* __INTEL_FB_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c index 3b20f69e0240..31c15e5fca95 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_pin.c +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c @@ -7,13 +7,13 @@ * DOC: display pinning helpers */ -#include "intel_display_types.h" -#include "intel_fb_pin.h" -#include "intel_fb.h" +#include "gem/i915_gem_object.h" +#include "i915_drv.h" +#include "intel_display_types.h" #include "intel_dpt.h" - -#include "gem/i915_gem_object.h" +#include "intel_fb.h" +#include "intel_fb_pin.h" static struct i915_vma * intel_pin_fb_obj_dpt(struct drm_framebuffer *fb, diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 614e8697c068..160fd2bdafe5 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -41,9 +41,10 @@ #include <drm/drm_fourcc.h> #include "i915_drv.h" -#include "i915_trace.h" #include "i915_vgpu.h" +#include "intel_cdclk.h" #include "intel_de.h" +#include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" @@ -58,19 +59,53 @@ struct intel_fbc_funcs { void (*set_false_color)(struct intel_fbc *fbc, bool enable); }; -/* - * For SKL+, the plane source size used by the hardware is based on the value we - * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value - * we wrote to PIPESRC. - */ -static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache, - int *width, int *height) -{ - if (width) - *width = cache->plane.src_w; - if (height) - *height = cache->plane.src_h; -} +struct intel_fbc_state { + struct intel_plane *plane; + unsigned int cfb_stride; + unsigned int cfb_size; + unsigned int fence_y_offset; + u16 override_cfb_stride; + u16 interval; + s8 fence_id; +}; + +struct intel_fbc { + struct drm_i915_private *i915; + const struct intel_fbc_funcs *funcs; + + /* + * This is always the inner lock when overlapping with + * struct_mutex and it's the outer lock when overlapping + * with stolen_lock. + */ + struct mutex lock; + unsigned int possible_framebuffer_bits; + unsigned int busy_bits; + + struct drm_mm_node compressed_fb; + struct drm_mm_node compressed_llb; + + u8 limit; + + bool false_color; + + bool active; + bool activated; + bool flip_pending; + + bool underrun_detected; + struct work_struct underrun_work; + + /* + * This structure contains everything that's relevant to program the + * hardware registers. When we want to figure out if we need to disable + * and re-enable FBC for a new configuration we just check if there's + * something different in the struct. The genx_fbc_activate functions + * are supposed to read from it in order to program the registers. 
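 * As a rough illustration of that contract (field names as in this
 * struct; the driver's actual comparison logic may differ):
 *
 *	struct intel_fbc_state new_state = { .plane = plane, /* ... */ };
 *
 *	if (memcmp(&fbc->state, &new_state, sizeof(new_state)))
 *		; /* deactivate, then reprogram from new_state */
 *
 * i.e. any difference in the cached state forces FBC through a
 * disable/re-enable cycle rather than being patched up live.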
+ */ + struct intel_fbc_state state; + const char *no_fbc_reason; +}; /* plane stride in pixels */ static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state) @@ -86,25 +121,25 @@ static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane } /* plane stride based cfb stride in bytes, assuming 1:1 compression limit */ -static unsigned int _intel_fbc_cfb_stride(const struct intel_fbc_state_cache *cache) +static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state) { unsigned int cpp = 4; /* FBC always 4 bytes per pixel */ - return cache->fb.stride * cpp; + return intel_fbc_plane_stride(plane_state) * cpp; } /* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */ -static unsigned int skl_fbc_min_cfb_stride(struct intel_fbc *fbc, - const struct intel_fbc_state_cache *cache) +static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = fbc->i915; + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); unsigned int limit = 4; /* 1:4 compression limit is the worst case */ unsigned int cpp = 4; /* FBC always 4 bytes per pixel */ + unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16; unsigned int height = 4; /* FBC segment is 4 lines */ unsigned int stride; /* minimum segment stride we can use */ - stride = cache->plane.src_w * cpp * height / limit; + stride = width * cpp * height / limit; /* * Wa_16011863758: icl+ @@ -124,11 +159,10 @@ static unsigned int skl_fbc_min_cfb_stride(struct intel_fbc *fbc, } /* properly aligned cfb stride in bytes, assuming 1:1 compression limit */ -static unsigned int intel_fbc_cfb_stride(struct intel_fbc *fbc, - const struct intel_fbc_state_cache *cache) +static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = fbc->i915; - unsigned int stride = _intel_fbc_cfb_stride(cache); + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + unsigned int stride = _intel_fbc_cfb_stride(plane_state); /* * At least some of the platforms require each 4 line segment to @@ -136,33 +170,53 @@ static unsigned int intel_fbc_cfb_stride(struct intel_fbc *fbc, * that regardless of the compression limit we choose later. 
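 * Worked example: a plane stride of 1366 pixels at the fixed 4 bytes
 * per pixel gives _intel_fbc_cfb_stride() = 1366 * 4 = 5464 bytes,
 * which the ALIGN(stride, 512) below rounds up to 5632 bytes on
 * DISPLAY_VER() >= 9 (unless skl_fbc_min_cfb_stride() demands more).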
*/ if (DISPLAY_VER(i915) >= 9) - return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(fbc, cache)); + return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state)); else return stride; } -static unsigned int intel_fbc_cfb_size(struct intel_fbc *fbc, - const struct intel_fbc_state_cache *cache) +static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = fbc->i915; - int lines = cache->plane.src_h; + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + int lines = drm_rect_height(&plane_state->uapi.src) >> 16; if (DISPLAY_VER(i915) == 7) lines = min(lines, 2048); else if (DISPLAY_VER(i915) >= 8) lines = min(lines, 2560); - return lines * intel_fbc_cfb_stride(fbc, cache); + return lines * intel_fbc_cfb_stride(plane_state); +} + +static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state); + unsigned int stride = _intel_fbc_cfb_stride(plane_state); + const struct drm_framebuffer *fb = plane_state->hw.fb; + + /* + * Override stride in 64 byte units per 4 line segment. + * + * Gen9 hw miscalculates cfb stride for linear as + * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so + * we always need to use the override there. + */ + if (stride != stride_aligned || + (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR)) + return stride_aligned * 4 / 64; + + return 0; } static u32 i8xx_fbc_ctl(struct intel_fbc *fbc) { - const struct intel_fbc_reg_params *params = &fbc->params; + const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; unsigned int cfb_stride; u32 fbc_ctl; - cfb_stride = params->cfb_stride / fbc->limit; + cfb_stride = fbc_state->cfb_stride / fbc->limit; /* FBC_CTL wants 32B or 64B units */ if (DISPLAY_VER(i915) == 2) @@ -171,27 +225,27 @@ static u32 i8xx_fbc_ctl(struct intel_fbc *fbc) cfb_stride = (cfb_stride / 64) - 1; fbc_ctl = FBC_CTL_PERIODIC | - FBC_CTL_INTERVAL(params->interval) | + FBC_CTL_INTERVAL(fbc_state->interval) | FBC_CTL_STRIDE(cfb_stride); if (IS_I945GM(i915)) fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ - if (params->fence_id >= 0) - fbc_ctl |= FBC_CTL_FENCENO(params->fence_id); + if (fbc_state->fence_id >= 0) + fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id); return fbc_ctl; } static u32 i965_fbc_ctl2(struct intel_fbc *fbc) { - const struct intel_fbc_reg_params *params = &fbc->params; + const struct intel_fbc_state *fbc_state = &fbc->state; u32 fbc_ctl2; fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | - FBC_CTL_PLANE(params->crtc.i9xx_plane); + FBC_CTL_PLANE(fbc_state->plane->i9xx_plane); - if (params->fence_id >= 0) + if (fbc_state->fence_id >= 0) fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN; return fbc_ctl2; @@ -220,7 +274,7 @@ static void i8xx_fbc_deactivate(struct intel_fbc *fbc) static void i8xx_fbc_activate(struct intel_fbc *fbc) { - const struct intel_fbc_reg_params *params = &fbc->params; + const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; int i; @@ -232,7 +286,7 @@ static void i8xx_fbc_activate(struct intel_fbc *fbc) intel_de_write(i915, FBC_CONTROL2, i965_fbc_ctl2(fbc)); intel_de_write(i915, FBC_FENCE_OFF, - params->fence_y_offset); + fbc_state->fence_y_offset); } intel_de_write(i915, FBC_CONTROL, @@ -252,8 +306,8 @@ static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc) static void 
i8xx_fbc_nuke(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &fbc->params; - enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; + struct intel_fbc_state *fbc_state = &fbc->state; + enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane; struct drm_i915_private *dev_priv = fbc->i915; spin_lock_irq(&dev_priv->uncore.lock); @@ -288,8 +342,8 @@ static const struct intel_fbc_funcs i8xx_fbc_funcs = { static void i965_fbc_nuke(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &fbc->params; - enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; + struct intel_fbc_state *fbc_state = &fbc->state; + enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane; struct drm_i915_private *dev_priv = fbc->i915; spin_lock_irq(&dev_priv->uncore.lock); @@ -324,21 +378,21 @@ static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc) static u32 g4x_dpfc_ctl(struct intel_fbc *fbc) { - const struct intel_fbc_reg_params *params = &fbc->params; + const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; dpfc_ctl = g4x_dpfc_ctl_limit(fbc) | - DPFC_CTL_PLANE_G4X(params->crtc.i9xx_plane); + DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane); if (IS_G4X(i915)) dpfc_ctl |= DPFC_CTL_SR_EN; - if (params->fence_id >= 0) { + if (fbc_state->fence_id >= 0) { dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X; if (DISPLAY_VER(i915) < 6) - dpfc_ctl |= DPFC_CTL_FENCENO(params->fence_id); + dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id); } return dpfc_ctl; @@ -346,11 +400,11 @@ static u32 g4x_dpfc_ctl(struct intel_fbc *fbc) static void g4x_fbc_activate(struct intel_fbc *fbc) { - const struct intel_fbc_reg_params *params = &fbc->params; + const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; intel_de_write(i915, DPFC_FENCE_YOFF, - params->fence_y_offset); + fbc_state->fence_y_offset); intel_de_write(i915, DPFC_CONTROL, DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); @@ -397,11 +451,11 @@ static const struct intel_fbc_funcs g4x_fbc_funcs = { static void ilk_fbc_activate(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &fbc->params; + struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; intel_de_write(i915, ILK_DPFC_FENCE_YOFF, - params->fence_y_offset); + fbc_state->fence_y_offset); intel_de_write(i915, ILK_DPFC_CONTROL, DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); @@ -448,15 +502,15 @@ static const struct intel_fbc_funcs ilk_fbc_funcs = { static void snb_fbc_program_fence(struct intel_fbc *fbc) { - const struct intel_fbc_reg_params *params = &fbc->params; + const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; u32 ctl = 0; - if (params->fence_id >= 0) - ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(params->fence_id); + if (fbc_state->fence_id >= 0) + ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id); intel_de_write(i915, SNB_DPFC_CTL_SA, ctl); - intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, params->fence_y_offset); + intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset); } static void snb_fbc_activate(struct intel_fbc *fbc) @@ -485,27 +539,27 @@ static const struct intel_fbc_funcs snb_fbc_funcs = { static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc) { - const struct intel_fbc_reg_params *params = &fbc->params; + const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; u32 val = 0; - if (params->override_cfb_stride) + if 
(fbc_state->override_cfb_stride) val |= FBC_STRIDE_OVERRIDE | - FBC_STRIDE(params->override_cfb_stride / fbc->limit); + FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); intel_de_write(i915, GLK_FBC_STRIDE, val); } static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc) { - const struct intel_fbc_reg_params *params = &fbc->params; + const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; u32 val = 0; /* Display WA #0529: skl, kbl, bxt. */ - if (params->override_cfb_stride) + if (fbc_state->override_cfb_stride) val |= CHICKEN_FBC_STRIDE_OVERRIDE | - CHICKEN_FBC_STRIDE(params->override_cfb_stride / fbc->limit); + CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); intel_de_rmw(i915, CHICKEN_MISC_4, CHICKEN_FBC_STRIDE_OVERRIDE | @@ -514,16 +568,16 @@ static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc) static u32 ivb_dpfc_ctl(struct intel_fbc *fbc) { - const struct intel_fbc_reg_params *params = &fbc->params; + const struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; dpfc_ctl = g4x_dpfc_ctl_limit(fbc); if (IS_IVYBRIDGE(i915)) - dpfc_ctl |= DPFC_CTL_PLANE_IVB(params->crtc.i9xx_plane); + dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane); - if (params->fence_id >= 0) + if (fbc_state->fence_id >= 0) dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB; if (fbc->false_color) @@ -577,7 +631,7 @@ static bool intel_fbc_hw_is_active(struct intel_fbc *fbc) static void intel_fbc_hw_activate(struct intel_fbc *fbc) { - trace_intel_fbc_activate(fbc->crtc); + trace_intel_fbc_activate(fbc->state.plane); fbc->active = true; fbc->activated = true; @@ -587,59 +641,31 @@ static void intel_fbc_hw_activate(struct intel_fbc *fbc) static void intel_fbc_hw_deactivate(struct intel_fbc *fbc) { - trace_intel_fbc_deactivate(fbc->crtc); + trace_intel_fbc_deactivate(fbc->state.plane); fbc->active = false; fbc->funcs->deactivate(fbc); } -bool intel_fbc_is_compressing(struct intel_fbc *fbc) +static bool intel_fbc_is_compressing(struct intel_fbc *fbc) { return fbc->funcs->is_compressing(fbc); } static void intel_fbc_nuke(struct intel_fbc *fbc) { - trace_intel_fbc_nuke(fbc->crtc); + trace_intel_fbc_nuke(fbc->state.plane); fbc->funcs->nuke(fbc); } -int intel_fbc_set_false_color(struct intel_fbc *fbc, bool enable) -{ - if (!fbc->funcs || !fbc->funcs->set_false_color) - return -ENODEV; - - mutex_lock(&fbc->lock); - - fbc->false_color = enable; - - fbc->funcs->set_false_color(fbc, enable); - - mutex_unlock(&fbc->lock); - - return 0; -} - -/** - * intel_fbc_is_active - Is FBC active? - * @fbc: The FBC instance - * - * This function is used to verify the current state of FBC. - * - * FIXME: This should be tracked in the plane config eventually - * instead of queried at runtime for most callers. - */ -bool intel_fbc_is_active(struct intel_fbc *fbc) -{ - return fbc->active; -} - static void intel_fbc_activate(struct intel_fbc *fbc) { intel_fbc_hw_activate(fbc); intel_fbc_nuke(fbc); + + fbc->no_fbc_reason = NULL; } static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason) @@ -679,9 +705,9 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *i915) return min(end, intel_fbc_cfb_base_max(i915)); } -static int intel_fbc_min_limit(int fb_cpp) +static int intel_fbc_min_limit(const struct intel_plane_state *plane_state) { - return fb_cpp == 2 ? 2 : 1; + return plane_state->hw.fb->format->cpp[0] == 2 ? 
2 : 1; } static int intel_fbc_max_limit(struct drm_i915_private *i915) @@ -784,19 +810,25 @@ static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) void intel_fbc_cleanup(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &i915->fbc; + struct intel_fbc *fbc = i915->fbc; - if (!HAS_FBC(i915)) + if (!fbc) return; mutex_lock(&fbc->lock); __intel_fbc_cleanup_cfb(fbc); mutex_unlock(&fbc->lock); + + kfree(fbc); } -static bool stride_is_valid(struct drm_i915_private *i915, - u64 modifier, unsigned int stride) +static bool stride_is_valid(const struct intel_plane_state *plane_state) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int stride = intel_fbc_plane_stride(plane_state) * + fb->format->cpp[0]; + /* This should have been caught earlier. */ if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0)) return false; @@ -813,7 +845,7 @@ static bool stride_is_valid(struct drm_i915_private *i915, /* Display WA #1105: skl,bxt,kbl,cfl,glk */ if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) && - modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) + fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) return false; if (stride > 16384) @@ -822,10 +854,12 @@ static bool stride_is_valid(struct drm_i915_private *i915, return true; } -static bool pixel_format_is_valid(struct drm_i915_private *i915, - u32 pixel_format) +static bool pixel_format_is_valid(const struct intel_plane_state *plane_state) { - switch (pixel_format) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + + switch (fb->format->format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: return true; @@ -843,10 +877,13 @@ static bool pixel_format_is_valid(struct drm_i915_private *i915, } } -static bool rotation_is_valid(struct drm_i915_private *i915, - u32 pixel_format, unsigned int rotation) +static bool rotation_is_valid(const struct intel_plane_state *plane_state) { - if (DISPLAY_VER(i915) >= 9 && pixel_format == DRM_FORMAT_RGB565 && + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; + + if (DISPLAY_VER(i915) >= 9 && fb->format->format == DRM_FORMAT_RGB565 && drm_rotation_90_or_270(rotation)) return false; else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) && @@ -862,10 +899,9 @@ static bool rotation_is_valid(struct drm_i915_private *i915, * the X and Y offset registers. That's why we include the src x/y offsets * instead of just looking at the plane size. 
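The validity helpers above now take the plane state directly instead of a cached copy, computing the byte stride on the fly. A minimal standalone sketch of stride_is_valid(), limited to the checks visible in this hunk, with the platform queries reduced to plain parameters (hypothetical names, not the driver's API):

#include <stdbool.h>

/* Simplified model of stride_is_valid(): the framebuffer byte stride
 * must be a multiple of 64, linear buffers on gen9/glk must also avoid
 * a multiple-of-512 stride (Display WA #1105), and 16 KiB is the cap. */
static bool fbc_stride_ok(unsigned int display_ver, bool is_glk,
                          bool linear, unsigned int stride)
{
        if (stride & 63)
                return false;

        /* Display WA #1105: skl, bxt, kbl, cfl, glk */
        if ((display_ver == 9 || is_glk) && linear && (stride & 511))
                return false;

        return stride <= 16384;
}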
*/ -static bool intel_fbc_hw_tracking_covers_screen(struct intel_fbc *fbc, - struct intel_crtc *crtc) +static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = fbc->i915; + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); unsigned int effective_w, effective_h, max_w, max_h; if (DISPLAY_VER(i915) >= 10) { @@ -882,18 +918,20 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_fbc *fbc, max_h = 1536; } - intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w, - &effective_h); - effective_w += fbc->state_cache.plane.adjusted_x; - effective_h += fbc->state_cache.plane.adjusted_y; + effective_w = plane_state->view.color_plane[0].x + + (drm_rect_width(&plane_state->uapi.src) >> 16); + effective_h = plane_state->view.color_plane[0].y + + (drm_rect_height(&plane_state->uapi.src) >> 16); return effective_w <= max_w && effective_h <= max_h; } -static bool tiling_is_valid(struct drm_i915_private *i915, - u64 modifier) +static bool tiling_is_valid(const struct intel_plane_state *plane_state) { - switch (modifier) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + + switch (fb->modifier) { case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: @@ -905,208 +943,163 @@ static bool tiling_is_valid(struct drm_i915_private *i915, } } -static void intel_fbc_update_state_cache(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static void intel_fbc_update_state(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_plane *plane) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &i915->fbc; - struct intel_fbc_state_cache *cache = &fbc->state_cache; - struct drm_framebuffer *fb = plane_state->hw.fb; - - cache->plane.visible = plane_state->uapi.visible; - if (!cache->plane.visible) - return; - - cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags; - if (IS_HASWELL(i915) || IS_BROADWELL(i915)) - cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; - - cache->plane.rotation = plane_state->hw.rotation; - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. 
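intel_fbc_hw_tracking_covers_screen() now reads the x/y offsets straight from plane_state->view rather than from the state cache. A self-contained sketch of the computation; only the display-ver >= 10 tier and the final fallback are visible in the hunk, so the intermediate platform tiers are elided here as well:

#include <stdbool.h>

/* FBC hardware tracking starts at the programmed plane base address,
 * so the effective size is the x/y offset within the FB plus the
 * visible source size, and that whole region must be covered. */
static bool fbc_covers_screen(unsigned int display_ver,
                              unsigned int x, unsigned int y,
                              unsigned int src_w, unsigned int src_h)
{
        unsigned int max_w, max_h;

        if (display_ver >= 10) {
                max_w = 5120;
                max_h = 4096;
        } else {
                /* older platform tiers omitted, as in the hunk */
                max_w = 2048;
                max_h = 1536;
        }

        return x + src_w <= max_w && y + src_h <= max_h;
}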
- */ - cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16; - cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - cache->plane.adjusted_x = plane_state->view.color_plane[0].x; - cache->plane.adjusted_y = plane_state->view.color_plane[0].y; + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct intel_fbc *fbc = plane->fbc; + struct intel_fbc_state *fbc_state = &fbc->state; - cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode; + WARN_ON(plane_state->no_fbc_reason); - cache->fb.format = fb->format; - cache->fb.modifier = fb->modifier; - cache->fb.stride = intel_fbc_plane_stride(plane_state); + fbc_state->plane = plane; /* FBC1 compression interval: arbitrary choice of 1 second */ - cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode); + fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode); - cache->fence_y_offset = intel_plane_fence_y_offset(plane_state); + fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state); drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE && !plane_state->ggtt_vma->fence); if (plane_state->flags & PLANE_HAS_FENCE && plane_state->ggtt_vma->fence) - cache->fence_id = plane_state->ggtt_vma->fence->id; + fbc_state->fence_id = plane_state->ggtt_vma->fence->id; else - cache->fence_id = -1; + fbc_state->fence_id = -1; - cache->psr2_active = crtc_state->has_psr2; + fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state); + fbc_state->cfb_size = intel_fbc_cfb_size(plane_state); + fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state); } -static bool intel_fbc_cfb_size_changed(struct intel_fbc *fbc) +static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state) { - return intel_fbc_cfb_size(fbc, &fbc->state_cache) > - fbc->compressed_fb.size * fbc->limit; -} + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); -static u16 intel_fbc_override_cfb_stride(struct intel_fbc *fbc, - const struct intel_fbc_state_cache *cache) -{ - unsigned int stride = _intel_fbc_cfb_stride(cache); - unsigned int stride_aligned = intel_fbc_cfb_stride(fbc, cache); - - /* - * Override stride in 64 byte units per 4 line segment. + /* The use of a CPU fence is one of two ways to detect writes by the + * CPU to the scanout and trigger updates to the FBC. * - * Gen9 hw miscalculates cfb stride for linear as - * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so - * we always need to use the override there. + * The other method is by software tracking (see + * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke + * the current compressed buffer and recompress it. + * + * Note that is possible for a tiled surface to be unmappable (and + * so have no fence associated with it) due to aperture constraints + * at the time of pinning. + * + * FIXME with 90/270 degree rotation we should use the fence on + * the normal GTT view (the rotated view doesn't even have a + * fence). Would need changes to the FBC fence Y offset as well. + * For now this will effectively disable FBC with 90/270 degree + * rotation. 
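intel_fbc_update_state() replaces the old state-cache fill: it snapshots only what the register writers need from the committed plane state. A reduced model of the fence capture (struct and function names are hypothetical):

/* Mirror of the fence handling above: record the fence id when the
 * framebuffer is pinned with a fence, -1 otherwise; the FBC1
 * recompression interval is one second's worth of frames. */
struct fbc_snapshot {
        int fence_id;        /* -1 == no fence */
        int interval;        /* frames between recompressions */
};

static void fbc_snapshot_update(struct fbc_snapshot *s,
                                bool has_fence, int fence_id,
                                int vrefresh)
{
        s->fence_id = has_fence ? fence_id : -1;
        s->interval = vrefresh;
}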
*/ - if (stride != stride_aligned || - (DISPLAY_VER(fbc->i915) == 9 && - cache->fb.modifier == DRM_FORMAT_MOD_LINEAR)) - return stride_aligned * 4 / 64; - - return 0; + return DISPLAY_VER(i915) >= 9 || + (plane_state->flags & PLANE_HAS_FENCE && + plane_state->ggtt_vma->fence); } -static bool intel_fbc_can_enable(struct intel_fbc *fbc) +static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = fbc->i915; - - if (intel_vgpu_active(i915)) { - fbc->no_fbc_reason = "VGPU is active"; - return false; - } - - if (!i915->params.enable_fbc) { - fbc->no_fbc_reason = "disabled per module param or by default"; - return false; - } + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct intel_fbc *fbc = plane->fbc; - if (fbc->underrun_detected) { - fbc->no_fbc_reason = "underrun detected"; - return false; - } + return intel_fbc_min_limit(plane_state) <= fbc->limit && + intel_fbc_cfb_size(plane_state) <= fbc->compressed_fb.size * fbc->limit; +} - return true; +static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state) +{ + return !plane_state->no_fbc_reason && + intel_fbc_is_fence_ok(plane_state) && + intel_fbc_is_cfb_ok(plane_state); } -static bool intel_fbc_can_activate(struct intel_crtc *crtc) +static int intel_fbc_check_plane(struct intel_atomic_state *state, + struct intel_plane *plane) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &i915->fbc; - struct intel_fbc_state_cache *cache = &fbc->state_cache; + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + const struct drm_framebuffer *fb = plane_state->hw.fb; + struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc); + const struct intel_crtc_state *crtc_state; + struct intel_fbc *fbc = plane->fbc; - if (!intel_fbc_can_enable(fbc)) - return false; + if (!fbc) + return 0; - if (!cache->plane.visible) { - fbc->no_fbc_reason = "primary plane not visible"; - return false; + if (intel_vgpu_active(i915)) { + plane_state->no_fbc_reason = "VGPU active"; + return 0; } - /* We don't need to use a state cache here since this information is - * global for all CRTC. - */ - if (fbc->underrun_detected) { - fbc->no_fbc_reason = "underrun detected"; - return false; + if (!i915->params.enable_fbc) { + plane_state->no_fbc_reason = "disabled per module param or by default"; + return 0; } - if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) { - fbc->no_fbc_reason = "incompatible mode"; - return false; + if (!plane_state->uapi.visible) { + plane_state->no_fbc_reason = "plane not visible"; + return 0; } - if (!intel_fbc_hw_tracking_covers_screen(fbc, crtc)) { - fbc->no_fbc_reason = "mode too large for compression"; - return false; + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + + if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { + plane_state->no_fbc_reason = "interlaced mode not supported"; + return 0; } - /* The use of a CPU fence is one of two ways to detect writes by the - * CPU to the scanout and trigger updates to the FBC. - * - * The other method is by software tracking (see - * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke - * the current compressed buffer and recompress it. - * - * Note that is possible for a tiled surface to be unmappable (and - * so have no fence associated with it) due to aperture constraints - * at the time of pinning. 
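intel_fbc_is_cfb_ok() is the reuse test for the already-allocated compressed framebuffer: the plane's minimum compression limit must not exceed the current limit, and the required CFB size must fit within the stolen-memory allocation scaled by that limit. A standalone sketch:

#include <stdbool.h>
#include <stddef.h>

/* A CFB of fbc_size bytes at compression limit 'cur_limit' can back
 * up to fbc_size * cur_limit bytes of uncompressed scanout. */
static bool fbc_cfb_reusable(int min_limit, int cur_limit,
                             size_t needed_cfb_size, size_t fbc_size)
{
        return min_limit <= cur_limit &&
               needed_cfb_size <= fbc_size * cur_limit;
}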
- * - * FIXME with 90/270 degree rotation we should use the fence on - * the normal GTT view (the rotated view doesn't even have a - * fence). Would need changes to the FBC fence Y offset as well. - * For now this will effectively disable FBC with 90/270 degree - * rotation. - */ - if (DISPLAY_VER(i915) < 9 && cache->fence_id < 0) { - fbc->no_fbc_reason = "framebuffer not tiled or fenced"; - return false; + if (crtc_state->double_wide) { + plane_state->no_fbc_reason = "double wide pipe not supported"; + return 0; } - if (!pixel_format_is_valid(i915, cache->fb.format->format)) { - fbc->no_fbc_reason = "pixel format is invalid"; + /* + * Display 12+ is not supporting FBC with PSR2. + * Recommendation is to keep this combination disabled + * Bspec: 50422 HSD: 14010260002 + */ + if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) { + plane_state->no_fbc_reason = "PSR2 enabled"; return false; } - if (!rotation_is_valid(i915, cache->fb.format->format, - cache->plane.rotation)) { - fbc->no_fbc_reason = "rotation unsupported"; - return false; + if (!pixel_format_is_valid(plane_state)) { + plane_state->no_fbc_reason = "pixel format not supported"; + return 0; } - if (!tiling_is_valid(i915, cache->fb.modifier)) { - fbc->no_fbc_reason = "tiling unsupported"; - return false; + if (!tiling_is_valid(plane_state)) { + plane_state->no_fbc_reason = "tiling not supported"; + return 0; } - if (!stride_is_valid(i915, cache->fb.modifier, - cache->fb.stride * cache->fb.format->cpp[0])) { - fbc->no_fbc_reason = "framebuffer stride not supported"; - return false; + if (!rotation_is_valid(plane_state)) { + plane_state->no_fbc_reason = "rotation not supported"; + return 0; } - if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && - cache->fb.format->has_alpha) { - fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC"; - return false; + if (!stride_is_valid(plane_state)) { + plane_state->no_fbc_reason = "stride not supported"; + return 0; } - /* WaFbcExceedCdClockThreshold:hsw,bdw */ - if ((IS_HASWELL(i915) || IS_BROADWELL(i915)) && - cache->crtc.hsw_bdw_pixel_rate >= i915->cdclk.hw.cdclk * 95 / 100) { - fbc->no_fbc_reason = "pixel rate is too big"; + if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && + fb->format->has_alpha) { + plane_state->no_fbc_reason = "per-pixel alpha not supported"; return false; } - /* It is possible for the required CFB size change without a - * crtc->disable + crtc->enable since it is possible to change the - * stride without triggering a full modeset. Since we try to - * over-allocate the CFB, there's a chance we may keep FBC enabled even - * if this happens, but if we exceed the current CFB size we'll have to - * disable FBC. Notice that it would be possible to disable FBC, wait - * for a frame, free the stolen node, then try to reenable FBC in case - * we didn't get any invalidate/deactivate calls, but this would require - * a lot of tracking just for a specific case. If we conclude it's an - * important case, we can implement it later. */ - if (intel_fbc_cfb_size_changed(fbc)) { - fbc->no_fbc_reason = "CFB requirements changed"; - return false; + if (!intel_fbc_hw_tracking_covers_screen(plane_state)) { + plane_state->no_fbc_reason = "plane size too big"; + return 0; } /* @@ -1115,146 +1108,139 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) * and screen flicker. 
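Note that intel_fbc_check_plane() records a human-readable rejection reason on the plane state and returns 0, so the atomic check still succeeds with FBC off; a few branches in the hunk above still return false rather than 0, a harmless leftover from the bool-returning predecessor since false == 0. A sketch of the pattern with hypothetical names:

/* Each failed constraint stores a reason string instead of failing
 * the commit; a NULL reason means FBC remains possible. */
struct plane_check {
        const char *no_fbc_reason;
};

static int fbc_check_plane_model(struct plane_check *p, bool visible,
                                 bool interlaced, bool tiling_ok)
{
        if (!visible) {
                p->no_fbc_reason = "plane not visible";
                return 0;
        }
        if (interlaced) {
                p->no_fbc_reason = "interlaced mode not supported";
                return 0;
        }
        if (!tiling_ok) {
                p->no_fbc_reason = "tiling not supported";
                return 0;
        }
        p->no_fbc_reason = NULL;
        return 0;
}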
*/ if (DISPLAY_VER(i915) >= 9 && - (fbc->state_cache.plane.adjusted_y & 3)) { - fbc->no_fbc_reason = "plane Y offset is misaligned"; + plane_state->view.color_plane[0].y & 3) { + plane_state->no_fbc_reason = "plane start Y offset misaligned"; return false; } /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ if (DISPLAY_VER(i915) >= 11 && - (cache->plane.src_h + cache->plane.adjusted_y) % 4) { - fbc->no_fbc_reason = "plane height + offset is non-modulo of 4"; + (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) { + plane_state->no_fbc_reason = "plane end Y offset misaligned"; return false; } - /* - * Display 12+ is not supporting FBC with PSR2. - * Recommendation is to keep this combination disabled - * Bspec: 50422 HSD: 14010260002 - */ - if (fbc->state_cache.psr2_active && DISPLAY_VER(i915) >= 12) { - fbc->no_fbc_reason = "not supported with PSR2"; - return false; - } - - return true; -} - -static void intel_fbc_get_reg_params(struct intel_fbc *fbc, - struct intel_crtc *crtc) -{ - const struct intel_fbc_state_cache *cache = &fbc->state_cache; - struct intel_fbc_reg_params *params = &fbc->params; - - /* Since all our fields are integer types, use memset here so the - * comparison function can rely on memcmp because the padding will be - * zero. */ - memset(params, 0, sizeof(*params)); - - params->fence_id = cache->fence_id; - params->fence_y_offset = cache->fence_y_offset; - - params->interval = cache->interval; + /* WaFbcExceedCdClockThreshold:hsw,bdw */ + if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { + const struct intel_cdclk_state *cdclk_state; - params->crtc.pipe = crtc->pipe; - params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; + cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(cdclk_state)) + return PTR_ERR(cdclk_state); - params->fb.format = cache->fb.format; - params->fb.modifier = cache->fb.modifier; - params->fb.stride = cache->fb.stride; + if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) { + plane_state->no_fbc_reason = "pixel rate too high"; + return 0; + } + } - params->cfb_stride = intel_fbc_cfb_stride(fbc, cache); - params->cfb_size = intel_fbc_cfb_size(fbc, cache); - params->override_cfb_stride = intel_fbc_override_cfb_stride(fbc, cache); + plane_state->no_fbc_reason = NULL; - params->plane_visible = cache->plane.visible; + return 0; } -static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &i915->fbc; - const struct intel_fbc_state_cache *cache = &fbc->state_cache; - const struct intel_fbc_reg_params *params = &fbc->params; - if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) - return false; +static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_plane *plane) +{ + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *old_plane_state = + intel_atomic_get_old_plane_state(state, plane); + const struct intel_plane_state *new_plane_state = + intel_atomic_get_new_plane_state(state, plane); + const struct drm_framebuffer *old_fb = old_plane_state->hw.fb; + const struct drm_framebuffer *new_fb = new_plane_state->hw.fb; - if (!params->plane_visible) + if (drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) return false; - if (!intel_fbc_can_activate(crtc)) + if 
(!intel_fbc_is_ok(old_plane_state) || + !intel_fbc_is_ok(new_plane_state)) return false; - if (params->fb.format != cache->fb.format) + if (old_fb->format->format != new_fb->format->format) return false; - if (params->fb.modifier != cache->fb.modifier) + if (old_fb->modifier != new_fb->modifier) return false; - if (params->fb.stride != cache->fb.stride) + if (intel_fbc_plane_stride(old_plane_state) != + intel_fbc_plane_stride(new_plane_state)) return false; - if (params->cfb_stride != intel_fbc_cfb_stride(fbc, cache)) + if (intel_fbc_cfb_stride(old_plane_state) != + intel_fbc_cfb_stride(new_plane_state)) return false; - if (params->cfb_size != intel_fbc_cfb_size(fbc, cache)) + if (intel_fbc_cfb_size(old_plane_state) != + intel_fbc_cfb_size(new_plane_state)) return false; - if (params->override_cfb_stride != intel_fbc_override_cfb_stride(fbc, cache)) + if (intel_fbc_override_cfb_stride(old_plane_state) != + intel_fbc_override_cfb_stride(new_plane_state)) return false; return true; } -bool intel_fbc_pre_update(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static bool __intel_fbc_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_plane *plane) { - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_plane_state *plane_state = - intel_atomic_get_new_plane_state(state, plane); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_fbc *fbc = plane->fbc; - const char *reason = "update pending"; bool need_vblank_wait = false; - if (!fbc || !plane_state) + fbc->flip_pending = true; + + if (intel_fbc_can_flip_nuke(state, crtc, plane)) return need_vblank_wait; - mutex_lock(&fbc->lock); + intel_fbc_deactivate(fbc, "update pending"); + + /* + * Display WA #1198: glk+ + * Need an extra vblank wait between FBC disable and most plane + * updates. Bspec says this is only needed for plane disable, but + * that is not true. Touching most plane registers will cause the + * corruption to appear. Also SKL/derivatives do not seem to be + * affected. + * + * TODO: could optimize this a bit by sampling the frame + * counter when we disable FBC (if it was already done earlier) + * and skipping the extra vblank wait before the plane update + * if at least one frame has already passed. + */ + if (fbc->activated && DISPLAY_VER(i915) >= 10) + need_vblank_wait = true; + fbc->activated = false; - if (fbc->crtc != crtc) - goto unlock; + return need_vblank_wait; +} - intel_fbc_update_state_cache(crtc, crtc_state, plane_state); - fbc->flip_pending = true; +bool intel_fbc_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_plane_state *plane_state; + bool need_vblank_wait = false; + struct intel_plane *plane; + int i; + + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + struct intel_fbc *fbc = plane->fbc; - if (!intel_fbc_can_flip_nuke(crtc_state)) { - intel_fbc_deactivate(fbc, reason); - - /* - * Display WA #1198: glk+ - * Need an extra vblank wait between FBC disable and most plane - * updates. Bspec says this is only needed for plane disable, but - * that is not true. Touching most plane registers will cause the - * corruption to appear. Also SKL/derivatives do not seem to be - * affected. 
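intel_fbc_can_flip_nuke() now compares the old and new plane states field by field; the flip can be absorbed by a simple nuke only when nothing that affects the compressed-buffer layout changes. A reduced model of the comparison:

#include <stdbool.h>
#include <stdint.h>

struct fbc_layout {
        uint32_t format;
        uint64_t modifier;
        unsigned int stride;
        unsigned int cfb_stride;
        unsigned int cfb_size;
        unsigned int override_cfb_stride;
};

/* Nuke instead of deactivate/reactivate only when FBC stays viable
 * and the layout is bit-for-bit identical. */
static bool fbc_can_flip_nuke(bool needs_modeset, bool old_ok, bool new_ok,
                              const struct fbc_layout *o,
                              const struct fbc_layout *n)
{
        return !needs_modeset && old_ok && new_ok &&
               o->format == n->format &&
               o->modifier == n->modifier &&
               o->stride == n->stride &&
               o->cfb_stride == n->cfb_stride &&
               o->cfb_size == n->cfb_size &&
               o->override_cfb_stride == n->override_cfb_stride;
}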
- * - * TODO: could optimize this a bit by sampling the frame - * counter when we disable FBC (if it was already done earlier) - * and skipping the extra vblank wait before the plane update - * if at least one frame has already passed. - */ - if (fbc->activated && - DISPLAY_VER(i915) >= 10) - need_vblank_wait = true; - fbc->activated = false; + if (!fbc || plane->pipe != crtc->pipe) + continue; + + mutex_lock(&fbc->lock); + + if (fbc->state.plane == plane) + need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane); + + mutex_unlock(&fbc->lock); } -unlock: - mutex_unlock(&fbc->lock); return need_vblank_wait; } @@ -1262,44 +1248,25 @@ unlock: static void __intel_fbc_disable(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; - struct intel_crtc *crtc = fbc->crtc; + struct intel_plane *plane = fbc->state.plane; drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); - drm_WARN_ON(&i915->drm, !fbc->crtc); drm_WARN_ON(&i915->drm, fbc->active); - drm_dbg_kms(&i915->drm, "Disabling FBC on pipe %c\n", - pipe_name(crtc->pipe)); + drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n", + plane->base.base.id, plane->base.name); __intel_fbc_cleanup_cfb(fbc); - fbc->crtc = NULL; + fbc->state.plane = NULL; } -static void __intel_fbc_post_update(struct intel_crtc *crtc) +static void __intel_fbc_post_update(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &i915->fbc; + struct drm_i915_private *i915 = fbc->i915; drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); - if (fbc->crtc != crtc) - return; - - fbc->flip_pending = false; - - if (!i915->params.enable_fbc) { - intel_fbc_deactivate(fbc, "disabled at runtime per module param"); - __intel_fbc_disable(fbc); - - return; - } - - intel_fbc_get_reg_params(fbc, crtc); - - if (!intel_fbc_can_activate(crtc)) - return; - if (!fbc->busy_bits) intel_fbc_activate(fbc); else @@ -1309,23 +1276,31 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) void intel_fbc_post_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - const struct intel_plane_state *plane_state = - intel_atomic_get_new_plane_state(state, plane); - struct intel_fbc *fbc = plane->fbc; + const struct intel_plane_state *plane_state; + struct intel_plane *plane; + int i; - if (!fbc || !plane_state) - return; + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + struct intel_fbc *fbc = plane->fbc; - mutex_lock(&fbc->lock); - __intel_fbc_post_update(crtc); - mutex_unlock(&fbc->lock); + if (!fbc || plane->pipe != crtc->pipe) + continue; + + mutex_lock(&fbc->lock); + + if (fbc->state.plane == plane) { + fbc->flip_pending = false; + __intel_fbc_post_update(fbc); + } + + mutex_unlock(&fbc->lock); + } } static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) { - if (fbc->crtc) - return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; + if (fbc->state.plane) + return fbc->state.plane->frontbuffer_bit; else return fbc->possible_framebuffer_bits; } @@ -1334,9 +1309,9 @@ void intel_fbc_invalidate(struct drm_i915_private *i915, unsigned int frontbuffer_bits, enum fb_op_origin origin) { - struct intel_fbc *fbc = &i915->fbc; + struct intel_fbc *fbc = i915->fbc; - if (!HAS_FBC(i915)) + if (!fbc) return; if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) @@ -1346,7 +1321,7 @@ void intel_fbc_invalidate(struct drm_i915_private *i915, fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & 
frontbuffer_bits; - if (fbc->crtc && fbc->busy_bits) + if (fbc->state.plane && fbc->busy_bits) intel_fbc_deactivate(fbc, "frontbuffer write"); mutex_unlock(&fbc->lock); @@ -1355,9 +1330,9 @@ void intel_fbc_invalidate(struct drm_i915_private *i915, void intel_fbc_flush(struct drm_i915_private *i915, unsigned int frontbuffer_bits, enum fb_op_origin origin) { - struct intel_fbc *fbc = &i915->fbc; + struct intel_fbc *fbc = i915->fbc; - if (!HAS_FBC(i915)) + if (!fbc) return; mutex_lock(&fbc->lock); @@ -1367,144 +1342,85 @@ void intel_fbc_flush(struct drm_i915_private *i915, if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) goto out; - if (!fbc->busy_bits && fbc->crtc && + if (!fbc->busy_bits && fbc->state.plane && (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { if (fbc->active) intel_fbc_nuke(fbc); else if (!fbc->flip_pending) - __intel_fbc_post_update(fbc->crtc); + __intel_fbc_post_update(fbc); } out: mutex_unlock(&fbc->lock); } -/** - * intel_fbc_choose_crtc - select a CRTC to enable FBC on - * @i915: i915 device instance - * @state: the atomic state structure - * - * This function looks at the proposed state for CRTCs and planes, then chooses - * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to - * true. - * - * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe - * enable FBC for the chosen CRTC. If it does, it will set i915->fbc.crtc. - */ -void intel_fbc_choose_crtc(struct drm_i915_private *i915, - struct intel_atomic_state *state) +int intel_fbc_atomic_check(struct intel_atomic_state *state) { - struct intel_fbc *fbc = &i915->fbc; - struct intel_plane *plane; struct intel_plane_state *plane_state; - bool crtc_chosen = false; + struct intel_plane *plane; int i; - mutex_lock(&fbc->lock); - - /* Does this atomic commit involve the CRTC currently tied to FBC? */ - if (fbc->crtc && - !intel_atomic_get_new_crtc_state(state, fbc->crtc)) - goto out; - - if (!intel_fbc_can_enable(fbc)) - goto out; - - /* Simply choose the first CRTC that is compatible and has a visible - * plane. We could go for fancier schemes such as checking the plane - * size, but this would just affect the few platforms that don't tie FBC - * to pipe or plane A. */ for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - struct intel_crtc_state *crtc_state; - struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); - - if (plane->fbc != fbc) - continue; - - if (!plane_state->uapi.visible) - continue; + int ret; - crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - - crtc_state->enable_fbc = true; - crtc_chosen = true; - break; + ret = intel_fbc_check_plane(state, plane); + if (ret) + return ret; } - if (!crtc_chosen) - fbc->no_fbc_reason = "no suitable CRTC for FBC"; - -out: - mutex_unlock(&fbc->lock); + return 0; } -/** - * intel_fbc_enable: tries to enable FBC on the CRTC - * @crtc: the CRTC - * @state: corresponding &drm_crtc_state for @crtc - * - * This function checks if the given CRTC was chosen for FBC, then enables it if - * possible. Notice that it doesn't activate FBC. It is valid to call - * intel_fbc_enable multiple times for the same pipe without an - * intel_fbc_disable in the middle, as long as it is deactivated. 
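intel_fbc_invalidate()/intel_fbc_flush() track a mask of busy frontbuffer bits; while any bit belonging to the owning plane is busy, FBC stays deactivated, and the flush either nukes (if active) or re-runs the post-update. A minimal model of the bookkeeping:

/* Busy-bit bookkeeping, mirroring the invalidate/flush pair above;
 * plane_bit is the frontbuffer bit of the plane that owns FBC. */
struct fbc_busy {
        unsigned int busy_bits;
        unsigned int plane_bit;
};

static int fbc_invalidate_model(struct fbc_busy *f, unsigned int bits)
{
        f->busy_bits |= f->plane_bit & bits;
        return f->busy_bits != 0;        /* deactivate while busy */
}

static void fbc_flush_model(struct fbc_busy *f, unsigned int bits)
{
        f->busy_bits &= ~bits;
        /* with busy_bits == 0 and the flushed bits touching the FBC
         * plane, the caller nukes or re-activates */
}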
- */ -static void intel_fbc_enable(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void __intel_fbc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_plane *plane) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); + struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); struct intel_fbc *fbc = plane->fbc; - struct intel_fbc_state_cache *cache; - int min_limit; - - if (!fbc || !plane_state) - return; - cache = &fbc->state_cache; - - min_limit = intel_fbc_min_limit(plane_state->hw.fb ? - plane_state->hw.fb->format->cpp[0] : 0); - - mutex_lock(&fbc->lock); + if (fbc->state.plane) { + if (fbc->state.plane != plane) + return; - if (fbc->crtc) { - if (fbc->crtc != crtc) - goto out; - - if (fbc->limit >= min_limit && - !intel_fbc_cfb_size_changed(fbc)) - goto out; + if (intel_fbc_is_ok(plane_state)) { + intel_fbc_update_state(state, crtc, plane); + return; + } __intel_fbc_disable(fbc); } drm_WARN_ON(&i915->drm, fbc->active); - intel_fbc_update_state_cache(crtc, crtc_state, plane_state); + fbc->no_fbc_reason = plane_state->no_fbc_reason; + if (fbc->no_fbc_reason) + return; - /* FIXME crtc_state->enable_fbc lies :( */ - if (!cache->plane.visible) - goto out; + if (!intel_fbc_is_fence_ok(plane_state)) { + fbc->no_fbc_reason = "framebuffer not fenced"; + return; + } + + if (fbc->underrun_detected) { + fbc->no_fbc_reason = "FIFO underrun"; + return; + } - if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(fbc, cache), min_limit)) { - cache->plane.visible = false; + if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state), + intel_fbc_min_limit(plane_state))) { fbc->no_fbc_reason = "not enough stolen memory"; - goto out; + return; } - drm_dbg_kms(&i915->drm, "Enabling FBC on pipe %c\n", - pipe_name(crtc->pipe)); + drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n", + plane->base.base.id, plane->base.name); fbc->no_fbc_reason = "FBC enabled but not active yet\n"; - fbc->crtc = crtc; + intel_fbc_update_state(state, crtc, plane); intel_fbc_program_cfb(fbc); -out: - mutex_unlock(&fbc->lock); } /** @@ -1515,38 +1431,48 @@ out: */ void intel_fbc_disable(struct intel_crtc *crtc) { - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - struct intel_fbc *fbc = plane->fbc; + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_plane *plane; - if (!fbc) - return; + for_each_intel_plane(&i915->drm, plane) { + struct intel_fbc *fbc = plane->fbc; - mutex_lock(&fbc->lock); - if (fbc->crtc == crtc) - __intel_fbc_disable(fbc); - mutex_unlock(&fbc->lock); + if (!fbc || plane->pipe != crtc->pipe) + continue; + + mutex_lock(&fbc->lock); + if (fbc->state.plane == plane) + __intel_fbc_disable(fbc); + mutex_unlock(&fbc->lock); + } } -/** - * intel_fbc_update: enable/disable FBC on the CRTC - * @state: atomic state - * @crtc: the CRTC - * - * This function checks if the given CRTC was chosen for FBC, then enables it if - * possible. Notice that it doesn't activate FBC. It is valid to call - * intel_fbc_update multiple times for the same pipe without an - * intel_fbc_disable in the middle. 
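__intel_fbc_enable() first tries to keep the current owner: if the same plane still passes intel_fbc_is_ok(), only the state snapshot is refreshed; otherwise FBC is torn down and re-evaluated from scratch, allocating a CFB only when every check passes. A control-flow sketch with hypothetical names:

#include <stdbool.h>

enum fbc_result { FBC_KEPT, FBC_OFF, FBC_ENABLED };

static enum fbc_result fbc_enable_model(bool same_plane_owns, bool state_ok,
                                        bool fence_ok, bool underrun,
                                        bool cfb_alloc_ok)
{
        if (same_plane_owns && state_ok)
                return FBC_KEPT;        /* just refresh fbc->state */

        /* any previous owner is disabled at this point */
        if (!state_ok || !fence_ok || underrun || !cfb_alloc_ok)
                return FBC_OFF;         /* with a no_fbc_reason set */

        return FBC_ENABLED;             /* program CFB; activation later */
}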
- */ void intel_fbc_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state; + struct intel_plane *plane; + int i; - if (crtc_state->update_pipe && !crtc_state->enable_fbc) - intel_fbc_disable(crtc); - else - intel_fbc_enable(state, crtc); + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + struct intel_fbc *fbc = plane->fbc; + + if (!fbc || plane->pipe != crtc->pipe) + continue; + + mutex_lock(&fbc->lock); + + if (crtc_state->update_pipe && plane_state->no_fbc_reason) { + if (fbc->state.plane == plane) + __intel_fbc_disable(fbc); + } else { + __intel_fbc_enable(state, crtc, plane); + } + + mutex_unlock(&fbc->lock); + } } /** @@ -1557,56 +1483,56 @@ void intel_fbc_update(struct intel_atomic_state *state, */ void intel_fbc_global_disable(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &i915->fbc; + struct intel_fbc *fbc = i915->fbc; - if (!HAS_FBC(i915)) + if (!fbc) return; mutex_lock(&fbc->lock); - if (fbc->crtc) { - drm_WARN_ON(&i915->drm, fbc->crtc->active); + if (fbc->state.plane) __intel_fbc_disable(fbc); - } mutex_unlock(&fbc->lock); } static void intel_fbc_underrun_work_fn(struct work_struct *work) { - struct drm_i915_private *i915 = - container_of(work, struct drm_i915_private, fbc.underrun_work); - struct intel_fbc *fbc = &i915->fbc; + struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work); + struct drm_i915_private *i915 = fbc->i915; mutex_lock(&fbc->lock); /* Maybe we were scheduled twice. */ - if (fbc->underrun_detected || !fbc->crtc) + if (fbc->underrun_detected || !fbc->state.plane) goto out; drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n"); fbc->underrun_detected = true; intel_fbc_deactivate(fbc, "FIFO underrun"); + if (!fbc->flip_pending) + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe)); + __intel_fbc_disable(fbc); out: mutex_unlock(&fbc->lock); } /* * intel_fbc_reset_underrun - reset FBC fifo underrun status. - * @fbc: The FBC instance + * @i915: the i915 device * * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we * want to re-enable FBC after an underrun to increase test coverage. */ -int intel_fbc_reset_underrun(struct intel_fbc *fbc) +void intel_fbc_reset_underrun(struct drm_i915_private *i915) { - struct drm_i915_private *i915 = fbc->i915; - int ret; + struct intel_fbc *fbc = i915->fbc; + + if (!fbc) + return; cancel_work_sync(&fbc->underrun_work); - ret = mutex_lock_interruptible(&fbc->lock); - if (ret) - return ret; + mutex_lock(&fbc->lock); if (fbc->underrun_detected) { drm_dbg_kms(&i915->drm, @@ -1616,13 +1542,11 @@ int intel_fbc_reset_underrun(struct intel_fbc *fbc) fbc->underrun_detected = false; mutex_unlock(&fbc->lock); - - return 0; } /** * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun - * @fbc: The FBC instance + * @i915: i915 device * * Without FBC, most underruns are harmless and don't really cause too many * problems, except for an annoying message on dmesg. With FBC, underruns can @@ -1634,9 +1558,11 @@ int intel_fbc_reset_underrun(struct intel_fbc *fbc) * * This function is called from the IRQ handler. 
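The underrun worker now fully disables FBC (waiting for a vblank first when no flip is pending) and latches underrun_detected until intel_fbc_reset_underrun() clears it, which automated tests use to regain coverage. A minimal model of the sticky latch:

#include <stdbool.h>

/* One FIFO underrun parks FBC until the latch is explicitly reset. */
struct fbc_underrun {
        bool detected;
        bool enabled;
};

static void fbc_underrun_work_model(struct fbc_underrun *u)
{
        if (u->detected || !u->enabled)
                return;                /* maybe scheduled twice */
        u->detected = true;
        u->enabled = false;        /* deactivate + disable */
}

static void fbc_reset_underrun_model(struct fbc_underrun *u)
{
        u->detected = false;        /* FBC may be enabled again */
}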
*/ -void intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc) +void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915) { - if (!HAS_FBC(fbc->i915)) + struct intel_fbc *fbc = i915->fbc; + + if (!fbc) return; /* There's no guarantee that underrun_detected won't be set to true @@ -1687,6 +1613,43 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *i915) return false; } +void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane) +{ + if (!fbc) + return; + + plane->fbc = fbc; + fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; +} + +static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915) +{ + struct intel_fbc *fbc; + + fbc = kzalloc(sizeof(*fbc), GFP_KERNEL); + if (!fbc) + return NULL; + + fbc->i915 = i915; + INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); + mutex_init(&fbc->lock); + + if (DISPLAY_VER(i915) >= 7) + fbc->funcs = &ivb_fbc_funcs; + else if (DISPLAY_VER(i915) == 6) + fbc->funcs = &snb_fbc_funcs; + else if (DISPLAY_VER(i915) == 5) + fbc->funcs = &ilk_fbc_funcs; + else if (IS_G4X(i915)) + fbc->funcs = &g4x_fbc_funcs; + else if (DISPLAY_VER(i915) == 4) + fbc->funcs = &i965_fbc_funcs; + else + fbc->funcs = &i8xx_fbc_funcs; + + return fbc; +} + /** * intel_fbc_init - Initialize FBC * @i915: the i915 device @@ -1695,12 +1658,7 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *i915) */ void intel_fbc_init(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &i915->fbc; - - fbc->i915 = i915; - INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); - mutex_init(&fbc->lock); - fbc->active = false; + struct intel_fbc *fbc; if (!drm_mm_initialized(&i915->mm.stolen)) mkwrite_device_info(i915)->display.has_fbc = false; @@ -1712,27 +1670,114 @@ void intel_fbc_init(struct drm_i915_private *i915) drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n", i915->params.enable_fbc); - if (!HAS_FBC(i915)) { - fbc->no_fbc_reason = "unsupported by this chipset"; + if (!HAS_FBC(i915)) return; - } - if (DISPLAY_VER(i915) >= 7) - fbc->funcs = &ivb_fbc_funcs; - else if (DISPLAY_VER(i915) == 6) - fbc->funcs = &snb_fbc_funcs; - else if (DISPLAY_VER(i915) == 5) - fbc->funcs = &ilk_fbc_funcs; - else if (IS_G4X(i915)) - fbc->funcs = &g4x_fbc_funcs; - else if (DISPLAY_VER(i915) == 4) - fbc->funcs = &i965_fbc_funcs; - else - fbc->funcs = &i8xx_fbc_funcs; + fbc = intel_fbc_create(i915); + if (!fbc) + return; /* We still don't have any sort of hardware state readout for FBC, so * deactivate it in case the BIOS activated it to make sure software * matches the hardware state. */ if (intel_fbc_hw_is_active(fbc)) intel_fbc_hw_deactivate(fbc); + + i915->fbc = fbc; +} + +static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) +{ + struct intel_fbc *fbc = m->private; + struct drm_i915_private *i915 = fbc->i915; + struct intel_plane *plane; + intel_wakeref_t wakeref; + + drm_modeset_lock_all(&i915->drm); + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + mutex_lock(&fbc->lock); + + if (fbc->active) { + seq_puts(m, "FBC enabled\n"); + seq_printf(m, "Compressing: %s\n", + yesno(intel_fbc_is_compressing(fbc))); + } else { + seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); + } + + for_each_intel_plane(&i915->drm, plane) { + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + + if (plane->fbc != fbc) + continue; + + seq_printf(m, "%c [PLANE:%d:%s]: %s\n", + fbc->state.plane == plane ? 
'*' : ' ', + plane->base.base.id, plane->base.name, + plane_state->no_fbc_reason ?: "FBC possible"); + } + + mutex_unlock(&fbc->lock); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + + drm_modeset_unlock_all(&i915->drm); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status); + +static int intel_fbc_debugfs_false_color_get(void *data, u64 *val) +{ + struct intel_fbc *fbc = data; + + *val = fbc->false_color; + + return 0; +} + +static int intel_fbc_debugfs_false_color_set(void *data, u64 val) +{ + struct intel_fbc *fbc = data; + + mutex_lock(&fbc->lock); + + fbc->false_color = val; + + if (fbc->active) + fbc->funcs->set_false_color(fbc, fbc->false_color); + + mutex_unlock(&fbc->lock); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops, + intel_fbc_debugfs_false_color_get, + intel_fbc_debugfs_false_color_set, + "%llu\n"); + +static void intel_fbc_debugfs_add(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; + struct drm_minor *minor = i915->drm.primary; + + debugfs_create_file("i915_fbc_status", 0444, + minor->debugfs_root, fbc, + &intel_fbc_debugfs_status_fops); + + if (fbc->funcs->set_false_color) + debugfs_create_file("i915_fbc_false_color", 0644, + minor->debugfs_root, fbc, + &intel_fbc_debugfs_false_color_fops); +} + +void intel_fbc_debugfs_register(struct drm_i915_private *i915) +{ + struct intel_fbc *fbc = i915->fbc; + + if (fbc) + intel_fbc_debugfs_add(fbc); } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h index ce48a22c5e9e..07ad0411fcc3 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.h +++ b/drivers/gpu/drm/i915/display/intel_fbc.h @@ -8,19 +8,16 @@ #include <linux/types.h> -#include "intel_frontbuffer.h" - +enum fb_op_origin; struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; struct intel_fbc; +struct intel_plane; struct intel_plane_state; -void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, - struct intel_atomic_state *state); -bool intel_fbc_is_active(struct intel_fbc *fbc); -bool intel_fbc_is_compressing(struct intel_fbc *fbc); +int intel_fbc_atomic_check(struct intel_atomic_state *state); bool intel_fbc_pre_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_fbc_post_update(struct intel_atomic_state *state, @@ -36,8 +33,9 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv, enum fb_op_origin origin); void intel_fbc_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits, enum fb_op_origin origin); -void intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc); -int intel_fbc_reset_underrun(struct intel_fbc *fbc); -int intel_fbc_set_false_color(struct intel_fbc *fbc, bool enable); +void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane); +void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915); +void intel_fbc_reset_underrun(struct drm_i915_private *i915); +void intel_fbc_debugfs_register(struct drm_i915_private *i915); #endif /* __INTEL_FBC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index 2b5f80f3b4e0..3d6e22923601 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -4,6 +4,7 @@ */ #include "intel_atomic.h" +#include "intel_crtc.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" @@ -157,7 +158,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, if 
(pipe_config->fdi_lanes <= 2) return 0; - other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C); + other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) @@ -178,7 +179,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, return -EINVAL; } - other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B); + other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index 28d9eeb7b4f3..d636d21fa9ce 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -26,8 +26,8 @@ */ #include "i915_drv.h" -#include "i915_trace.h" #include "intel_de.h" +#include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fbc.h" #include "intel_fifo_underrun.h" @@ -61,7 +61,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) lockdep_assert_held(&dev_priv->irq_lock); for_each_pipe(dev_priv, pipe) { - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); if (crtc->cpu_fifo_underrun_disabled) return false; @@ -79,7 +79,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev) lockdep_assert_held(&dev_priv->irq_lock); for_each_pipe(dev_priv, pipe) { - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); if (crtc->pch_fifo_underrun_disabled) return false; @@ -279,7 +279,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); bool old; lockdep_assert_held(&dev_priv->irq_lock); @@ -348,7 +348,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, bool enable) { struct intel_crtc *crtc = - intel_get_crtc_for_pipe(dev_priv, pch_transcoder); + intel_crtc_for_pipe(dev_priv, pch_transcoder); unsigned long flags; bool old; @@ -391,7 +391,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); u32 underruns = 0; /* We may be called too early in init, thanks BIOS! */ @@ -434,7 +434,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe)); } - intel_fbc_handle_fifo_underrun_irq(&dev_priv->fbc); + intel_fbc_handle_fifo_underrun_irq(dev_priv); } /** diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 0492446cd04a..791248f812aa 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -55,14 +55,13 @@ * cancelled as soon as busyness is detected. 
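ivb_can_enable_err_int() and cpt_can_enable_serr_int() in the fifo-underrun hunks differ only in which per-crtc flag they test: the shared error interrupt may be enabled only if no pipe has underrun reporting disabled, since it cannot be masked per pipe. A reduced model:

#include <stdbool.h>

static bool can_enable_err_int_model(const bool underrun_disabled[],
                                     int num_pipes)
{
        for (int pipe = 0; pipe < num_pipes; pipe++) {
                if (underrun_disabled[pipe])
                        return false;
        }
        return true;
}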
*/ -#include "display/intel_dp.h" - #include "i915_drv.h" -#include "i915_trace.h" +#include "intel_display_trace.h" #include "intel_display_types.h" +#include "intel_dp.h" +#include "intel_drrs.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" -#include "intel_drrs.h" #include "intel_psr.h" /** diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h index a88441edc8f9..ff0c37b079aa 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h @@ -28,7 +28,7 @@ #include <linux/kref.h> #include "gem/i915_gem_object_types.h" -#include "i915_active.h" +#include "i915_active_types.h" struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 7e3f5c6ca484..1a376e9a1ff3 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -1382,7 +1382,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) if (!HAS_OVERLAY(dev_priv)) return; - engine = dev_priv->gt.engine[RCS0]; + engine = to_gt(dev_priv)->engine[RCS0]; if (!engine || !engine->kernel_context) return; diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c index dcd698a02da2..01ce1d72297f 100644 --- a/drivers/gpu/drm/i915/display/intel_plane_initial.c +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c @@ -3,11 +3,12 @@ * Copyright © 2021 Intel Corporation */ -#include "intel_display_types.h" -#include "intel_plane_initial.h" +#include "i915_drv.h" #include "intel_atomic_plane.h" #include "intel_display.h" +#include "intel_display_types.h" #include "intel_fb.h" +#include "intel_plane_initial.h" static bool intel_reuse_initial_plane_obj(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 3d9c0e13c329..f6526d9ccfdc 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -6,18 +6,19 @@ #ifndef __INTEL_PSR_H__ #define __INTEL_PSR_H__ -#include "intel_frontbuffer.h" +#include <linux/types.h> +enum fb_op_origin; struct drm_connector; struct drm_connector_state; struct drm_i915_private; +struct intel_atomic_state; +struct intel_crtc; struct intel_crtc_state; struct intel_dp; -struct intel_crtc; -struct intel_atomic_state; -struct intel_plane_state; -struct intel_plane; struct intel_encoder; +struct intel_plane; +struct intel_plane_state; void intel_psr_init_dpcd(struct intel_dp *intel_dp); void intel_psr_pre_plane_update(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index 8a52b7a16774..c8488f5ebd04 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -5,6 +5,7 @@ #include <linux/dmi.h> +#include "i915_drv.h" #include "intel_display_types.h" #include "intel_quirks.h" diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 2dc6c3742ba2..76e1188b01d4 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -1842,7 +1842,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state, intel_sdvo_write_sdvox(intel_sdvo, temp); for (i = 0; i < 2; i++) - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); success = 
intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); /* diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index c2251218a39e..09f405e4d363 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -186,6 +186,7 @@ static const struct intel_mpllb_state dg2_dp_uhbr10_100 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | @@ -369,6 +370,7 @@ static const struct intel_mpllb_state dg2_dp_uhbr10_38_4 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL, 1) | REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), .mpllb_div2 = REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 1b99a9501a45..2357a1301f48 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -40,15 +40,15 @@ #include <drm/drm_rect.h> #include "i915_drv.h" -#include "i915_trace.h" #include "i915_vgpu.h" +#include "i9xx_plane.h" #include "intel_atomic_plane.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_frontbuffer.h" #include "intel_sprite.h" -#include "i9xx_plane.h" #include "intel_vrr.h" int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) @@ -431,10 +431,6 @@ vlv_sprite_update_noarm(struct intel_plane *plane, u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); unsigned long irqflags; - /* Sizes are 0 based */ - crtc_w--; - crtc_h--; - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id), @@ -442,7 +438,7 @@ vlv_sprite_update_noarm(struct intel_plane *plane, intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id), - (crtc_h << 16) | crtc_w); + ((crtc_h - 1) << 16) | (crtc_w - 1)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -866,21 +862,15 @@ ivb_sprite_update_noarm(struct intel_plane *plane, u32 sprscale = 0; unsigned long irqflags; - /* Sizes are 0 based */ - src_w--; - src_h--; - crtc_w--; - crtc_h--; - if (crtc_w != src_w || crtc_h != src_h) - sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; + sprscale = SPRITE_SCALE_ENABLE | ((src_w - 1) << 16) | (src_h - 1); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, SPRSTRIDE(pipe), plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, SPRSIZE(pipe), (crtc_h << 16) | crtc_w); + intel_de_write_fw(dev_priv, SPRSIZE(pipe), ((crtc_h - 1) << 16) | (crtc_w - 1)); if (IS_IVYBRIDGE(dev_priv)) intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale); @@ -1208,21 +1198,15 @@ g4x_sprite_update_noarm(struct intel_plane *plane, u32 dvsscale = 0; unsigned long irqflags; - /* Sizes are 0 based */ - src_w--; - src_h--; - crtc_w--; - crtc_h--; - if (crtc_w != src_w || crtc_h != src_h) - dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; + dvsscale = DVS_SCALE_ENABLE | ((src_w - 1) << 16) | 
(src_h - 1); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, DVSSTRIDE(pipe), plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, DVSSIZE(pipe), (crtc_h << 16) | crtc_w); + intel_de_write_fw(dev_priv, DVSSIZE(pipe), ((crtc_h - 1) << 16) | (crtc_w - 1)); intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); @@ -1584,8 +1568,8 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, */ if (!ret && has_dst_key_in_primary_plane(dev_priv)) { struct intel_crtc *crtc = - intel_get_crtc_for_pipe(dev_priv, - to_intel_plane(plane)->pipe); + intel_crtc_for_pipe(dev_priv, + to_intel_plane(plane)->pipe); plane_state = drm_atomic_get_plane_state(state, crtc->base.primary); diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 88a398df9621..8a39989b87ad 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -36,6 +36,7 @@ #include "i915_drv.h" #include "intel_connector.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_hotplug.h" @@ -924,8 +925,7 @@ intel_enable_tv(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(dev); /* Prevents vblank waits from timing out in intel_tv_detect_type() */ - intel_wait_for_vblank(dev_priv, - to_intel_crtc(pipe_config->uapi.crtc)->pipe); + intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc)); intel_de_write(dev_priv, TV_CTL, intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE); @@ -1618,7 +1618,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, intel_de_write(dev_priv, TV_DAC, tv_dac); intel_de_posting_read(dev_priv, TV_DAC); - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); type = -1; tv_dac = intel_de_read(dev_priv, TV_DAC); @@ -1651,7 +1651,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, intel_de_posting_read(dev_priv, TV_CTL); /* For unknown reasons the hw barfs if we don't do this vblank wait. 
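The sprite hunks above drop the "Sizes are 0 based" pre-decrements in favour of inline (w - 1)/(h - 1) arithmetic at the register write, so the local variables keep their natural values. A sketch of the packing this open-codes:

#include <stdint.h>

/* Pack a width/height pair into a hardware SIZE register whose
 * fields are 0-based (a value of N means N+1 pixels). */
static uint32_t pack_size_0based(uint32_t w, uint32_t h)
{
        return ((h - 1) << 16) | (w - 1);
}

/* e.g. intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id),
 *      pack_size_0based(crtc_w, crtc_h)); */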
*/ - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); /* Restore interrupt config */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index a2108a8f544d..f043d85ba64d 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -330,7 +330,12 @@ enum vbt_gmbus_ddi { ADLS_DDC_BUS_PORT_TC1 = 0x2, ADLS_DDC_BUS_PORT_TC2, ADLS_DDC_BUS_PORT_TC3, - ADLS_DDC_BUS_PORT_TC4 + ADLS_DDC_BUS_PORT_TC4, + ADLP_DDC_BUS_PORT_TC1 = 0x3, + ADLP_DDC_BUS_PORT_TC2, + ADLP_DDC_BUS_PORT_TC3, + ADLP_DDC_BUS_PORT_TC4 + }; #define DP_AUX_A 0x40 diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index bf8d3c7ca2d9..9b05f93ed8bc 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -6,12 +6,14 @@ * Manasi Navare <manasi.d.navare@intel.com> */ #include <linux/limits.h> + #include "i915_drv.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" -#include "intel_vdsc.h" #include "intel_qp_tables.h" +#include "intel_vdsc.h" enum ROW_INDEX_BPP { ROW_INDEX_6BPP = 0, @@ -1110,25 +1112,16 @@ static i915_reg_t dss_ctl2_reg(struct intel_crtc *crtc, enum transcoder cpu_tran ICL_PIPE_DSS_CTL2(crtc->pipe) : DSS_CTL2; } -static struct intel_crtc * -_get_crtc_for_pipe(struct drm_i915_private *i915, enum pipe pipe) -{ - if (!intel_pipe_valid(i915, pipe)) - return NULL; - - return intel_get_crtc_for_pipe(i915, pipe); -} - struct intel_crtc * intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc) { - return _get_crtc_for_pipe(to_i915(primary_crtc->base.dev), primary_crtc->pipe + 1); + return intel_crtc_for_pipe(to_i915(primary_crtc->base.dev), primary_crtc->pipe + 1); } static struct intel_crtc * intel_dsc_get_bigjoiner_primary(const struct intel_crtc *secondary_crtc) { - return _get_crtc_for_pipe(to_i915(secondary_crtc->base.dev), secondary_crtc->pipe - 1); + return intel_crtc_for_pipe(to_i915(secondary_crtc->base.dev), secondary_crtc->pipe - 1); } void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state) diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 28890876bdeb..93a385396512 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -13,6 +13,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_fb.h" +#include "intel_fbc.h" #include "intel_pm.h" #include "intel_psr.h" #include "intel_sprite.h" @@ -420,9 +421,19 @@ static int icl_plane_min_width(const struct drm_framebuffer *fb, } } -static int icl_plane_max_width(const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) +static int icl_hdr_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) + return 4096; + else + return 5120; +} + +static int icl_sdr_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) { return 5120; } @@ -672,13 +683,13 @@ static u32 skl_plane_ctl_format(u32 pixel_format) case DRM_FORMAT_XYUV8888: return PLANE_CTL_FORMAT_XYUV; case DRM_FORMAT_YUYV: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; + return PLANE_CTL_FORMAT_YUV422 | 
PLANE_CTL_YUV422_ORDER_YUYV; case DRM_FORMAT_YVYU: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_YVYU; case DRM_FORMAT_UYVY: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_UYVY; case DRM_FORMAT_VYUY: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_VYUY; case DRM_FORMAT_NV12: return PLANE_CTL_FORMAT_NV12; case DRM_FORMAT_P010: @@ -1022,10 +1033,6 @@ skl_program_plane_noarm(struct intel_plane *plane, u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; unsigned long irqflags; - /* Sizes are 0 based */ - src_w--; - src_h--; - /* The scaler will handle the output position */ if (plane_state->scaler_id >= 0) { crtc_x = 0; @@ -1045,7 +1052,14 @@ skl_program_plane_noarm(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), - (src_h << 16) | src_w); + ((src_h - 1) << 16) | (src_w - 1)); + + if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) { + intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0), + lower_32_bits(plane_state->ccval)); + intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 1), + upper_32_bits(plane_state->ccval)); + } if (icl_is_hdr_plane(dev_priv, plane_id)) intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), @@ -1054,10 +1068,6 @@ skl_program_plane_noarm(struct intel_plane *plane, if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) icl_program_input_csc(plane, crtc_state, plane_state); - if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) - intel_uncore_write64_fw(&dev_priv->uncore, - PLANE_CC_VAL(pipe, plane_id), plane_state->ccval); - skl_write_plane_wm(plane, crtc_state); intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); @@ -1727,7 +1737,7 @@ static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0; + return intel_pxp_key_check(&to_gt(i915)->pxp, obj, false) == 0; } static bool pxp_is_borked(struct drm_i915_gem_object *obj) @@ -1815,6 +1825,15 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, return pipe == PIPE_A && plane_id == PLANE_PRIMARY; } +static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id) +{ + if (skl_plane_has_fbc(dev_priv, pipe, plane_id)) + return dev_priv->fbc; + else + return NULL; +} + static bool skl_plane_has_planar(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { @@ -2101,14 +2120,14 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane->id = plane_id; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id); - if (skl_plane_has_fbc(dev_priv, pipe, plane_id)) - plane->fbc = &dev_priv->fbc; - if (plane->fbc) - plane->fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; + intel_fbc_add_plane(skl_plane_fbc(dev_priv, pipe, plane_id), plane); if (DISPLAY_VER(dev_priv) >= 11) { plane->min_width = icl_plane_min_width; - plane->max_width = icl_plane_max_width; + if (icl_is_hdr_plane(dev_priv, plane_id)) + plane->max_width = icl_hdr_plane_max_width; + else + plane->max_width = icl_sdr_plane_max_width; plane->max_height = icl_plane_max_height; plane->min_cdclk = icl_plane_min_cdclk; } else if (DISPLAY_VER(dev_priv) >= 10) { diff --git 
a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 347dab952e90..00327b750fbb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -237,7 +237,7 @@ static int proto_context_set_persistence(struct drm_i915_private *i915, * collateral damage, and we should not pretend we can by * exposing the interface. */ - if (!intel_has_reset_engine(&i915->gt)) + if (!intel_has_reset_engine(to_gt(i915))) return -ENODEV; pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE); @@ -254,7 +254,7 @@ static int proto_context_set_protected(struct drm_i915_private *i915, if (!protected) { pc->uses_protected_content = false; - } else if (!intel_pxp_is_enabled(&i915->gt.pxp)) { + } else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) { ret = -ENODEV; } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) || !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) { @@ -268,8 +268,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915, */ pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); - if (!intel_pxp_is_active(&i915->gt.pxp)) - ret = intel_pxp_start(&i915->gt.pxp); + if (!intel_pxp_is_active(&to_gt(i915)->pxp)) + ret = intel_pxp_start(&to_gt(i915)->pxp); } return ret; @@ -564,6 +564,7 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base, container_of_user(base, typeof(*ext), base); const struct set_proto_ctx_engines *set = data; struct drm_i915_private *i915 = set->i915; + struct i915_engine_class_instance prev_engine; u64 flags; int err = 0, n, i, j; u16 slot, width, num_siblings; @@ -571,7 +572,7 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base, intel_engine_mask_t prev_mask; /* FIXME: This is NIY for execlists */ - if (!(intel_uc_uses_guc_submission(&i915->gt.uc))) + if (!(intel_uc_uses_guc_submission(&to_gt(i915)->uc))) return -ENODEV; if (get_user(slot, &ext->engine_index)) @@ -629,7 +630,6 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base, /* Create contexts / engines */ for (i = 0; i < width; ++i) { intel_engine_mask_t current_mask = 0; - struct i915_engine_class_instance prev_engine; for (j = 0; j < num_siblings; ++j) { struct i915_engine_class_instance ci; @@ -833,7 +833,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv, sseu = &pc->legacy_rcs_sseu; } - ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu); + ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu); if (ret) return ret; @@ -1044,7 +1044,7 @@ static struct i915_gem_engines *alloc_engines(unsigned int count) static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx, struct intel_sseu rcs_sseu) { - const struct intel_gt *gt = &ctx->i915->gt; + const struct intel_gt *gt = to_gt(ctx->i915); struct intel_engine_cs *engine; struct i915_gem_engines *e, *err; enum intel_engine_id id; @@ -1521,7 +1521,7 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state) * collateral damage, and we should not pretend we can by * exposing the interface.
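The &i915->gt to to_gt(i915) conversions that dominate this file funnel every lookup of the primary GT through a single accessor, presumably so the GT's storage can later change (for example for multi-tile devices) without touching callers again. A minimal model of such an accessor, assuming the GT is still embedded in the device structure (names hypothetical):

struct gt_model { int id; };

struct device_model {
        struct gt_model gt0;    /* layout detail, private to the accessor */
};

static inline struct gt_model *to_gt_model(struct device_model *dev)
{
        return &dev->gt0;       /* the one place that knows where the GT lives */
}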
*/ - if (!intel_has_reset_engine(&ctx->i915->gt)) + if (!intel_has_reset_engine(to_gt(ctx->i915))) return -ENODEV; i915_gem_context_clear_persistence(ctx); @@ -1559,7 +1559,7 @@ i915_gem_create_context(struct drm_i915_private *i915, } else if (HAS_FULL_PPGTT(i915)) { struct i915_ppgtt *ppgtt; - ppgtt = i915_ppgtt_create(&i915->gt, 0); + ppgtt = i915_ppgtt_create(to_gt(i915), 0); if (IS_ERR(ppgtt)) { drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); @@ -1742,7 +1742,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, if (args->flags) return -EINVAL; - ppgtt = i915_ppgtt_create(&i915->gt, 0); + ppgtt = i915_ppgtt_create(to_gt(i915), 0); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -2194,7 +2194,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) return -EINVAL; - ret = intel_gt_terminally_wedged(&i915->gt); + ret = intel_gt_terminally_wedged(to_gt(i915)); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c index 8955d6abcef1..9402d4bf4ffc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c @@ -379,7 +379,7 @@ static int ext_set_protected(struct i915_user_extension __user *base, void *data if (ext.flags) return -EINVAL; - if (!intel_pxp_is_enabled(&ext_data->i915->gt.pxp)) + if (!intel_pxp_is_enabled(&to_gt(ext_data->i915)->pxp)) return -ENODEV; ext_data->flags |= I915_BO_PROTECTED; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 60ee60f7bb09..3a5b247be738 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -1098,6 +1098,47 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache) return &i915->ggtt; } +static void reloc_cache_unmap(struct reloc_cache *cache) +{ + void *vaddr; + + if (!cache->vaddr) + return; + + vaddr = unmask_page(cache->vaddr); + if (cache->vaddr & KMAP) + kunmap_atomic(vaddr); + else + io_mapping_unmap_atomic((void __iomem *)vaddr); +} + +static void reloc_cache_remap(struct reloc_cache *cache, + struct drm_i915_gem_object *obj) +{ + void *vaddr; + + if (!cache->vaddr) + return; + + if (cache->vaddr & KMAP) { + struct page *page = i915_gem_object_get_page(obj, cache->page); + + vaddr = kmap_atomic(page); + cache->vaddr = unmask_flags(cache->vaddr) | + (unsigned long)vaddr; + } else { + struct i915_ggtt *ggtt = cache_to_ggtt(cache); + unsigned long offset; + + offset = cache->node.start; + if (!drm_mm_node_allocated(&cache->node)) + offset += cache->page << PAGE_SHIFT; + + cache->vaddr = (unsigned long) + io_mapping_map_atomic_wc(&ggtt->iomap, offset); + } +} + static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb) { void *vaddr; @@ -1362,10 +1403,17 @@ eb_relocate_entry(struct i915_execbuffer *eb, * batchbuffers. 
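The reloc_cache_unmap()/reloc_cache_remap() helpers added above exist because the relocation path holds an atomic mapping (kmap_atomic() or io_mapping_map_atomic_wc()), under which sleeping is forbidden, while the GEN6 workaround bind that follows must take the sleeping vm->mutex. The resulting drop/sleep/reacquire shape, modelled in plain C with a pthread mutex standing in for vm->mutex (hypothetical names):

#include <pthread.h>
#include <stddef.h>

struct cache_model {
        void *vaddr;            /* stand-in for the atomic kmap */
};

static void bind_locked(void)
{
        /* stand-in for i915_vma_bind() under vm->mutex */
}

static void bind_with_cache_dropped(struct cache_model *c,
                                    pthread_mutex_t *vm_mutex)
{
        c->vaddr = NULL;                /* unmap before leaving atomic context */
        pthread_mutex_lock(vm_mutex);   /* sleeping is legal again */
        bind_locked();
        pthread_mutex_unlock(vm_mutex);
        /* remap here; the real code re-derives the mapping from the object */
}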
*/ if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && - GRAPHICS_VER(eb->i915) == 6) { + GRAPHICS_VER(eb->i915) == 6 && + !i915_vma_is_bound(target->vma, I915_VMA_GLOBAL_BIND)) { + struct i915_vma *vma = target->vma; + + reloc_cache_unmap(&eb->reloc_cache); + mutex_lock(&vma->vm->mutex); err = i915_vma_bind(target->vma, target->vma->obj->cache_level, PIN_GLOBAL, NULL); + mutex_unlock(&vma->vm->mutex); + reloc_cache_remap(&eb->reloc_cache, ev->vma->obj); if (err) return err; } @@ -2361,9 +2409,9 @@ static int eb_submit(struct i915_execbuffer *eb) return err; } -static int num_vcs_engines(const struct drm_i915_private *i915) +static int num_vcs_engines(struct drm_i915_private *i915) { - return hweight_long(VDBOX_MASK(&i915->gt)); + return hweight_long(VDBOX_MASK(to_gt(i915))); } /* @@ -3102,7 +3150,7 @@ eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd) fence_array = dma_fence_array_create(eb->num_batches, fences, eb->context->parallel.fence_context, - eb->context->parallel.seqno, + eb->context->parallel.seqno++, false); if (!fence_array) { kfree(fences); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 65fc6ff5f59d..1478c02a82cb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -17,6 +17,7 @@ #include "i915_gem_ioctls.h" #include "i915_gem_object.h" #include "i915_gem_mman.h" +#include "i915_mm.h" #include "i915_trace.h" #include "i915_user_extensions.h" #include "i915_gem_ttm.h" @@ -72,7 +73,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (args->flags & ~(I915_MMAP_WC)) return -EINVAL; - if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT)) + if (args->flags & I915_MMAP_WC && !pat_enabled()) return -ENODEV; obj = i915_gem_object_lookup(file, args->handle); @@ -537,6 +538,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) { struct i915_mmap_offset *mmo, *mn; + if (obj->ops->unmap_virtual) + obj->ops->unmap_virtual(obj); + spin_lock(&obj->mmo.lock); rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) { @@ -645,7 +649,7 @@ mmap_offset_attach(struct drm_i915_gem_object *obj, goto insert; /* Attempt to reap some mmap space from dead objects */ - err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT, + err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT, NULL); if (err) goto err; @@ -736,7 +740,7 @@ i915_gem_dumb_mmap_offset(struct drm_file *file, if (HAS_LMEM(to_i915(dev))) mmap_type = I915_MMAP_TYPE_FIXED; - else if (boot_cpu_has(X86_FEATURE_PAT)) + else if (pat_enabled()) mmap_type = I915_MMAP_TYPE_WC; else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt)) return -ENODEV; @@ -792,7 +796,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data, break; case I915_MMAP_OFFSET_WC: - if (!boot_cpu_has(X86_FEATURE_PAT)) + if (!pat_enabled()) return -ENODEV; type = I915_MMAP_TYPE_WC; break; @@ -802,7 +806,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data, break; case I915_MMAP_OFFSET_UC: - if (!boot_cpu_has(X86_FEATURE_PAT)) + if (!pat_enabled()) return -ENODEV; type = I915_MMAP_TYPE_UC; break; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 5fac9b560b73..d87b508b59b1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -262,6 +262,8 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj) */ 
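The free-worker hunk just below makes final object teardown run under the object lock; when the trylock fails, the object is tossed back onto the lock-free free_list and the worker, now a delayed work, retries 10ms later instead of blocking. A userspace model of that retry-later shape, with an ordinary linked list standing in for the kernel's llist (hypothetical names):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct obj_model {
        pthread_mutex_t lock;
        struct obj_model *next;
};

static struct obj_model *free_list;     /* stand-in for i915->mm.free_list */

static bool reap_one(struct obj_model *o)
{
        if (pthread_mutex_trylock(&o->lock)) {
                /* busy: toss it back and let a delayed pass retry */
                o->next = free_list;
                free_list = o;
                return false;
        }
        /* ... tear down the object's pages while the lock is held ... */
        pthread_mutex_unlock(&o->lock);
        return true;
}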
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj) { + assert_object_held(obj); + if (!list_empty(&obj->vma.list)) { struct i915_vma *vma; @@ -328,7 +330,16 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, obj->ops->delayed_free(obj); continue; } + + if (!i915_gem_object_trylock(obj, NULL)) { + /* busy, toss it back to the pile */ + if (llist_add(&obj->freed, &i915->mm.free_list)) + queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10)); + continue; + } + __i915_gem_object_pages_fini(obj); + i915_gem_object_unlock(obj); __i915_gem_free_object(obj); /* But keep the pointer alive for RCU-protected lookups */ @@ -348,7 +359,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915) static void __i915_gem_free_work(struct work_struct *work) { struct drm_i915_private *i915 = - container_of(work, struct drm_i915_private, mm.free_work); + container_of(work, struct drm_i915_private, mm.free_work.work); i915_gem_flush_free_objects(i915); } @@ -380,7 +391,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj) */ if (llist_add(&obj->freed, &i915->mm.free_list)) - queue_work(i915->wq, &i915->mm.free_work); + queue_delayed_work(i915->wq, &i915->mm.free_work, 0); } void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj, @@ -705,7 +716,7 @@ bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj, void i915_gem_init__objects(struct drm_i915_private *i915) { - INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); + INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work); } void i915_objects_module_exit(void) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 66f20b803b01..f66d46882ea7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -210,9 +210,13 @@ static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object return __i915_gem_object_lock(obj, ww, true); } -static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj) +static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww) { - return dma_resv_trylock(obj->base.resv); + if (!ww) + return dma_resv_trylock(obj->base.resv); + else + return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx); } static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index f9f7e44099fe..0dd107dcecc2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -67,6 +67,7 @@ struct drm_i915_gem_object_ops { int (*pwrite)(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pwrite *arg); u64 (*mmap_offset)(struct drm_i915_gem_object *obj); + void (*unmap_virtual)(struct drm_i915_gem_object *obj); int (*dmabuf_export)(struct drm_i915_gem_object *obj); @@ -310,6 +311,7 @@ struct drm_i915_gem_object { #define I915_BO_READONLY BIT(6) #define I915_TILING_QUIRK_BIT 7 /* unknown swizzling; do not release! 
*/ #define I915_BO_PROTECTED BIT(8) +#define I915_BO_WAS_BOUND_BIT 9 /** * @mem_flags - Mutable placement-related flags * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 49c6e55c68ce..a50f884973bc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -10,6 +10,8 @@ #include "i915_gem_lmem.h" #include "i915_gem_mman.h" +#include "gt/intel_gt.h" + void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, struct sg_table *pages, unsigned int sg_page_sizes) @@ -161,7 +163,6 @@ retry: /* Immediately discard the backing storage */ int i915_gem_object_truncate(struct drm_i915_gem_object *obj) { - drm_gem_free_mmap_offset(&obj->base); if (obj->ops->truncate) return obj->ops->truncate(obj); @@ -222,6 +223,14 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) __i915_gem_object_reset_page_iter(obj); obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0; + if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + intel_wakeref_t wakeref; + + with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref) + intel_gt_invalidate_tlbs(to_gt(i915)); + } + return pages; } @@ -424,8 +433,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, goto err_unpin; } - if (GEM_WARN_ON(type == I915_MAP_WC && - !static_cpu_has(X86_FEATURE_PAT))) + if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled())) ptr = ERR_PTR(-ENODEV); else if (i915_gem_object_has_struct_page(obj)) ptr = i915_gem_object_map_page(obj, type); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 7986612f48fa..ca6faffcc496 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -19,6 +19,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) { struct address_space *mapping = obj->base.filp->f_mapping; + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct scatterlist *sg; struct sg_table *st; dma_addr_t dma; @@ -73,7 +74,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) dst += PAGE_SIZE; } - intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); + intel_gt_chipset_flush(to_gt(i915)); /* We're no longer struct page backed */ obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE; @@ -140,6 +141,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj, { void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; char __user *user_data = u64_to_user_ptr(args->data_ptr); + struct drm_i915_private *i915 = to_i915(obj->base.dev); int err; err = i915_gem_object_wait(obj, @@ -159,7 +161,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj, return -EFAULT; drm_clflush_virt_range(vaddr, args->size); - intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); + intel_gt_chipset_flush(to_gt(i915)); i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); return 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 726b40e1fbb0..ac56124760e1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -35,7 +35,7 @@ void i915_gem_suspend(struct drm_i915_private *i915) * state. Fortunately, the kernel_context is disposable and we do * not rely on its state. 
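The I915_BO_WAS_BOUND_BIT hunk above (in __i915_gem_object_unset_pages()) issues a GT TLB invalidation only for objects that actually had a GTT binding, and only while the device holds an active runtime-PM reference; test_and_clear_bit() guarantees a single invalidation per bound-unbound cycle even under races. The atomic read-modify-write at its core, modelled with C11 atomics (hypothetical helper; the bit is presumably set on the bind side, which is outside this excerpt):

#include <stdatomic.h>
#include <stdbool.h>

static bool test_and_clear(atomic_ulong *flags, unsigned int bit)
{
        unsigned long mask = 1UL << bit;

        /* only the caller that observed the bit set gets true */
        return atomic_fetch_and(flags, ~mask) & mask;
}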
*/ - intel_gt_suspend_prepare(&i915->gt); + intel_gt_suspend_prepare(to_gt(i915)); i915_gem_drain_freed_objects(i915); } @@ -153,7 +153,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) * machine in an unusable condition. */ - intel_gt_suspend_late(&i915->gt); + intel_gt_suspend_late(to_gt(i915)); spin_lock_irqsave(&i915->mm.obj_lock, flags); for (phase = phases; *phase; phase++) { @@ -223,7 +223,7 @@ void i915_gem_resume(struct drm_i915_private *i915) * guarantee that the context image is complete. So let's just reset * it and start again. */ - intel_gt_resume(&i915->gt); + intel_gt_resume(to_gt(i915)); ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU); GEM_WARN_ON(ret); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 157a9765f483..cc927e49d21f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -36,8 +36,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) return swap_available() || obj->mm.madv == I915_MADV_DONTNEED; } -static bool unsafe_drop_pages(struct drm_i915_gem_object *obj, - unsigned long shrink, bool trylock_vm) +static int drop_pages(struct drm_i915_gem_object *obj, + unsigned long shrink, bool trylock_vm) { unsigned long flags; @@ -153,7 +153,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, */ if (shrink & I915_SHRINK_ACTIVE) /* Retire requests to unpin all idle contexts */ - intel_gt_retire_requests(&i915->gt); + intel_gt_retire_requests(to_gt(i915)); /* * As we may completely rewrite the (un)bound list whilst unbinding @@ -214,26 +214,24 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - err = 0; - if (unsafe_drop_pages(obj, shrink, trylock_vm)) { - /* May arrive from get_pages on another bo */ - if (!ww) { - if (!i915_gem_object_trylock(obj)) - goto skip; - } else { - err = i915_gem_object_lock(obj, ww); - if (err) - goto skip; - } - - if (!__i915_gem_object_put_pages(obj)) { - if (!try_to_writeback(obj, shrink)) - count += obj->base.size >> PAGE_SHIFT; - } - if (!ww) - i915_gem_object_unlock(obj); + /* May arrive from get_pages on another bo */ + if (!ww) { + if (!i915_gem_object_trylock(obj, NULL)) + goto skip; + } else { + err = i915_gem_object_lock(obj, ww); + if (err) + goto skip; } + if (drop_pages(obj, shrink, trylock_vm) && + !__i915_gem_object_put_pages(obj) && + !try_to_writeback(obj, shrink)) + count += obj->base.size >> PAGE_SHIFT; + + if (!ww) + i915_gem_object_unlock(obj); + scanned += obj->base.size >> PAGE_SHIFT; skip: i915_gem_object_put(obj); @@ -407,12 +405,18 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr list_for_each_entry_safe(vma, next, &i915->ggtt.vm.bound_list, vm_link) { unsigned long count = vma->node.size >> PAGE_SHIFT; + struct drm_i915_gem_object *obj = vma->obj; if (!vma->iomap || i915_vma_is_active(vma)) continue; + if (!i915_gem_object_trylock(obj, NULL)) + continue; + if (__i915_vma_unbind(vma) == 0) freed_pages += count; + + i915_gem_object_unlock(obj); } mutex_unlock(&i915->ggtt.vm.mutex); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index bce03d74a0b4..7df50fd6cc7b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -488,6 +488,9 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem) return 0; } + /* Exclude the reserved region from driver use */ + mem->region.end = 
reserved_base - 1; + /* It is possible for the reserved area to end before the end of stolen * memory, so just consider the start. */ reserved_total = stolen_top - reserved_base; @@ -653,7 +656,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE; i915_gem_object_set_cache_coherency(obj, cache_level); - if (WARN_ON(!i915_gem_object_trylock(obj))) + if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) return -EBUSY; i915_gem_object_init_memory_region(obj, mem); @@ -780,6 +783,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, struct intel_uncore *uncore = &i915->uncore; struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct intel_memory_region *mem; + resource_size_t min_page_size; resource_size_t io_start; resource_size_t lmem_size; u64 lmem_base; @@ -791,8 +795,11 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, lmem_size = pci_resource_len(pdev, 2) - lmem_base; io_start = pci_resource_start(pdev, 2) + lmem_base; + min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K : + I915_GTT_PAGE_SIZE_4K; + mem = intel_memory_region_create(i915, lmem_base, lmem_size, - I915_GTT_PAGE_SIZE_4K, io_start, + min_page_size, io_start, type, instance, &i915_region_stolen_lmem_ops); if (IS_ERR(mem)) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c index 1929d6cf4150..75501db71041 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c @@ -38,12 +38,13 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data, { const unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_private *i915 = to_i915(dev); struct i915_gem_context *ctx; unsigned long idx; long ret; /* ABI: return -EIO if already wedged */ - ret = intel_gt_terminally_wedged(&to_i915(dev)->gt); + ret = intel_gt_terminally_wedged(to_gt(i915)); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 218a9b3037c7..de3fe79b665a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -166,7 +166,7 @@ static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev, struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM]; struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); const unsigned int max_segment = i915_sg_segment_size(); - const size_t size = ttm->num_pages << PAGE_SHIFT; + const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT; struct file *filp = i915_tt->filp; struct sgt_iter sgt_iter; struct sg_table *st; @@ -556,6 +556,20 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, return intel_region_ttm_resource_to_rsgt(obj->mm.region, res); } +static int i915_ttm_truncate(struct drm_i915_gem_object *obj) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + int err; + + WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED); + + err = i915_ttm_move_notify(bo); + if (err) + return err; + + return i915_ttm_purge(obj); +} + static void i915_ttm_swap_notify(struct ttm_buffer_object *bo) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); @@ -883,6 +897,11 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) if (ret) return ret; + if (obj->mm.madv != I915_MADV_WILLNEED) { + dma_resv_unlock(bo->base.resv); + return VM_FAULT_SIGBUS; + } + if 
(drm_dev_enter(dev, &idx)) { ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, TTM_BO_VM_NUM_PREFAULT); @@ -945,6 +964,11 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj) return drm_vma_node_offset_addr(&obj->base.vma_node); } +static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj) +{ + ttm_bo_unmap_virtual(i915_gem_to_ttm(obj)); +} + static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { .name = "i915_gem_object_ttm", .flags = I915_GEM_OBJECT_IS_SHRINKABLE | @@ -952,7 +976,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { .get_pages = i915_ttm_get_pages, .put_pages = i915_ttm_put_pages, - .truncate = i915_ttm_purge, + .truncate = i915_ttm_truncate, .shrinker_release_pages = i915_ttm_shrinker_release_pages, .adjust_lru = i915_ttm_adjust_lru, @@ -960,6 +984,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { .migrate = i915_ttm_migrate, .mmap_offset = i915_ttm_mmap_offset, + .unmap_virtual = i915_ttm_unmap_virtual, .mmap_ops = &vm_ops_ttm, }; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c index 80df9f592407..ee9612a3ee5e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -3,10 +3,9 @@ * Copyright © 2021 Intel Corporation */ -#include <linux/dma-fence-array.h> - #include <drm/ttm/ttm_bo_driver.h> +#include "i915_deps.h" #include "i915_drv.h" #include "intel_memory_region.h" #include "intel_region_ttm.h" @@ -43,234 +42,6 @@ void i915_ttm_migrate_set_failure_modes(bool gpu_migration, } #endif -/** - * DOC: Set of utilities to dynamically collect dependencies and - * eventually coalesce them into a single fence which is fed into - * the GT migration code, since it only accepts a single dependency - * fence. - * The single fence returned from these utilities, in the case of - * dependencies from multiple fence contexts, is a struct dma_fence_array, - * since the i915 request code can break that up and await the individual - * fences. - * - * Once we can do async unbinding, this is also needed to coalesce - * the migration fence with the unbind fences. - * - * While collecting the individual dependencies, we store the refcounted - * struct dma_fence pointers in a realloc-managed pointer array, since - * that can be easily fed into a dma_fence_array. Other options are - * available, like for example an xarray for similarity with drm/sched. - * Can be changed easily if needed. - * - * A struct i915_deps needs to be initialized using i915_deps_init(). - * If i915_deps_add_dependency() or i915_deps_add_resv() returns an - * error code, it will internally call i915_deps_fini(), which frees - * all internal references and allocations. After a call to - * i915_deps_to_fence(), or i915_deps_sync(), the struct should similarly - * be viewed as uninitialized. - * - * We might want to break this out into a separate file as a utility. - */ - -#define I915_DEPS_MIN_ALLOC_CHUNK 8U - -/** - * struct i915_deps - Collect dependencies into a single dma-fence - * @single: Storage for pointer if the collection is a single fence. - * @fences: Allocated array of fence pointers if more than a single fence; - * otherwise points to the address of @single. - * @num_deps: Current number of dependency fences. - * @fences_size: Size of the @fences array in number of pointers. - * @gfp: Allocation mode.
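The DOC text above describes the dependency collector this patch evidently moves out to a separate i915_deps utility rather than deleting outright, judging by the newly added i915_deps.h include. Its storage policy: start with the inline single slot, double a heap-allocated pointer array on demand, and fall back to waiting synchronously when allocation fails. The growth policy alone, as a compilable userspace model (malloc in place of kmalloc_array):

#include <stdlib.h>
#include <string.h>

#define MIN_ALLOC_CHUNK 8u

struct deps_model {
        void *single;           /* inline storage for the common 1-fence case */
        void **fences;          /* points at &single until growth is needed */
        unsigned int num, size; /* size starts at 1, fences at &single */
};

static int deps_model_add(struct deps_model *d, void *fence)
{
        if (d->num >= d->size) {
                unsigned int new_size = d->size * 2;
                void **grown;

                if (new_size < MIN_ALLOC_CHUNK)
                        new_size = MIN_ALLOC_CHUNK;
                grown = malloc(new_size * sizeof(*grown));
                if (!grown)
                        return -1;      /* the real code syncs on the fences instead */
                memcpy(grown, d->fences, d->num * sizeof(*grown));
                if (d->fences != &d->single)
                        free(d->fences);
                d->fences = grown;
                d->size = new_size;
        }
        d->fences[d->num++] = fence;
        return 0;
}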
- */ -struct i915_deps { - struct dma_fence *single; - struct dma_fence **fences; - unsigned int num_deps; - unsigned int fences_size; - gfp_t gfp; -}; - -static void i915_deps_reset_fences(struct i915_deps *deps) -{ - if (deps->fences != &deps->single) - kfree(deps->fences); - deps->num_deps = 0; - deps->fences_size = 1; - deps->fences = &deps->single; -} - -static void i915_deps_init(struct i915_deps *deps, gfp_t gfp) -{ - deps->fences = NULL; - deps->gfp = gfp; - i915_deps_reset_fences(deps); -} - -static void i915_deps_fini(struct i915_deps *deps) -{ - unsigned int i; - - for (i = 0; i < deps->num_deps; ++i) - dma_fence_put(deps->fences[i]); - - if (deps->fences != &deps->single) - kfree(deps->fences); -} - -static int i915_deps_grow(struct i915_deps *deps, struct dma_fence *fence, - const struct ttm_operation_ctx *ctx) -{ - int ret; - - if (deps->num_deps >= deps->fences_size) { - unsigned int new_size = 2 * deps->fences_size; - struct dma_fence **new_fences; - - new_size = max(new_size, I915_DEPS_MIN_ALLOC_CHUNK); - new_fences = kmalloc_array(new_size, sizeof(*new_fences), deps->gfp); - if (!new_fences) - goto sync; - - memcpy(new_fences, deps->fences, - deps->fences_size * sizeof(*new_fences)); - swap(new_fences, deps->fences); - if (new_fences != &deps->single) - kfree(new_fences); - deps->fences_size = new_size; - } - deps->fences[deps->num_deps++] = dma_fence_get(fence); - return 0; - -sync: - if (ctx->no_wait_gpu && !dma_fence_is_signaled(fence)) { - ret = -EBUSY; - goto unref; - } - - ret = dma_fence_wait(fence, ctx->interruptible); - if (ret) - goto unref; - - ret = fence->error; - if (ret) - goto unref; - - return 0; - -unref: - i915_deps_fini(deps); - return ret; -} - -static int i915_deps_sync(struct i915_deps *deps, - const struct ttm_operation_ctx *ctx) -{ - struct dma_fence **fences = deps->fences; - unsigned int i; - int ret = 0; - - for (i = 0; i < deps->num_deps; ++i, ++fences) { - if (ctx->no_wait_gpu && !dma_fence_is_signaled(*fences)) { - ret = -EBUSY; - break; - } - - ret = dma_fence_wait(*fences, ctx->interruptible); - if (!ret) - ret = (*fences)->error; - if (ret) - break; - } - - i915_deps_fini(deps); - return ret; -} - -static int i915_deps_add_dependency(struct i915_deps *deps, - struct dma_fence *fence, - const struct ttm_operation_ctx *ctx) -{ - unsigned int i; - int ret; - - if (!fence) - return 0; - - if (dma_fence_is_signaled(fence)) { - ret = fence->error; - if (ret) - i915_deps_fini(deps); - return ret; - } - - for (i = 0; i < deps->num_deps; ++i) { - struct dma_fence *entry = deps->fences[i]; - - if (!entry->context || entry->context != fence->context) - continue; - - if (dma_fence_is_later(fence, entry)) { - dma_fence_put(entry); - deps->fences[i] = dma_fence_get(fence); - } - - return 0; - } - - return i915_deps_grow(deps, fence, ctx); -} - -static struct dma_fence *i915_deps_to_fence(struct i915_deps *deps, - const struct ttm_operation_ctx *ctx) -{ - struct dma_fence_array *array; - - if (deps->num_deps == 0) - return NULL; - - if (deps->num_deps == 1) { - deps->num_deps = 0; - return deps->fences[0]; - } - - /* - * TODO: Alter the allocation mode here to not try too hard to - * make things async. 
- */ - array = dma_fence_array_create(deps->num_deps, deps->fences, 0, 0, - false); - if (!array) - return ERR_PTR(i915_deps_sync(deps, ctx)); - - deps->fences = NULL; - i915_deps_reset_fences(deps); - - return &array->base; -} - -static int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv, - bool all, const bool no_excl, - const struct ttm_operation_ctx *ctx) -{ - struct dma_resv_iter iter; - struct dma_fence *fence; - - dma_resv_assert_held(resv); - dma_resv_for_each_fence(&iter, resv, all, fence) { - int ret; - - if (no_excl && dma_resv_iter_is_exclusive(&iter)) - continue; - - ret = i915_deps_add_dependency(deps, fence, ctx); - if (ret) - return ret; - } - - return 0; -} - static enum i915_cache_level i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res, struct ttm_tt *ttm) @@ -387,7 +158,7 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo, struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm, struct sg_table *dst_st, - struct dma_fence *dep) + const struct i915_deps *deps) { struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915), bdev); @@ -397,7 +168,7 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo, enum i915_cache_level src_level, dst_level; int ret; - if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt)) + if (!to_gt(i915)->migrate.context || intel_gt_is_wedged(to_gt(i915))) return ERR_PTR(-EINVAL); /* With fail_gpu_migration, we always perform a GPU clear. */ @@ -410,8 +181,8 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo, !I915_SELFTEST_ONLY(fail_gpu_migration)) return ERR_PTR(-EINVAL); - intel_engine_pm_get(i915->gt.migrate.context->engine); - ret = intel_context_migrate_clear(i915->gt.migrate.context, dep, + intel_engine_pm_get(to_gt(i915)->migrate.context->engine); + ret = intel_context_migrate_clear(to_gt(i915)->migrate.context, deps, dst_st->sgl, dst_level, i915_ttm_gtt_binds_lmem(dst_mem), 0, &rq); @@ -423,9 +194,9 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo, return ERR_CAST(src_rsgt); src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm); - intel_engine_pm_get(i915->gt.migrate.context->engine); - ret = intel_context_migrate_copy(i915->gt.migrate.context, - dep, src_rsgt->table.sgl, + intel_engine_pm_get(to_gt(i915)->migrate.context->engine); + ret = intel_context_migrate_copy(to_gt(i915)->migrate.context, + deps, src_rsgt->table.sgl, src_level, i915_ttm_gtt_binds_lmem(bo->resource), dst_st->sgl, dst_level, @@ -435,7 +206,7 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo, i915_refct_sgt_put(src_rsgt); } - intel_engine_pm_put(i915->gt.migrate.context->engine); + intel_engine_pm_put(to_gt(i915)->migrate.context->engine); if (ret && rq) { i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); @@ -610,10 +381,11 @@ i915_ttm_memcpy_work_arm(struct i915_ttm_memcpy_work *work, } static struct dma_fence * -__i915_ttm_move(struct ttm_buffer_object *bo, bool clear, +__i915_ttm_move(struct ttm_buffer_object *bo, + const struct ttm_operation_ctx *ctx, bool clear, struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm, struct i915_refct_sgt *dst_rsgt, bool allow_accel, - struct dma_fence *move_dep) + const struct i915_deps *move_deps) { struct i915_ttm_memcpy_work *copy_work = NULL; struct i915_ttm_memcpy_arg _arg, *arg = &_arg; @@ -621,7 +393,7 @@ __i915_ttm_move(struct ttm_buffer_object *bo, bool clear, if (allow_accel) { fence = i915_ttm_accel_move(bo, clear, dst_mem, 
dst_ttm, - &dst_rsgt->table, move_dep); + &dst_rsgt->table, move_deps); /* * We only need to intercept the error when moving to lmem. @@ -655,8 +427,8 @@ __i915_ttm_move(struct ttm_buffer_object *bo, bool clear, if (!IS_ERR(fence)) goto out; - } else if (move_dep) { - int err = dma_fence_wait(move_dep, true); + } else if (move_deps) { + int err = i915_deps_sync(move_deps, ctx); if (err) return ERR_PTR(err); @@ -680,29 +452,17 @@ out: return fence; } -static struct dma_fence *prev_fence(struct ttm_buffer_object *bo, - struct ttm_operation_ctx *ctx) +static int +prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, + struct i915_deps *deps) { - struct i915_deps deps; int ret; - /* - * Instead of trying hard with GFP_KERNEL to allocate memory, - * the dependency collection will just sync if it doesn't - * succeed. - */ - i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); - ret = i915_deps_add_dependency(&deps, bo->moving, ctx); + ret = i915_deps_add_dependency(deps, bo->moving, ctx); if (!ret) - /* - * TODO: Only await excl fence here, and shared fences before - * signaling the migration fence. - */ - ret = i915_deps_add_resv(&deps, bo->base.resv, true, false, ctx); - if (ret) - return ERR_PTR(ret); + ret = i915_deps_add_resv(deps, bo->base.resv, ctx); - return i915_deps_to_fence(&deps, ctx); + return ret; } /** @@ -756,16 +516,18 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, clear = !i915_ttm_cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm)); if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) { - struct dma_fence *dep = prev_fence(bo, ctx); + struct i915_deps deps; - if (IS_ERR(dep)) { + i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); + ret = prev_deps(bo, ctx, &deps); + if (ret) { i915_refct_sgt_put(dst_rsgt); - return PTR_ERR(dep); + return ret; } - migration_fence = __i915_ttm_move(bo, clear, dst_mem, bo->ttm, - dst_rsgt, true, dep); - dma_fence_put(dep); + migration_fence = __i915_ttm_move(bo, ctx, clear, dst_mem, bo->ttm, + dst_rsgt, true, &deps); + i915_deps_fini(&deps); } /* We can possibly get an -ERESTARTSYS here */ @@ -826,47 +588,38 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, .interruptible = intr, }; struct i915_refct_sgt *dst_rsgt; - struct dma_fence *copy_fence, *dep_fence; + struct dma_fence *copy_fence; struct i915_deps deps; - int ret, shared_err; + int ret; assert_object_held(dst); assert_object_held(src); i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); - /* - * We plan to add a shared fence only for the source. If that - * fails, we await all source fences before commencing - * the copy instead of only the exclusive. 
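i915_deps_add_dependency(), shown a little earlier in the removed block, keeps at most one fence per fence context: fences with no context (context == 0) are never merged, and a newcomer on an already-tracked timeline replaces the stored fence only if it is later. The merge rule in isolation, with integer seqnos standing in for dma_fence_is_later() (hypothetical names):

struct fence_model {
        unsigned long context;  /* 0 means no timeline: never merge */
        unsigned long seqno;    /* position on that timeline */
};

static struct fence_model *merge_one(struct fence_model *stored,
                                     struct fence_model *incoming)
{
        if (!stored->context || stored->context != incoming->context)
                return stored;  /* distinct timelines are kept separately */
        /* same timeline: the later fence implies the earlier one */
        return incoming->seqno > stored->seqno ? incoming : stored;
}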
- */ - shared_err = dma_resv_reserve_shared(src_bo->base.resv, 1); - ret = i915_deps_add_resv(&deps, dst_bo->base.resv, true, false, &ctx); - if (!ret) - ret = i915_deps_add_resv(&deps, src_bo->base.resv, - !!shared_err, false, &ctx); + ret = dma_resv_reserve_shared(src_bo->base.resv, 1); if (ret) return ret; - dep_fence = i915_deps_to_fence(&deps, &ctx); - if (IS_ERR(dep_fence)) - return PTR_ERR(dep_fence); + ret = i915_deps_add_resv(&deps, dst_bo->base.resv, &ctx); + if (ret) + return ret; + + ret = i915_deps_add_resv(&deps, src_bo->base.resv, &ctx); + if (ret) + return ret; dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource); - copy_fence = __i915_ttm_move(src_bo, false, dst_bo->resource, + copy_fence = __i915_ttm_move(src_bo, &ctx, false, dst_bo->resource, dst_bo->ttm, dst_rsgt, allow_accel, - dep_fence); + &deps); + i915_deps_fini(&deps); i915_refct_sgt_put(dst_rsgt); if (IS_ERR_OR_NULL(copy_fence)) return PTR_ERR_OR_ZERO(copy_fence); dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence); - - /* If we failed to reserve a shared slot, add an exclusive fence */ - if (shared_err) - dma_resv_add_excl_fence(src_bo->base.resv, copy_fence); - else - dma_resv_add_shared_fence(src_bo->base.resv, copy_fence); + dma_resv_add_shared_fence(src_bo->base.resv, copy_fence); dma_fence_put(copy_fence); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 3173c9f9a040..3cc01c30dd62 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -529,7 +529,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, * On almost all of the older hw, we cannot tell the GPU that * a page is readonly. */ - if (!dev_priv->gt.vm->has_read_only) + if (!to_gt(dev_priv)->vm->has_read_only) return -ENODEV; } diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index c69c7d45aabc..11f0aa65f8a3 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1705,7 +1705,7 @@ int i915_gem_huge_page_mock_selftests(void) mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; mkwrite_device_info(dev_priv)->ppgtt_size = 48; - ppgtt = i915_ppgtt_create(&dev_priv->gt, 0); + ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; @@ -1747,7 +1747,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) return 0; } - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; return i915_live_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index 8402ed925a69..75947e9dada2 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -592,7 +592,7 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_client_tiled_blits), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; return i915_live_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 21b71568cd5f..3f41fe5ec9d4 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -90,7 +90,7 @@ static int live_nop_switch(void *arg) } if (i915_request_wait(rq, 0, 10 
* HZ) < 0) { pr_err("Failed to populated %d contexts\n", nctx); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(to_gt(i915)); i915_request_put(rq); err = -EIO; goto out_file; @@ -146,7 +146,7 @@ static int live_nop_switch(void *arg) if (i915_request_wait(rq, 0, HZ / 5) < 0) { pr_err("Switching between %ld contexts timed out\n", prime); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(to_gt(i915)); i915_request_put(rq); break; } @@ -1223,7 +1223,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, return 0; if (flags & TEST_RESET) - igt_global_reset_lock(&i915->gt); + igt_global_reset_lock(to_gt(i915)); obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) { @@ -1306,7 +1306,7 @@ out_put: out_unlock: if (flags & TEST_RESET) - igt_global_reset_unlock(&i915->gt); + igt_global_reset_unlock(to_gt(i915)); if (ret) pr_err("%s: Failed with %d!\n", name, ret); @@ -1481,10 +1481,10 @@ static int check_scratch(struct i915_address_space *vm, u64 offset) static int write_to_scratch(struct i915_gem_context *ctx, struct intel_engine_cs *engine, + struct drm_i915_gem_object *obj, u64 offset, u32 value) { struct drm_i915_private *i915 = ctx->i915; - struct drm_i915_gem_object *obj; struct i915_address_space *vm; struct i915_request *rq; struct i915_vma *vma; @@ -1497,15 +1497,9 @@ static int write_to_scratch(struct i915_gem_context *ctx, if (err) return err; - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); - cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); - if (IS_ERR(cmd)) { - err = PTR_ERR(cmd); - goto out; - } + if (IS_ERR(cmd)) + return PTR_ERR(cmd); *cmd++ = MI_STORE_DWORD_IMM_GEN4; if (GRAPHICS_VER(i915) >= 8) { @@ -1569,17 +1563,19 @@ err_unpin: i915_vma_unpin(vma); out_vm: i915_vm_put(vm); -out: - i915_gem_object_put(obj); + + if (!err) + err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT); + return err; } static int read_from_scratch(struct i915_gem_context *ctx, struct intel_engine_cs *engine, + struct drm_i915_gem_object *obj, u64 offset, u32 *value) { struct drm_i915_private *i915 = ctx->i915; - struct drm_i915_gem_object *obj; struct i915_address_space *vm; const u32 result = 0x100; struct i915_request *rq; @@ -1594,10 +1590,6 @@ static int read_from_scratch(struct i915_gem_context *ctx, if (err) return err; - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); - if (GRAPHICS_VER(i915) >= 8) { const u32 GPR0 = engine->mmio_base + 0x600; @@ -1615,7 +1607,7 @@ static int read_from_scratch(struct i915_gem_context *ctx, cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); - goto out; + goto err_unpin; } memset(cmd, POISON_INUSE, PAGE_SIZE); @@ -1651,7 +1643,7 @@ static int read_from_scratch(struct i915_gem_context *ctx, cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); - goto out; + goto err_unpin; } memset(cmd, POISON_INUSE, PAGE_SIZE); @@ -1722,8 +1714,10 @@ err_unpin: i915_vma_unpin(vma); out_vm: i915_vm_put(vm); -out: - i915_gem_object_put(obj); + + if (!err) + err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT); + return err; } @@ -1757,6 +1751,7 @@ static int igt_vm_isolation(void *arg) { struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx_a, *ctx_b; + struct drm_i915_gem_object *obj_a, *obj_b; unsigned long num_engines, count; struct intel_engine_cs *engine; struct igt_live_test t; @@ -1810,6 +1805,18 @@ static int igt_vm_isolation(void 
*arg) vm_total = ctx_a->vm->total; GEM_BUG_ON(ctx_b->vm->total != vm_total); + obj_a = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj_a)) { + err = PTR_ERR(obj_a); + goto out_file; + } + + obj_b = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj_b)) { + err = PTR_ERR(obj_b); + goto put_a; + } + count = 0; num_engines = 0; for_each_uabi_engine(engine, i915) { @@ -1832,13 +1839,13 @@ static int igt_vm_isolation(void *arg) I915_GTT_PAGE_SIZE, vm_total, sizeof(u32), alignof_dword); - err = write_to_scratch(ctx_a, engine, + err = write_to_scratch(ctx_a, engine, obj_a, offset, 0xdeadbeef); if (err == 0) - err = read_from_scratch(ctx_b, engine, + err = read_from_scratch(ctx_b, engine, obj_b, offset, &value); if (err) - goto out_file; + goto put_b; if (value != expected) { pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n", @@ -1847,7 +1854,7 @@ static int igt_vm_isolation(void *arg) lower_32_bits(offset), this); err = -EINVAL; - goto out_file; + goto put_b; } this++; @@ -1858,6 +1865,10 @@ static int igt_vm_isolation(void *arg) pr_info("Checked %lu scratch offsets across %lu engines\n", count, num_engines); +put_b: + i915_gem_object_put(obj_b); +put_a: + i915_gem_object_put(obj_a); out_file: if (igt_live_test_end(&t)) err = -EIO; @@ -1877,7 +1888,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_vm_isolation), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; return i915_live_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c index 4b8e6b098659..ecb691c81d1e 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c @@ -261,5 +261,5 @@ int i915_gem_migrate_live_selftests(struct drm_i915_private *i915) if (!HAS_LMEM(i915)) return 0; - return intel_gt_live_subtests(tests, &i915->gt); + return intel_gt_live_subtests(tests, to_gt(i915)); } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 6d30cdfa80f3..c6291429b00c 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -84,6 +84,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, struct rnd_state *prng) { const unsigned long npages = obj->base.size / PAGE_SIZE; + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_ggtt_view view; struct i915_vma *vma; unsigned long page; @@ -141,7 +142,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, if (offset >= obj->base.size) goto out; - intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt); + intel_gt_flush_ggtt_writes(to_gt(i915)); p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); cpu = kmap(p) + offset_in_page(offset); @@ -175,6 +176,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj, { const unsigned int nreal = obj->scratch / PAGE_SIZE; const unsigned long npages = obj->base.size / PAGE_SIZE; + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_vma *vma; unsigned long page; int err; @@ -234,7 +236,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj, if (offset >= obj->base.size) continue; - intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt); + intel_gt_flush_ggtt_writes(to_gt(i915)); p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); cpu = 
kmap(p) + offset_in_page(offset); @@ -616,14 +618,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915, static void disable_retire_worker(struct drm_i915_private *i915) { i915_gem_driver_unregister__shrinker(i915); - intel_gt_pm_get(&i915->gt); - cancel_delayed_work_sync(&i915->gt.requests.retire_work); + intel_gt_pm_get(to_gt(i915)); + cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work); } static void restore_retire_worker(struct drm_i915_private *i915) { igt_flush_test(i915); - intel_gt_pm_put(&i915->gt); + intel_gt_pm_put(to_gt(i915)); i915_gem_driver_register__shrinker(i915); } @@ -651,8 +653,8 @@ static int igt_mmap_offset_exhaustion(void *arg) /* Disable background reaper */ disable_retire_worker(i915); - GEM_BUG_ON(!i915->gt.awake); - intel_gt_retire_requests(&i915->gt); + GEM_BUG_ON(!to_gt(i915)->awake); + intel_gt_retire_requests(to_gt(i915)); i915_gem_drain_freed_objects(i915); /* Trim the device mmap space to only a page */ @@ -728,7 +730,7 @@ static int igt_mmap_offset_exhaustion(void *arg) /* Now fill with busy dead objects that we expect to reap */ for (loop = 0; loop < 3; loop++) { - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) break; obj = i915_gem_object_create_internal(i915, PAGE_SIZE); @@ -942,7 +944,7 @@ static int __igt_mmap(struct drm_i915_private *i915, } if (type == I915_MMAP_TYPE_GTT) - intel_gt_flush_ggtt_writes(&i915->gt); + intel_gt_flush_ggtt_writes(to_gt(i915)); err = wc_check(obj); if (err == -ENXIO) @@ -1049,7 +1051,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915, goto out_unmap; } - intel_gt_flush_ggtt_writes(&i915->gt); + intel_gt_flush_ggtt_writes(to_gt(i915)); err = access_process_vm(current, addr, &x, sizeof(x), 0); if (err != sizeof(x)) { @@ -1065,7 +1067,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915, goto out_unmap; } - intel_gt_flush_ggtt_writes(&i915->gt); + intel_gt_flush_ggtt_writes(to_gt(i915)); err = __get_user(y, ptr); if (err) { @@ -1165,7 +1167,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915, } if (type == I915_MMAP_TYPE_GTT) - intel_gt_flush_ggtt_writes(&i915->gt); + intel_gt_flush_ggtt_writes(to_gt(i915)); for_each_uabi_engine(engine, i915) { struct i915_request *rq; @@ -1366,20 +1368,10 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915, } } - if (!obj->ops->mmap_ops) { - err = check_absent(addr, obj->base.size); - if (err) { - pr_err("%s: was not absent\n", obj->mm.region->name); - goto out_unmap; - } - } else { - /* ttm allows access to evicted regions by design */ - - err = check_present(addr, obj->base.size); - if (err) { - pr_err("%s: was not present\n", obj->mm.region->name); - goto out_unmap; - } + err = check_absent(addr, obj->base.size); + if (err) { + pr_err("%s: was not absent\n", obj->mm.region->name); + goto out_unmap; } out_unmap: diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index 4a166d25fe60..6e9292918bfc 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -269,19 +269,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) free_pd(&ppgtt->base.vm, ppgtt->base.pd); } -static int pd_vma_set_pages(struct i915_vma *vma) -{ - vma->pages = ERR_PTR(-ENODEV); - return 0; -} - -static void pd_vma_clear_pages(struct i915_vma *vma) -{ - GEM_BUG_ON(!vma->pages); - - vma->pages = NULL; -} - static void pd_vma_bind(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, struct i915_vma *vma, @@ -321,8 +308,6 @@ static void 
pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma) } static const struct i915_vma_ops pd_vma_ops = { - .set_pages = pd_vma_set_pages, - .clear_pages = pd_vma_clear_pages, .bind_vma = pd_vma_bind, .unbind_vma = pd_vma_unbind, }; @@ -454,6 +439,7 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup; ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma; + ppgtt->base.vm.alloc_scratch_dma = alloc_pt_dma; ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; err = gen6_ppgtt_init_scratch(ppgtt); diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 95c02096a61b..b012c50f7ce7 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -776,10 +776,29 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt, */ ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12); - if (HAS_LMEM(gt->i915)) + if (HAS_LMEM(gt->i915)) { ppgtt->vm.alloc_pt_dma = alloc_pt_lmem; - else + + /* + * On some platforms the hw has dropped support for 4K GTT pages + * when dealing with LMEM, and due to the design of 64K GTT + * pages in the hw, we can only mark the *entire* page-table as + * operating in 64K GTT mode, since the enable bit is still on + * the pde, and not the pte. And since we still need to allow + * 4K GTT pages for SMEM objects, we can't have a "normal" 4K + * page-table with scratch pointing to LMEM, since that's + * undefined from the hw pov. The simplest solution is to just + * move the 64K scratch page to SMEM on such platforms and call + * it a day, since that should work for all configurations. + */ + if (HAS_64K_PAGES(gt->i915)) + ppgtt->vm.alloc_scratch_dma = alloc_pt_dma; + else + ppgtt->vm.alloc_scratch_dma = alloc_pt_lmem; + } else { ppgtt->vm.alloc_pt_dma = alloc_pt_dma; + ppgtt->vm.alloc_scratch_dma = alloc_pt_dma; + } err = gen8_init_scratch(&ppgtt->vm); if (err) diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 246c37d72cd7..d8c74bbf9aae 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -211,7 +211,8 @@ static inline void intel_context_enter(struct intel_context *ce) static inline void intel_context_mark_active(struct intel_context *ce) { - lockdep_assert_held(&ce->timeline->mutex); + lockdep_assert(lockdep_is_held(&ce->timeline->mutex) || + test_bit(CONTEXT_IS_PARKING, &ce->flags)); ++ce->active_count; } diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 9e0177dc5484..30cd81ad8911 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -118,6 +118,7 @@ struct intel_context { #define CONTEXT_LRCA_DIRTY 9 #define CONTEXT_GUC_INIT 10 #define CONTEXT_PERMA_PIN 11 +#define CONTEXT_IS_PARKING 12 struct { u64 timeout_us; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index a1334b48dde7..b0a4a2dbe3ee 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -26,7 +26,7 @@ static void dbg_poison_ce(struct intel_context *ce) int type = i915_coherent_map_type(ce->engine->i915, obj, true); void *map; - if (!i915_gem_object_trylock(obj)) + if (!i915_gem_object_trylock(obj, NULL)) return; map = i915_gem_object_pin_map(obj, type); @@ -80,39 +80,6 @@ static int __engine_unpark(struct intel_wakeref *wf) return 0; } -#if IS_ENABLED(CONFIG_LOCKDEP) - 
-static unsigned long __timeline_mark_lock(struct intel_context *ce) -{ - unsigned long flags; - - local_irq_save(flags); - mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_); - - return flags; -} - -static void __timeline_mark_unlock(struct intel_context *ce, - unsigned long flags) -{ - mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_); - local_irq_restore(flags); -} - -#else - -static unsigned long __timeline_mark_lock(struct intel_context *ce) -{ - return 0; -} - -static void __timeline_mark_unlock(struct intel_context *ce, - unsigned long flags) -{ -} - -#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */ - static void duration(struct dma_fence *fence, struct dma_fence_cb *cb) { struct i915_request *rq = to_request(fence); @@ -159,7 +126,6 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) { struct intel_context *ce = engine->kernel_context; struct i915_request *rq; - unsigned long flags; bool result = true; /* @@ -214,7 +180,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) * engine->wakeref.count, we may see the request completion and retire * it causing an underflow of the engine->wakeref. */ - flags = __timeline_mark_lock(ce); + set_bit(CONTEXT_IS_PARKING, &ce->flags); GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0); rq = __i915_request_create(ce, GFP_NOWAIT); @@ -246,7 +212,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) result = false; out_unlock: - __timeline_mark_unlock(ce, flags); + clear_bit(CONTEXT_IS_PARKING, &ce->flags); return result; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c index 8f8bea08e734..9ce85a845105 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_user.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c @@ -116,7 +116,7 @@ static void set_scheduler_caps(struct drm_i915_private *i915) disabled |= (I915_SCHEDULER_CAP_ENABLED | I915_SCHEDULER_CAP_PRIORITY); - if (intel_uc_uses_guc_submission(&i915->gt.uc)) + if (intel_uc_uses_guc_submission(&to_gt(i915)->uc)) enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP; for (i = 0; i < ARRAY_SIZE(map); i++) { diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index cbc6d2b1fd9e..5263dda7f8d5 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -22,9 +22,6 @@ #include "intel_gtt.h" #include "gen8_ppgtt.h" -static int -i915_get_ggtt_vma_pages(struct i915_vma *vma); - static void i915_ggtt_color_adjust(const struct drm_mm_node *node, unsigned long color, u64 *start, @@ -892,21 +889,6 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) return 0; } -int ggtt_set_pages(struct i915_vma *vma) -{ - int ret; - - GEM_BUG_ON(vma->pages); - - ret = i915_get_ggtt_vma_pages(vma); - if (ret) - return ret; - - vma->page_sizes = vma->obj->mm.page_sizes; - - return 0; -} - static void gen6_gmch_remove(struct i915_address_space *vm) { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); @@ -941,6 +923,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) size = gen8_get_total_gtt_size(snb_gmch_ctl); ggtt->vm.alloc_pt_dma = alloc_pt_dma; + ggtt->vm.alloc_scratch_dma = alloc_pt_dma; ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY; ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; @@ -967,8 +950,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; - ggtt->vm.vma_ops.set_pages = ggtt_set_pages; - 
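/*
 * [Editor's aside - sketch, not part of the patch: the recurring
 * &i915->gt -> to_gt(i915) conversions throughout this backmerge rely
 * on a trivial accessor in i915_drv.h, roughly (exact body may differ
 * between kernel versions):
 *
 *	static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
 *	{
 *		return &i915->gt;
 *	}
 *
 * Funnelling every caller through one helper is what later lets
 * multi-GT platforms change the lookup without another tree-wide
 * rename.]
 */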
ggtt->vm.vma_ops.clear_pages = clear_pages; ggtt->vm.pte_encode = gen8_ggtt_pte_encode; @@ -1094,6 +1075,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; ggtt->vm.alloc_pt_dma = alloc_pt_dma; + ggtt->vm.alloc_scratch_dma = alloc_pt_dma; ggtt->vm.clear_range = nop_clear_range; if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915)) @@ -1117,8 +1099,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; - ggtt->vm.vma_ops.set_pages = ggtt_set_pages; - ggtt->vm.vma_ops.clear_pages = clear_pages; return ggtt_probe_common(ggtt, size); } @@ -1146,6 +1126,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end); ggtt->vm.alloc_pt_dma = alloc_pt_dma; + ggtt->vm.alloc_scratch_dma = alloc_pt_dma; if (needs_idle_maps(i915)) { drm_notice(&i915->drm, @@ -1162,8 +1143,6 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; - ggtt->vm.vma_ops.set_pages = ggtt_set_pages; - ggtt->vm.vma_ops.clear_pages = clear_pages; if (unlikely(ggtt->do_idle_maps)) drm_notice(&i915->drm, @@ -1229,7 +1208,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915) { int ret; - ret = ggtt_probe_hw(&i915->ggtt, &i915->gt); + ret = ggtt_probe_hw(&i915->ggtt, to_gt(i915)); if (ret) return ret; @@ -1333,382 +1312,3 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt) intel_ggtt_restore_fences(ggtt); } - -static struct scatterlist * -rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset, - unsigned int width, unsigned int height, - unsigned int src_stride, unsigned int dst_stride, - struct sg_table *st, struct scatterlist *sg) -{ - unsigned int column, row; - unsigned int src_idx; - - for (column = 0; column < width; column++) { - unsigned int left; - - src_idx = src_stride * (height - 1) + column + offset; - for (row = 0; row < height; row++) { - st->nents++; - /* - * We don't need the pages, but need to initialize - * the entries so the sg list can be happily traversed. - * The only thing we need are DMA addresses. - */ - sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); - sg_dma_address(sg) = - i915_gem_object_get_dma_address(obj, src_idx); - sg_dma_len(sg) = I915_GTT_PAGE_SIZE; - sg = sg_next(sg); - src_idx -= src_stride; - } - - left = (dst_stride - height) * I915_GTT_PAGE_SIZE; - - if (!left) - continue; - - st->nents++; - - /* - * The DE ignores the PTEs for the padding tiles, the sg entry - * here is just a conenience to indicate how many padding PTEs - * to insert at this spot. - */ - sg_set_page(sg, NULL, left, 0); - sg_dma_address(sg) = 0; - sg_dma_len(sg) = left; - sg = sg_next(sg); - } - - return sg; -} - -static noinline struct sg_table * -intel_rotate_pages(struct intel_rotation_info *rot_info, - struct drm_i915_gem_object *obj) -{ - unsigned int size = intel_rotation_info_size(rot_info); - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct sg_table *st; - struct scatterlist *sg; - int ret = -ENOMEM; - int i; - - /* Allocate target SG list. 
*/ - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - goto err_st_alloc; - - ret = sg_alloc_table(st, size, GFP_KERNEL); - if (ret) - goto err_sg_alloc; - - st->nents = 0; - sg = st->sgl; - - for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) - sg = rotate_pages(obj, rot_info->plane[i].offset, - rot_info->plane[i].width, rot_info->plane[i].height, - rot_info->plane[i].src_stride, - rot_info->plane[i].dst_stride, - st, sg); - - return st; - -err_sg_alloc: - kfree(st); -err_st_alloc: - - drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", - obj->base.size, rot_info->plane[0].width, - rot_info->plane[0].height, size); - - return ERR_PTR(ret); -} - -static struct scatterlist * -add_padding_pages(unsigned int count, - struct sg_table *st, struct scatterlist *sg) -{ - st->nents++; - - /* - * The DE ignores the PTEs for the padding tiles, the sg entry - * here is just a convenience to indicate how many padding PTEs - * to insert at this spot. - */ - sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0); - sg_dma_address(sg) = 0; - sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE; - sg = sg_next(sg); - - return sg; -} - -static struct scatterlist * -remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj, - unsigned int offset, unsigned int alignment_pad, - unsigned int width, unsigned int height, - unsigned int src_stride, unsigned int dst_stride, - struct sg_table *st, struct scatterlist *sg, - unsigned int *gtt_offset) -{ - unsigned int row; - - if (!width || !height) - return sg; - - if (alignment_pad) - sg = add_padding_pages(alignment_pad, st, sg); - - for (row = 0; row < height; row++) { - unsigned int left = width * I915_GTT_PAGE_SIZE; - - while (left) { - dma_addr_t addr; - unsigned int length; - - /* - * We don't need the pages, but need to initialize - * the entries so the sg list can be happily traversed. - * The only thing we need are DMA addresses. 
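/*
 * [Editor's note: the rotated/remapped/partial page-building helpers
 * deleted from intel_ggtt.c in this hunk are not lost; per the rest
 * of this series the equivalent logic moves into i915_vma.c, next to
 * the vma->pages management that consumes it. Those additions fall
 * outside the hunks quoted here.]
 */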
- */ - - addr = i915_gem_object_get_dma_address_len(obj, offset, &length); - - length = min(left, length); - - st->nents++; - - sg_set_page(sg, NULL, length, 0); - sg_dma_address(sg) = addr; - sg_dma_len(sg) = length; - sg = sg_next(sg); - - offset += length / I915_GTT_PAGE_SIZE; - left -= length; - } - - offset += src_stride - width; - - left = (dst_stride - width) * I915_GTT_PAGE_SIZE; - - if (!left) - continue; - - sg = add_padding_pages(left >> PAGE_SHIFT, st, sg); - } - - *gtt_offset += alignment_pad + dst_stride * height; - - return sg; -} - -static struct scatterlist * -remap_contiguous_pages(struct drm_i915_gem_object *obj, - unsigned int obj_offset, - unsigned int count, - struct sg_table *st, struct scatterlist *sg) -{ - struct scatterlist *iter; - unsigned int offset; - - iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset); - GEM_BUG_ON(!iter); - - do { - unsigned int len; - - len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), - count << PAGE_SHIFT); - sg_set_page(sg, NULL, len, 0); - sg_dma_address(sg) = - sg_dma_address(iter) + (offset << PAGE_SHIFT); - sg_dma_len(sg) = len; - - st->nents++; - count -= len >> PAGE_SHIFT; - if (count == 0) - return sg; - - sg = __sg_next(sg); - iter = __sg_next(iter); - offset = 0; - } while (1); -} - -static struct scatterlist * -remap_linear_color_plane_pages(struct drm_i915_gem_object *obj, - unsigned int obj_offset, unsigned int alignment_pad, - unsigned int size, - struct sg_table *st, struct scatterlist *sg, - unsigned int *gtt_offset) -{ - if (!size) - return sg; - - if (alignment_pad) - sg = add_padding_pages(alignment_pad, st, sg); - - sg = remap_contiguous_pages(obj, obj_offset, size, st, sg); - sg = sg_next(sg); - - *gtt_offset += alignment_pad + size; - - return sg; -} - -static struct scatterlist * -remap_color_plane_pages(const struct intel_remapped_info *rem_info, - struct drm_i915_gem_object *obj, - int color_plane, - struct sg_table *st, struct scatterlist *sg, - unsigned int *gtt_offset) -{ - unsigned int alignment_pad = 0; - - if (rem_info->plane_alignment) - alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset; - - if (rem_info->plane[color_plane].linear) - sg = remap_linear_color_plane_pages(obj, - rem_info->plane[color_plane].offset, - alignment_pad, - rem_info->plane[color_plane].size, - st, sg, - gtt_offset); - - else - sg = remap_tiled_color_plane_pages(obj, - rem_info->plane[color_plane].offset, - alignment_pad, - rem_info->plane[color_plane].width, - rem_info->plane[color_plane].height, - rem_info->plane[color_plane].src_stride, - rem_info->plane[color_plane].dst_stride, - st, sg, - gtt_offset); - - return sg; -} - -static noinline struct sg_table * -intel_remap_pages(struct intel_remapped_info *rem_info, - struct drm_i915_gem_object *obj) -{ - unsigned int size = intel_remapped_info_size(rem_info); - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct sg_table *st; - struct scatterlist *sg; - unsigned int gtt_offset = 0; - int ret = -ENOMEM; - int i; - - /* Allocate target SG list. */ - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - goto err_st_alloc; - - ret = sg_alloc_table(st, size, GFP_KERNEL); - if (ret) - goto err_sg_alloc; - - st->nents = 0; - sg = st->sgl; - - for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) - sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset); - - i915_sg_trim(st); - - return st; - -err_sg_alloc: - kfree(st); -err_st_alloc: - - drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n", - obj->base.size, rem_info->plane[0].width, - rem_info->plane[0].height, size); - - return ERR_PTR(ret); -} - -static noinline struct sg_table * -intel_partial_pages(const struct i915_ggtt_view *view, - struct drm_i915_gem_object *obj) -{ - struct sg_table *st; - struct scatterlist *sg; - unsigned int count = view->partial.size; - int ret = -ENOMEM; - - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - goto err_st_alloc; - - ret = sg_alloc_table(st, count, GFP_KERNEL); - if (ret) - goto err_sg_alloc; - - st->nents = 0; - - sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl); - - sg_mark_end(sg); - i915_sg_trim(st); /* Drop any unused tail entries. */ - - return st; - -err_sg_alloc: - kfree(st); -err_st_alloc: - return ERR_PTR(ret); -} - -static int -i915_get_ggtt_vma_pages(struct i915_vma *vma) -{ - int ret; - - /* - * The vma->pages are only valid within the lifespan of the borrowed - * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so - * must be the vma->pages. A simple rule is that vma->pages must only - * be accessed when the obj->mm.pages are pinned. - */ - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); - - switch (vma->ggtt_view.type) { - default: - GEM_BUG_ON(vma->ggtt_view.type); - fallthrough; - case I915_GGTT_VIEW_NORMAL: - vma->pages = vma->obj->mm.pages; - return 0; - - case I915_GGTT_VIEW_ROTATED: - vma->pages = - intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); - break; - - case I915_GGTT_VIEW_REMAPPED: - vma->pages = - intel_remap_pages(&vma->ggtt_view.remapped, vma->obj); - break; - - case I915_GGTT_VIEW_PARTIAL: - vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); - break; - } - - ret = 0; - if (IS_ERR(vma->pages)) { - ret = PTR_ERR(vma->pages); - vma->pages = NULL; - drm_err(&vma->vm->i915->drm, - "Failed to get pages for VMA view type %u (%d)!\n", - vma->ggtt_view.type, ret); - } - return ret; -} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index f2422d48be32..35d0fcd3a86c 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -25,13 +25,12 @@ #include "shmem_utils.h" #include "pxp/intel_pxp.h" -void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) +void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) { - gt->i915 = i915; - gt->uncore = &i915->uncore; - spin_lock_init(&gt->irq_lock); + mutex_init(&gt->tlb_invalidate_lock); + INIT_LIST_HEAD(&gt->closed_vma); spin_lock_init(&gt->closed_lock); @@ -48,6 +47,12 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) intel_rps_init_early(&gt->rps); } +void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) +{ + gt->i915 = i915; + gt->uncore = &i915->uncore; +} + int intel_gt_probe_lmem(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; @@ -909,3 +914,109 @@ void intel_gt_info_print(const struct intel_gt_info *info, intel_sseu_dump(&info->sseu, p); } + +struct reg_and_bit { + i915_reg_t reg; + u32 bit; +}; + +static struct reg_and_bit +get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8, + const i915_reg_t *regs, const unsigned int num) +{ + const unsigned int class = engine->class; + struct reg_and_bit rb = { }; + + if (drm_WARN_ON_ONCE(&engine->i915->drm, + class >= num || !regs[class].reg)) + return rb; + + rb.reg = regs[class]; + if (gen8 && class == VIDEO_DECODE_CLASS) + rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */ + else + rb.bit
= engine->instance; + + rb.bit = BIT(rb.bit); + + return rb; +} + +void intel_gt_invalidate_tlbs(struct intel_gt *gt) +{ + static const i915_reg_t gen8_regs[] = { + [RENDER_CLASS] = GEN8_RTCR, + [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */ + [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR, + [COPY_ENGINE_CLASS] = GEN8_BTCR, + }; + static const i915_reg_t gen12_regs[] = { + [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR, + [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR, + [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR, + [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR, + }; + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + struct intel_engine_cs *engine; + enum intel_engine_id id; + const i915_reg_t *regs; + unsigned int num = 0; + + if (I915_SELFTEST_ONLY(gt->awake == -ENODEV)) + return; + + if (GRAPHICS_VER(i915) == 12) { + regs = gen12_regs; + num = ARRAY_SIZE(gen12_regs); + } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) { + regs = gen8_regs; + num = ARRAY_SIZE(gen8_regs); + } else if (GRAPHICS_VER(i915) < 8) { + return; + } + + if (drm_WARN_ONCE(&i915->drm, !num, + "Platform does not implement TLB invalidation!")) + return; + + GEM_TRACE("\n"); + + assert_rpm_wakelock_held(&i915->runtime_pm); + + mutex_lock(&gt->tlb_invalidate_lock); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + for_each_engine(engine, gt, id) { + /* + * HW architecture suggests typical invalidation time at 40us, + * with pessimistic cases up to 100us and a recommendation to + * cap at 1ms. We go a bit higher just in case. + */ + const unsigned int timeout_us = 100; + const unsigned int timeout_ms = 4; + struct reg_and_bit rb; + + rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num); + if (!i915_mmio_reg_offset(rb.reg)) + continue; + + intel_uncore_write_fw(uncore, rb.reg, rb.bit); + if (__intel_wait_for_register_fw(uncore, + rb.reg, rb.bit, 0, + timeout_us, timeout_ms, + NULL)) + drm_err_ratelimited(&gt->i915->drm, + "%s TLB invalidation did not complete in %ums!\n", + engine->name, timeout_ms); + } + + /* + * Use delayed put since a) we mostly expect a flurry of TLB + * invalidations so it is good to avoid paying the forcewake cost and + * b) it works around a bug in Icelake which cannot cope with too rapid + * transitions. + */ + intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL); + mutex_unlock(&gt->tlb_invalidate_lock); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 74e771871a9b..a913fb6ffec3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -35,6 +35,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc) } void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); +void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt); int intel_gt_probe_lmem(struct intel_gt *gt); int intel_gt_init_mmio(struct intel_gt *gt); @@ -90,4 +91,6 @@ void intel_gt_info_print(const struct intel_gt_info *info, void intel_gt_watchdog_work(struct work_struct *work); +void intel_gt_invalidate_tlbs(struct intel_gt *gt); + #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c index acc49c56a9f3..9db3dcbd917f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c @@ -9,11 +9,6 @@ #include "intel_engine_pm.h" #include "intel_gt_buffer_pool.h" -static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool) -{ - return container_of(pool, struct intel_gt, buffer_pool); -} - static struct list_head * bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz) { @@ -141,7 +136,7 @@ static struct intel_gt_buffer_pool_node * node_create(struct intel_gt_buffer_pool *pool, size_t sz, enum i915_map_type type) { - struct intel_gt *gt = to_gt(pool); + struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool); struct intel_gt_buffer_pool_node *node; struct drm_i915_gem_object *obj; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h index e307ceb99031..17e79b735cfe 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h @@ -10,11 +10,7 @@ struct intel_gt; -#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(__name) \ - static int __name ## _open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, __name ## _show, inode->i_private); \ -} \ +#define __GT_DEBUGFS_ATTRIBUTE_FOPS(__name) \ static const struct file_operations __name ## _fops = { \ .owner = THIS_MODULE, \ .open = __name ## _open, \ @@ -23,6 +19,21 @@ static const struct file_operations __name ## _fops = { \ .release = single_release, \ } +#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(__name) \ +static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __name ## _show, inode->i_private); \ +} \ +__GT_DEBUGFS_ATTRIBUTE_FOPS(__name) + +#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(__name, __size_vf) \ +static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open_size(file, __name ## _show, inode->i_private, \ + __size_vf(inode->i_private)); \ +} \ +__GT_DEBUGFS_ATTRIBUTE_FOPS(__name) + void intel_gt_debugfs_register(struct intel_gt *gt); struct intel_gt_debugfs_file { diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 14216cc471b1..f20687796490 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -73,6 +73,8 @@ struct intel_gt { struct intel_uc uc; + struct mutex tlb_invalidate_lock; + struct i915_wa_list wa_list; struct
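/*
 * [Editor's aside - illustrative, not part of the patch. Two notes on
 * the intel_gt_invalidate_tlbs() hunk above: the "delayed put" refers
 * to intel_uncore_forcewake_put_delayed(), which lets the forcewake
 * auto-release timer drop the reference rather than releasing it
 * synchronously; and because the function asserts a runtime-pm
 * wakeref, a hypothetical caller would look roughly like:
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 *		intel_gt_invalidate_tlbs(to_gt(i915));
 * ]
 */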
intel_gt_timelines { diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 9fee968d57db..a94be0306464 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -224,19 +224,6 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass) INIT_LIST_HEAD(&vm->bound_list); } -void clear_pages(struct i915_vma *vma) -{ - GEM_BUG_ON(!vma->pages); - - if (vma->pages != vma->obj->mm.pages) { - sg_free_table(vma->pages); - kfree(vma->pages); - } - vma->pages = NULL; - - memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); -} - void *__px_vaddr(struct drm_i915_gem_object *p) { enum i915_map_type type; @@ -302,7 +289,7 @@ int setup_scratch_page(struct i915_address_space *vm) do { struct drm_i915_gem_object *obj; - obj = vm->alloc_pt_dma(vm, size); + obj = vm->alloc_scratch_dma(vm, size); if (IS_ERR(obj)) goto skip; @@ -338,6 +325,18 @@ skip: if (size == I915_GTT_PAGE_SIZE_4K) return -ENOMEM; + /* + * If we need 64K minimum GTT pages for device local-memory, + * like on XEHPSDV, then we need to fail the allocation here, + * otherwise we can't safely support the insertion of + * local-memory pages for this vm, since the HW expects the + * correct physical alignment and size when the page-table is + * operating in 64K GTT mode, which includes any scratch PTEs, + * since userspace can still touch them. + */ + if (HAS_64K_PAGES(vm->i915)) + return -ENOMEM; + size = I915_GTT_PAGE_SIZE_4K; } while (1); } diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index 51afe66d00f2..177b42b935a1 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -209,9 +209,6 @@ struct i915_vma_ops { */ void (*unbind_vma)(struct i915_address_space *vm, struct i915_vma *vma); - - int (*set_pages)(struct i915_vma *vma); - void (*clear_pages)(struct i915_vma *vma); }; struct i915_address_space { @@ -268,6 +265,8 @@ struct i915_address_space { struct drm_i915_gem_object * (*alloc_pt_dma)(struct i915_address_space *vm, int sz); + struct drm_i915_gem_object * + (*alloc_scratch_dma)(struct i915_address_space *vm, int sz); u64 (*pte_encode)(dma_addr_t addr, enum i915_cache_level level, @@ -599,10 +598,6 @@ release_pd_entry(struct i915_page_directory * const pd, const struct drm_i915_gem_object * const scratch); void gen6_ggtt_invalidate(struct i915_ggtt *ggtt); -int ggtt_set_pages(struct i915_vma *vma); -int ppgtt_set_pages(struct i915_vma *vma); -void clear_pages(struct i915_vma *vma); - void ppgtt_bind_vma(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, struct i915_vma *vma, diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c index 19a01878fee3..18b44af56969 100644 --- a/drivers/gpu/drm/i915/gt/intel_migrate.c +++ b/drivers/gpu/drm/i915/gt/intel_migrate.c @@ -404,7 +404,7 @@ static int emit_copy(struct i915_request *rq, int size) int intel_context_migrate_copy(struct intel_context *ce, - struct dma_fence *await, + const struct i915_deps *deps, struct scatterlist *src, enum i915_cache_level src_cache_level, bool src_is_lmem, @@ -431,8 +431,8 @@ intel_context_migrate_copy(struct intel_context *ce, goto out_ce; } - if (await) { - err = i915_request_await_dma_fence(rq, await); + if (deps) { + err = i915_request_await_deps(rq, deps); if (err) goto out_rq; @@ -442,7 +442,7 @@ intel_context_migrate_copy(struct intel_context *ce, goto out_rq; } - await = NULL; + deps = NULL; } /* The PTE updates + copy must not be 
interrupted. */ @@ -525,7 +525,7 @@ static int emit_clear(struct i915_request *rq, int size, u32 value) int intel_context_migrate_clear(struct intel_context *ce, - struct dma_fence *await, + const struct i915_deps *deps, struct scatterlist *sg, enum i915_cache_level cache_level, bool is_lmem, @@ -550,8 +550,8 @@ intel_context_migrate_clear(struct intel_context *ce, goto out_ce; } - if (await) { - err = i915_request_await_dma_fence(rq, await); + if (deps) { + err = i915_request_await_deps(rq, deps); if (err) goto out_rq; @@ -561,7 +561,7 @@ intel_context_migrate_clear(struct intel_context *ce, goto out_rq; } - await = NULL; + deps = NULL; } /* The PTE updates + clear must not be interrupted. */ @@ -599,7 +599,7 @@ out_ce: int intel_migrate_copy(struct intel_migrate *m, struct i915_gem_ww_ctx *ww, - struct dma_fence *await, + const struct i915_deps *deps, struct scatterlist *src, enum i915_cache_level src_cache_level, bool src_is_lmem, @@ -624,7 +624,7 @@ int intel_migrate_copy(struct intel_migrate *m, if (err) goto out; - err = intel_context_migrate_copy(ce, await, + err = intel_context_migrate_copy(ce, deps, src, src_cache_level, src_is_lmem, dst, dst_cache_level, dst_is_lmem, out); @@ -638,7 +638,7 @@ out: int intel_migrate_clear(struct intel_migrate *m, struct i915_gem_ww_ctx *ww, - struct dma_fence *await, + const struct i915_deps *deps, struct scatterlist *sg, enum i915_cache_level cache_level, bool is_lmem, @@ -661,7 +661,7 @@ intel_migrate_clear(struct intel_migrate *m, if (err) goto out; - err = intel_context_migrate_clear(ce, await, sg, cache_level, + err = intel_context_migrate_clear(ce, deps, sg, cache_level, is_lmem, value, out); intel_context_unpin(ce); diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.h b/drivers/gpu/drm/i915/gt/intel_migrate.h index 4e18e755a00b..ccc677ec4aa3 100644 --- a/drivers/gpu/drm/i915/gt/intel_migrate.h +++ b/drivers/gpu/drm/i915/gt/intel_migrate.h @@ -11,6 +11,7 @@ #include "intel_migrate_types.h" struct dma_fence; +struct i915_deps; struct i915_request; struct i915_gem_ww_ctx; struct intel_gt; @@ -23,7 +24,7 @@ struct intel_context *intel_migrate_create_context(struct intel_migrate *m); int intel_migrate_copy(struct intel_migrate *m, struct i915_gem_ww_ctx *ww, - struct dma_fence *await, + const struct i915_deps *deps, struct scatterlist *src, enum i915_cache_level src_cache_level, bool src_is_lmem, @@ -33,7 +34,7 @@ int intel_migrate_copy(struct intel_migrate *m, struct i915_request **out); int intel_context_migrate_copy(struct intel_context *ce, - struct dma_fence *await, + const struct i915_deps *deps, struct scatterlist *src, enum i915_cache_level src_cache_level, bool src_is_lmem, @@ -45,7 +46,7 @@ int intel_context_migrate_copy(struct intel_context *ce, int intel_migrate_clear(struct intel_migrate *m, struct i915_gem_ww_ctx *ww, - struct dma_fence *await, + const struct i915_deps *deps, struct scatterlist *sg, enum i915_cache_level cache_level, bool is_lmem, @@ -53,7 +54,7 @@ intel_migrate_clear(struct intel_migrate *m, struct i915_request **out); int intel_context_migrate_clear(struct intel_context *ce, - struct dma_fence *await, + const struct i915_deps *deps, struct scatterlist *sg, enum i915_cache_level cache_level, bool is_lmem, diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c index 4396bfd630d8..083b3090c69c 100644 --- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -289,16 +289,6 @@ void i915_vm_free_pt_stash(struct i915_address_space *vm, } } -int 
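/*
 * [Editor's sketch - hypothetical caller, not part of the patch:
 * after the dma_fence -> i915_deps switch in the migrate API above, a
 * caller collects all of its dependencies in an i915_deps and passes
 * that instead of a single fence; NULL still means "no dependencies".
 * With made-up locals ce and obj:
 *
 *	struct i915_request *rq = NULL;
 *	int err;
 *
 *	err = intel_context_migrate_clear(ce, NULL,
 *					  obj->mm.pages->sgl,
 *					  I915_CACHE_NONE,
 *					  i915_gem_object_is_lmem(obj),
 *					  0, &rq);
 *	if (!err && rq) {
 *		i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
 *		i915_request_put(rq);
 *	}
 * ]
 */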
ppgtt_set_pages(struct i915_vma *vma) -{ - GEM_BUG_ON(vma->pages); - - vma->pages = vma->obj->mm.pages; - vma->page_sizes = vma->obj->mm.page_sizes; - - return 0; -} - void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt, unsigned long lmem_pt_obj_flags) { @@ -315,6 +305,4 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt, ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; - ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; - ppgtt->vm.vma_ops.clear_pages = clear_pages; } diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index 9ea49e0a27c0..fde2dcb59809 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -197,6 +197,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) struct intel_uncore *uncore = gt->uncore; struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct intel_memory_region *mem; + resource_size_t min_page_size; resource_size_t io_start; resource_size_t lmem_size; int err; @@ -211,10 +212,12 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2))) return ERR_PTR(-ENODEV); + min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K : + I915_GTT_PAGE_SIZE_4K; mem = intel_memory_region_create(i915, 0, lmem_size, - I915_GTT_PAGE_SIZE_4K, + min_page_size, io_start, INTEL_MEMORY_LOCAL, 0, diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 63199f0550e6..7be0002d9d70 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -6,7 +6,7 @@ #include <linux/sched/mm.h> #include <linux/stop_machine.h> -#include "display/intel_display_types.h" +#include "display/intel_display.h" #include "display/intel_overlay.h" #include "gem/i915_gem_context.h" diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 07ff7ba7b2b7..54e7df788dbf 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -2226,6 +2226,65 @@ u32 intel_rps_read_state_cap(struct intel_rps *rps) return intel_uncore_read(uncore, GEN6_RP_STATE_CAP); } +static void intel_rps_set_manual(struct intel_rps *rps, bool enable) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u32 state = enable ? 
GEN9_RPSWCTL_ENABLE : GEN9_RPSWCTL_DISABLE; + + /* Allow punit to process software requests */ + intel_uncore_write(uncore, GEN6_RP_CONTROL, state); +} + +void intel_rps_raise_unslice(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u32 rp0_unslice_req; + + mutex_lock(&rps->lock); + + if (rps_uses_slpc(rps)) { + /* RP limits have not been initialized yet for SLPC path */ + rp0_unslice_req = ((intel_rps_read_state_cap(rps) >> 0) + & 0xff) * GEN9_FREQ_SCALER; + + intel_rps_set_manual(rps, true); + intel_uncore_write(uncore, GEN6_RPNSWREQ, + ((rp0_unslice_req << + GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) | + GEN9_IGNORE_SLICE_RATIO)); + intel_rps_set_manual(rps, false); + } else { + intel_rps_set(rps, rps->rp0_freq); + } + + mutex_unlock(&rps->lock); +} + +void intel_rps_lower_unslice(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u32 rpn_unslice_req; + + mutex_lock(&rps->lock); + + if (rps_uses_slpc(rps)) { + /* RP limits have not been initialized yet for SLPC path */ + rpn_unslice_req = ((intel_rps_read_state_cap(rps) >> 16) + & 0xff) * GEN9_FREQ_SCALER; + + intel_rps_set_manual(rps, true); + intel_uncore_write(uncore, GEN6_RPNSWREQ, + ((rpn_unslice_req << + GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) | + GEN9_IGNORE_SLICE_RATIO)); + intel_rps_set_manual(rps, false); + } else { + intel_rps_set(rps, rps->min_freq); + } + + mutex_unlock(&rps->lock); +} + /* External interface for intel_ips.ko */ static struct drm_i915_private __rcu *ips_mchdev; @@ -2302,7 +2361,7 @@ unsigned long i915_read_mch_val(void) return 0; with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - struct intel_ips *ips = &i915->gt.rps.ips; + struct intel_ips *ips = &to_gt(i915)->rps.ips; spin_lock_irq(&mchdev_lock); chipset_val = __ips_chipset_val(ips); @@ -2329,7 +2388,7 @@ bool i915_gpu_raise(void) if (!i915) return false; - rps = &i915->gt.rps; + rps = &to_gt(i915)->rps; spin_lock_irq(&mchdev_lock); if (rps->max_freq_softlimit < rps->max_freq) @@ -2356,7 +2415,7 @@ bool i915_gpu_lower(void) if (!i915) return false; - rps = &i915->gt.rps; + rps = &to_gt(i915)->rps; spin_lock_irq(&mchdev_lock); if (rps->max_freq_softlimit > rps->min_freq) @@ -2382,7 +2441,7 @@ bool i915_gpu_busy(void) if (!i915) return false; - ret = i915->gt.awake; + ret = to_gt(i915)->awake; drm_dev_put(&i915->drm); return ret; @@ -2405,11 +2464,11 @@ bool i915_gpu_turbo_disable(void) if (!i915) return false; - rps = &i915->gt.rps; + rps = &to_gt(i915)->rps; spin_lock_irq(&mchdev_lock); rps->max_freq_softlimit = rps->min_freq; - ret = !__gen5_rps_set(&i915->gt.rps, rps->min_freq); + ret = !__gen5_rps_set(&to_gt(i915)->rps, rps->min_freq); spin_unlock_irq(&mchdev_lock); drm_dev_put(&i915->drm); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h index aee12f37d38a..c6d76a3d1331 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.h +++ b/drivers/gpu/drm/i915/gt/intel_rps.h @@ -45,6 +45,8 @@ u32 intel_rps_get_rpn_frequency(struct intel_rps *rps); u32 intel_rps_read_punit_req(struct intel_rps *rps); u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps); u32 intel_rps_read_state_cap(struct intel_rps *rps); +void intel_rps_raise_unslice(struct intel_rps *rps); +void intel_rps_lower_unslice(struct intel_rps *rps); void gen5_rps_irq_handler(struct intel_rps *rps); void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 3113266c286e..ab3277a3d593 100644 --- 
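/*
 * [Editor's note on the RP_STATE_CAP reads in the hunk above - not
 * part of the patch: the register packs frequency limits as 8-bit
 * fields, which the shift-and-mask pairs decode:
 *
 *	RP0 (max):  (intel_rps_read_state_cap(rps) >>  0) & 0xff
 *	RPn (min):  (intel_rps_read_state_cap(rps) >> 16) & 0xff
 *
 * GEN9_FREQ_SCALER then converts those units to the finer granularity
 * written into GEN6_RPNSWREQ. Field layout inferred from the code
 * itself; consult the platform documentation for the authoritative
 * bit definitions.]
 */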
a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -929,7 +929,7 @@ hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) static void gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) { - const struct sseu_dev_info *sseu = &i915->gt.info.sseu; + const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu; unsigned int slice, subslice; u32 mcr, mcr_mask; diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index bb99fc03f503..c0637bf799a3 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -17,7 +17,7 @@ static int mock_timeline_pin(struct intel_timeline *tl) { int err; - if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj))) + if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj, NULL))) return -EBUSY; err = intel_timeline_pin_map(tl); @@ -345,7 +345,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, struct mock_engine *engine; GEM_BUG_ON(id >= I915_NUM_ENGINES); - GEM_BUG_ON(!i915->gt.uncore); + GEM_BUG_ON(!to_gt(i915)->uncore); engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL); if (!engine) @@ -353,8 +353,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, /* minimal engine setup for requests */ engine->base.i915 = i915; - engine->base.gt = &i915->gt; - engine->base.uncore = i915->gt.uncore; + engine->base.gt = to_gt(i915); + engine->base.uncore = to_gt(i915)->uncore; snprintf(engine->base.name, sizeof(engine->base.name), "%s", name); engine->base.id = id; engine->base.mask = BIT(id); @@ -377,8 +377,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, engine->base.release = mock_engine_release; - i915->gt.engine[id] = &engine->base; - i915->gt.engine_class[0][id] = &engine->base; + to_gt(i915)->engine[id] = &engine->base; + to_gt(i915)->engine_class[0][id] = &engine->base; /* fake hw queue */ spin_lock_init(&engine->hw_lock); diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c index fa7b99a671dd..76fbae358072 100644 --- a/drivers/gpu/drm/i915/gt/selftest_context.c +++ b/drivers/gpu/drm/i915/gt/selftest_context.c @@ -442,7 +442,7 @@ int intel_context_live_selftests(struct drm_i915_private *i915) SUBTEST(live_active_context), SUBTEST(live_remote_context), }; - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); if (intel_gt_is_wedged(gt)) return 0; diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.c b/drivers/gpu/drm/i915/gt/selftest_engine.c index 262764f6d90a..57fea9ea1705 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine.c @@ -12,7 +12,7 @@ int intel_engine_live_selftests(struct drm_i915_private *i915) live_engine_pm_selftests, NULL, }; - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); typeof(*tests) *fn; for (fn = tests; *fn; fn++) { diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c index 64abf5feabfa..1b75f478d1b8 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c @@ -361,10 +361,10 @@ int intel_engine_cs_perf_selftests(struct drm_i915_private *i915) SUBTEST(perf_mi_noop), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; - return intel_gt_live_subtests(tests, &i915->gt); + return intel_gt_live_subtests(tests, to_gt(i915)); } static int 
intel_mmio_bases_check(void *arg) diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c index 6e6e4d747cca..273d440a53e3 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -378,13 +378,13 @@ int intel_heartbeat_live_selftests(struct drm_i915_private *i915) int saved_hangcheck; int err; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; saved_hangcheck = i915->params.enable_hangcheck; i915->params.enable_hangcheck = INT_MAX; - err = intel_gt_live_subtests(tests, &i915->gt); + err = intel_gt_live_subtests(tests, to_gt(i915)); i915->params.enable_hangcheck = saved_hangcheck; return err; diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index 75f6efc9882f..8af261831470 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -229,7 +229,7 @@ static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness) start = ktime_get(); while (intel_engine_get_busy_time(engine, &unused) == busyness) { dt = ktime_get() - start; - if (dt > 500000) { + if (dt > 10000000) { pr_err("active wait timed out %lld\n", dt); ENGINE_TRACE(engine, "active wait time out %lld\n", dt); return -ETIME; diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c index b367ecfa42de..e10da897e07a 100644 --- a/drivers/gpu/drm/i915/gt/selftest_execlists.c +++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c @@ -4502,11 +4502,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_virtual_reset), }; - if (i915->gt.submission_method != INTEL_SUBMISSION_ELSP) + if (to_gt(i915)->submission_method != INTEL_SUBMISSION_ELSP) return 0; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; - return intel_gt_live_subtests(tests, &i915->gt); + return intel_gt_live_subtests(tests, to_gt(i915)); } diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index b9441217ca3d..8bf62a5826cc 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -43,7 +43,7 @@ static void measure_clocks(struct intel_engine_cs *engine, int i; for (i = 0; i < 5; i++) { - preempt_disable(); + local_irq_disable(); cycles[i] = -ENGINE_READ_FW(engine, RING_TIMESTAMP); dt[i] = ktime_get(); @@ -51,7 +51,7 @@ static void measure_clocks(struct intel_engine_cs *engine, dt[i] = ktime_sub(ktime_get(), dt[i]); cycles[i] += ENGINE_READ_FW(engine, RING_TIMESTAMP); - preempt_enable(); + local_irq_enable(); } /* Use the median of both cycle/dt; close enough */ @@ -193,10 +193,10 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915) SUBTEST(live_gt_resume), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; - return intel_gt_live_subtests(tests, &i915->gt); + return intel_gt_live_subtests(tests, to_gt(i915)); } int intel_gt_pm_late_selftests(struct drm_i915_private *i915) @@ -210,8 +210,8 @@ int intel_gt_pm_late_selftests(struct drm_i915_private *i915) SUBTEST(live_rc6_ctx_wa), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; - return intel_gt_live_subtests(tests, &i915->gt); + return intel_gt_live_subtests(tests, to_gt(i915)); } diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c 
b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index e5ad4d5a91c0..15d63435ec4d 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -2018,7 +2018,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_reset_evict_fence), SUBTEST(igt_handle_error), }; - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); intel_wakeref_t wakeref; int err; diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index b0977a3b699b..618c905daa19 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -1847,5 +1847,5 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915) if (!HAS_LOGICAL_RING_CONTEXTS(i915)) return 0; - return intel_gt_live_subtests(tests, &i915->gt); + return intel_gt_live_subtests(tests, to_gt(i915)); } diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c index e21787301bbd..fa4293d2944f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_migrate.c +++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c @@ -442,7 +442,7 @@ int intel_migrate_live_selftests(struct drm_i915_private *i915) SUBTEST(thread_global_copy), SUBTEST(thread_global_clear), }; - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); if (!gt->migrate.context) return 0; @@ -465,7 +465,7 @@ create_init_lmem_internal(struct intel_gt *gt, size_t sz, bool try_lmem) return obj; } - i915_gem_object_trylock(obj); + i915_gem_object_trylock(obj, NULL); err = i915_gem_object_pin_pages(obj); if (err) { i915_gem_object_unlock(obj); @@ -658,7 +658,7 @@ int intel_migrate_perf_selftests(struct drm_i915_private *i915) SUBTEST(perf_clear_blt), SUBTEST(perf_copy_blt), }; - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); if (intel_gt_is_wedged(gt)) return 0; diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c index 13d25bf2a94a..c1d861333c44 100644 --- a/drivers/gpu/drm/i915/gt/selftest_mocs.c +++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c @@ -451,5 +451,5 @@ int intel_mocs_live_selftests(struct drm_i915_private *i915) if (!get_mocs_settings(i915, &table)) return 0; - return intel_gt_live_subtests(tests, &i915->gt); + return intel_gt_live_subtests(tests, to_gt(i915)); } diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 7a50c9f4071b..8a873f6bda7f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -376,7 +376,7 @@ int intel_reset_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_atomic_reset), SUBTEST(igt_atomic_engine_reset), }; - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); if (!intel_has_gpu_reset(gt)) return 0; diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c index 041954408d0f..70f9ac1ec2c7 100644 --- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c @@ -291,8 +291,8 @@ int intel_ring_submission_live_selftests(struct drm_i915_private *i915) SUBTEST(live_ctx_switch_wa), }; - if (i915->gt.submission_method > INTEL_SUBMISSION_RING) + if (to_gt(i915)->submission_method > INTEL_SUBMISSION_RING) return 0; - return intel_gt_live_subtests(tests, &i915->gt); + return intel_gt_live_subtests(tests, to_gt(i915)); } diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c 
b/drivers/gpu/drm/i915/gt/selftest_slpc.c index 9334bad131a2..b768cea5943d 100644 --- a/drivers/gpu/drm/i915/gt/selftest_slpc.c +++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c @@ -39,7 +39,7 @@ static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq) static int live_slpc_clamp_min(void *arg) { struct drm_i915_private *i915 = arg; - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); struct intel_guc_slpc *slpc = &gt->uc.guc.slpc; struct intel_rps *rps = &gt->rps; struct intel_engine_cs *engine; @@ -166,7 +166,7 @@ static int live_slpc_clamp_min(void *arg) static int live_slpc_clamp_max(void *arg) { struct drm_i915_private *i915 = arg; - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); struct intel_guc_slpc *slpc; struct intel_rps *rps; struct intel_engine_cs *engine; @@ -304,7 +304,7 @@ int intel_slpc_live_selftests(struct drm_i915_private *i915) SUBTEST(live_slpc_clamp_min), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; return i915_live_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index d0b6a3afcf44..e2eb686a9763 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -159,7 +159,7 @@ static int mock_hwsp_freelist(void *arg) INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL); state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed); - state.gt = &i915->gt; + state.gt = to_gt(i915); /* * Create a bunch of timelines and check that their HWSP do not overlap. @@ -1416,8 +1416,8 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915) SUBTEST(live_hwsp_rollover_user), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; - return intel_gt_live_subtests(tests, &i915->gt); + return intel_gt_live_subtests(tests, to_gt(i915)); } diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 962e91ba3be4..0287c2573c51 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -1387,8 +1387,8 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915) SUBTEST(live_engine_reset_workarounds), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; - return intel_gt_live_subtests(tests, &i915->gt); + return intel_gt_live_subtests(tests, to_gt(i915)); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 1cb46098030d..f9240d4baa69 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -95,6 +95,11 @@ struct intel_guc { */ struct ida guc_ids; /** + * @num_guc_ids: Number of guc_ids, selftest feature to be able + * to reduce this number while testing.
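/*
 * [Editor's sketch - hypothetical, not part of the patch: making
 * num_guc_ids a variable lets a selftest shrink the id space to hit
 * the exhaustion/steal paths quickly, along the lines of:
 *
 *	guc->submission_state.num_guc_ids = 256;
 *
 * before creating contexts, instead of having to burn through all
 * GUC_MAX_LRC_DESCRIPTORS ids.]
 */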
+ */ + int num_guc_ids; + /** * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */ unsigned long *guc_ids_bitmap; @@ -202,6 +207,13 @@ struct intel_guc { */ struct delayed_work work; } timestamp; + +#ifdef CONFIG_DRM_I915_SELFTEST + /** + * @number_guc_id_stolen: The number of guc_ids that have been stolen + */ + int number_guc_id_stolen; +#endif }; static inline struct intel_guc *log_to_guc(struct intel_guc_log *log) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index a0cc34be7b56..aa6dd6415202 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -523,6 +523,15 @@ static inline bool ct_deadlocked(struct intel_guc_ct *ct) CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n", ktime_ms_delta(ktime_get(), ct->stall_time), send->status, recv->status); + CT_ERROR(ct, "H2G Space: %u (Bytes)\n", + atomic_read(&ct->ctbs.send.space) * 4); + CT_ERROR(ct, "Head: %u (Dwords)\n", ct->ctbs.send.desc->head); + CT_ERROR(ct, "Tail: %u (Dwords)\n", ct->ctbs.send.desc->tail); + CT_ERROR(ct, "G2H Space: %u (Bytes)\n", + atomic_read(&ct->ctbs.recv.space) * 4); + CT_ERROR(ct, "Head: %u\n (Dwords)", ct->ctbs.recv.desc->head); + CT_ERROR(ct, "Tail: %u\n (Dwords)", ct->ctbs.recv.desc->tail); + ct->ctbs.send.broken = true; } @@ -582,12 +591,19 @@ static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw) static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw) { + bool h2g = h2g_has_room(ct, h2g_dw); + bool g2h = g2h_has_room(ct, g2h_dw); + lockdep_assert_held(&ct->ctbs.send.lock); - if (unlikely(!h2g_has_room(ct, h2g_dw) || !g2h_has_room(ct, g2h_dw))) { + if (unlikely(!h2g || !g2h)) { if (ct->stall_time == KTIME_MAX) ct->stall_time = ktime_get(); + /* Be paranoid and kick G2H tasklet to free credits */ + if (!g2h) + tasklet_hi_schedule(&ct->receive_tasklet); + if (unlikely(ct_deadlocked(ct))) return -EPIPE; else diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 196424be0998..31420ce1ce6b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -40,9 +40,8 @@ static void guc_prepare_xfer(struct intel_uncore *uncore) } } -/* Copy RSA signature from the fw image to HW for verification */ -static int guc_xfer_rsa(struct intel_uc_fw *guc_fw, - struct intel_uncore *uncore) +static int guc_xfer_rsa_mmio(struct intel_uc_fw *guc_fw, + struct intel_uncore *uncore) { u32 rsa[UOS_RSA_SCRATCH_COUNT]; size_t copied; @@ -58,6 +57,27 @@ static int guc_xfer_rsa(struct intel_uc_fw *guc_fw, return 0; } +static int guc_xfer_rsa_vma(struct intel_uc_fw *guc_fw, + struct intel_uncore *uncore) +{ + struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); + + intel_uncore_write(uncore, UOS_RSA_SCRATCH(0), + intel_guc_ggtt_offset(guc, guc_fw->rsa_data)); + + return 0; +} + +/* Copy RSA signature from the fw image to HW for verification */ +static int guc_xfer_rsa(struct intel_uc_fw *guc_fw, + struct intel_uncore *uncore) +{ + if (guc_fw->rsa_data) + return guc_xfer_rsa_vma(guc_fw, uncore); + else + return guc_xfer_rsa_mmio(guc_fw, uncore); +} + /* * Read the GuC status register (GUC_STATUS) and store it in the * specified location; then return a boolean indicating whether @@ -142,7 +162,10 @@ int intel_guc_fw_upload(struct intel_guc *guc) /* * Note that GuC needs the CSS header plus uKernel code to be copied * by the DMA engine in one operation, whereas the RSA signature 
is - * loaded via MMIO. + * loaded separately, either by copying it to the UOS_RSA_SCRATCH + * register (if key size <= 256) or through a ggtt-pinned vma (if key + * size > 256). The RSA size and therefore the way we provide it to the + * HW is fixed for each platform and hard-coded in the bootrom. */ ret = guc_xfer_rsa(&guc->fw, uncore); if (ret) @@ -164,6 +187,6 @@ int intel_guc_fw_upload(struct intel_guc *guc) return 0; out: - intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_FAIL); + intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL); return ret; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h index ac1ee1d5ce10..fe6ab7550a14 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h @@ -15,9 +15,12 @@ struct intel_guc; -#ifdef CONFIG_DRM_I915_DEBUG_GUC +#if defined(CONFIG_DRM_I915_DEBUG_GUC) #define CRASH_BUFFER_SIZE SZ_2M #define DEBUG_BUFFER_SIZE SZ_16M +#elif defined(CONFIG_DRM_I915_DEBUG_GEM) +#define CRASH_BUFFER_SIZE SZ_1M +#define DEBUG_BUFFER_SIZE SZ_2M #else #define CRASH_BUFFER_SIZE SZ_8K #define DEBUG_BUFFER_SIZE SZ_64K diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c index 46026c2c1722..ddfbe334689f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c @@ -10,28 +10,80 @@ #include "intel_guc.h" #include "intel_guc_log.h" #include "intel_guc_log_debugfs.h" +#include "intel_uc.h" + +static u32 obj_to_guc_log_dump_size(struct drm_i915_gem_object *obj) +{ + u32 size; + + if (!obj) + return PAGE_SIZE; + + /* "0x%08x 0x%08x 0x%08x 0x%08x\n" => 16 bytes -> 44 chars => x2.75 */ + size = ((obj->base.size * 11) + 3) / 4; + + /* Add padding for final blank line, any extra header info, etc. 
*/ + size = PAGE_ALIGN(size + PAGE_SIZE); + + return size; +} + +static u32 guc_log_dump_size(struct intel_guc_log *log) +{ + struct intel_guc *guc = log_to_guc(log); + + if (!intel_guc_is_supported(guc)) + return PAGE_SIZE; + + if (!log->vma) + return PAGE_SIZE; + + return obj_to_guc_log_dump_size(log->vma->obj); +} static int guc_log_dump_show(struct seq_file *m, void *data) { struct drm_printer p = drm_seq_file_printer(m); + int ret; - return intel_guc_log_dump(m->private, &p, false); + ret = intel_guc_log_dump(m->private, &p, false); + + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && seq_has_overflowed(m)) + pr_warn_once("preallocated size:%zx for %s exceeded\n", + m->size, __func__); + + return ret; +} +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(guc_log_dump, guc_log_dump_size); + +static u32 guc_load_err_dump_size(struct intel_guc_log *log) +{ + struct intel_guc *guc = log_to_guc(log); + struct intel_uc *uc = container_of(guc, struct intel_uc, guc); + + if (!intel_guc_is_supported(guc)) + return PAGE_SIZE; + + return obj_to_guc_log_dump_size(uc->load_err_log); } -DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_log_dump); static int guc_load_err_log_dump_show(struct seq_file *m, void *data) { struct drm_printer p = drm_seq_file_printer(m); + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && seq_has_overflowed(m)) + pr_warn_once("preallocated size:%zx for %s exceeded\n", + m->size, __func__); + return intel_guc_log_dump(m->private, &p, true); } -DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(guc_load_err_log_dump, guc_load_err_dump_size); static int guc_log_level_get(void *data, u64 *val) { struct intel_guc_log *log = data; - if (!intel_guc_is_used(log_to_guc(log))) + if (!log->vma) return -ENODEV; *val = intel_guc_log_get_level(log); @@ -43,7 +95,7 @@ static int guc_log_level_set(void *data, u64 val) { struct intel_guc_log *log = data; - if (!intel_guc_is_used(log_to_guc(log))) + if (!log->vma) return -ENODEV; return intel_guc_log_set_level(log, val); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 22c1c12369f2..13b27b8ff74e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -623,7 +623,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) if (unlikely(ret < 0)) return ret; - intel_guc_pm_intrmsk_enable(&i915->gt); + intel_guc_pm_intrmsk_enable(to_gt(i915)); slpc_get_rp_values(slpc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 1f9d4fde421f..e7517206af82 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -145,7 +145,8 @@ guc_create_parallel(struct intel_engine_cs **engines, * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for * multi-lrc. 
*/ -#define NUMBER_MULTI_LRC_GUC_ID (GUC_MAX_LRC_DESCRIPTORS / 16) +#define NUMBER_MULTI_LRC_GUC_ID(guc) \ + ((guc)->submission_state.num_guc_ids / 16) /* * Below is a set of functions which control the GuC scheduling state which @@ -1040,8 +1041,6 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) spin_unlock(&ce->guc_state.lock); - GEM_BUG_ON(!do_put && !destroyed); - if (pending_enable || destroyed || deregister) { decr_outstanding_submission_g2h(guc); if (deregister) @@ -1206,7 +1205,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) * start_gt_clk is derived from GuC state. To get a consistent * view of activity, we query the GuC state only if gt is awake. */ - if (intel_gt_pm_get_if_awake(gt) && !in_reset) { + if (!in_reset && intel_gt_pm_get_if_awake(gt)) { stats_saved = *stats; gt_stamp_saved = guc->timestamp.gt_stamp; guc_update_engine_gt_clks(engine); @@ -1777,7 +1776,7 @@ int intel_guc_submission_init(struct intel_guc *guc) destroyed_worker_func); guc->submission_state.guc_ids_bitmap = - bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID, GFP_KERNEL); + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL); if (!guc->submission_state.guc_ids_bitmap) return -ENOMEM; @@ -1871,13 +1870,13 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce) if (intel_context_is_parent(ce)) ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap, - NUMBER_MULTI_LRC_GUC_ID, + NUMBER_MULTI_LRC_GUC_ID(guc), order_base_2(ce->parallel.number_children + 1)); else ret = ida_simple_get(&guc->submission_state.guc_ids, - NUMBER_MULTI_LRC_GUC_ID, - GUC_MAX_LRC_DESCRIPTORS, + NUMBER_MULTI_LRC_GUC_ID(guc), + guc->submission_state.num_guc_ids, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); if (unlikely(ret < 0)) @@ -1935,14 +1934,18 @@ static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce) GEM_BUG_ON(intel_context_is_parent(cn)); list_del_init(&cn->guc_id.link); - ce->guc_id = cn->guc_id; + ce->guc_id.id = cn->guc_id.id; - spin_lock(&ce->guc_state.lock); + spin_lock(&cn->guc_state.lock); clr_context_registered(cn); - spin_unlock(&ce->guc_state.lock); + spin_unlock(&cn->guc_state.lock); set_context_guc_id_invalid(cn); +#ifdef CONFIG_DRM_I915_SELFTEST + guc->number_guc_id_stolen++; +#endif + return 0; } else { return -EAGAIN; @@ -2646,7 +2649,6 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce) unsigned long flags; bool disabled; - lockdep_assert_held(&guc->submission_state.lock); GEM_BUG_ON(!intel_gt_pm_is_awake(gt)); GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id)); GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id)); @@ -2662,7 +2664,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce) } spin_unlock_irqrestore(&ce->guc_state.lock, flags); if (unlikely(disabled)) { - __release_guc_id(guc, ce); + release_guc_id(guc, ce); __guc_context_destroy(ce); return; } @@ -2696,36 +2698,48 @@ static void __guc_context_destroy(struct intel_context *ce) static void guc_flush_destroyed_contexts(struct intel_guc *guc) { - struct intel_context *ce, *cn; + struct intel_context *ce; unsigned long flags; GEM_BUG_ON(!submission_disabled(guc) && guc_submission_initialized(guc)); - spin_lock_irqsave(&guc->submission_state.lock, flags); - list_for_each_entry_safe(ce, cn, - &guc->submission_state.destroyed_contexts, - destroyed_link) { - list_del_init(&ce->destroyed_link); - __release_guc_id(guc, ce); + while (!list_empty(&guc->submission_state.destroyed_contexts)) { + 
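/*
 * [Editor's commentary, not in the patch: the rewrite from
 * list_for_each_entry_safe() to this pop-one-entry-per-iteration loop
 * is what allows release_guc_id()/guc_lrc_desc_unpin() to run without
 * submission_state.lock held (note the lockdep_assert_held() deleted
 * from guc_lrc_desc_unpin() above); the spinlock now only covers
 * detaching a single node from the list.]
 */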
spin_lock_irqsave(&guc->submission_state.lock, flags);
+		ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
+					      struct intel_context,
+					      destroyed_link);
+		if (ce)
+			list_del_init(&ce->destroyed_link);
+		spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+		if (!ce)
+			break;
+
+		release_guc_id(guc, ce);
 		__guc_context_destroy(ce);
 	}
-	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 }
 
 static void deregister_destroyed_contexts(struct intel_guc *guc)
 {
-	struct intel_context *ce, *cn;
+	struct intel_context *ce;
 	unsigned long flags;
 
-	spin_lock_irqsave(&guc->submission_state.lock, flags);
-	list_for_each_entry_safe(ce, cn,
-				 &guc->submission_state.destroyed_contexts,
-				 destroyed_link) {
-		list_del_init(&ce->destroyed_link);
+	while (!list_empty(&guc->submission_state.destroyed_contexts)) {
+		spin_lock_irqsave(&guc->submission_state.lock, flags);
+		ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
+					      struct intel_context,
+					      destroyed_link);
+		if (ce)
+			list_del_init(&ce->destroyed_link);
+		spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+
+		if (!ce)
+			break;
+
 		guc_lrc_desc_unpin(ce);
 	}
-	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 }
 
 static void destroyed_worker_func(struct work_struct *w)
@@ -3770,6 +3784,7 @@ static bool __guc_submission_selected(struct intel_guc *guc)
 
 void intel_guc_submission_init_early(struct intel_guc *guc)
 {
+	guc->submission_state.num_guc_ids = GUC_MAX_LRC_DESCRIPTORS;
 	guc->submission_supported = __guc_submission_supported(guc);
 	guc->submission_selected = __guc_submission_selected(guc);
 }
@@ -4018,11 +4033,12 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
 					 const u32 *msg, u32 len)
 {
 	struct intel_engine_cs *engine;
+	struct intel_gt *gt = guc_to_gt(guc);
 	u8 guc_class, instance;
 	u32 reason;
 
 	if (unlikely(len != 3)) {
-		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+		drm_err(&gt->i915->drm, "Invalid length %u", len);
 		return -EPROTO;
 	}
 
@@ -4032,12 +4048,19 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
 
 	engine = guc_lookup_engine(guc, guc_class, instance);
 	if (unlikely(!engine)) {
-		drm_err(&guc_to_gt(guc)->i915->drm,
+		drm_err(&gt->i915->drm,
 			"Invalid engine %d:%d", guc_class, instance);
 		return -EPROTO;
 	}
 
-	intel_gt_handle_error(guc_to_gt(guc), engine->mask,
+	/*
+	 * This is an unexpected failure of a hardware feature. So, log a real
+	 * error message not just the informational that comes with the reset.
+	 */
+	drm_err(&gt->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X",
+		guc_class, instance, engine->name, reason);
+
+	intel_gt_handle_error(gt, engine->mask,
 			      I915_ERROR_CAPTURE,
 			      "GuC failed to reset %s (reason=0x%08x)\n",
 			      engine->name, reason);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index ff4b6869b80b..d10b227ac4aa 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -54,65 +54,6 @@ void intel_huc_init_early(struct intel_huc *huc)
 	}
 }
 
-static int intel_huc_rsa_data_create(struct intel_huc *huc)
-{
-	struct intel_gt *gt = huc_to_gt(huc);
-	struct intel_guc *guc = &gt->uc.guc;
-	struct i915_vma *vma;
-	size_t copied;
-	void *vaddr;
-	int err;
-
-	err = i915_inject_probe_error(gt->i915, -ENXIO);
-	if (err)
-		return err;
-
-	/*
-	 * HuC firmware will sit above GUC_GGTT_TOP and will not map
-	through GTT. Unfortunately, this means GuC cannot perform
-	the HuC auth.
as the rsa offset now falls within the GuC - * inaccessible range. We resort to perma-pinning an additional - * vma within the accessible range that only contains the rsa - * signature. The GuC can use this extra pinning to perform - * the authentication since its GGTT offset will be GuC - * accessible. - */ - GEM_BUG_ON(huc->fw.rsa_size > PAGE_SIZE); - vma = intel_guc_allocate_vma(guc, PAGE_SIZE); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - vaddr = i915_gem_object_pin_map_unlocked(vma->obj, - i915_coherent_map_type(gt->i915, - vma->obj, true)); - if (IS_ERR(vaddr)) { - i915_vma_unpin_and_release(&vma, 0); - err = PTR_ERR(vaddr); - goto unpin_out; - } - - copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size); - i915_gem_object_unpin_map(vma->obj); - - if (copied < huc->fw.rsa_size) { - err = -ENOMEM; - goto unpin_out; - } - - huc->rsa_data = vma; - - return 0; - -unpin_out: - i915_vma_unpin_and_release(&vma, 0); - return err; -} - -static void intel_huc_rsa_data_destroy(struct intel_huc *huc) -{ - i915_vma_unpin_and_release(&huc->rsa_data, 0); -} - int intel_huc_init(struct intel_huc *huc) { struct drm_i915_private *i915 = huc_to_gt(huc)->i915; @@ -122,21 +63,10 @@ int intel_huc_init(struct intel_huc *huc) if (err) goto out; - /* - * HuC firmware image is outside GuC accessible range. - * Copy the RSA signature out of the image into - * a perma-pinned region set aside for it - */ - err = intel_huc_rsa_data_create(huc); - if (err) - goto out_fini; - intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE); return 0; -out_fini: - intel_uc_fw_fini(&huc->fw); out: i915_probe_error(i915, "failed with %d\n", err); return err; @@ -147,7 +77,6 @@ void intel_huc_fini(struct intel_huc *huc) if (!intel_uc_fw_is_loadable(&huc->fw)) return; - intel_huc_rsa_data_destroy(huc); intel_uc_fw_fini(&huc->fw); } @@ -177,7 +106,7 @@ int intel_huc_auth(struct intel_huc *huc) goto fail; ret = intel_guc_auth_huc(guc, - intel_guc_ggtt_offset(guc, huc->rsa_data)); + intel_guc_ggtt_offset(guc, huc->fw.rsa_data)); if (ret) { DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret); goto fail; @@ -199,7 +128,7 @@ int intel_huc_auth(struct intel_huc *huc) fail: i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret); - intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_FAIL); + intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL); return ret; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h index daee43b661d4..ae8c8a6c8cc8 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -15,8 +15,6 @@ struct intel_huc { struct intel_uc_fw fw; /* HuC-specific additions */ - struct i915_vma *rsa_data; - struct { i915_reg_t reg; u32 mask; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 2fef3b0bbe95..09ed29df67bc 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -8,6 +8,7 @@ #include "intel_guc.h" #include "intel_guc_ads.h" #include "intel_guc_submission.h" +#include "gt/intel_rps.h" #include "intel_uc.h" #include "i915_drv.h" @@ -35,7 +36,7 @@ static void uc_expand_default_options(struct intel_uc *uc) } /* Intermediate platforms are HuC authentication only */ - if (IS_ALDERLAKE_S(i915)) { + if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) { i915->params.enable_guc = ENABLE_GUC_LOAD_HUC; return; } @@ -462,6 +463,8 @@ static int __uc_init_hw(struct intel_uc *uc) else attempts = 1; + 
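/*
 * (Editor's note, not part of the patch: the intel_rps_raise_unslice() call
 * below presumably keeps the GT clock high while the GuC/HuC images are
 * transferred, shortening firmware load time; the matching
 * intel_rps_lower_unslice() calls further down restore RPn on the non-SLPC
 * and error paths, while the SLPC path lets the GuC take over frequency
 * control.)
 */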
intel_rps_raise_unslice(&uc_to_gt(uc)->rps); + while (attempts--) { /* * Always reset the GuC just before (re)loading, so @@ -499,6 +502,9 @@ static int __uc_init_hw(struct intel_uc *uc) ret = intel_guc_slpc_enable(&guc->slpc); if (ret) goto err_submission; + } else { + /* Restore GT back to RPn for non-SLPC path */ + intel_rps_lower_unslice(&uc_to_gt(uc)->rps); } drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n", @@ -529,6 +535,9 @@ err_submission: err_log_capture: __uc_capture_load_err_log(uc); err_out: + /* Return GT back to RPn */ + intel_rps_lower_unslice(&uc_to_gt(uc)->rps); + __uc_sanitize(uc); if (!ret) { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 3aa87be4f2e4..a5af05bde6f2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -48,22 +48,39 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, * Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same * firmware as TGL. */ -#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ - fw_def(ALDERLAKE_P, 0, guc_def(adlp, 62, 0, 3), huc_def(tgl, 7, 9, 3)) \ - fw_def(ALDERLAKE_S, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \ - fw_def(DG1, 0, guc_def(dg1, 62, 0, 0), huc_def(dg1, 7, 9, 3)) \ - fw_def(ROCKETLAKE, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \ - fw_def(TIGERLAKE, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \ - fw_def(JASPERLAKE, 0, guc_def(ehl, 62, 0, 0), huc_def(ehl, 9, 0, 0)) \ - fw_def(ELKHARTLAKE, 0, guc_def(ehl, 62, 0, 0), huc_def(ehl, 9, 0, 0)) \ - fw_def(ICELAKE, 0, guc_def(icl, 62, 0, 0), huc_def(icl, 9, 0, 0)) \ - fw_def(COMETLAKE, 5, guc_def(cml, 62, 0, 0), huc_def(cml, 4, 0, 0)) \ - fw_def(COMETLAKE, 0, guc_def(kbl, 62, 0, 0), huc_def(kbl, 4, 0, 0)) \ - fw_def(COFFEELAKE, 0, guc_def(kbl, 62, 0, 0), huc_def(kbl, 4, 0, 0)) \ - fw_def(GEMINILAKE, 0, guc_def(glk, 62, 0, 0), huc_def(glk, 4, 0, 0)) \ - fw_def(KABYLAKE, 0, guc_def(kbl, 62, 0, 0), huc_def(kbl, 4, 0, 0)) \ - fw_def(BROXTON, 0, guc_def(bxt, 62, 0, 0), huc_def(bxt, 2, 0, 0)) \ - fw_def(SKYLAKE, 0, guc_def(skl, 62, 0, 0), huc_def(skl, 2, 0, 0)) +#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \ + fw_def(ALDERLAKE_P, 0, guc_def(adlp, 62, 0, 3)) \ + fw_def(ALDERLAKE_S, 0, guc_def(tgl, 62, 0, 0)) \ + fw_def(DG1, 0, guc_def(dg1, 62, 0, 0)) \ + fw_def(ROCKETLAKE, 0, guc_def(tgl, 62, 0, 0)) \ + fw_def(TIGERLAKE, 0, guc_def(tgl, 62, 0, 0)) \ + fw_def(JASPERLAKE, 0, guc_def(ehl, 62, 0, 0)) \ + fw_def(ELKHARTLAKE, 0, guc_def(ehl, 62, 0, 0)) \ + fw_def(ICELAKE, 0, guc_def(icl, 62, 0, 0)) \ + fw_def(COMETLAKE, 5, guc_def(cml, 62, 0, 0)) \ + fw_def(COMETLAKE, 0, guc_def(kbl, 62, 0, 0)) \ + fw_def(COFFEELAKE, 0, guc_def(kbl, 62, 0, 0)) \ + fw_def(GEMINILAKE, 0, guc_def(glk, 62, 0, 0)) \ + fw_def(KABYLAKE, 0, guc_def(kbl, 62, 0, 0)) \ + fw_def(BROXTON, 0, guc_def(bxt, 62, 0, 0)) \ + fw_def(SKYLAKE, 0, guc_def(skl, 62, 0, 0)) + +#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \ + fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \ + fw_def(ALDERLAKE_S, 0, huc_def(tgl, 7, 9, 3)) \ + fw_def(DG1, 0, huc_def(dg1, 7, 9, 3)) \ + fw_def(ROCKETLAKE, 0, huc_def(tgl, 7, 9, 3)) \ + fw_def(TIGERLAKE, 0, huc_def(tgl, 7, 9, 3)) \ + fw_def(JASPERLAKE, 0, huc_def(ehl, 9, 0, 0)) \ + fw_def(ELKHARTLAKE, 0, huc_def(ehl, 9, 0, 0)) \ + fw_def(ICELAKE, 0, huc_def(icl, 9, 0, 0)) \ + fw_def(COMETLAKE, 5, huc_def(cml, 4, 0, 0)) \ + fw_def(COMETLAKE, 0, huc_def(kbl, 4, 0, 0)) \ + fw_def(COFFEELAKE, 0, huc_def(kbl, 4, 0, 0)) \ + 
fw_def(GEMINILAKE, 0, huc_def(glk, 4, 0, 0)) \ + fw_def(KABYLAKE, 0, huc_def(kbl, 4, 0, 0)) \ + fw_def(BROXTON, 0, huc_def(bxt, 2, 0, 0)) \ + fw_def(SKYLAKE, 0, huc_def(skl, 2, 0, 0)) #define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \ "i915/" \ @@ -79,11 +96,11 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, __MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_) /* All blobs need to be declared via MODULE_FIRMWARE() */ -#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \ - MODULE_FIRMWARE(guc_); \ - MODULE_FIRMWARE(huc_); +#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \ + MODULE_FIRMWARE(uc_); -INTEL_UC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH, MAKE_HUC_FW_PATH) +INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH) +INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH) /* The below structs and macros are used to iterate across the list of blobs */ struct __packed uc_fw_blob { @@ -106,31 +123,47 @@ struct __packed uc_fw_blob { struct __packed uc_fw_platform_requirement { enum intel_platform p; u8 rev; /* first platform rev using this FW */ - const struct uc_fw_blob blobs[INTEL_UC_FW_NUM_TYPES]; + const struct uc_fw_blob blob; }; -#define MAKE_FW_LIST(platform_, revid_, guc_, huc_) \ +#define MAKE_FW_LIST(platform_, revid_, uc_) \ { \ .p = INTEL_##platform_, \ .rev = revid_, \ - .blobs[INTEL_UC_FW_TYPE_GUC] = guc_, \ - .blobs[INTEL_UC_FW_TYPE_HUC] = huc_, \ + .blob = uc_, \ }, +struct fw_blobs_by_type { + const struct uc_fw_platform_requirement *blobs; + u32 count; +}; + static void __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) { - static const struct uc_fw_platform_requirement fw_blobs[] = { - INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB) + static const struct uc_fw_platform_requirement blobs_guc[] = { + INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB) + }; + static const struct uc_fw_platform_requirement blobs_huc[] = { + INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB) }; + static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = { + [INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) }, + [INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) }, + }; + static const struct uc_fw_platform_requirement *fw_blobs; enum intel_platform p = INTEL_INFO(i915)->platform; + u32 fw_count; u8 rev = INTEL_REVID(i915); int i; - for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) { + GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all)); + fw_blobs = blobs_all[uc_fw->type].blobs; + fw_count = blobs_all[uc_fw->type].count; + + for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) { if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) { - const struct uc_fw_blob *blob = - &fw_blobs[i].blobs[uc_fw->type]; + const struct uc_fw_blob *blob = &fw_blobs[i].blob; uc_fw->path = blob->path; uc_fw->major_ver_wanted = blob->major; uc_fw->minor_ver_wanted = blob->minor; @@ -140,7 +173,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) /* make sure the list is ordered as expected */ if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) { - for (i = 1; i < ARRAY_SIZE(fw_blobs); i++) { + for (i = 1; i < fw_count; i++) { if (fw_blobs[i].p < fw_blobs[i - 1].p) continue; @@ -322,13 +355,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32); /* now RSA */ - if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) { - drm_warn(&i915->drm, "%s firmware %s: unexpected key size: 
%u != %u\n",
-			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
-			 css->key_size_dw, UOS_RSA_SCRATCH_COUNT);
-		err = -EPROTO;
-		goto fail;
-	}
 	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
 
 	/* At least, it should have header, uCode and RSA. Size of all three. */
@@ -540,10 +566,79 @@ fail:
 	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
-	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
+	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
 	return err;
 }
 
+static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
+{
+	/*
+	 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
+	 * while it reads it from the 64 RSA registers if it is smaller.
+	 * The HuC RSA is always read from memory.
+	 */
+	return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
+}
+
+static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
+{
+	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+	struct i915_vma *vma;
+	size_t copied;
+	void *vaddr;
+	int err;
+
+	err = i915_inject_probe_error(gt->i915, -ENXIO);
+	if (err)
+		return err;
+
+	if (!uc_fw_need_rsa_in_memory(uc_fw))
+		return 0;
+
+	/*
+	 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
+	 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
+	 * authentication from memory, as the RSA offset now falls within the
+	 * GuC inaccessible range. We resort to perma-pinning an additional vma
+	 * within the accessible range that only contains the RSA signature.
+	 * The GuC HW can use this extra pinning to perform the authentication
+	 * since its GGTT offset will be GuC accessible.
+	 */
+	GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
+	vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
+						 i915_coherent_map_type(gt->i915, vma->obj, true));
+	if (IS_ERR(vaddr)) {
+		i915_vma_unpin_and_release(&vma, 0);
+		err = PTR_ERR(vaddr);
+		goto unpin_out;
+	}
+
+	copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
+	i915_gem_object_unpin_map(vma->obj);
+
+	if (copied < uc_fw->rsa_size) {
+		err = -ENOMEM;
+		goto unpin_out;
+	}
+
+	uc_fw->rsa_data = vma;
+
+	return 0;
+
+unpin_out:
+	i915_vma_unpin_and_release(&vma, 0);
+	return err;
+}
+
+static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
+{
+	i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
+}
+
 int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
 {
 	int err;
@@ -558,14 +653,29 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
 	if (err) {
 		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
 				 intel_uc_fw_type_repr(uc_fw->type), err);
-		intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
+		goto out;
 	}
 
+	err = uc_fw_rsa_data_create(uc_fw);
+	if (err) {
+		DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
+				 intel_uc_fw_type_repr(uc_fw->type), err);
+		goto out_unpin;
+	}
+
+	return 0;
+
+out_unpin:
+	i915_gem_object_unpin_pages(uc_fw->obj);
+out:
+	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
 	return err;
 }
 
 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
 {
+	uc_fw_rsa_data_destroy(uc_fw);
+
 	if (i915_gem_object_has_pinned_pages(uc_fw->obj))
 		i915_gem_object_unpin_pages(uc_fw->obj);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 1e00bf65639e..d9d1dc0b4cbb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -32,11 +32,12 @@ struct intel_gt;
 * | |
MISSING <--/ | \--> ERROR |
 * | fetch | V |
 * | | AVAILABLE |
- * +------------+- | -+
+ * +------------+- | \ -+
+ * | | | \--> INIT FAIL |
 * | init | V |
 * | | /------> LOADABLE <----<-----------\ |
 * +------------+- \ / \ \ \ -+
- * | | FAIL <--< \--> TRANSFERRED \ |
+ * | | LOAD FAIL <--< \--> TRANSFERRED \ |
 * | upload | \ / \ / |
 * | | \---------/ \--> RUNNING |
 * +------------+---------------------------------------------------+
@@ -50,8 +51,9 @@ enum intel_uc_fw_status {
 	INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */
 	INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */
 	INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+	INTEL_UC_FIRMWARE_INIT_FAIL, /* failed to prepare fw objects for load */
 	INTEL_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */
-	INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */
+	INTEL_UC_FIRMWARE_LOAD_FAIL, /* failed to xfer or init/auth the fw */
 	INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
 	INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
 };
@@ -84,6 +86,7 @@ struct intel_uc_fw {
 	 * or during a GT reset (mutex guarantees single threaded).
 	 */
 	struct i915_vma dummy;
+	struct i915_vma *rsa_data;
 
 	/*
 	 * The firmware build process will generate a version header file with major and
@@ -130,10 +133,12 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
 		return "ERROR";
 	case INTEL_UC_FIRMWARE_AVAILABLE:
 		return "AVAILABLE";
+	case INTEL_UC_FIRMWARE_INIT_FAIL:
+		return "INIT FAIL";
 	case INTEL_UC_FIRMWARE_LOADABLE:
 		return "LOADABLE";
-	case INTEL_UC_FIRMWARE_FAIL:
-		return "FAIL";
+	case INTEL_UC_FIRMWARE_LOAD_FAIL:
+		return "LOAD FAIL";
 	case INTEL_UC_FIRMWARE_TRANSFERRED:
 		return "TRANSFERRED";
 	case INTEL_UC_FIRMWARE_RUNNING:
@@ -155,7 +160,8 @@ static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status)
 		return -ENOENT;
 	case INTEL_UC_FIRMWARE_ERROR:
 		return -ENOEXEC;
-	case INTEL_UC_FIRMWARE_FAIL:
+	case INTEL_UC_FIRMWARE_INIT_FAIL:
+	case INTEL_UC_FIRMWARE_LOAD_FAIL:
 		return -EIO;
 	case INTEL_UC_FIRMWARE_SELECTED:
 		return -ESTALE;
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index fb0e4a7bd8ca..d3327b802b76 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -3,8 +3,21 @@
 * Copyright © 2021 Intel Corporation
 */
 
+#include "selftests/igt_spinner.h"
 #include "selftests/intel_scheduler_helpers.h"
 
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+	int err = 0;
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (spin && !igt_wait_for_spinner(spin, rq))
+		err = -ETIMEDOUT;
+
+	return err;
+}
+
 static struct i915_request *nop_user_request(struct intel_context *ce,
 					     struct i915_request *from)
 {
@@ -110,12 +123,172 @@ err:
 	return ret;
 }
 
+/*
+ * intel_guc_steal_guc_ids - Test to exhaust all guc_ids and then steal one
+ *
+ * This test creates a spinner which is used to block all subsequent submissions
+ * until it completes. Next, a loop creates a context and a NOP request each
+ * iteration until the guc_ids are exhausted (request creation returns -EAGAIN).
+ * The spinner is ended, unblocking all requests created in the loop. At this
+ * point all guc_ids are exhausted but are available to steal. Try to create
+ * another request which should successfully steal a guc_id. Wait on last
+ * request to complete, idle GPU, verify a guc_id was stolen via a counter, and
+ * exit the test.
Test also artificially reduces the number of guc_ids so the
+ * test runs in a timely manner.
+ */
+static int intel_guc_steal_guc_ids(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_guc *guc = &gt->uc.guc;
+	int ret, sv, context_index = 0;
+	intel_wakeref_t wakeref;
+	struct intel_engine_cs *engine;
+	struct intel_context **ce;
+	struct igt_spinner spin;
+	struct i915_request *spin_rq = NULL, *rq, *last = NULL;
+	int number_guc_id_stolen = guc->number_guc_id_stolen;
+
+	ce = kzalloc(sizeof(*ce) * GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL);
+	if (!ce) {
+		pr_err("Context array allocation failed\n");
+		return -ENOMEM;
+	}
+
+	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+	engine = intel_selftest_find_any_engine(gt);
+	sv = guc->submission_state.num_guc_ids;
+	guc->submission_state.num_guc_ids = 4096;
+
+	/* Create spinner to block requests in below loop */
+	ce[context_index] = intel_context_create(engine);
+	if (IS_ERR(ce[context_index])) {
+		ret = PTR_ERR(ce[context_index]);
+		ce[context_index] = NULL;
+		pr_err("Failed to create context: %d\n", ret);
+		goto err_wakeref;
+	}
+	ret = igt_spinner_init(&spin, engine->gt);
+	if (ret) {
+		pr_err("Failed to create spinner: %d\n", ret);
+		goto err_contexts;
+	}
+	spin_rq = igt_spinner_create_request(&spin, ce[context_index],
+					     MI_ARB_CHECK);
+	if (IS_ERR(spin_rq)) {
+		ret = PTR_ERR(spin_rq);
+		pr_err("Failed to create spinner request: %d\n", ret);
+		goto err_contexts;
+	}
+	ret = request_add_spin(spin_rq, &spin);
+	if (ret) {
+		pr_err("Failed to add Spinner request: %d\n", ret);
+		goto err_spin_rq;
+	}
+
+	/* Use all guc_ids */
+	while (ret != -EAGAIN) {
+		ce[++context_index] = intel_context_create(engine);
+		if (IS_ERR(ce[context_index])) {
+			ret = PTR_ERR(ce[context_index--]);
+			ce[context_index] = NULL;
+			pr_err("Failed to create context: %d\n", ret);
+			goto err_spin_rq;
+		}
+
+		rq = nop_user_request(ce[context_index], spin_rq);
+		if (IS_ERR(rq)) {
+			ret = PTR_ERR(rq);
+			rq = NULL;
+			if (ret != -EAGAIN) {
+				pr_err("Failed to create request, %d: %d\n",
+				       context_index, ret);
+				goto err_spin_rq;
+			}
+		} else {
+			if (last)
+				i915_request_put(last);
+			last = rq;
+		}
+	}
+
+	/* Release blocked requests */
+	igt_spinner_end(&spin);
+	ret = intel_selftest_wait_for_rq(spin_rq);
+	if (ret) {
+		pr_err("Spin request failed to complete: %d\n", ret);
+		i915_request_put(last);
+		goto err_spin_rq;
+	}
+	i915_request_put(spin_rq);
+	igt_spinner_fini(&spin);
+	spin_rq = NULL;
+
+	/* Wait for last request */
+	ret = i915_request_wait(last, 0, HZ * 30);
+	i915_request_put(last);
+	if (ret < 0) {
+		pr_err("Last request failed to complete: %d\n", ret);
+		goto err_spin_rq;
+	}
+
+	/* Try to steal guc_id */
+	rq = nop_user_request(ce[context_index], NULL);
+	if (IS_ERR(rq)) {
+		ret = PTR_ERR(rq);
+		pr_err("Failed to steal guc_id, %d: %d\n", context_index, ret);
+		goto err_spin_rq;
+	}
+
+	/* Wait for request with stolen guc_id */
+	ret = i915_request_wait(rq, 0, HZ);
+	i915_request_put(rq);
+	if (ret < 0) {
+		pr_err("Request with stolen guc_id failed to complete: %d\n",
+		       ret);
+		goto err_spin_rq;
+	}
+
+	/* Wait for idle */
+	ret = intel_gt_wait_for_idle(gt, HZ * 30);
+	if (ret < 0) {
+		pr_err("GT failed to idle: %d\n", ret);
+		goto err_spin_rq;
+	}
+
+	/* Verify a guc_id was stolen */
+	if (guc->number_guc_id_stolen == number_guc_id_stolen) {
+		pr_err("No guc_id was stolen");
+		ret = -EINVAL;
+	} else {
+		ret = 0;
+	}
+
+err_spin_rq:
+	if (spin_rq) {
+		igt_spinner_end(&spin);
+		intel_selftest_wait_for_rq(spin_rq);
+		i915_request_put(spin_rq);
+
igt_spinner_fini(&spin); + intel_gt_wait_for_idle(gt, HZ * 30); + } +err_contexts: + for (; context_index >= 0 && ce[context_index]; --context_index) + intel_context_put(ce[context_index]); +err_wakeref: + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + kfree(ce); + guc->submission_state.num_guc_ids = sv; + + return ret; +} + int intel_guc_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(intel_guc_scrub_ctbs), + SUBTEST(intel_guc_steal_guc_ids), }; - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); if (intel_gt_is_wedged(gt)) return 0; diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c index 50953c8e8b53..1297ddbf7f88 100644 --- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c @@ -167,7 +167,7 @@ int intel_guc_multi_lrc_live_selftests(struct drm_i915_private *i915) static const struct i915_subtest tests[] = { SUBTEST(intel_guc_multi_lrc_basic), }; - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); if (intel_gt_is_wedged(gt)) return 0; diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 11a8baba6822..9ec064199364 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -427,7 +427,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, plane->tiled = !!(val & SPRITE_TILED); color_order = !!(val & SPRITE_RGB_ORDER_RGBX); - yuv_order = (val & SPRITE_YUV_BYTE_ORDER_MASK) >> + yuv_order = (val & SPRITE_YUV_ORDER_MASK) >> _SPRITE_YUV_ORDER_SHIFT; fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT; diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index cbac409f6c8a..f0b69e4dcb52 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -205,7 +205,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915) spin_lock_init(&gvt->scheduler.mmio_context_lock); mutex_init(&gvt->lock); mutex_init(&gvt->sched_lock); - gvt->gt = &i915->gt; + gvt->gt = to_gt(i915); i915->gvt = gvt; init_device_info(gvt); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 6c804102528b..42a0c9ae0a73 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1386,7 +1386,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) enum intel_engine_id i; int ret; - ppgtt = i915_ppgtt_create(&i915->gt, I915_BO_ALLOC_PM_EARLY); + ppgtt = i915_ppgtt_create(to_gt(i915), I915_BO_ALLOC_PM_EARLY); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 3103c1e1fd14..ee2b3a375362 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -426,8 +426,9 @@ replace_barrier(struct i915_active *ref, struct i915_active_fence *active) return true; } -int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence) +int i915_active_add_request(struct i915_active *ref, struct i915_request *rq) { + struct dma_fence *fence = &rq->fence; struct i915_active_fence *active; int err; @@ -436,7 +437,7 @@ int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence) if (err) return err; - active = active_instance(ref, idx); + active = active_instance(ref, i915_request_timeline(rq)->fence_context); if (!active) { err = -ENOMEM; goto out; @@ -477,29 +478,6 
@@ __i915_active_set_fence(struct i915_active *ref, return prev; } -static struct i915_active_fence * -__active_fence(struct i915_active *ref, u64 idx) -{ - struct active_node *it; - - it = __active_lookup(ref, idx); - if (unlikely(!it)) { /* Contention with parallel tree builders! */ - spin_lock_irq(&ref->tree_lock); - it = __active_lookup(ref, idx); - spin_unlock_irq(&ref->tree_lock); - } - GEM_BUG_ON(!it); /* slot must be preallocated */ - - return &it->base; -} - -struct dma_fence * -__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence) -{ - /* Only valid while active, see i915_active_acquire_for_context() */ - return __i915_active_set_fence(ref, __active_fence(ref, idx), fence); -} - struct dma_fence * i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f) { diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index 5fcdb0e2bc9e..7eb44132183a 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -164,26 +164,11 @@ void __i915_active_init(struct i915_active *ref, __i915_active_init(ref, active, retire, flags, &__mkey, &__wkey); \ } while (0) -struct dma_fence * -__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence); -int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence); - -static inline int -i915_active_add_request(struct i915_active *ref, struct i915_request *rq) -{ - return i915_active_ref(ref, - i915_request_timeline(rq)->fence_context, - &rq->fence); -} +int i915_active_add_request(struct i915_active *ref, struct i915_request *rq); struct dma_fence * i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f); -static inline bool i915_active_has_exclusive(struct i915_active *ref) -{ - return rcu_access_pointer(ref->excl.fence); -} - int __i915_active_wait(struct i915_active *ref, int state); static inline int i915_active_wait(struct i915_active *ref) { diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h index c149f348a972..b02a78ac87db 100644 --- a/drivers/gpu/drm/i915/i915_active_types.h +++ b/drivers/gpu/drm/i915/i915_active_types.h @@ -15,8 +15,6 @@ #include <linux/rcupdate.h> #include <linux/workqueue.h> -#include "i915_utils.h" - struct i915_active_fence { struct dma_fence __rcu *fence; struct dma_fence_cb cb; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 390d541f64ea..e0e052cdf8b8 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -48,7 +48,6 @@ #include "i915_debugfs_params.h" #include "i915_irq.h" #include "i915_scheduler.h" -#include "i915_trace.h" #include "intel_pm.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) @@ -66,7 +65,7 @@ static int i915_capabilities(struct seq_file *m, void *data) intel_device_info_print_static(INTEL_INFO(i915), &p); intel_device_info_print_runtime(RUNTIME_INFO(i915), &p); i915_print_iommu_status(i915, &p); - intel_gt_info_print(&i915->gt.info, &p); + intel_gt_info_print(&to_gt(i915)->info, &p); intel_driver_caps_print(&i915->caps, &p); kernel_param_lock(THIS_MODULE); @@ -294,7 +293,7 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file) gpu = NULL; with_intel_runtime_pm(&i915->runtime_pm, wakeref) - gpu = i915_gpu_coredump(&i915->gt, ALL_ENGINES); + gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES); if (IS_ERR(gpu)) return PTR_ERR(gpu); @@ -352,7 +351,7 @@ static const struct file_operations 
i915_error_state_fops = { static int i915_frequency_info(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); struct drm_printer p = drm_seq_file_printer(m); intel_gt_pm_frequency_dump(gt, &p); @@ -440,11 +439,11 @@ static int i915_swizzle_info(struct seq_file *m, void *data) static int i915_rps_boost_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_rps *rps = &dev_priv->gt.rps; + struct intel_rps *rps = &to_gt(dev_priv)->rps; seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps))); seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps))); - seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake)); + seq_printf(m, "GPU busy? %s\n", yesno(to_gt(dev_priv)->awake)); seq_printf(m, "Boosts outstanding? %d\n", atomic_read(&rps->num_waiters)); seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); @@ -477,7 +476,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) seq_printf(m, "Runtime power status: %s\n", enableddisabled(!dev_priv->power_domains.init_wakeref)); - seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake)); + seq_printf(m, "GPU idle: %s\n", yesno(!to_gt(dev_priv)->awake)); seq_printf(m, "IRQs disabled: %s\n", yesno(!intel_irqs_enabled(dev_priv))); #ifdef CONFIG_PM @@ -509,18 +508,18 @@ static int i915_engine_info(struct seq_file *m, void *unused) wakeref = intel_runtime_pm_get(&i915->runtime_pm); seq_printf(m, "GT awake? %s [%d], %llums\n", - yesno(i915->gt.awake), - atomic_read(&i915->gt.wakeref.count), - ktime_to_ms(intel_gt_get_awake_time(&i915->gt))); + yesno(to_gt(i915)->awake), + atomic_read(&to_gt(i915)->wakeref.count), + ktime_to_ms(intel_gt_get_awake_time(to_gt(i915)))); seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n", - i915->gt.clock_frequency, - i915->gt.clock_period_ns); + to_gt(i915)->clock_frequency, + to_gt(i915)->clock_period_ns); p = drm_seq_file_printer(m); for_each_uabi_engine(engine, i915) intel_engine_dump(engine, &p, "%s\n", engine->name); - intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule); + intel_gt_show_timelines(to_gt(i915), &p, i915_request_show_with_schedule); intel_runtime_pm_put(&i915->runtime_pm, wakeref); @@ -559,14 +558,14 @@ static int i915_wedged_get(void *data, u64 *val) { struct drm_i915_private *i915 = data; - return intel_gt_debugfs_reset_show(&i915->gt, val); + return intel_gt_debugfs_reset_show(to_gt(i915), val); } static int i915_wedged_set(void *data, u64 val) { struct drm_i915_private *i915 = data; - return intel_gt_debugfs_reset_store(&i915->gt, val); + return intel_gt_debugfs_reset_store(to_gt(i915), val); } DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, @@ -582,7 +581,7 @@ i915_perf_noa_delay_set(void *data, u64 val) * This would lead to infinite waits as we're doing timestamp * difference on the CS with only 32bits. 
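 *
 * (Editor's worked example, not part of the patch: at an assumed CS
 * timestamp frequency of 19.2 MHz, U32_MAX ticks spans roughly
 * 2^32 / 19.2e6 ~= 224 seconds, so any NOA delay longer than that could
 * not be represented in a 32-bit timestamp difference, hence the
 * -EINVAL below.)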
 */
-	if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX)
+	if (intel_gt_ns_to_clock_interval(to_gt(i915), val) > U32_MAX)
 		return -EINVAL;
 
 	atomic64_set(&i915->perf.noa_programming_delay, val);
@@ -667,16 +666,18 @@ static int
 i915_drop_caches_set(void *data, u64 val)
 {
 	struct drm_i915_private *i915 = data;
+	unsigned int flags;
 	int ret;
 
 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
 		  val, val & DROP_ALL);
 
-	ret = gt_drop_caches(&i915->gt, val);
+	ret = gt_drop_caches(to_gt(i915), val);
 	if (ret)
 		return ret;
 
 	fs_reclaim_acquire(GFP_KERNEL);
+	flags = memalloc_noreclaim_save();
 	if (val & DROP_BOUND)
 		i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
 
@@ -685,6 +686,7 @@ i915_drop_caches_set(void *data, u64 val)
 	if (val & DROP_SHRINK_ALL)
 		i915_gem_shrink_all(i915);
+	memalloc_noreclaim_restore(flags);
 	fs_reclaim_release(GFP_KERNEL);
 
 	if (val & DROP_RCU)
@@ -703,7 +705,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
 static int i915_sseu_status(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *i915 = node_to_i915(m->private);
-	struct intel_gt *gt = &i915->gt;
+	struct intel_gt *gt = to_gt(i915);
 
 	return intel_sseu_status(m, gt);
 }
@@ -712,14 +714,14 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
 	struct drm_i915_private *i915 = inode->i_private;
 
-	return intel_gt_pm_debugfs_forcewake_user_open(&i915->gt);
+	return intel_gt_pm_debugfs_forcewake_user_open(to_gt(i915));
 }
 
 static int i915_forcewake_release(struct inode *inode, struct file *file)
 {
 	struct drm_i915_private *i915 = inode->i_private;
 
-	return intel_gt_pm_debugfs_forcewake_user_release(&i915->gt);
+	return intel_gt_pm_debugfs_forcewake_user_release(to_gt(i915));
 }
 
 static const struct file_operations i915_forcewake_fops = {
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 20424275d41e..783c8676eee2 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -40,8 +40,8 @@ static int notify_guc(struct drm_i915_private *i915)
 {
 	int ret = 0;
 
-	if (intel_uc_uses_guc_submission(&i915->gt.uc))
-		ret = intel_guc_global_policies_update(&i915->gt.uc.guc);
+	if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+		ret = intel_guc_global_policies_update(&to_gt(i915)->uc.guc);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_deps.c b/drivers/gpu/drm/i915/i915_deps.c
new file mode 100644
index 000000000000..999210b37325
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_deps.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/slab.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+
+#include "i915_deps.h"
+
+/**
+ * DOC: Set of utilities to dynamically collect dependencies into a
+ * structure which is fed into the GT migration code.
+ *
+ * Once we can do async unbinding, this is also needed to coalesce
+ * the migration fence with the unbind fences if these are coalesced
+ * post-migration.
+ *
+ * While collecting the individual dependencies, we store the refcounted
+ * struct dma_fence pointers in a realloc-managed pointer array, since
+ * that can be easily fed into a dma_fence_array. Other options are
+ * available, like for example an xarray for similarity with drm/sched.
+ * Can be changed easily if needed.
+ *
+ * A struct i915_deps needs to be initialized using i915_deps_init().
+ * If i915_deps_add_dependency() or i915_deps_add_resv() return an
+ * error code they will internally call i915_deps_fini(), which frees
+ * all internal references and allocations.
+ */
+
+/* Min number of fence pointers in the array when an allocation occurs. */
+#define I915_DEPS_MIN_ALLOC_CHUNK 8U
+
+static void i915_deps_reset_fences(struct i915_deps *deps)
+{
+	if (deps->fences != &deps->single)
+		kfree(deps->fences);
+	deps->num_deps = 0;
+	deps->fences_size = 1;
+	deps->fences = &deps->single;
+}
+
+/**
+ * i915_deps_init - Initialize an i915_deps structure
+ * @deps: Pointer to the i915_deps structure to initialize.
+ * @gfp: The allocation mode for subsequent allocations.
+ */
+void i915_deps_init(struct i915_deps *deps, gfp_t gfp)
+{
+	deps->fences = NULL;
+	deps->gfp = gfp;
+	i915_deps_reset_fences(deps);
+}
+
+/**
+ * i915_deps_fini - Finalize an i915_deps structure
+ * @deps: Pointer to the i915_deps structure to finalize.
+ *
+ * This function drops all fence references taken, conditionally frees and
+ * then resets the fences array.
+ */
+void i915_deps_fini(struct i915_deps *deps)
+{
+	unsigned int i;
+
+	for (i = 0; i < deps->num_deps; ++i)
+		dma_fence_put(deps->fences[i]);
+
+	if (deps->fences != &deps->single)
+		kfree(deps->fences);
+}
+
+static int i915_deps_grow(struct i915_deps *deps, struct dma_fence *fence,
+			  const struct ttm_operation_ctx *ctx)
+{
+	int ret;
+
+	if (deps->num_deps >= deps->fences_size) {
+		unsigned int new_size = 2 * deps->fences_size;
+		struct dma_fence **new_fences;
+
+		new_size = max(new_size, I915_DEPS_MIN_ALLOC_CHUNK);
+		new_fences = kmalloc_array(new_size, sizeof(*new_fences), deps->gfp);
+		if (!new_fences)
+			goto sync;
+
+		memcpy(new_fences, deps->fences,
+		       deps->fences_size * sizeof(*new_fences));
+		swap(new_fences, deps->fences);
+		if (new_fences != &deps->single)
+			kfree(new_fences);
+		deps->fences_size = new_size;
+	}
+	deps->fences[deps->num_deps++] = dma_fence_get(fence);
+	return 0;
+
+sync:
+	if (ctx->no_wait_gpu && !dma_fence_is_signaled(fence)) {
+		ret = -EBUSY;
+		goto unref;
+	}
+
+	ret = dma_fence_wait(fence, ctx->interruptible);
+	if (ret)
+		goto unref;
+
+	ret = fence->error;
+	if (ret)
+		goto unref;
+
+	return 0;
+
+unref:
+	i915_deps_fini(deps);
+	return ret;
+}
+
+/**
+ * i915_deps_sync - Wait for all the fences in the dependency collection
+ * @deps: Pointer to the i915_deps structure the fences of which to wait for.
+ * @ctx: Pointer to a struct ttm_operation_ctx indicating how the waits
+ * should be performed.
+ *
+ * This function waits for fences in the dependency collection. If it
+ * encounters an error during the wait or a fence error, the wait for
+ * further fences is aborted and the error returned.
+ *
+ * Return: Zero if successful, negative error code on error.
+ */
+int i915_deps_sync(const struct i915_deps *deps, const struct ttm_operation_ctx *ctx)
+{
+	struct dma_fence **fences = deps->fences;
+	unsigned int i;
+	int ret = 0;
+
+	for (i = 0; i < deps->num_deps; ++i, ++fences) {
+		if (ctx->no_wait_gpu && !dma_fence_is_signaled(*fences)) {
+			ret = -EBUSY;
+			break;
+		}
+
+		ret = dma_fence_wait(*fences, ctx->interruptible);
+		if (!ret)
+			ret = (*fences)->error;
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+/**
+ * i915_deps_add_dependency - Add a fence to the dependency collection
+ * @deps: Pointer to the i915_deps structure a fence is to be added to.
+ * @fence: The fence to add.
+ * @ctx: Pointer to a struct ttm_operation_ctx indicating how waits are to
+ * be performed if waiting.
+ *
+ * Adds a fence to the dependency collection, and takes a reference on it.
+ * If the fence context is not zero and there was a later fence from the
+ * same fence context already added, then the fence is not added to the
+ * dependency collection. If the fence context is not zero and there was
+ * an earlier fence already added, then the fence will replace the older
+ * fence from the same context and the reference on the earlier fence will
+ * be dropped.
+ * If there is a failure to allocate memory to accommodate the new fence to
+ * be added, the new fence will instead be waited for and an error may
+ * be returned, depending on the value of @ctx, or if there was a fence
+ * error. If an error was returned, the dependency collection will be
+ * finalized and all fence references dropped.
+ *
+ * Return: 0 if success. Negative error code on error.
+ */
+int i915_deps_add_dependency(struct i915_deps *deps,
+			     struct dma_fence *fence,
+			     const struct ttm_operation_ctx *ctx)
+{
+	unsigned int i;
+	int ret;
+
+	if (!fence)
+		return 0;
+
+	if (dma_fence_is_signaled(fence)) {
+		ret = fence->error;
+		if (ret)
+			i915_deps_fini(deps);
+		return ret;
+	}
+
+	for (i = 0; i < deps->num_deps; ++i) {
+		struct dma_fence *entry = deps->fences[i];
+
+		if (!entry->context || entry->context != fence->context)
+			continue;
+
+		if (dma_fence_is_later(fence, entry)) {
+			dma_fence_put(entry);
+			deps->fences[i] = dma_fence_get(fence);
+		}
+
+		return 0;
+	}
+
+	return i915_deps_grow(deps, fence, ctx);
}
+
+/**
+ * i915_deps_add_resv - Add the fences of a reservation object to a dependency
+ * collection.
+ * @deps: Pointer to the i915_deps structure a fence is to be added to.
+ * @resv: The reservation object, the fences of which to add.
+ * @ctx: Pointer to a struct ttm_operation_ctx indicating how waits are to
+ * be performed if waiting.
+ *
+ * Calls i915_deps_add_dependency() on the indicated fences of @resv.
+ *
+ * Return: Zero on success. Negative error code on error.
+ */
+int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv,
+		       const struct ttm_operation_ctx *ctx)
+{
+	struct dma_resv_iter iter;
+	struct dma_fence *fence;
+
+	dma_resv_assert_held(resv);
+	dma_resv_for_each_fence(&iter, resv, true, fence) {
+		int ret = i915_deps_add_dependency(deps, fence, ctx);
+
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_deps.h b/drivers/gpu/drm/i915/i915_deps.h
new file mode 100644
index 000000000000..d76c0106c910
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_deps.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _I915_DEPS_H_
+#define _I915_DEPS_H_
+
+#include <linux/types.h>
+
+struct ttm_operation_ctx;
+struct dma_fence;
+struct dma_resv;
+
+/**
+ * struct i915_deps - Collect dependencies into a single dma-fence
+ * @single: Storage for pointer if the collection is a single fence.
+ * @fences: Allocated array of fence pointers if more than a single fence;
+ * otherwise points to the address of @single.
+ * @num_deps: Current number of dependency fences.
+ * @fences_size: Size of the @fences array in number of pointers.
+ * @gfp: Allocation mode.
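+ *
+ * (Editor's sketch, not part of the patch: @fences initially points at
+ * &@single via i915_deps_reset_fences(), so a one-fence collection never
+ * allocates; i915_deps_grow() only switches to a kmalloc_array() buffer
+ * once a second fence is added. Typical usage, with bo and ctx as a
+ * hypothetical TTM object and operation context, might be:
+ *
+ *	struct i915_deps deps;
+ *	int err;
+ *
+ *	i915_deps_init(&deps, GFP_KERNEL);
+ *	err = i915_deps_add_resv(&deps, bo->base.resv, &ctx);
+ *	if (!err) {
+ *		err = i915_deps_sync(&deps, &ctx);
+ *		i915_deps_fini(&deps);
+ *	}
+ *
+ * with no i915_deps_fini() on the error path, since the add_* helpers
+ * finalize the collection themselves on failure.)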
+ */ +struct i915_deps { + struct dma_fence *single; + struct dma_fence **fences; + unsigned int num_deps; + unsigned int fences_size; + gfp_t gfp; +}; + +void i915_deps_init(struct i915_deps *deps, gfp_t gfp); + +void i915_deps_fini(struct i915_deps *deps); + +int i915_deps_add_dependency(struct i915_deps *deps, + struct dma_fence *fence, + const struct ttm_operation_ctx *ctx); + +int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv, + const struct ttm_operation_ctx *ctx); + +int i915_deps_sync(const struct i915_deps *deps, + const struct ttm_operation_ctx *ctx); +#endif diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index bbc99fc5888f..95174938b160 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -82,7 +82,6 @@ #include "i915_suspend.h" #include "i915_switcheroo.h" #include "i915_sysfs.h" -#include "i915_trace.h" #include "i915_vgpu.h" #include "intel_dram.h" #include "intel_gvt.h" @@ -292,7 +291,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) static void sanitize_gpu(struct drm_i915_private *i915) { if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) - __intel_gt_reset(&i915->gt, ALL_ENGINES); + __intel_gt_reset(to_gt(i915), ALL_ENGINES); } /** @@ -315,8 +314,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_device_info_subplatform_init(dev_priv); intel_step_init(dev_priv); + intel_gt_init_early(to_gt(dev_priv), dev_priv); intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug); - intel_uncore_init_early(&dev_priv->uncore, dev_priv); + intel_uncore_init_early(&dev_priv->uncore, to_gt(dev_priv)); spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); @@ -347,7 +347,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_wopcm_init_early(&dev_priv->wopcm); - intel_gt_init_early(&dev_priv->gt, dev_priv); + __intel_gt_init_early(to_gt(dev_priv), dev_priv); i915_gem_init_early(dev_priv); @@ -368,7 +368,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) err_gem: i915_gem_cleanup_early(dev_priv); - intel_gt_driver_late_release(&dev_priv->gt); + intel_gt_driver_late_release(to_gt(dev_priv)); intel_region_ttm_device_fini(dev_priv); err_ttm: vlv_suspend_cleanup(dev_priv); @@ -387,7 +387,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv) intel_irq_fini(dev_priv); intel_power_domains_cleanup(dev_priv); i915_gem_cleanup_early(dev_priv); - intel_gt_driver_late_release(&dev_priv->gt); + intel_gt_driver_late_release(to_gt(dev_priv)); intel_region_ttm_device_fini(dev_priv); vlv_suspend_cleanup(dev_priv); i915_workqueues_cleanup(dev_priv); @@ -430,7 +430,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) intel_setup_mchbar(dev_priv); intel_device_info_runtime_init(dev_priv); - ret = intel_gt_init_mmio(&dev_priv->gt); + ret = intel_gt_init_mmio(to_gt(dev_priv)); if (ret) goto err_uncore; @@ -587,9 +587,9 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (ret) goto err_ggtt; - intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt); + intel_gt_init_hw_early(to_gt(dev_priv), &dev_priv->ggtt); - ret = intel_gt_probe_lmem(&dev_priv->gt); + ret = intel_gt_probe_lmem(to_gt(dev_priv)); if (ret) goto err_mem_regions; @@ -702,7 +702,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) /* Depends on sysfs having been initialized */ i915_perf_register(dev_priv); - 
intel_gt_driver_register(&dev_priv->gt); + intel_gt_driver_register(to_gt(dev_priv)); intel_display_driver_register(dev_priv); @@ -730,7 +730,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) intel_display_driver_unregister(dev_priv); - intel_gt_driver_unregister(&dev_priv->gt); + intel_gt_driver_unregister(to_gt(dev_priv)); i915_perf_unregister(dev_priv); i915_pmu_unregister(dev_priv); @@ -763,7 +763,7 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) intel_device_info_print_static(INTEL_INFO(dev_priv), &p); intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p); i915_print_iommu_status(dev_priv, &p); - intel_gt_info_print(&dev_priv->gt.info, &p); + intel_gt_info_print(&to_gt(dev_priv)->info, &p); } if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) @@ -1385,7 +1385,7 @@ static int i915_drm_resume_early(struct drm_device *dev) intel_uncore_resume_early(&dev_priv->uncore); - intel_gt_check_and_clear_faults(&dev_priv->gt); + intel_gt_check_and_clear_faults(to_gt(dev_priv)); intel_display_power_resume_early(dev_priv); @@ -1568,7 +1568,7 @@ static int intel_runtime_suspend(struct device *kdev) */ i915_gem_runtime_suspend(dev_priv); - intel_gt_runtime_suspend(&dev_priv->gt); + intel_gt_runtime_suspend(to_gt(dev_priv)); intel_runtime_pm_disable_interrupts(dev_priv); @@ -1584,7 +1584,7 @@ static int intel_runtime_suspend(struct device *kdev) intel_runtime_pm_enable_interrupts(dev_priv); - intel_gt_runtime_resume(&dev_priv->gt); + intel_gt_runtime_resume(to_gt(dev_priv)); enable_rpm_wakeref_asserts(rpm); @@ -1672,7 +1672,7 @@ static int intel_runtime_resume(struct device *kdev) * No point of rolling back things in case of an error, as the best * we can do is to hope that things will still work (and disable RPM). */ - intel_gt_runtime_resume(&dev_priv->gt); + intel_gt_runtime_resume(to_gt(dev_priv)); /* * On VLV/CHV display interrupts are part of the display diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d99e020773ac..0c70ab08fc0c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -89,6 +89,7 @@ #include "intel_device_info.h" #include "intel_memory_region.h" #include "intel_pch.h" +#include "intel_pm_types.h" #include "intel_runtime_pm.h" #include "intel_step.h" #include "intel_uncore.h" @@ -116,30 +117,6 @@ struct drm_i915_gem_object; -enum hpd_pin { - HPD_NONE = 0, - HPD_TV = HPD_NONE, /* TV is known to be unreliable */ - HPD_CRT, - HPD_SDVO_B, - HPD_SDVO_C, - HPD_PORT_A, - HPD_PORT_B, - HPD_PORT_C, - HPD_PORT_D, - HPD_PORT_E, - HPD_PORT_TC1, - HPD_PORT_TC2, - HPD_PORT_TC3, - HPD_PORT_TC4, - HPD_PORT_TC5, - HPD_PORT_TC6, - - HPD_NUM_PINS -}; - -#define for_each_hpd_pin(__pin) \ - for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) - /* Threshold == 5 for long IRQs, 50 for short */ #define HPD_STORM_DEFAULT_THRESHOLD 50 @@ -399,106 +376,8 @@ struct drm_i915_display_funcs { void (*commit_modeset_enables)(struct intel_atomic_state *state); }; -struct intel_fbc_funcs; - #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ -struct intel_fbc { - struct drm_i915_private *i915; - const struct intel_fbc_funcs *funcs; - - /* This is always the inner lock when overlapping with struct_mutex and - * it's the outer lock when overlapping with stolen_lock. 
*/ - struct mutex lock; - unsigned int possible_framebuffer_bits; - unsigned int busy_bits; - struct intel_crtc *crtc; - - struct drm_mm_node compressed_fb; - struct drm_mm_node compressed_llb; - - u8 limit; - - bool false_color; - - bool active; - bool activated; - bool flip_pending; - - bool underrun_detected; - struct work_struct underrun_work; - - /* - * Due to the atomic rules we can't access some structures without the - * appropriate locking, so we cache information here in order to avoid - * these problems. - */ - struct intel_fbc_state_cache { - struct { - unsigned int mode_flags; - u32 hsw_bdw_pixel_rate; - } crtc; - - struct { - unsigned int rotation; - int src_w; - int src_h; - bool visible; - /* - * Display surface base address adjustement for - * pageflips. Note that on gen4+ this only adjusts up - * to a tile, offsets within a tile are handled in - * the hw itself (with the TILEOFF register). - */ - int adjusted_x; - int adjusted_y; - - u16 pixel_blend_mode; - } plane; - - struct { - const struct drm_format_info *format; - unsigned int stride; - u64 modifier; - } fb; - - unsigned int fence_y_offset; - u16 interval; - s8 fence_id; - bool psr2_active; - } state_cache; - - /* - * This structure contains everything that's relevant to program the - * hardware registers. When we want to figure out if we need to disable - * and re-enable FBC for a new configuration we just check if there's - * something different in the struct. The genx_fbc_activate functions - * are supposed to read from it in order to program the registers. - */ - struct intel_fbc_reg_params { - struct { - enum pipe pipe; - enum i9xx_plane_id i9xx_plane; - } crtc; - - struct { - const struct drm_format_info *format; - unsigned int stride; - u64 modifier; - } fb; - - unsigned int cfb_stride; - unsigned int cfb_size; - unsigned int fence_y_offset; - u16 override_cfb_stride; - u16 interval; - s8 fence_id; - bool plane_visible; - } params; - - const char *no_fbc_reason; -}; - /* * HIGH_RR is the highest eDP panel refresh rate read from EDID * LOW_RR is the lowest eDP panel refresh rate found from EDID @@ -535,7 +414,6 @@ struct i915_drrs { #define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8) struct intel_fbdev; -struct intel_fbc_work; struct intel_gmbus { struct i2c_adapter adapter; @@ -595,7 +473,7 @@ struct i915_gem_mm { * List of objects which are pending destruction. */ struct llist_head free_list; - struct work_struct free_work; + struct delayed_work free_work; /** * Count of objects pending destructions. Used to skip needlessly * waiting on an RCU barrier if no objects are waiting to be freed. 
@@ -730,69 +608,6 @@ struct intel_vbt_data { struct sdvo_device_mapping sdvo_mappings[2]; }; -enum intel_ddb_partitioning { - INTEL_DDB_PART_1_2, - INTEL_DDB_PART_5_6, /* IVB+ */ -}; - -struct ilk_wm_values { - u32 wm_pipe[3]; - u32 wm_lp[3]; - u32 wm_lp_spr[3]; - bool enable_fbc_wm; - enum intel_ddb_partitioning partitioning; -}; - -struct g4x_pipe_wm { - u16 plane[I915_MAX_PLANES]; - u16 fbc; -}; - -struct g4x_sr_wm { - u16 plane; - u16 cursor; - u16 fbc; -}; - -struct vlv_wm_ddl_values { - u8 plane[I915_MAX_PLANES]; -}; - -struct vlv_wm_values { - struct g4x_pipe_wm pipe[3]; - struct g4x_sr_wm sr; - struct vlv_wm_ddl_values ddl[3]; - u8 level; - bool cxsr; -}; - -struct g4x_wm_values { - struct g4x_pipe_wm pipe[2]; - struct g4x_sr_wm sr; - struct g4x_sr_wm hpll; - bool cxsr; - bool hpll_en; - bool fbc_en; -}; - -struct skl_ddb_entry { - u16 start, end; /* in number of blocks, 'end' is exclusive */ -}; - -static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry) -{ - return entry->end - entry->start; -} - -static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, - const struct skl_ddb_entry *e2) -{ - if (e1->start == e2->start && e1->end == e2->end) - return true; - - return false; -} - struct i915_frontbuffer_tracking { spinlock_t lock; @@ -934,7 +749,7 @@ struct drm_i915_private { u32 pipestat_irq_mask[I915_MAX_PIPES]; struct i915_hotplug hotplug; - struct intel_fbc fbc; + struct intel_fbc *fbc; struct i915_drrs drrs; struct intel_opregion opregion; struct intel_vbt_data vbt; @@ -1029,9 +844,6 @@ struct drm_i915_private { /* Kernel Modesetting */ - struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; - struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; - /** * dpll and cdclk state is protected by connection_mutex * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll. 
@@ -1193,7 +1005,7 @@ struct drm_i915_private { struct i915_perf perf; /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ - struct intel_gt gt; + struct intel_gt gt0; struct { struct i915_gem_contexts { @@ -1265,6 +1077,11 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) return pci_get_drvdata(pdev); } +static inline struct intel_gt *to_gt(struct drm_i915_private *i915) +{ + return &i915->gt0; +} + /* Simple iterator over all initialised engines */ #define for_each_engine(engine__, dev_priv__, id__) \ for ((id__) = 0; \ @@ -1469,6 +1286,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10) #define IS_DG2_G11(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11) +#define IS_ADLS_RPLS(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev_priv) \ @@ -1598,7 +1417,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \ IS_GRAPHICS_STEP(__i915, since, until)) -#define IS_DG2_DISP_STEP(__i915, since, until) \ +#define IS_DG2_DISPLAY_STEP(__i915, since, until) \ (IS_DG2(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) @@ -1695,7 +1514,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_PSR_HW_TRACKING(dev_priv) \ (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking) #define HAS_PSR2_SEL_FETCH(dev_priv) (GRAPHICS_VER(dev_priv) >= 12) -#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0) +#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0) #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) #define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p) @@ -1713,6 +1532,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_MSLICES(dev_priv) \ (INTEL_INFO(dev_priv)->has_mslices) +/* + * Set this flag, when platform requires 64K GTT page sizes or larger for + * device local memory access. Also this flag implies that we require or + * at least support the compact PT layout for the ppGTT when using the 64K + * GTT pages. + */ +#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages) + #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i)) @@ -1726,7 +1553,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_PXP(dev_priv) ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \ INTEL_INFO(dev_priv)->has_pxp) && \ - VDBOX_MASK(&dev_priv->gt)) + VDBOX_MASK(to_gt(dev_priv))) #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch) @@ -1740,9 +1567,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define GT_FREQUENCY_MULTIPLIER 50 #define GEN9_FREQ_SCALER 3 -#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask)) +#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask)) -#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0) +#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0) #define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 11) @@ -1809,7 +1636,7 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) * armed the work again. 
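 *
 * (Editor's note, not part of the patch: mm.free_work changes from a
 * struct work_struct to a struct delayed_work in this commit -- see the
 * i915_gem_mm hunk above -- so the drain below switches from flush_work()
 * to flush_delayed_work() to also cover a queued-but-delayed worker.)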
*/ while (atomic_read(&i915->mm.free_count)) { - flush_work(&i915->mm.free_work); + flush_delayed_work(&i915->mm.free_work); flush_delayed_work(&i915->bdev.wq); rcu_barrier(); } @@ -1843,13 +1670,10 @@ i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view, u64 size, u64 alignment, u64 flags); -static inline struct i915_vma * __must_check +struct i915_vma * __must_check i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view, - u64 size, u64 alignment, u64 flags) -{ - return i915_gem_object_ggtt_pin_ww(obj, NULL, view, size, alignment, flags); -} + u64 size, u64 alignment, u64 flags); int i915_gem_object_unbind(struct drm_i915_gem_object *obj, unsigned long flags); @@ -1968,14 +1792,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -/* i915_mm.c */ -int remap_io_mapping(struct vm_area_struct *vma, - unsigned long addr, unsigned long pfn, unsigned long size, - struct io_mapping *iomap); -int remap_io_sg(struct vm_area_struct *vma, - unsigned long addr, unsigned long size, - struct scatterlist *sgl, resource_size_t iobase); - static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) { if (GRAPHICS_VER(i915) >= 11) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 527228d4da7e..915bf431f320 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -877,6 +877,8 @@ i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj, struct i915_vma *vma; int ret; + GEM_WARN_ON(!ww); + if (flags & PIN_MAPPABLE && (!view || view->type == I915_GGTT_VIEW_NORMAL)) { /* @@ -936,10 +938,7 @@ new_vma: return ERR_PTR(ret); } - if (ww) - ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL); - else - ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); + ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL); if (ret) return ERR_PTR(ret); @@ -959,6 +958,29 @@ new_vma: return vma; } +struct i915_vma * __must_check +i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view, + u64 size, u64 alignment, u64 flags) +{ + struct i915_gem_ww_ctx ww; + struct i915_vma *ret; + int err; + + for_i915_gem_ww(&ww, err, true) { + err = i915_gem_object_lock(obj, &ww); + if (err) + continue; + + ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size, + alignment, flags); + if (IS_ERR(ret)) + err = PTR_ERR(ret); + } + + return err ? ERR_PTR(err) : ret; +} + int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -1049,7 +1071,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) return ret; - intel_uc_fetch_firmwares(&dev_priv->gt.uc); + intel_uc_fetch_firmwares(&to_gt(dev_priv)->uc); intel_wopcm_init(&dev_priv->wopcm); ret = i915_init_ggtt(dev_priv); @@ -1069,7 +1091,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) */ intel_init_clock_gating(dev_priv); - ret = intel_gt_init(&dev_priv->gt); + ret = intel_gt_init(to_gt(dev_priv)); if (ret) goto err_unlock; @@ -1085,7 +1107,7 @@ err_unlock: i915_gem_drain_workqueue(dev_priv); if (ret != -EIO) - intel_uc_cleanup_firmwares(&dev_priv->gt.uc); + intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc); if (ret == -EIO) { /* @@ -1093,10 +1115,10 @@ err_unlock: * as wedged. But we only want to do this when the GPU is angry, * for all other failure, such as an allocation failure, bail. 
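With the hunk above, i915_gem_object_ggtt_pin() stops being an inline wrapper that passed a NULL ww context and instead opens its own ww transaction, which is why the NULL-tolerant i915_vma_pin() fallback could be deleted and a GEM_WARN_ON(!ww) added. The for_i915_gem_ww() loop it now uses retries its body after a -EDEADLK backoff and otherwise exits with the final error. A standalone sketch of the pattern, with pin_example() as a hypothetical caller:

    static int pin_example(struct drm_i915_gem_object *obj)
    {
            struct i915_gem_ww_ctx ww;
            int err;

            for_i915_gem_ww(&ww, err, true) {       /* true: interruptible */
                    err = i915_gem_object_lock(obj, &ww);
                    if (err)
                            continue;               /* -EDEADLK: backoff and retry */

                    /* ... take further object locks, do the pinning work ... */
            }

            return err;                             /* 0 on success */
    }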
*/ - if (!intel_gt_is_wedged(&dev_priv->gt)) { + if (!intel_gt_is_wedged(to_gt(dev_priv))) { i915_probe_error(dev_priv, "Failed to initialize GPU, declaring it wedged!\n"); - intel_gt_set_wedged(&dev_priv->gt); + intel_gt_set_wedged(to_gt(dev_priv)); } /* Minimal basic recovery for KMS */ @@ -1127,7 +1149,7 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv) intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref); i915_gem_suspend_late(dev_priv); - intel_gt_driver_remove(&dev_priv->gt); + intel_gt_driver_remove(to_gt(dev_priv)); dev_priv->uabi_engines = RB_ROOT; /* Flush any outstanding unpin_work. */ @@ -1138,9 +1160,9 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv) void i915_gem_driver_release(struct drm_i915_private *dev_priv) { - intel_gt_driver_release(&dev_priv->gt); + intel_gt_driver_release(to_gt(dev_priv)); - intel_uc_cleanup_firmwares(&dev_priv->gt.uc); + intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc); i915_gem_drain_freed_objects(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 77490cb5ff9c..7f80ad247bc8 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -13,7 +13,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, { struct drm_i915_private *i915 = to_i915(dev); struct pci_dev *pdev = to_pci_dev(dev->dev); - const struct sseu_dev_info *sseu = &i915->gt.info.sseu; + const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu; drm_i915_getparam_t *param = data; int value = 0; @@ -82,8 +82,8 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, break; case I915_PARAM_HAS_GPU_RESET: value = i915->params.enable_hangcheck && - intel_has_gpu_reset(&i915->gt); - if (value && intel_has_reset_engine(&i915->gt)) + intel_has_gpu_reset(to_gt(i915)); + if (value && intel_has_reset_engine(to_gt(i915))) value = 2; break; case I915_PARAM_HAS_RESOURCE_STREAMER: @@ -96,7 +96,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, value = sseu->min_eu_in_pool; break; case I915_PARAM_HUC_STATUS: - value = intel_huc_check_status(&i915->gt.uc.huc); + value = intel_huc_check_status(&to_gt(i915)->uc.huc); if (value < 0) return value; break; @@ -158,7 +158,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, return -ENODEV; break; case I915_PARAM_CS_TIMESTAMP_FREQUENCY: - value = i915->gt.clock_frequency; + value = to_gt(i915)->clock_frequency; break; case I915_PARAM_MMAP_GTT_COHERENT: value = INTEL_INFO(i915)->has_coherent_ggtt; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 96d2d99f5b98..5ae812d60abe 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -505,7 +505,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m, const char *header, const struct i915_gem_context_coredump *ctx) { - const u32 period = m->i915->gt.clock_period_ns; + const u32 period = to_gt(m->i915)->clock_period_ns; err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n", header, ctx->comm, ctx->pid, ctx->sched_attr.priority, @@ -1415,18 +1415,15 @@ static struct i915_vma_coredump * create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma, const char *name, struct i915_vma_compress *compress) { - struct i915_vma_coredump *ret = NULL; + struct i915_vma_coredump *ret; struct i915_vma_snapshot tmp; - bool lockdep_cookie; if (!vma) return NULL; + GEM_WARN_ON(!i915_vma_is_pinned(vma)); 
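The i915_getparam_ioctl() hunks above only reroute the kernel side through to_gt(); the ioctl ABI itself is untouched, so existing userspace keeps working unmodified. For reference, a hedged sketch of how such a parameter is read through the well-known libdrm interface (query_huc_status() is an illustrative name, error handling trimmed):

    #include <errno.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    static int query_huc_status(int drm_fd)
    {
            int value = 0;
            drm_i915_getparam_t gp = {
                    .param = I915_PARAM_HUC_STATUS,
                    .value = &value,
            };

            if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return -errno;

            return value;   /* filled in from intel_huc_check_status() */
    }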
i915_vma_snapshot_init_onstack(&tmp, vma, name); - if (i915_vma_snapshot_resource_pin(&tmp, &lockdep_cookie)) { - ret = i915_vma_coredump_create(gt, &tmp, compress); - i915_vma_snapshot_resource_unpin(&tmp, lockdep_cookie); - } + ret = i915_vma_coredump_create(gt, &tmp, compress); i915_vma_snapshot_put_onstack(&tmp); return ret; @@ -1849,7 +1846,7 @@ i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp) error->time = ktime_get_real(); error->boottime = ktime_get_boottime(); - error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time); + error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time); error->capture = jiffies; capture_gen(error); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 6aea159abc50..21f75b069fa8 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -35,6 +35,7 @@ #include <drm/drm_drv.h> #include "display/intel_de.h" +#include "display/intel_display_trace.h" #include "display/intel_display_types.h" #include "display/intel_fifo_underrun.h" #include "display/intel_hotplug.h" @@ -49,7 +50,6 @@ #include "i915_drv.h" #include "i915_irq.h" -#include "i915_trace.h" #include "intel_pm.h" /** @@ -224,7 +224,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) static void intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); drm_crtc_handle_vblank(&crtc->base); } @@ -1040,7 +1040,7 @@ static void ivb_parity_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), l3_parity.error_work); - struct intel_gt *gt = &dev_priv->gt; + struct intel_gt *gt = to_gt(dev_priv); u32 error_status, row, bank, subbank; char *parity_event[6]; u32 misccpctl; @@ -1318,7 +1318,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, u32 crc2, u32 crc3, u32 crc4) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; @@ -1357,7 +1357,7 @@ display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, static void flip_done_handler(struct drm_i915_private *i915, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); struct drm_crtc_state *crtc_state = crtc->base.state; struct drm_pending_vblank_event *e = crtc_state->event; struct drm_device *dev = &i915->drm; @@ -1718,9 +1718,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); if (gt_iir) - gen6_gt_irq_handler(&dev_priv->gt, gt_iir); + gen6_gt_irq_handler(to_gt(dev_priv), gt_iir); if (pm_iir) - gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir); + gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir); if (hotplug_status) i9xx_hpd_irq_handler(dev_priv, hotplug_status); @@ -1777,7 +1777,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) ier = intel_uncore_read(&dev_priv->uncore, VLV_IER); intel_uncore_write(&dev_priv->uncore, VLV_IER, 0); - gen8_gt_irq_handler(&dev_priv->gt, master_ctl); + gen8_gt_irq_handler(to_gt(dev_priv), master_ctl); if (iir & I915_DISPLAY_PORT_INTERRUPT) hotplug_status = i9xx_hpd_irq_ack(dev_priv); @@ -2108,7 +2108,7 @@ static 
void ilk_display_irq_handler(struct drm_i915_private *dev_priv, } if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT) - gen5_rps_irq_handler(&dev_priv->gt.rps); + gen5_rps_irq_handler(&to_gt(dev_priv)->rps); } static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, @@ -2189,9 +2189,9 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg) if (gt_iir) { raw_reg_write(regs, GTIIR, gt_iir); if (GRAPHICS_VER(i915) >= 6) - gen6_gt_irq_handler(&i915->gt, gt_iir); + gen6_gt_irq_handler(to_gt(i915), gt_iir); else - gen5_gt_irq_handler(&i915->gt, gt_iir); + gen5_gt_irq_handler(to_gt(i915), gt_iir); ret = IRQ_HANDLED; } @@ -2209,7 +2209,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg) u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR); if (pm_iir) { raw_reg_write(regs, GEN6_PMIIR, pm_iir); - gen6_rps_irq_handler(&i915->gt.rps, pm_iir); + gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir); ret = IRQ_HANDLED; } } @@ -2635,7 +2635,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) } /* Find, queue (onto bottom-halves), then clear each source */ - gen8_gt_irq_handler(&dev_priv->gt, master_ctl); + gen8_gt_irq_handler(to_gt(dev_priv), master_ctl); /* IRQs are synced during runtime_suspend, we don't require a wakeref */ if (master_ctl & ~GEN8_GT_IRQS) { @@ -2715,7 +2715,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) { struct drm_i915_private *i915 = arg; void __iomem * const regs = i915->uncore.regs; - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); u32 master_ctl; u32 gu_misc_iir; @@ -2771,7 +2771,7 @@ static inline void dg1_master_intr_enable(void __iomem * const regs) static irqreturn_t dg1_irq_handler(int irq, void *arg) { struct drm_i915_private * const i915 = arg; - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); void __iomem * const regs = gt->uncore->regs; u32 master_tile_ctl, master_ctl; u32 gu_misc_iir; @@ -3075,7 +3075,7 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv) intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); } - gen5_gt_irq_reset(&dev_priv->gt); + gen5_gt_irq_reset(to_gt(dev_priv)); ibx_irq_reset(dev_priv); } @@ -3085,7 +3085,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0); intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); - gen5_gt_irq_reset(&dev_priv->gt); + gen5_gt_irq_reset(to_gt(dev_priv)); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display_irqs_enabled) @@ -3119,7 +3119,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) gen8_master_intr_disable(dev_priv->uncore.regs); - gen8_gt_irq_reset(&dev_priv->gt); + gen8_gt_irq_reset(to_gt(dev_priv)); gen8_display_irq_reset(dev_priv); GEN3_IRQ_RESET(uncore, GEN8_PCU_); @@ -3173,7 +3173,7 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) static void gen11_irq_reset(struct drm_i915_private *dev_priv) { - struct intel_gt *gt = &dev_priv->gt; + struct intel_gt *gt = to_gt(dev_priv); struct intel_uncore *uncore = gt->uncore; gen11_master_intr_disable(dev_priv->uncore.regs); @@ -3187,7 +3187,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) static void dg1_irq_reset(struct drm_i915_private *dev_priv) { - struct intel_gt *gt = &dev_priv->gt; + struct intel_gt *gt = to_gt(dev_priv); struct intel_uncore *uncore = gt->uncore; dg1_master_intr_disable(dev_priv->uncore.regs); @@ -3252,7 +3252,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv) 
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0); intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); - gen8_gt_irq_reset(&dev_priv->gt); + gen8_gt_irq_reset(to_gt(dev_priv)); GEN3_IRQ_RESET(uncore, GEN8_PCU_); @@ -3709,7 +3709,7 @@ static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) ibx_irq_postinstall(dev_priv); - gen5_gt_irq_postinstall(&dev_priv->gt); + gen5_gt_irq_postinstall(to_gt(dev_priv)); GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask, display_mask | extra_mask); @@ -3746,7 +3746,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) { - gen5_gt_irq_postinstall(&dev_priv->gt); + gen5_gt_irq_postinstall(to_gt(dev_priv)); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display_irqs_enabled) @@ -3852,7 +3852,7 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) else if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_postinstall(dev_priv); - gen8_gt_irq_postinstall(&dev_priv->gt); + gen8_gt_irq_postinstall(to_gt(dev_priv)); gen8_de_irq_postinstall(dev_priv); gen8_master_intr_enable(dev_priv->uncore.regs); @@ -3871,7 +3871,7 @@ static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv) static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) { - struct intel_gt *gt = &dev_priv->gt; + struct intel_gt *gt = to_gt(dev_priv); struct intel_uncore *uncore = gt->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; @@ -3889,7 +3889,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) { - struct intel_gt *gt = &dev_priv->gt; + struct intel_gt *gt = to_gt(dev_priv); struct intel_uncore *uncore = gt->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; @@ -3910,7 +3910,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) { - gen8_gt_irq_postinstall(&dev_priv->gt); + gen8_gt_irq_postinstall(to_gt(dev_priv)); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display_irqs_enabled) @@ -4073,7 +4073,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir); + intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir); if (iir & I915_MASTER_ERROR_INTERRUPT) i8xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -4181,7 +4181,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir); + intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -4326,11 +4326,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_cs_irq(dev_priv->gt.engine[RCS0], + intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir); if (iir & I915_BSD_USER_INTERRUPT) - intel_engine_cs_irq(dev_priv->gt.engine[VCS0], + intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0], iir >> 25); if (iir & I915_MASTER_ERROR_INTERRUPT) @@ -4381,7 +4381,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */ if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11) - 
dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16; + to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16; if (!HAS_DISPLAY(dev_priv)) return; diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c index 666808cb3a32..7998bc74ab49 100644 --- a/drivers/gpu/drm/i915/i915_mm.c +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -27,6 +27,7 @@ #include "i915_drv.h" +#include "i915_mm.h" struct remap_pfn { struct mm_struct *mm; @@ -37,17 +38,6 @@ struct remap_pfn { resource_size_t iobase; }; -static int remap_pfn(pte_t *pte, unsigned long addr, void *data) -{ - struct remap_pfn *r = data; - - /* Special PTE are not associated with any struct page */ - set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot))); - r->pfn++; - - return 0; -} - #define use_dma(io) ((io) != -1) static inline unsigned long sgt_pfn(const struct remap_pfn *r) @@ -77,6 +67,20 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data) return 0; } +#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP) + +#if IS_ENABLED(CONFIG_X86) +static int remap_pfn(pte_t *pte, unsigned long addr, void *data) +{ + struct remap_pfn *r = data; + + /* Special PTE are not associated with any struct page */ + set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot))); + r->pfn++; + + return 0; +} + /** * remap_io_mapping - remap an IO mapping to userspace * @vma: user vma to map to @@ -94,7 +98,6 @@ int remap_io_mapping(struct vm_area_struct *vma, struct remap_pfn r; int err; -#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP) GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ @@ -111,6 +114,7 @@ int remap_io_mapping(struct vm_area_struct *vma, return 0; } +#endif /** * remap_io_sg - remap an IO mapping to userspace diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h new file mode 100644 index 000000000000..76f1d53bdf34 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_mm.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_MM_H__ +#define __I915_MM_H__ + +#include <linux/types.h> + +struct vm_area_struct; +struct io_mapping; +struct scatterlist; + +#if IS_ENABLED(CONFIG_X86) +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap); +#else +static inline +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap) +{ + pr_err("Architecture has no %s() and shouldn't be calling this function\n", __func__); + WARN_ON_ONCE(1); + return 0; +} +#endif + +int remap_io_sg(struct vm_area_struct *vma, + unsigned long addr, unsigned long size, + struct scatterlist *sgl, resource_size_t iobase); + +#endif /* __I915_MM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index e07f4cfea63a..525ae832aa9a 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -140,6 +140,9 @@ i915_param_named_unsafe(invert_brightness, int, 0400, i915_param_named(disable_display, bool, 0400, "Disable display (default: false)"); +i915_param_named(memtest, bool, 0400, + "Perform a read/write test of all device memory on module load (default: off)"); + i915_param_named(mmio_debug, int, 0400, "Enable the MMIO debug code for the first N failures (default: off). 
" "This may negatively affect performance."); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 8d725b64592d..c9d53ff910a0 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -64,6 +64,7 @@ struct drm_printer; param(char *, guc_firmware_path, NULL, 0400) \ param(char *, huc_firmware_path, NULL, 0400) \ param(char *, dmc_firmware_path, NULL, 0400) \ + param(bool, memtest, false, 0400) \ param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \ param(int, edp_vswing, 0, 0400) \ param(unsigned int, reset, 3, 0600) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index f01cba4ec283..261294df535c 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -22,8 +22,6 @@ * */ -#include <linux/vga_switcheroo.h> - #include <drm/drm_drv.h> #include <drm/i915_pciids.h> @@ -164,8 +162,8 @@ #define I830_FEATURES \ GEN(2), \ .is_mobile = 1, \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_overlay = 1, \ .display.cursor_needs_physical = 1, \ .display.overlay_needs_physical = 1, \ @@ -185,8 +183,8 @@ #define I845_FEATURES \ GEN(2), \ - .pipe_mask = BIT(PIPE_A), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A), \ + .display.pipe_mask = BIT(PIPE_A), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A), \ .display.has_overlay = 1, \ .display.overlay_needs_physical = 1, \ .display.has_gmch = 1, \ @@ -227,8 +225,8 @@ static const struct intel_device_info i865g_info = { #define GEN3_FEATURES \ GEN(3), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ .platform_engine_mask = BIT(RCS0), \ @@ -317,8 +315,8 @@ static const struct intel_device_info pnv_m_info = { #define GEN4_FEATURES \ GEN(4), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ @@ -370,8 +368,8 @@ static const struct intel_device_info gm45_info = { #define GEN5_FEATURES \ GEN(5), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_snoop = true, \ @@ -400,8 +398,8 @@ static const struct intel_device_info ilk_m_info = { #define GEN6_FEATURES \ GEN(6), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ @@ -451,8 +449,8 @@ static const struct intel_device_info snb_m_gt2_info = { #define GEN7_FEATURES \ GEN(7), \ - .pipe_mask = 
BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ @@ -506,8 +504,8 @@ static const struct intel_device_info ivb_q_info = { GEN7_FEATURES, PLATFORM(INTEL_IVYBRIDGE), .gt = 2, - .pipe_mask = 0, /* legal, last one wins */ - .cpu_transcoder_mask = 0, + .display.pipe_mask = 0, /* legal, last one wins */ + .display.cpu_transcoder_mask = 0, .has_l3_dpf = 1, }; @@ -515,8 +513,8 @@ static const struct intel_device_info vlv_info = { PLATFORM(INTEL_VALLEYVIEW), GEN(7), .is_lp = 1, - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), .has_runtime_pm = 1, .has_rc6 = 1, .has_reset_engine = true, @@ -540,7 +538,7 @@ static const struct intel_device_info vlv_info = { #define G75_FEATURES \ GEN7_FEATURES, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \ .display.has_ddi = 1, \ .display.has_fpga_dbg = 1, \ @@ -610,8 +608,8 @@ static const struct intel_device_info bdw_gt3_info = { static const struct intel_device_info chv_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .display.has_hotplug = 1, .is_lp = 1, .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), @@ -688,8 +686,8 @@ static const struct intel_device_info skl_gt4_info = { .dbuf.slice_mask = BIT(DBUF_S1), \ .display.has_hotplug = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \ .has_64bit_reloc = 1, \ @@ -797,8 +795,8 @@ static const struct intel_device_info cml_gt2_info = { #define GEN11_FEATURES \ GEN9_FEATURES, \ GEN11_DEFAULT_PAGE_SIZES, \ - .abox_mask = BIT(0), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .display.abox_mask = BIT(0), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .pipe_offsets = { \ @@ -849,9 +847,9 @@ static const struct intel_device_info jsl_info = { #define GEN12_FEATURES \ GEN11_FEATURES, \ GEN(12), \ - .abox_mask = GENMASK(2, 1), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .display.abox_mask = GENMASK(2, 1), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | 
BIT(PIPE_C) | BIT(PIPE_D), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .pipe_offsets = { \ @@ -886,9 +884,9 @@ static const struct intel_device_info tgl_info = { static const struct intel_device_info rkl_info = { GEN12_FEATURES, PLATFORM(INTEL_ROCKETLAKE), - .abox_mask = BIT(0), - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + .display.abox_mask = BIT(0), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .display.has_hti = 1, .display.has_psr_hw_tracking = 0, @@ -908,7 +906,7 @@ static const struct intel_device_info dg1_info = { DGFX_FEATURES, .graphics.rel = 10, PLATFORM(INTEL_DG1), - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .require_force_probe = 1, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | @@ -920,7 +918,7 @@ static const struct intel_device_info dg1_info = { static const struct intel_device_info adl_s_info = { GEN12_FEATURES, PLATFORM(INTEL_ALDERLAKE_S), - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .display.has_hti = 1, .display.has_psr_hw_tracking = 0, .platform_engine_mask = @@ -937,8 +935,11 @@ static const struct intel_device_info adl_s_info = { } #define XE_LPD_FEATURES \ - .abox_mask = GENMASK(1, 0), \ - .color = { .degamma_lut_size = 0, .gamma_lut_size = 0 }, \ + .display.abox_mask = GENMASK(1, 0), \ + .color = { .degamma_lut_size = 128, .gamma_lut_size = 1024, \ + .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ + DRM_COLOR_LUT_EQUAL_CHANNELS, \ + }, \ .dbuf.size = 4096, \ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \ BIT(DBUF_S4), \ @@ -954,7 +955,7 @@ static const struct intel_device_info adl_s_info = { .display.has_ipc = 1, \ .display.has_psr = 1, \ .display.ver = 13, \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ @@ -977,8 +978,7 @@ static const struct intel_device_info adl_p_info = { GEN12_FEATURES, XE_LPD_FEATURES, PLATFORM(INTEL_ALDERLAKE_P), - .require_force_probe = 1, - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), .display.has_cdclk_crawl = 1, @@ -1027,7 +1027,7 @@ static const struct intel_device_info xehpsdv_info = { DGFX_FEATURES, PLATFORM(INTEL_XEHPSDV), .display = { }, - .pipe_mask = 0, + .has_64k_pages = 1, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) | @@ -1045,12 +1045,13 @@ static const struct intel_device_info dg2_info = { .graphics.rel = 55, .media.rel = 55, PLATFORM(INTEL_DG2), + .has_64k_pages = 1, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VECS1) | BIT(VCS0) | BIT(VCS2), .require_force_probe = 1, - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D), 
}; @@ -1131,6 +1132,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_ADLS_IDS(&adl_s_info), INTEL_ADLP_IDS(&adl_p_info), INTEL_DG1_IDS(&dg1_info), + INTEL_RPLS_IDS(&adl_s_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); @@ -1203,11 +1205,8 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (PCI_FUNC(pdev->devfn)) return -ENODEV; - /* - * apple-gmux is needed on dual GPU MacBook Pro - * to probe the panel if we're the inactive GPU. - */ - if (vga_switcheroo_client_probe_defer(pdev)) + /* Detect if we need to wait for other drivers early on */ + if (intel_modeset_probe_defer(pdev)) return -EPROBE_DEFER; err = i915_driver_probe(pdev, ent); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 2f01b8c0284c..e27f3b7cf094 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -4273,26 +4273,6 @@ static struct ctl_table oa_table[] = { {} }; -static struct ctl_table i915_root[] = { - { - .procname = "i915", - .maxlen = 0, - .mode = 0555, - .child = oa_table, - }, - {} -}; - -static struct ctl_table dev_root[] = { - { - .procname = "dev", - .maxlen = 0, - .mode = 0555, - .child = i915_root, - }, - {} -}; - static void oa_init_supported_formats(struct i915_perf *perf) { struct drm_i915_private *i915 = perf->i915; @@ -4443,7 +4423,7 @@ void i915_perf_init(struct drm_i915_private *i915) mutex_init(&perf->lock); /* Choose a representative limit */ - oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2; + oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2; mutex_init(&perf->metrics_lock); idr_init_base(&perf->metrics_idr, 1); @@ -4488,7 +4468,7 @@ static int destroy_config(int id, void *p, void *data) int i915_perf_sysctl_register(void) { - sysctl_header = register_sysctl_table(dev_root); + sysctl_header = register_sysctl("dev/i915", oa_table); return 0; } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 0b488d49694c..ea655161793e 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -210,8 +210,8 @@ static void init_rc6(struct i915_pmu *pmu) struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); intel_wakeref_t wakeref; - with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) { - pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); + with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref) { + pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(to_gt(i915)); pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = pmu->sample[__I915_SAMPLE_RC6].cur; pmu->sleep_last = ktime_get_raw(); @@ -222,7 +222,7 @@ static void park_rc6(struct drm_i915_private *i915) { struct i915_pmu *pmu = &i915->pmu; - pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); + pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(to_gt(i915)); pmu->sleep_last = ktime_get_raw(); } @@ -419,7 +419,7 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) struct drm_i915_private *i915 = container_of(hrtimer, struct drm_i915_private, pmu.timer); struct i915_pmu *pmu = &i915->pmu; - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); unsigned int period_ns; ktime_t now; @@ -476,7 +476,7 @@ engine_event_status(struct intel_engine_cs *engine, static int config_status(struct drm_i915_private *i915, u64 config) { - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); switch (config) { case I915_PMU_ACTUAL_FREQUENCY: @@ -601,10 +601,10 @@ static u64 __i915_pmu_event_read(struct perf_event 
*event) val = READ_ONCE(pmu->irq_count); break; case I915_PMU_RC6_RESIDENCY: - val = get_rc6(&i915->gt); + val = get_rc6(to_gt(i915)); break; case I915_PMU_SOFTWARE_GT_AWAKE_TIME: - val = ktime_to_ns(intel_gt_get_awake_time(&i915->gt)); + val = ktime_to_ns(intel_gt_get_awake_time(to_gt(i915))); break; } } diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 51b368be0fc4..2dfbc22857a3 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -31,7 +31,7 @@ static int copy_query_item(void *query_hdr, size_t query_sz, static int query_topology_info(struct drm_i915_private *dev_priv, struct drm_i915_query_item *query_item) { - const struct sseu_dev_info *sseu = &dev_priv->gt.info.sseu; + const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu; struct drm_i915_query_topology_info topo; u32 slice_length, subslice_length, eu_length, total_length; int ret; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3450818802c2..c32420cb8ed5 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2265,6 +2265,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define SNPS_PHY_MPLLB_DP2_MODE REG_BIT(9) #define SNPS_PHY_MPLLB_WORD_DIV2_EN REG_BIT(8) #define SNPS_PHY_MPLLB_TX_CLK_DIV REG_GENMASK(7, 5) +#define SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL REG_BIT(0) #define SNPS_PHY_MPLLB_FRACN1(phy) _MMIO_SNPS(phy, 0x168008) #define SNPS_PHY_MPLLB_FRACN_EN REG_BIT(31) @@ -2720,6 +2721,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28) #define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24) +#define GEN8_RTCR _MMIO(0x4260) +#define GEN8_M1TCR _MMIO(0x4264) +#define GEN8_M2TCR _MMIO(0x4268) +#define GEN8_BTCR _MMIO(0x426c) +#define GEN8_VTCR _MMIO(0x4270) + #if 0 #define PRB0_TAIL _MMIO(0x2030) #define PRB0_HEAD _MMIO(0x2034) @@ -2818,6 +2825,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define FAULT_VA_HIGH_BITS (0xf << 0) #define FAULT_GTT_SEL (1 << 4) +#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8) +#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc) +#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0) +#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4) + #define GEN12_AUX_ERR_DBG _MMIO(0x43f4) #define FPGA_DBG _MMIO(0x42300) @@ -6966,7 +6978,7 @@ enum { #define DVS_SOURCE_KEY (1 << 22) #define DVS_RGB_ORDER_XBGR (1 << 20) #define DVS_YUV_FORMAT_BT709 (1 << 18) -#define DVS_YUV_BYTE_ORDER_MASK (3 << 16) +#define DVS_YUV_ORDER_MASK (3 << 16) #define DVS_YUV_ORDER_YUYV (0 << 16) #define DVS_YUV_ORDER_UYVY (1 << 16) #define DVS_YUV_ORDER_YVYU (2 << 16) @@ -7045,7 +7057,7 @@ enum { #define SPRITE_RGB_ORDER_RGBX (1 << 20) /* only for 888 and 161616 */ #define SPRITE_YUV_TO_RGB_CSC_DISABLE (1 << 19) #define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) /* 0 is BT601 */ -#define SPRITE_YUV_BYTE_ORDER_MASK (3 << 16) +#define SPRITE_YUV_ORDER_MASK (3 << 16) #define SPRITE_YUV_ORDER_YUYV (0 << 16) #define SPRITE_YUV_ORDER_UYVY (1 << 16) #define SPRITE_YUV_ORDER_YVYU (2 << 16) @@ -7130,7 +7142,7 @@ enum { #define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */ #define SP_SOURCE_KEY (1 << 22) #define SP_YUV_FORMAT_BT709 (1 << 18) -#define SP_YUV_BYTE_ORDER_MASK (3 << 16) +#define SP_YUV_ORDER_MASK (3 << 16) #define SP_YUV_ORDER_YUYV (0 << 16) #define SP_YUV_ORDER_UYVY (1 << 16) #define SP_YUV_ORDER_YVYU (2 << 16) @@ -7271,10 +7283,10 @@ enum { #define PLANE_CTL_YUV420_Y_PLANE (1 << 19) #define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 
(1 << 18) #define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16) -#define PLANE_CTL_YUV422_YUYV (0 << 16) -#define PLANE_CTL_YUV422_UYVY (1 << 16) -#define PLANE_CTL_YUV422_YVYU (2 << 16) -#define PLANE_CTL_YUV422_VYUY (3 << 16) +#define PLANE_CTL_YUV422_ORDER_YUYV (0 << 16) +#define PLANE_CTL_YUV422_ORDER_UYVY (1 << 16) +#define PLANE_CTL_YUV422_ORDER_YVYU (2 << 16) +#define PLANE_CTL_YUV422_ORDER_VYUY (3 << 16) #define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15) #define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14) #define PLANE_CTL_CLEAR_COLOR_DISABLE (1 << 13) /* TGL+ */ @@ -7328,10 +7340,10 @@ enum { #define _PLANE_CUS_CTL_1_A 0x701c8 #define _PLANE_CUS_CTL_2_A 0x702c8 #define PLANE_CUS_ENABLE (1 << 31) -#define PLANE_CUS_PLANE_4_RKL (0 << 30) -#define PLANE_CUS_PLANE_5_RKL (1 << 30) -#define PLANE_CUS_PLANE_6 (0 << 30) -#define PLANE_CUS_PLANE_7 (1 << 30) +#define PLANE_CUS_Y_PLANE_4_RKL (0 << 30) +#define PLANE_CUS_Y_PLANE_5_RKL (1 << 30) +#define PLANE_CUS_Y_PLANE_6_ICL (0 << 30) +#define PLANE_CUS_Y_PLANE_7_ICL (1 << 30) #define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19) #define PLANE_CUS_HPHASE_0 (0 << 16) #define PLANE_CUS_HPHASE_0_25 (1 << 16) @@ -7363,12 +7375,12 @@ enum { #define _PLANE_NV12_BUF_CFG_1_A 0x70278 #define _PLANE_NV12_BUF_CFG_2_A 0x70378 -#define _PLANE_CC_VAL_1_B 0x711b4 -#define _PLANE_CC_VAL_2_B 0x712b4 -#define _PLANE_CC_VAL_1(pipe) _PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B) -#define _PLANE_CC_VAL_2(pipe) _PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B) -#define PLANE_CC_VAL(pipe, plane) \ - _MMIO_PLANE(plane, _PLANE_CC_VAL_1(pipe), _PLANE_CC_VAL_2(pipe)) +#define _PLANE_CC_VAL_1_B 0x711b4 +#define _PLANE_CC_VAL_2_B 0x712b4 +#define _PLANE_CC_VAL_1(pipe, dw) (_PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B) + (dw) * 4) +#define _PLANE_CC_VAL_2(pipe, dw) (_PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B) + (dw) * 4) +#define PLANE_CC_VAL(pipe, plane, dw) \ + _MMIO_PLANE((plane), _PLANE_CC_VAL_1((pipe), (dw)), _PLANE_CC_VAL_2((pipe), (dw))) /* Input CSC Register Definitions */ #define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0 @@ -8573,8 +8585,9 @@ enum { _PIPEB_CHICKEN) #define UNDERRUN_RECOVERY_DISABLE_ADLP REG_BIT(30) #define UNDERRUN_RECOVERY_ENABLE_DG2 REG_BIT(30) -#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15) -#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7) +#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU REG_BIT(15) +#define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12) +#define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7) #define VFLSKPD _MMIO(0x62a8) #define DIS_OVER_FETCH_CACHE REG_BIT(1) @@ -9415,6 +9428,7 @@ enum { #define GEN6_OFFSET(x) ((x) << 19) #define GEN6_AGGRESSIVE_TURBO (0 << 15) #define GEN9_SW_REQ_UNSLICE_RATIO_SHIFT 23 +#define GEN9_IGNORE_SLICE_RATIO (0 << 0) #define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C) #define GEN6_RC_CONTROL _MMIO(0xA090) @@ -9450,6 +9464,9 @@ enum { #define GEN6_RP_UP_BUSY_CONT (0x4 << 3) #define GEN6_RP_DOWN_IDLE_AVG (0x2 << 0) #define GEN6_RP_DOWN_IDLE_CONT (0x1 << 0) +#define GEN6_RPSWCTL_SHIFT 9 +#define GEN9_RPSWCTL_ENABLE (0x2 << GEN6_RPSWCTL_SHIFT) +#define GEN9_RPSWCTL_DISABLE (0x0 << GEN6_RPSWCTL_SHIFT) #define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C) #define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030) #define GEN6_RP_CUR_UP_EI _MMIO(0xA050) @@ -10654,6 +10671,14 @@ enum skl_power_gate { #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16) #define CDCLK_FREQ_DECIMAL_MASK (0x7ff) +/* CDCLK_SQUASH_CTL */ +#define CDCLK_SQUASH_CTL _MMIO(0x46008) +#define CDCLK_SQUASH_ENABLE REG_BIT(31) +#define CDCLK_SQUASH_WINDOW_SIZE_MASK REG_GENMASK(27, 24) +#define 
CDCLK_SQUASH_WINDOW_SIZE(x) REG_FIELD_PREP(CDCLK_SQUASH_WINDOW_SIZE_MASK, (x)) +#define CDCLK_SQUASH_WAVEFORM_MASK REG_GENMASK(15, 0) +#define CDCLK_SQUASH_WAVEFORM(x) REG_FIELD_PREP(CDCLK_SQUASH_WAVEFORM_MASK, (x)) + /* LCPLL_CTL */ #define LCPLL1_CTL _MMIO(0x46010) #define LCPLL2_CTL _MMIO(0x46014) @@ -11152,8 +11177,12 @@ enum skl_power_gate { _DKL_PHY2_BASE) + \ _DKL_TX_DPCNTL1) -#define _DKL_TX_DPCNTL2 0x2C8 -#define DKL_TX_DP20BITMODE (1 << 2) +#define _DKL_TX_DPCNTL2 0x2C8 +#define DKL_TX_DP20BITMODE REG_BIT(2) +#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK REG_GENMASK(4, 3) +#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK, (val)) +#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK REG_GENMASK(6, 5) +#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, (val)) #define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \ _DKL_PHY1_BASE, \ _DKL_PHY2_BASE) + \ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index ad175d662b4e..76cf5ac91e94 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -42,6 +42,7 @@ #include "gt/intel_rps.h" #include "i915_active.h" +#include "i915_deps.h" #include "i915_drv.h" #include "i915_trace.h" #include "intel_pm.h" @@ -308,6 +309,7 @@ void i915_request_free_capture_list(struct i915_capture_list *capture) struct i915_capture_list *next = capture->next; i915_vma_snapshot_put(capture->vma_snapshot); + kfree(capture); capture = next; } } @@ -1543,6 +1545,27 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) } /** + * i915_request_await_deps - set this request to (async) wait upon a struct + * i915_deps dma_fence collection + * @rq: request we are wishing to use + * @deps: The struct i915_deps containing the dependencies. + * + * Returns 0 if successful, negative error code on error. + */ +int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps) +{ + int i, err; + + for (i = 0; i < deps->num_deps; ++i) { + err = i915_request_await_dma_fence(rq, deps->fences[i]); + if (err) + return err; + } + + return 0; +} + +/** * i915_request_await_object - set this request to (async) wait upon a bo * @to: request we are wishing to use * @obj: object which may be in use on another ring. diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 0ed01979491b..170ee78c2858 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -47,6 +47,7 @@ struct drm_file; struct drm_i915_gem_object; struct drm_printer; +struct i915_deps; struct i915_request; #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) @@ -411,6 +412,7 @@ int i915_request_await_object(struct i915_request *to, bool write); int i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence); +int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps); int i915_request_await_execution(struct i915_request *rq, struct dma_fence *fence); @@ -657,7 +659,8 @@ i915_request_timeline(const struct i915_request *rq) { /* Valid only while the request is being constructed (or retired). 
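i915_request_await_deps() is new in this diff, and struct i915_deps is only forward-declared here (the definition lives in the newly included i915_deps.h). The await loop above implies the minimal shape the helper depends on; sketched below under the hypothetical name i915_deps_shape, purely for illustration, since the real struct may carry additional allocation state:

    struct i915_deps_shape {
            struct dma_fence **fences;      /* deps->fences[i] in the loop */
            unsigned int num_deps;          /* the loop bound */
    };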
*/ return rcu_dereference_protected(rq->timeline, - lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex)); + lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) || + test_bit(CONTEXT_IS_PARKING, &rq->context->flags)); } static inline struct i915_gem_context * diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 59d441cedc75..fae4d1f4f275 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -52,7 +52,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv, u64 res = 0; with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) - res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg); + res = intel_rc6_residency_us(&to_gt(dev_priv)->rc6, reg); return DIV_ROUND_CLOSEST_ULL(res, 1000); } @@ -260,7 +260,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &i915->gt.rps; + struct intel_rps *rps = &to_gt(i915)->rps; return sysfs_emit(buf, "%d\n", intel_rps_read_actual_frequency(rps)); } @@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &i915->gt.rps; + struct intel_rps *rps = &to_gt(i915)->rps; return sysfs_emit(buf, "%d\n", intel_rps_get_requested_frequency(rps)); } @@ -277,7 +277,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &i915->gt.rps; + struct intel_rps *rps = &to_gt(i915)->rps; return sysfs_emit(buf, "%d\n", intel_rps_get_boost_frequency(rps)); } @@ -287,7 +287,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, const char *buf, size_t count) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &dev_priv->gt.rps; + struct intel_rps *rps = &to_gt(dev_priv)->rps; ssize_t ret; u32 val; @@ -304,7 +304,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &dev_priv->gt.rps; + struct intel_rps *rps = &to_gt(dev_priv)->rps; return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->efficient_freq)); } @@ -312,7 +312,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct intel_gt *gt = &dev_priv->gt; + struct intel_gt *gt = to_gt(dev_priv); struct intel_rps *rps = >->rps; return sysfs_emit(buf, "%d\n", intel_rps_get_max_frequency(rps)); @@ -323,7 +323,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, const char *buf, size_t count) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct intel_gt *gt = &dev_priv->gt; + struct intel_gt *gt = to_gt(dev_priv); struct intel_rps *rps = >->rps; ssize_t ret; u32 val; @@ -340,7 +340,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); struct intel_rps *rps = 
>->rps; return sysfs_emit(buf, "%d\n", intel_rps_get_min_frequency(rps)); @@ -351,7 +351,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, const char *buf, size_t count) { struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &i915->gt.rps; + struct intel_rps *rps = &to_gt(i915)->rps; ssize_t ret; u32 val; @@ -381,7 +381,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &dev_priv->gt.rps; + struct intel_rps *rps = &to_gt(dev_priv)->rps; u32 val; if (attr == &dev_attr_gt_RP0_freq_mhz) diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 6b8fb6ffe8da..37b5c9e9d260 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -1,4 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i915 + #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #define _I915_TRACE_H_ @@ -8,582 +12,11 @@ #include <drm/drm_drv.h> -#include "display/intel_crtc.h" -#include "display/intel_display_types.h" #include "gt/intel_engine.h" #include "i915_drv.h" #include "i915_irq.h" -#undef TRACE_SYSTEM -#define TRACE_SYSTEM i915 -#define TRACE_INCLUDE_FILE i915_trace - -/* watermark/fifo updates */ - -TRACE_EVENT(intel_pipe_enable, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __array(u32, frame, 3) - __array(u32, scanline, 3) - __field(enum pipe, pipe) - ), - TP_fast_assign( - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc *it__; - for_each_intel_crtc(&dev_priv->drm, it__) { - __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); - __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); - } - __entry->pipe = crtc->pipe; - ), - - TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", - pipe_name(__entry->pipe), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) -); - -TRACE_EVENT(intel_pipe_disable, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __array(u32, frame, 3) - __array(u32, scanline, 3) - __field(enum pipe, pipe) - ), - - TP_fast_assign( - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc *it__; - for_each_intel_crtc(&dev_priv->drm, it__) { - __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); - __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); - } - __entry->pipe = crtc->pipe; - ), - - TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", - pipe_name(__entry->pipe), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) -); - -TRACE_EVENT(intel_pipe_crc, - TP_PROTO(struct intel_crtc *crtc, const u32 *crcs), - TP_ARGS(crtc, crcs), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __array(u32, crcs, 5) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - memcpy(__entry->crcs, crcs, 
sizeof(__entry->crcs)); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline, - __entry->crcs[0], __entry->crcs[1], __entry->crcs[2], - __entry->crcs[3], __entry->crcs[4]) -); - -TRACE_EVENT(intel_cpu_fifo_underrun, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), - TP_ARGS(dev_priv, pipe), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - __entry->pipe = pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), - __entry->frame, __entry->scanline) -); - -TRACE_EVENT(intel_pch_fifo_underrun, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder), - TP_ARGS(dev_priv, pch_transcoder), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - enum pipe pipe = pch_transcoder; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - __entry->pipe = pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pch transcoder %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), - __entry->frame, __entry->scanline) -); - -TRACE_EVENT(intel_memory_cxsr, - TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new), - TP_ARGS(dev_priv, old, new), - - TP_STRUCT__entry( - __array(u32, frame, 3) - __array(u32, scanline, 3) - __field(bool, old) - __field(bool, new) - ), - - TP_fast_assign( - struct intel_crtc *crtc; - for_each_intel_crtc(&dev_priv->drm, crtc) { - __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); - __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); - } - __entry->old = old; - __entry->new = new; - ), - - TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", - onoff(__entry->old), onoff(__entry->new), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) -); - -TRACE_EVENT(g4x_wm, - TP_PROTO(struct intel_crtc *crtc, const struct g4x_wm_values *wm), - TP_ARGS(crtc, wm), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __field(u16, primary) - __field(u16, sprite) - __field(u16, cursor) - __field(u16, sr_plane) - __field(u16, sr_cursor) - __field(u16, sr_fbc) - __field(u16, hpll_plane) - __field(u16, hpll_cursor) - __field(u16, hpll_fbc) - __field(bool, cxsr) - __field(bool, hpll) - __field(bool, fbc) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; - __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; - __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR]; - __entry->sr_plane = wm->sr.plane; - __entry->sr_cursor = wm->sr.cursor; - __entry->sr_fbc = wm->sr.fbc; - __entry->hpll_plane = wm->hpll.plane; - __entry->hpll_cursor = wm->hpll.cursor; - __entry->hpll_fbc = wm->hpll.fbc; - __entry->cxsr = wm->cxsr; - __entry->hpll = wm->hpll_en; - __entry->fbc = wm->fbc_en; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, 
wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline, - __entry->primary, __entry->sprite, __entry->cursor, - yesno(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc, - yesno(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc, - yesno(__entry->fbc)) -); - -TRACE_EVENT(vlv_wm, - TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm), - TP_ARGS(crtc, wm), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __field(u32, level) - __field(u32, cxsr) - __field(u32, primary) - __field(u32, sprite0) - __field(u32, sprite1) - __field(u32, cursor) - __field(u32, sr_plane) - __field(u32, sr_cursor) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - __entry->level = wm->level; - __entry->cxsr = wm->cxsr; - __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; - __entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; - __entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1]; - __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR]; - __entry->sr_plane = wm->sr.plane; - __entry->sr_cursor = wm->sr.cursor; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline, __entry->level, __entry->cxsr, - __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor, - __entry->sr_plane, __entry->sr_cursor) -); - -TRACE_EVENT(vlv_fifo_size, - TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size), - TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __field(u32, sprite0_start) - __field(u32, sprite1_start) - __field(u32, fifo_size) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - __entry->sprite0_start = sprite0_start; - __entry->sprite1_start = sprite1_start; - __entry->fifo_size = fifo_size; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline, __entry->sprite0_start, - __entry->sprite1_start, __entry->fifo_size) -); - -/* plane updates */ - -TRACE_EVENT(intel_plane_update_noarm, - TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), - TP_ARGS(plane, crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __array(int, src, 4) - __array(int, dst, 4) - __string(name, plane->name) - ), - - TP_fast_assign( - __assign_str(name, plane->name); - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); - memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); - ), - - TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, - pipe_name(__entry->pipe), __get_str(name), - __entry->frame, __entry->scanline, - DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), - DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) -); - -TRACE_EVENT(intel_plane_update_arm, - TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), - TP_ARGS(plane, crtc), - - 
TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __array(int, src, 4) - __array(int, dst, 4) - __string(name, plane->name) - ), - - TP_fast_assign( - __assign_str(name, plane->name); - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); - memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); - ), - - TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, - pipe_name(__entry->pipe), __get_str(name), - __entry->frame, __entry->scanline, - DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), - DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) -); - -TRACE_EVENT(intel_plane_disable_arm, - TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), - TP_ARGS(plane, crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __string(name, plane->name) - ), - - TP_fast_assign( - __assign_str(name, plane->name); - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, plane %s, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __get_str(name), - __entry->frame, __entry->scanline) -); - -/* fbc */ - -TRACE_EVENT(intel_fbc_activate, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline) -); - -TRACE_EVENT(intel_fbc_deactivate, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline) -); - -TRACE_EVENT(intel_fbc_nuke, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline) -); - -/* pipe updates */ - -TRACE_EVENT(intel_crtc_vblank_work_start, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline) -); - -TRACE_EVENT(intel_crtc_vblank_work_end, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe 
= crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline) -); - -TRACE_EVENT(intel_pipe_update_start, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __field(u32, min) - __field(u32, max) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - __entry->min = crtc->debug.min_vbl; - __entry->max = crtc->debug.max_vbl; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline, __entry->min, __entry->max) -); - -TRACE_EVENT(intel_pipe_update_vblank_evaded, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __field(u32, min) - __field(u32, max) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = crtc->debug.start_vbl_count; - __entry->scanline = crtc->debug.scanline_start; - __entry->min = crtc->debug.min_vbl; - __entry->max = crtc->debug.max_vbl; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline, __entry->min, __entry->max) -); - -TRACE_EVENT(intel_pipe_update_end, - TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end), - TP_ARGS(crtc, frame, scanline_end), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = frame; - __entry->scanline = scanline_end; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline) -); - -/* frontbuffer tracking */ - -TRACE_EVENT(intel_frontbuffer_invalidate, - TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin), - TP_ARGS(frontbuffer_bits, origin), - - TP_STRUCT__entry( - __field(unsigned int, frontbuffer_bits) - __field(unsigned int, origin) - ), - - TP_fast_assign( - __entry->frontbuffer_bits = frontbuffer_bits; - __entry->origin = origin; - ), - - TP_printk("frontbuffer_bits=0x%08x, origin=%u", - __entry->frontbuffer_bits, __entry->origin) -); - -TRACE_EVENT(intel_frontbuffer_flush, - TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin), - TP_ARGS(frontbuffer_bits, origin), - - TP_STRUCT__entry( - __field(unsigned int, frontbuffer_bits) - __field(unsigned int, origin) - ), - - TP_fast_assign( - __entry->frontbuffer_bits = frontbuffer_bits; - __entry->origin = origin; - ), - - TP_printk("frontbuffer_bits=0x%08x, origin=%u", - __entry->frontbuffer_bits, __entry->origin) -); - /* object tracking */ TRACE_EVENT(i915_gem_object_create, @@ -1331,5 +764,7 @@ DEFINE_EVENT(i915_context, i915_context_free, /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915 +#define TRACE_INCLUDE_FILE i915_trace #include <trace/define_trace.h> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 927f0d4f8e11..c0d6d5526abe 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -109,7 +109,6 @@ vma_create(struct drm_i915_gem_object *obj, return ERR_PTR(-ENOMEM); 
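A note on the i915_trace.h hunk above: <trace/define_trace.h> re-includes the trace header to generate the event code, and when TRACE_INCLUDE_FILE is unset it falls back to the TRACE_SYSTEM name. Because this header is named i915_trace while its TRACE_SYSTEM is i915, the file name has to be spelled out. A minimal sketch of the standard self-including trace header layout (the event, field, and path here are illustrative, not from this commit; exactly one .c file must define CREATE_TRACE_POINTS before including the header):

	#if !defined(_EXAMPLE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
	#define _EXAMPLE_TRACE_H_

	#include <linux/tracepoint.h>

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM i915

	TRACE_EVENT(example_event,
		    TP_PROTO(unsigned int value),
		    TP_ARGS(value),
		    TP_STRUCT__entry(__field(unsigned int, value)),
		    TP_fast_assign(__entry->value = value;),
		    TP_printk("value=%u", __entry->value)
	);

	#endif /* _EXAMPLE_TRACE_H_ */

	/* This part must be outside protection */
	#undef TRACE_INCLUDE_PATH
	#undef TRACE_INCLUDE_FILE
	#define TRACE_INCLUDE_PATH .
	#define TRACE_INCLUDE_FILE example_trace
	#include <trace/define_trace.h>

Callers then fire the event as trace_example_event(value), which is how the display tracepoints removed above are invoked from the rest of the driver.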
kref_init(&vma->ref); - mutex_init(&vma->pages_mutex); vma->vm = i915_vm_get(vm); vma->ops = &vm->vma_ops; vma->obj = obj; @@ -356,22 +355,18 @@ int i915_vma_wait_for_bind(struct i915_vma *vma) #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) static int i915_vma_verify_bind_complete(struct i915_vma *vma) { - int err = 0; - - if (i915_active_has_exclusive(&vma->active)) { - struct dma_fence *fence = - i915_active_fence_get(&vma->active.excl); + struct dma_fence *fence = i915_active_fence_get(&vma->active.excl); + int err; - if (!fence) - return 0; + if (!fence) + return 0; - if (dma_fence_is_signaled(fence)) - err = fence->error; - else - err = -EBUSY; + if (dma_fence_is_signaled(fence)) + err = fence->error; + else + err = -EBUSY; - dma_fence_put(fence); - } + dma_fence_put(fence); return err; } @@ -398,6 +393,7 @@ int i915_vma_bind(struct i915_vma *vma, u32 bind_flags; u32 vma_flags; + lockdep_assert_held(&vma->vm->mutex); GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(vma->size > vma->node.size); @@ -419,7 +415,7 @@ int i915_vma_bind(struct i915_vma *vma, if (bind_flags == 0) return 0; - GEM_BUG_ON(!vma->pages); + GEM_BUG_ON(!atomic_read(&vma->pages_count)); trace_i915_vma_bind(vma, bind_flags); if (work && bind_flags & vma->vm->bind_async_flags) { @@ -461,6 +457,9 @@ int i915_vma_bind(struct i915_vma *vma, vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags); } + if (vma->obj) + set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags); + atomic_or(bind_flags, &vma->flags); return 0; } @@ -820,10 +819,337 @@ unpinned: return pinned; } -static int vma_get_pages(struct i915_vma *vma) +static struct scatterlist * +rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset, + unsigned int width, unsigned int height, + unsigned int src_stride, unsigned int dst_stride, + struct sg_table *st, struct scatterlist *sg) { - int err = 0; - bool pinned_pages = true; + unsigned int column, row; + unsigned int src_idx; + + for (column = 0; column < width; column++) { + unsigned int left; + + src_idx = src_stride * (height - 1) + column + offset; + for (row = 0; row < height; row++) { + st->nents++; + /* + * We don't need the pages, but need to initialize + * the entries so the sg list can be happily traversed. + * The only thing we need are DMA addresses. + */ + sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); + sg_dma_address(sg) = + i915_gem_object_get_dma_address(obj, src_idx); + sg_dma_len(sg) = I915_GTT_PAGE_SIZE; + sg = sg_next(sg); + src_idx -= src_stride; + } + + left = (dst_stride - height) * I915_GTT_PAGE_SIZE; + + if (!left) + continue; + + st->nents++; + + /* + * The DE ignores the PTEs for the padding tiles, the sg entry + * here is just a convenience to indicate how many padding PTEs + * to insert at this spot. + */ + sg_set_page(sg, NULL, left, 0); + sg_dma_address(sg) = 0; + sg_dma_len(sg) = left; + sg = sg_next(sg); + } + + return sg; +} + +static noinline struct sg_table * +intel_rotate_pages(struct intel_rotation_info *rot_info, + struct drm_i915_gem_object *obj) +{ + unsigned int size = intel_rotation_info_size(rot_info); + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct sg_table *st; + struct scatterlist *sg; + int ret = -ENOMEM; + int i; +
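To see what the rotate_pages() walk above produces, take an illustrative plane with width=2, height=3, src_stride=2 and offset=0 (values chosen for the example, not taken from the commit). The inner loop starts each column at src_idx = src_stride * (height - 1) + column and steps down by src_stride per row:

	source pages (row-major, src_stride = 2):    emitted DMA-address order:
	    row 0:  0  1                             column 0:  4, 2, 0
	    row 1:  2  3                             column 1:  5, 3, 1
	    row 2:  4  5

Each output column is one input column read bottom-up, which is how a rotated view of a row-major page layout is linearized; any dst_stride slack beyond height becomes a single zero-address padding entry whose sg_dma_len() alone tells the GGTT writer how many dummy PTEs to emit.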
+ /* Allocate target SG list. */ + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto err_st_alloc; + + ret = sg_alloc_table(st, size, GFP_KERNEL); + if (ret) + goto err_sg_alloc; + + st->nents = 0; + sg = st->sgl; + + for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) + sg = rotate_pages(obj, rot_info->plane[i].offset, + rot_info->plane[i].width, rot_info->plane[i].height, + rot_info->plane[i].src_stride, + rot_info->plane[i].dst_stride, + st, sg); + + return st; + +err_sg_alloc: + kfree(st); +err_st_alloc: + + drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", + obj->base.size, rot_info->plane[0].width, + rot_info->plane[0].height, size); + + return ERR_PTR(ret); +} + +static struct scatterlist * +remap_pages(struct drm_i915_gem_object *obj, + unsigned int offset, unsigned int alignment_pad, + unsigned int width, unsigned int height, + unsigned int src_stride, unsigned int dst_stride, + struct sg_table *st, struct scatterlist *sg) +{ + unsigned int row; + + if (!width || !height) + return sg; + + if (alignment_pad) { + st->nents++; + + /* + * The DE ignores the PTEs for the padding tiles, the sg entry + * here is just a convenience to indicate how many padding PTEs + * to insert at this spot. + */ + sg_set_page(sg, NULL, alignment_pad * 4096, 0); + sg_dma_address(sg) = 0; + sg_dma_len(sg) = alignment_pad * 4096; + sg = sg_next(sg); + } + + for (row = 0; row < height; row++) { + unsigned int left = width * I915_GTT_PAGE_SIZE; + + while (left) { + dma_addr_t addr; + unsigned int length; + + /* + * We don't need the pages, but need to initialize + * the entries so the sg list can be happily traversed. + * The only thing we need are DMA addresses. + */ + + addr = i915_gem_object_get_dma_address_len(obj, offset, &length); + + length = min(left, length); + + st->nents++; + + sg_set_page(sg, NULL, length, 0); + sg_dma_address(sg) = addr; + sg_dma_len(sg) = length; + sg = sg_next(sg); + + offset += length / I915_GTT_PAGE_SIZE; + left -= length; + } + + offset += src_stride - width; + + left = (dst_stride - width) * I915_GTT_PAGE_SIZE; + + if (!left) + continue; + + st->nents++; + + /* + * The DE ignores the PTEs for the padding tiles, the sg entry + * here is just a convenience to indicate how many padding PTEs + * to insert at this spot. + */ + sg_set_page(sg, NULL, left, 0); + sg_dma_address(sg) = 0; + sg_dma_len(sg) = left; + sg = sg_next(sg); + } + + return sg; +} + +static noinline struct sg_table * +intel_remap_pages(struct intel_remapped_info *rem_info, + struct drm_i915_gem_object *obj) +{ + unsigned int size = intel_remapped_info_size(rem_info); + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct sg_table *st; + struct scatterlist *sg; + unsigned int gtt_offset = 0; + int ret = -ENOMEM; + int i; + + /* Allocate target SG list. 
*/ + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto err_st_alloc; + + ret = sg_alloc_table(st, size, GFP_KERNEL); + if (ret) + goto err_sg_alloc; + + st->nents = 0; + sg = st->sgl; + + for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { + unsigned int alignment_pad = 0; + + if (rem_info->plane_alignment) + alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset; + + sg = remap_pages(obj, + rem_info->plane[i].offset, alignment_pad, + rem_info->plane[i].width, rem_info->plane[i].height, + rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride, + st, sg); + + gtt_offset += alignment_pad + + rem_info->plane[i].dst_stride * rem_info->plane[i].height; + } + + i915_sg_trim(st); + + return st; + +err_sg_alloc: + kfree(st); +err_st_alloc: + + drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n", + obj->base.size, rem_info->plane[0].width, + rem_info->plane[0].height, size); + + return ERR_PTR(ret); +} + +static noinline struct sg_table * +intel_partial_pages(const struct i915_ggtt_view *view, + struct drm_i915_gem_object *obj) +{ + struct sg_table *st; + struct scatterlist *sg, *iter; + unsigned int count = view->partial.size; + unsigned int offset; + int ret = -ENOMEM; + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto err_st_alloc; + + ret = sg_alloc_table(st, count, GFP_KERNEL); + if (ret) + goto err_sg_alloc; + + iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset); + GEM_BUG_ON(!iter); + + sg = st->sgl; + st->nents = 0; + do { + unsigned int len; + + len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), + count << PAGE_SHIFT); + sg_set_page(sg, NULL, len, 0); + sg_dma_address(sg) = + sg_dma_address(iter) + (offset << PAGE_SHIFT); + sg_dma_len(sg) = len; + + st->nents++; + count -= len >> PAGE_SHIFT; + if (count == 0) { + sg_mark_end(sg); + i915_sg_trim(st); /* Drop any unused tail entries. */ + + return st; + } + + sg = __sg_next(sg); + iter = __sg_next(iter); + offset = 0; + } while (1); + +err_sg_alloc: + kfree(st); +err_st_alloc: + return ERR_PTR(ret); +} + +static int +__i915_vma_get_pages(struct i915_vma *vma) +{ + struct sg_table *pages; + int ret; + + /* + * The vma->pages are only valid within the lifespan of the borrowed + * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so + * must be the vma->pages. A simple rule is that vma->pages must only + * be accessed when the obj->mm.pages are pinned. + */ + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); + + switch (vma->ggtt_view.type) { + default: + GEM_BUG_ON(vma->ggtt_view.type); + fallthrough; + case I915_GGTT_VIEW_NORMAL: + pages = vma->obj->mm.pages; + break; + + case I915_GGTT_VIEW_ROTATED: + pages = + intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); + break; + + case I915_GGTT_VIEW_REMAPPED: + pages = + intel_remap_pages(&vma->ggtt_view.remapped, vma->obj); + break; + + case I915_GGTT_VIEW_PARTIAL: + pages = intel_partial_pages(&vma->ggtt_view, vma->obj); + break; + } + + ret = 0; + if (IS_ERR(pages)) { + ret = PTR_ERR(pages); + pages = NULL; + drm_err(&vma->vm->i915->drm, + "Failed to get pages for VMA view type %u (%d)!\n", + vma->ggtt_view.type, ret); + } + + vma->pages = pages; + + return ret; +} + +I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma) +{ + int err; if (atomic_add_unless(&vma->pages_count, 1, 0)) return 0; @@ -832,25 +1158,17 @@ static int vma_get_pages(struct i915_vma *vma) if (err) return err; - /* Allocations ahoy! 
*/ - if (mutex_lock_interruptible(&vma->pages_mutex)) { - err = -EINTR; - goto unpin; - } + err = __i915_vma_get_pages(vma); + if (err) + goto err_unpin; - if (!atomic_read(&vma->pages_count)) { - err = vma->ops->set_pages(vma); - if (err) - goto unlock; - pinned_pages = false; - } + vma->page_sizes = vma->obj->mm.page_sizes; atomic_inc(&vma->pages_count); -unlock: - mutex_unlock(&vma->pages_mutex); -unpin: - if (pinned_pages) - __i915_gem_object_unpin_pages(vma->obj); + return 0; + +err_unpin: + __i915_gem_object_unpin_pages(vma->obj); return err; } @@ -858,18 +1176,31 @@ unpin: static void __vma_put_pages(struct i915_vma *vma, unsigned int count) { /* We allocate under vma_get_pages, so beware the shrinker */ - mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING); + struct sg_table *pages = READ_ONCE(vma->pages); + GEM_BUG_ON(atomic_read(&vma->pages_count) < count); + if (atomic_sub_return(count, &vma->pages_count) == 0) { - vma->ops->clear_pages(vma); - GEM_BUG_ON(vma->pages); + /* + * The atomic_sub_return is a read barrier for the READ_ONCE of + * vma->pages above. + * + * READ_ONCE is safe because this is either called from the same + * function (i915_vma_pin_ww), or guarded by vma->vm->mutex. + * + * TODO: We're leaving vma->pages dangling, until vma->obj->resv + * lock is required. + */ + if (pages != vma->obj->mm.pages) { + sg_free_table(pages); + kfree(pages); + } i915_gem_object_unpin_pages(vma->obj); } - mutex_unlock(&vma->pages_mutex); } -static void vma_put_pages(struct i915_vma *vma) +I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma) { if (atomic_add_unless(&vma->pages_count, -1, 1)) return; @@ -900,10 +1231,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, unsigned int bound; int err; -#ifdef CONFIG_PROVE_LOCKING - if (debug_locks && !WARN_ON(!ww)) - assert_vma_held(vma); -#endif + assert_vma_held(vma); + GEM_BUG_ON(!ww); BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND); BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND); @@ -914,7 +1243,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK)) return 0; - err = vma_get_pages(vma); + err = i915_vma_get_pages(vma); if (err) return err; @@ -1042,10 +1371,11 @@ err_fence: err_rpm: if (wakeref) intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); + if (moving) dma_fence_put(moving); - vma_put_pages(vma); + i915_vma_put_pages(vma); return err; } @@ -1060,23 +1390,15 @@ static void flush_idle_contexts(struct intel_gt *gt) intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); } -int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, - u32 align, unsigned int flags) +static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, + u32 align, unsigned int flags) { struct i915_address_space *vm = vma->vm; int err; - GEM_BUG_ON(!i915_vma_is_ggtt(vma)); - -#ifdef CONFIG_LOCKDEP - WARN_ON(!ww && dma_resv_held(vma->obj->base.resv)); -#endif - do { - if (ww) - err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL); - else - err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL); + err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL); + if (err != -ENOSPC) { if (!err) { err = i915_vma_wait_for_bind(vma); @@ -1095,6 +1417,30 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, } while (1); } +int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, + u32 align, unsigned int flags) +{ + struct i915_gem_ww_ctx _ww; + int err; + + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); 
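From here the rewrite funnels every pin through a ww transaction: i915_ggtt_pin() callers that arrive without a context (continuing below) open a local one with for_i915_gem_ww(), which keeps re-running its body while it returns -EDEADLK and performs the wound/wait backoff in between, so the deadlock code never escapes to the caller. The shape of the pattern, as a sketch (do_pin_locked() is a hypothetical stand-in):

	struct i915_gem_ww_ctx ww;
	int err;

	for_i915_gem_ww(&ww, err, true) {	/* true = interruptible waits */
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;	/* on -EDEADLK the macro backs off and retries */

		err = do_pin_locked(obj, &ww);	/* may itself return -EDEADLK */
	}
	/* err now holds the final result; -EDEADLK was consumed by the loop */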
+ + if (ww) + return __i915_ggtt_pin(vma, ww, align, flags); + +#ifdef CONFIG_LOCKDEP + WARN_ON(dma_resv_held(vma->obj->base.resv)); +#endif + + for_i915_gem_ww(&_ww, err, true) { + err = i915_gem_object_lock(vma->obj, &_ww); + if (!err) + err = __i915_ggtt_pin(vma, &_ww, align, flags); + } + + return err; +} + static void __vma_close(struct i915_vma *vma, struct intel_gt *gt) { /* @@ -1249,7 +1595,7 @@ __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma) return __i915_request_await_exclusive(rq, &vma->active); } -int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) +static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) { int err; diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 4033aa08d5e4..32719431b3df 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -55,8 +55,6 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma) /* do not reserve memory to prevent deadlocks */ #define __EXEC_OBJECT_NO_RESERVE BIT(31) -int __must_check __i915_vma_move_to_active(struct i915_vma *vma, - struct i915_request *rq); int __must_check _i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq, struct dma_fence *fence, @@ -433,4 +431,7 @@ static inline int i915_vma_sync(struct i915_vma *vma) void i915_vma_module_exit(void); int i915_vma_module_init(void); +I915_SELFTEST_DECLARE(int i915_vma_get_pages(struct i915_vma *vma)); +I915_SELFTEST_DECLARE(void i915_vma_put_pages(struct i915_vma *vma)); + #endif diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h index f03fa96a1701..ca575e129ced 100644 --- a/drivers/gpu/drm/i915/i915_vma_types.h +++ b/drivers/gpu/drm/i915/i915_vma_types.h @@ -270,7 +270,6 @@ struct i915_vma { #define I915_VMA_PAGES_BIAS 24 #define I915_VMA_PAGES_ACTIVE (BIT(24) | 1) atomic_t pages_count; /* number of active binds to the pages */ - struct mutex pages_mutex; /* protect acquire/release of backing pages */ /** * Support different GGTT views into the same object. 
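Taken together, the i915_vma hunks above retire the per-vma pages_mutex: pages_count is now an atomic pin count taken with an optimistic fastpath and dropped with the READ_ONCE()/atomic_sub_return() pairing seen in __vma_put_pages(). Both halves of that idiom, reduced to a standalone sketch (struct and helper names are invented for illustration; in i915 the slow path is serialized by the object's dma-resv lock):

	struct page_cache {
		atomic_t users;			/* 0 == no backing store built yet */
		struct sg_table *pages;		/* valid while users > 0 */
	};

	static int page_cache_get(struct page_cache *c)
	{
		int err;

		/* Fastpath: join an existing pin; the 0 -> 1 transition is refused. */
		if (atomic_add_unless(&c->users, 1, 0))
			return 0;

		err = build_pages(c);		/* stand-in for __i915_vma_get_pages() */
		if (err)
			return err;

		atomic_inc(&c->users);
		return 0;
	}

	static void page_cache_put(struct page_cache *c, struct sg_table *shared)
	{
		/* Snapshot before we can drop the last reference. */
		struct sg_table *pages = READ_ONCE(c->pages);

		/* atomic_sub_return() is fully ordered, so the snapshot above
		 * cannot be reordered past the release of the final pin.
		 */
		if (atomic_sub_return(1, &c->users))
			return;

		if (pages != shared) {		/* only free privately built tables */
			sg_free_table(pages);
			kfree(pages);
		}
	}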
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index e6605b5181a5..04fd266d70e2 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -170,6 +170,10 @@ static const u16 subplatform_portf_ids[] = { INTEL_ICL_PORT_F_IDS(0), }; +static const u16 subplatform_rpls_ids[] = { + INTEL_RPLS_IDS(0), +}; + static bool find_devid(u16 id, const u16 *p, unsigned int num) { for (; num; num--, p++) { @@ -206,6 +210,9 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915) } else if (find_devid(devid, subplatform_portf_ids, ARRAY_SIZE(subplatform_portf_ids))) { mask = BIT(INTEL_SUBPLATFORM_PORTF); + } else if (find_devid(devid, subplatform_rpls_ids, + ARRAY_SIZE(subplatform_rpls_ids))) { + mask = BIT(INTEL_SUBPLATFORM_RPL_S); } if (IS_TIGERLAKE(i915)) { @@ -319,33 +326,33 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { drm_info(&dev_priv->drm, "Display fused off, disabling\n"); - info->pipe_mask = 0; - info->cpu_transcoder_mask = 0; + info->display.pipe_mask = 0; + info->display.cpu_transcoder_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { drm_info(&dev_priv->drm, "PipeC fused off\n"); - info->pipe_mask &= ~BIT(PIPE_C); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); + info->display.pipe_mask &= ~BIT(PIPE_C); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) { u32 dfsm = intel_de_read(dev_priv, SKL_DFSM); if (dfsm & SKL_DFSM_PIPE_A_DISABLE) { - info->pipe_mask &= ~BIT(PIPE_A); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_A); + info->display.pipe_mask &= ~BIT(PIPE_A); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_A); } if (dfsm & SKL_DFSM_PIPE_B_DISABLE) { - info->pipe_mask &= ~BIT(PIPE_B); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_B); + info->display.pipe_mask &= ~BIT(PIPE_B); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_B); } if (dfsm & SKL_DFSM_PIPE_C_DISABLE) { - info->pipe_mask &= ~BIT(PIPE_C); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); + info->display.pipe_mask &= ~BIT(PIPE_C); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } if (DISPLAY_VER(dev_priv) >= 12 && (dfsm & TGL_DFSM_PIPE_D_DISABLE)) { - info->pipe_mask &= ~BIT(PIPE_D); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D); + info->display.pipe_mask &= ~BIT(PIPE_D); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_D); } if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE) diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 669f0d26c3c3..78597d382445 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -110,6 +110,9 @@ enum intel_platform { #define INTEL_SUBPLATFORM_G10 0 #define INTEL_SUBPLATFORM_G11 1 +/* ADL-S */ +#define INTEL_SUBPLATFORM_RPL_S 0 + enum intel_ppgtt_type { INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE, INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING, @@ -123,6 +126,7 @@ enum intel_ppgtt_type { func(is_dgfx); \ /* Keep has_* in alphabetical order */ \ func(has_64bit_reloc); \ + func(has_64k_pages); \ func(gpu_reset_clobbers_display); \ func(has_reset_engine); \ func(has_global_mocs); \ @@ -192,11 +196,6 @@ struct intel_device_info { u8 gt; /* GT number, 0 if undefined */ - u8 pipe_mask; - u8 cpu_transcoder_mask; - - u8 abox_mask; - #define DEFINE_FLAG(name) u8 name:1 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); #undef DEFINE_FLAG @@ 
-205,6 +204,10 @@ struct intel_device_info { u8 ver; u8 rel; + u8 pipe_mask; + u8 cpu_transcoder_mask; + u8 abox_mask; + #define DEFINE_FLAG(name) u8 name:1 DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG); #undef DEFINE_FLAG diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 4e70c1a9ef2e..cf6e98962d82 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -109,7 +109,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) return 0; } - if (intel_uc_wants_guc_submission(&dev_priv->gt.uc)) { + if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) { drm_err(&dev_priv->drm, "i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n"); return -EIO; diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index b43121609e25..c70d7e286a51 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -3,6 +3,8 @@ * Copyright © 2019 Intel Corporation */ +#include <linux/prandom.h> + #include "intel_memory_region.h" #include "i915_drv.h" #include "i915_ttm_buddy_manager.h" @@ -29,6 +31,110 @@ static const struct { }, }; +static int __iopagetest(struct intel_memory_region *mem, + u8 __iomem *va, int pagesize, + u8 value, resource_size_t offset, + const void *caller) +{ + int byte = prandom_u32_max(pagesize); + u8 result[3]; + + memset_io(va, value, pagesize); /* or GPF! */ + wmb(); + + result[0] = ioread8(va); + result[1] = ioread8(va + byte); + result[2] = ioread8(va + pagesize - 1); + if (memchr_inv(result, value, sizeof(result))) { + dev_err(mem->i915->drm.dev, + "Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n", + &mem->region, &mem->io_start, &offset, caller, + value, result[0], result[1], result[2]); + return -EINVAL; + } + + return 0; +} + +static int iopagetest(struct intel_memory_region *mem, + resource_size_t offset, + const void *caller) +{ + const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 }; + void __iomem *va; + int err; + int i; + + va = ioremap_wc(mem->io_start + offset, PAGE_SIZE); + if (!va) { + dev_err(mem->i915->drm.dev, + "Failed to ioremap memory region [%pa + %pa] for %ps\n", + &mem->io_start, &offset, caller); + return -EFAULT; + } + + for (i = 0; i < ARRAY_SIZE(val); i++) { + err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller); + if (err) + break; + + err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller); + if (err) + break; + } + + iounmap(va); + return err; +} + +static resource_size_t random_page(resource_size_t last) +{ + /* Limited to low 44b (16TiB), but should suffice for a spot check */ + return prandom_u32_max(last >> PAGE_SHIFT) << PAGE_SHIFT; +} + +static int iomemtest(struct intel_memory_region *mem, + bool test_all, + const void *caller) +{ + resource_size_t last = resource_size(&mem->region) - PAGE_SIZE; + resource_size_t page; + int err; + + /* + * Quick test to check read/write access to the iomap (backing store). + * + * Write a byte, read it back. If the iomapping fails, we expect + * a GPF preventing further execution. If the backing store does not + * exist, the read back will return garbage. We check a couple of pages, + * the first and last of the specified region to confirm the backing + * store + iomap does cover the entire memory region; and we check + * a random offset within as a quick spot check for bad memory. 
+ */ + + if (test_all) { + for (page = 0; page <= last; page += PAGE_SIZE) { + err = iopagetest(mem, page, caller); + if (err) + return err; + } + } else { + err = iopagetest(mem, 0, caller); + if (err) + return err; + + err = iopagetest(mem, last, caller); + if (err) + return err; + + err = iopagetest(mem, random_page(last), caller); + if (err) + return err; + } + + return 0; +} + struct intel_memory_region * intel_memory_region_lookup(struct drm_i915_private *i915, u16 class, u16 instance) @@ -90,6 +196,21 @@ void intel_memory_region_debug(struct intel_memory_region *mr, &mr->total, &mr->avail); } +static int intel_memory_region_memtest(struct intel_memory_region *mem, + void *caller) +{ + struct drm_i915_private *i915 = mem->i915; + int err = 0; + + if (!mem->io_start) + return 0; + + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest) + err = iomemtest(mem, i915->params.memtest, caller); + + return err; +} + struct intel_memory_region * intel_memory_region_create(struct drm_i915_private *i915, resource_size_t start, @@ -126,8 +247,15 @@ intel_memory_region_create(struct drm_i915_private *i915, goto err_free; } + err = intel_memory_region_memtest(mem, (void *)_RET_IP_); + if (err) + goto err_release; + return mem; +err_release: + if (mem->ops->release) + mem->ops->release(mem); err_free: kfree(mem); return ERR_PTR(err); diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index d1d4b97b86f5..da8f82c2342f 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -129,6 +129,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) return PCH_JSP; case INTEL_PCH_ADP_DEVICE_ID_TYPE: case INTEL_PCH_ADP2_DEVICE_ID_TYPE: + case INTEL_PCH_ADP3_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) && !IS_ALDERLAKE_P(dev_priv)); diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 7c0d83d292dc..6bff77521094 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -57,6 +57,7 @@ enum intel_pch { #define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80 #define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180 +#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index cff0f32bedc9..434b1f8b7fe3 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -36,6 +36,7 @@ #include "display/intel_atomic_plane.h" #include "display/intel_bw.h" #include "display/intel_de.h" +#include "display/intel_display_trace.h" #include "display/intel_display_types.h" #include "display/intel_fb.h" #include "display/intel_fbc.h" @@ -47,7 +48,6 @@ #include "i915_drv.h" #include "i915_fixed.h" #include "i915_irq.h" -#include "i915_trace.h" #include "intel_pcode.h" #include "intel_pm.h" #include "vlv_sideband.h" @@ -989,7 +989,7 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv, enum pipe pipe; for_each_pipe(dev_priv, pipe) - trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm); + trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(wm->sr.plane, SR) | @@ -1021,7 +1021,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv, enum pipe 
pipe; for_each_pipe(dev_priv, pipe) { - trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm); + trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | @@ -2336,6 +2336,20 @@ static void i965_update_wm(struct drm_i915_private *dev_priv) #undef FW_WM +static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915, + enum i9xx_plane_id i9xx_plane) +{ + struct intel_plane *plane; + + for_each_intel_plane(&i915->drm, plane) { + if (plane->id == PLANE_PRIMARY && + plane->i9xx_plane == i9xx_plane) + return intel_crtc_for_pipe(i915, plane->pipe); + } + + return NULL; +} + static void i9xx_update_wm(struct drm_i915_private *dev_priv) { const struct intel_watermark_params *wm_info; @@ -2357,7 +2371,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) fifo_size = i830_get_fifo_size(dev_priv, PLANE_A); else fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A); - crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A); + crtc = intel_crtc_for_plane(dev_priv, PLANE_A); if (intel_crtc_active(crtc)) { const struct drm_display_mode *pipe_mode = &crtc->config->hw.pipe_mode; @@ -2387,7 +2401,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) fifo_size = i830_get_fifo_size(dev_priv, PLANE_B); else fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B); - crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B); + crtc = intel_crtc_for_plane(dev_priv, PLANE_B); if (intel_crtc_active(crtc)) { const struct drm_display_mode *pipe_mode = &crtc->config->hw.pipe_mode; @@ -3369,13 +3383,8 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv, } /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ - /* - * FIXME this is racy. FBC might get enabled later. - * What we should check here is whether FBC can be - * enabled sometime later. 
- */ - if (DISPLAY_VER(dev_priv) == 5 && !merged->fbc_wm_enabled && - intel_fbc_is_active(&dev_priv->fbc)) { + if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) && + dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) { for (level = 2; level <= max_level; level++) { struct intel_wm_level *wm = &merged->wm[level]; @@ -6909,7 +6918,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv) for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = - intel_get_crtc_for_pipe(dev_priv, plane->pipe); + intel_crtc_for_pipe(dev_priv, plane->pipe); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = @@ -7065,7 +7074,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = - intel_get_crtc_for_pipe(dev_priv, plane->pipe); + intel_crtc_for_pipe(dev_priv, plane->pipe); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = @@ -7452,9 +7461,9 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv) { - /* Wa_1409120013:tgl,rkl,adl-s,dg1 */ + /* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */ if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) || - IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) + IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv)) intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, DPFC_CHICKEN_COMP_DUMMY_PIXEL); diff --git a/drivers/gpu/drm/i915/intel_pm_types.h b/drivers/gpu/drm/i915/intel_pm_types.h new file mode 100644 index 000000000000..211632f58751 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_pm_types.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_PM_TYPES_H__ +#define __INTEL_PM_TYPES_H__ + +#include <linux/types.h> + +#include "display/intel_display.h" + +enum intel_ddb_partitioning { + INTEL_DDB_PART_1_2, + INTEL_DDB_PART_5_6, /* IVB+ */ +}; + +struct ilk_wm_values { + u32 wm_pipe[3]; + u32 wm_lp[3]; + u32 wm_lp_spr[3]; + bool enable_fbc_wm; + enum intel_ddb_partitioning partitioning; +}; + +struct g4x_pipe_wm { + u16 plane[I915_MAX_PLANES]; + u16 fbc; +}; + +struct g4x_sr_wm { + u16 plane; + u16 cursor; + u16 fbc; +}; + +struct vlv_wm_ddl_values { + u8 plane[I915_MAX_PLANES]; +}; + +struct vlv_wm_values { + struct g4x_pipe_wm pipe[3]; + struct g4x_sr_wm sr; + struct vlv_wm_ddl_values ddl[3]; + u8 level; + bool cxsr; +}; + +struct g4x_wm_values { + struct g4x_pipe_wm pipe[2]; + struct g4x_sr_wm sr; + struct g4x_sr_wm hpll; + bool cxsr; + bool hpll_en; + bool fbc_en; +}; + +struct skl_ddb_entry { + u16 start, end; /* in number of blocks, 'end' is exclusive */ +}; + +static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry) +{ + return entry->end - entry->start; +} + +static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, + const struct skl_ddb_entry *e2) +{ + if (e1->start == e2->start && e1->end == e2->end) + return true; + + return false; +} + +#endif /* __INTEL_PM_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 22dab36afcb6..53f1ccb78849 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -68,6 +68,9 @@ static noinline depot_stack_handle_t __save_depot_stack(void) static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { 
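The init_intel_runtime_pm_wakeref() hunk continuing below adds an up-front stack_depot_init() to the wakeref debug setup. The stack depot deduplicates whole call traces into compact depot_stack_handle_t cookies, which is what lets the tracker record an acquisition trace per wakeref cheaply, and it must be initialized before the first save. Typical save/fetch usage looks roughly like this (sketch; the buffer size and skip count are arbitrary):

	unsigned long entries[16];
	unsigned long *trace;
	unsigned int nr;
	depot_stack_handle_t handle;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);	/* skip caller */
	handle = stack_depot_save(entries, nr, GFP_NOWAIT);	/* 0 on failure */

	/* later: resolve the cookie back into a trace for reporting */
	nr = stack_depot_fetch(handle, &trace);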
spin_lock_init(&rpm->debug.lock); + + if (rpm->available) + stack_depot_init(); } static noinline depot_stack_handle_t diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index abdac78d3976..778da3179b3c 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -724,7 +724,8 @@ void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, } static void __intel_uncore_forcewake_put(struct intel_uncore *uncore, - enum forcewake_domains fw_domains) + enum forcewake_domains fw_domains, + bool delayed) { struct intel_uncore_forcewake_domain *domain; unsigned int tmp; @@ -739,7 +740,11 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore, continue; } - fw_domains_put(uncore, domain->mask); + if (delayed && + !(domain->uncore->fw_domains_timer & domain->mask)) + fw_domain_arm_timer(domain); + else + fw_domains_put(uncore, domain->mask); } } @@ -760,7 +765,20 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore, return; spin_lock_irqsave(&uncore->lock, irqflags); - __intel_uncore_forcewake_put(uncore, fw_domains); + __intel_uncore_forcewake_put(uncore, fw_domains, false); + spin_unlock_irqrestore(&uncore->lock, irqflags); +} + +void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore, + enum forcewake_domains fw_domains) +{ + unsigned long irqflags; + + if (!uncore->fw_get_funcs) + return; + + spin_lock_irqsave(&uncore->lock, irqflags); + __intel_uncore_forcewake_put(uncore, fw_domains, true); spin_unlock_irqrestore(&uncore->lock, irqflags); } @@ -802,7 +820,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore, if (!uncore->fw_get_funcs) return; - __intel_uncore_forcewake_put(uncore, fw_domains); + __intel_uncore_forcewake_put(uncore, fw_domains, false); } void assert_forcewakes_inactive(struct intel_uncore *uncore) @@ -2061,12 +2079,13 @@ void intel_uncore_cleanup_mmio(struct intel_uncore *uncore) } void intel_uncore_init_early(struct intel_uncore *uncore, - struct drm_i915_private *i915) + struct intel_gt *gt) { spin_lock_init(&uncore->lock); - uncore->i915 = i915; - uncore->rpm = &i915->runtime_pm; - uncore->debug = &i915->mmio_debug; + uncore->i915 = gt->i915; + uncore->gt = gt; + uncore->rpm = &gt->i915->runtime_pm; + uncore->debug = &gt->i915->mmio_debug; } static void uncore_raw_init(struct intel_uncore *uncore) diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index d1d17b04e29f..2a15b2b2e2fc 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -129,6 +129,7 @@ struct intel_uncore { void __iomem *regs; struct drm_i915_private *i915; + struct intel_gt *gt; struct intel_runtime_pm *rpm; spinlock_t lock; /** lock is also taken in irq contexts. */
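intel_uncore_forcewake_put_delayed(), implemented in the intel_uncore.c hunk above and declared in the header hunk below, releases a forcewake reference by arming the domain's release timer instead of touching the hardware immediately, so back-to-back put/get pairs stop bouncing the acknowledgment registers. The generic form of that deferral (illustrative sketch, not the driver's exact code):

	struct deferred_ref {
		struct timer_list timer;	/* its callback does the real release */
	};

	static void put_ref(struct deferred_ref *ref, bool delayed)
	{
		if (delayed && !timer_pending(&ref->timer))
			mod_timer(&ref->timer, jiffies + usecs_to_jiffies(100));
		else
			release_hw(ref);	/* release_hw() is a stand-in */
	}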
@@ -217,7 +218,7 @@ u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore, void intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug); void intel_uncore_init_early(struct intel_uncore *uncore, - struct drm_i915_private *i915); + struct intel_gt *gt); int intel_uncore_setup_mmio(struct intel_uncore *uncore); int intel_uncore_init_mmio(struct intel_uncore *uncore); void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, @@ -245,6 +246,8 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore, enum forcewake_domains domains); void intel_uncore_forcewake_put(struct intel_uncore *uncore, enum forcewake_domains domains); +void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore, + enum forcewake_domains domains); void intel_uncore_forcewake_flush(struct intel_uncore *uncore, enum forcewake_domains fw_domains); diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 5e511bb891f9..f06d21005106 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -220,7 +220,7 @@ static bool __wopcm_regs_locked(struct intel_uncore *uncore, void intel_wopcm_init(struct intel_wopcm *wopcm) { struct drm_i915_private *i915 = wopcm_to_i915(wopcm); - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); u32 guc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.guc.fw); u32 huc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.huc.fw); u32 ctx_rsvd = context_reserved_size(i915); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index e2314ad9546d..15311eaed848 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -44,6 +44,11 @@ struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp) return container_of(pxp, struct intel_gt, pxp); } +bool intel_pxp_is_enabled(const struct intel_pxp *pxp) +{ + return pxp->ce; +} + bool intel_pxp_is_active(const struct intel_pxp *pxp) { return pxp->arb_is_valid; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h index aa262258d4d4..73847e535cab 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h @@ -6,17 +6,15 @@ #ifndef __INTEL_PXP_H__ #define __INTEL_PXP_H__ -#include "intel_pxp_types.h" +#include <linux/errno.h> +#include <linux/types.h> +struct intel_pxp; struct drm_i915_gem_object; -static inline bool intel_pxp_is_enabled(const struct intel_pxp *pxp) -{ - return pxp->ce; -} - #ifdef CONFIG_DRM_I915_PXP struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp); +bool intel_pxp_is_enabled(const struct intel_pxp *pxp); bool intel_pxp_is_active(const struct intel_pxp *pxp); void intel_pxp_init(struct intel_pxp *pxp); @@ -48,6 +46,11 @@ static inline int intel_pxp_start(struct intel_pxp *pxp) return -ENODEV; } +static inline bool intel_pxp_is_enabled(const struct intel_pxp *pxp) +{ + return false; +} + static inline bool intel_pxp_is_active(const struct intel_pxp *pxp) { return false; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c index 5d169624ad60..4b6f5655fab5 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c @@ -16,7 +16,9 @@ static inline struct intel_pxp *i915_dev_to_pxp(struct device *i915_kdev) { - return &kdev_to_i915(i915_kdev)->gt.pxp; + struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); + + return &to_gt(i915)->pxp; } static int intel_pxp_tee_io_message(struct 
intel_pxp *pxp, @@ -105,9 +107,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev, static void i915_pxp_tee_component_unbind(struct device *i915_kdev, struct device *tee_kdev, void *data) { + struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev); + intel_wakeref_t wakeref; - intel_pxp_fini_hw(pxp); + with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref) + intel_pxp_fini_hw(pxp); mutex_lock(&pxp->tee_mutex); pxp->pxp_component = NULL; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h index 73ef7d1754e1..7ce5f37ee12e 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h @@ -7,9 +7,7 @@ #define __INTEL_PXP_TYPES_H__ #include <linux/completion.h> -#include <linux/list.h> #include <linux/mutex.h> -#include <linux/spinlock.h> #include <linux/types.h> #include <linux/workqueue.h> diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index 61bf4560d8af..2dac9be1de58 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -254,7 +254,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915) SUBTEST(live_active_barrier), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 152d9ab135b1..b5576888cd78 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -248,7 +248,7 @@ int i915_gem_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_gem_ww_ctx), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; return i915_live_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 7e0658a77659..75b709c26dd3 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -545,7 +545,7 @@ int i915_gem_evict_mock_selftests(void) return -ENOMEM; with_intel_runtime_pm(&i915->runtime_pm, wakeref) - err = i915_subtests(tests, &i915->gt); + err = i915_subtests(tests, to_gt(i915)); mock_destroy_device(i915); return err; @@ -557,8 +557,8 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_evict_contexts), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; - return intel_gt_live_subtests(tests, &i915->gt); + return intel_gt_live_subtests(tests, to_gt(i915)); } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 46f4236039a9..575705c3bce9 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -155,7 +155,7 @@ static int igt_ppgtt_alloc(void *arg) if (!HAS_PPGTT(dev_priv)) return 0; - ppgtt = i915_ppgtt_create(&dev_priv->gt, 0); + ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -1053,7 +1053,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, if (IS_ERR(file)) return PTR_ERR(file); - ppgtt = i915_ppgtt_create(&dev_priv->gt, 0); + ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_free; @@ -1275,7 +1275,7 @@ static void track_vma_bind(struct 
i915_vma *vma) __i915_gem_object_pin_pages(obj); - GEM_BUG_ON(vma->pages); + GEM_BUG_ON(atomic_read(&vma->pages_count)); atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE); __i915_gem_object_pin_pages(obj); vma->pages = obj->mm.pages; @@ -1953,7 +1953,9 @@ static int igt_cs_tlb(void *arg) goto end; } - err = vma->ops->set_pages(vma); + i915_gem_object_lock(bbe, NULL); + err = i915_vma_get_pages(vma); + i915_gem_object_unlock(bbe); if (err) goto end; @@ -1994,7 +1996,7 @@ end_ww: i915_request_put(rq); } - vma->ops->clear_pages(vma); + i915_vma_put_pages(vma); err = context_sync(ce); if (err) { @@ -2009,7 +2011,9 @@ end_ww: goto end; } - err = vma->ops->set_pages(vma); + i915_gem_object_lock(act, NULL); + err = i915_vma_get_pages(vma); + i915_gem_object_unlock(act); if (err) goto end; @@ -2047,7 +2051,7 @@ end_ww: } end_spin(batch, count - 1); - vma->ops->clear_pages(vma); + i915_vma_put_pages(vma); err = context_sync(ce); if (err) { diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c index 9e9a6cb1d9e5..88db2e3d81d0 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf.c +++ b/drivers/gpu/drm/i915/selftests/i915_perf.c @@ -424,7 +424,7 @@ int i915_perf_live_selftests(struct drm_i915_private *i915) if (!perf->metrics_kobj || !perf->ops.enable_metric_set) return 0; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; err = alloc_empty_config(&i915->perf); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 9979ef9197cd..92a859b34190 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -841,7 +841,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915) __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); - intel_gt_chipset_flush(&i915->gt); + intel_gt_chipset_flush(to_gt(i915)); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { @@ -982,7 +982,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) if (IS_ERR(obj)) return ERR_CAST(obj); - vma = i915_vma_instance(obj, i915->gt.vm, NULL); + vma = i915_vma_instance(obj, to_gt(i915)->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err; @@ -1014,7 +1014,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); - intel_gt_chipset_flush(&i915->gt); + intel_gt_chipset_flush(to_gt(i915)); return vma; @@ -1700,7 +1700,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_breadcrumbs_smoketest), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; return i915_subtests(tests, i915); @@ -3091,7 +3091,7 @@ int i915_request_perf_selftests(struct drm_i915_private *i915) SUBTEST(perf_parallel_engines), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index 484759c9409c..2d6d7bd13c3c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -298,10 +298,10 @@ int __i915_live_setup(void *data) struct drm_i915_private *i915 = data; /* The selftests expect an idle system */ - if (intel_gt_pm_wait_for_idle(&i915->gt)) + if (intel_gt_pm_wait_for_idle(to_gt(i915))) return -EIO; - return 
intel_gt_terminally_wedged(&i915->gt); + return intel_gt_terminally_wedged(to_gt(i915)); } int __i915_live_teardown(int err, void *data) diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 1f10fe36619b..5c5809dfe9b2 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -691,7 +691,11 @@ static int igt_vma_rotate_remap(void *arg) } i915_vma_unpin(vma); - + err = i915_vma_unbind(vma); + if (err) { + pr_err("Unbinding returned %i\n", err); + goto out_object; + } cond_resched(); } } @@ -848,6 +852,11 @@ static int igt_vma_partial(void *arg) i915_vma_unpin(vma); nvma++; + err = i915_vma_unbind(vma); + if (err) { + pr_err("Unbinding returned %i\n", err); + goto out_object; + } cond_resched(); } @@ -882,6 +891,12 @@ static int igt_vma_partial(void *arg) i915_vma_unpin(vma); + err = i915_vma_unbind(vma); + if (err) { + pr_err("Unbinding returned %i\n", err); + goto out_object; + } + count = 0; list_for_each_entry(vma, &obj->vma.list, obj_link) count++; diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c index a6c71fca61aa..b84594601d30 100644 --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c @@ -14,7 +14,7 @@ int igt_flush_test(struct drm_i915_private *i915) { - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); int ret = intel_gt_is_wedged(gt) ? -EIO : 0; cond_resched(); diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c index 1c721542e277..72b58b66692a 100644 --- a/drivers/gpu/drm/i915/selftests/igt_live_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c @@ -16,7 +16,7 @@ int igt_live_test_begin(struct igt_live_test *t, const char *func, const char *name) { - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); struct intel_engine_cs *engine; enum intel_engine_id id; int err; @@ -57,7 +57,7 @@ int igt_live_test_end(struct igt_live_test *t) return -EIO; } - for_each_engine(engine, &i915->gt, id) { + for_each_engine(engine, to_gt(i915), id) { if (t->reset_engine[id] == i915_reset_engine_count(&i915->gpu_error, engine)) continue; diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index d3b7fb4d52d1..7acba1d2135e 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -1218,7 +1218,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915) return 0; } - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; return i915_live_subtests(tests, i915); @@ -1230,7 +1230,7 @@ int intel_memory_region_perf_selftests(struct drm_i915_private *i915) SUBTEST(perf_memcpy), }; - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(to_gt(i915))) return 0; return i915_live_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index bc8128170a99..cdd196783535 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -344,5 +344,5 @@ int intel_uncore_live_selftests(struct drm_i915_private *i915) SUBTEST(live_forcewake_domains), }; - return intel_gt_live_subtests(tests, &i915->gt); + return intel_gt_live_subtests(tests, to_gt(i915)); } diff --git 
a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index d0e2e61de8d4..8aa7b1d33865 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -45,7 +45,7 @@ void mock_device_flush(struct drm_i915_private *i915) { - struct intel_gt *gt = &i915->gt; + struct intel_gt *gt = to_gt(i915); struct intel_engine_cs *engine; enum intel_engine_id id; @@ -64,7 +64,7 @@ static void mock_device_release(struct drm_device *dev) goto out; mock_device_flush(i915); - intel_gt_driver_remove(&i915->gt); + intel_gt_driver_remove(to_gt(i915)); i915_gem_drain_workqueue(i915); i915_gem_drain_freed_objects(i915); @@ -73,7 +73,7 @@ static void mock_device_release(struct drm_device *dev) destroy_workqueue(i915->wq); intel_region_ttm_device_fini(i915); - intel_gt_driver_late_release(&i915->gt); + intel_gt_driver_late_release(to_gt(i915)); intel_memory_regions_driver_release(i915); drm_mode_config_cleanup(&i915->drm); @@ -175,14 +175,14 @@ struct drm_i915_private *mock_gem_device(void) mkwrite_device_info(i915)->memory_regions = REGION_SMEM; intel_memory_regions_hw_probe(i915); - mock_uncore_init(&i915->uncore, i915); - spin_lock_init(&i915->gpu_error.lock); i915_gem_init__mm(i915); - intel_gt_init_early(&i915->gt, i915); - atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */ - i915->gt.awake = -ENODEV; + intel_gt_init_early(to_gt(i915), i915); + __intel_gt_init_early(to_gt(i915), i915); + mock_uncore_init(&i915->uncore, i915); + atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */ + to_gt(i915)->awake = -ENODEV; ret = intel_region_ttm_device_init(i915); if (ret) @@ -195,19 +195,19 @@ struct drm_i915_private *mock_gem_device(void) mock_init_contexts(i915); mock_init_ggtt(i915, &i915->ggtt); - i915->gt.vm = i915_vm_get(&i915->ggtt.vm); + to_gt(i915)->vm = i915_vm_get(&i915->ggtt.vm); mkwrite_device_info(i915)->platform_engine_mask = BIT(0); - i915->gt.info.engine_mask = BIT(0); + to_gt(i915)->info.engine_mask = BIT(0); - i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0); - if (!i915->gt.engine[RCS0]) + to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0); + if (!to_gt(i915)->engine[RCS0]) goto err_unlock; - if (mock_engine_init(i915->gt.engine[RCS0])) + if (mock_engine_init(to_gt(i915)->engine[RCS0])) goto err_context; - __clear_bit(I915_WEDGED, &i915->gt.reset.flags); + __clear_bit(I915_WEDGED, &to_gt(i915)->reset.flags); intel_engines_driver_register(i915); i915->do_release = true; @@ -216,13 +216,13 @@ struct drm_i915_private *mock_gem_device(void) return i915; err_context: - intel_gt_driver_remove(&i915->gt); + intel_gt_driver_remove(to_gt(i915)); err_unlock: destroy_workqueue(i915->wq); err_drv: intel_region_ttm_device_fini(i915); err_ttm: - intel_gt_driver_late_release(&i915->gt); + intel_gt_driver_late_release(to_gt(i915)); intel_memory_regions_driver_release(i915); drm_mode_config_cleanup(&i915->drm); mock_destroy_device(i915); diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index cc047ec594f9..1802baf80a17 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -70,7 +70,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) if (!ppgtt) return NULL; - ppgtt->vm.gt = &i915->gt; + ppgtt->vm.gt = to_gt(i915); ppgtt->vm.i915 = i915; ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); ppgtt->vm.dma = i915->drm.dev; @@ -78,6 +78,7 @@ 
struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); ppgtt->vm.alloc_pt_dma = alloc_pt_dma; + ppgtt->vm.alloc_scratch_dma = alloc_pt_dma; ppgtt->vm.clear_range = mock_clear_range; ppgtt->vm.insert_page = mock_insert_page; @@ -86,8 +87,6 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) ppgtt->vm.vma_ops.bind_vma = mock_bind_ppgtt; ppgtt->vm.vma_ops.unbind_vma = mock_unbind_ppgtt; - ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; - ppgtt->vm.vma_ops.clear_pages = clear_pages; return ppgtt; } @@ -109,7 +108,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) { memset(ggtt, 0, sizeof(*ggtt)); - ggtt->vm.gt = &i915->gt; + ggtt->vm.gt = to_gt(i915); ggtt->vm.i915 = i915; ggtt->vm.is_ggtt = true; @@ -118,6 +117,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) ggtt->vm.total = 4096 * PAGE_SIZE; ggtt->vm.alloc_pt_dma = alloc_pt_dma; + ggtt->vm.alloc_scratch_dma = alloc_pt_dma; ggtt->vm.clear_range = mock_clear_range; ggtt->vm.insert_page = mock_insert_page; @@ -126,11 +126,9 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) ggtt->vm.vma_ops.bind_vma = mock_bind_ggtt; ggtt->vm.vma_ops.unbind_vma = mock_unbind_ggtt; - ggtt->vm.vma_ops.set_pages = ggtt_set_pages; - ggtt->vm.vma_ops.clear_pages = clear_pages; i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); - i915->gt.ggtt = ggtt; + to_gt(i915)->ggtt = ggtt; } void mock_fini_ggtt(struct i915_ggtt *ggtt) diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c index ca57e4008701..f2d6be5e1230 100644 --- a/drivers/gpu/drm/i915/selftests/mock_uncore.c +++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c @@ -42,7 +42,7 @@ __nop_read(64) void mock_uncore_init(struct intel_uncore *uncore, struct drm_i915_private *i915) { - intel_uncore_init_early(uncore, i915); + intel_uncore_init_early(uncore, to_gt(i915)); ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop); ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c index 141cb36b9c07..3a53ebc4e172 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c @@ -205,9 +205,15 @@ static const struct mtk_disp_ccorr_data mt8183_ccorr_driver_data = { .matrix_bits = 10, }; +static const struct mtk_disp_ccorr_data mt8192_ccorr_driver_data = { + .matrix_bits = 11, +}; + static const struct of_device_id mtk_disp_ccorr_driver_dt_match[] = { { .compatible = "mediatek,mt8183-disp-ccorr", .data = &mt8183_ccorr_driver_data}, + { .compatible = "mediatek,mt8192-disp-ccorr", + .data = &mt8192_ccorr_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 5326989d5206..2146299e5f52 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -456,6 +456,22 @@ static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = { .fmt_rgb565_is_0 = true, }; +static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 10, + .layer_nr = 4, + .fmt_rgb565_is_0 = true, + .smi_id_en = true, +}; + +static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 10, + .layer_nr = 2, + .fmt_rgb565_is_0 = true, + 
.smi_id_en = true, +}; + static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = { { .compatible = "mediatek,mt2701-disp-ovl", .data = &mt2701_ovl_driver_data}, @@ -465,6 +481,10 @@ static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = { .data = &mt8183_ovl_driver_data}, { .compatible = "mediatek,mt8183-disp-ovl-2l", .data = &mt8183_ovl_2l_driver_data}, + { .compatible = "mediatek,mt8192-disp-ovl", + .data = &mt8192_ovl_driver_data}, + { .compatible = "mediatek,mt8192-disp-ovl-2l", + .data = &mt8192_ovl_2l_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 75d7f45579e2..d41a3970b944 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -353,6 +353,10 @@ static const struct mtk_disp_rdma_data mt8183_rdma_driver_data = { .fifo_size = 5 * SZ_1K, }; +static const struct mtk_disp_rdma_data mt8192_rdma_driver_data = { + .fifo_size = 5 * SZ_1K, +}; + static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = { { .compatible = "mediatek,mt2701-disp-rdma", .data = &mt2701_rdma_driver_data}, @@ -360,6 +364,8 @@ static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = { .data = &mt8173_rdma_driver_data}, { .compatible = "mediatek,mt8183-disp-rdma", .data = &mt8183_rdma_driver_data}, + { .compatible = "mediatek,mt8192-disp-rdma", + .data = &mt8192_rdma_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index a4e80e499674..d661edf7e0fe 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -4,6 +4,8 @@ */ #include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/mailbox_controller.h> #include <linux/pm_runtime.h> #include <linux/soc/mediatek/mtk-cmdq.h> #include <linux/soc/mediatek/mtk-mmsys.h> @@ -50,8 +52,10 @@ struct mtk_drm_crtc { bool pending_async_planes; #if IS_REACHABLE(CONFIG_MTK_CMDQ) - struct cmdq_client *cmdq_client; + struct cmdq_client cmdq_client; + struct cmdq_pkt cmdq_handle; u32 cmdq_event; + u32 cmdq_vblank_cnt; #endif struct device *mmsys_dev; @@ -104,12 +108,60 @@ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc) } } +#if IS_REACHABLE(CONFIG_MTK_CMDQ) +static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, + size_t size) +{ + struct device *dev; + dma_addr_t dma_addr; + + pkt->va_base = kzalloc(size, GFP_KERNEL); + if (!pkt->va_base) { + kfree(pkt); + return -ENOMEM; + } + pkt->buf_size = size; + pkt->cl = (void *)client; + + dev = client->chan->mbox->dev; + dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size); + kfree(pkt->va_base); + kfree(pkt); + return -ENOMEM; + } + + pkt->pa_base = dma_addr; + + return 0; +} + +static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt) +{ + struct cmdq_client *client = (struct cmdq_client *)pkt->cl; + + dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size, + DMA_TO_DEVICE); + kfree(pkt->va_base); + kfree(pkt); +} +#endif + static void mtk_drm_crtc_destroy(struct drm_crtc *crtc) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); mtk_mutex_put(mtk_crtc->mutex); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle); + if 
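/*
 * mtk_drm_cmdq_pkt_create() above pairs kzalloc() with dma_map_single()
 * so the CPU-built GCE command buffer becomes visible to the hardware.
 * A minimal sketch of that map/check/unwind pattern, assuming only a
 * DMA-capable struct device (demo_* names are hypothetical):
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int demo_map_cmd_buffer(struct device *dev, size_t size,
			       void **va, dma_addr_t *pa)
{
	*va = kzalloc(size, GFP_KERNEL);
	if (!*va)
		return -ENOMEM;

	*pa = dma_map_single(dev, *va, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *pa)) {	/* a mapping can fail; always check */
		kfree(*va);
		*va = NULL;
		return -ENOMEM;
	}

	return 0;	/* undo with dma_unmap_single() + kfree(), as the destroy path does */
}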
(mtk_crtc->cmdq_client.chan) { + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; + } +#endif drm_crtc_cleanup(crtc); } @@ -222,9 +274,46 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, } #if IS_REACHABLE(CONFIG_MTK_CMDQ) -static void ddp_cmdq_cb(struct cmdq_cb_data data) +static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) { - cmdq_pkt_destroy(data.data); + struct cmdq_cb_data *data = mssg; + struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client); + struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client); + struct mtk_crtc_state *state; + unsigned int i; + + if (data->sta < 0) + return; + + state = to_mtk_crtc_state(mtk_crtc->base.state); + + state->pending_config = false; + + if (mtk_crtc->pending_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + plane_state->pending.config = false; + } + mtk_crtc->pending_planes = false; + } + + if (mtk_crtc->pending_async_planes) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + plane_state->pending.async_config = false; + } + mtk_crtc->pending_async_planes = false; + } + + mtk_crtc->cmdq_vblank_cnt = 0; } #endif @@ -378,7 +467,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc, state->pending_vrefresh, 0, cmdq_handle); - state->pending_config = false; + if (!cmdq_handle) + state->pending_config = false; } if (mtk_crtc->pending_planes) { @@ -398,9 +488,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc, mtk_ddp_comp_layer_config(comp, local_layer, plane_state, cmdq_handle); - plane_state->pending.config = false; + if (!cmdq_handle) + plane_state->pending.config = false; } - mtk_crtc->pending_planes = false; + + if (!cmdq_handle) + mtk_crtc->pending_planes = false; } if (mtk_crtc->pending_async_planes) { @@ -420,9 +513,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc, mtk_ddp_comp_layer_config(comp, local_layer, plane_state, cmdq_handle); - plane_state->pending.async_config = false; + if (!cmdq_handle) + plane_state->pending.async_config = false; } - mtk_crtc->pending_async_planes = false; + + if (!cmdq_handle) + mtk_crtc->pending_async_planes = false; } } @@ -430,7 +526,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc, bool needs_vblank) { #if IS_REACHABLE(CONFIG_MTK_CMDQ) - struct cmdq_pkt *cmdq_handle; + struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle; #endif struct drm_crtc *crtc = &mtk_crtc->base; struct mtk_drm_private *priv = crtc->dev->dev_private; @@ -468,14 +564,28 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc, mtk_mutex_release(mtk_crtc->mutex); } #if IS_REACHABLE(CONFIG_MTK_CMDQ) - if (mtk_crtc->cmdq_client) { - mbox_flush(mtk_crtc->cmdq_client->chan, 2000); - cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE); + if (mtk_crtc->cmdq_client.chan) { + mbox_flush(mtk_crtc->cmdq_client.chan, 2000); + cmdq_handle->cmd_buf_size = 0; cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false); mtk_crtc_ddp_config(crtc, cmdq_handle); cmdq_pkt_finalize(cmdq_handle); - cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle); + dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev, + 
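/*
 * ddp_cmdq_cb() above recovers the CRTC from the mbox_client embedded in
 * it (via the cmdq_client wrapper) and only then clears the pending
 * flags, i.e. once the GCE has actually consumed the packet. A sketch of
 * the container_of() recovery step with a single level of embedding and
 * hypothetical demo_* names:
 */
#include <linux/container_of.h>
#include <linux/mailbox_client.h>

struct demo_crtc {
	struct mbox_client cl;	/* must be embedded, not pointed to */
	bool pending;
};

static void demo_rx_callback(struct mbox_client *cl, void *mssg)
{
	struct demo_crtc *crtc = container_of(cl, struct demo_crtc, cl);

	crtc->pending = false;	/* safe: hardware is done with the packet */
}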
cmdq_handle->pa_base, + cmdq_handle->cmd_buf_size, + DMA_TO_DEVICE); + /* + * CMDQ command should execute in next 3 vblank. + * One vblank interrupt before send message (occasionally) + * and one vblank interrupt after cmdq done, + * so it's timeout after 3 vblank interrupt. + * If it fail to execute in next 3 vblank, timeout happen. + */ + mtk_crtc->cmdq_vblank_cnt = 3; + + mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle); + mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0); } #endif mtk_crtc->config_updating = false; @@ -489,12 +599,15 @@ static void mtk_crtc_ddp_irq(void *data) struct mtk_drm_private *priv = crtc->dev->dev_private; #if IS_REACHABLE(CONFIG_MTK_CMDQ) - if (!priv->data->shadow_register && !mtk_crtc->cmdq_client) + if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan) + mtk_crtc_ddp_config(crtc, NULL); + else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0) + DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n", + drm_crtc_index(&mtk_crtc->base)); #else if (!priv->data->shadow_register) -#endif mtk_crtc_ddp_config(crtc, NULL); - +#endif mtk_drm_finish_page_flip(mtk_crtc); } @@ -829,16 +942,20 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, mutex_init(&mtk_crtc->hw_lock); #if IS_REACHABLE(CONFIG_MTK_CMDQ) - mtk_crtc->cmdq_client = - cmdq_mbox_create(mtk_crtc->mmsys_dev, - drm_crtc_index(&mtk_crtc->base)); - if (IS_ERR(mtk_crtc->cmdq_client)) { + mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev; + mtk_crtc->cmdq_client.client.tx_block = false; + mtk_crtc->cmdq_client.client.knows_txdone = true; + mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb; + mtk_crtc->cmdq_client.chan = + mbox_request_channel(&mtk_crtc->cmdq_client.client, + drm_crtc_index(&mtk_crtc->base)); + if (IS_ERR(mtk_crtc->cmdq_client.chan)) { dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", drm_crtc_index(&mtk_crtc->base)); - mtk_crtc->cmdq_client = NULL; + mtk_crtc->cmdq_client.chan = NULL; } - if (mtk_crtc->cmdq_client) { + if (mtk_crtc->cmdq_client.chan) { ret = of_property_read_u32_index(priv->mutex_node, "mediatek,gce-events", drm_crtc_index(&mtk_crtc->base), @@ -846,8 +963,18 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, if (ret) { dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n", drm_crtc_index(&mtk_crtc->base)); - cmdq_mbox_destroy(mtk_crtc->cmdq_client); - mtk_crtc->cmdq_client = NULL; + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; + } else { + ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client, + &mtk_crtc->cmdq_handle, + PAGE_SIZE); + if (ret) { + dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n", + drm_crtc_index(&mtk_crtc->base)); + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; + } } } #endif diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 99cbf44463e4..b4b682bc1991 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -20,45 +20,39 @@ #include "mtk_drm_ddp_comp.h" #include "mtk_drm_crtc.h" -#define DISP_OD_EN 0x0000 -#define DISP_OD_INTEN 0x0008 -#define DISP_OD_INTSTA 0x000c -#define DISP_OD_CFG 0x0020 -#define DISP_OD_SIZE 0x0030 -#define DISP_DITHER_5 0x0114 -#define DISP_DITHER_7 0x011c -#define DISP_DITHER_15 0x013c -#define DISP_DITHER_16 0x0140 -#define DISP_REG_UFO_START 0x0000 - -#define DISP_DITHER_EN 0x0000 +#define DISP_REG_DITHER_EN 0x0000 #define 
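/*
 * mtk_drm_crtc_create() above now requests the GCE mailbox channel
 * itself instead of calling cmdq_mbox_create(). A sketch of the client
 * configuration it uses, reusing the demo_rx_callback sketched earlier:
 */
#include <linux/mailbox_client.h>

static struct mbox_chan *demo_request_gce_chan(struct device *dev,
					       struct mbox_client *cl,
					       int index)
{
	cl->dev = dev;
	cl->tx_block = false;		/* don't sleep inside mbox_send_message() */
	cl->knows_txdone = true;	/* driver reports mbox_client_txdone() itself */
	cl->rx_callback = demo_rx_callback;

	return mbox_request_channel(cl, index);	/* ERR_PTR() on failure */
}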
DITHER_EN BIT(0) -#define DISP_DITHER_CFG 0x0020 +#define DISP_REG_DITHER_CFG 0x0020 #define DITHER_RELAY_MODE BIT(0) #define DITHER_ENGINE_EN BIT(1) -#define DISP_DITHER_SIZE 0x0030 - -#define LUT_10BIT_MASK 0x03ff - -#define OD_RELAYMODE BIT(0) - -#define UFO_BYPASS BIT(2) - #define DISP_DITHERING BIT(2) +#define DISP_REG_DITHER_SIZE 0x0030 +#define DISP_REG_DITHER_5 0x0114 +#define DISP_REG_DITHER_7 0x011c +#define DISP_REG_DITHER_15 0x013c #define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28) -#define DITHER_OVFLW_BIT_R(x) (((x) & 0x7) << 24) #define DITHER_ADD_LSHIFT_R(x) (((x) & 0x7) << 20) -#define DITHER_ADD_RSHIFT_R(x) (((x) & 0x7) << 16) #define DITHER_NEW_BIT_MODE BIT(0) +#define DISP_REG_DITHER_16 0x0140 #define DITHER_LSB_ERR_SHIFT_B(x) (((x) & 0x7) << 28) -#define DITHER_OVFLW_BIT_B(x) (((x) & 0x7) << 24) #define DITHER_ADD_LSHIFT_B(x) (((x) & 0x7) << 20) -#define DITHER_ADD_RSHIFT_B(x) (((x) & 0x7) << 16) #define DITHER_LSB_ERR_SHIFT_G(x) (((x) & 0x7) << 12) -#define DITHER_OVFLW_BIT_G(x) (((x) & 0x7) << 8) #define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) -#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0) + +#define DISP_REG_OD_EN 0x0000 +#define DISP_REG_OD_CFG 0x0020 +#define OD_RELAYMODE BIT(0) +#define DISP_REG_OD_SIZE 0x0030 + +#define DISP_REG_POSTMASK_EN 0x0000 +#define POSTMASK_EN BIT(0) +#define DISP_REG_POSTMASK_CFG 0x0020 +#define POSTMASK_RELAY_MODE BIT(0) +#define DISP_REG_POSTMASK_SIZE 0x0030 + +#define DISP_REG_UFO_START 0x0000 +#define UFO_BYPASS BIT(2) struct mtk_ddp_comp_dev { struct clk *clk; @@ -134,25 +128,52 @@ void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg, return; if (bpc >= MTK_MIN_BPC) { - mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_5); - mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_7); + mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_5); + mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_7); mtk_ddp_write(cmdq_pkt, DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | DITHER_NEW_BIT_MODE, - cmdq_reg, regs, DISP_DITHER_15); + cmdq_reg, regs, DISP_REG_DITHER_15); mtk_ddp_write(cmdq_pkt, DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), - cmdq_reg, regs, DISP_DITHER_16); + cmdq_reg, regs, DISP_REG_DITHER_16); mtk_ddp_write(cmdq_pkt, dither_en, cmdq_reg, regs, cfg); } } +static void mtk_dither_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE); + mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, + DISP_REG_DITHER_CFG); + mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG, + DITHER_ENGINE_EN, cmdq_pkt); +} + +static void mtk_dither_start(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel(DITHER_EN, priv->regs + DISP_REG_DITHER_EN); +} + +static void mtk_dither_stop(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel_relaxed(0x0, priv->regs + DISP_REG_DITHER_EN); +} + static void mtk_dither_set(struct device *dev, unsigned int bpc, - unsigned int cfg, struct cmdq_pkt *cmdq_pkt) + unsigned int cfg, struct cmdq_pkt *cmdq_pkt) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); @@ 
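/*
 * The DITHER_*_SHIFT_*() macros above open-code "((x) & 0x7) << n". The
 * same fields could be expressed with FIELD_PREP() from
 * <linux/bitfield.h>; this is an alternative idiom, not what the commit
 * uses:
 */
#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_LSB_ERR_SHIFT_R	GENMASK(30, 28)	/* ((x) & 0x7) << 28 */
#define DEMO_ADD_LSHIFT_R	GENMASK(22, 20)	/* ((x) & 0x7) << 20 */
#define DEMO_NEW_BIT_MODE	BIT(0)

static u32 demo_dither_15(u32 shift)
{
	return FIELD_PREP(DEMO_LSB_ERR_SHIFT_R, shift) |
	       FIELD_PREP(DEMO_ADD_LSHIFT_R, shift) |
	       DEMO_NEW_BIT_MODE;
}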
-166,49 +187,49 @@ static void mtk_od_config(struct device *dev, unsigned int w, { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_OD_SIZE); - mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_OD_CFG); - mtk_dither_set(dev, bpc, DISP_OD_CFG, cmdq_pkt); + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_OD_SIZE); + mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_REG_OD_CFG); + mtk_dither_set(dev, bpc, DISP_REG_OD_CFG, cmdq_pkt); } static void mtk_od_start(struct device *dev) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - writel(1, priv->regs + DISP_OD_EN); + writel(1, priv->regs + DISP_REG_OD_EN); } -static void mtk_ufoe_start(struct device *dev) +static void mtk_postmask_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START); + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, + DISP_REG_POSTMASK_SIZE); + mtk_ddp_write(cmdq_pkt, POSTMASK_RELAY_MODE, &priv->cmdq_reg, + priv->regs, DISP_REG_POSTMASK_CFG); } -static void mtk_dither_config(struct device *dev, unsigned int w, - unsigned int h, unsigned int vrefresh, - unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +static void mtk_postmask_start(struct device *dev) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_DITHER_SIZE); - mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, DISP_DITHER_CFG); - mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_DITHER_CFG, - DITHER_ENGINE_EN, cmdq_pkt); + writel(POSTMASK_EN, priv->regs + DISP_REG_POSTMASK_EN); } -static void mtk_dither_start(struct device *dev) +static void mtk_postmask_stop(struct device *dev) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - writel(DITHER_EN, priv->regs + DISP_DITHER_EN); + writel_relaxed(0x0, priv->regs + DISP_REG_POSTMASK_EN); } -static void mtk_dither_stop(struct device *dev) +static void mtk_ufoe_start(struct device *dev) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - writel_relaxed(0x0, priv->regs + DISP_DITHER_EN); + writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START); } static const struct mtk_ddp_comp_funcs ddp_aal = { @@ -286,6 +307,14 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = { .bgclr_in_off = mtk_ovl_bgclr_in_off, }; +static const struct mtk_ddp_comp_funcs ddp_postmask = { + .clk_enable = mtk_ddp_clk_enable, + .clk_disable = mtk_ddp_clk_disable, + .config = mtk_postmask_config, + .start = mtk_postmask_start, + .stop = mtk_postmask_stop, +}; + static const struct mtk_ddp_comp_funcs ddp_rdma = { .clk_enable = mtk_rdma_clk_enable, .clk_disable = mtk_rdma_clk_disable, @@ -305,22 +334,23 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = { }; static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = { + [MTK_DISP_AAL] = "aal", + [MTK_DISP_BLS] = "bls", + [MTK_DISP_CCORR] = "ccorr", + [MTK_DISP_COLOR] = "color", + [MTK_DISP_DITHER] = "dither", + [MTK_DISP_GAMMA] = "gamma", + [MTK_DISP_MUTEX] = "mutex", + [MTK_DISP_OD] = "od", [MTK_DISP_OVL] = "ovl", [MTK_DISP_OVL_2L] = "ovl-2l", + [MTK_DISP_POSTMASK] = "postmask", + [MTK_DISP_PWM] = "pwm", [MTK_DISP_RDMA] = "rdma", - [MTK_DISP_WDMA] = "wdma", - [MTK_DISP_COLOR] = "color", - [MTK_DISP_CCORR] = "ccorr", - 
[MTK_DISP_AAL] = "aal", - [MTK_DISP_GAMMA] = "gamma", - [MTK_DISP_DITHER] = "dither", [MTK_DISP_UFOE] = "ufoe", - [MTK_DSI] = "dsi", + [MTK_DISP_WDMA] = "wdma", [MTK_DPI] = "dpi", - [MTK_DISP_PWM] = "pwm", - [MTK_DISP_MUTEX] = "mutex", - [MTK_DISP_OD] = "od", - [MTK_DISP_BLS] = "bls", + [MTK_DSI] = "dsi", }; struct mtk_ddp_comp_match { @@ -330,35 +360,38 @@ struct mtk_ddp_comp_match { }; static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { - [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal }, - [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal }, - [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, - [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr }, - [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, - [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, - [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither }, - [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi }, - [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi }, - [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi }, - [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi }, - [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi }, - [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi }, - [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma }, - [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od }, - [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od }, - [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl }, - [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl }, - [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl }, - [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl }, - [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL }, - [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL }, - [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL }, - [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma }, - [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma }, - [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma }, - [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe }, - [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL }, - [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, + [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal }, + [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal }, + [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, + [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr }, + [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, + [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, + [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither }, + [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi }, + [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi }, + [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi }, + [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi }, + [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi }, + [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi }, + [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma }, + [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od }, + [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od }, + [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl }, + [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L2] = { MTK_DISP_OVL_2L, 2, &ddp_ovl }, + [DDP_COMPONENT_POSTMASK0] = { MTK_DISP_POSTMASK, 0, &ddp_postmask }, + [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL }, + [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL }, + [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL }, + [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, 
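/*
 * The component tables here are built with designated initializers keyed
 * by enum value, so re-sorting the entries alphabetically (and slotting
 * in POSTMASK) cannot change which index an entry occupies. A minimal
 * self-contained sketch of why the listing order is irrelevant:
 */
enum demo_comp { DEMO_AAL, DEMO_OVL, DEMO_RDMA, DEMO_COMP_MAX };

static const char * const demo_stem[DEMO_COMP_MAX] = {
	[DEMO_RDMA] = "rdma",	/* order of initializers does not matter */
	[DEMO_AAL]  = "aal",
	[DEMO_OVL]  = "ovl",
};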
&ddp_rdma }, + [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma }, + [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma }, + [DDP_COMPONENT_RDMA4] = { MTK_DISP_RDMA, 4, &ddp_rdma }, + [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe }, + [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL }, + [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, }; static bool mtk_drm_find_comp_in_ddp(struct device *dev, @@ -475,12 +508,12 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp, type == MTK_DISP_CCORR || type == MTK_DISP_COLOR || type == MTK_DISP_GAMMA || - type == MTK_DPI || - type == MTK_DSI || type == MTK_DISP_OVL || type == MTK_DISP_OVL_2L || type == MTK_DISP_PWM || - type == MTK_DISP_RDMA) + type == MTK_DISP_RDMA || + type == MTK_DPI || + type == MTK_DSI) return 0; priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index bb914d976cf5..4c6a98662305 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -18,22 +18,23 @@ struct mtk_plane_state; struct drm_crtc_state; enum mtk_ddp_comp_type { - MTK_DISP_OVL, - MTK_DISP_OVL_2L, - MTK_DISP_RDMA, - MTK_DISP_WDMA, - MTK_DISP_COLOR, + MTK_DISP_AAL, + MTK_DISP_BLS, MTK_DISP_CCORR, + MTK_DISP_COLOR, MTK_DISP_DITHER, - MTK_DISP_AAL, MTK_DISP_GAMMA, - MTK_DISP_UFOE, - MTK_DSI, - MTK_DPI, - MTK_DISP_PWM, MTK_DISP_MUTEX, MTK_DISP_OD, - MTK_DISP_BLS, + MTK_DISP_OVL, + MTK_DISP_OVL_2L, + MTK_DISP_POSTMASK, + MTK_DISP_PWM, + MTK_DISP_RDMA, + MTK_DISP_UFOE, + MTK_DISP_WDMA, + MTK_DPI, + MTK_DSI, MTK_DDP_COMP_TYPE_MAX, }; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index e336358fee20..dd029307be7d 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -158,6 +158,25 @@ static const enum mtk_ddp_comp_id mt8183_mtk_ddp_ext[] = { DDP_COMPONENT_DPI0, }; +static const enum mtk_ddp_comp_id mt8192_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_OVL_2L0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_CCORR, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_GAMMA, + DDP_COMPONENT_POSTMASK0, + DDP_COMPONENT_DITHER, + DDP_COMPONENT_DSI0, +}; + +static const enum mtk_ddp_comp_id mt8192_mtk_ddp_ext[] = { + DDP_COMPONENT_OVL_2L2, + DDP_COMPONENT_RDMA4, + DDP_COMPONENT_DPI0, +}; + static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = { .main_path = mt2701_mtk_ddp_main, .main_len = ARRAY_SIZE(mt2701_mtk_ddp_main), @@ -202,6 +221,13 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = { .ext_len = ARRAY_SIZE(mt8183_mtk_ddp_ext), }; +static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = { + .main_path = mt8192_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt8192_mtk_ddp_main), + .ext_path = mt8192_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt8192_mtk_ddp_ext), +}; + static int mtk_drm_kms_init(struct drm_device *drm) { struct mtk_drm_private *private = drm->dev_private; @@ -400,68 +426,36 @@ static const struct component_master_ops mtk_drm_ops = { }; static const struct of_device_id mtk_ddp_comp_dt_ids[] = { - { .compatible = "mediatek,mt2701-disp-ovl", - .data = (void *)MTK_DISP_OVL }, - { .compatible = "mediatek,mt8167-disp-ovl", - .data = (void *)MTK_DISP_OVL }, - { .compatible = "mediatek,mt8173-disp-ovl", - .data = (void *)MTK_DISP_OVL }, - { .compatible = "mediatek,mt8183-disp-ovl", - .data = (void *)MTK_DISP_OVL }, - { .compatible = 
"mediatek,mt8183-disp-ovl-2l", - .data = (void *)MTK_DISP_OVL_2L }, - { .compatible = "mediatek,mt2701-disp-rdma", - .data = (void *)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8167-disp-rdma", - .data = (void *)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8173-disp-rdma", - .data = (void *)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8183-disp-rdma", - .data = (void *)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8173-disp-wdma", - .data = (void *)MTK_DISP_WDMA }, + { .compatible = "mediatek,mt8167-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8173-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8183-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8192-disp-aal", + .data = (void *)MTK_DISP_AAL}, { .compatible = "mediatek,mt8167-disp-ccorr", .data = (void *)MTK_DISP_CCORR }, { .compatible = "mediatek,mt8183-disp-ccorr", .data = (void *)MTK_DISP_CCORR }, + { .compatible = "mediatek,mt8192-disp-ccorr", + .data = (void *)MTK_DISP_CCORR }, { .compatible = "mediatek,mt2701-disp-color", .data = (void *)MTK_DISP_COLOR }, { .compatible = "mediatek,mt8167-disp-color", .data = (void *)MTK_DISP_COLOR }, { .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR }, - { .compatible = "mediatek,mt8167-disp-aal", - .data = (void *)MTK_DISP_AAL}, - { .compatible = "mediatek,mt8173-disp-aal", - .data = (void *)MTK_DISP_AAL}, - { .compatible = "mediatek,mt8183-disp-aal", - .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8167-disp-dither", + .data = (void *)MTK_DISP_DITHER }, + { .compatible = "mediatek,mt8183-disp-dither", + .data = (void *)MTK_DISP_DITHER }, { .compatible = "mediatek,mt8167-disp-gamma", .data = (void *)MTK_DISP_GAMMA, }, { .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, }, { .compatible = "mediatek,mt8183-disp-gamma", .data = (void *)MTK_DISP_GAMMA, }, - { .compatible = "mediatek,mt8167-disp-dither", - .data = (void *)MTK_DISP_DITHER }, - { .compatible = "mediatek,mt8183-disp-dither", - .data = (void *)MTK_DISP_DITHER }, - { .compatible = "mediatek,mt8173-disp-ufoe", - .data = (void *)MTK_DISP_UFOE }, - { .compatible = "mediatek,mt2701-dsi", - .data = (void *)MTK_DSI }, - { .compatible = "mediatek,mt8167-dsi", - .data = (void *)MTK_DSI }, - { .compatible = "mediatek,mt8173-dsi", - .data = (void *)MTK_DSI }, - { .compatible = "mediatek,mt8183-dsi", - .data = (void *)MTK_DSI }, - { .compatible = "mediatek,mt2701-dpi", - .data = (void *)MTK_DPI }, - { .compatible = "mediatek,mt8173-dpi", - .data = (void *)MTK_DPI }, - { .compatible = "mediatek,mt8183-dpi", - .data = (void *)MTK_DPI }, { .compatible = "mediatek,mt2701-disp-mutex", .data = (void *)MTK_DISP_MUTEX }, { .compatible = "mediatek,mt2712-disp-mutex", @@ -472,14 +466,60 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = { .data = (void *)MTK_DISP_MUTEX }, { .compatible = "mediatek,mt8183-disp-mutex", .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8192-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8173-disp-od", + .data = (void *)MTK_DISP_OD }, + { .compatible = "mediatek,mt2701-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8167-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8173-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8183-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8192-disp-ovl", + .data = (void *)MTK_DISP_OVL 
}, + { .compatible = "mediatek,mt8183-disp-ovl-2l", + .data = (void *)MTK_DISP_OVL_2L }, + { .compatible = "mediatek,mt8192-disp-ovl-2l", + .data = (void *)MTK_DISP_OVL_2L }, + { .compatible = "mediatek,mt8192-disp-postmask", + .data = (void *)MTK_DISP_POSTMASK }, { .compatible = "mediatek,mt2701-disp-pwm", .data = (void *)MTK_DISP_BLS }, { .compatible = "mediatek,mt8167-disp-pwm", .data = (void *)MTK_DISP_PWM }, { .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM }, - { .compatible = "mediatek,mt8173-disp-od", - .data = (void *)MTK_DISP_OD }, + { .compatible = "mediatek,mt2701-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8167-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8173-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8183-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8192-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8173-disp-ufoe", + .data = (void *)MTK_DISP_UFOE }, + { .compatible = "mediatek,mt8173-disp-wdma", + .data = (void *)MTK_DISP_WDMA }, + { .compatible = "mediatek,mt2701-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt8167-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8173-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt8183-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt2701-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8173-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8183-dsi", + .data = (void *)MTK_DSI }, { } }; @@ -496,6 +536,8 @@ static const struct of_device_id mtk_drm_of_ids[] = { .data = &mt8173_mmsys_driver_data}, { .compatible = "mediatek,mt8183-mmsys", .data = &mt8183_mmsys_driver_data}, + { .compatible = "mediatek,mt8192-mmsys", + .data = &mt8192_mmsys_driver_data}, { } }; MODULE_DEVICE_TABLE(of, mtk_drm_of_ids); @@ -571,8 +613,8 @@ static int mtk_drm_probe(struct platform_device *pdev) comp_type == MTK_DISP_OVL || comp_type == MTK_DISP_OVL_2L || comp_type == MTK_DISP_RDMA || - comp_type == MTK_DSI || - comp_type == MTK_DPI) { + comp_type == MTK_DPI || + comp_type == MTK_DSI) { dev_info(dev, "Adding component match for %pOF\n", node); drm_of_component_match_add(dev, &match, compare_of, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index 734a1fb052df..075747a6d4aa 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -44,9 +44,10 @@ static void mtk_plane_reset(struct drm_plane *plane) state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return; - plane->state = &state->base; } + __drm_atomic_helper_plane_reset(plane, &state->base); + state->base.plane = plane; state->pending.format = DRM_FORMAT_RGB565; } diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 5838c44cbf6f..3196189429bc 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1224,12 +1224,14 @@ static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge, return MODE_BAD; } - if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode)) - return MODE_BAD; + if (hdmi->conf) { + if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode)) + return MODE_BAD; - if (hdmi->conf->max_mode_clock && - mode->clock > hdmi->conf->max_mode_clock) - return MODE_CLOCK_HIGH; + if (hdmi->conf->max_mode_clock && + mode->clock > 
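/*
 * The mtk_plane_reset() fix above switches to
 * __drm_atomic_helper_plane_reset(), which resets the common plane-state
 * fields to their defaults and installs the state on the plane, rather
 * than only assigning plane->state. A sketch with a hypothetical wrapper
 * state:
 */
#include <drm/drm_atomic_state_helper.h>
#include <linux/types.h>

struct demo_plane_state {
	struct drm_plane_state base;
	u32 pending_format;
};

static void demo_plane_reset_tail(struct drm_plane *plane,
				  struct demo_plane_state *state)
{
	__drm_atomic_helper_plane_reset(plane, &state->base);
	state->pending_format = 0;	/* driver-private defaults follow */
}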
hdmi->conf->max_mode_clock) + return MODE_CLOCK_HIGH; + } if (mode->clock < 27000) return MODE_CLOCK_LOW; diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 75015b0e165e..4fd931413705 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -66,6 +66,7 @@ config DRM_MSM_HDMI_HDCP config DRM_MSM_DP bool "Enable DisplayPort support in MSM DRM driver" depends on DRM_MSM + select RATIONAL default y help Compile in support for DP driver in MSM DRM driver. DP external diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 093454457545..03ab55c37beb 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -19,7 +19,7 @@ msm-y := \ hdmi/hdmi.o \ hdmi/hdmi_audio.o \ hdmi/hdmi_bridge.o \ - hdmi/hdmi_connector.o \ + hdmi/hdmi_hpd.o \ hdmi/hdmi_i2c.o \ hdmi/hdmi_phy.o \ hdmi/hdmi_phy_8960.o \ @@ -27,12 +27,6 @@ msm-y := \ hdmi/hdmi_phy_8x60.o \ hdmi/hdmi_phy_8x74.o \ hdmi/hdmi_pll_8960.o \ - edp/edp.o \ - edp/edp_aux.o \ - edp/edp_bridge.o \ - edp/edp_connector.o \ - edp/edp_ctrl.o \ - edp/edp_phy.o \ disp/mdp_format.o \ disp/mdp_kms.o \ disp/mdp4/mdp4_crtc.o \ diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index bdc989183c64..22e8295a5e2b 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -12,7 +12,6 @@ static bool a2xx_idle(struct msm_gpu *gpu); static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { - struct msm_drm_private *priv = gpu->dev->dev_private; struct msm_ringbuffer *ring = submit->ring; unsigned int i; @@ -23,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: /* ignore if there has not been a ctx switch: */ - if (priv->lastctx == submit->queue->ctx) + if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 8fb847c174ff..2e481e2692ba 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -30,7 +30,6 @@ static bool a3xx_idle(struct msm_gpu *gpu); static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { - struct msm_drm_private *priv = gpu->dev->dev_private; struct msm_ringbuffer *ring = submit->ring; unsigned int i; @@ -41,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: /* ignore if there has not been a ctx switch: */ - if (priv->lastctx == submit->queue->ctx) + if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index a96ee79cc5e0..c5524d6e8705 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -24,7 +24,6 @@ static bool a4xx_idle(struct msm_gpu *gpu); static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { - struct msm_drm_private *priv = gpu->dev->dev_private; struct msm_ringbuffer *ring = submit->ring; unsigned int i; @@ -35,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: /* ignore if there has not been a ctx switch: */ - if (priv->lastctx == submit->queue->ctx) + if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case 
MSM_SUBMIT_CMD_BUF: diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c index dd593ec2bc56..6bd397a85834 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c @@ -107,7 +107,7 @@ reset_set(void *data, u64 val) * try to reset an active GPU. */ - mutex_lock(&dev->struct_mutex); + mutex_lock(&gpu->lock); release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]); adreno_gpu->fw[ADRENO_FW_PM4] = NULL; @@ -133,7 +133,7 @@ reset_set(void *data, u64 val) gpu->funcs->recover(gpu); pm_runtime_put_sync(&gpu->pdev->dev); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&gpu->lock); return 0; } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 5e2750eb3810..3d28fcf841a6 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -65,7 +65,6 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit) { - struct msm_drm_private *priv = gpu->dev->dev_private; struct msm_ringbuffer *ring = submit->ring; struct msm_gem_object *obj; uint32_t *ptr, dwords; @@ -76,7 +75,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (priv->lastctx == submit->queue->ctx) + if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -126,12 +125,11 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); - struct msm_drm_private *priv = gpu->dev->dev_private; struct msm_ringbuffer *ring = submit->ring; unsigned int i, ibs = 0; if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) { - priv->lastctx = NULL; + gpu->cur_ctx_seqno = 0; a5xx_submit_in_rb(gpu, submit); return; } @@ -166,7 +164,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (priv->lastctx == submit->queue->ctx) + if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -441,7 +439,7 @@ void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) const struct adreno_five_hwcg_regs *regs; unsigned int i, sz; - if (adreno_is_a508(adreno_gpu)) { + if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) { regs = a50x_hwcg; sz = ARRAY_SIZE(a50x_hwcg); } else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) { @@ -485,7 +483,7 @@ static int a5xx_me_init(struct msm_gpu *gpu) OUT_RING(ring, 0x00000000); /* Specify workarounds for various microcode issues */ - if (adreno_is_a530(adreno_gpu)) { + if (adreno_is_a506(adreno_gpu) || adreno_is_a530(adreno_gpu)) { /* Workaround for token end syncs * Force a WFI after every direct-render 3D mode draw and every * 2D mode 3 draw @@ -620,8 +618,16 @@ static int a5xx_ucode_init(struct msm_gpu *gpu) static int a5xx_zap_shader_resume(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); int ret; + /* + * Adreno 506 have CPZ Retention feature and doesn't require + * to resume zap shader + */ + if (adreno_is_a506(adreno_gpu)) + return 0; + ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID); if (ret) DRM_ERROR("%s: zap-shader resume failed: %d\n", @@ -733,9 +739,10 @@ static int 
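/*
 * The a2xx-a5xx submit paths above stop comparing context pointers
 * (priv->lastctx) and compare gpu->cur_ctx_seqno instead: a freed
 * context's address can be recycled for a new context, so pointer
 * equality can falsely skip a pagetable switch, while a monotonically
 * increasing sequence number cannot collide. A minimal sketch, assuming
 * seqnos start at 1 so 0 can mean "force a switch":
 */
#include <linux/types.h>

struct demo_ctx {
	u32 seqno;	/* assumed taken from a monotonic counter, never 0 */
};

static bool demo_ctx_is_current(u32 cur_ctx_seqno, const struct demo_ctx *ctx)
{
	return cur_ctx_seqno && cur_ctx_seqno == ctx->seqno;
}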
a5xx_hw_init(struct msm_gpu *gpu) 0x00100000 + adreno_gpu->gmem - 1); gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000); - if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) { + if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) || + adreno_is_a510(adreno_gpu)) { gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20); - if (adreno_is_a508(adreno_gpu)) + if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); else gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20); @@ -751,7 +758,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16); } - if (adreno_is_a508(adreno_gpu)) + if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x100 << 11 | 0x100 << 22)); else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) || @@ -769,8 +776,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) * Disable the RB sampler datapath DP2 clock gating optimization * for 1-SP GPUs, as it is enabled by default. */ - if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) || - adreno_is_a512(adreno_gpu)) + if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) || + adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9)); /* Disable UCHE global filter as SP can invalidate/flush independently */ @@ -851,10 +858,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) /* UCHE */ gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16)); - if (adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) || - adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu) || - adreno_is_a530(adreno_gpu)) - gpu_write(gpu, REG_A5XX_CP_PROTECT(17), + /* SMMU */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(17), ADRENO_PROTECT_RW(0x10000, 0x8000)); gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0); @@ -895,8 +900,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) if (ret) return ret; - if (!(adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) || - adreno_is_a510(adreno_gpu) || adreno_is_a512(adreno_gpu))) + if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)) a5xx_gpmu_ucode_init(gpu); ret = a5xx_ucode_init(gpu); @@ -927,6 +931,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) if (IS_ERR(a5xx_gpu->shadow)) return PTR_ERR(a5xx_gpu->shadow); + + msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow"); } gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR, @@ -1254,6 +1260,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu) static irqreturn_t a5xx_irq(struct msm_gpu *gpu) { + struct msm_drm_private *priv = gpu->dev->dev_private; u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS); /* @@ -1263,6 +1270,11 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR); + if (priv->disable_err_irq) { + status &= A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | + A5XX_RBBM_INT_0_MASK_CP_SW; + } + /* Pass status to a5xx_rbbm_err_irq because we've already cleared it */ if (status & RBBM_ERROR_MASK) a5xx_rbbm_err_irq(gpu, status); @@ -1338,7 +1350,7 @@ static int a5xx_pm_resume(struct msm_gpu *gpu) if (ret) return ret; - /* Adreno 508, 509, 510, 512 needs manual RBBM sus/res control */ + /* Adreno 506, 508, 509, 510, 512 needs manual RBBM sus/res control */ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) { /* Halt the sp_input_clk at HM level */ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055); @@ -1381,8 
+1393,9 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu) u32 mask = 0xf; int i, ret; - /* A508, A510 have 3 XIN ports in VBIF */ - if (adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) + /* A506, A508, A510 have 3 XIN ports in VBIF */ + if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) || + adreno_is_a510(adreno_gpu)) mask = 0x7; /* Clear the VBIF pipe before shutting down */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 71e52b2b2025..3e325e2a2b1b 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -1146,7 +1146,7 @@ static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu) } static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, - size_t size, u64 iova) + size_t size, u64 iova, const char *name) { struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct drm_device *dev = a6xx_gpu->base.base.dev; @@ -1181,6 +1181,8 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, bo->virt = msm_gem_get_vaddr(bo->obj); bo->size = size; + msm_gem_object_set_name(bo->obj, name); + return 0; } @@ -1515,7 +1517,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) */ gmu->dummy.size = SZ_4K; if (adreno_is_a660_family(adreno_gpu)) { - ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000); + ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, + 0x60400000, "debug"); if (ret) goto err_memory; @@ -1523,42 +1526,46 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) } /* Allocate memory for the GMU dummy page */ - ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, 0x60000000); + ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, + 0x60000000, "dummy"); if (ret) goto err_memory; + /* Note that a650 family also includes a660 family: */ if (adreno_is_a650_family(adreno_gpu)) { ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, - SZ_16M - SZ_16K, 0x04000); + SZ_16M - SZ_16K, 0x04000, "icache"); if (ret) goto err_memory; } else if (adreno_is_a640_family(adreno_gpu)) { ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, - SZ_256K - SZ_16K, 0x04000); + SZ_256K - SZ_16K, 0x04000, "icache"); if (ret) goto err_memory; ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, - SZ_256K - SZ_16K, 0x44000); + SZ_256K - SZ_16K, 0x44000, "dcache"); if (ret) goto err_memory; } else { + BUG_ON(adreno_is_a660_family(adreno_gpu)); + /* HFI v1, has sptprac */ gmu->legacy = true; /* Allocate memory for the GMU debug region */ - ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0); + ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug"); if (ret) goto err_memory; } /* Allocate memory for for the HFI queues */ - ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0); + ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi"); if (ret) goto err_memory; /* Allocate memory for the GMU log region */ - ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0); + ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0, "log"); if (ret) goto err_memory; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 78aad5216a61..17cfad6424db 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, u32 asid; u64 memptr = rbmemptr(ring, ttbr0); - if (ctx->seqno == a6xx_gpu->cur_ctx_seqno) + if (ctx->seqno == 
a6xx_gpu->base.base.cur_ctx_seqno) return; if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) @@ -138,14 +138,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, OUT_PKT7(ring, CP_EVENT_WRITE, 1); OUT_RING(ring, 0x31); - - a6xx_gpu->cur_ctx_seqno = ctx->seqno; } static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) { unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; - struct msm_drm_private *priv = gpu->dev->dev_private; struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); struct msm_ringbuffer *ring = submit->ring; @@ -177,7 +174,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) case MSM_SUBMIT_CMD_IB_TARGET_BUF: break; case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: - if (priv->lastctx == submit->queue->ctx) + if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) break; fallthrough; case MSM_SUBMIT_CMD_BUF: @@ -1071,6 +1068,8 @@ static int hw_init(struct msm_gpu *gpu) if (IS_ERR(a6xx_gpu->shadow)) return PTR_ERR(a6xx_gpu->shadow); + + msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow"); } gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO, @@ -1081,7 +1080,7 @@ static int hw_init(struct msm_gpu *gpu) /* Always come up on rb 0 */ a6xx_gpu->cur_ring = gpu->rb[0]; - a6xx_gpu->cur_ctx_seqno = 0; + gpu->cur_ctx_seqno = 0; /* Enable the SQE_to start the CP engine */ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1); @@ -1376,10 +1375,14 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu) static irqreturn_t a6xx_irq(struct msm_gpu *gpu) { + struct msm_drm_private *priv = gpu->dev->dev_private; u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS); gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status); + if (priv->disable_err_irq) + status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS; + if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT) a6xx_fault_detect_irq(gpu); @@ -1557,6 +1560,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu) for (i = 0; i < gpu->nr_rings; i++) a6xx_gpu->shadow[i] = 0; + gpu->suspend_count++; + return 0; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 8e5527c881b1..86e0a7c3fe6d 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -20,16 +20,6 @@ struct a6xx_gpu { struct msm_ringbuffer *cur_ring; - /** - * cur_ctx_seqno: - * - * The ctx->seqno value of the context with current pgtables - * installed. Tracked by seqno rather than pointer value to - * avoid dangling pointers, and cases where a ctx can be freed - * and a new one created with the same address. 
- */ - int cur_ctx_seqno; - struct a6xx_gmu gmu; struct drm_gem_object *shadow_bo; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 6e90209cd543..55f443328d8e 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -42,7 +42,15 @@ struct a6xx_gpu_state { struct a6xx_gpu_state_obj *cx_debugbus; int nr_cx_debugbus; + struct msm_gpu_state_bo *gmu_log; + struct msm_gpu_state_bo *gmu_hfi; + struct msm_gpu_state_bo *gmu_debug; + + s32 hfi_queue_history[2][HFI_HISTORY_SZ]; + struct list_head objs; + + bool gpu_initialized; }; static inline int CRASHDUMP_WRITE(u64 *in, u32 reg, u32 val) @@ -800,6 +808,45 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu, &a6xx_state->gmu_registers[2], false); } +static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo( + struct a6xx_gpu_state *a6xx_state, struct a6xx_gmu_bo *bo) +{ + struct msm_gpu_state_bo *snapshot; + + snapshot = state_kcalloc(a6xx_state, 1, sizeof(*snapshot)); + if (!snapshot) + return NULL; + + snapshot->iova = bo->iova; + snapshot->size = bo->size; + snapshot->data = kvzalloc(snapshot->size, GFP_KERNEL); + if (!snapshot->data) + return NULL; + + memcpy(snapshot->data, bo->virt, bo->size); + + return snapshot; +} + +static void a6xx_snapshot_gmu_hfi_history(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + unsigned i, j; + + BUILD_BUG_ON(ARRAY_SIZE(gmu->queues) != ARRAY_SIZE(a6xx_state->hfi_queue_history)); + + for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) { + struct a6xx_hfi_queue *queue = &gmu->queues[i]; + for (j = 0; j < HFI_HISTORY_SZ; j++) { + unsigned idx = (j + queue->history_idx) % HFI_HISTORY_SZ; + a6xx_state->hfi_queue_history[i][j] = queue->history[idx]; + } + } +} + #define A6XX_GBIF_REGLIST_SIZE 1 static void a6xx_get_registers(struct msm_gpu *gpu, struct a6xx_gpu_state *a6xx_state, @@ -937,6 +984,12 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu) a6xx_get_gmu_registers(gpu, a6xx_state); + a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log); + a6xx_state->gmu_hfi = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.hfi); + a6xx_state->gmu_debug = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.debug); + + a6xx_snapshot_gmu_hfi_history(gpu, a6xx_state); + /* If GX isn't on the rest of the data isn't going to be accessible */ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) return &a6xx_state->base; @@ -950,7 +1003,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu) * write out GPU state, so we need to skip this when the SMMU is * stalled in response to an iova fault */ - if (!stalled && !a6xx_crashdumper_init(gpu, &_dumper)) { + if (!stalled && !gpu->needs_hw_init && + !a6xx_crashdumper_init(gpu, &_dumper)) { dumper = &_dumper; } @@ -967,6 +1021,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu) if (snapshot_debugbus) a6xx_get_debugbus(gpu, a6xx_state); + a6xx_state->gpu_initialized = !gpu->needs_hw_init; + return &a6xx_state->base; } @@ -978,6 +1034,12 @@ static void a6xx_gpu_state_destroy(struct kref *kref) struct a6xx_gpu_state *a6xx_state = container_of(state, struct a6xx_gpu_state, base); + if (a6xx_state->gmu_log) + kvfree(a6xx_state->gmu_log->data); + + if (a6xx_state->gmu_hfi) + kvfree(a6xx_state->gmu_hfi->data); + list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node) kfree(obj); @@ 
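/*
 * a6xx_snapshot_gmu_bo() above copies the live GMU buffers into the
 * crash state with kvzalloc() + memcpy(), letting large buffers fall
 * back to vmalloc. The same pattern in isolation:
 */
#include <linux/mm.h>
#include <linux/string.h>

static void *demo_snapshot(const void *src, size_t size)
{
	void *copy = kvzalloc(size, GFP_KERNEL);

	if (copy)
		memcpy(copy, src, size);

	return copy;	/* release with kvfree(), as the destroy path above does */
}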
-1189,8 +1251,48 @@ void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state, if (IS_ERR_OR_NULL(state)) return; + drm_printf(p, "gpu-initialized: %d\n", a6xx_state->gpu_initialized); + adreno_show(gpu, state, p); + drm_puts(p, "gmu-log:\n"); + if (a6xx_state->gmu_log) { + struct msm_gpu_state_bo *gmu_log = a6xx_state->gmu_log; + + drm_printf(p, " iova: 0x%016llx\n", gmu_log->iova); + drm_printf(p, " size: %zu\n", gmu_log->size); + adreno_show_object(p, &gmu_log->data, gmu_log->size, + &gmu_log->encoded); + } + + drm_puts(p, "gmu-hfi:\n"); + if (a6xx_state->gmu_hfi) { + struct msm_gpu_state_bo *gmu_hfi = a6xx_state->gmu_hfi; + unsigned i, j; + + drm_printf(p, " iova: 0x%016llx\n", gmu_hfi->iova); + drm_printf(p, " size: %zu\n", gmu_hfi->size); + for (i = 0; i < ARRAY_SIZE(a6xx_state->hfi_queue_history); i++) { + drm_printf(p, " queue-history[%u]:", i); + for (j = 0; j < HFI_HISTORY_SZ; j++) { + drm_printf(p, " %d", a6xx_state->hfi_queue_history[i][j]); + } + drm_printf(p, "\n"); + } + adreno_show_object(p, &gmu_hfi->data, gmu_hfi->size, + &gmu_hfi->encoded); + } + + drm_puts(p, "gmu-debug:\n"); + if (a6xx_state->gmu_debug) { + struct msm_gpu_state_bo *gmu_debug = a6xx_state->gmu_debug; + + drm_printf(p, " iova: 0x%016llx\n", gmu_debug->iova); + drm_printf(p, " size: %zu\n", gmu_debug->size); + adreno_show_object(p, &gmu_debug->data, gmu_debug->size, + &gmu_debug->encoded); + } + drm_puts(p, "registers:\n"); for (i = 0; i < a6xx_state->nr_registers; i++) { struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i]; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c index d4c65bf0a1b7..d73fce5fdf1f 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c @@ -36,6 +36,8 @@ static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu, hdr = queue->data[index]; + queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index; + /* * If we are to assume that the GMU firmware is in fact a rational actor * and is programmed to not send us a larger response than we expect @@ -75,6 +77,8 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu, return -ENOSPC; } + queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index; + for (i = 0; i < dwords; i++) { queue->data[index] = data[i]; index = (index + 1) % header->size; @@ -600,6 +604,9 @@ void a6xx_hfi_stop(struct a6xx_gmu *gmu) queue->header->read_index = 0; queue->header->write_index = 0; + + memset(&queue->history, 0xff, sizeof(queue->history)); + queue->history_idx = 0; } } @@ -612,6 +619,9 @@ static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue, queue->data = virt; atomic_set(&queue->seqnum, 0); + memset(&queue->history, 0xff, sizeof(queue->history)); + queue->history_idx = 0; + /* Set up the shared memory header */ header->iova = iova; header->type = 10 << 8 | id; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h index 2bd670ca42d6..528110169398 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h @@ -33,6 +33,17 @@ struct a6xx_hfi_queue { spinlock_t lock; u32 *data; atomic_t seqnum; + + /* + * Tracking for the start index of the last N messages in the + * queue, for the benefit of devcore dump / crashdec (since + * parsing in the reverse direction to decode the last N + * messages is difficult to do and would rely on heuristics + * which are not guaranteed to be correct) + */ +#define HFI_HISTORY_SZ 8 + s32 history[HFI_HISTORY_SZ]; + u8 history_idx; }; /* This is the 
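/*
 * The HFI read/write paths above record each message's start index into
 * a small ring so crashdec can recover the last HFI_HISTORY_SZ messages
 * in order. A self-contained sketch of the recorder; the u8 index wraps
 * at 256, a multiple of 8, so the modulo stays consistent across the
 * wrap:
 */
#include <linux/types.h>

#define DEMO_HISTORY_SZ 8

struct demo_history {
	s32 slot[DEMO_HISTORY_SZ];
	u8 idx;
};

static void demo_history_record(struct demo_history *h, s32 index)
{
	h->slot[h->idx++ % DEMO_HISTORY_SZ] = index;
}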
outgoing queue to the GMU */ diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 2a6ce76656aa..fb261930ad1c 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -132,6 +132,24 @@ static const struct adreno_info gpulist[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .init = a4xx_gpu_init, }, { + .rev = ADRENO_REV(5, 0, 6, ANY_ID), + .revn = 506, + .name = "A506", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = (SZ_128K + SZ_8K), + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI | + ADRENO_QUIRK_LMLOADKILL_DISABLE, + .init = a5xx_gpu_init, + .zapfw = "a506_zap.mdt", + }, { .rev = ADRENO_REV(5, 0, 8, ANY_ID), .revn = 508, .name = "A508", @@ -408,9 +426,9 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) return NULL; } - mutex_lock(&dev->struct_mutex); + mutex_lock(&gpu->lock); ret = msm_gpu_hw_init(gpu); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&gpu->lock); pm_runtime_put_autosuspend(&pdev->dev); if (ret) { DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret); @@ -427,13 +445,6 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) return gpu; } -static void set_gpu_pdev(struct drm_device *dev, - struct platform_device *pdev) -{ - struct msm_drm_private *priv = dev->dev_private; - priv->gpu_pdev = pdev; -} - static int find_chipid(struct device *dev, struct adreno_rev *rev) { struct device_node *node = dev->of_node; @@ -482,8 +493,8 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) { static struct adreno_platform_config config = {}; const struct adreno_info *info; - struct drm_device *drm = dev_get_drvdata(master); - struct msm_drm_private *priv = drm->dev_private; + struct msm_drm_private *priv = dev_get_drvdata(master); + struct drm_device *drm = priv->dev; struct msm_gpu *gpu; int ret; @@ -492,7 +503,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) return ret; dev->platform_data = &config; - set_gpu_pdev(drm, to_platform_device(dev)); + priv->gpu_pdev = to_platform_device(dev); info = adreno_info(config.rev); @@ -521,12 +532,13 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) static void adreno_unbind(struct device *dev, struct device *master, void *data) { + struct msm_drm_private *priv = dev_get_drvdata(master); struct msm_gpu *gpu = dev_to_gpu(dev); pm_runtime_force_suspend(dev); gpu->funcs->destroy(gpu); - set_gpu_pdev(dev_get_drvdata(master), NULL); + priv->gpu_pdev = NULL; } static const struct component_ops a3xx_ops = { @@ -596,9 +608,27 @@ static int adreno_resume(struct device *dev) return gpu->funcs->pm_resume(gpu); } +static int active_submits(struct msm_gpu *gpu) +{ + int active_submits; + mutex_lock(&gpu->active_lock); + active_submits = gpu->active_submits; + mutex_unlock(&gpu->active_lock); + return active_submits; +} + static int adreno_suspend(struct device *dev) { struct msm_gpu *gpu = dev_to_gpu(dev); + int remaining; + + remaining = wait_event_timeout(gpu->retire_event, + active_submits(gpu) == 0, + msecs_to_jiffies(1000)); + if (remaining == 0) { + dev_err(dev, "Timeout waiting for GPU to suspend\n"); + return -EBUSY; + } return gpu->funcs->pm_suspend(gpu); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 
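The adreno_suspend change above makes suspend wait for in-flight submits to retire, sleeping on gpu->retire_event until the count drains or a second elapses. A hedged in-kernel sketch of that pattern; the struct and names here are invented for illustration, not the driver's:

#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct busy_tracker {
	struct mutex lock;
	int active;			/* in-flight work items */
	wait_queue_head_t drained;	/* woken whenever 'active' drops */
};

static int tracker_active(struct busy_tracker *t)
{
	int n;

	mutex_lock(&t->lock);
	n = t->active;
	mutex_unlock(&t->lock);
	return n;
}

/* completion path: decrement under the lock, then wake any waiter */
static void tracker_put(struct busy_tracker *t)
{
	mutex_lock(&t->lock);
	t->active--;
	mutex_unlock(&t->lock);
	wake_up_all(&t->drained);
}

/* suspend path: report -EBUSY if work does not drain within 1s */
static int tracker_wait_idle(struct busy_tracker *t)
{
	long left = wait_event_timeout(t->drained, tracker_active(t) == 0,
				       msecs_to_jiffies(1000));

	return left ? 0 : -EBUSY;
}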
748665232d29..f33cfa4ef1c8 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -504,6 +504,8 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); int i, count = 0; + WARN_ON(!mutex_is_locked(&gpu->lock)); + kref_init(&state->ref); ktime_get_real_ts64(&state->time); @@ -630,7 +632,7 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len) } /* len is expected to be in bytes */ -static void adreno_show_object(struct drm_printer *p, void **ptr, int len, +void adreno_show_object(struct drm_printer *p, void **ptr, int len, bool *encoded) { if (!*ptr || !len) diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 225c277a6223..cffabe7d33c1 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -201,6 +201,11 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu) return gpu->revn == 430; } +static inline int adreno_is_a506(struct adreno_gpu *gpu) +{ + return gpu->revn == 506; +} + static inline int adreno_is_a508(struct adreno_gpu *gpu) { return gpu->revn == 508; @@ -306,6 +311,8 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state); int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state); int adreno_gpu_state_put(struct msm_gpu_state *state); +void adreno_show_object(struct drm_printer *p, void **ptr, int len, + bool *encoded); /* * Common helper function to initialize the default address space for arm-smmu diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 967245b8cc02..e7c9fe1a250f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -337,7 +337,8 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) } static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, - struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer) + struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer, + struct dpu_hw_stage_cfg *stage_cfg) { struct drm_plane *plane; struct drm_framebuffer *fb; @@ -346,7 +347,6 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, struct dpu_plane_state *pstate = NULL; struct dpu_format *format; struct dpu_hw_ctl *ctl = mixer->lm_ctl; - struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg; u32 flush_mask; uint32_t stage_idx, lm_idx; @@ -422,6 +422,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) struct dpu_crtc_mixer *mixer = cstate->mixers; struct dpu_hw_ctl *ctl; struct dpu_hw_mixer *lm; + struct dpu_hw_stage_cfg stage_cfg; int i; DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name); @@ -435,9 +436,9 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) } /* initialize stage cfg */ - memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg)); + memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg)); - _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer); + _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg); for (i = 0; i < cstate->num_mixers; i++) { ctl = mixer[i].lm_ctl; @@ -458,7 +459,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) mixer[i].flush_mask); ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx, - &dpu_crtc->stage_cfg); + &stage_cfg); } } @@ -923,6 +924,20 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc) return &cstate->base; } +static void dpu_crtc_atomic_print_state(struct drm_printer *p, + const struct 
drm_crtc_state *state) +{ + const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); + int i; + + for (i = 0; i < cstate->num_mixers; i++) { + drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0); + drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0); + if (cstate->mixers[i].hw_dspp) + drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0); + } +} + static void dpu_crtc_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { @@ -1423,15 +1438,16 @@ DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state); static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc) { struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct dentry *debugfs_root; - dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name, + debugfs_root = debugfs_create_dir(dpu_crtc->name, crtc->dev->primary->debugfs_root); debugfs_create_file("status", 0400, - dpu_crtc->debugfs_root, + debugfs_root, dpu_crtc, &_dpu_debugfs_status_fops); debugfs_create_file("state", 0600, - dpu_crtc->debugfs_root, + debugfs_root, &dpu_crtc->base, &dpu_crtc_debugfs_state_fops); @@ -1449,13 +1465,6 @@ static int dpu_crtc_late_register(struct drm_crtc *crtc) return _dpu_crtc_init_debugfs(crtc); } -static void dpu_crtc_early_unregister(struct drm_crtc *crtc) -{ - struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); - - debugfs_remove_recursive(dpu_crtc->debugfs_root); -} - static const struct drm_crtc_funcs dpu_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = dpu_crtc_destroy, @@ -1463,8 +1472,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = { .reset = dpu_crtc_reset, .atomic_duplicate_state = dpu_crtc_duplicate_state, .atomic_destroy_state = dpu_crtc_destroy_state, + .atomic_print_state = dpu_crtc_atomic_print_state, .late_register = dpu_crtc_late_register, - .early_unregister = dpu_crtc_early_unregister, .verify_crc_source = dpu_crtc_verify_crc_source, .set_crc_source = dpu_crtc_set_crc_source, .enable_vblank = msm_crtc_enable_vblank, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index ae9546ca1359..b8785c394fcc 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -129,8 +129,6 @@ struct dpu_crtc_frame_event { * @drm_requested_vblank : Whether vblanks have been enabled in the encoder * @property_info : Opaque structure for generic property support * @property_defaults : Array of default values for generic property support - * @stage_cfg : H/w mixer stage configuration - * @debugfs_root : Parent of debugfs node * @vblank_cb_count : count of vblank callback since last reset * @play_count : frame count between crtc enable and disable * @vblank_cb_time : ktime at vblank count reset @@ -161,9 +159,6 @@ struct dpu_crtc { struct drm_pending_vblank_event *event; u32 vsync_count; - struct dpu_hw_stage_cfg stage_cfg; - struct dentry *debugfs_root; - u32 vblank_cb_count; u64 play_count; ktime_t vblank_cb_time; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index e7ee4cfb8461..1e648db439f9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -995,9 +995,6 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, trace_dpu_enc_mode_set(DRMID(drm_enc)); - if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) - msm_dp_display_mode_set(dpu_enc->dp, drm_enc, mode, adj_mode); - list_for_each_entry(conn_iter, connector_list, head) if (conn_iter->encoder == 
drm_enc) conn = conn_iter; @@ -1148,10 +1145,6 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) struct msm_drm_private *priv; struct drm_display_mode *cur_mode = NULL; - if (!drm_enc) { - DPU_ERROR("invalid encoder\n"); - return; - } dpu_enc = to_dpu_encoder_virt(drm_enc); mutex_lock(&dpu_enc->enc_lock); @@ -1177,14 +1170,6 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) _dpu_encoder_virt_enable_helper(drm_enc); - if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) { - ret = msm_dp_display_enable(dpu_enc->dp, drm_enc); - if (ret) { - DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n", - ret); - goto out; - } - } dpu_enc->enabled = true; out: @@ -1197,14 +1182,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) struct msm_drm_private *priv; int i = 0; - if (!drm_enc) { - DPU_ERROR("invalid encoder\n"); - return; - } else if (!drm_enc->dev) { - DPU_ERROR("invalid dev\n"); - return; - } - dpu_enc = to_dpu_encoder_virt(drm_enc); DPU_DEBUG_ENC(dpu_enc, "\n"); @@ -1218,11 +1195,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) /* wait for idle */ dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE); - if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) { - if (msm_dp_display_pre_disable(dpu_enc->dp, drm_enc)) - DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n"); - } - dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP); for (i = 0; i < dpu_enc->num_phys_encs; i++) { @@ -1247,11 +1219,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); - if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) { - if (msm_dp_display_disable(dpu_enc->dp, drm_enc)) - DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n"); - } - mutex_unlock(&dpu_enc->enc_lock); } @@ -2128,11 +2095,8 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t) static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = { .mode_set = dpu_encoder_virt_mode_set, .disable = dpu_encoder_virt_disable, - .enable = dpu_kms_encoder_enable, + .enable = dpu_encoder_virt_enable, .atomic_check = dpu_encoder_virt_atomic_check, - - /* This is called by dpu_kms_encoder_enable */ - .commit = dpu_encoder_virt_enable, }; static const struct drm_encoder_funcs dpu_encoder_funcs = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index 185379b18572..ddd9d89cd456 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -698,17 +698,17 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init( { struct dpu_encoder_phys *phys_enc = NULL; struct dpu_encoder_irq *irq; - int i, ret = 0; + int i; if (!p) { - ret = -EINVAL; - goto fail; + DPU_ERROR("failed to create encoder due to invalid parameter\n"); + return ERR_PTR(-EINVAL); } phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL); if (!phys_enc) { - ret = -ENOMEM; - goto fail; + DPU_ERROR("failed to create encoder due to memory allocation error\n"); + return ERR_PTR(-ENOMEM); } phys_enc->hw_mdptop = p->dpu_kms->hw_mdp; @@ -748,11 +748,4 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init( DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->intf_idx); return phys_enc; - -fail: - DPU_ERROR("failed to create encoder\n"); - if (phys_enc) - dpu_encoder_phys_vid_destroy(phys_enc); - - return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c 
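The dpu_encoder_phys_vid_init rework above is a pure error-path simplification: once nothing allocated can reach the failure label, early ERR_PTR() returns read better than the goto-fail idiom. A minimal sketch of the resulting shape, with a hypothetical type:

#include <linux/err.h>
#include <linux/slab.h>

struct widget { int id; };

static struct widget *widget_create(int id)
{
	struct widget *w;

	if (id < 0)
		return ERR_PTR(-EINVAL);	/* nothing allocated yet */

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return ERR_PTR(-ENOMEM);	/* likewise, nothing to undo */

	w->id = id;
	return w;
}

Callers keep testing the result with IS_ERR()/PTR_ERR() exactly as before.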
b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index ce6f32a919e5..aa75991903a6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -45,7 +45,7 @@ (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2)) #define CTL_SC7280_MASK \ - (BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_FETCH_ACTIVE)) + (BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_FETCH_ACTIVE) | BIT(DPU_CTL_VM_CFG)) #define MERGE_3D_SM8150_MASK (0) @@ -856,9 +856,9 @@ static const struct dpu_intf_cfg sm8150_intf[] = { }; static const struct dpu_intf_cfg sc7280_intf[] = { - INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25), + INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25), INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27), - INTF_BLK("intf_5", INTF_5, 0x39000, INTF_EDP, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23), + INTF_BLK("intf_5", INTF_5, 0x39000, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23), }; /************************************************************* diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index 4ade44bbd37e..31af04afda7d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -179,13 +179,16 @@ enum { /** * CTL sub-blocks - * @DPU_CTL_SPLIT_DISPLAY CTL supports video mode split display + * @DPU_CTL_SPLIT_DISPLAY: CTL supports video mode split display + * @DPU_CTL_FETCH_ACTIVE: Active CTL for fetch HW (SSPPs) + * @DPU_CTL_VM_CFG: CTL config to support multiple VMs * @DPU_CTL_MAX */ enum { DPU_CTL_SPLIT_DISPLAY = 0x1, DPU_CTL_ACTIVE_CFG, DPU_CTL_FETCH_ACTIVE, + DPU_CTL_VM_CFG, DPU_CTL_MAX }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index 64740ddb983e..02da9ecf71f1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -36,6 +36,7 @@ #define MERGE_3D_IDX 23 #define INTF_IDX 31 #define CTL_INVALID_BIT 0xffff +#define CTL_DEFAULT_GROUP_ID 0xf static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0, @@ -498,6 +499,13 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx, u32 intf_active = 0; u32 mode_sel = 0; + /* CTL_TOP[31:28] carries group_id to collate CTL paths + * per VM. Explicitly disable it until VM support is + * added in SW. The power-on reset value does not disable it.
+ */ + if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features))) + mode_sel = CTL_DEFAULT_GROUP_ID << 28; + if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD) mode_sel |= BIT(17); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c index a98e964c3b6f..355894a3b48c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c @@ -26,9 +26,16 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx, struct dpu_hw_pcc_cfg *cfg) { - u32 base = ctx->cap->sblk->pcc.base; + u32 base; - if (!ctx || !base) { + if (!ctx) { + DRM_ERROR("invalid ctx %pK\n", ctx); + return; + } + + base = ctx->cap->sblk->pcc.base; + + if (!base) { DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base); return; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index d2b6dca487e3..a77a5eaa78ad 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -30,6 +30,9 @@ #define MDP_AD4_INTR_STATUS_OFF 0x420 #define MDP_INTF_0_OFF_REV_7xxx 0x34000 #define MDP_INTF_1_OFF_REV_7xxx 0x35000 +#define MDP_INTF_2_OFF_REV_7xxx 0x36000 +#define MDP_INTF_3_OFF_REV_7xxx 0x37000 +#define MDP_INTF_4_OFF_REV_7xxx 0x38000 #define MDP_INTF_5_OFF_REV_7xxx 0x39000 /** @@ -111,6 +114,21 @@ static const struct dpu_intr_reg dpu_intr_set[] = { MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS }, { + MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR, + MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN, + MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS + }, + { + MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR, + MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN, + MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS + }, + { + MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR, + MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN, + MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS + }, + { MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR, MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN, MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h index d50e78c9f148..1ab75cccd145 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h @@ -26,6 +26,9 @@ enum dpu_hw_intr_reg { MDP_AD4_1_INTR, MDP_INTF0_7xxx_INTR, MDP_INTF1_7xxx_INTR, + MDP_INTF2_7xxx_INTR, + MDP_INTF3_7xxx_INTR, + MDP_INTF4_7xxx_INTR, MDP_INTF5_7xxx_INTR, MDP_INTR_MAX, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c index f9460672176a..09cdc3576653 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -8,6 +8,8 @@ #include "dpu_hw_sspp.h" #include "dpu_kms.h" +#include <drm/drm_file.h> + #define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087 /* DPU_SSPP_SRC */ @@ -75,6 +77,7 @@ #define SSPP_TRAFFIC_SHAPER 0x130 #define SSPP_CDP_CNTL 0x134 #define SSPP_UBWC_ERROR_STATUS 0x138 +#define SSPP_CDP_CNTL_REC1 0x13c #define SSPP_TRAFFIC_SHAPER_PREFILL 0x150 #define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154 #define SSPP_TRAFFIC_SHAPER_REC1 0x158 @@ -413,13 +416,11 @@ static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx, static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx, struct dpu_hw_pipe_cfg *sspp, - struct dpu_hw_pixel_ext *pe, void *scaler_cfg) { u32 idx; struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg; - (void)pe; if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp || !scaler3_cfg) return; @@ 
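The CTL_VM_CFG hunk above seeds mode_sel with CTL_DEFAULT_GROUP_ID << 28 before the mode bits are OR'd in, packing a 4-bit group_id into bits [31:28] of CTL_TOP. The same composition written out with explicit masking, as a standalone sketch (these macros are invented here, not the driver's):

#include <stdint.h>

#define GROUP_ID_SHIFT	28
#define GROUP_ID_MASK	(0xfu << GROUP_ID_SHIFT)

/* Place a 4-bit group id in [31:28] without disturbing other fields. */
static inline uint32_t ctl_top_set_group(uint32_t reg, uint32_t group_id)
{
	reg &= ~GROUP_ID_MASK;
	reg |= (group_id << GROUP_ID_SHIFT) & GROUP_ID_MASK;
	return reg;
}

/* ctl_top_set_group(0, 0xf) == 0xf0000000, matching the write above. */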
-539,7 +540,7 @@ static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx, } static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx, - struct dpu_csc_cfg *data) + const struct dpu_csc_cfg *data) { u32 idx; bool csc10 = false; @@ -571,19 +572,20 @@ static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color, enum } static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx, - struct dpu_hw_pipe_qos_cfg *cfg) + u32 danger_lut, + u32 safe_lut) { u32 idx; if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx)) return; - DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut); - DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut); + DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, danger_lut); + DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, safe_lut); } static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx, - struct dpu_hw_pipe_qos_cfg *cfg) + u64 creq_lut) { u32 idx; @@ -591,11 +593,11 @@ static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx, return; if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) { - DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut); + DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, creq_lut); DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx, - cfg->creq_lut >> 32); + creq_lut >> 32); } else { - DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut); + DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, creq_lut); } } @@ -625,10 +627,12 @@ static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx, } static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx, - struct dpu_hw_pipe_cdp_cfg *cfg) + struct dpu_hw_pipe_cdp_cfg *cfg, + enum dpu_sspp_multirect_index index) { u32 idx; u32 cdp_cntl = 0; + u32 cdp_cntl_offset = 0; if (!ctx || !cfg) return; @@ -636,6 +640,11 @@ static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx, if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx)) return; + if (index == DPU_SSPP_RECT_SOLO || index == DPU_SSPP_RECT_0) + cdp_cntl_offset = SSPP_CDP_CNTL; + else + cdp_cntl_offset = SSPP_CDP_CNTL_REC1; + if (cfg->enable) cdp_cntl |= BIT(0); if (cfg->ubwc_meta_enable) @@ -645,7 +654,7 @@ static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx, if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64) cdp_cntl |= BIT(3); - DPU_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl); + DPU_REG_WRITE(&ctx->hw, cdp_cntl_offset, cdp_cntl); } static void _setup_layer_ops(struct dpu_hw_pipe *c, @@ -685,6 +694,71 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c, c->ops.setup_cdp = dpu_hw_sspp_setup_cdp; } +#ifdef CONFIG_DEBUG_FS +int _dpu_hw_sspp_init_debugfs(struct dpu_hw_pipe *hw_pipe, struct dpu_kms *kms, struct dentry *entry) +{ + const struct dpu_sspp_cfg *cfg = hw_pipe->cap; + const struct dpu_sspp_sub_blks *sblk = cfg->sblk; + struct dentry *debugfs_root; + char sspp_name[32]; + + snprintf(sspp_name, sizeof(sspp_name), "%d", hw_pipe->idx); + + /* create overall sub-directory for the pipe */ + debugfs_root = + debugfs_create_dir(sspp_name, entry); + + /* don't error check these */ + debugfs_create_xul("features", 0600, + debugfs_root, (unsigned long *)&hw_pipe->cap->features); + + /* add register dump support */ + dpu_debugfs_create_regset32("src_blk", 0400, + debugfs_root, + sblk->src_blk.base + cfg->base, + sblk->src_blk.len, + kms); + + if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) || + cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) || + cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) || + cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) + 
dpu_debugfs_create_regset32("scaler_blk", 0400, + debugfs_root, + sblk->scaler_blk.base + cfg->base, + sblk->scaler_blk.len, + kms); + + if (cfg->features & BIT(DPU_SSPP_CSC) || + cfg->features & BIT(DPU_SSPP_CSC_10BIT)) + dpu_debugfs_create_regset32("csc_blk", 0400, + debugfs_root, + sblk->csc_blk.base + cfg->base, + sblk->csc_blk.len, + kms); + + debugfs_create_u32("xin_id", + 0400, + debugfs_root, + (u32 *) &cfg->xin_id); + debugfs_create_u32("clk_ctrl", + 0400, + debugfs_root, + (u32 *) &cfg->clk_ctrl); + debugfs_create_x32("creq_vblank", + 0600, + debugfs_root, + (u32 *) &sblk->creq_vblank); + debugfs_create_x32("danger_vblank", + 0600, + debugfs_root, + (u32 *) &sblk->danger_vblank); + + return 0; +} +#endif + + static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp, void __iomem *addr, struct dpu_mdss_cfg *catalog, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h index fdfd4b46e2c6..92b071b78fdb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h @@ -25,11 +25,17 @@ struct dpu_hw_pipe; /** * Define all scaler feature bits in catalog */ -#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \ - (1UL << DPU_SSPP_SCALER_QSEED2) | \ - (1UL << DPU_SSPP_SCALER_QSEED3) | \ - (1UL << DPU_SSPP_SCALER_QSEED3LITE) | \ - (1UL << DPU_SSPP_SCALER_QSEED4)) +#define DPU_SSPP_SCALER (BIT(DPU_SSPP_SCALER_RGB) | \ + BIT(DPU_SSPP_SCALER_QSEED2) | \ + BIT(DPU_SSPP_SCALER_QSEED3) | \ + BIT(DPU_SSPP_SCALER_QSEED3LITE) | \ + BIT(DPU_SSPP_SCALER_QSEED4)) + +/* + * Define all CSC feature bits in catalog + */ +#define DPU_SSPP_CSC_ANY (BIT(DPU_SSPP_CSC) | \ + BIT(DPU_SSPP_CSC_10BIT)) /** * Component indices @@ -166,18 +172,12 @@ struct dpu_hw_pipe_cfg { /** * struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration - * @danger_lut: LUT for generate danger level based on fill level - * @safe_lut: LUT for generate safe level based on fill level - * @creq_lut: LUT for generate creq level based on fill level * @creq_vblank: creq value generated to vbif during vertical blanking * @danger_vblank: danger value generated during vertical blanking * @vblank_en: enable creq_vblank and danger_vblank during vblank * @danger_safe_en: enable danger safe generation */ struct dpu_hw_pipe_qos_cfg { - u32 danger_lut; - u32 safe_lut; - u64 creq_lut; u32 creq_vblank; u32 danger_vblank; bool vblank_en; @@ -268,7 +268,7 @@ struct dpu_hw_sspp_ops { * @ctx: Pointer to pipe context * @data: Pointer to config structure */ - void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data); + void (*setup_csc)(struct dpu_hw_pipe *ctx, const struct dpu_csc_cfg *data); /** * setup_solidfill - enable/disable colorfill @@ -302,20 +302,22 @@ struct dpu_hw_sspp_ops { /** * setup_danger_safe_lut - setup danger safe LUTs * @ctx: Pointer to pipe context - * @cfg: Pointer to pipe QoS configuration + * @danger_lut: LUT for generate danger level based on fill level + * @safe_lut: LUT for generate safe level based on fill level * */ void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx, - struct dpu_hw_pipe_qos_cfg *cfg); + u32 danger_lut, + u32 safe_lut); /** * setup_creq_lut - setup CREQ LUT * @ctx: Pointer to pipe context - * @cfg: Pointer to pipe QoS configuration + * @creq_lut: LUT for generate creq level based on fill level * */ void (*setup_creq_lut)(struct dpu_hw_pipe *ctx, - struct dpu_hw_pipe_qos_cfg *cfg); + u64 creq_lut); /** * setup_qos_ctrl - setup QoS control @@ -338,12 +340,10 @@ struct dpu_hw_sspp_ops { * 
setup_scaler - setup scaler * @ctx: Pointer to pipe context * @pipe_cfg: Pointer to pipe configuration - * @pe_cfg: Pointer to pixel extension configuration * @scaler_cfg: Pointer to scaler configuration */ void (*setup_scaler)(struct dpu_hw_pipe *ctx, struct dpu_hw_pipe_cfg *pipe_cfg, - struct dpu_hw_pixel_ext *pe_cfg, void *scaler_cfg); /** @@ -356,9 +356,11 @@ struct dpu_hw_sspp_ops { * setup_cdp - setup client driven prefetch * @ctx: Pointer to pipe context * @cfg: Pointer to cdp configuration + * @index: rectangle index in multirect */ void (*setup_cdp)(struct dpu_hw_pipe *ctx, - struct dpu_hw_pipe_cdp_cfg *cfg); + struct dpu_hw_pipe_cdp_cfg *cfg, + enum dpu_sspp_multirect_index index); }; /** @@ -385,6 +387,7 @@ struct dpu_hw_pipe { struct dpu_hw_sspp_ops ops; }; +struct dpu_kms; /** * dpu_hw_sspp_init - initializes the sspp hw driver object. * Should be called once before accessing every pipe. @@ -404,5 +407,8 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx, */ void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx); +void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root); +int _dpu_hw_sspp_init_debugfs(struct dpu_hw_pipe *hw_pipe, struct dpu_kms *kms, struct dentry *entry); + #endif /*_DPU_HW_SSPP_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c index f94584c982cd..aad85116b0a0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c @@ -374,7 +374,7 @@ u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c, void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c, u32 csc_reg_off, - struct dpu_csc_cfg *data, bool csc10) + const struct dpu_csc_cfg *data, bool csc10) { static const u32 matrix_shift = 7; u32 clamp_shift = csc10 ? 
16 : 8; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h index 6d4911957e33..39134754579e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h @@ -322,6 +322,6 @@ u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c, void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c, u32 csc_reg_off, - struct dpu_csc_cfg *data, bool csc10); + const struct dpu_csc_cfg *data, bool csc10); #endif /* _DPU_HW_UTIL_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index a15b26428280..47fe11a84a77 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -21,14 +21,14 @@ #include "msm_gem.h" #include "disp/msm_disp_snapshot.h" -#include "dpu_kms.h" #include "dpu_core_irq.h" +#include "dpu_crtc.h" +#include "dpu_encoder.h" #include "dpu_formats.h" #include "dpu_hw_vbif.h" -#include "dpu_vbif.h" -#include "dpu_encoder.h" +#include "dpu_kms.h" #include "dpu_plane.h" -#include "dpu_crtc.h" +#include "dpu_vbif.h" #define CREATE_TRACE_POINTS #include "dpu_trace.h" @@ -73,8 +73,8 @@ static int _dpu_danger_signal_status(struct seq_file *s, &status); } else { seq_puts(s, "\nSafe signal status:\n"); - if (kms->hw_mdp->ops.get_danger_status) - kms->hw_mdp->ops.get_danger_status(kms->hw_mdp, + if (kms->hw_mdp->ops.get_safe_status) + kms->hw_mdp->ops.get_safe_status(kms->hw_mdp, &status); } pm_runtime_put_sync(&kms->pdev->dev); @@ -82,7 +82,7 @@ static int _dpu_danger_signal_status(struct seq_file *s, seq_printf(s, "MDP : 0x%x\n", status.mdp); for (i = SSPP_VIG0; i < SSPP_MAX; i++) - seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0, + seq_printf(s, "SSPP%d : 0x%x \n", i - SSPP_VIG0, status.sspp[i]); seq_puts(s, "\n"); @@ -101,6 +101,73 @@ static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v) } DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats); +static ssize_t _dpu_plane_danger_read(struct file *file, + char __user *buff, size_t count, loff_t *ppos) +{ + struct dpu_kms *kms = file->private_data; + int len; + char buf[40]; + + len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl); + + return simple_read_from_buffer(buff, count, ppos, buf, len); +} + +static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable) +{ + struct drm_plane *plane; + + drm_for_each_plane(plane, kms->dev) { + if (plane->fb && plane->state) { + dpu_plane_danger_signal_ctrl(plane, enable); + DPU_DEBUG("plane:%d img:%dx%d ", + plane->base.id, plane->fb->width, + plane->fb->height); + DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n", + plane->state->src_x >> 16, + plane->state->src_y >> 16, + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->crtc_x, plane->state->crtc_y, + plane->state->crtc_w, plane->state->crtc_h); + } else { + DPU_DEBUG("Inactive plane:%d\n", plane->base.id); + } + } +} + +static ssize_t _dpu_plane_danger_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct dpu_kms *kms = file->private_data; + int disable_panic; + int ret; + + ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic); + if (ret) + return ret; + + if (disable_panic) { + /* Disable panic signal for all active pipes */ + DPU_DEBUG("Disabling danger:\n"); + _dpu_plane_set_danger_state(kms, false); + kms->has_danger_ctrl = false; + } else { + /* Enable panic signal for all active pipes */ + DPU_DEBUG("Enabling danger:\n"); + kms->has_danger_ctrl = true; + 
_dpu_plane_set_danger_state(kms, true); + } + + return count; +} + +static const struct file_operations dpu_plane_danger_enable = { + .open = simple_open, + .read = _dpu_plane_danger_read, + .write = _dpu_plane_danger_write, +}; + static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms, struct dentry *parent) { @@ -110,8 +177,20 @@ static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms, dpu_kms, &dpu_debugfs_danger_stats_fops); debugfs_create_file("safe_status", 0600, entry, dpu_kms, &dpu_debugfs_safe_stats_fops); + debugfs_create_file("disable_danger", 0600, entry, + dpu_kms, &dpu_plane_danger_enable); + } +/* + * Companion structure for dpu_debugfs_create_regset32. + */ +struct dpu_debugfs_regset32 { + uint32_t offset; + uint32_t blk_len; + struct dpu_kms *dpu_kms; +}; + static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data) { struct dpu_debugfs_regset32 *regset = s->private; @@ -159,24 +238,23 @@ static const struct file_operations dpu_fops_regset32 = { .release = single_release, }; -void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset, +void dpu_debugfs_create_regset32(const char *name, umode_t mode, + void *parent, uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms) { - if (regset) { - regset->offset = offset; - regset->blk_len = length; - regset->dpu_kms = dpu_kms; - } -} + struct dpu_debugfs_regset32 *regset; -void dpu_debugfs_create_regset32(const char *name, umode_t mode, - void *parent, struct dpu_debugfs_regset32 *regset) -{ - if (!name || !regset || !regset->dpu_kms || !regset->blk_len) + if (WARN_ON(!name || !dpu_kms || !length)) + return; + + regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL); + if (!regset) return; /* make sure offset is a multiple of 4 */ - regset->offset = round_down(regset->offset, 4); + regset->offset = round_down(offset, 4); + regset->blk_len = length; + regset->dpu_kms = dpu_kms; debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32); } @@ -203,6 +281,7 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) dpu_debugfs_danger_init(dpu_kms, entry); dpu_debugfs_vbif_init(dpu_kms, entry); dpu_debugfs_core_irq_init(dpu_kms, entry); + dpu_debugfs_sspp_init(dpu_kms, entry); for (i = 0; i < ARRAY_SIZE(priv->dp); i++) { if (priv->dp[i]) @@ -384,28 +463,6 @@ static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask) } } -/* - * Override the encoder enable since we need to setup the inline rotator and do - * some crtc magic before enabling any bridge that might be present. 
- */ -void dpu_kms_encoder_enable(struct drm_encoder *encoder) -{ - const struct drm_encoder_helper_funcs *funcs = encoder->helper_private; - struct drm_device *dev = encoder->dev; - struct drm_crtc *crtc; - - /* Forward this enable call to the commit hook */ - if (funcs && funcs->commit) - funcs->commit(encoder); - - drm_for_each_crtc(crtc, dev) { - if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder))) - continue; - - trace_dpu_kms_enc_enable(DRMID(crtc)); - } -} - static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask) { struct dpu_kms *dpu_kms = to_dpu_kms(kms); @@ -863,6 +920,11 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, dpu_kms->mmio + cat->sspp[i].base, "sspp_%d", i); + /* dump LM sub-blocks HW regs info */ + for (i = 0; i < cat->mixer_count; i++) + msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len, + dpu_kms->mmio + cat->mixer[i].base, "lm_%d", i); + msm_disp_snapshot_add_block(disp_state, top->hw.length, dpu_kms->mmio + top->hw.blk_off, "top"); @@ -1153,9 +1215,9 @@ struct msm_kms *dpu_kms_init(struct drm_device *dev) static int dpu_bind(struct device *dev, struct device *master, void *data) { - struct drm_device *ddev = dev_get_drvdata(master); + struct msm_drm_private *priv = dev_get_drvdata(master); struct platform_device *pdev = to_platform_device(dev); - struct msm_drm_private *priv = ddev->dev_private; + struct drm_device *ddev = priv->dev; struct dpu_kms *dpu_kms; struct dss_module_power *mp; int ret = 0; @@ -1285,7 +1347,7 @@ static const struct dev_pm_ops dpu_pm_ops = { pm_runtime_force_resume) }; -static const struct of_device_id dpu_dt_match[] = { +const struct of_device_id dpu_dt_match[] = { { .compatible = "qcom,sdm845-dpu", }, { .compatible = "qcom,sc7180-dpu", }, { .compatible = "qcom,sc7280-dpu", }, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 775bcbda860f..2d385b4b7f5e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -160,34 +160,10 @@ struct dpu_global_state * * Documentation/filesystems/debugfs.rst * - * @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32 * @dpu_debugfs_create_regset32: Create 32-bit register dump file - * @dpu_debugfs_get_root: Get root dentry for DPU_KMS's debugfs node */ /** - * Companion structure for dpu_debugfs_create_regset32. Do not initialize the - * members of this structure explicitly; use dpu_debugfs_setup_regset32 instead. - */ -struct dpu_debugfs_regset32 { - uint32_t offset; - uint32_t blk_len; - struct dpu_kms *dpu_kms; -}; - -/** - * dpu_debugfs_setup_regset32 - Initialize register block definition for debugfs - * This function is meant to initialize dpu_debugfs_regset32 structures for use - * with dpu_debugfs_create_regset32. - * @regset: opaque register definition structure - * @offset: sub-block offset - * @length: sub-block length, in bytes - * @dpu_kms: pointer to dpu kms structure - */ -void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset, - uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms); - -/** * dpu_debugfs_create_regset32 - Create register read back file for debugfs * * This function is almost identical to the standard debugfs_create_regset32() @@ -195,20 +171,16 @@ void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset, * names/offsets do not need to be provided. 
The 'read' function simply outputs * sequential register values over a specified range. * - * Similar to the related debugfs_create_regset32 API, the structure pointed to - * by regset needs to persist for the lifetime of the created file. The calling - * code is responsible for initialization/management of this structure. - * - * The structure pointed to by regset is meant to be opaque. Please use - * dpu_debugfs_setup_regset32 to initialize it. - * * @name: File name within debugfs * @mode: File mode within debugfs * @parent: Parent directory entry within debugfs, can be NULL - * @regset: Pointer to persistent register block definition + * @offset: sub-block offset + * @length: sub-block length, in bytes + * @dpu_kms: pointer to dpu kms structure */ void dpu_debugfs_create_regset32(const char *name, umode_t mode, - void *parent, struct dpu_debugfs_regset32 *regset); + void *parent, + uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms); /** * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs @@ -235,8 +207,6 @@ void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms); int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); -void dpu_kms_encoder_enable(struct drm_encoder *encoder); - /** * dpu_kms_get_clk_rate() - get the clock rate * @dpu_kms: pointer to dpu_kms structure diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c index b466784d9822..131c1f1a869c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c @@ -111,7 +111,7 @@ static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss) struct device *dev; struct irq_domain *domain; - dev = dpu_mdss->base.dev->dev; + dev = dpu_mdss->base.dev; domain = irq_domain_add_linear(dev->of_node, 32, &dpu_mdss_irqdomain_ops, dpu_mdss); @@ -184,16 +184,15 @@ static int dpu_mdss_disable(struct msm_mdss *mdss) return ret; } -static void dpu_mdss_destroy(struct drm_device *dev) +static void dpu_mdss_destroy(struct msm_mdss *mdss) { - struct platform_device *pdev = to_platform_device(dev->dev); - struct msm_drm_private *priv = dev->dev_private; - struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss); + struct platform_device *pdev = to_platform_device(mdss->dev); + struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss); struct dss_module_power *mp = &dpu_mdss->mp; int irq; - pm_runtime_suspend(dev->dev); - pm_runtime_disable(dev->dev); + pm_runtime_suspend(mdss->dev); + pm_runtime_disable(mdss->dev); _dpu_mdss_irq_domain_fini(dpu_mdss); irq = platform_get_irq(pdev, 0); irq_set_chained_handler_and_data(irq, NULL, NULL); @@ -203,7 +202,6 @@ static void dpu_mdss_destroy(struct drm_device *dev) if (dpu_mdss->mmio) devm_iounmap(&pdev->dev, dpu_mdss->mmio); dpu_mdss->mmio = NULL; - priv->mdss = NULL; } static const struct msm_mdss_funcs mdss_funcs = { @@ -212,16 +210,15 @@ static const struct msm_mdss_funcs mdss_funcs = { .destroy = dpu_mdss_destroy, }; -int dpu_mdss_init(struct drm_device *dev) +int dpu_mdss_init(struct platform_device *pdev) { - struct platform_device *pdev = to_platform_device(dev->dev); - struct msm_drm_private *priv = dev->dev_private; + struct msm_drm_private *priv = platform_get_drvdata(pdev); struct dpu_mdss *dpu_mdss; struct dss_module_power *mp; int ret; int irq; - dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL); + dpu_mdss = devm_kzalloc(&pdev->dev, sizeof(*dpu_mdss), GFP_KERNEL); if (!dpu_mdss) return -ENOMEM; @@ -238,7 +235,7 @@ 
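With the reworked dpu_debugfs_create_regset32 shown earlier in this diff, the companion struct is devm-allocated inside the helper, so it persists for the device's lifetime and callers keep no state of their own. A hypothetical call site under the new signature; 'parent' and 'dpu_kms' are assumed to exist in the caller:

/* Expose 0x40 bytes of registers at block offset 0x1000 as
 * <debugfs>/.../my_blk, read back 4 bytes at a time. */
dpu_debugfs_create_regset32("my_blk", 0400, parent,
			    0x1000, 0x40, dpu_kms);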
int dpu_mdss_init(struct drm_device *dev) goto clk_parse_err; } - dpu_mdss->base.dev = dev; + dpu_mdss->base.dev = &pdev->dev; dpu_mdss->base.funcs = &mdss_funcs; ret = _dpu_mdss_irq_domain_add(dpu_mdss); @@ -256,7 +253,7 @@ int dpu_mdss_init(struct drm_device *dev) priv->mdss = &dpu_mdss->base; - pm_runtime_enable(dev->dev); + pm_runtime_enable(&pdev->dev); return 0; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index a3e3b9d1b82e..ca75089c9d61 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -13,7 +13,6 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_uapi.h> #include <drm/drm_damage_helper.h> -#include <drm/drm_file.h> #include <drm/drm_gem_atomic_helper.h> #include "msm_drv.h" @@ -90,7 +89,6 @@ enum dpu_plane_qos { /* * struct dpu_plane - local dpu plane structure * @aspace: address space pointer - * @csc_ptr: Points to dpu_csc_cfg structure to use for current * @mplane_list: List of multirect planes of the same pipe * @catalog: Points to dpu catalog structure * @revalidate: force revalidation of all the plane properties @@ -101,29 +99,14 @@ struct dpu_plane { struct mutex lock; enum dpu_sspp pipe; - uint32_t features; /* capabilities from catalog */ struct dpu_hw_pipe *pipe_hw; - struct dpu_hw_pipe_cfg pipe_cfg; - struct dpu_hw_pipe_qos_cfg pipe_qos_cfg; uint32_t color_fill; bool is_error; bool is_rt_pipe; bool is_virtual; struct list_head mplane_list; struct dpu_mdss_cfg *catalog; - - struct dpu_csc_cfg *csc_ptr; - - const struct dpu_sspp_sub_blks *pipe_sblk; - char pipe_name[DPU_NAME_SIZE]; - - /* debugfs related stuff */ - struct dentry *debugfs_root; - struct dpu_debugfs_regset32 debugfs_src; - struct dpu_debugfs_regset32 debugfs_scaler; - struct dpu_debugfs_regset32 debugfs_csc; - bool debugfs_default_scale; }; static const uint64_t supported_format_modifiers[] = { @@ -145,14 +128,15 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane) * _dpu_plane_calc_bw - calculate bandwidth required for a plane * @plane: Pointer to drm plane. * @fb: Pointer to framebuffer associated with the given plane + * @pipe_cfg: Pointer to pipe configuration * Result: Updates calculated bandwidth in the plane state. * BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest) * Prefill BW Equation: line src bytes * line_time */ static void _dpu_plane_calc_bw(struct drm_plane *plane, - struct drm_framebuffer *fb) + struct drm_framebuffer *fb, + struct dpu_hw_pipe_cfg *pipe_cfg) { - struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_plane_state *pstate; struct drm_display_mode *mode; const struct dpu_format *fmt = NULL; @@ -169,9 +153,9 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane, fmt = dpu_get_dpu_format_ext(fb->format->format, fb->modifier); - src_width = drm_rect_width(&pdpu->pipe_cfg.src_rect); - src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect); - dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect); + src_width = drm_rect_width(&pipe_cfg->src_rect); + src_height = drm_rect_height(&pipe_cfg->src_rect); + dst_height = drm_rect_height(&pipe_cfg->dst_rect); fps = drm_mode_vrefresh(mode); vbp = mode->vtotal - mode->vsync_end; vpw = mode->vsync_end - mode->vsync_start; @@ -202,12 +186,12 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane, /** * _dpu_plane_calc_clk - calculate clock required for a plane * @plane: Pointer to drm plane. + * @pipe_cfg: Pointer to pipe configuration * Result: Updates calculated clock in the plane state. 
* Clock equation: dst_w * v_total * fps * (src_h / dst_h) */ -static void _dpu_plane_calc_clk(struct drm_plane *plane) +static void _dpu_plane_calc_clk(struct drm_plane *plane, struct dpu_hw_pipe_cfg *pipe_cfg) { - struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_plane_state *pstate; struct drm_display_mode *mode; int dst_width, src_height, dst_height, fps; @@ -215,9 +199,9 @@ static void _dpu_plane_calc_clk(struct drm_plane *plane) pstate = to_dpu_plane_state(plane->state); mode = &plane->state->crtc->mode; - src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect); - dst_width = drm_rect_width(&pdpu->pipe_cfg.dst_rect); - dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect); + src_height = drm_rect_height(&pipe_cfg->src_rect); + dst_width = drm_rect_width(&pipe_cfg->dst_rect); + dst_height = drm_rect_height(&pipe_cfg->dst_rect); fps = drm_mode_vrefresh(mode); pstate->plane_clk = @@ -254,14 +238,17 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane, fixed_buff_size = pdpu->catalog->caps->pixel_ram_size; list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) { + u32 tmp_width; + if (!tmp->base.state->visible) continue; + tmp_width = drm_rect_width(&tmp->base.state->src) >> 16; DPU_DEBUG("plane%d/%d src_width:%d/%d\n", pdpu->base.base.id, tmp->base.base.id, src_width, - drm_rect_width(&tmp->pipe_cfg.src_rect)); + tmp_width); src_width = max_t(u32, src_width, - drm_rect_width(&tmp->pipe_cfg.src_rect)); + tmp_width); } if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) { @@ -321,9 +308,10 @@ static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl, * _dpu_plane_set_qos_lut - set QoS LUT of the given plane * @plane: Pointer to drm plane * @fb: Pointer to framebuffer associated with the given plane + * @pipe_cfg: Pointer to pipe configuration */ static void _dpu_plane_set_qos_lut(struct drm_plane *plane, - struct drm_framebuffer *fb) + struct drm_framebuffer *fb, struct dpu_hw_pipe_cfg *pipe_cfg) { struct dpu_plane *pdpu = to_dpu_plane(plane); const struct dpu_format *fmt = NULL; @@ -337,7 +325,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane, fb->format->format, fb->modifier); total_fl = _dpu_plane_calc_fill_level(plane, fmt, - drm_rect_width(&pdpu->pipe_cfg.src_rect)); + drm_rect_width(&pipe_cfg->src_rect)); if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) lut_usage = DPU_QOS_LUT_USAGE_LINEAR; @@ -348,8 +336,6 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane, qos_lut = _dpu_plane_get_qos_lut( &pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl); - pdpu->pipe_qos_cfg.creq_lut = qos_lut; - trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0, (fmt) ? fmt->base.pixel_format : 0, pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage); @@ -359,7 +345,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane, fmt ? (char *)&fmt->base.pixel_format : NULL, pdpu->is_rt_pipe, total_fl, qos_lut); - pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, &pdpu->pipe_qos_cfg); + pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, qos_lut); } /** @@ -397,24 +383,21 @@ static void _dpu_plane_set_danger_lut(struct drm_plane *plane, } } - pdpu->pipe_qos_cfg.danger_lut = danger_lut; - pdpu->pipe_qos_cfg.safe_lut = safe_lut; - trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0, (fmt) ? fmt->base.pixel_format : 0, (fmt) ? fmt->fetch_mode : 0, - pdpu->pipe_qos_cfg.danger_lut, - pdpu->pipe_qos_cfg.safe_lut); + danger_lut, + safe_lut); DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n", pdpu->pipe - SSPP_VIG0, fmt ? 
(char *)&fmt->base.pixel_format : NULL, fmt ? fmt->fetch_mode : -1, - pdpu->pipe_qos_cfg.danger_lut, - pdpu->pipe_qos_cfg.safe_lut); + danger_lut, + safe_lut); pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw, - &pdpu->pipe_qos_cfg); + danger_lut, safe_lut); } /** @@ -427,47 +410,51 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane, bool enable, u32 flags) { struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_hw_pipe_qos_cfg pipe_qos_cfg; + + memset(&pipe_qos_cfg, 0, sizeof(pipe_qos_cfg)); if (flags & DPU_PLANE_QOS_VBLANK_CTRL) { - pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank; - pdpu->pipe_qos_cfg.danger_vblank = - pdpu->pipe_sblk->danger_vblank; - pdpu->pipe_qos_cfg.vblank_en = enable; + pipe_qos_cfg.creq_vblank = pdpu->pipe_hw->cap->sblk->creq_vblank; + pipe_qos_cfg.danger_vblank = + pdpu->pipe_hw->cap->sblk->danger_vblank; + pipe_qos_cfg.vblank_en = enable; } if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) { /* this feature overrules previous VBLANK_CTRL */ - pdpu->pipe_qos_cfg.vblank_en = false; - pdpu->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */ + pipe_qos_cfg.vblank_en = false; + pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */ } if (flags & DPU_PLANE_QOS_PANIC_CTRL) - pdpu->pipe_qos_cfg.danger_safe_en = enable; + pipe_qos_cfg.danger_safe_en = enable; if (!pdpu->is_rt_pipe) { - pdpu->pipe_qos_cfg.vblank_en = false; - pdpu->pipe_qos_cfg.danger_safe_en = false; + pipe_qos_cfg.vblank_en = false; + pipe_qos_cfg.danger_safe_en = false; } DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n", pdpu->pipe - SSPP_VIG0, - pdpu->pipe_qos_cfg.danger_safe_en, - pdpu->pipe_qos_cfg.vblank_en, - pdpu->pipe_qos_cfg.creq_vblank, - pdpu->pipe_qos_cfg.danger_vblank, + pipe_qos_cfg.danger_safe_en, + pipe_qos_cfg.vblank_en, + pipe_qos_cfg.creq_vblank, + pipe_qos_cfg.danger_vblank, pdpu->is_rt_pipe); pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw, - &pdpu->pipe_qos_cfg); + &pipe_qos_cfg); } /** * _dpu_plane_set_ot_limit - set OT limit for the given plane * @plane: Pointer to drm plane * @crtc: Pointer to drm crtc + * @pipe_cfg: Pointer to pipe configuration */ static void _dpu_plane_set_ot_limit(struct drm_plane *plane, - struct drm_crtc *crtc) + struct drm_crtc *crtc, struct dpu_hw_pipe_cfg *pipe_cfg) { struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_vbif_set_ot_params ot_params; @@ -476,8 +463,8 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane, memset(&ot_params, 0, sizeof(ot_params)); ot_params.xin_id = pdpu->pipe_hw->cap->xin_id; ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE; - ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect); - ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect); + ot_params.width = drm_rect_width(&pipe_cfg->src_rect); + ot_params.height = drm_rect_height(&pipe_cfg->src_rect); ot_params.is_wfd = !pdpu->is_rt_pipe; ot_params.frame_rate = drm_mode_vrefresh(&crtc->mode); ot_params.vbif_idx = VBIF_RT; @@ -541,14 +528,12 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu, struct dpu_plane_state *pstate, uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h, struct dpu_hw_scaler3_cfg *scale_cfg, + struct dpu_hw_pixel_ext *pixel_ext, const struct dpu_format *fmt, uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v) { uint32_t i; - memset(scale_cfg, 0, sizeof(*scale_cfg)); - memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext)); - scale_cfg->phase_step_x[DPU_SSPP_COMP_0] = mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w); 
scale_cfg->phase_step_y[DPU_SSPP_COMP_0] = @@ -587,9 +572,9 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu, scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V; } - pstate->pixel_ext.num_ext_pxls_top[i] = + pixel_ext->num_ext_pxls_top[i] = scale_cfg->src_height[i]; - pstate->pixel_ext.num_ext_pxls_left[i] = + pixel_ext->num_ext_pxls_left[i] = scale_cfg->src_width[i]; } if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h) @@ -606,68 +591,97 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu, scale_cfg->enable = 1; } -static void _dpu_plane_setup_csc(struct dpu_plane *pdpu) -{ - static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = { - { - /* S15.16 format */ - 0x00012A00, 0x00000000, 0x00019880, - 0x00012A00, 0xFFFF9B80, 0xFFFF3000, - 0x00012A00, 0x00020480, 0x00000000, +static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = { + { + /* S15.16 format */ + 0x00012A00, 0x00000000, 0x00019880, + 0x00012A00, 0xFFFF9B80, 0xFFFF3000, + 0x00012A00, 0x00020480, 0x00000000, + }, + /* signed bias */ + { 0xfff0, 0xff80, 0xff80,}, + { 0x0, 0x0, 0x0,}, + /* unsigned clamp */ + { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,}, + { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,}, +}; + +static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = { + { + /* S15.16 format */ + 0x00012A00, 0x00000000, 0x00019880, + 0x00012A00, 0xFFFF9B80, 0xFFFF3000, + 0x00012A00, 0x00020480, 0x00000000, }, - /* signed bias */ - { 0xfff0, 0xff80, 0xff80,}, - { 0x0, 0x0, 0x0,}, - /* unsigned clamp */ - { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,}, - { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,}, - }; - static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = { - { - /* S15.16 format */ - 0x00012A00, 0x00000000, 0x00019880, - 0x00012A00, 0xFFFF9B80, 0xFFFF3000, - 0x00012A00, 0x00020480, 0x00000000, - }, - /* signed bias */ - { 0xffc0, 0xfe00, 0xfe00,}, - { 0x0, 0x0, 0x0,}, - /* unsigned clamp */ - { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,}, - { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,}, - }; + /* signed bias */ + { 0xffc0, 0xfe00, 0xfe00,}, + { 0x0, 0x0, 0x0,}, + /* unsigned clamp */ + { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,}, + { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,}, +}; + +static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_plane *pdpu, const struct dpu_format *fmt) +{ + const struct dpu_csc_cfg *csc_ptr; if (!pdpu) { DPU_ERROR("invalid plane\n"); - return; + return NULL; } - if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->features) - pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc10_YUV2RGB_601L; + if (!DPU_FORMAT_IS_YUV(fmt)) + return NULL; + + if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->pipe_hw->cap->features) + csc_ptr = &dpu_csc10_YUV2RGB_601L; else - pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc_YUV2RGB_601L; + csc_ptr = &dpu_csc_YUV2RGB_601L; DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n", - pdpu->csc_ptr->csc_mv[0], - pdpu->csc_ptr->csc_mv[1], - pdpu->csc_ptr->csc_mv[2]); + csc_ptr->csc_mv[0], + csc_ptr->csc_mv[1], + csc_ptr->csc_mv[2]); + + return csc_ptr; } static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu, struct dpu_plane_state *pstate, - const struct dpu_format *fmt, bool color_fill) + const struct dpu_format *fmt, bool color_fill, + struct dpu_hw_pipe_cfg *pipe_cfg) { const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format); + struct dpu_hw_scaler3_cfg scaler3_cfg; + struct dpu_hw_pixel_ext pixel_ext; + + memset(&scaler3_cfg, 0, sizeof(scaler3_cfg)); + memset(&pixel_ext, 0, sizeof(pixel_ext)); /* don't chroma subsample if decimating */ /* update scaler. 
calculate default config for QSEED3 */ _dpu_plane_setup_scaler3(pdpu, pstate, - drm_rect_width(&pdpu->pipe_cfg.src_rect), - drm_rect_height(&pdpu->pipe_cfg.src_rect), - drm_rect_width(&pdpu->pipe_cfg.dst_rect), - drm_rect_height(&pdpu->pipe_cfg.dst_rect), - &pstate->scaler3_cfg, fmt, + drm_rect_width(&pipe_cfg->src_rect), + drm_rect_height(&pipe_cfg->src_rect), + drm_rect_width(&pipe_cfg->dst_rect), + drm_rect_height(&pipe_cfg->dst_rect), + &scaler3_cfg, &pixel_ext, fmt, info->hsub, info->vsub); + + if (pdpu->pipe_hw->ops.setup_pe) + pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw, + &pixel_ext); + + /** + * when programmed in multirect mode, the scaler block will be + * bypassed. We still need to update alpha and bitwidth, + * but ONLY for RECT0 + */ + if (pdpu->pipe_hw->ops.setup_scaler && + pstate->multirect_index != DPU_SSPP_RECT_1) + pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw, + pipe_cfg, + &scaler3_cfg); } /** @@ -683,6 +697,7 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu, const struct dpu_format *fmt; const struct drm_plane *plane = &pdpu->base; struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state); + struct dpu_hw_pipe_cfg pipe_cfg; DPU_DEBUG_PLANE(pdpu, "\n"); @@ -699,13 +714,14 @@ pstate->multirect_index); /* override scaler/decimation if solid fill */ - pdpu->pipe_cfg.src_rect.x1 = 0; - pdpu->pipe_cfg.src_rect.y1 = 0; - pdpu->pipe_cfg.src_rect.x2 = - drm_rect_width(&pdpu->pipe_cfg.dst_rect); - pdpu->pipe_cfg.src_rect.y2 = - drm_rect_height(&pdpu->pipe_cfg.dst_rect); - _dpu_plane_setup_scaler(pdpu, pstate, fmt, true); + pipe_cfg.dst_rect = pstate->base.dst; + + pipe_cfg.src_rect.x1 = 0; + pipe_cfg.src_rect.y1 = 0; + pipe_cfg.src_rect.x2 = + drm_rect_width(&pipe_cfg.dst_rect); + pipe_cfg.src_rect.y2 = + drm_rect_height(&pipe_cfg.dst_rect); if (pdpu->pipe_hw->ops.setup_format) pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, @@ -714,18 +730,10 @@ if (pdpu->pipe_hw->ops.setup_rects) pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw, - &pdpu->pipe_cfg, + &pipe_cfg, pstate->multirect_index); - if (pdpu->pipe_hw->ops.setup_pe) - pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw, - &pstate->pixel_ext); - - if (pdpu->pipe_hw->ops.setup_scaler && - pstate->multirect_index != DPU_SSPP_RECT_1) - pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw, - &pdpu->pipe_cfg, &pstate->pixel_ext, - &pstate->scaler3_cfg); + _dpu_plane_setup_scaler(pdpu, pstate, fmt, true, &pipe_cfg); } return 0; @@ -964,10 +972,10 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); - min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale); + min_scale = FRAC_16_16(1, pdpu->pipe_hw->cap->sblk->maxupscale); ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, min_scale, - pdpu->pipe_sblk->maxdwnscale << 16, + pdpu->pipe_hw->cap->sblk->maxdwnscale << 16, true, true); if (ret) { DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret); @@ -993,9 +1001,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, min_src_size = DPU_FORMAT_IS_YUV(fmt) ?
2 : 1; if (DPU_FORMAT_IS_YUV(fmt) && - (!(pdpu->features & DPU_SSPP_SCALER) || - !(pdpu->features & (BIT(DPU_SSPP_CSC) - | BIT(DPU_SSPP_CSC_10BIT))))) { + (!(pdpu->pipe_hw->cap->features & DPU_SSPP_SCALER) || + !(pdpu->pipe_hw->cap->features & DPU_SSPP_CSC_ANY))) { DPU_DEBUG_PLANE(pdpu, "plane doesn't have scaler/csc for yuv\n"); return -EINVAL; @@ -1056,8 +1063,13 @@ void dpu_plane_flush(struct drm_plane *plane) else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) /* force 100% alpha */ _dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF); - else if (pdpu->pipe_hw && pdpu->csc_ptr && pdpu->pipe_hw->ops.setup_csc) - pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, pdpu->csc_ptr); + else if (pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_csc) { + const struct dpu_format *fmt = to_dpu_format(msm_framebuffer_format(plane->state->fb)); + const struct dpu_csc_cfg *csc_ptr = _dpu_plane_get_csc(pdpu, fmt); + + if (csc_ptr) + pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, csc_ptr); + } /* flag h/w flush complete */ if (plane->state) @@ -1091,10 +1103,11 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) bool is_rt_pipe, update_qos_remap; const struct dpu_format *fmt = to_dpu_format(msm_framebuffer_format(fb)); + struct dpu_hw_pipe_cfg pipe_cfg; - memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg)); + memset(&pipe_cfg, 0, sizeof(struct dpu_hw_pipe_cfg)); - _dpu_plane_set_scanout(plane, pstate, &pdpu->pipe_cfg, fb); + _dpu_plane_set_scanout(plane, pstate, &pipe_cfg, fb); pstate->pending = true; @@ -1106,17 +1119,15 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) crtc->base.id, DRM_RECT_ARG(&state->dst), (char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt)); - pdpu->pipe_cfg.src_rect = state->src; + pipe_cfg.src_rect = state->src; /* state->src is 16.16, src_rect is not */ - pdpu->pipe_cfg.src_rect.x1 >>= 16; - pdpu->pipe_cfg.src_rect.x2 >>= 16; - pdpu->pipe_cfg.src_rect.y1 >>= 16; - pdpu->pipe_cfg.src_rect.y2 >>= 16; - - pdpu->pipe_cfg.dst_rect = state->dst; + pipe_cfg.src_rect.x1 >>= 16; + pipe_cfg.src_rect.x2 >>= 16; + pipe_cfg.src_rect.y1 >>= 16; + pipe_cfg.src_rect.y2 >>= 16; - _dpu_plane_setup_scaler(pdpu, pstate, fmt, false); + pipe_cfg.dst_rect = state->dst; /* override for color fill */ if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) { @@ -1126,25 +1137,11 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) if (pdpu->pipe_hw->ops.setup_rects) { pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw, - &pdpu->pipe_cfg, + &pipe_cfg, pstate->multirect_index); } - if (pdpu->pipe_hw->ops.setup_pe && - (pstate->multirect_index != DPU_SSPP_RECT_1)) - pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw, - &pstate->pixel_ext); - - /** - * when programmed in multirect mode, scalar block will be - * bypassed. 
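/*
 * Editor's sketch, not part of the patch: the comment being moved here
 * explains that in multirect mode the scaler block is bypassed for
 * RECT1, which reuses RECT0's programming, while pixel extension is
 * still set up for every rectangle. A standalone model of the order
 * _dpu_plane_setup_scaler() now enforces; names are illustrative.
 */
#include <stdio.h>

enum rect_index { RECT_SOLO, RECT_0, RECT_1 };

static void setup_pe(void)     { printf("program pixel extension\n"); }
static void setup_scaler(void) { printf("program scaler\n"); }

static void setup_plane(enum rect_index idx)
{
	setup_pe();             /* every rectangle gets pixel-ext data */
	if (idx != RECT_1)
		setup_scaler(); /* shared scaler: RECT1 rides on RECT0 */
}

int main(void)
{
	setup_plane(RECT_0); /* programs both blocks */
	setup_plane(RECT_1); /* pixel extension only */
	return 0;
}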
Still we need to update alpha and bitwidth - * ONLY for RECT0 - */ - if (pdpu->pipe_hw->ops.setup_scaler && - pstate->multirect_index != DPU_SSPP_RECT_1) - pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw, - &pdpu->pipe_cfg, &pstate->pixel_ext, - &pstate->scaler3_cfg); + _dpu_plane_setup_scaler(pdpu, pstate, fmt, false, &pipe_cfg); if (pdpu->pipe_hw->ops.setup_multirect) pdpu->pipe_hw->ops.setup_multirect( @@ -1173,35 +1170,29 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) pstate->multirect_index); if (pdpu->pipe_hw->ops.setup_cdp) { - struct dpu_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg; + struct dpu_hw_pipe_cdp_cfg cdp_cfg; - memset(cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg)); + memset(&cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg)); - cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg + cdp_cfg.enable = pdpu->catalog->perf.cdp_cfg [DPU_PERF_CDP_USAGE_RT].rd_enable; - cdp_cfg->ubwc_meta_enable = + cdp_cfg.ubwc_meta_enable = DPU_FORMAT_IS_UBWC(fmt); - cdp_cfg->tile_amortize_enable = + cdp_cfg.tile_amortize_enable = DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt); - cdp_cfg->preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64; + cdp_cfg.preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64; - pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, cdp_cfg); + pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, &cdp_cfg, pstate->multirect_index); } - - /* update csc */ - if (DPU_FORMAT_IS_YUV(fmt)) - _dpu_plane_setup_csc(pdpu); - else - pdpu->csc_ptr = NULL; } - _dpu_plane_set_qos_lut(plane, fb); + _dpu_plane_set_qos_lut(plane, fb, &pipe_cfg); _dpu_plane_set_danger_lut(plane, fb); if (plane->type != DRM_PLANE_TYPE_CURSOR) { _dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL); - _dpu_plane_set_ot_limit(plane, crtc); + _dpu_plane_set_ot_limit(plane, crtc, &pipe_cfg); } update_qos_remap = (is_rt_pipe != pdpu->is_rt_pipe) || @@ -1215,9 +1206,9 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) _dpu_plane_set_qos_remap(plane); } - _dpu_plane_calc_bw(plane, fb); + _dpu_plane_calc_bw(plane, fb, &pipe_cfg); - _dpu_plane_calc_clk(plane); + _dpu_plane_calc_clk(plane, &pipe_cfg); } static void _dpu_plane_atomic_disable(struct drm_plane *plane) @@ -1314,6 +1305,46 @@ dpu_plane_duplicate_state(struct drm_plane *plane) return &pstate->base; } +static const char * const multirect_mode_name[] = { + [DPU_SSPP_MULTIRECT_NONE] = "none", + [DPU_SSPP_MULTIRECT_PARALLEL] = "parallel", + [DPU_SSPP_MULTIRECT_TIME_MX] = "time_mx", +}; + +static const char * const multirect_index_name[] = { + [DPU_SSPP_RECT_SOLO] = "solo", + [DPU_SSPP_RECT_0] = "rect_0", + [DPU_SSPP_RECT_1] = "rect_1", +}; + +static const char *dpu_get_multirect_mode(enum dpu_sspp_multirect_mode mode) +{ + if (WARN_ON(mode >= ARRAY_SIZE(multirect_mode_name))) + return "unknown"; + + return multirect_mode_name[mode]; +} + +static const char *dpu_get_multirect_index(enum dpu_sspp_multirect_index index) +{ + if (WARN_ON(index >= ARRAY_SIZE(multirect_index_name))) + return "unknown"; + + return multirect_index_name[index]; +} + +static void dpu_plane_atomic_print_state(struct drm_printer *p, + const struct drm_plane_state *state) +{ + const struct dpu_plane_state *pstate = to_dpu_plane_state(state); + const struct dpu_plane *pdpu = to_dpu_plane(state->plane); + + drm_printf(p, "\tstage=%d\n", pstate->stage); + drm_printf(p, "\tsspp=%s\n", pdpu->pipe_hw->cap->name); + drm_printf(p, "\tmultirect_mode=%s\n", dpu_get_multirect_mode(pstate->multirect_mode)); + drm_printf(p, "\tmultirect_index=%s\n", 
dpu_get_multirect_index(pstate->multirect_index)); +} + static void dpu_plane_reset(struct drm_plane *plane) { struct dpu_plane *pdpu; @@ -1343,7 +1374,7 @@ static void dpu_plane_reset(struct drm_plane *plane) } #ifdef CONFIG_DEBUG_FS -static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) +void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) { struct dpu_plane *pdpu = to_dpu_plane(plane); struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); @@ -1356,167 +1387,23 @@ static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) pm_runtime_put_sync(&dpu_kms->pdev->dev); } -static ssize_t _dpu_plane_danger_read(struct file *file, - char __user *buff, size_t count, loff_t *ppos) -{ - struct dpu_kms *kms = file->private_data; - int len; - char buf[40]; - - len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl); - - return simple_read_from_buffer(buff, count, ppos, buf, len); -} - -static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable) +/* SSPP live inside dpu_plane private data only. Enumerate them here. */ +void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root) { struct drm_plane *plane; + struct dentry *entry = debugfs_create_dir("sspp", debugfs_root); - drm_for_each_plane(plane, kms->dev) { - if (plane->fb && plane->state) { - dpu_plane_danger_signal_ctrl(plane, enable); - DPU_DEBUG("plane:%d img:%dx%d ", - plane->base.id, plane->fb->width, - plane->fb->height); - DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n", - plane->state->src_x >> 16, - plane->state->src_y >> 16, - plane->state->src_w >> 16, - plane->state->src_h >> 16, - plane->state->crtc_x, plane->state->crtc_y, - plane->state->crtc_w, plane->state->crtc_h); - } else { - DPU_DEBUG("Inactive plane:%d\n", plane->base.id); - } - } -} - -static ssize_t _dpu_plane_danger_write(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - struct dpu_kms *kms = file->private_data; - int disable_panic; - int ret; - - ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic); - if (ret) - return ret; - - if (disable_panic) { - /* Disable panic signal for all active pipes */ - DPU_DEBUG("Disabling danger:\n"); - _dpu_plane_set_danger_state(kms, false); - kms->has_danger_ctrl = false; - } else { - /* Enable panic signal for all active pipes */ - DPU_DEBUG("Enabling danger:\n"); - kms->has_danger_ctrl = true; - _dpu_plane_set_danger_state(kms, true); - } - - return count; -} + if (IS_ERR(entry)) + return; -static const struct file_operations dpu_plane_danger_enable = { - .open = simple_open, - .read = _dpu_plane_danger_read, - .write = _dpu_plane_danger_write, -}; + drm_for_each_plane(plane, dpu_kms->dev) { + struct dpu_plane *pdpu = to_dpu_plane(plane); -static int _dpu_plane_init_debugfs(struct drm_plane *plane) -{ - struct dpu_plane *pdpu = to_dpu_plane(plane); - struct dpu_kms *kms = _dpu_plane_get_kms(plane); - const struct dpu_sspp_cfg *cfg = pdpu->pipe_hw->cap; - const struct dpu_sspp_sub_blks *sblk = cfg->sblk; - - /* create overall sub-directory for the pipe */ - pdpu->debugfs_root = - debugfs_create_dir(pdpu->pipe_name, - plane->dev->primary->debugfs_root); - - /* don't error check these */ - debugfs_create_x32("features", 0600, - pdpu->debugfs_root, &pdpu->features); - - /* add register dump support */ - dpu_debugfs_setup_regset32(&pdpu->debugfs_src, - sblk->src_blk.base + cfg->base, - sblk->src_blk.len, - kms); - dpu_debugfs_create_regset32("src_blk", 0400, - pdpu->debugfs_root, 
&pdpu->debugfs_src); - - if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) || - cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) || - cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) || - cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) { - dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler, - sblk->scaler_blk.base + cfg->base, - sblk->scaler_blk.len, - kms); - dpu_debugfs_create_regset32("scaler_blk", 0400, - pdpu->debugfs_root, - &pdpu->debugfs_scaler); - debugfs_create_bool("default_scaling", - 0600, - pdpu->debugfs_root, - &pdpu->debugfs_default_scale); - } - - if (cfg->features & BIT(DPU_SSPP_CSC) || - cfg->features & BIT(DPU_SSPP_CSC_10BIT)) { - dpu_debugfs_setup_regset32(&pdpu->debugfs_csc, - sblk->csc_blk.base + cfg->base, - sblk->csc_blk.len, - kms); - dpu_debugfs_create_regset32("csc_blk", 0400, - pdpu->debugfs_root, &pdpu->debugfs_csc); + _dpu_hw_sspp_init_debugfs(pdpu->pipe_hw, dpu_kms, entry); } - - debugfs_create_u32("xin_id", - 0400, - pdpu->debugfs_root, - (u32 *) &cfg->xin_id); - debugfs_create_u32("clk_ctrl", - 0400, - pdpu->debugfs_root, - (u32 *) &cfg->clk_ctrl); - debugfs_create_x32("creq_vblank", - 0600, - pdpu->debugfs_root, - (u32 *) &sblk->creq_vblank); - debugfs_create_x32("danger_vblank", - 0600, - pdpu->debugfs_root, - (u32 *) &sblk->danger_vblank); - - debugfs_create_file("disable_danger", - 0600, - pdpu->debugfs_root, - kms, &dpu_plane_danger_enable); - - return 0; -} -#else -static int _dpu_plane_init_debugfs(struct drm_plane *plane) -{ - return 0; } #endif -static int dpu_plane_late_register(struct drm_plane *plane) -{ - return _dpu_plane_init_debugfs(plane); -} - -static void dpu_plane_early_unregister(struct drm_plane *plane) -{ - struct dpu_plane *pdpu = to_dpu_plane(plane); - - debugfs_remove_recursive(pdpu->debugfs_root); -} - static bool dpu_plane_format_mod_supported(struct drm_plane *plane, uint32_t format, uint64_t modifier) { @@ -1541,8 +1428,7 @@ static const struct drm_plane_funcs dpu_plane_funcs = { .reset = dpu_plane_reset, .atomic_duplicate_state = dpu_plane_duplicate_state, .atomic_destroy_state = dpu_plane_destroy_state, - .late_register = dpu_plane_late_register, - .early_unregister = dpu_plane_early_unregister, + .atomic_print_state = dpu_plane_atomic_print_state, .format_mod_supported = dpu_plane_format_mod_supported, }; @@ -1609,21 +1495,13 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, goto clean_sspp; } - /* cache features mask for later */ - pdpu->features = pdpu->pipe_hw->cap->features; - pdpu->pipe_sblk = pdpu->pipe_hw->cap->sblk; - if (!pdpu->pipe_sblk) { - DPU_ERROR("[%u]invalid sblk\n", pipe); - goto clean_sspp; - } - if (pdpu->is_virtual) { - format_list = pdpu->pipe_sblk->virt_format_list; - num_formats = pdpu->pipe_sblk->virt_num_formats; + format_list = pdpu->pipe_hw->cap->sblk->virt_format_list; + num_formats = pdpu->pipe_hw->cap->sblk->virt_num_formats; } else { - format_list = pdpu->pipe_sblk->format_list; - num_formats = pdpu->pipe_sblk->num_formats; + format_list = pdpu->pipe_hw->cap->sblk->format_list; + num_formats = pdpu->pipe_hw->cap->sblk->num_formats; } ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs, @@ -1663,12 +1541,9 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, /* success! 
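/*
 * Editor's sketch, not part of the patch: the new list_del() on the
 * clean_plane error path further down exists because a node that was
 * added to a list must be unlinked before it is freed, or the list is
 * left holding a dangling pointer. Kernel-style sketch of that pairing
 * using the real list/slab APIs; the struct is illustrative.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct item {
	struct list_head node;
};

static int add_item(struct list_head *head, bool later_step_fails)
{
	struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

	if (!it)
		return -ENOMEM;
	list_add_tail(&it->node, head);

	if (later_step_fails) {
		list_del(&it->node); /* unlink before kfree() */
		kfree(it);
		return -EINVAL;
	}
	return 0;
}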
finalize initialization */ drm_plane_helper_add(plane, &dpu_plane_helper_funcs); - /* save user friendly pipe name for later */ - snprintf(pdpu->pipe_name, DPU_NAME_SIZE, "plane%u", plane->base.id); - mutex_init(&pdpu->lock); - DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", pdpu->pipe_name, + DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", plane->name, pipe, plane->base.id, master_plane_id); return plane; @@ -1676,6 +1551,7 @@ clean_sspp: if (pdpu && pdpu->pipe_hw) dpu_hw_sspp_destroy(pdpu->pipe_hw); clean_plane: + list_del(&pdpu->mplane_list); kfree(pdpu); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h index 34e03ac05f4a..9d51dad5c6a5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h @@ -23,9 +23,6 @@ * @multirect_index: index of the rectangle of SSPP * @multirect_mode: parallel or time multiplex multirect mode * @pending: whether the current update is still pending - * @scaler3_cfg: configuration data for scaler3 - * @pixel_ext: configuration data for pixel extensions - * @cdp_cfg: CDP configuration * @plane_fetch_bw: calculated BW per plane * @plane_clk: calculated clk per plane */ @@ -38,11 +35,6 @@ struct dpu_plane_state { uint32_t multirect_mode; bool pending; - /* scaler configuration */ - struct dpu_hw_scaler3_cfg scaler3_cfg; - struct dpu_hw_pixel_ext pixel_ext; - - struct dpu_hw_pipe_cdp_cfg cdp_cfg; u64 plane_fetch_bw; u64 plane_clk; }; @@ -134,4 +126,10 @@ void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state); int dpu_plane_color_fill(struct drm_plane *plane, uint32_t color, uint32_t alpha); +#ifdef CONFIG_DEBUG_FS +void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable); +#else +static inline void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) {} +#endif + #endif /* _DPU_PLANE_H_ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h index 37bba57675a8..54d74341e690 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h @@ -266,10 +266,6 @@ DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit, TP_PROTO(uint32_t drm_id), TP_ARGS(drm_id) ); -DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_enc_enable, - TP_PROTO(uint32_t drm_id), - TP_ARGS(drm_id) -); DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit, TP_PROTO(uint32_t drm_id), TP_ARGS(drm_id) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 7b242246d4e7..12a5f81e402b 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -370,22 +370,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, switch (intf->type) { case INTF_eDP: - if (!priv->edp) - break; - - ctl = mdp5_ctlm_request(ctlm, intf->num); - if (!ctl) { - ret = -EINVAL; - break; - } - - encoder = construct_encoder(mdp5_kms, intf, ctl); - if (IS_ERR(encoder)) { - ret = PTR_ERR(encoder); - break; - } - - ret = msm_edp_modeset_init(priv->edp, dev, encoder); + DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num); break; case INTF_HDMI: if (!priv->hdmi) @@ -936,7 +921,8 @@ fail: static int mdp5_bind(struct device *dev, struct device *master, void *data) { - struct drm_device *ddev = dev_get_drvdata(master); + struct msm_drm_private *priv = dev_get_drvdata(master); + struct drm_device *ddev = priv->dev; struct platform_device *pdev = to_platform_device(dev); DBG(""); @@ 
-1031,7 +1017,7 @@ static const struct dev_pm_ops mdp5_pm_ops = { SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL) }; -static const struct of_device_id mdp5_dt_match[] = { +const struct of_device_id mdp5_dt_match[] = { { .compatible = "qcom,mdp5", }, /* to support downstream DT files */ { .compatible = "qcom,mdss_mdp", }, diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c index 0ea53420bc40..b3f79c2277e9 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c @@ -16,8 +16,6 @@ struct mdp5_mdss { void __iomem *mmio, *vbif; - struct regulator *vdd; - struct clk *ahb_clk; struct clk *axi_clk; struct clk *vsync_clk; @@ -114,7 +112,7 @@ static const struct irq_domain_ops mdss_hw_irqdomain_ops = { static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss) { - struct device *dev = mdp5_mdss->base.dev->dev; + struct device *dev = mdp5_mdss->base.dev; struct irq_domain *d; d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops, @@ -157,7 +155,7 @@ static int mdp5_mdss_disable(struct msm_mdss *mdss) static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss) { struct platform_device *pdev = - to_platform_device(mdp5_mdss->base.dev->dev); + to_platform_device(mdp5_mdss->base.dev); mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface"); if (IS_ERR(mdp5_mdss->ahb_clk)) @@ -174,10 +172,9 @@ static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss) return 0; } -static void mdp5_mdss_destroy(struct drm_device *dev) +static void mdp5_mdss_destroy(struct msm_mdss *mdss) { - struct msm_drm_private *priv = dev->dev_private; - struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss); + struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss); if (!mdp5_mdss) return; @@ -185,9 +182,7 @@ static void mdp5_mdss_destroy(struct drm_device *dev) irq_domain_remove(mdp5_mdss->irqcontroller.domain); mdp5_mdss->irqcontroller.domain = NULL; - regulator_disable(mdp5_mdss->vdd); - - pm_runtime_disable(dev->dev); + pm_runtime_disable(mdss->dev); } static const struct msm_mdss_funcs mdss_funcs = { @@ -196,25 +191,24 @@ static const struct msm_mdss_funcs mdss_funcs = { .destroy = mdp5_mdss_destroy, }; -int mdp5_mdss_init(struct drm_device *dev) +int mdp5_mdss_init(struct platform_device *pdev) { - struct platform_device *pdev = to_platform_device(dev->dev); - struct msm_drm_private *priv = dev->dev_private; + struct msm_drm_private *priv = platform_get_drvdata(pdev); struct mdp5_mdss *mdp5_mdss; int ret; DBG(""); - if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss")) + if (!of_device_is_compatible(pdev->dev.of_node, "qcom,mdss")) return 0; - mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL); + mdp5_mdss = devm_kzalloc(&pdev->dev, sizeof(*mdp5_mdss), GFP_KERNEL); if (!mdp5_mdss) { ret = -ENOMEM; goto fail; } - mdp5_mdss->base.dev = dev; + mdp5_mdss->base.dev = &pdev->dev; mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS"); if (IS_ERR(mdp5_mdss->mmio)) { @@ -230,45 +224,29 @@ int mdp5_mdss_init(struct drm_device *dev) ret = msm_mdss_get_clocks(mdp5_mdss); if (ret) { - DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret); - goto fail; - } - - /* Regulator to enable GDSCs in downstream kernels */ - mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd"); - if (IS_ERR(mdp5_mdss->vdd)) { - ret = PTR_ERR(mdp5_mdss->vdd); - goto fail; - } - - ret = regulator_enable(mdp5_mdss->vdd); - if (ret) { - DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", - ret); + 
DRM_DEV_ERROR(&pdev->dev, "failed to get clocks: %d\n", ret); goto fail; } - ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0), + ret = devm_request_irq(&pdev->dev, platform_get_irq(pdev, 0), mdss_irq, 0, "mdss_isr", mdp5_mdss); if (ret) { - DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret); - goto fail_irq; + DRM_DEV_ERROR(&pdev->dev, "failed to init irq: %d\n", ret); + goto fail; } ret = mdss_irq_domain_init(mdp5_mdss); if (ret) { - DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret); - goto fail_irq; + DRM_DEV_ERROR(&pdev->dev, "failed to init sub-block irqs: %d\n", ret); + goto fail; } mdp5_mdss->base.funcs = &mdss_funcs; priv->mdss = &mdp5_mdss->base; - pm_runtime_enable(dev->dev); + pm_runtime_enable(&pdev->dev); return 0; -fail_irq: - regulator_disable(mdp5_mdss->vdd); fail: return ret; } diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c index a4a7cb06bc87..e75b97127c0d 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c @@ -28,29 +28,42 @@ static ssize_t __maybe_unused disp_devcoredump_read(char *buffer, loff_t offset, return count - iter.remain; } -static void _msm_disp_snapshot_work(struct kthread_work *work) +struct msm_disp_state * +msm_disp_snapshot_state_sync(struct msm_kms *kms) { - struct msm_kms *kms = container_of(work, struct msm_kms, dump_work); struct drm_device *drm_dev = kms->dev; struct msm_disp_state *disp_state; - struct drm_printer p; + + WARN_ON(!mutex_is_locked(&kms->dump_mutex)); disp_state = kzalloc(sizeof(struct msm_disp_state), GFP_KERNEL); if (!disp_state) - return; + return ERR_PTR(-ENOMEM); disp_state->dev = drm_dev->dev; disp_state->drm_dev = drm_dev; INIT_LIST_HEAD(&disp_state->blocks); - /* Serialize dumping here */ - mutex_lock(&kms->dump_mutex); - msm_disp_snapshot_capture_state(disp_state); + return disp_state; +} + +static void _msm_disp_snapshot_work(struct kthread_work *work) +{ + struct msm_kms *kms = container_of(work, struct msm_kms, dump_work); + struct msm_disp_state *disp_state; + struct drm_printer p; + + /* Serialize dumping here */ + mutex_lock(&kms->dump_mutex); + disp_state = msm_disp_snapshot_state_sync(kms); mutex_unlock(&kms->dump_mutex); + if (IS_ERR(disp_state)) + return; + if (MSM_DISP_SNAPSHOT_DUMP_IN_CONSOLE) { p = drm_info_printer(disp_state->drm_dev->dev); msm_disp_state_print(disp_state, &p); diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h index 4c619307612c..b5f452bd7ada 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h @@ -39,7 +39,7 @@ * @dev: device pointer * @drm_dev: drm device pointer * @atomic_state: atomic state duplicated at the time of the error - * @timestamp: timestamp at which the coredump was captured + * @time: timestamp at which the coredump was captured */ struct msm_disp_state { struct device *dev; @@ -49,7 +49,7 @@ struct msm_disp_state { struct drm_atomic_state *atomic_state; - ktime_t timestamp; + struct timespec64 time; }; /** @@ -85,6 +85,16 @@ int msm_disp_snapshot_init(struct drm_device *drm_dev); void msm_disp_snapshot_destroy(struct drm_device *drm_dev); /** + * msm_disp_snapshot_state_sync - synchronously snapshot display state + * @kms: the kms object + * + * Returns state or error + * + * Must be called with &kms->dump_mutex held + */ +struct msm_disp_state *msm_disp_snapshot_state_sync(struct msm_kms *kms); + +/** * 
msm_disp_snapshot_state - trigger to dump the display snapshot * @drm_dev: handle to drm device diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c index 2e1acb1bc390..5d2ff6791058 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c @@ -5,6 +5,8 @@ #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ +#include <generated/utsrelease.h> + #include "msm_disp_snapshot.h" static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *base_addr) @@ -79,10 +81,11 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p) } drm_printf(p, "---\n"); - + drm_printf(p, "kernel: " UTS_RELEASE "\n"); drm_printf(p, "module: " KBUILD_MODNAME "\n"); drm_printf(p, "dpu devcoredump\n"); - drm_printf(p, "timestamp %lld\n", ktime_to_ns(state->timestamp)); + drm_printf(p, "time: %lld.%09ld\n", + state->time.tv_sec, state->time.tv_nsec); list_for_each_entry_safe(block, tmp, &state->blocks, node) { drm_printf(p, "====================%s================\n", block->name); @@ -100,7 +103,7 @@ static void msm_disp_capture_atomic_state(struct msm_disp_state *disp_state) struct drm_device *ddev; struct drm_modeset_acquire_ctx ctx; - disp_state->timestamp = ktime_get(); + ktime_get_real_ts64(&disp_state->time); ddev = disp_state->drm_dev; diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index ecf20458c75e..88ca6c3aedd3 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -119,13 +119,13 @@ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl) static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) { u32 config = 0, tbd; - u8 *dpcd = ctrl->panel->dpcd; + const u8 *dpcd = ctrl->panel->dpcd; /* Default-> LSCLK DIV: 1/4 LCLK */ config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT); /* Scrambler reset enable */ - if (dpcd[DP_EDP_CONFIGURATION_CAP] & DP_ALTERNATE_SCRAMBLER_RESET_CAP) + if (drm_dp_alternate_scrambler_reset_cap(dpcd)) config |= DP_CONFIGURATION_CTRL_ASSR; tbd = dp_link_get_test_bits_depth(ctrl->link, @@ -1228,7 +1228,10 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, int *training_step) { int ret = 0; + const u8 *dpcd = ctrl->panel->dpcd; u8 encoding = DP_SET_ANSI_8B10B; + u8 ssc; + u8 assr; struct dp_link_info link_info = {0}; dp_ctrl_config_ctrl(ctrl); @@ -1238,9 +1241,21 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING; dp_aux_link_configure(ctrl->aux, &link_info); + + if (drm_dp_max_downspread(dpcd)) { + ssc = DP_SPREAD_AMP_0_5; + drm_dp_dpcd_write(ctrl->aux, DP_DOWNSPREAD_CTRL, &ssc, 1); + } + drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &encoding, 1); + if (drm_dp_alternate_scrambler_reset_cap(dpcd)) { + assr = DP_ALTERNATE_SCRAMBLER_RESET_ENABLE; + drm_dp_dpcd_write(ctrl->aux, DP_EDP_CONFIGURATION_SET, + &assr, 1); + } + ret = dp_ctrl_link_train_1(ctrl, training_step); if (ret) { DRM_ERROR("link training #1 failed. 
ret=%d\n", ret); @@ -1312,9 +1327,11 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) struct dp_io *dp_io = &ctrl->parser->io; struct phy *phy = dp_io->phy; struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp; + const u8 *dpcd = ctrl->panel->dpcd; opts_dp->lanes = ctrl->link->link_params.num_lanes; opts_dp->link_rate = ctrl->link->link_params.rate / 100; + opts_dp->ssc = drm_dp_max_downspread(dpcd); dp_ctrl_set_clock_rate(ctrl, DP_CTRL_PM, "ctrl_link", ctrl->link->link_params.rate * 1000); @@ -1406,7 +1423,7 @@ void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl) static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl) { - u8 *dpcd = ctrl->panel->dpcd; + const u8 *dpcd = ctrl->panel->dpcd; /* * For better interop experience, used a fixed NVID=0x8000 diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index aba8aa47ed76..7cc4d21f2091 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -135,8 +135,18 @@ static const struct msm_dp_config sc7180_dp_cfg = { .num_descs = 1, }; +static const struct msm_dp_config sc7280_dp_cfg = { + .descs = (const struct msm_dp_desc[]) { + [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort }, + [MSM_DP_CONTROLLER_1] = { .io_start = 0x0aea0000, .connector_type = DRM_MODE_CONNECTOR_eDP }, + }, + .num_descs = 2, +}; + static const struct of_device_id dp_dt_match[] = { { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_cfg }, + { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_cfg }, + { .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_cfg }, {} }; @@ -224,13 +234,10 @@ static int dp_display_bind(struct device *dev, struct device *master, { int rc = 0; struct dp_display_private *dp = dev_get_dp_display_private(dev); - struct msm_drm_private *priv; - struct drm_device *drm; - - drm = dev_get_drvdata(master); + struct msm_drm_private *priv = dev_get_drvdata(master); + struct drm_device *drm = priv->dev; dp->dp_display.drm_dev = drm; - priv = drm->dev_private; priv->dp[dp->id] = &dp->dp_display; rc = dp->parser->parse(dp->parser, dp->dp_display.connector_type); @@ -266,8 +273,7 @@ static void dp_display_unbind(struct device *dev, struct device *master, void *data) { struct dp_display_private *dp = dev_get_dp_display_private(dev); - struct drm_device *drm = dev_get_drvdata(master); - struct msm_drm_private *priv = drm->dev_private; + struct msm_drm_private *priv = dev_get_drvdata(master); dp_power_client_deinit(dp->power); dp_aux_unregister(dp->aux); @@ -410,12 +416,11 @@ static int dp_display_usbpd_configure_cb(struct device *dev) static int dp_display_usbpd_disconnect_cb(struct device *dev) { - int rc = 0; struct dp_display_private *dp = dev_get_dp_display_private(dev); dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); - return rc; + return 0; } static void dp_display_handle_video_request(struct dp_display_private *dp) @@ -522,11 +527,8 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) dp->hpd_state = ST_CONNECT_PENDING; - hpd->hpd_high = 1; - ret = dp_display_usbpd_configure_cb(&dp->pdev->dev); if (ret) { /* link train failed */ - hpd->hpd_high = 0; dp->hpd_state = ST_DISCONNECTED; if (ret == -ECONNRESET) { /* cable unplugged */ @@ -603,7 +605,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) /* triggered by irq_hdp with sink_count = 0 */ if (dp->link->sink_count == 0) { dp_ctrl_off_phy(dp->ctrl); - hpd->hpd_high = 0; dp->core_initialized = false; 
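/*
 * Editor's sketch, not part of the patch: around these hunks the
 * unplug path powers the PHY down and clears core_initialized, and the
 * irq_hpd path (below) now refuses to touch AUX until the core
 * ahb/aux clocks are back. A standalone model of that handshake; the
 * struct and helpers are illustrative, not the driver's types.
 */
#include <stdbool.h>
#include <stdio.h>

struct dp_state {
	bool core_initialized;
	int  sink_count;
};

static void unplug(struct dp_state *dp)
{
	if (dp->sink_count == 0)
		dp->core_initialized = false; /* PHY off: AUX unusable */
}

static void irq_hpd(struct dp_state *dp)
{
	if (!dp->core_initialized) {
		printf("irq_hpd ignored: core clocks are down\n");
		return;
	}
	printf("attention handled over AUX\n");
}

int main(void)
{
	struct dp_state dp = { .core_initialized = true, .sink_count = 0 };

	unplug(&dp);
	irq_hpd(&dp); /* takes the ignored path */
	return 0;
}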
} mutex_unlock(&dp->event_mutex); @@ -627,8 +628,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) /* disable HPD plug interrupts */ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false); - hpd->hpd_high = 0; - /* * We don't need separate work for disconnect as * connect/attention interrupts are disabled @@ -693,9 +692,15 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) return 0; } - ret = dp_display_usbpd_attention_cb(&dp->pdev->dev); - if (ret == -ECONNRESET) { /* cable unplugged */ - dp->core_initialized = false; + /* + * dp core (ahb/aux clks) must be initialized before + * irq_hpd be handled + */ + if (dp->core_initialized) { + ret = dp_display_usbpd_attention_cb(&dp->pdev->dev); + if (ret == -ECONNRESET) { /* cable unplugged */ + dp->core_initialized = false; + } } DRM_DEBUG_DP("hpd_state=%d\n", state); @@ -707,9 +712,9 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) static void dp_display_deinit_sub_modules(struct dp_display_private *dp) { dp_debug_put(dp->debug); + dp_audio_put(dp->audio); dp_panel_put(dp->panel); dp_aux_put(dp->aux); - dp_audio_put(dp->audio); } static int dp_init_sub_modules(struct dp_display_private *dp) @@ -1481,6 +1486,18 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, } priv->connectors[priv->num_connectors++] = dp_display->connector; + + dp_display->bridge = msm_dp_bridge_init(dp_display, dev, encoder); + if (IS_ERR(dp_display->bridge)) { + ret = PTR_ERR(dp_display->bridge); + DRM_DEV_ERROR(dev->dev, + "failed to create dp bridge: %d\n", ret); + dp_display->bridge = NULL; + return ret; + } + + priv->bridges[priv->num_bridges++] = dp_display->bridge; + return 0; } @@ -1584,8 +1601,8 @@ int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder) } void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) { struct dp_display_private *dp_display; diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index 8e80e3bac394..e3adcd578a90 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -13,6 +13,7 @@ struct msm_dp { struct drm_device *drm_dev; struct device *codec_dev; + struct drm_bridge *bridge; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_bridge *panel_bridge; diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index 76856c4ee1d6..d4d360d19eba 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -12,6 +12,14 @@ #include "msm_kms.h" #include "dp_drm.h" + +struct msm_dp_bridge { + struct drm_bridge bridge; + struct msm_dp *dp_display; +}; + +#define to_dp_display(x) container_of((x), struct msm_dp_bridge, bridge) + struct dp_connector { struct drm_connector base; struct msm_dp *dp_display; @@ -173,3 +181,70 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display) return connector; } + +static void dp_bridge_mode_set(struct drm_bridge *drm_bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge); + struct msm_dp *dp_display = dp_bridge->dp_display; + + msm_dp_display_mode_set(dp_display, drm_bridge->encoder, mode, adjusted_mode); +} + +static void dp_bridge_enable(struct 
drm_bridge *drm_bridge) +{ + struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge); + struct msm_dp *dp_display = dp_bridge->dp_display; + + msm_dp_display_enable(dp_display, drm_bridge->encoder); +} + +static void dp_bridge_disable(struct drm_bridge *drm_bridge) +{ + struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge); + struct msm_dp *dp_display = dp_bridge->dp_display; + + msm_dp_display_pre_disable(dp_display, drm_bridge->encoder); +} + +static void dp_bridge_post_disable(struct drm_bridge *drm_bridge) +{ + struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge); + struct msm_dp *dp_display = dp_bridge->dp_display; + + msm_dp_display_disable(dp_display, drm_bridge->encoder); +} + +static const struct drm_bridge_funcs dp_bridge_ops = { + .enable = dp_bridge_enable, + .disable = dp_bridge_disable, + .post_disable = dp_bridge_post_disable, + .mode_set = dp_bridge_mode_set, +}; + +struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, + struct drm_encoder *encoder) +{ + int rc; + struct msm_dp_bridge *dp_bridge; + struct drm_bridge *bridge; + + dp_bridge = devm_kzalloc(dev->dev, sizeof(*dp_bridge), GFP_KERNEL); + if (!dp_bridge) + return ERR_PTR(-ENOMEM); + + dp_bridge->dp_display = dp_display; + + bridge = &dp_bridge->bridge; + bridge->funcs = &dp_bridge_ops; + bridge->encoder = encoder; + + rc = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (rc) { + DRM_ERROR("failed to attach bridge, rc=%d\n", rc); + return ERR_PTR(rc); + } + + return bridge; +} diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c index e1c90fa47411..db98a1d431eb 100644 --- a/drivers/gpu/drm/msm/dp/dp_hpd.c +++ b/drivers/gpu/drm/msm/dp/dp_hpd.c @@ -32,8 +32,6 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd) hpd_priv = container_of(dp_usbpd, struct dp_hpd_private, dp_usbpd); - dp_usbpd->hpd_high = hpd; - if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure || !hpd_priv->dp_cb->disconnect) { pr_err("hpd dp_cb not initialized\n"); diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h index 5bc5bb64680f..8feec5aa5027 100644 --- a/drivers/gpu/drm/msm/dp/dp_hpd.h +++ b/drivers/gpu/drm/msm/dp/dp_hpd.h @@ -26,7 +26,6 @@ enum plug_orientation { * @multi_func: multi-function preferred * @usb_config_req: request to switch to usb * @exit_dp_mode: request exit from displayport mode - * @hpd_high: Hot Plug Detect signal is high. 
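/*
 * Editor's note, not part of the patch: @hpd_high is removed here
 * because the plug/unplug handlers already encode the HPD level in
 * dp->hpd_state (ST_CONNECT_PENDING, ST_DISCONNECTED, ...), so a
 * mirrored boolean could only drift out of sync. Sketch of deriving
 * the level from the state machine instead; names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

enum hpd_state { ST_DISCONNECTED, ST_CONNECT_PENDING, ST_CONNECTED };

static bool hpd_high(enum hpd_state s)
{
	return s != ST_DISCONNECTED; /* single source of truth */
}

int main(void)
{
	printf("%d %d\n", hpd_high(ST_CONNECTED), hpd_high(ST_DISCONNECTED));
	return 0;
}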
* @hpd_irq: Change in the status since last message * @alt_mode_cfg_done: bool to specify alt mode status * @debug_en: bool to specify debug mode @@ -39,7 +38,6 @@ struct dp_usbpd { bool multi_func; bool usb_config_req; bool exit_dp_mode; - bool hpd_high; bool hpd_irq; bool alt_mode_cfg_done; bool debug_en; diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index a5bdfc5029de..d4d31e5bda07 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -737,18 +737,25 @@ static int dp_link_parse_sink_count(struct dp_link *dp_link) return 0; } -static void dp_link_parse_sink_status_field(struct dp_link_private *link) +static int dp_link_parse_sink_status_field(struct dp_link_private *link) { int len = 0; link->prev_sink_count = link->dp_link.sink_count; - dp_link_parse_sink_count(&link->dp_link); + len = dp_link_parse_sink_count(&link->dp_link); + if (len < 0) { + DRM_ERROR("DP parse sink count failed\n"); + return len; + } len = drm_dp_dpcd_read_link_status(link->aux, link->link_status); - if (len < DP_LINK_STATUS_SIZE) + if (len < DP_LINK_STATUS_SIZE) { DRM_ERROR("DP link status read failed\n"); - dp_link_parse_request(link); + return len; + } + + return dp_link_parse_request(link); } /** @@ -1023,7 +1030,9 @@ int dp_link_process_request(struct dp_link *dp_link) dp_link_reset_data(link); - dp_link_parse_sink_status_field(link); + ret = dp_link_parse_sink_status_field(link); + if (ret) + return ret; if (link->request.test_requested == DP_TEST_LINK_EDID_READ) { dp_link->sink_request |= DP_TEST_LINK_EDID_READ; diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 5cd230a5d5d3..0fe02529b5e7 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -40,7 +40,12 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi) of_node_put(phy_node); - if (!phy_pdev || !msm_dsi->phy) { + if (!phy_pdev) { + DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__); + return -EPROBE_DEFER; + } + if (!msm_dsi->phy) { + put_device(&phy_pdev->dev); DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__); return -EPROBE_DEFER; } @@ -110,8 +115,7 @@ destroy_dsi: static int dsi_bind(struct device *dev, struct device *master, void *data) { - struct drm_device *drm = dev_get_drvdata(master); - struct msm_drm_private *priv = drm->dev_private; + struct msm_drm_private *priv = dev_get_drvdata(master); struct msm_dsi *msm_dsi = dev_get_drvdata(dev); priv->dsi[msm_dsi->id] = msm_dsi; @@ -122,8 +126,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) static void dsi_unbind(struct device *dev, struct device *master, void *data) { - struct drm_device *drm = dev_get_drvdata(master); - struct msm_drm_private *priv = drm->dev_private; + struct msm_drm_private *priv = dev_get_drvdata(master); struct msm_dsi *msm_dsi = dev_get_drvdata(dev); priv->dsi[msm_dsi->id] = NULL; @@ -225,9 +228,13 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, goto fail; } - if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) { - ret = -EINVAL; - goto fail; + if (msm_dsi_is_bonded_dsi(msm_dsi) && + !msm_dsi_is_master_dsi(msm_dsi)) { + /* + * Do not return an eror here, + * Just skip creating encoder/connector for the slave-DSI. 
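/*
 * Editor's sketch, not part of the patch: the dsi.c hunk nearby encodes
 * the bonded-DSI rule that only the master link registers an
 * encoder/connector, while the slave's modeset_init succeeds without
 * creating anything (replacing the old validate-and-fail helper). A
 * standalone model; the function and flags are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

static int modeset_init(bool bonded_dsi, bool master_dsi)
{
	if (bonded_dsi && !master_dsi)
		return 0; /* success, but register nothing for the slave */

	printf("registering encoder/connector\n");
	return 0;
}

int main(void)
{
	modeset_init(true, true);  /* master: registers as usual */
	modeset_init(true, false); /* slave: silently succeeds */
	return 0;
}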
+ */ + return 0; } msm_dsi->encoder = encoder; diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index bb39e7ca802d..c8dedc95428c 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -82,7 +82,6 @@ int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); int msm_dsi_manager_register(struct msm_dsi *msm_dsi); void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); -bool msm_dsi_manager_validate_current_config(u8 id); void msm_dsi_manager_tpg_enable(void); /* msm dsi */ @@ -120,6 +119,8 @@ unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host); struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host); int msm_dsi_host_register(struct mipi_dsi_host *host); void msm_dsi_host_unregister(struct mipi_dsi_host *host); +void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host, + struct msm_dsi_phy *src_phy); int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, struct msm_dsi_phy *src_phy); void msm_dsi_host_reset_phy(struct mipi_dsi_host *host); @@ -173,8 +174,6 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, void msm_dsi_phy_disable(struct msm_dsi_phy *phy); void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, enum msm_dsi_phy_usecase uc); -int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy, - struct clk **byte_clk_provider, struct clk **pixel_clk_provider); void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy); int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy); void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index a6893cc45fe4..6b3ced4aaaf5 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -2020,7 +2020,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host, /* TODO: unvote for bus bandwidth */ cfg_hnd->ops->link_clk_disable(msm_host); - pm_runtime_put_autosuspend(&msm_host->pdev->dev); + pm_runtime_put(&msm_host->pdev->dev); } int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host, @@ -2179,57 +2179,12 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base, wmb(); } -int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, +void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host, struct msm_dsi_phy *src_phy) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); - struct clk *byte_clk_provider, *pixel_clk_provider; - int ret; msm_host->cphy_mode = src_phy->cphy_mode; - - ret = msm_dsi_phy_get_clk_provider(src_phy, - &byte_clk_provider, &pixel_clk_provider); - if (ret) { - pr_info("%s: can't get provider from pll, don't set parent\n", - __func__); - return 0; - } - - ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider); - if (ret) { - pr_err("%s: can't set parent to byte_clk_src. ret=%d\n", - __func__, ret); - goto exit; - } - - ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider); - if (ret) { - pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n", - __func__, ret); - goto exit; - } - - if (msm_host->dsi_clk_src) { - ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider); - if (ret) { - pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n", - __func__, ret); - goto exit; - } - } - - if (msm_host->esc_clk_src) { - ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider); - if (ret) { - pr_err("%s: can't set parent to esc_clk_src. 
ret=%d\n", - __func__, ret); - goto exit; - } - } - -exit: - return ret; } void msm_dsi_host_reset_phy(struct mipi_dsi_host *host) @@ -2297,7 +2252,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host) */ /* if (msm_panel->mode == MSM_DSI_CMD_MODE) { * dsi_link_clk_disable(msm_host); - * pm_runtime_put_autosuspend(&msm_host->pdev->dev); + * pm_runtime_put(&msm_host->pdev->dev); * } */ msm_host->enabled = true; @@ -2389,7 +2344,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host, fail_disable_clk: cfg_hnd->ops->link_clk_disable(msm_host); - pm_runtime_put_autosuspend(&msm_host->pdev->dev); + pm_runtime_put(&msm_host->pdev->dev); fail_disable_reg: dsi_host_regulator_disable(msm_host); unlock_ret: @@ -2416,7 +2371,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host) pinctrl_pm_select_sleep_state(&msm_host->pdev->dev); cfg_hnd->ops->link_clk_disable(msm_host); - pm_runtime_put_autosuspend(&msm_host->pdev->dev); + pm_runtime_put(&msm_host->pdev->dev); dsi_host_regulator_disable(msm_host); diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 01bf8d907933..f19bae475c96 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -79,10 +79,8 @@ static int dsi_mgr_setup_components(int id) return ret; msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); - ret = msm_dsi_host_set_src_pll(msm_dsi->host, msm_dsi->phy); - } else if (!other_dsi) { - ret = 0; - } else { + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + } else if (other_dsi) { struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ? msm_dsi : other_dsi; struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ? @@ -106,13 +104,11 @@ static int dsi_mgr_setup_components(int id) MSM_DSI_PHY_MASTER); msm_dsi_phy_set_usecase(clk_slave_dsi->phy, MSM_DSI_PHY_SLAVE); - ret = msm_dsi_host_set_src_pll(msm_dsi->host, clk_master_dsi->phy); - if (ret) - return ret; - ret = msm_dsi_host_set_src_pll(other_dsi->host, clk_master_dsi->phy); + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy); } - return ret; + return 0; } static int enable_phy(struct msm_dsi *msm_dsi, @@ -649,23 +645,6 @@ fail: return ERR_PTR(ret); } -bool msm_dsi_manager_validate_current_config(u8 id) -{ - bool is_bonded_dsi = IS_BONDED_DSI(); - - /* - * For bonded DSI, we only have one drm panel. For this - * use case, we register only one bridge/connector. - * Skip bridge/connector initialisation if it is - * slave-DSI for bonded DSI configuration. 
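/*
 * Editor's note, not part of the patch, on the
 * pm_runtime_put_autosuspend() -> pm_runtime_put() conversions in the
 * host and PHY hunks here: both drop the device usage count, but the
 * autosuspend variant defers the actual suspend by the configured
 * autosuspend delay, while plain pm_runtime_put() lets the device idle
 * out immediately. A minimal kernel-style pairing against the real
 * runtime-PM API; the helper itself is hypothetical.
 */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int touch_hw(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev); /* usage +1, resumed */

	if (ret < 0)
		return ret;

	/* ... access registers while the reference is held ... */

	pm_runtime_put(dev); /* usage -1; device may idle at once */
	return 0;
}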
- */ - if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) { - DBG("Skip bridge registration for slave DSI->id: %d\n", id); - return false; - } - return true; -} - /* initialize bridge */ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id) { diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 9842e04b5858..2027b38617ab 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -602,7 +602,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy) static void dsi_phy_disable_resource(struct msm_dsi_phy *phy) { clk_disable_unprepare(phy->ahb_clk); - pm_runtime_put_autosuspend(&phy->pdev->dev); + pm_runtime_put(&phy->pdev->dev); } static const struct of_device_id dsi_phy_dt_match[] = { @@ -808,12 +808,14 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, struct msm_dsi_phy_clk_request *clk_req, struct msm_dsi_phy_shared_timings *shared_timings) { - struct device *dev = &phy->pdev->dev; + struct device *dev; int ret; if (!phy || !phy->cfg->ops.enable) return -EINVAL; + dev = &phy->pdev->dev; + ret = dsi_phy_enable_resource(phy); if (ret) { DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n", @@ -892,17 +894,6 @@ bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable) return phy->cfg->ops.set_continuous_clock(phy, enable); } -int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy, - struct clk **byte_clk_provider, struct clk **pixel_clk_provider) -{ - if (byte_clk_provider) - *byte_clk_provider = phy->provided_clocks->hws[DSI_BYTE_PLL_CLK]->clk; - if (pixel_clk_provider) - *pixel_clk_provider = phy->provided_clocks->hws[DSI_PIXEL_PLL_CLK]->clk; - - return 0; -} - void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy) { if (phy->cfg->ops.save_pll_state) { diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c deleted file mode 100644 index 106a67473af5..000000000000 --- a/drivers/gpu/drm/msm/edp/edp.c +++ /dev/null @@ -1,198 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. - */ - -#include <linux/of_irq.h> -#include "edp.h" - -static irqreturn_t edp_irq(int irq, void *dev_id) -{ - struct msm_edp *edp = dev_id; - - /* Process eDP irq */ - return msm_edp_ctrl_irq(edp->ctrl); -} - -static void edp_destroy(struct platform_device *pdev) -{ - struct msm_edp *edp = platform_get_drvdata(pdev); - - if (!edp) - return; - - if (edp->ctrl) { - msm_edp_ctrl_destroy(edp->ctrl); - edp->ctrl = NULL; - } - - platform_set_drvdata(pdev, NULL); -} - -/* construct eDP at bind/probe time, grab all the resources. 
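/*
 * Editor's sketch, not part of the patch: the msm_dsi_phy_enable() hunk
 * above moves "dev = &phy->pdev->dev" below the NULL check, because
 * computing it in the declaration loads phy->pdev before phy has been
 * validated, an undefined dereference when phy is NULL. Standalone
 * model of check-before-dereference; the types are illustrative.
 */
#include <stddef.h>
#include <stdio.h>

struct pdev { int dev; };
struct dsi_phy { struct pdev *pdev; };

static int phy_enable(struct dsi_phy *phy)
{
	struct pdev *dev; /* left uninitialized: phy is unchecked so far */

	if (!phy)
		return -1; /* stand-in for -EINVAL */

	dev = phy->pdev; /* safe: phy validated above */
	return dev ? dev->dev : -1;
}

int main(void)
{
	struct pdev p = { .dev = 0 };
	struct dsi_phy phy = { .pdev = &p };

	printf("%d %d\n", phy_enable(&phy), phy_enable(NULL));
	return 0;
}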
*/ -static struct msm_edp *edp_init(struct platform_device *pdev) -{ - struct msm_edp *edp = NULL; - int ret; - - if (!pdev) { - pr_err("no eDP device\n"); - ret = -ENXIO; - goto fail; - } - - edp = devm_kzalloc(&pdev->dev, sizeof(*edp), GFP_KERNEL); - if (!edp) { - ret = -ENOMEM; - goto fail; - } - DBG("eDP probed=%p", edp); - - edp->pdev = pdev; - platform_set_drvdata(pdev, edp); - - ret = msm_edp_ctrl_init(edp); - if (ret) - goto fail; - - return edp; - -fail: - if (edp) - edp_destroy(pdev); - - return ERR_PTR(ret); -} - -static int edp_bind(struct device *dev, struct device *master, void *data) -{ - struct drm_device *drm = dev_get_drvdata(master); - struct msm_drm_private *priv = drm->dev_private; - struct msm_edp *edp; - - DBG(""); - edp = edp_init(to_platform_device(dev)); - if (IS_ERR(edp)) - return PTR_ERR(edp); - priv->edp = edp; - - return 0; -} - -static void edp_unbind(struct device *dev, struct device *master, void *data) -{ - struct drm_device *drm = dev_get_drvdata(master); - struct msm_drm_private *priv = drm->dev_private; - - DBG(""); - if (priv->edp) { - edp_destroy(to_platform_device(dev)); - priv->edp = NULL; - } -} - -static const struct component_ops edp_ops = { - .bind = edp_bind, - .unbind = edp_unbind, -}; - -static int edp_dev_probe(struct platform_device *pdev) -{ - DBG(""); - return component_add(&pdev->dev, &edp_ops); -} - -static int edp_dev_remove(struct platform_device *pdev) -{ - DBG(""); - component_del(&pdev->dev, &edp_ops); - return 0; -} - -static const struct of_device_id dt_match[] = { - { .compatible = "qcom,mdss-edp" }, - {} -}; - -static struct platform_driver edp_driver = { - .probe = edp_dev_probe, - .remove = edp_dev_remove, - .driver = { - .name = "msm_edp", - .of_match_table = dt_match, - }, -}; - -void __init msm_edp_register(void) -{ - DBG(""); - platform_driver_register(&edp_driver); -} - -void __exit msm_edp_unregister(void) -{ - DBG(""); - platform_driver_unregister(&edp_driver); -} - -/* Second part of initialization, the drm/kms level modeset_init */ -int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, - struct drm_encoder *encoder) -{ - struct platform_device *pdev = edp->pdev; - struct msm_drm_private *priv = dev->dev_private; - int ret; - - edp->encoder = encoder; - edp->dev = dev; - - edp->bridge = msm_edp_bridge_init(edp); - if (IS_ERR(edp->bridge)) { - ret = PTR_ERR(edp->bridge); - DRM_DEV_ERROR(dev->dev, "failed to create eDP bridge: %d\n", ret); - edp->bridge = NULL; - goto fail; - } - - edp->connector = msm_edp_connector_init(edp); - if (IS_ERR(edp->connector)) { - ret = PTR_ERR(edp->connector); - DRM_DEV_ERROR(dev->dev, "failed to create eDP connector: %d\n", ret); - edp->connector = NULL; - goto fail; - } - - edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); - if (edp->irq < 0) { - ret = edp->irq; - DRM_DEV_ERROR(dev->dev, "failed to get IRQ: %d\n", ret); - goto fail; - } - - ret = devm_request_irq(&pdev->dev, edp->irq, - edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, - "edp_isr", edp); - if (ret < 0) { - DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n", - edp->irq, ret); - goto fail; - } - - priv->bridges[priv->num_bridges++] = edp->bridge; - priv->connectors[priv->num_connectors++] = edp->connector; - - return 0; - -fail: - /* bridge/connector are normally destroyed by drm */ - if (edp->bridge) { - edp_bridge_destroy(edp->bridge); - edp->bridge = NULL; - } - if (edp->connector) { - edp->connector->funcs->destroy(edp->connector); - edp->connector = NULL; - } - - return ret; -} diff --git 
a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h deleted file mode 100644 index 7907e0f5988f..000000000000 --- a/drivers/gpu/drm/msm/edp/edp.xml.h +++ /dev/null @@ -1,388 +0,0 @@ -#ifndef EDP_XML -#define EDP_XML - -/* Autogenerated file, DO NOT EDIT manually! - -This file was generated by the rules-ng-ng headergen tool in this git repository: -http://github.com/freedreno/envytools/ -git clone https://github.com/freedreno/envytools.git - -The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44) -- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44) - -Copyright (C) 2013-2021 by the following authors: -- Rob Clark <robdclark@gmail.com> (robclark) -- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice (including the -next paragraph) shall be included in all copies or substantial -portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ - - -enum edp_color_depth { - EDP_6BIT = 0, - EDP_8BIT = 1, - EDP_10BIT = 2, - EDP_12BIT = 3, - EDP_16BIT = 4, -}; - -enum edp_component_format { - EDP_RGB = 0, - EDP_YUV422 = 1, - EDP_YUV444 = 2, -}; - -#define REG_EDP_MAINLINK_CTRL 0x00000004 -#define EDP_MAINLINK_CTRL_ENABLE 0x00000001 -#define EDP_MAINLINK_CTRL_RESET 0x00000002 - -#define REG_EDP_STATE_CTRL 0x00000008 -#define EDP_STATE_CTRL_TRAIN_PATTERN_1 0x00000001 -#define EDP_STATE_CTRL_TRAIN_PATTERN_2 0x00000002 -#define EDP_STATE_CTRL_TRAIN_PATTERN_3 0x00000004 -#define EDP_STATE_CTRL_SYMBOL_ERR_RATE_MEAS 0x00000008 -#define EDP_STATE_CTRL_PRBS7 0x00000010 -#define EDP_STATE_CTRL_CUSTOM_80_BIT_PATTERN 0x00000020 -#define EDP_STATE_CTRL_SEND_VIDEO 0x00000040 -#define EDP_STATE_CTRL_PUSH_IDLE 0x00000080 - -#define REG_EDP_CONFIGURATION_CTRL 0x0000000c -#define EDP_CONFIGURATION_CTRL_SYNC_CLK 0x00000001 -#define EDP_CONFIGURATION_CTRL_STATIC_MVID 0x00000002 -#define EDP_CONFIGURATION_CTRL_PROGRESSIVE 0x00000004 -#define EDP_CONFIGURATION_CTRL_LANES__MASK 0x00000030 -#define EDP_CONFIGURATION_CTRL_LANES__SHIFT 4 -static inline uint32_t EDP_CONFIGURATION_CTRL_LANES(uint32_t val) -{ - return ((val) << EDP_CONFIGURATION_CTRL_LANES__SHIFT) & EDP_CONFIGURATION_CTRL_LANES__MASK; -} -#define EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING 0x00000040 -#define EDP_CONFIGURATION_CTRL_COLOR__MASK 0x00000100 -#define EDP_CONFIGURATION_CTRL_COLOR__SHIFT 8 -static inline uint32_t EDP_CONFIGURATION_CTRL_COLOR(enum edp_color_depth val) -{ - return ((val) << EDP_CONFIGURATION_CTRL_COLOR__SHIFT) & EDP_CONFIGURATION_CTRL_COLOR__MASK; -} - -#define REG_EDP_SOFTWARE_MVID 0x00000014 - -#define REG_EDP_SOFTWARE_NVID 0x00000018 - -#define REG_EDP_TOTAL_HOR_VER 0x0000001c -#define EDP_TOTAL_HOR_VER_HORIZ__MASK 0x0000ffff -#define EDP_TOTAL_HOR_VER_HORIZ__SHIFT 0 -static inline uint32_t EDP_TOTAL_HOR_VER_HORIZ(uint32_t val) -{ - return ((val) << EDP_TOTAL_HOR_VER_HORIZ__SHIFT) & EDP_TOTAL_HOR_VER_HORIZ__MASK; -} -#define EDP_TOTAL_HOR_VER_VERT__MASK 0xffff0000 -#define EDP_TOTAL_HOR_VER_VERT__SHIFT 16 -static inline uint32_t EDP_TOTAL_HOR_VER_VERT(uint32_t val) -{ - return ((val) << EDP_TOTAL_HOR_VER_VERT__SHIFT) & EDP_TOTAL_HOR_VER_VERT__MASK; -} - -#define REG_EDP_START_HOR_VER_FROM_SYNC 0x00000020 -#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK 0x0000ffff -#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT 0 -static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_HORIZ(uint32_t val) -{ - return ((val) << EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK; -} -#define EDP_START_HOR_VER_FROM_SYNC_VERT__MASK 0xffff0000 -#define EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT 16 -static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_VERT(uint32_t val) -{ - return ((val) << EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_VERT__MASK; -} - -#define REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY 0x00000024 -#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK 0x00007fff -#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT 0 -static inline uint32_t 
EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ(uint32_t val) -{ - return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT) & EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK; -} -#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC 0x00008000 -#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK 0x7fff0000 -#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT 16 -static inline uint32_t EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT(uint32_t val) -{ - return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT) & EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK; -} -#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC 0x80000000 - -#define REG_EDP_ACTIVE_HOR_VER 0x00000028 -#define EDP_ACTIVE_HOR_VER_HORIZ__MASK 0x0000ffff -#define EDP_ACTIVE_HOR_VER_HORIZ__SHIFT 0 -static inline uint32_t EDP_ACTIVE_HOR_VER_HORIZ(uint32_t val) -{ - return ((val) << EDP_ACTIVE_HOR_VER_HORIZ__SHIFT) & EDP_ACTIVE_HOR_VER_HORIZ__MASK; -} -#define EDP_ACTIVE_HOR_VER_VERT__MASK 0xffff0000 -#define EDP_ACTIVE_HOR_VER_VERT__SHIFT 16 -static inline uint32_t EDP_ACTIVE_HOR_VER_VERT(uint32_t val) -{ - return ((val) << EDP_ACTIVE_HOR_VER_VERT__SHIFT) & EDP_ACTIVE_HOR_VER_VERT__MASK; -} - -#define REG_EDP_MISC1_MISC0 0x0000002c -#define EDP_MISC1_MISC0_MISC0__MASK 0x000000ff -#define EDP_MISC1_MISC0_MISC0__SHIFT 0 -static inline uint32_t EDP_MISC1_MISC0_MISC0(uint32_t val) -{ - return ((val) << EDP_MISC1_MISC0_MISC0__SHIFT) & EDP_MISC1_MISC0_MISC0__MASK; -} -#define EDP_MISC1_MISC0_SYNC 0x00000001 -#define EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK 0x00000006 -#define EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT 1 -static inline uint32_t EDP_MISC1_MISC0_COMPONENT_FORMAT(enum edp_component_format val) -{ - return ((val) << EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT) & EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK; -} -#define EDP_MISC1_MISC0_CEA 0x00000008 -#define EDP_MISC1_MISC0_BT709_5 0x00000010 -#define EDP_MISC1_MISC0_COLOR__MASK 0x000000e0 -#define EDP_MISC1_MISC0_COLOR__SHIFT 5 -static inline uint32_t EDP_MISC1_MISC0_COLOR(enum edp_color_depth val) -{ - return ((val) << EDP_MISC1_MISC0_COLOR__SHIFT) & EDP_MISC1_MISC0_COLOR__MASK; -} -#define EDP_MISC1_MISC0_MISC1__MASK 0x0000ff00 -#define EDP_MISC1_MISC0_MISC1__SHIFT 8 -static inline uint32_t EDP_MISC1_MISC0_MISC1(uint32_t val) -{ - return ((val) << EDP_MISC1_MISC0_MISC1__SHIFT) & EDP_MISC1_MISC0_MISC1__MASK; -} -#define EDP_MISC1_MISC0_INTERLACED_ODD 0x00000100 -#define EDP_MISC1_MISC0_STEREO__MASK 0x00000600 -#define EDP_MISC1_MISC0_STEREO__SHIFT 9 -static inline uint32_t EDP_MISC1_MISC0_STEREO(uint32_t val) -{ - return ((val) << EDP_MISC1_MISC0_STEREO__SHIFT) & EDP_MISC1_MISC0_STEREO__MASK; -} - -#define REG_EDP_PHY_CTRL 0x00000074 -#define EDP_PHY_CTRL_SW_RESET_PLL 0x00000001 -#define EDP_PHY_CTRL_SW_RESET 0x00000004 - -#define REG_EDP_MAINLINK_READY 0x00000084 -#define EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY 0x00000008 -#define EDP_MAINLINK_READY_TRAIN_PATTERN_2_READY 0x00000010 -#define EDP_MAINLINK_READY_TRAIN_PATTERN_3_READY 0x00000020 - -#define REG_EDP_AUX_CTRL 0x00000300 -#define EDP_AUX_CTRL_ENABLE 0x00000001 -#define EDP_AUX_CTRL_RESET 0x00000002 - -#define REG_EDP_INTERRUPT_REG_1 0x00000308 -#define EDP_INTERRUPT_REG_1_HPD 0x00000001 -#define EDP_INTERRUPT_REG_1_HPD_ACK 0x00000002 -#define EDP_INTERRUPT_REG_1_HPD_EN 0x00000004 -#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE 0x00000008 -#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_ACK 0x00000010 -#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_EN 0x00000020 -#define EDP_INTERRUPT_REG_1_WRONG_ADDR 0x00000040 -#define EDP_INTERRUPT_REG_1_WRONG_ADDR_ACK 0x00000080 -#define 
EDP_INTERRUPT_REG_1_WRONG_ADDR_EN 0x00000100 -#define EDP_INTERRUPT_REG_1_TIMEOUT 0x00000200 -#define EDP_INTERRUPT_REG_1_TIMEOUT_ACK 0x00000400 -#define EDP_INTERRUPT_REG_1_TIMEOUT_EN 0x00000800 -#define EDP_INTERRUPT_REG_1_NACK_DEFER 0x00001000 -#define EDP_INTERRUPT_REG_1_NACK_DEFER_ACK 0x00002000 -#define EDP_INTERRUPT_REG_1_NACK_DEFER_EN 0x00004000 -#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT 0x00008000 -#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_ACK 0x00010000 -#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_EN 0x00020000 -#define EDP_INTERRUPT_REG_1_I2C_NACK 0x00040000 -#define EDP_INTERRUPT_REG_1_I2C_NACK_ACK 0x00080000 -#define EDP_INTERRUPT_REG_1_I2C_NACK_EN 0x00100000 -#define EDP_INTERRUPT_REG_1_I2C_DEFER 0x00200000 -#define EDP_INTERRUPT_REG_1_I2C_DEFER_ACK 0x00400000 -#define EDP_INTERRUPT_REG_1_I2C_DEFER_EN 0x00800000 -#define EDP_INTERRUPT_REG_1_PLL_UNLOCK 0x01000000 -#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_ACK 0x02000000 -#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_EN 0x04000000 -#define EDP_INTERRUPT_REG_1_AUX_ERROR 0x08000000 -#define EDP_INTERRUPT_REG_1_AUX_ERROR_ACK 0x10000000 -#define EDP_INTERRUPT_REG_1_AUX_ERROR_EN 0x20000000 - -#define REG_EDP_INTERRUPT_REG_2 0x0000030c -#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO 0x00000001 -#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_ACK 0x00000002 -#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_EN 0x00000004 -#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT 0x00000008 -#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_ACK 0x00000010 -#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_EN 0x00000020 -#define EDP_INTERRUPT_REG_2_FRAME_END 0x00000040 -#define EDP_INTERRUPT_REG_2_FRAME_END_ACK 0x00000080 -#define EDP_INTERRUPT_REG_2_FRAME_END_EN 0x00000100 -#define EDP_INTERRUPT_REG_2_CRC_UPDATED 0x00000200 -#define EDP_INTERRUPT_REG_2_CRC_UPDATED_ACK 0x00000400 -#define EDP_INTERRUPT_REG_2_CRC_UPDATED_EN 0x00000800 - -#define REG_EDP_INTERRUPT_TRANS_NUM 0x00000310 - -#define REG_EDP_AUX_DATA 0x00000314 -#define EDP_AUX_DATA_READ 0x00000001 -#define EDP_AUX_DATA_DATA__MASK 0x0000ff00 -#define EDP_AUX_DATA_DATA__SHIFT 8 -static inline uint32_t EDP_AUX_DATA_DATA(uint32_t val) -{ - return ((val) << EDP_AUX_DATA_DATA__SHIFT) & EDP_AUX_DATA_DATA__MASK; -} -#define EDP_AUX_DATA_INDEX__MASK 0x00ff0000 -#define EDP_AUX_DATA_INDEX__SHIFT 16 -static inline uint32_t EDP_AUX_DATA_INDEX(uint32_t val) -{ - return ((val) << EDP_AUX_DATA_INDEX__SHIFT) & EDP_AUX_DATA_INDEX__MASK; -} -#define EDP_AUX_DATA_INDEX_WRITE 0x80000000 - -#define REG_EDP_AUX_TRANS_CTRL 0x00000318 -#define EDP_AUX_TRANS_CTRL_I2C 0x00000100 -#define EDP_AUX_TRANS_CTRL_GO 0x00000200 - -#define REG_EDP_AUX_STATUS 0x00000324 - -static inline uint32_t REG_EDP_PHY_LN(uint32_t i0) { return 0x00000400 + 0x40*i0; } - -static inline uint32_t REG_EDP_PHY_LN_PD_CTL(uint32_t i0) { return 0x00000404 + 0x40*i0; } - -#define REG_EDP_PHY_GLB_VM_CFG0 0x00000510 - -#define REG_EDP_PHY_GLB_VM_CFG1 0x00000514 - -#define REG_EDP_PHY_GLB_MISC9 0x00000518 - -#define REG_EDP_PHY_GLB_CFG 0x00000528 - -#define REG_EDP_PHY_GLB_PD_CTL 0x0000052c - -#define REG_EDP_PHY_GLB_PHY_STATUS 0x00000598 - -#define REG_EDP_28nm_PHY_PLL_REFCLK_CFG 0x00000000 - -#define REG_EDP_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004 - -#define REG_EDP_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008 - -#define REG_EDP_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c - -#define REG_EDP_28nm_PHY_PLL_VREG_CFG 0x00000010 - -#define REG_EDP_28nm_PHY_PLL_PWRGEN_CFG 0x00000014 - -#define REG_EDP_28nm_PHY_PLL_DMUX_CFG 0x00000018 - -#define REG_EDP_28nm_PHY_PLL_AMUX_CFG 0x0000001c - -#define
REG_EDP_28nm_PHY_PLL_GLB_CFG 0x00000020 -#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001 -#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002 -#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004 -#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008 - -#define REG_EDP_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024 - -#define REG_EDP_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028 - -#define REG_EDP_28nm_PHY_PLL_LPFR_CFG 0x0000002c - -#define REG_EDP_28nm_PHY_PLL_LPFC1_CFG 0x00000030 - -#define REG_EDP_28nm_PHY_PLL_LPFC2_CFG 0x00000034 - -#define REG_EDP_28nm_PHY_PLL_SDM_CFG0 0x00000038 - -#define REG_EDP_28nm_PHY_PLL_SDM_CFG1 0x0000003c - -#define REG_EDP_28nm_PHY_PLL_SDM_CFG2 0x00000040 - -#define REG_EDP_28nm_PHY_PLL_SDM_CFG3 0x00000044 - -#define REG_EDP_28nm_PHY_PLL_SDM_CFG4 0x00000048 - -#define REG_EDP_28nm_PHY_PLL_SSC_CFG0 0x0000004c - -#define REG_EDP_28nm_PHY_PLL_SSC_CFG1 0x00000050 - -#define REG_EDP_28nm_PHY_PLL_SSC_CFG2 0x00000054 - -#define REG_EDP_28nm_PHY_PLL_SSC_CFG3 0x00000058 - -#define REG_EDP_28nm_PHY_PLL_LKDET_CFG0 0x0000005c - -#define REG_EDP_28nm_PHY_PLL_LKDET_CFG1 0x00000060 - -#define REG_EDP_28nm_PHY_PLL_LKDET_CFG2 0x00000064 - -#define REG_EDP_28nm_PHY_PLL_TEST_CFG 0x00000068 -#define EDP_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001 - -#define REG_EDP_28nm_PHY_PLL_CAL_CFG0 0x0000006c - -#define REG_EDP_28nm_PHY_PLL_CAL_CFG1 0x00000070 - -#define REG_EDP_28nm_PHY_PLL_CAL_CFG2 0x00000074 - -#define REG_EDP_28nm_PHY_PLL_CAL_CFG3 0x00000078 - -#define REG_EDP_28nm_PHY_PLL_CAL_CFG4 0x0000007c - -#define REG_EDP_28nm_PHY_PLL_CAL_CFG5 0x00000080 - -#define REG_EDP_28nm_PHY_PLL_CAL_CFG6 0x00000084 - -#define REG_EDP_28nm_PHY_PLL_CAL_CFG7 0x00000088 - -#define REG_EDP_28nm_PHY_PLL_CAL_CFG8 0x0000008c - -#define REG_EDP_28nm_PHY_PLL_CAL_CFG9 0x00000090 - -#define REG_EDP_28nm_PHY_PLL_CAL_CFG10 0x00000094 - -#define REG_EDP_28nm_PHY_PLL_CAL_CFG11 0x00000098 - -#define REG_EDP_28nm_PHY_PLL_EFUSE_CFG 0x0000009c - -#define REG_EDP_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0 - - -#endif /* EDP_XML */ diff --git a/drivers/gpu/drm/msm/edp/edp_aux.c b/drivers/gpu/drm/msm/edp/edp_aux.c deleted file mode 100644 index e3d85c622cfb..000000000000 --- a/drivers/gpu/drm/msm/edp/edp_aux.c +++ /dev/null @@ -1,265 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. - */ - -#include "edp.h" -#include "edp.xml.h" - -#define AUX_CMD_FIFO_LEN 144 -#define AUX_CMD_NATIVE_MAX 16 -#define AUX_CMD_I2C_MAX 128 - -#define EDP_INTR_AUX_I2C_ERR \ - (EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \ - EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \ - EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER) -#define EDP_INTR_TRANS_STATUS \ - (EDP_INTERRUPT_REG_1_AUX_I2C_DONE | EDP_INTR_AUX_I2C_ERR) - -struct edp_aux { - void __iomem *base; - bool msg_err; - - struct completion msg_comp; - - /* To prevent the message transaction routine from reentry. 
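
A note on edp_msg_fifo_tx() below: each AUX command enters the FIFO as a 4-byte header followed by the payload — addr[19:16] plus the R/W flag, addr[15:8], addr[7:0], then (size - 1). A worked example of that packing with illustrative values (a 16-byte native read at DPCD offset 0x00200):

	data[0] = (0x00200 >> 16) & 0xf;	/* addr[19:16] = 0x0  */
	data[0] |= BIT(4);			/* R/W flag: read     */
	data[1] = (0x00200 >> 8) & 0xff;	/* addr[15:8]  = 0x02 */
	data[2] = 0x00200 & 0xff;		/* addr[7:0]   = 0x00 */
	data[3] = (16 - 1) & 0xff;		/* len - 1     = 0x0f */
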
*/ - struct mutex msg_mutex; - - struct drm_dp_aux drm_aux; -}; -#define to_edp_aux(x) container_of(x, struct edp_aux, drm_aux) - -static int edp_msg_fifo_tx(struct edp_aux *aux, struct drm_dp_aux_msg *msg) -{ - u32 data[4]; - u32 reg, len; - bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); - bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); - u8 *msgdata = msg->buffer; - int i; - - if (read) - len = 4; - else - len = msg->size + 4; - - /* - * cmd fifo only has depth of 144 bytes - */ - if (len > AUX_CMD_FIFO_LEN) - return -EINVAL; - - /* Pack cmd and write to HW */ - data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */ - if (read) - data[0] |= BIT(4); /* R/W */ - - data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */ - data[2] = msg->address & 0xff; /* addr[7:0] */ - data[3] = (msg->size - 1) & 0xff; /* len[7:0] */ - - for (i = 0; i < len; i++) { - reg = (i < 4) ? data[i] : msgdata[i - 4]; - reg = EDP_AUX_DATA_DATA(reg); /* index = 0, write */ - if (i == 0) - reg |= EDP_AUX_DATA_INDEX_WRITE; - edp_write(aux->base + REG_EDP_AUX_DATA, reg); - } - - reg = 0; /* Transaction number is always 1 */ - if (!native) /* i2c */ - reg |= EDP_AUX_TRANS_CTRL_I2C; - - reg |= EDP_AUX_TRANS_CTRL_GO; - edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, reg); - - return 0; -} - -static int edp_msg_fifo_rx(struct edp_aux *aux, struct drm_dp_aux_msg *msg) -{ - u32 data; - u8 *dp; - int i; - u32 len = msg->size; - - edp_write(aux->base + REG_EDP_AUX_DATA, - EDP_AUX_DATA_INDEX_WRITE | EDP_AUX_DATA_READ); /* index = 0 */ - - dp = msg->buffer; - - /* discard first byte */ - data = edp_read(aux->base + REG_EDP_AUX_DATA); - for (i = 0; i < len; i++) { - data = edp_read(aux->base + REG_EDP_AUX_DATA); - dp[i] = (u8)((data >> 8) & 0xff); - } - - return 0; -} - -/* - * This function does the real job to process an AUX transaction. - * It will call msm_edp_aux_ctrl() function to reset the AUX channel, - * if the waiting is timeout. - * The caller who triggers the transaction should avoid the - * msm_edp_aux_ctrl() running concurrently in other threads, i.e. - * start transaction only when AUX channel is fully enabled. - */ -static ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, - struct drm_dp_aux_msg *msg) -{ - struct edp_aux *aux = to_edp_aux(drm_aux); - ssize_t ret; - unsigned long time_left; - bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); - bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); - - /* Ignore address only message */ - if ((msg->size == 0) || (msg->buffer == NULL)) { - msg->reply = native ? - DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; - return msg->size; - } - - /* msg sanity check */ - if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) || - (msg->size > AUX_CMD_I2C_MAX)) { - pr_err("%s: invalid msg: size(%zu), request(%x)\n", - __func__, msg->size, msg->request); - return -EINVAL; - } - - mutex_lock(&aux->msg_mutex); - - aux->msg_err = false; - reinit_completion(&aux->msg_comp); - - ret = edp_msg_fifo_tx(aux, msg); - if (ret < 0) - goto unlock_exit; - - DBG("wait_for_completion"); - time_left = wait_for_completion_timeout(&aux->msg_comp, - msecs_to_jiffies(300)); - if (!time_left) { - /* - * Clear GO and reset AUX channel - * to cancel the current transaction. 
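
The `native` and `read` flags in this file look odd because they AND two request codes together, but the constants fold at compile time (DP_AUX_NATIVE_WRITE = 0x8, DP_AUX_NATIVE_READ = 0x9, DP_AUX_I2C_READ = 0x1), so both tests reduce to single-bit checks:

	bool native = msg->request & 0x8;	/* bit 3: native (non-i2c) request */
	bool read   = msg->request & 0x1;	/* bit 0: read, native or i2c      */
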
- */ - edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0); - msm_edp_aux_ctrl(aux, 1); - pr_err("%s: aux timeout,\n", __func__); - ret = -ETIMEDOUT; - goto unlock_exit; - } - DBG("completion"); - - if (!aux->msg_err) { - if (read) { - ret = edp_msg_fifo_rx(aux, msg); - if (ret < 0) - goto unlock_exit; - } - - msg->reply = native ? - DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; - } else { - /* Reply defer to retry */ - msg->reply = native ? - DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER; - /* - * The sleep time in caller is not long enough to make sure - * our H/W completes transactions. Add more defer time here. - */ - msleep(100); - } - - /* Return requested size for success or retry */ - ret = msg->size; - -unlock_exit: - mutex_unlock(&aux->msg_mutex); - return ret; -} - -void *msm_edp_aux_init(struct msm_edp *edp, void __iomem *regbase, struct drm_dp_aux **drm_aux) -{ - struct device *dev = &edp->pdev->dev; - struct edp_aux *aux = NULL; - int ret; - - DBG(""); - aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL); - if (!aux) - return NULL; - - aux->base = regbase; - mutex_init(&aux->msg_mutex); - init_completion(&aux->msg_comp); - - aux->drm_aux.name = "msm_edp_aux"; - aux->drm_aux.dev = dev; - aux->drm_aux.drm_dev = edp->dev; - aux->drm_aux.transfer = edp_aux_transfer; - ret = drm_dp_aux_register(&aux->drm_aux); - if (ret) { - pr_err("%s: failed to register drm aux: %d\n", __func__, ret); - mutex_destroy(&aux->msg_mutex); - } - - if (drm_aux && aux) - *drm_aux = &aux->drm_aux; - - return aux; -} - -void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux) -{ - if (aux) { - drm_dp_aux_unregister(&aux->drm_aux); - mutex_destroy(&aux->msg_mutex); - } -} - -irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr) -{ - if (isr & EDP_INTR_TRANS_STATUS) { - DBG("isr=%x", isr); - edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0); - - if (isr & EDP_INTR_AUX_I2C_ERR) - aux->msg_err = true; - else - aux->msg_err = false; - - complete(&aux->msg_comp); - } - - return IRQ_HANDLED; -} - -void msm_edp_aux_ctrl(struct edp_aux *aux, int enable) -{ - u32 data; - - DBG("enable=%d", enable); - data = edp_read(aux->base + REG_EDP_AUX_CTRL); - - if (enable) { - data |= EDP_AUX_CTRL_RESET; - edp_write(aux->base + REG_EDP_AUX_CTRL, data); - /* Make sure full reset */ - wmb(); - usleep_range(500, 1000); - - data &= ~EDP_AUX_CTRL_RESET; - data |= EDP_AUX_CTRL_ENABLE; - edp_write(aux->base + REG_EDP_AUX_CTRL, data); - } else { - data &= ~EDP_AUX_CTRL_ENABLE; - edp_write(aux->base + REG_EDP_AUX_CTRL, data); - } -} - diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c deleted file mode 100644 index c69a37e0c708..000000000000 --- a/drivers/gpu/drm/msm/edp/edp_bridge.c +++ /dev/null @@ -1,111 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 
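
For context on the AUX code removed above: once msm_edp_aux_init() has registered the drm_dp_aux with .transfer = edp_aux_transfer, DPCD and EDID traffic from the DRM core funnels through that hook. A sketch of a caller-side DPCD read, with drm_aux being the pointer returned through the init function's out-parameter:

	u8 rev;

	/* Lands in edp_aux_transfer() via the drm_dp_aux core. */
	if (drm_dp_dpcd_readb(drm_aux, DP_DPCD_REV, &rev) < 0)
		pr_err("DPCD read failed\n");
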
- */ - -#include "edp.h" - -struct edp_bridge { - struct drm_bridge base; - struct msm_edp *edp; -}; -#define to_edp_bridge(x) container_of(x, struct edp_bridge, base) - -void edp_bridge_destroy(struct drm_bridge *bridge) -{ -} - -static void edp_bridge_pre_enable(struct drm_bridge *bridge) -{ - struct edp_bridge *edp_bridge = to_edp_bridge(bridge); - struct msm_edp *edp = edp_bridge->edp; - - DBG(""); - msm_edp_ctrl_power(edp->ctrl, true); -} - -static void edp_bridge_enable(struct drm_bridge *bridge) -{ - DBG(""); -} - -static void edp_bridge_disable(struct drm_bridge *bridge) -{ - DBG(""); -} - -static void edp_bridge_post_disable(struct drm_bridge *bridge) -{ - struct edp_bridge *edp_bridge = to_edp_bridge(bridge); - struct msm_edp *edp = edp_bridge->edp; - - DBG(""); - msm_edp_ctrl_power(edp->ctrl, false); -} - -static void edp_bridge_mode_set(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - const struct drm_display_mode *adjusted_mode) -{ - struct drm_device *dev = bridge->dev; - struct drm_connector *connector; - struct edp_bridge *edp_bridge = to_edp_bridge(bridge); - struct msm_edp *edp = edp_bridge->edp; - - DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct drm_encoder *encoder = connector->encoder; - struct drm_bridge *first_bridge; - - if (!connector->encoder) - continue; - - first_bridge = drm_bridge_chain_get_first_bridge(encoder); - if (bridge == first_bridge) { - msm_edp_ctrl_timing_cfg(edp->ctrl, - adjusted_mode, &connector->display_info); - break; - } - } -} - -static const struct drm_bridge_funcs edp_bridge_funcs = { - .pre_enable = edp_bridge_pre_enable, - .enable = edp_bridge_enable, - .disable = edp_bridge_disable, - .post_disable = edp_bridge_post_disable, - .mode_set = edp_bridge_mode_set, -}; - -/* initialize bridge */ -struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp) -{ - struct drm_bridge *bridge = NULL; - struct edp_bridge *edp_bridge; - int ret; - - edp_bridge = devm_kzalloc(edp->dev->dev, - sizeof(*edp_bridge), GFP_KERNEL); - if (!edp_bridge) { - ret = -ENOMEM; - goto fail; - } - - edp_bridge->edp = edp; - - bridge = &edp_bridge->base; - bridge->funcs = &edp_bridge_funcs; - - ret = drm_bridge_attach(edp->encoder, bridge, NULL, 0); - if (ret) - goto fail; - - return bridge; - -fail: - if (bridge) - edp_bridge_destroy(bridge); - - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c deleted file mode 100644 index 73cb5fd97a5a..000000000000 --- a/drivers/gpu/drm/msm/edp/edp_connector.c +++ /dev/null @@ -1,132 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. - */ - -#include "drm/drm_edid.h" -#include "msm_kms.h" -#include "edp.h" - -struct edp_connector { - struct drm_connector base; - struct msm_edp *edp; -}; -#define to_edp_connector(x) container_of(x, struct edp_connector, base) - -static enum drm_connector_status edp_connector_detect( - struct drm_connector *connector, bool force) -{ - struct edp_connector *edp_connector = to_edp_connector(connector); - struct msm_edp *edp = edp_connector->edp; - - DBG(""); - return msm_edp_ctrl_panel_connected(edp->ctrl) ? 
- connector_status_connected : connector_status_disconnected; -} - -static void edp_connector_destroy(struct drm_connector *connector) -{ - struct edp_connector *edp_connector = to_edp_connector(connector); - - DBG(""); - - drm_connector_cleanup(connector); - - kfree(edp_connector); -} - -static int edp_connector_get_modes(struct drm_connector *connector) -{ - struct edp_connector *edp_connector = to_edp_connector(connector); - struct msm_edp *edp = edp_connector->edp; - - struct edid *drm_edid = NULL; - int ret = 0; - - DBG(""); - ret = msm_edp_ctrl_get_panel_info(edp->ctrl, connector, &drm_edid); - if (ret) - return ret; - - drm_connector_update_edid_property(connector, drm_edid); - if (drm_edid) - ret = drm_add_edid_modes(connector, drm_edid); - - return ret; -} - -static int edp_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct edp_connector *edp_connector = to_edp_connector(connector); - struct msm_edp *edp = edp_connector->edp; - struct msm_drm_private *priv = connector->dev->dev_private; - struct msm_kms *kms = priv->kms; - long actual, requested; - - requested = 1000 * mode->clock; - actual = kms->funcs->round_pixclk(kms, - requested, edp_connector->edp->encoder); - - DBG("requested=%ld, actual=%ld", requested, actual); - if (actual != requested) - return MODE_CLOCK_RANGE; - - if (!msm_edp_ctrl_pixel_clock_valid( - edp->ctrl, mode->clock, NULL, NULL)) - return MODE_CLOCK_RANGE; - - /* Invalidate all modes if color format is not supported */ - if (connector->display_info.bpc > 8) - return MODE_BAD; - - return MODE_OK; -} - -static const struct drm_connector_funcs edp_connector_funcs = { - .detect = edp_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = edp_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static const struct drm_connector_helper_funcs edp_connector_helper_funcs = { - .get_modes = edp_connector_get_modes, - .mode_valid = edp_connector_mode_valid, -}; - -/* initialize connector */ -struct drm_connector *msm_edp_connector_init(struct msm_edp *edp) -{ - struct drm_connector *connector = NULL; - struct edp_connector *edp_connector; - int ret; - - edp_connector = kzalloc(sizeof(*edp_connector), GFP_KERNEL); - if (!edp_connector) - return ERR_PTR(-ENOMEM); - - edp_connector->edp = edp; - - connector = &edp_connector->base; - - ret = drm_connector_init(edp->dev, connector, &edp_connector_funcs, - DRM_MODE_CONNECTOR_eDP); - if (ret) - return ERR_PTR(ret); - - drm_connector_helper_add(connector, &edp_connector_helper_funcs); - - /* We don't support HPD, so only poll status until connected. */ - connector->polled = DRM_CONNECTOR_POLL_CONNECT; - - /* Display driver doesn't support interlace now. */ - connector->interlace_allowed = false; - connector->doublescan_allowed = false; - - drm_connector_attach_encoder(connector, edp->encoder); - - return connector; -} diff --git a/drivers/gpu/drm/msm/edp/edp_phy.c b/drivers/gpu/drm/msm/edp/edp_phy.c deleted file mode 100644 index fcaf7b7ecdd2..000000000000 --- a/drivers/gpu/drm/msm/edp/edp_phy.c +++ /dev/null @@ -1,98 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 
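
msm_edp_phy_ready() below open-codes its status poll: up to ~100 iterations of usleep_range(500, 1000) waiting for bit 0 of the global PHY status register. For comparison, an equivalent written against the stock helper from <linux/iopoll.h> would be (sketch only, keeping roughly the same 100 ms budget):

	u32 status;

	if (readl_poll_timeout(phy->base + REG_EDP_PHY_GLB_PHY_STATUS,
			       status, status & 0x01, 1000, 100 * 1000)) {
		pr_err("%s: PHY NOT ready\n", __func__);
		return false;
	}
	return true;
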
- */ - -#include "edp.h" -#include "edp.xml.h" - -#define EDP_MAX_LANE 4 - -struct edp_phy { - void __iomem *base; -}; - -bool msm_edp_phy_ready(struct edp_phy *phy) -{ - u32 status; - int cnt = 100; - - while (--cnt) { - status = edp_read(phy->base + - REG_EDP_PHY_GLB_PHY_STATUS); - if (status & 0x01) - break; - usleep_range(500, 1000); - } - - if (cnt == 0) { - pr_err("%s: PHY NOT ready\n", __func__); - return false; - } else { - return true; - } -} - -void msm_edp_phy_ctrl(struct edp_phy *phy, int enable) -{ - DBG("enable=%d", enable); - if (enable) { - /* Reset */ - edp_write(phy->base + REG_EDP_PHY_CTRL, - EDP_PHY_CTRL_SW_RESET | EDP_PHY_CTRL_SW_RESET_PLL); - /* Make sure fully reset */ - wmb(); - usleep_range(500, 1000); - edp_write(phy->base + REG_EDP_PHY_CTRL, 0x000); - edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0x3f); - edp_write(phy->base + REG_EDP_PHY_GLB_CFG, 0x1); - } else { - edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0xc0); - } -} - -/* voltage mode and pre emphasis cfg */ -void msm_edp_phy_vm_pe_init(struct edp_phy *phy) -{ - edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, 0x3); - edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, 0x64); - edp_write(phy->base + REG_EDP_PHY_GLB_MISC9, 0x6c); -} - -void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1) -{ - edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, v0); - edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, v1); -} - -void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane) -{ - u32 i; - u32 data; - - if (up) - data = 0; /* power up */ - else - data = 0x7; /* power down */ - - for (i = 0; i < max_lane; i++) - edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data); - - /* power down unused lane */ - data = 0x7; /* power down */ - for (i = max_lane; i < EDP_MAX_LANE; i++) - edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data); -} - -void *msm_edp_phy_init(struct device *dev, void __iomem *regbase) -{ - struct edp_phy *phy = NULL; - - phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); - if (!phy) - return NULL; - - phy->base = regbase; - return phy; -} - diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 75b64e6ae035..719720709e9e 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -8,6 +8,8 @@ #include <linux/of_irq.h> #include <linux/of_gpio.h> +#include <drm/drm_bridge_connector.h> + #include <sound/hdmi-codec.h> #include "hdmi.h" @@ -41,7 +43,7 @@ static irqreturn_t msm_hdmi_irq(int irq, void *dev_id) struct hdmi *hdmi = dev_id; /* Process HPD: */ - msm_hdmi_connector_irq(hdmi->connector); + msm_hdmi_hpd_irq(hdmi->bridge); /* Process DDC: */ msm_hdmi_i2c_irq(hdmi->i2c); @@ -95,8 +97,13 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi) of_node_put(phy_node); - if (!phy_pdev || !hdmi->phy) { + if (!phy_pdev) { + DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n"); + return -EPROBE_DEFER; + } + if (!hdmi->phy) { DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n"); + put_device(&phy_pdev->dev); return -EPROBE_DEFER; } @@ -281,7 +288,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } - hdmi->connector = msm_hdmi_connector_init(hdmi); + hdmi->connector = drm_bridge_connector_init(hdmi->dev, encoder); if (IS_ERR(hdmi->connector)) { ret = PTR_ERR(hdmi->connector); DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret); @@ -289,6 +296,8 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } + drm_connector_attach_encoder(hdmi->connector, hdmi->encoder); + hdmi->irq = 
irq_of_parse_and_map(pdev->dev.of_node, 0); if (hdmi->irq < 0) { ret = hdmi->irq; @@ -305,7 +314,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } - ret = msm_hdmi_hpd_enable(hdmi->connector); + drm_bridge_connector_enable_hpd(hdmi->connector); + + ret = msm_hdmi_hpd_enable(hdmi->bridge); if (ret < 0) { DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret); goto fail; @@ -514,8 +525,7 @@ static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev) static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) { - struct drm_device *drm = dev_get_drvdata(master); - struct msm_drm_private *priv = drm->dev_private; + struct msm_drm_private *priv = dev_get_drvdata(master); struct hdmi_platform_config *hdmi_cfg; struct hdmi *hdmi; struct device_node *of_node = dev->of_node; @@ -586,8 +596,8 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) static void msm_hdmi_unbind(struct device *dev, struct device *master, void *data) { - struct drm_device *drm = dev_get_drvdata(master); - struct msm_drm_private *priv = drm->dev_private; + struct msm_drm_private *priv = dev_get_drvdata(master); + if (priv->hdmi) { if (priv->hdmi->audio_pdev) platform_device_unregister(priv->hdmi->audio_pdev); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 82261078c6b1..736f348befb3 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -114,6 +114,13 @@ struct hdmi_platform_config { struct hdmi_gpio_data gpios[HDMI_MAX_NUM_GPIO]; }; +struct hdmi_bridge { + struct drm_bridge base; + struct hdmi *hdmi; + struct work_struct hpd_work; +}; +#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base) + void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on); static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data) @@ -230,13 +237,11 @@ void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate); struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi); void msm_hdmi_bridge_destroy(struct drm_bridge *bridge); -/* - * hdmi connector: - */ - -void msm_hdmi_connector_irq(struct drm_connector *connector); -struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi); -int msm_hdmi_hpd_enable(struct drm_connector *connector); +void msm_hdmi_hpd_irq(struct drm_bridge *bridge); +enum drm_connector_status msm_hdmi_bridge_detect( + struct drm_bridge *bridge); +int msm_hdmi_hpd_enable(struct drm_bridge *bridge); +void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge); /* * i2c adapter for ddc: diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index f04eb4a70f0d..68fba4bf7212 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -5,17 +5,16 @@ */ #include <linux/delay.h> +#include <drm/drm_bridge_connector.h> +#include "msm_kms.h" #include "hdmi.h" -struct hdmi_bridge { - struct drm_bridge base; - struct hdmi *hdmi; -}; -#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base) - void msm_hdmi_bridge_destroy(struct drm_bridge *bridge) { + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + + msm_hdmi_hpd_disable(hdmi_bridge); } static void msm_hdmi_power_on(struct drm_bridge *bridge) @@ -70,7 +69,7 @@ static void power_off(struct drm_bridge *bridge) if (ret) DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret); - pm_runtime_put_autosuspend(&hdmi->pdev->dev); + pm_runtime_put(&hdmi->pdev->dev); } #define 
AVI_IFRAME_LINE_NUMBER 1 @@ -251,14 +250,76 @@ static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge, msm_hdmi_audio_update(hdmi); } +static struct edid *msm_hdmi_bridge_get_edid(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; + struct edid *edid; + uint32_t hdmi_ctrl; + + hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL); + hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE); + + edid = drm_get_edid(connector, hdmi->i2c); + + hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl); + + hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid); + + return edid; +} + +static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; + const struct hdmi_platform_config *config = hdmi->config; + struct msm_drm_private *priv = bridge->dev->dev_private; + struct msm_kms *kms = priv->kms; + long actual, requested; + + requested = 1000 * mode->clock; + actual = kms->funcs->round_pixclk(kms, + requested, hdmi_bridge->hdmi->encoder); + + /* for mdp5/apq8074, we manage our own pixel clk (as opposed to + * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder + * instead): + */ + if (config->pwr_clk_cnt > 0) + actual = clk_round_rate(hdmi->pwr_clks[0], actual); + + DBG("requested=%ld, actual=%ld", requested, actual); + + if (actual != requested) + return MODE_CLOCK_RANGE; + + return 0; +} + static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = { .pre_enable = msm_hdmi_bridge_pre_enable, .enable = msm_hdmi_bridge_enable, .disable = msm_hdmi_bridge_disable, .post_disable = msm_hdmi_bridge_post_disable, .mode_set = msm_hdmi_bridge_mode_set, + .mode_valid = msm_hdmi_bridge_mode_valid, + .get_edid = msm_hdmi_bridge_get_edid, + .detect = msm_hdmi_bridge_detect, }; +static void +msm_hdmi_hotplug_work(struct work_struct *work) +{ + struct hdmi_bridge *hdmi_bridge = + container_of(work, struct hdmi_bridge, hpd_work); + struct drm_bridge *bridge = &hdmi_bridge->base; + + drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge)); +} /* initialize bridge */ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi) @@ -275,11 +336,17 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi) } hdmi_bridge->hdmi = hdmi; + INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work); bridge = &hdmi_bridge->base; bridge->funcs = &msm_hdmi_bridge_funcs; + bridge->ddc = hdmi->i2c; + bridge->type = DRM_MODE_CONNECTOR_HDMIA; + bridge->ops = DRM_BRIDGE_OP_HPD | + DRM_BRIDGE_OP_DETECT | + DRM_BRIDGE_OP_EDID; - ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, 0); + ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c index a7f729cdec7b..75605ddac7c4 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c @@ -11,13 +11,6 @@ #include "msm_kms.h" #include "hdmi.h" -struct hdmi_connector { - struct drm_connector base; - struct hdmi *hdmi; - struct work_struct hpd_work; -}; -#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base) - static void msm_hdmi_phy_reset(struct hdmi *hdmi) { unsigned int val; @@ -139,10 +132,10 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable) } } -int 
msm_hdmi_hpd_enable(struct drm_connector *connector) +int msm_hdmi_hpd_enable(struct drm_bridge *bridge) { - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - struct hdmi *hdmi = hdmi_connector->hdmi; + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; const struct hdmi_platform_config *config = hdmi->config; struct device *dev = &hdmi->pdev->dev; uint32_t hpd_ctrl; @@ -199,9 +192,9 @@ fail: return ret; } -static void hdp_disable(struct hdmi_connector *hdmi_connector) +void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge) { - struct hdmi *hdmi = hdmi_connector->hdmi; + struct hdmi *hdmi = hdmi_bridge->hdmi; const struct hdmi_platform_config *config = hdmi->config; struct device *dev = &hdmi->pdev->dev; int ret; @@ -212,7 +205,7 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector) msm_hdmi_set_mode(hdmi, false); enable_hpd_clocks(hdmi, false); - pm_runtime_put_autosuspend(dev); + pm_runtime_put(dev); ret = gpio_config(hdmi, false); if (ret) @@ -227,19 +220,10 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector) dev_warn(dev, "failed to disable hpd regulator: %d\n", ret); } -static void -msm_hdmi_hotplug_work(struct work_struct *work) -{ - struct hdmi_connector *hdmi_connector = - container_of(work, struct hdmi_connector, hpd_work); - struct drm_connector *connector = &hdmi_connector->base; - drm_helper_hpd_irq_event(connector->dev); -} - -void msm_hdmi_connector_irq(struct drm_connector *connector) +void msm_hdmi_hpd_irq(struct drm_bridge *bridge) { - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - struct hdmi *hdmi = hdmi_connector->hdmi; + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; uint32_t hpd_int_status, hpd_int_ctrl; /* Process HPD: */ @@ -262,7 +246,7 @@ void msm_hdmi_connector_irq(struct drm_connector *connector) hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT; hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl); - queue_work(hdmi->workq, &hdmi_connector->hpd_work); + queue_work(hdmi->workq, &hdmi_bridge->hpd_work); } } @@ -276,7 +260,7 @@ static enum drm_connector_status detect_reg(struct hdmi *hdmi) hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS); enable_hpd_clocks(hdmi, false); - pm_runtime_put_autosuspend(&hdmi->pdev->dev); + pm_runtime_put(&hdmi->pdev->dev); return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ? 
connector_status_connected : connector_status_disconnected; @@ -293,11 +277,11 @@ static enum drm_connector_status detect_gpio(struct hdmi *hdmi) connector_status_disconnected; } -static enum drm_connector_status hdmi_connector_detect( - struct drm_connector *connector, bool force) +enum drm_connector_status msm_hdmi_bridge_detect( + struct drm_bridge *bridge) { - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - struct hdmi *hdmi = hdmi_connector->hdmi; + struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = hdmi_bridge->hdmi; const struct hdmi_platform_config *config = hdmi->config; struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX]; enum drm_connector_status stat_gpio, stat_reg; @@ -331,115 +315,3 @@ static enum drm_connector_status hdmi_connector_detect( return stat_gpio; } - -static void hdmi_connector_destroy(struct drm_connector *connector) -{ - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - - hdp_disable(hdmi_connector); - - drm_connector_cleanup(connector); - - kfree(hdmi_connector); -} - -static int msm_hdmi_connector_get_modes(struct drm_connector *connector) -{ - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - struct hdmi *hdmi = hdmi_connector->hdmi; - struct edid *edid; - uint32_t hdmi_ctrl; - int ret = 0; - - hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL); - hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE); - - edid = drm_get_edid(connector, hdmi->i2c); - - hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl); - - hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid); - drm_connector_update_edid_property(connector, edid); - - if (edid) { - ret = drm_add_edid_modes(connector, edid); - kfree(edid); - } - - return ret; -} - -static int msm_hdmi_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - struct hdmi *hdmi = hdmi_connector->hdmi; - const struct hdmi_platform_config *config = hdmi->config; - struct msm_drm_private *priv = connector->dev->dev_private; - struct msm_kms *kms = priv->kms; - long actual, requested; - - requested = 1000 * mode->clock; - actual = kms->funcs->round_pixclk(kms, - requested, hdmi_connector->hdmi->encoder); - - /* for mdp5/apq8074, we manage our own pixel clk (as opposed to - * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder - * instead): - */ - if (config->pwr_clk_cnt > 0) - actual = clk_round_rate(hdmi->pwr_clks[0], actual); - - DBG("requested=%ld, actual=%ld", requested, actual); - - if (actual != requested) - return MODE_CLOCK_RANGE; - - return 0; -} - -static const struct drm_connector_funcs hdmi_connector_funcs = { - .detect = hdmi_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = hdmi_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = { - .get_modes = msm_hdmi_connector_get_modes, - .mode_valid = msm_hdmi_connector_mode_valid, -}; - -/* initialize connector */ -struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi) -{ - struct drm_connector *connector = NULL; - struct hdmi_connector *hdmi_connector; - - hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL); - if (!hdmi_connector) - return ERR_PTR(-ENOMEM); - - 
hdmi_connector->hdmi = hdmi; - INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work); - - connector = &hdmi_connector->base; - - drm_connector_init_with_ddc(hdmi->dev, connector, - &hdmi_connector_funcs, - DRM_MODE_CONNECTOR_HDMIA, - hdmi->i2c); - drm_connector_helper_add(connector, &msm_hdmi_connector_helper_funcs); - - connector->polled = DRM_CONNECTOR_POLL_CONNECT | - DRM_CONNECTOR_POLL_DISCONNECT; - - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - drm_connector_attach_encoder(connector, hdmi->encoder); - - return connector; -} diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index dee13fedee3b..0804c31e8962 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -15,6 +15,11 @@ #include "msm_gpu.h" #include "msm_kms.h" #include "msm_debugfs.h" +#include "disp/msm_disp_snapshot.h" + +/* + * GPU Snapshot: + */ struct msm_gpu_show_priv { struct msm_gpu_state *state; @@ -29,14 +34,14 @@ static int msm_gpu_show(struct seq_file *m, void *arg) struct msm_gpu *gpu = priv->gpu; int ret; - ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex); + ret = mutex_lock_interruptible(&gpu->lock); if (ret) return ret; drm_printf(&p, "%s Status:\n", gpu->name); gpu->funcs->show(gpu, show_priv->state, &p); - mutex_unlock(&show_priv->dev->struct_mutex); + mutex_unlock(&gpu->lock); return 0; } @@ -48,9 +53,9 @@ static int msm_gpu_release(struct inode *inode, struct file *file) struct msm_drm_private *priv = show_priv->dev->dev_private; struct msm_gpu *gpu = priv->gpu; - mutex_lock(&show_priv->dev->struct_mutex); + mutex_lock(&gpu->lock); gpu->funcs->gpu_state_put(show_priv->state); - mutex_unlock(&show_priv->dev->struct_mutex); + mutex_unlock(&gpu->lock); kfree(show_priv); @@ -72,7 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file) if (!show_priv) return -ENOMEM; - ret = mutex_lock_interruptible(&dev->struct_mutex); + ret = mutex_lock_interruptible(&gpu->lock); if (ret) goto free_priv; @@ -81,7 +86,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file) show_priv->state = gpu->funcs->gpu_state_get(gpu); pm_runtime_put_sync(&gpu->pdev->dev); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&gpu->lock); if (IS_ERR(show_priv->state)) { ret = PTR_ERR(show_priv->state); @@ -109,6 +114,73 @@ static const struct file_operations msm_gpu_fops = { .release = msm_gpu_release, }; +/* + * Display Snapshot: + */ + +static int msm_kms_show(struct seq_file *m, void *arg) +{ + struct drm_printer p = drm_seq_file_printer(m); + struct msm_disp_state *state = m->private; + + msm_disp_state_print(state, &p); + + return 0; +} + +static int msm_kms_release(struct inode *inode, struct file *file) +{ + struct seq_file *m = file->private_data; + struct msm_disp_state *state = m->private; + + msm_disp_state_free(state); + + return single_release(inode, file); +} + +static int msm_kms_open(struct inode *inode, struct file *file) +{ + struct drm_device *dev = inode->i_private; + struct msm_drm_private *priv = dev->dev_private; + struct msm_disp_state *state; + int ret; + + if (!priv->kms) + return -ENODEV; + + ret = mutex_lock_interruptible(&priv->kms->dump_mutex); + if (ret) + return ret; + + state = msm_disp_snapshot_state_sync(priv->kms); + + mutex_unlock(&priv->kms->dump_mutex); + + if (IS_ERR(state)) { + return PTR_ERR(state); + } + + ret = single_open(file, msm_kms_show, state); + if (ret) { + msm_disp_state_free(state); + return ret; + } + + return 0; +} + +static const struct 
file_operations msm_kms_fops = { + .owner = THIS_MODULE, + .open = msm_kms_open, + .read = seq_read, + .llseek = seq_lseek, + .release = msm_kms_release, +}; + +/* + * Other debugfs: + */ + static unsigned long last_shrink_freed; static int @@ -134,8 +206,10 @@ DEFINE_SIMPLE_ATTRIBUTE(shrink_fops, "0x%08llx\n"); -static int msm_gem_show(struct drm_device *dev, struct seq_file *m) +static int msm_gem_show(struct seq_file *m, void *arg) { + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; struct msm_drm_private *priv = dev->dev_private; int ret; @@ -150,8 +224,10 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m) return 0; } -static int msm_mm_show(struct drm_device *dev, struct seq_file *m) +static int msm_mm_show(struct seq_file *m, void *arg) { + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; struct drm_printer p = drm_seq_file_printer(m); drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p); @@ -159,8 +235,10 @@ static int msm_mm_show(struct drm_device *dev, struct seq_file *m) return 0; } -static int msm_fb_show(struct drm_device *dev, struct seq_file *m) +static int msm_fb_show(struct seq_file *m, void *arg) { + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; struct msm_drm_private *priv = dev->dev_private; struct drm_framebuffer *fb, *fbdev_fb = NULL; @@ -183,29 +261,10 @@ static int msm_fb_show(struct drm_device *dev, struct seq_file *m) return 0; } -static int show_locked(struct seq_file *m, void *arg) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - int (*show)(struct drm_device *dev, struct seq_file *m) = - node->info_ent->data; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - ret = show(dev, m); - - mutex_unlock(&dev->struct_mutex); - - return ret; -} - static struct drm_info_list msm_debugfs_list[] = { - {"gem", show_locked, 0, msm_gem_show}, - { "mm", show_locked, 0, msm_mm_show }, - { "fb", show_locked, 0, msm_fb_show }, + {"gem", msm_gem_show}, + { "mm", msm_mm_show }, + { "fb", msm_fb_show }, }; static int late_init_minor(struct drm_minor *minor) @@ -252,9 +311,15 @@ void msm_debugfs_init(struct drm_minor *minor) debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root, dev, &msm_gpu_fops); + debugfs_create_file("kms", S_IRUSR, minor->debugfs_root, + dev, &msm_kms_fops); + debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root, &priv->hangcheck_period); + debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root, + &priv->disable_err_irq); + debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root, dev, &shrink_fops); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 8f30e68ae3b5..ff19c5a94d6f 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -339,10 +339,9 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv, static int msm_drm_uninit(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); - struct drm_device *ddev = platform_get_drvdata(pdev); - struct msm_drm_private *priv = ddev->dev_private; + struct msm_drm_private *priv = platform_get_drvdata(pdev); + struct drm_device *ddev = priv->dev; struct msm_kms *kms = priv->kms; - struct msm_mdss *mdss = priv->mdss; int i; /* @@ -402,14 +401,10 @@ static int msm_drm_uninit(struct 
device *dev) component_unbind_all(dev, ddev); - if (mdss && mdss->funcs) - mdss->funcs->destroy(ddev); - ddev->dev_private = NULL; drm_dev_put(ddev); destroy_workqueue(priv->wq); - kfree(priv); return 0; } @@ -466,7 +461,7 @@ static int msm_init_vram(struct drm_device *dev) of_node_put(node); if (ret) return ret; - size = r.end - r.start; + size = r.end - r.start + 1; DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start); /* if we have no IOMMU, then we need to use carveout allocator. @@ -512,10 +507,9 @@ static int msm_init_vram(struct drm_device *dev) static int msm_drm_init(struct device *dev, const struct drm_driver *drv) { struct platform_device *pdev = to_platform_device(dev); + struct msm_drm_private *priv = dev_get_drvdata(dev); struct drm_device *ddev; - struct msm_drm_private *priv; struct msm_kms *kms; - struct msm_mdss *mdss; int ret, i; if (drm_firmware_drivers_only()) @@ -526,34 +520,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) DRM_DEV_ERROR(dev, "failed to allocate drm_device\n"); return PTR_ERR(ddev); } - - platform_set_drvdata(pdev, ddev); - - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - ret = -ENOMEM; - goto err_put_drm_dev; - } - ddev->dev_private = priv; priv->dev = ddev; - switch (get_mdp_ver(pdev)) { - case KMS_MDP5: - ret = mdp5_mdss_init(ddev); - break; - case KMS_DPU: - ret = dpu_mdss_init(ddev); - break; - default: - ret = 0; - break; - } - if (ret) - goto err_free_priv; - - mdss = priv->mdss; - priv->wq = alloc_ordered_workqueue("msm", 0); priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD; @@ -574,12 +543,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) ret = msm_init_vram(ddev); if (ret) - goto err_destroy_mdss; + return ret; /* Bind all our sub-components: */ ret = component_bind_all(dev, ddev); if (ret) - goto err_destroy_mdss; + return ret; dma_set_max_seg_size(dev, UINT_MAX); @@ -685,15 +654,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) err_msm_uninit: msm_drm_uninit(dev); return ret; -err_destroy_mdss: - if (mdss && mdss->funcs) - mdss->funcs->destroy(ddev); -err_free_priv: - kfree(priv); -err_put_drm_dev: - drm_dev_put(ddev); - platform_set_drvdata(pdev, NULL); - return ret; } /* @@ -755,14 +715,8 @@ static void context_close(struct msm_file_private *ctx) static void msm_postclose(struct drm_device *dev, struct drm_file *file) { - struct msm_drm_private *priv = dev->dev_private; struct msm_file_private *ctx = file->driver_priv; - mutex_lock(&dev->struct_mutex); - if (ctx == priv->lastctx) - priv->lastctx = NULL; - mutex_unlock(&dev->struct_mutex); - context_close(ctx); } @@ -976,7 +930,7 @@ static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id, struct dma_fence *fence; int ret; - if (fence_id > queue->last_fence) { + if (fence_after(fence_id, queue->last_fence)) { DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n", fence_id, queue->last_fence); return -EINVAL; @@ -1145,8 +1099,7 @@ static const struct drm_driver msm_driver = { static int __maybe_unused msm_runtime_suspend(struct device *dev) { - struct drm_device *ddev = dev_get_drvdata(dev); - struct msm_drm_private *priv = ddev->dev_private; + struct msm_drm_private *priv = dev_get_drvdata(dev); struct msm_mdss *mdss = priv->mdss; DBG(""); @@ -1159,8 +1112,7 @@ static int __maybe_unused msm_runtime_suspend(struct device *dev) static int __maybe_unused msm_runtime_resume(struct device *dev) { - struct drm_device *ddev = dev_get_drvdata(dev); - 
struct msm_drm_private *priv = ddev->dev_private; + struct msm_drm_private *priv = dev_get_drvdata(dev); struct msm_mdss *mdss = priv->mdss; DBG(""); @@ -1190,8 +1142,8 @@ static int __maybe_unused msm_pm_resume(struct device *dev) static int __maybe_unused msm_pm_prepare(struct device *dev) { - struct drm_device *ddev = dev_get_drvdata(dev); - struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL; + struct msm_drm_private *priv = dev_get_drvdata(dev); + struct drm_device *ddev = priv ? priv->dev : NULL; if (!priv || !priv->kms) return 0; @@ -1201,8 +1153,8 @@ static int __maybe_unused msm_pm_prepare(struct device *dev) static void __maybe_unused msm_pm_complete(struct device *dev) { - struct drm_device *ddev = dev_get_drvdata(dev); - struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL; + struct msm_drm_private *priv = dev_get_drvdata(dev); + struct drm_device *ddev = priv ? priv->dev : NULL; if (!priv || !priv->kms) return; @@ -1295,9 +1247,10 @@ static int add_components_mdp(struct device *mdp_dev, return 0; } -static int compare_name_mdp(struct device *dev, void *data) +static int find_mdp_node(struct device *dev, void *data) { - return (strstr(dev_name(dev), "mdp") != NULL); + return of_match_node(dpu_dt_match, dev->of_node) || + of_match_node(mdp5_dt_match, dev->of_node); } static int add_display_components(struct platform_device *pdev, @@ -1322,7 +1275,7 @@ static int add_display_components(struct platform_device *pdev, return ret; } - mdp_dev = device_find_child(dev, NULL, compare_name_mdp); + mdp_dev = device_find_child(dev, NULL, find_mdp_node); if (!mdp_dev) { DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n"); of_platform_depopulate(dev); @@ -1400,12 +1353,35 @@ static const struct component_master_ops msm_drm_ops = { static int msm_pdev_probe(struct platform_device *pdev) { struct component_match *match = NULL; + struct msm_drm_private *priv; int ret; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + switch (get_mdp_ver(pdev)) { + case KMS_MDP5: + ret = mdp5_mdss_init(pdev); + break; + case KMS_DPU: + ret = dpu_mdss_init(pdev); + break; + default: + ret = 0; + break; + } + if (ret) { + platform_set_drvdata(pdev, NULL); + return ret; + } + if (get_mdp_ver(pdev)) { ret = add_display_components(pdev, &match); if (ret) - return ret; + goto fail; } ret = add_gpu_components(&pdev->dev, &match); @@ -1427,21 +1403,31 @@ static int msm_pdev_probe(struct platform_device *pdev) fail: of_platform_depopulate(&pdev->dev); + + if (priv->mdss && priv->mdss->funcs) + priv->mdss->funcs->destroy(priv->mdss); + return ret; } static int msm_pdev_remove(struct platform_device *pdev) { + struct msm_drm_private *priv = platform_get_drvdata(pdev); + struct msm_mdss *mdss = priv->mdss; + component_master_del(&pdev->dev, &msm_drm_ops); of_platform_depopulate(&pdev->dev); + if (mdss && mdss->funcs) + mdss->funcs->destroy(mdss); + return 0; } static void msm_pdev_shutdown(struct platform_device *pdev) { - struct drm_device *drm = platform_get_drvdata(pdev); - struct msm_drm_private *priv = drm ? drm->dev_private : NULL; + struct msm_drm_private *priv = platform_get_drvdata(pdev); + struct drm_device *drm = priv ? 
priv->dev : NULL; if (!priv || !priv->kms) return; @@ -1481,7 +1467,6 @@ static int __init msm_drm_register(void) msm_mdp_register(); msm_dpu_register(); msm_dsi_register(); - msm_edp_register(); msm_hdmi_register(); msm_dp_register(); adreno_register(); @@ -1495,7 +1480,6 @@ static void __exit msm_drm_unregister(void) msm_dp_unregister(); msm_hdmi_unregister(); adreno_unregister(); - msm_edp_unregister(); msm_dsi_unregister(); msm_mdp_unregister(); msm_dpu_unregister(); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index eb984d925f4d..d7574e6bd4e4 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -151,12 +151,6 @@ struct msm_drm_private { */ struct hdmi *hdmi; - /* eDP is for mdp5 only, but kms has not been created - * when edp_bind() and edp_init() are called. Here is the only - * place to keep the edp instance. - */ - struct msm_edp *edp; - /* DSI is shared by mdp4 and mdp5 */ struct msm_dsi *dsi[2]; @@ -164,7 +158,7 @@ struct msm_drm_private { /* when we have more than one 'msm_gpu' these need to be an array: */ struct msm_gpu *gpu; - struct msm_file_private *lastctx; + /* gpu is only set on open(), but we need this info earlier */ bool is_a2xx; bool has_cached_coherent; @@ -246,6 +240,15 @@ struct msm_drm_private { /* For hang detection, in ms */ unsigned int hangcheck_period; + + /** + * disable_err_irq: + * + * Disable handling of GPU hw error interrupts, to force fallback to + * sw hangcheck timer. Written (via debugfs) by igt tests to test + * the sw hangcheck mechanism. + */ + bool disable_err_irq; }; struct msm_format { @@ -335,12 +338,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev, void __init msm_hdmi_register(void); void __exit msm_hdmi_unregister(void); -struct msm_edp; -void __init msm_edp_register(void); -void __exit msm_edp_unregister(void); -int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, - struct drm_encoder *encoder); - struct msm_dsi; #ifdef CONFIG_DRM_MSM_DSI int dsi_dev_attach(struct platform_device *pdev); @@ -392,8 +389,12 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder); int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder); int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder); void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode); + +struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display, + struct drm_device *dev, + struct drm_encoder *encoder); void msm_dp_irq_postinstall(struct msm_dp *dp_display); void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display); @@ -430,8 +431,8 @@ static inline int msm_dp_display_pre_disable(struct msm_dp *dp, } static inline void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) { } diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 0daaeb54ff6f..4c39ef9dd75d 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -81,8 +81,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, bo = msm_framebuffer_bo(fb, 0); - mutex_lock(&dev->struct_mutex); - /* * NOTE: if we can be guaranteed to 
be able to map buffer * in panic (ie. lock-safe, etc) we could avoid pinning the @@ -91,14 +89,14 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr); if (ret) { DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret); - goto fail_unlock; + goto fail; } fbi = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(fbi)) { DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n"); ret = PTR_ERR(fbi); - goto fail_unlock; + goto fail; } DBG("fbi=%p, dev=%p", fbi, dev); @@ -115,7 +113,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, fbi->screen_base = msm_gem_get_vaddr(bo); if (IS_ERR(fbi->screen_base)) { ret = PTR_ERR(fbi->screen_base); - goto fail_unlock; + goto fail; } fbi->screen_size = bo->size; fbi->fix.smem_start = paddr; @@ -124,12 +122,9 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres); DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height); - mutex_unlock(&dev->struct_mutex); - return 0; -fail_unlock: - mutex_unlock(&dev->struct_mutex); +fail: drm_framebuffer_remove(fb); return ret; } diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h index 4783db528bcc..17ee3822b423 100644 --- a/drivers/gpu/drm/msm/msm_fence.h +++ b/drivers/gpu/drm/msm/msm_fence.h @@ -60,4 +60,16 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence); struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx); +static inline bool +fence_before(uint32_t a, uint32_t b) +{ + return (int32_t)(a - b) < 0; +} + +static inline bool +fence_after(uint32_t a, uint32_t b) +{ + return (int32_t)(a - b) > 0; +} + #endif diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 282628d6b72c..6cfa984dee6a 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -881,7 +881,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, * to the underlying fence. 
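The fence_before()/fence_after() helpers introduced in the msm_fence.h hunk above use the same signed-subtraction idiom as the kernel's time_after(): a seqno counts as "after" another iff the signed 32-bit difference is positive, which stays correct across uint32_t wraparound as long as the two values are less than 2^31 apart. A minimal standalone demonstration (plain userspace C, written for this page rather than taken from the driver):

	#include <stdint.h>
	#include <stdio.h>

	/* mirrors the msm_fence.h helper above */
	static int fence_after(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) > 0;
	}

	int main(void)
	{
		uint32_t last = 0xfffffffeu, cur = 3;	/* seqno counter wrapped */

		printf("naive cur > last: %d\n", cur > last);		/* 0: wrong */
		printf("fence_after():    %d\n", fence_after(cur, last));	/* 1: right */
		return 0;
	}

Relatedly, the msm_gem_submit.c hunk just below starts idr_alloc_cyclic() at 1 instead of 0, consistent with the error path right after it treating a fence_id of 0 as "no fence allocated".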
*/ submit->fence_id = idr_alloc_cyclic(&queue->fence_idr, - submit->user_fence, 0, INT_MAX, GFP_KERNEL); + submit->user_fence, 1, INT_MAX, GFP_KERNEL); if (submit->fence_id < 0) { ret = submit->fence_id = 0; submit->fence_id = 0; diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 2c46cd968ac4..2c1049c0ea14 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -150,7 +150,7 @@ int msm_gpu_hw_init(struct msm_gpu *gpu) { int ret; - WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex)); + WARN_ON(!mutex_is_locked(&gpu->lock)); if (!gpu->needs_hw_init) return 0; @@ -172,7 +172,7 @@ static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring, spin_lock_irqsave(&ring->submit_lock, flags); list_for_each_entry(submit, &ring->submits, node) { - if (submit->seqno > fence) + if (fence_after(submit->seqno, fence)) break; msm_update_fence(submit->ring->fctx, @@ -361,7 +361,7 @@ static void recover_worker(struct kthread_work *work) char *comm = NULL, *cmd = NULL; int i; - mutex_lock(&dev->struct_mutex); + mutex_lock(&gpu->lock); DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name); @@ -442,7 +442,7 @@ static void recover_worker(struct kthread_work *work) } } - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&gpu->lock); msm_gpu_retire(gpu); } @@ -450,12 +450,11 @@ static void recover_worker(struct kthread_work *work) static void fault_worker(struct kthread_work *work) { struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work); - struct drm_device *dev = gpu->dev; struct msm_gem_submit *submit; struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu); char *comm = NULL, *cmd = NULL; - mutex_lock(&dev->struct_mutex); + mutex_lock(&gpu->lock); submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1); if (submit && submit->fault_dumped) @@ -490,7 +489,7 @@ resume_smmu: memset(&gpu->fault_info, 0, sizeof(gpu->fault_info)); gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&gpu->lock); } static void hangcheck_timer_reset(struct msm_gpu *gpu) @@ -510,7 +509,7 @@ static void hangcheck_handler(struct timer_list *t) if (fence != ring->hangcheck_fence) { /* some progress has been made.. ya! */ ring->hangcheck_fence = fence; - } else if (fence < ring->seqno) { + } else if (fence_before(fence, ring->seqno)) { /* no progress and not done.. hung! 
*/ ring->hangcheck_fence = fence; DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n", @@ -524,7 +523,7 @@ static void hangcheck_handler(struct timer_list *t) } /* if still more pending work, reset the hangcheck timer: */ - if (ring->seqno > ring->hangcheck_fence) + if (fence_after(ring->seqno, ring->hangcheck_fence)) hangcheck_timer_reset(gpu); /* workaround for missing irq: */ @@ -704,6 +703,8 @@ static void retire_submits(struct msm_gpu *gpu) } } } + + wake_up_all(&gpu->retire_event); } static void retire_worker(struct kthread_work *work) @@ -733,7 +734,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) struct msm_ringbuffer *ring = submit->ring; unsigned long flags; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + WARN_ON(!mutex_is_locked(&gpu->lock)); pm_runtime_get_sync(&gpu->pdev->dev); @@ -763,7 +764,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) mutex_unlock(&gpu->active_lock); gpu->funcs->submit(gpu, submit); - priv->lastctx = submit->queue->ctx; + gpu->cur_ctx_seqno = submit->queue->ctx->seqno; hangcheck_timer_reset(gpu); } @@ -848,6 +849,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, INIT_LIST_HEAD(&gpu->active_list); mutex_init(&gpu->active_lock); + mutex_init(&gpu->lock); + init_waitqueue_head(&gpu->retire_event); kthread_init_work(&gpu->retire_work, retire_worker); kthread_init_work(&gpu->recover_work, recover_worker); kthread_init_work(&gpu->fault_work, fault_worker); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 48ea2de911f1..92aa1e9196c6 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -88,6 +88,21 @@ struct msm_gpu_devfreq { struct devfreq *devfreq; /** + * idle_constraint: + * + * A PM QoS constraint to limit max freq while the GPU is idle. + */ + struct dev_pm_qos_request idle_freq; + + /** + * boost_constraint: + * + * A PM QoS constraint to boost min freq for a period of time + * until the boost expires. + */ + struct dev_pm_qos_request boost_freq; + + /** * busy_cycles: * * Used by implementation of gpu->gpu_busy() to track the last @@ -103,22 +118,19 @@ struct msm_gpu_devfreq { ktime_t idle_time; /** - * idle_freq: + * idle_work: * - * Shadow frequency used while the GPU is idle. From the PoV of - * the devfreq governor, we are continuing to sample busyness and - * adjust frequency while the GPU is idle, but we use this shadow - * value as the GPU is actually clamped to minimum frequency while - * it is inactive. + * Used to delay clamping to idle freq on active->idle transition. */ - unsigned long idle_freq; + struct msm_hrtimer_work idle_work; /** - * idle_work: + * boost_work: * - * Used to delay clamping to idle freq on active->idle transition. + * Used to reset the boost_constraint after the boost period has + * elapsed */ - struct msm_hrtimer_work idle_work; + struct msm_hrtimer_work boost_work; }; struct msm_gpu { @@ -144,6 +156,17 @@ struct msm_gpu { struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS]; int nr_rings; + /** + * cur_ctx_seqno: + * + * The ctx->seqno value of the last context to submit rendering, + * and the one with current pgtables installed (for generations + * that support per-context pgtables). Tracked by seqno rather + * than pointer value to avoid dangling pointers, and cases where + * a ctx can be freed and a new one created with the same address. + */ + int cur_ctx_seqno; + /* * List of GEM active objects on this gpu. 
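The cur_ctx_seqno comment above captures an ABA-avoidance rule: a context can be freed and a new one allocated at the same address, so caching the last-submitting context by pointer could produce a false match, while seqnos are never reused. A hedged sketch of how a submit path might consult the field (illustrative only, not the driver's exact per-generation logic):

	/* hypothetical helper, not taken verbatim from msm */
	static bool context_switched(struct msm_gpu *gpu,
				     struct msm_gem_submit *submit)
	{
		/* seqnos are never recycled, unlike context pointers */
		return gpu->cur_ctx_seqno != submit->queue->ctx->seqno;
	}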
Protected by * msm_drm_private::mm_lock @@ -151,12 +174,22 @@ struct msm_gpu { struct list_head active_list; /** + * lock: + * + * General lock for serializing all the gpu things. + * + * TODO move to per-ring locking where feasible (ie. submit/retire + * path, etc) + */ + struct mutex lock; + + /** * active_submits: * * The number of submitted but not yet retired submits, used to * determine transitions between active and idle. * - * Protected by lock + * Protected by active_lock */ int active_submits; @@ -197,6 +230,9 @@ struct msm_gpu { /* work for handling GPU recovery: */ struct kthread_work recover_work; + /** retire_event: notified when submits are retired: */ + wait_queue_head_t retire_event; + /* work for handling active-list retiring: */ struct kthread_work retire_work; @@ -241,7 +277,7 @@ static inline bool msm_gpu_active(struct msm_gpu *gpu) for (i = 0; i < gpu->nr_rings; i++) { struct msm_ringbuffer *ring = gpu->rb[i]; - if (ring->seqno > ring->memptrs->fence) + if (fence_after(ring->seqno, ring->memptrs->fence)) return true; } @@ -501,6 +537,7 @@ void msm_devfreq_init(struct msm_gpu *gpu); void msm_devfreq_cleanup(struct msm_gpu *gpu); void msm_devfreq_resume(struct msm_gpu *gpu); void msm_devfreq_suspend(struct msm_gpu *gpu); +void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor); void msm_devfreq_active(struct msm_gpu *gpu); void msm_devfreq_idle(struct msm_gpu *gpu); @@ -537,28 +574,28 @@ static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu) { struct msm_gpu_state *state = NULL; - mutex_lock(&gpu->dev->struct_mutex); + mutex_lock(&gpu->lock); if (gpu->crashstate) { kref_get(&gpu->crashstate->ref); state = gpu->crashstate; } - mutex_unlock(&gpu->dev->struct_mutex); + mutex_unlock(&gpu->lock); return state; } static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu) { - mutex_lock(&gpu->dev->struct_mutex); + mutex_lock(&gpu->lock); if (gpu->crashstate) { if (gpu->funcs->gpu_state_put(gpu->crashstate)) gpu->crashstate = NULL; } - mutex_unlock(&gpu->dev->struct_mutex); + mutex_unlock(&gpu->lock); } /* diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index 384e90c4b2a7..9bf319be11f6 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -9,6 +9,7 @@ #include <linux/devfreq.h> #include <linux/devfreq_cooling.h> +#include <linux/units.h> /* * Power Management: @@ -25,17 +26,6 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, * to something that actually is in the opp table: */ opp = devfreq_recommended_opp(dev, freq, flags); - - /* - * If the GPU is idle, devfreq is not aware, so just ignore - * it's requests - */ - if (gpu->devfreq.idle_freq) { - gpu->devfreq.idle_freq = *freq; - dev_pm_opp_put(opp); - return 0; - } - if (IS_ERR(opp)) return PTR_ERR(opp); @@ -53,9 +43,6 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, static unsigned long get_freq(struct msm_gpu *gpu) { - if (gpu->devfreq.idle_freq) - return gpu->devfreq.idle_freq; - if (gpu->funcs->gpu_get_freq) return gpu->funcs->gpu_get_freq(gpu); @@ -93,6 +80,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = { .get_cur_freq = msm_devfreq_get_cur_freq, }; +static void msm_devfreq_boost_work(struct kthread_work *work); static void msm_devfreq_idle_work(struct kthread_work *work); void msm_devfreq_init(struct msm_gpu *gpu) @@ -103,6 +91,12 @@ void msm_devfreq_init(struct msm_gpu *gpu) if (!gpu->funcs->gpu_busy) return; + 
dev_pm_qos_add_request(&gpu->pdev->dev, &df->idle_freq, + DEV_PM_QOS_MAX_FREQUENCY, + PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE); + dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq, + DEV_PM_QOS_MIN_FREQUENCY, 0); + msm_devfreq_profile.initial_freq = gpu->fast_rate; /* @@ -133,13 +127,31 @@ void msm_devfreq_init(struct msm_gpu *gpu) gpu->cooling = NULL; } + msm_hrtimer_work_init(&df->boost_work, gpu->worker, msm_devfreq_boost_work, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work, CLOCK_MONOTONIC, HRTIMER_MODE_REL); } +static void cancel_idle_work(struct msm_gpu_devfreq *df) +{ + hrtimer_cancel(&df->idle_work.timer); + kthread_cancel_work_sync(&df->idle_work.work); +} + +static void cancel_boost_work(struct msm_gpu_devfreq *df) +{ + hrtimer_cancel(&df->boost_work.timer); + kthread_cancel_work_sync(&df->boost_work.work); +} + void msm_devfreq_cleanup(struct msm_gpu *gpu) { + struct msm_gpu_devfreq *df = &gpu->devfreq; + devfreq_cooling_unregister(gpu->cooling); + dev_pm_qos_remove_request(&df->boost_freq); + dev_pm_qos_remove_request(&df->idle_freq); } void msm_devfreq_resume(struct msm_gpu *gpu) @@ -152,7 +164,41 @@ void msm_devfreq_resume(struct msm_gpu *gpu) void msm_devfreq_suspend(struct msm_gpu *gpu) { - devfreq_suspend_device(gpu->devfreq.devfreq); + struct msm_gpu_devfreq *df = &gpu->devfreq; + + devfreq_suspend_device(df->devfreq); + + cancel_idle_work(df); + cancel_boost_work(df); +} + +static void msm_devfreq_boost_work(struct kthread_work *work) +{ + struct msm_gpu_devfreq *df = container_of(work, + struct msm_gpu_devfreq, boost_work.work); + + dev_pm_qos_update_request(&df->boost_freq, 0); +} + +void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor) +{ + struct msm_gpu_devfreq *df = &gpu->devfreq; + uint64_t freq; + + freq = get_freq(gpu); + freq *= factor; + + /* + * A nice little trap is that PM QoS operates in terms of KHz, + * while devfreq operates in terms of Hz: + */ + do_div(freq, HZ_PER_KHZ); + + dev_pm_qos_update_request(&df->boost_freq, freq); + + msm_hrtimer_queue_work(&df->boost_work, + ms_to_ktime(msm_devfreq_profile.polling_ms), + HRTIMER_MODE_REL); } void msm_devfreq_active(struct msm_gpu *gpu) @@ -160,7 +206,6 @@ void msm_devfreq_active(struct msm_gpu *gpu) struct msm_gpu_devfreq *df = &gpu->devfreq; struct devfreq_dev_status status; unsigned int idle_time; - unsigned long target_freq = df->idle_freq; if (!df->devfreq) return; @@ -168,13 +213,7 @@ void msm_devfreq_active(struct msm_gpu *gpu) /* * Cancel any pending transition to idle frequency: */ - hrtimer_cancel(&df->idle_work.timer); - - /* - * Hold devfreq lock to synchronize with get_dev_status()/ - * target() callbacks - */ - mutex_lock(&df->devfreq->lock); + cancel_idle_work(df); idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time)); @@ -183,21 +222,18 @@ void msm_devfreq_active(struct msm_gpu *gpu) * interval, then we won't meet the threshold of busyness for * the governor to ramp up the freq.. 
so give some boost */ - if (idle_time > msm_devfreq_profile.polling_ms/2) { - target_freq *= 2; + if (idle_time > msm_devfreq_profile.polling_ms) { + msm_devfreq_boost(gpu, 2); } - df->idle_freq = 0; - - msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0); + dev_pm_qos_update_request(&df->idle_freq, + PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE); /* * Reset the polling interval so we aren't inconsistent * about freq vs busy/total cycles */ msm_devfreq_get_dev_status(&gpu->pdev->dev, &status); - - mutex_unlock(&df->devfreq->lock); } @@ -206,23 +242,11 @@ static void msm_devfreq_idle_work(struct kthread_work *work) struct msm_gpu_devfreq *df = container_of(work, struct msm_gpu_devfreq, idle_work.work); struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq); - unsigned long idle_freq, target_freq = 0; - - /* - * Hold devfreq lock to synchronize with get_dev_status()/ - * target() callbacks - */ - mutex_lock(&df->devfreq->lock); - - idle_freq = get_freq(gpu); - - if (gpu->clamp_to_idle) - msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0); df->idle_time = ktime_get(); - df->idle_freq = idle_freq; - mutex_unlock(&df->devfreq->lock); + if (gpu->clamp_to_idle) + dev_pm_qos_update_request(&df->idle_freq, 0); } void msm_devfreq_idle(struct msm_gpu *gpu) diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 6a42b819abc4..2a4f0526cb98 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -198,19 +198,22 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev); struct msm_kms *mdp5_kms_init(struct drm_device *dev); struct msm_kms *dpu_kms_init(struct drm_device *dev); +extern const struct of_device_id dpu_dt_match[]; +extern const struct of_device_id mdp5_dt_match[]; + struct msm_mdss_funcs { int (*enable)(struct msm_mdss *mdss); int (*disable)(struct msm_mdss *mdss); - void (*destroy)(struct drm_device *dev); + void (*destroy)(struct msm_mdss *mdss); }; struct msm_mdss { - struct drm_device *dev; + struct device *dev; const struct msm_mdss_funcs *funcs; }; -int mdp5_mdss_init(struct drm_device *dev); -int dpu_mdss_init(struct drm_device *dev); +int mdp5_mdss_init(struct platform_device *dev); +int dpu_mdss_init(struct platform_device *dev); #define for_each_crtc_mask(dev, crtc, crtc_mask) \ drm_for_each_crtc(crtc, dev) \ diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c index 3a27153eef08..3d3da79fec2a 100644 --- a/drivers/gpu/drm/msm/msm_perf.c +++ b/drivers/gpu/drm/msm/msm_perf.c @@ -155,9 +155,12 @@ static int perf_open(struct inode *inode, struct file *file) struct msm_gpu *gpu = priv->gpu; int ret = 0; - mutex_lock(&dev->struct_mutex); + if (!gpu) + return -ENODEV; - if (perf->open || !gpu) { + mutex_lock(&gpu->lock); + + if (perf->open) { ret = -EBUSY; goto out; } @@ -171,7 +174,7 @@ static int perf_open(struct inode *inode, struct file *file) perf->next_jiffies = jiffies + SAMPLE_TIME; out: - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&gpu->lock); return ret; } diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index b55398a34fa4..81432ec07012 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -86,7 +86,7 @@ struct msm_rd_state { struct msm_gem_submit *submit; /* fifo access is synchronized on the producer side by - * struct_mutex held by submit code (otherwise we could + * gpu->lock held by submit code (otherwise we could * end up w/ cmds logged in different order than they * were executed). 
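As the "nice little trap" comment in the msm_devfreq_boost() hunk above points out, dev_pm_qos frequency requests are expressed in kHz while devfreq rates are in Hz, hence the do_div(freq, HZ_PER_KHZ) before updating boost_freq. A minimal sketch of just that conversion (the helper name is invented for illustration; HZ_PER_KHZ comes from the newly included linux/units.h):

	#include <linux/math64.h>
	#include <linux/units.h>

	/* turn a boosted devfreq rate (Hz, held in a u64) into the kHz
	 * value a DEV_PM_QOS_MIN_FREQUENCY request expects */
	static s32 example_boost_to_qos_khz(u64 rate_hz, unsigned int factor)
	{
		u64 freq = rate_hz * factor;

		do_div(freq, HZ_PER_KHZ);	/* Hz -> kHz, 64-bit safe */

		return (s32)freq;
	}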
And read_lock synchronizes the reads */ @@ -181,9 +181,12 @@ static int rd_open(struct inode *inode, struct file *file) uint32_t gpu_id; int ret = 0; - mutex_lock(&dev->struct_mutex); + if (!gpu) + return -ENODEV; - if (rd->open || !gpu) { + mutex_lock(&gpu->lock); + + if (rd->open) { ret = -EBUSY; goto out; } @@ -200,7 +203,7 @@ static int rd_open(struct inode *inode, struct file *file) rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id)); out: - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&gpu->lock); return ret; } @@ -340,11 +343,10 @@ out_unlock: msm_gem_unlock(&obj->base); } -/* called under struct_mutex */ +/* called under gpu->lock */ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, const char *fmt, ...) { - struct drm_device *dev = submit->dev; struct task_struct *task; char msg[256]; int i, n; @@ -355,7 +357,7 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, /* writing into fifo is serialized by caller, and * rd->read_lock is used to serialize the reads */ - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + WARN_ON(!mutex_is_locked(&submit->gpu->lock)); if (fmt) { va_list args; diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 652b1dedd7c1..3bbf574c3bdc 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -21,11 +21,11 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job) pm_runtime_get_sync(&gpu->pdev->dev); /* TODO move submit path over to using a per-ring lock.. */ - mutex_lock(&gpu->dev->struct_mutex); + mutex_lock(&gpu->lock); msm_gpu_submit(gpu, submit); - mutex_unlock(&gpu->dev->struct_mutex); + mutex_unlock(&gpu->lock); pm_runtime_put(&gpu->pdev->dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 26f9299df881..a3a04e0d76ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -344,38 +344,48 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, { struct nouveau_fence_chan *fctx = chan->fence; struct dma_resv *resv = nvbo->bo.base.resv; - struct dma_resv_iter cursor; - struct dma_fence *fence; - struct nouveau_fence *f; - int ret; + int i, ret; if (!exclusive) { ret = dma_resv_reserve_shared(resv, 1); - if (ret) return ret; } - dma_resv_for_each_fence(&cursor, resv, exclusive, fence) { - struct nouveau_channel *prev = NULL; - bool must_wait = true; - - f = nouveau_local_fence(fence, chan->drm); - if (f) { - rcu_read_lock(); - prev = rcu_dereference(f->channel); - if (prev && (prev == chan || - fctx->sync(f, prev, chan) == 0)) - must_wait = false; - rcu_read_unlock(); - } + /* Waiting for the exclusive fence first causes performance regressions + * under some circumstances. So manually wait for the shared ones first. 
+ */ + for (i = 0; i < 2; ++i) { + struct dma_resv_iter cursor; + struct dma_fence *fence; + + dma_resv_for_each_fence(&cursor, resv, exclusive, fence) { + struct nouveau_fence *f; + + if (i == 0 && dma_resv_iter_is_exclusive(&cursor)) + continue; + + f = nouveau_local_fence(fence, chan->drm); + if (f) { + struct nouveau_channel *prev; + bool must_wait = true; + + rcu_read_lock(); + prev = rcu_dereference(f->channel); + if (prev && (prev == chan || + fctx->sync(f, prev, chan) == 0)) + must_wait = false; + rcu_read_unlock(); + if (!must_wait) + continue; + } - if (must_wait) { ret = dma_fence_wait(fence, intr); if (ret) return ret; } } + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 7afe28408085..11ad210919c8 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -648,6 +648,8 @@ void radeon_driver_lastclose_kms(struct drm_device *dev) int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) { struct radeon_device *rdev = dev->dev_private; + struct radeon_fpriv *fpriv; + struct radeon_vm *vm; int r; file_priv->driver_priv = NULL; @@ -660,48 +662,52 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) /* new gpu have virtual address space support */ if (rdev->family >= CHIP_CAYMAN) { - struct radeon_fpriv *fpriv; - struct radeon_vm *vm; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); if (unlikely(!fpriv)) { r = -ENOMEM; - goto out_suspend; + goto err_suspend; } if (rdev->accel_working) { vm = &fpriv->vm; r = radeon_vm_init(rdev, vm); - if (r) { - kfree(fpriv); - goto out_suspend; - } + if (r) + goto err_fpriv; r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (r) { - radeon_vm_fini(rdev, vm); - kfree(fpriv); - goto out_suspend; - } + if (r) + goto err_vm_fini; /* map the ib pool buffer read only into * virtual address space */ vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo); + if (!vm->ib_bo_va) { + r = -ENOMEM; + goto err_vm_fini; + } + r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, RADEON_VA_IB_OFFSET, RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); - if (r) { - radeon_vm_fini(rdev, vm); - kfree(fpriv); - goto out_suspend; - } + if (r) + goto err_vm_fini; } file_priv->driver_priv = fpriv; } -out_suspend: + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + return 0; + +err_vm_fini: + radeon_vm_fini(rdev, vm); +err_fpriv: + kfree(fpriv); + +err_suspend: pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return r; diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index 511a942e851d..ca4a36464340 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -513,7 +513,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, * @allocated: allocated a new handle? * * Validates the handle and return the found session index or -EINVAL - * we we don't have another free session index. + * we don't have another free session index. 
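The radeon_driver_open_kms() rework above replaces repeated inline cleanup with the kernel's usual unwind-ladder idiom: each err_* label releases exactly what was acquired before the failing step, in reverse order, so every failure site needs only a single goto. A tiny standalone model of the shape (userspace C, resource names invented):

	#include <errno.h>
	#include <stdlib.h>

	static int setup_three(void **out_a, void **out_b, void **out_c)
	{
		void *a, *b, *c;
		int ret;

		a = malloc(16);
		if (!a)
			return -ENOMEM;

		b = malloc(16);
		if (!b) {
			ret = -ENOMEM;
			goto err_free_a;
		}

		c = malloc(16);
		if (!c) {
			ret = -ENOMEM;
			goto err_free_b;
		}

		*out_a = a; *out_b = b; *out_c = c;
		return 0;

	err_free_b:
		free(b);
	err_free_a:
		free(a);
		return ret;
	}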
*/ static int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle, bool *allocated) diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index d59dca5efb52..fa5cfda4e90e 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -8,6 +8,7 @@ config DRM_ROCKCHIP select DRM_PANEL select VIDEOMODE_HELPERS select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP + select DRM_DP_HELPER if ROCKCHIP_ANALOGIX_DP select DRM_DW_HDMI if ROCKCHIP_DW_HDMI select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index b64d93da651d..5e2b0175df36 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -658,8 +658,10 @@ int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node) return -EPROBE_DEFER; phy = platform_get_drvdata(pdev); - if (!phy) + if (!phy) { + put_device(&pdev->dev); return -EPROBE_DEFER; + } hdmi->phy = phy; diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig index dc88adc7ba40..18c319b804c0 100644 --- a/drivers/gpu/drm/tegra/Kconfig +++ b/drivers/gpu/drm/tegra/Kconfig @@ -13,6 +13,9 @@ config DRM_TEGRA select INTERCONNECT select IOMMU_IOVA select CEC_CORE if CEC_NOTIFIER + select SND_SIMPLE_CARD if SND_SOC_TEGRA20_SPDIF + select SND_SOC_HDMI_CODEC if SND_SOC_TEGRA20_SPDIF + select SND_AUDIO_GRAPH_CARD if SND_SOC_TEGRA20_SPDIF help Choose this option if you have an NVIDIA Tegra SoC. diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile index d801909182cf..df6cc986aeba 100644 --- a/drivers/gpu/drm/tegra/Makefile +++ b/drivers/gpu/drm/tegra/Makefile @@ -23,7 +23,8 @@ tegra-drm-y := \ gr2d.o \ gr3d.o \ falcon.o \ - vic.o + vic.o \ + nvdec.o tegra-drm-y += trace.o diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index a29d64f87563..eb70eee8992a 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -11,9 +11,12 @@ #include <linux/interconnect.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_opp.h> #include <linux/pm_runtime.h> #include <linux/reset.h> +#include <soc/tegra/common.h> #include <soc/tegra/pmc.h> #include <drm/drm_atomic.h> @@ -890,11 +893,9 @@ static int tegra_cursor_atomic_check(struct drm_plane *plane, return 0; } -static void tegra_cursor_atomic_update(struct drm_plane *plane, - struct drm_atomic_state *state) +static void __tegra_cursor_atomic_update(struct drm_plane *plane, + struct drm_plane_state *new_state) { - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state); struct tegra_dc *dc = to_tegra_dc(new_state->crtc); struct tegra_drm *tegra = plane->dev->dev_private; @@ -990,6 +991,14 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane, tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION); } +static void tegra_cursor_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); + + __tegra_cursor_atomic_update(plane, new_state); +} + static void tegra_cursor_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *state) { @@ -1009,12 +1018,78 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane, tegra_dc_writel(dc, value, 
DC_DISP_DISP_WIN_OPTIONS); } +static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc_state *crtc_state; + int min_scale, max_scale; + int err; + + crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + + if (!crtc_state->active) + return -EINVAL; + + if (plane->state->crtc != new_state->crtc || + plane->state->src_w != new_state->src_w || + plane->state->src_h != new_state->src_h || + plane->state->crtc_w != new_state->crtc_w || + plane->state->crtc_h != new_state->crtc_h || + plane->state->fb != new_state->fb || + plane->state->fb == NULL) + return -EINVAL; + + min_scale = (1 << 16) / 8; + max_scale = (8 << 16) / 1; + + err = drm_atomic_helper_check_plane_state(new_state, crtc_state, min_scale, max_scale, + true, true); + if (err < 0) + return err; + + if (new_state->visible != plane->state->visible) + return -EINVAL; + + return 0; +} + +static void tegra_cursor_atomic_async_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); + struct tegra_dc *dc = to_tegra_dc(new_state->crtc); + + plane->state->src_x = new_state->src_x; + plane->state->src_y = new_state->src_y; + plane->state->crtc_x = new_state->crtc_x; + plane->state->crtc_y = new_state->crtc_y; + + if (new_state->visible) { + struct tegra_plane *p = to_tegra_plane(plane); + u32 value; + + __tegra_cursor_atomic_update(plane, new_state); + + value = (WIN_A_ACT_REQ << p->index) << 8 | GENERAL_UPDATE; + tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); + (void)tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); + + value = (WIN_A_ACT_REQ << p->index) | GENERAL_ACT_REQ; + tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); + (void)tegra_dc_readl(dc, DC_CMD_STATE_CONTROL); + } +} + static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = { .prepare_fb = tegra_plane_prepare_fb, .cleanup_fb = tegra_plane_cleanup_fb, .atomic_check = tegra_cursor_atomic_check, .atomic_update = tegra_cursor_atomic_update, .atomic_disable = tegra_cursor_atomic_disable, + .atomic_async_check = tegra_cursor_atomic_async_check, + .atomic_async_update = tegra_cursor_atomic_async_update, }; static const uint64_t linear_modifiers[] = { @@ -1267,9 +1342,9 @@ static struct drm_plane *tegra_dc_add_planes(struct drm_device *drm, err = PTR_ERR(planes[i]); while (i--) - tegra_plane_funcs.destroy(planes[i]); + planes[i]->funcs->destroy(planes[i]); - tegra_plane_funcs.destroy(primary); + primary->funcs->destroy(primary); return ERR_PTR(err); } } @@ -1762,10 +1837,55 @@ static int tegra_dc_state_setup_clock(struct tegra_dc *dc, return 0; } -static void tegra_dc_commit_state(struct tegra_dc *dc, - struct tegra_dc_state *state) +static void tegra_dc_update_voltage_state(struct tegra_dc *dc, + struct tegra_dc_state *state) +{ + unsigned long rate, pstate; + struct dev_pm_opp *opp; + int err; + + if (!dc->has_opp_table) + return; + + /* calculate actual pixel clock rate which depends on internal divider */ + rate = DIV_ROUND_UP(clk_get_rate(dc->clk) * 2, state->div + 2); + + /* find suitable OPP for the rate */ + opp = dev_pm_opp_find_freq_ceil(dc->dev, &rate); + + /* + * Very high resolution modes may result in a clock rate that is + * above the characterized maximum. In this case it's okay to fall + * back to the characterized maximum. 
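The ceil-then-floor OPP lookup above is worth calling out: dev_pm_opp_find_freq_ceil() returns the lowest OPP that can satisfy the requested rate, and only when the rate exceeds every characterized OPP (-ERANGE) does the code clamp to the table's maximum via dev_pm_opp_find_freq_floor(). A hedged restatement as a standalone helper (name invented; the caller still checks IS_ERR() and does the dev_pm_opp_put()):

	#include <linux/err.h>
	#include <linux/pm_opp.h>

	static struct dev_pm_opp *example_find_opp_clamped(struct device *dev,
							   unsigned long *rate)
	{
		struct dev_pm_opp *opp;

		opp = dev_pm_opp_find_freq_ceil(dev, rate);
		if (opp == ERR_PTR(-ERANGE))	/* rate above every OPP */
			opp = dev_pm_opp_find_freq_floor(dev, rate);

		return opp;	/* may still be an ERR_PTR() for other errors */
	}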
+ */ + if (opp == ERR_PTR(-ERANGE)) + opp = dev_pm_opp_find_freq_floor(dc->dev, &rate); + + if (IS_ERR(opp)) { + dev_err(dc->dev, "failed to find OPP for %luHz: %pe\n", + rate, opp); + return; + } + + pstate = dev_pm_opp_get_required_pstate(opp, 0); + dev_pm_opp_put(opp); + + /* + * The minimum core voltage depends on the pixel clock rate (which + * depends on internal clock divider of the CRTC) and not on the + * rate of the display controller clock. This is why we're not using + * dev_pm_opp_set_rate() API and instead controlling the power domain + * directly. + */ + err = dev_pm_genpd_set_performance_state(dc->dev, pstate); + if (err) + dev_err(dc->dev, "failed to set power domain state to %lu: %d\n", + pstate, err); +} + +static void tegra_dc_set_clock_rate(struct tegra_dc *dc, + struct tegra_dc_state *state) { - u32 value; int err; err = clk_set_parent(dc->clk, state->clk); @@ -1797,10 +1917,7 @@ static void tegra_dc_commit_state(struct tegra_dc *dc, state->div); DRM_DEBUG_KMS("pclk: %lu\n", state->pclk); - if (!dc->soc->has_nvdisplay) { - value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1; - tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); - } + tegra_dc_update_voltage_state(dc, state); } static void tegra_dc_stop(struct tegra_dc *dc) @@ -1991,6 +2108,13 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc, err = host1x_client_suspend(&dc->client); if (err < 0) dev_err(dc->dev, "failed to suspend: %d\n", err); + + if (dc->has_opp_table) { + err = dev_pm_genpd_set_performance_state(dc->dev, 0); + if (err) + dev_err(dc->dev, + "failed to clear power domain state: %d\n", err); + } } static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, @@ -2002,6 +2126,9 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, u32 value; int err; + /* apply PLL changes */ + tegra_dc_set_clock_rate(dc, crtc_state); + err = host1x_client_resume(&dc->client); if (err < 0) { dev_err(dc->dev, "failed to resume: %d\n", err); @@ -2076,8 +2203,11 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, else tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); - /* apply PLL and pixel clock changes */ - tegra_dc_commit_state(dc, crtc_state); + /* apply pixel clock changes */ + if (!dc->soc->has_nvdisplay) { + value = SHIFT_CLK_DIVIDER(crtc_state->div) | PIXEL_CLK_DIVIDER_PCD1; + tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); + } /* program display mode */ tegra_dc_set_timings(dc, mode); @@ -2107,6 +2237,12 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc, tegra_dc_writel(dc, value, DC_COM_RG_UNDERFLOW); } + if (dc->rgb) { + /* XXX: parameterize? 
*/ + value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE; + tegra_dc_writel(dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS); + } + tegra_dc_commit(dc); drm_crtc_vblank_on(crtc); @@ -2685,6 +2821,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = { .has_win_b_vfilter_mem_client = true, .has_win_c_without_vert_filter = true, .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = false, }; static const struct tegra_dc_soc_info tegra30_dc_soc_info = { @@ -2707,6 +2844,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = { .has_win_b_vfilter_mem_client = true, .has_win_c_without_vert_filter = false, .plane_tiled_memory_bandwidth_x2 = true, + .has_pll_d2_out0 = true, }; static const struct tegra_dc_soc_info tegra114_dc_soc_info = { @@ -2729,6 +2867,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = { .has_win_b_vfilter_mem_client = false, .has_win_c_without_vert_filter = false, .plane_tiled_memory_bandwidth_x2 = true, + .has_pll_d2_out0 = true, }; static const struct tegra_dc_soc_info tegra124_dc_soc_info = { @@ -2751,6 +2890,7 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = { .has_win_b_vfilter_mem_client = false, .has_win_c_without_vert_filter = false, .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = true, }; static const struct tegra_dc_soc_info tegra210_dc_soc_info = { @@ -2773,6 +2913,7 @@ static const struct tegra_dc_soc_info tegra210_dc_soc_info = { .has_win_b_vfilter_mem_client = false, .has_win_c_without_vert_filter = false, .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = true, }; static const struct tegra_windowgroup_soc tegra186_dc_wgrps[] = { @@ -2823,6 +2964,7 @@ static const struct tegra_dc_soc_info tegra186_dc_soc_info = { .wgrps = tegra186_dc_wgrps, .num_wgrps = ARRAY_SIZE(tegra186_dc_wgrps), .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = false, }; static const struct tegra_windowgroup_soc tegra194_dc_wgrps[] = { @@ -2873,6 +3015,7 @@ static const struct tegra_dc_soc_info tegra194_dc_soc_info = { .wgrps = tegra194_dc_wgrps, .num_wgrps = ARRAY_SIZE(tegra194_dc_wgrps), .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = false, }; static const struct of_device_id tegra_dc_of_match[] = { @@ -2973,6 +3116,23 @@ static int tegra_dc_couple(struct tegra_dc *dc) return 0; } +static int tegra_dc_init_opp_table(struct tegra_dc *dc) +{ + struct tegra_core_opp_params opp_params = {}; + int err; + + err = devm_tegra_core_dev_init_opp_table(dc->dev, &opp_params); + if (err && err != -ENODEV) + return err; + + if (err) + dc->has_opp_table = false; + else + dc->has_opp_table = true; + + return 0; +} + static int tegra_dc_probe(struct platform_device *pdev) { u64 dma_mask = dma_get_mask(pdev->dev.parent); @@ -3038,6 +3198,10 @@ static int tegra_dc_probe(struct platform_device *pdev) tegra_powergate_power_off(dc->powergate); } + err = tegra_dc_init_opp_table(dc); + if (err < 0) + return err; + dc->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dc->regs)) return PTR_ERR(dc->regs); diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h index 40378308d527..3f91a10ea6c7 100644 --- a/drivers/gpu/drm/tegra/dc.h +++ b/drivers/gpu/drm/tegra/dc.h @@ -76,6 +76,7 @@ struct tegra_dc_soc_info { bool has_win_b_vfilter_mem_client; bool has_win_c_without_vert_filter; bool plane_tiled_memory_bandwidth_x2; + bool has_pll_d2_out0; }; struct tegra_dc { @@ -100,6 +101,8 @@ struct tegra_dc { struct drm_info_list *debugfs_files; const struct tegra_dc_soc_info *soc; + + bool 
has_opp_table; }; static inline struct tegra_dc * diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 48e35d686473..9464f522e257 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -10,6 +10,7 @@ #include <linux/iommu.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <drm/drm_aperture.h> #include <drm/drm_atomic.h> @@ -21,6 +22,10 @@ #include <drm/drm_prime.h> #include <drm/drm_vblank.h> +#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) +#include <asm/dma-iommu.h> +#endif + #include "dc.h" #include "drm.h" #include "gem.h" @@ -116,6 +121,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) static void tegra_drm_context_free(struct tegra_drm_context *context) { context->client->ops->close_channel(context); + pm_runtime_put(context->client->base.dev); kfree(context); } @@ -427,13 +433,20 @@ static int tegra_client_open(struct tegra_drm_file *fpriv, { int err; + err = pm_runtime_resume_and_get(client->base.dev); + if (err) + return err; + err = client->ops->open_channel(client, context); - if (err < 0) + if (err < 0) { + pm_runtime_put(client->base.dev); return err; + } err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL); if (err < 0) { client->ops->close_channel(context); + pm_runtime_put(client->base.dev); return err; } @@ -936,6 +949,17 @@ int host1x_client_iommu_attach(struct host1x_client *client) struct iommu_group *group = NULL; int err; +#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) + if (client->dev->archdata.mapping) { + struct dma_iommu_mapping *mapping = + to_dma_iommu_mapping(client->dev); + arm_iommu_detach_device(client->dev); + arm_iommu_release_mapping(mapping); + + domain = iommu_get_domain_for_dev(client->dev); + } +#endif + /* * If the host1x client is already attached to an IOMMU domain that is * not the shared IOMMU domain, don't try to attach it to a different @@ -1344,15 +1368,18 @@ static const struct of_device_id host1x_drm_subdevs[] = { { .compatible = "nvidia,tegra210-sor", }, { .compatible = "nvidia,tegra210-sor1", }, { .compatible = "nvidia,tegra210-vic", }, + { .compatible = "nvidia,tegra210-nvdec", }, { .compatible = "nvidia,tegra186-display", }, { .compatible = "nvidia,tegra186-dc", }, { .compatible = "nvidia,tegra186-sor", }, { .compatible = "nvidia,tegra186-sor1", }, { .compatible = "nvidia,tegra186-vic", }, + { .compatible = "nvidia,tegra186-nvdec", }, { .compatible = "nvidia,tegra194-display", }, { .compatible = "nvidia,tegra194-dc", }, { .compatible = "nvidia,tegra194-sor", }, { .compatible = "nvidia,tegra194-vic", }, + { .compatible = "nvidia,tegra194-nvdec", }, { /* sentinel */ } }; @@ -1376,6 +1403,7 @@ static struct platform_driver * const drivers[] = { &tegra_gr2d_driver, &tegra_gr3d_driver, &tegra_vic_driver, + &tegra_nvdec_driver, }; static int __init host1x_drm_init(void) diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 8b28327c931c..fc0a19554eac 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -202,5 +202,6 @@ extern struct platform_driver tegra_sor_driver; extern struct platform_driver tegra_gr2d_driver; extern struct platform_driver tegra_gr3d_driver; extern struct platform_driver tegra_vic_driver; +extern struct platform_driver tegra_nvdec_driver; #endif /* HOST1X_DRM_H */ diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index d38fd7e12b57..fce0e52973c2 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ 
-23,86 +23,97 @@ MODULE_IMPORT_NS(DMA_BUF); -static void tegra_bo_put(struct host1x_bo *bo) +static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents) { - struct tegra_bo *obj = host1x_to_tegra_bo(bo); + dma_addr_t next = ~(dma_addr_t)0; + unsigned int count = 0, i; + struct scatterlist *s; - drm_gem_object_put(&obj->gem); -} + for_each_sg(sgl, s, nents, i) { + /* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */ + if (!sg_dma_len(s)) + continue; -/* XXX move this into lib/scatterlist.c? */ -static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg, - unsigned int nents, gfp_t gfp_mask) -{ - struct scatterlist *dst; - unsigned int i; - int err; + if (sg_dma_address(s) != next) { + next = sg_dma_address(s) + sg_dma_len(s); + count++; + } + } - err = sg_alloc_table(sgt, nents, gfp_mask); - if (err < 0) - return err; + return count; +} - dst = sgt->sgl; +static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt) +{ + return sg_dma_count_chunks(sgt->sgl, sgt->nents); +} - for (i = 0; i < nents; i++) { - sg_set_page(dst, sg_page(sg), sg->length, 0); - dst = sg_next(dst); - sg = sg_next(sg); - } +static void tegra_bo_put(struct host1x_bo *bo) +{ + struct tegra_bo *obj = host1x_to_tegra_bo(bo); - return 0; + drm_gem_object_put(&obj->gem); } -static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, - dma_addr_t *phys) +static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, + enum dma_data_direction direction) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); - struct sg_table *sgt; + struct drm_gem_object *gem = &obj->gem; + struct host1x_bo_mapping *map; int err; + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) + return ERR_PTR(-ENOMEM); + + kref_init(&map->ref); + map->bo = host1x_bo_get(bo); + map->direction = direction; + map->dev = dev; + /* - * If we've manually mapped the buffer object through the IOMMU, make - * sure to return the IOVA address of our mapping. - * - * Similarly, for buffers that have been allocated by the DMA API the - * physical address can be used for devices that are not attached to - * an IOMMU. For these devices, callers must pass a valid pointer via - * the @phys argument. - * - * Imported buffers were also already mapped at import time, so the - * existing mapping can be reused. + * Imported buffers need special treatment to satisfy the semantics of DMA-BUF. */ - if (phys) { - *phys = obj->iova; - return NULL; + if (gem->import_attach) { + struct dma_buf *buf = gem->import_attach->dmabuf; + + map->attach = dma_buf_attach(buf, dev); + if (IS_ERR(map->attach)) { + err = PTR_ERR(map->attach); + goto free; + } + + map->sgt = dma_buf_map_attachment(map->attach, direction); + if (IS_ERR(map->sgt)) { + dma_buf_detach(buf, map->attach); + err = PTR_ERR(map->sgt); + goto free; + } + + err = sgt_dma_count_chunks(map->sgt); + map->size = gem->size; + + goto out; } /* * If we don't have a mapping for this buffer yet, return an SG table * so that host1x can do the mapping for us via the DMA API. */ - sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); - if (!sgt) - return ERR_PTR(-ENOMEM); + map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL); + if (!map->sgt) { + err = -ENOMEM; + goto free; + } if (obj->pages) { /* * If the buffer object was allocated from the explicit IOMMU * API code paths, construct an SG table from the pages. 
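sg_dma_count_chunks() above counts how many DMA-contiguous runs a mapped scatterlist collapses into: an entry whose DMA address continues exactly where the previous entry ended belongs to the same chunk. A simplified standalone model of the idea (plain arrays instead of scatterlists, with the expected address advanced on every non-empty entry):

	#include <stdint.h>
	#include <stdio.h>

	struct seg { uint64_t addr, len; };

	static unsigned int count_chunks(const struct seg *s, unsigned int n)
	{
		uint64_t next = ~(uint64_t)0;
		unsigned int i, count = 0;

		for (i = 0; i < n; i++) {
			if (!s[i].len)
				continue;	/* address invalid for empty entries */

			if (s[i].addr != next)
				count++;	/* discontiguity starts a new chunk */

			next = s[i].addr + s[i].len;
		}

		return count;
	}

	int main(void)
	{
		/* 0x1000..0x2fff is contiguous; 0x8000 starts a second chunk */
		const struct seg segs[] = {
			{ 0x1000, 0x1000 }, { 0x2000, 0x1000 }, { 0x8000, 0x1000 },
		};

		printf("%u chunks\n", count_chunks(segs, 3));	/* prints 2 */
		return 0;
	}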
*/ - err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages, - 0, obj->gem.size, GFP_KERNEL); - if (err < 0) - goto free; - } else if (obj->sgt) { - /* - * If the buffer object already has an SG table but no pages - * were allocated for it, it means the buffer was imported and - * the SG table needs to be copied to avoid overwriting any - * other potential users of the original SG table. - */ - err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, - obj->sgt->orig_nents, GFP_KERNEL); + err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size, + GFP_KERNEL); if (err < 0) goto free; } else { @@ -111,25 +122,53 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, * not imported, it had to be allocated with the DMA API, so * the DMA API helper can be used. */ - err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova, - obj->gem.size); + err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size); if (err < 0) goto free; } - return sgt; + err = dma_map_sgtable(dev, map->sgt, direction, 0); + if (err) + goto free_sgt; + +out: + /* + * If we've manually mapped the buffer object through the IOMMU, make sure to return the + * existing IOVA address of our mapping. + */ + if (!obj->mm) { + map->phys = sg_dma_address(map->sgt->sgl); + map->chunks = err; + } else { + map->phys = obj->iova; + map->chunks = 1; + } + + map->size = gem->size; + return map; + +free_sgt: + sg_free_table(map->sgt); free: - kfree(sgt); + kfree(map->sgt); + kfree(map); return ERR_PTR(err); } -static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt) +static void tegra_bo_unpin(struct host1x_bo_mapping *map) { - if (sgt) { - sg_free_table(sgt); - kfree(sgt); + if (map->attach) { + dma_buf_unmap_attachment(map->attach, map->sgt, map->direction); + dma_buf_detach(map->attach->dmabuf, map->attach); + } else { + dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0); + sg_free_table(map->sgt); + kfree(map->sgt); } + + host1x_bo_put(map->bo); + kfree(map); } static void *tegra_bo_mmap(struct host1x_bo *bo) @@ -452,8 +491,18 @@ free: void tegra_bo_free_object(struct drm_gem_object *gem) { struct tegra_drm *tegra = gem->dev->dev_private; + struct host1x_bo_mapping *mapping, *tmp; struct tegra_bo *bo = to_tegra_bo(gem); + /* remove all mappings of this buffer object from any caches */ + list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) { + if (mapping->cache) + host1x_bo_unpin(mapping); + else + dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping, + dev_name(mapping->dev)); + } + if (tegra->domain) tegra_bo_iommu_unmap(tegra, bo); diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c index de288cba3905..e3bb4c99ed39 100644 --- a/drivers/gpu/drm/tegra/gr2d.c +++ b/drivers/gpu/drm/tegra/gr2d.c @@ -4,14 +4,25 @@ */ #include <linux/clk.h> +#include <linux/delay.h> #include <linux/iommu.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> + +#include <soc/tegra/common.h> #include "drm.h" #include "gem.h" #include "gr2d.h" +enum { + RST_MC, + RST_GR2D, + RST_GR2D_MAX, +}; + struct gr2d_soc { unsigned int version; }; @@ -21,6 +32,9 @@ struct gr2d { struct host1x_channel *channel; struct clk *clk; + struct reset_control_bulk_data resets[RST_GR2D_MAX]; + unsigned int nresets; + const struct gr2d_soc *soc; DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS); @@ -56,15 +70,22 @@ static int gr2d_init(struct host1x_client *client) goto free; } + 
pm_runtime_enable(client->dev); + pm_runtime_use_autosuspend(client->dev); + pm_runtime_set_autosuspend_delay(client->dev, 200); + err = tegra_drm_register_client(dev->dev_private, drm); if (err < 0) { dev_err(client->dev, "failed to register client: %d\n", err); - goto detach; + goto disable_rpm; } return 0; -detach: +disable_rpm: + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + host1x_client_iommu_detach(client); free: host1x_syncpt_put(client->syncpts[0]); @@ -85,10 +106,15 @@ static int gr2d_exit(struct host1x_client *client) if (err < 0) return err; + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + host1x_client_iommu_detach(client); host1x_syncpt_put(client->syncpts[0]); host1x_channel_put(gr2d->channel); + gr2d->channel = NULL; + return 0; } @@ -190,6 +216,27 @@ static const u32 gr2d_addr_regs[] = { GR2D_VA_BASE_ADDR_SB, }; +static int gr2d_get_resets(struct device *dev, struct gr2d *gr2d) +{ + int err; + + gr2d->resets[RST_MC].id = "mc"; + gr2d->resets[RST_GR2D].id = "2d"; + gr2d->nresets = RST_GR2D_MAX; + + err = devm_reset_control_bulk_get_optional_exclusive_released( + dev, gr2d->nresets, gr2d->resets); + if (err) { + dev_err(dev, "failed to get reset: %d\n", err); + return err; + } + + if (WARN_ON(!gr2d->resets[RST_GR2D].rstc)) + return -ENOENT; + + return 0; +} + static int gr2d_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -202,6 +249,8 @@ static int gr2d_probe(struct platform_device *pdev) if (!gr2d) return -ENOMEM; + platform_set_drvdata(pdev, gr2d); + gr2d->soc = of_device_get_match_data(dev); syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL); @@ -214,11 +263,9 @@ static int gr2d_probe(struct platform_device *pdev) return PTR_ERR(gr2d->clk); } - err = clk_prepare_enable(gr2d->clk); - if (err) { - dev_err(dev, "cannot turn on clock\n"); + err = gr2d_get_resets(dev, gr2d); + if (err) return err; - } INIT_LIST_HEAD(&gr2d->client.base.list); gr2d->client.base.ops = &gr2d_client_ops; @@ -231,10 +278,13 @@ static int gr2d_probe(struct platform_device *pdev) gr2d->client.version = gr2d->soc->version; gr2d->client.ops = &gr2d_ops; + err = devm_tegra_core_dev_init_opp_table_common(dev); + if (err) + return err; + err = host1x_client_register(&gr2d->client.base); if (err < 0) { dev_err(dev, "failed to register host1x client: %d\n", err); - clk_disable_unprepare(gr2d->clk); return err; } @@ -242,8 +292,6 @@ static int gr2d_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++) set_bit(gr2d_addr_regs[i], gr2d->addr_regs); - platform_set_drvdata(pdev, gr2d); - return 0; } @@ -259,15 +307,100 @@ static int gr2d_remove(struct platform_device *pdev) return err; } + return 0; +} + +static int __maybe_unused gr2d_runtime_suspend(struct device *dev) +{ + struct gr2d *gr2d = dev_get_drvdata(dev); + int err; + + host1x_channel_stop(gr2d->channel); + reset_control_bulk_release(gr2d->nresets, gr2d->resets); + + /* + * GR2D module shouldn't be reset while hardware is idling, otherwise + * host1x's cmdproc will get stuck on trying to access any G2 register + * after reset. GR2D module could be either hot-reset or reset after + * power-gating of the HEG partition. Hence we will put in reset only + * the memory client part of the module, the HEG GENPD will take care + * of resetting GR2D module across power-gating. 
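The gr2d hunk above (and the gr3d one further down) enables runtime PM with a 200 ms autosuspend delay at init time. For context, a hedged sketch of the consumer side of such a setup (a hypothetical job path, not code from this driver):

	#include <linux/pm_runtime.h>

	static int example_run_job(struct device *dev)
	{
		int err;

		err = pm_runtime_resume_and_get(dev);	/* power up if needed */
		if (err)
			return err;

		/* ... program the engine and wait for completion ... */

		pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
		pm_runtime_put_autosuspend(dev);	/* suspends ~200 ms later if idle */

		return 0;
	}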
+ * + * On Tegra20 there is no HEG partition, but it's okay to have + * undetermined h/w state since userspace is expected to reprogram + * the state on each job submission anyways. + */ + err = reset_control_acquire(gr2d->resets[RST_MC].rstc); + if (err) { + dev_err(dev, "failed to acquire MC reset: %d\n", err); + goto acquire_reset; + } + + err = reset_control_assert(gr2d->resets[RST_MC].rstc); + reset_control_release(gr2d->resets[RST_MC].rstc); + if (err) { + dev_err(dev, "failed to assert MC reset: %d\n", err); + goto acquire_reset; + } + clk_disable_unprepare(gr2d->clk); return 0; + +acquire_reset: + reset_control_bulk_acquire(gr2d->nresets, gr2d->resets); + reset_control_bulk_deassert(gr2d->nresets, gr2d->resets); + + return err; } +static int __maybe_unused gr2d_runtime_resume(struct device *dev) +{ + struct gr2d *gr2d = dev_get_drvdata(dev); + int err; + + err = reset_control_bulk_acquire(gr2d->nresets, gr2d->resets); + if (err) { + dev_err(dev, "failed to acquire reset: %d\n", err); + return err; + } + + err = clk_prepare_enable(gr2d->clk); + if (err) { + dev_err(dev, "failed to enable clock: %d\n", err); + goto release_reset; + } + + usleep_range(2000, 4000); + + /* this is a reset array which deasserts both 2D MC and 2D itself */ + err = reset_control_bulk_deassert(gr2d->nresets, gr2d->resets); + if (err) { + dev_err(dev, "failed to deassert reset: %d\n", err); + goto disable_clk; + } + + return 0; + +disable_clk: + clk_disable_unprepare(gr2d->clk); +release_reset: + reset_control_bulk_release(gr2d->nresets, gr2d->resets); + + return err; +} + +static const struct dev_pm_ops tegra_gr2d_pm = { + SET_RUNTIME_PM_OPS(gr2d_runtime_suspend, gr2d_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + struct platform_driver tegra_gr2d_driver = { .driver = { .name = "tegra-gr2d", .of_match_table = gr2d_match, + .pm = &tegra_gr2d_pm, }, .probe = gr2d_probe, .remove = gr2d_remove, diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 24442ade0da3..a1fd3113ea96 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -5,32 +5,47 @@ */ #include <linux/clk.h> +#include <linux/delay.h> #include <linux/host1x.h> #include <linux/iommu.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_opp.h> +#include <linux/pm_runtime.h> #include <linux/reset.h> +#include <soc/tegra/common.h> #include <soc/tegra/pmc.h> #include "drm.h" #include "gem.h" #include "gr3d.h" +enum { + RST_MC, + RST_GR3D, + RST_MC2, + RST_GR3D2, + RST_GR3D_MAX, +}; + struct gr3d_soc { unsigned int version; + unsigned int num_clocks; + unsigned int num_resets; }; struct gr3d { struct tegra_drm_client client; struct host1x_channel *channel; - struct clk *clk_secondary; - struct clk *clk; - struct reset_control *rst_secondary; - struct reset_control *rst; const struct gr3d_soc *soc; + struct clk_bulk_data *clocks; + unsigned int nclocks; + struct reset_control_bulk_data resets[RST_GR3D_MAX]; + unsigned int nresets; DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS); }; @@ -65,15 +80,22 @@ static int gr3d_init(struct host1x_client *client) goto free; } + pm_runtime_enable(client->dev); + pm_runtime_use_autosuspend(client->dev); + pm_runtime_set_autosuspend_delay(client->dev, 200); + err = tegra_drm_register_client(dev->dev_private, drm); if (err < 0) { dev_err(client->dev, "failed to register client: %d\n", err); - goto detach; + goto disable_rpm; } return 0; 
-detach: +disable_rpm: + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + host1x_client_iommu_detach(client); free: host1x_syncpt_put(client->syncpts[0]); @@ -93,10 +115,15 @@ static int gr3d_exit(struct host1x_client *client) if (err < 0) return err; + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + host1x_client_iommu_detach(client); host1x_syncpt_put(client->syncpts[0]); host1x_channel_put(gr3d->channel); + gr3d->channel = NULL; + return 0; } @@ -155,14 +182,20 @@ static const struct tegra_drm_client_ops gr3d_ops = { static const struct gr3d_soc tegra20_gr3d_soc = { .version = 0x20, + .num_clocks = 1, + .num_resets = 2, }; static const struct gr3d_soc tegra30_gr3d_soc = { .version = 0x30, + .num_clocks = 2, + .num_resets = 4, }; static const struct gr3d_soc tegra114_gr3d_soc = { .version = 0x35, + .num_clocks = 1, + .num_resets = 2, }; static const struct of_device_id tegra_gr3d_match[] = { @@ -278,69 +311,216 @@ static const u32 gr3d_addr_regs[] = { GR3D_GLOBAL_SAMP23SURFADDR(15), }; -static int gr3d_probe(struct platform_device *pdev) +static int gr3d_power_up_legacy_domain(struct device *dev, const char *name, + unsigned int id) { - struct device_node *np = pdev->dev.of_node; - struct host1x_syncpt **syncpts; - struct gr3d *gr3d; + struct gr3d *gr3d = dev_get_drvdata(dev); + struct reset_control *reset; + struct clk *clk; unsigned int i; int err; - gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL); - if (!gr3d) - return -ENOMEM; - - gr3d->soc = of_device_get_match_data(&pdev->dev); + /* + * Tegra20 device-tree doesn't specify 3d clock name and there is only + * one clock for Tegra20. Tegra30+ device-trees always specified names + * for the clocks. + */ + if (gr3d->nclocks == 1) { + if (id == TEGRA_POWERGATE_3D1) + return 0; + + clk = gr3d->clocks[0].clk; + } else { + for (i = 0; i < gr3d->nclocks; i++) { + if (WARN_ON(!gr3d->clocks[i].id)) + continue; + + if (!strcmp(gr3d->clocks[i].id, name)) { + clk = gr3d->clocks[i].clk; + break; + } + } - syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL); - if (!syncpts) - return -ENOMEM; + if (WARN_ON(i == gr3d->nclocks)) + return -EINVAL; + } - gr3d->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(gr3d->clk)) { - dev_err(&pdev->dev, "cannot get clock\n"); - return PTR_ERR(gr3d->clk); + /* + * We use array of resets, which includes MC resets, and MC + * reset shouldn't be asserted while hardware is gated because + * MC flushing will fail for gated hardware. Hence for legacy + * PD we request the individual reset separately. + */ + reset = reset_control_get_exclusive_released(dev, name); + if (IS_ERR(reset)) + return PTR_ERR(reset); + + err = reset_control_acquire(reset); + if (err) { + dev_err(dev, "failed to acquire %s reset: %d\n", name, err); + } else { + err = tegra_powergate_sequence_power_up(id, clk, reset); + reset_control_release(reset); } - gr3d->rst = devm_reset_control_get(&pdev->dev, "3d"); - if (IS_ERR(gr3d->rst)) { - dev_err(&pdev->dev, "cannot get reset\n"); - return PTR_ERR(gr3d->rst); + reset_control_put(reset); + if (err) + return err; + + /* + * tegra_powergate_sequence_power_up() leaves clocks enabled, + * while GENPD not. Hence keep clock-enable balanced. 
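A recurring detail in these gr2d/gr3d hunks is that resets are requested in the "released" state (devm_reset_control_bulk_get_optional_exclusive_released(), reset_control_get_exclusive_released()) and only acquired around the window in which this driver actually toggles them, so ownership can be shared with the power-domain code. A condensed sketch of the resume-side discipline (error logging omitted):

	#include <linux/reset.h>

	static int example_acquire_and_deassert(struct reset_control_bulk_data *resets,
						unsigned int nresets)
	{
		int err;

		err = reset_control_bulk_acquire(nresets, resets);
		if (err)
			return err;

		err = reset_control_bulk_deassert(nresets, resets);
		if (err)
			reset_control_bulk_release(nresets, resets);	/* undo on failure */

		return err;
	}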
+ */ + clk_disable_unprepare(clk); + + return 0; +} + +static void gr3d_del_link(void *link) +{ + device_link_del(link); +} + +static int gr3d_init_power(struct device *dev, struct gr3d *gr3d) +{ + static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL }; + const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME; + struct device **opp_virt_devs, *pd_dev; + struct device_link *link; + unsigned int i; + int err; + + err = of_count_phandle_with_args(dev->of_node, "power-domains", + "#power-domain-cells"); + if (err < 0) { + if (err != -ENOENT) + return err; + + /* + * Older device-trees don't use GENPD. In this case we should + * toggle power domain manually. + */ + err = gr3d_power_up_legacy_domain(dev, "3d", + TEGRA_POWERGATE_3D); + if (err) + return err; + + err = gr3d_power_up_legacy_domain(dev, "3d2", + TEGRA_POWERGATE_3D1); + if (err) + return err; + + return 0; } - if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) { - gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2"); - if (IS_ERR(gr3d->clk_secondary)) { - dev_err(&pdev->dev, "cannot get secondary clock\n"); - return PTR_ERR(gr3d->clk_secondary); + /* + * The PM domain core automatically attaches a single power domain, + * otherwise it skips attaching completely. We have a single domain + * on Tegra20 and two domains on Tegra30+. + */ + if (dev->pm_domain) + return 0; + + err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs); + if (err) + return err; + + for (i = 0; opp_genpd_names[i]; i++) { + pd_dev = opp_virt_devs[i]; + if (!pd_dev) { + dev_err(dev, "failed to get %s power domain\n", + opp_genpd_names[i]); + return -EINVAL; } - gr3d->rst_secondary = devm_reset_control_get(&pdev->dev, - "3d2"); - if (IS_ERR(gr3d->rst_secondary)) { - dev_err(&pdev->dev, "cannot get secondary reset\n"); - return PTR_ERR(gr3d->rst_secondary); + link = device_link_add(dev, pd_dev, link_flags); + if (!link) { + dev_err(dev, "failed to link to %s\n", dev_name(pd_dev)); + return -EINVAL; } + + err = devm_add_action_or_reset(dev, gr3d_del_link, link); + if (err) + return err; } - err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk, - gr3d->rst); + return 0; +} + +static int gr3d_get_clocks(struct device *dev, struct gr3d *gr3d) +{ + int err; + + err = devm_clk_bulk_get_all(dev, &gr3d->clocks); if (err < 0) { - dev_err(&pdev->dev, "failed to power up 3D unit\n"); + dev_err(dev, "failed to get clock: %d\n", err); return err; } + gr3d->nclocks = err; - if (gr3d->clk_secondary) { - err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1, - gr3d->clk_secondary, - gr3d->rst_secondary); - if (err < 0) { - dev_err(&pdev->dev, - "failed to power up secondary 3D unit\n"); - return err; - } + if (gr3d->nclocks != gr3d->soc->num_clocks) { + dev_err(dev, "invalid number of clocks: %u\n", gr3d->nclocks); + return -ENOENT; + } + + return 0; +} + +static int gr3d_get_resets(struct device *dev, struct gr3d *gr3d) +{ + int err; + + gr3d->resets[RST_MC].id = "mc"; + gr3d->resets[RST_MC2].id = "mc2"; + gr3d->resets[RST_GR3D].id = "3d"; + gr3d->resets[RST_GR3D2].id = "3d2"; + gr3d->nresets = gr3d->soc->num_resets; + + err = devm_reset_control_bulk_get_optional_exclusive_released( + dev, gr3d->nresets, gr3d->resets); + if (err) { + dev_err(dev, "failed to get reset: %d\n", err); + return err; } + if (WARN_ON(!gr3d->resets[RST_GR3D].rstc) || + WARN_ON(!gr3d->resets[RST_GR3D2].rstc && gr3d->nresets == 4)) + return -ENOENT; + + return 0; +} + +static int gr3d_probe(struct platform_device *pdev) +{ + struct 
host1x_syncpt **syncpts; + struct gr3d *gr3d; + unsigned int i; + int err; + + gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL); + if (!gr3d) + return -ENOMEM; + + platform_set_drvdata(pdev, gr3d); + + gr3d->soc = of_device_get_match_data(&pdev->dev); + + syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL); + if (!syncpts) + return -ENOMEM; + + err = gr3d_get_clocks(&pdev->dev, gr3d); + if (err) + return err; + + err = gr3d_get_resets(&pdev->dev, gr3d); + if (err) + return err; + + err = gr3d_init_power(&pdev->dev, gr3d); + if (err) + return err; + INIT_LIST_HEAD(&gr3d->client.base.list); gr3d->client.base.ops = &gr3d_client_ops; gr3d->client.base.dev = &pdev->dev; @@ -352,6 +532,10 @@ static int gr3d_probe(struct platform_device *pdev) gr3d->client.version = gr3d->soc->version; gr3d->client.ops = &gr3d_ops; + err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); + if (err) + return err; + err = host1x_client_register(&gr3d->client.base); if (err < 0) { dev_err(&pdev->dev, "failed to register host1x client: %d\n", @@ -363,8 +547,6 @@ static int gr3d_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++) set_bit(gr3d_addr_regs[i], gr3d->addr_regs); - platform_set_drvdata(pdev, gr3d); - return 0; } @@ -380,23 +562,80 @@ static int gr3d_remove(struct platform_device *pdev) return err; } - if (gr3d->clk_secondary) { - reset_control_assert(gr3d->rst_secondary); - tegra_powergate_power_off(TEGRA_POWERGATE_3D1); - clk_disable_unprepare(gr3d->clk_secondary); + return 0; +} + +static int __maybe_unused gr3d_runtime_suspend(struct device *dev) +{ + struct gr3d *gr3d = dev_get_drvdata(dev); + int err; + + host1x_channel_stop(gr3d->channel); + + err = reset_control_bulk_assert(gr3d->nresets, gr3d->resets); + if (err) { + dev_err(dev, "failed to assert reset: %d\n", err); + return err; + } + + usleep_range(10, 20); + + /* + * Older device-trees don't specify MC resets and power-gating can't + * be done safely in that case. Hence we will keep the power ungated + * for older DTBs. For newer DTBs, GENPD will perform the power-gating. 
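+	 * Note that GENPD gates a domain only once every device attached to
+	 * it has been runtime-suspended.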
+ */ + + clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks); + reset_control_bulk_release(gr3d->nresets, gr3d->resets); + + return 0; +} + +static int __maybe_unused gr3d_runtime_resume(struct device *dev) +{ + struct gr3d *gr3d = dev_get_drvdata(dev); + int err; + + err = reset_control_bulk_acquire(gr3d->nresets, gr3d->resets); + if (err) { + dev_err(dev, "failed to acquire reset: %d\n", err); + return err; + } + + err = clk_bulk_prepare_enable(gr3d->nclocks, gr3d->clocks); + if (err) { + dev_err(dev, "failed to enable clock: %d\n", err); + goto release_reset; } - reset_control_assert(gr3d->rst); - tegra_powergate_power_off(TEGRA_POWERGATE_3D); - clk_disable_unprepare(gr3d->clk); + err = reset_control_bulk_deassert(gr3d->nresets, gr3d->resets); + if (err) { + dev_err(dev, "failed to deassert reset: %d\n", err); + goto disable_clk; + } return 0; + +disable_clk: + clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks); +release_reset: + reset_control_bulk_release(gr3d->nresets, gr3d->resets); + + return err; } +static const struct dev_pm_ops tegra_gr3d_pm = { + SET_RUNTIME_PM_OPS(gr3d_runtime_suspend, gr3d_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + struct platform_driver tegra_gr3d_driver = { .driver = { .name = "tegra-gr3d", .of_match_table = tegra_gr3d_match, + .pm = &tegra_gr3d_pm, }, .probe = gr3d_probe, .remove = gr3d_remove, diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index e5d2a4026028..8845af5d325f 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -11,10 +11,14 @@ #include <linux/math64.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/pm_opp.h> #include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> +#include <soc/tegra/common.h> +#include <sound/hdmi-codec.h> + #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_debugfs.h> @@ -78,6 +82,9 @@ struct tegra_hdmi { bool dvi; struct drm_info_list *debugfs_files; + + struct platform_device *audio_pdev; + struct mutex audio_lock; }; static inline struct tegra_hdmi * @@ -360,6 +367,18 @@ static const struct tmds_config tegra124_tmds_config[] = { }, }; +static void tegra_hdmi_audio_lock(struct tegra_hdmi *hdmi) +{ + mutex_lock(&hdmi->audio_lock); + disable_irq(hdmi->irq); +} + +static void tegra_hdmi_audio_unlock(struct tegra_hdmi *hdmi) +{ + enable_irq(hdmi->irq); + mutex_unlock(&hdmi->audio_lock); +} + static int tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pix_clock, struct tegra_hdmi_audio_config *config) @@ -829,6 +848,23 @@ static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi, HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT); } +static int tegra_hdmi_reconfigure_audio(struct tegra_hdmi *hdmi) +{ + int err; + + err = tegra_hdmi_setup_audio(hdmi); + if (err < 0) { + tegra_hdmi_disable_audio_infoframe(hdmi); + tegra_hdmi_disable_audio(hdmi); + } else { + tegra_hdmi_setup_audio_infoframe(hdmi); + tegra_hdmi_enable_audio_infoframe(hdmi); + tegra_hdmi_enable_audio(hdmi); + } + + return err; +} + static bool tegra_output_is_hdmi(struct tegra_output *output) { struct edid *edid; @@ -1135,6 +1171,8 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) u32 value; int err; + tegra_hdmi_audio_lock(hdmi); + /* * The following accesses registers of the display controller, so make * sure it's only executed when the output is attached to one. 
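The audio_lock()/audio_unlock() helpers added above pair a mutex with disable_irq() so that the HDA interrupt handler cannot reconfigure audio while the encoder enable/disable or ALSA callbacks are mid-update. A minimal sketch of the pattern, assuming a tegra_hdmi device as defined in this patch; the function is illustrative and condenses what the hdmi-codec callbacks below do:

static int tegra_hdmi_audio_update_example(struct tegra_hdmi *hdmi,
					   unsigned int sample_rate,
					   unsigned int channels)
{
	int ret = 0;

	/* serialize against the HDA IRQ handler and the encoder paths */
	tegra_hdmi_audio_lock(hdmi);

	hdmi->format.sample_rate = sample_rate;
	hdmi->format.channels = channels;

	/* only touch the hardware while a mode is set and not in DVI mode */
	if (hdmi->pixel_clock && !hdmi->dvi)
		ret = tegra_hdmi_reconfigure_audio(hdmi);

	tegra_hdmi_audio_unlock(hdmi);

	return ret;
}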
@@ -1159,6 +1197,10 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE); tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK); + hdmi->pixel_clock = 0; + + tegra_hdmi_audio_unlock(hdmi); + err = host1x_client_suspend(&hdmi->client); if (err < 0) dev_err(hdmi->dev, "failed to suspend: %d\n", err); @@ -1182,6 +1224,8 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) return; } + tegra_hdmi_audio_lock(hdmi); + /* * Enable and unmask the HDA codec SCRATCH0 register interrupt. This * is used for interoperability between the HDA codec driver and the @@ -1195,7 +1239,7 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) h_back_porch = mode->htotal - mode->hsync_end; h_front_porch = mode->hsync_start - mode->hdisplay; - err = clk_set_rate(hdmi->clk, hdmi->pixel_clock); + err = dev_pm_opp_set_rate(hdmi->dev, hdmi->pixel_clock); if (err < 0) { dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n", err); @@ -1387,6 +1431,8 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) } /* TODO: add HDCP support */ + + tegra_hdmi_audio_unlock(hdmi); } static int @@ -1416,6 +1462,91 @@ static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = { .atomic_check = tegra_hdmi_encoder_atomic_check, }; +static int tegra_hdmi_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms) +{ + struct tegra_hdmi *hdmi = data; + int ret = 0; + + tegra_hdmi_audio_lock(hdmi); + + hdmi->format.sample_rate = hparms->sample_rate; + hdmi->format.channels = hparms->channels; + + if (hdmi->pixel_clock && !hdmi->dvi) + ret = tegra_hdmi_reconfigure_audio(hdmi); + + tegra_hdmi_audio_unlock(hdmi); + + return ret; +} + +static int tegra_hdmi_audio_startup(struct device *dev, void *data) +{ + struct tegra_hdmi *hdmi = data; + int ret; + + ret = host1x_client_resume(&hdmi->client); + if (ret < 0) + dev_err(hdmi->dev, "failed to resume: %d\n", ret); + + return ret; +} + +static void tegra_hdmi_audio_shutdown(struct device *dev, void *data) +{ + struct tegra_hdmi *hdmi = data; + int ret; + + tegra_hdmi_audio_lock(hdmi); + + hdmi->format.sample_rate = 0; + hdmi->format.channels = 0; + + tegra_hdmi_audio_unlock(hdmi); + + ret = host1x_client_suspend(&hdmi->client); + if (ret < 0) + dev_err(hdmi->dev, "failed to suspend: %d\n", ret); +} + +static const struct hdmi_codec_ops tegra_hdmi_codec_ops = { + .hw_params = tegra_hdmi_hw_params, + .audio_startup = tegra_hdmi_audio_startup, + .audio_shutdown = tegra_hdmi_audio_shutdown, +}; + +static int tegra_hdmi_codec_register(struct tegra_hdmi *hdmi) +{ + struct hdmi_codec_pdata codec_data = {}; + + if (hdmi->config->has_hda) + return 0; + + codec_data.ops = &tegra_hdmi_codec_ops; + codec_data.data = hdmi; + codec_data.spdif = 1; + + hdmi->audio_pdev = platform_device_register_data(hdmi->dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + if (IS_ERR(hdmi->audio_pdev)) + return PTR_ERR(hdmi->audio_pdev); + + hdmi->format.channels = 2; + + return 0; +} + +static void tegra_hdmi_codec_unregister(struct tegra_hdmi *hdmi) +{ + if (hdmi->audio_pdev) + platform_device_unregister(hdmi->audio_pdev); +} + static int tegra_hdmi_init(struct host1x_client *client) { struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); @@ -1453,28 +1584,47 @@ static int tegra_hdmi_init(struct host1x_client *client) if (err < 0) { dev_err(client->dev, "failed to enable HDMI regulator: %d\n", 
err); - return err; + goto output_exit; } err = regulator_enable(hdmi->pll); if (err < 0) { dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err); - return err; + goto disable_hdmi; } err = regulator_enable(hdmi->vdd); if (err < 0) { dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err); - return err; + goto disable_pll; + } + + err = tegra_hdmi_codec_register(hdmi); + if (err < 0) { + dev_err(hdmi->dev, "failed to register audio codec: %d\n", err); + goto disable_vdd; } return 0; + +disable_vdd: + regulator_disable(hdmi->vdd); +disable_pll: + regulator_disable(hdmi->pll); +disable_hdmi: + regulator_disable(hdmi->hdmi); +output_exit: + tegra_output_exit(&hdmi->output); + + return err; } static int tegra_hdmi_exit(struct host1x_client *client) { struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); + tegra_hdmi_codec_unregister(hdmi); + tegra_output_exit(&hdmi->output); regulator_disable(hdmi->vdd); @@ -1599,7 +1749,6 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data) { struct tegra_hdmi *hdmi = data; u32 value; - int err; value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_INT_STATUS); tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_INT_STATUS); @@ -1614,16 +1763,7 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data) format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK; tegra_hda_parse_format(format, &hdmi->format); - - err = tegra_hdmi_setup_audio(hdmi); - if (err < 0) { - tegra_hdmi_disable_audio_infoframe(hdmi); - tegra_hdmi_disable_audio(hdmi); - } else { - tegra_hdmi_setup_audio_infoframe(hdmi); - tegra_hdmi_enable_audio_infoframe(hdmi); - tegra_hdmi_enable_audio(hdmi); - } + tegra_hdmi_reconfigure_audio(hdmi); } else { tegra_hdmi_disable_audio_infoframe(hdmi); tegra_hdmi_disable_audio(hdmi); @@ -1651,6 +1791,8 @@ static int tegra_hdmi_probe(struct platform_device *pdev) hdmi->stereo = false; hdmi->dvi = false; + mutex_init(&hdmi->audio_lock); + hdmi->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(hdmi->clk)) { dev_err(&pdev->dev, "failed to get clock\n"); @@ -1732,7 +1874,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, hdmi); - pm_runtime_enable(&pdev->dev); + + err = devm_pm_runtime_enable(&pdev->dev); + if (err) + return err; + + err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); + if (err) + return err; INIT_LIST_HEAD(&hdmi->client.list); hdmi->client.ops = &hdmi_client_ops; @@ -1753,8 +1902,6 @@ static int tegra_hdmi_remove(struct platform_device *pdev) struct tegra_hdmi *hdmi = platform_get_drvdata(pdev); int err; - pm_runtime_disable(&pdev->dev); - err = host1x_client_unregister(&hdmi->client); if (err < 0) { dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h index 3efa1be07ff8..23c4b2115ed1 100644 --- a/drivers/gpu/drm/tegra/hub.h +++ b/drivers/gpu/drm/tegra/hub.h @@ -72,7 +72,6 @@ to_tegra_display_hub_state(struct drm_private_state *priv) return container_of(priv, struct tegra_display_hub_state, base); } -struct tegra_dc; struct tegra_plane; int tegra_display_hub_prepare(struct tegra_display_hub *hub); diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c new file mode 100644 index 000000000000..79e1e88203cf --- /dev/null +++ b/drivers/gpu/drm/tegra/nvdec.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2021, NVIDIA Corporation. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/host1x.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> + +#include <soc/tegra/pmc.h> + +#include "drm.h" +#include "falcon.h" +#include "vic.h" + +struct nvdec_config { + const char *firmware; + unsigned int version; + bool supports_sid; +}; + +struct nvdec { + struct falcon falcon; + + void __iomem *regs; + struct tegra_drm_client client; + struct host1x_channel *channel; + struct device *dev; + struct clk *clk; + + /* Platform configuration */ + const struct nvdec_config *config; +}; + +static inline struct nvdec *to_nvdec(struct tegra_drm_client *client) +{ + return container_of(client, struct nvdec, client); +} + +static inline void nvdec_writel(struct nvdec *nvdec, u32 value, + unsigned int offset) +{ + writel(value, nvdec->regs + offset); +} + +static int nvdec_boot(struct nvdec *nvdec) +{ +#ifdef CONFIG_IOMMU_API + struct iommu_fwspec *spec = dev_iommu_fwspec_get(nvdec->dev); +#endif + int err; + +#ifdef CONFIG_IOMMU_API + if (nvdec->config->supports_sid && spec) { + u32 value; + + value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW); + nvdec_writel(nvdec, value, VIC_TFBIF_TRANSCFG); + + if (spec->num_ids > 0) { + value = spec->ids[0] & 0xffff; + + nvdec_writel(nvdec, value, VIC_THI_STREAMID0); + nvdec_writel(nvdec, value, VIC_THI_STREAMID1); + } + } +#endif + + err = falcon_boot(&nvdec->falcon); + if (err < 0) + return err; + + err = falcon_wait_idle(&nvdec->falcon); + if (err < 0) { + dev_err(nvdec->dev, "falcon boot timed out\n"); + return err; + } + + return 0; +} + +static int nvdec_init(struct host1x_client *client) +{ + struct tegra_drm_client *drm = host1x_to_drm_client(client); + struct drm_device *dev = dev_get_drvdata(client->host); + struct tegra_drm *tegra = dev->dev_private; + struct nvdec *nvdec = to_nvdec(drm); + int err; + + err = host1x_client_iommu_attach(client); + if (err < 0 && err != -ENODEV) { + dev_err(nvdec->dev, "failed to attach to domain: %d\n", err); + return err; + } + + nvdec->channel = host1x_channel_request(client); + if (!nvdec->channel) { + err = -ENOMEM; + goto detach; + } + + client->syncpts[0] = host1x_syncpt_request(client, 0); + if (!client->syncpts[0]) { + err = -ENOMEM; + goto free_channel; + } + + pm_runtime_enable(client->dev); + pm_runtime_use_autosuspend(client->dev); + pm_runtime_set_autosuspend_delay(client->dev, 500); + + err = tegra_drm_register_client(tegra, drm); + if (err < 0) + goto disable_rpm; + + /* + * Inherit the DMA parameters (such as maximum segment size) from the + * parent host1x device. 
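+	 * Note that the dma_parms pointer is shared rather than copied, which
+	 * is why nvdec_exit() resets it to NULL on teardown.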
+ */ + client->dev->dma_parms = client->host->dma_parms; + + return 0; + +disable_rpm: + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + + host1x_syncpt_put(client->syncpts[0]); +free_channel: + host1x_channel_put(nvdec->channel); +detach: + host1x_client_iommu_detach(client); + + return err; +} + +static int nvdec_exit(struct host1x_client *client) +{ + struct tegra_drm_client *drm = host1x_to_drm_client(client); + struct drm_device *dev = dev_get_drvdata(client->host); + struct tegra_drm *tegra = dev->dev_private; + struct nvdec *nvdec = to_nvdec(drm); + int err; + + /* avoid a dangling pointer just in case this disappears */ + client->dev->dma_parms = NULL; + + err = tegra_drm_unregister_client(tegra, drm); + if (err < 0) + return err; + + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + + host1x_syncpt_put(client->syncpts[0]); + host1x_channel_put(nvdec->channel); + host1x_client_iommu_detach(client); + + nvdec->channel = NULL; + + if (client->group) { + dma_unmap_single(nvdec->dev, nvdec->falcon.firmware.phys, + nvdec->falcon.firmware.size, DMA_TO_DEVICE); + tegra_drm_free(tegra, nvdec->falcon.firmware.size, + nvdec->falcon.firmware.virt, + nvdec->falcon.firmware.iova); + } else { + dma_free_coherent(nvdec->dev, nvdec->falcon.firmware.size, + nvdec->falcon.firmware.virt, + nvdec->falcon.firmware.iova); + } + + return 0; +} + +static const struct host1x_client_ops nvdec_client_ops = { + .init = nvdec_init, + .exit = nvdec_exit, +}; + +static int nvdec_load_firmware(struct nvdec *nvdec) +{ + struct host1x_client *client = &nvdec->client.base; + struct tegra_drm *tegra = nvdec->client.drm; + dma_addr_t iova; + size_t size; + void *virt; + int err; + + if (nvdec->falcon.firmware.virt) + return 0; + + err = falcon_read_firmware(&nvdec->falcon, nvdec->config->firmware); + if (err < 0) + return err; + + size = nvdec->falcon.firmware.size; + + if (!client->group) { + virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL); + + err = dma_mapping_error(nvdec->dev, iova); + if (err < 0) + return err; + } else { + virt = tegra_drm_alloc(tegra, size, &iova); + } + + nvdec->falcon.firmware.virt = virt; + nvdec->falcon.firmware.iova = iova; + + err = falcon_load_firmware(&nvdec->falcon); + if (err < 0) + goto cleanup; + + /* + * In this case we have received an IOVA from the shared domain, so we + * need to make sure to get the physical address so that the DMA API + * knows what memory pages to flush the cache for. 
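+	 * The dma_map_single() call below, with the DMA_TO_DEVICE direction,
+	 * is what performs that cache maintenance.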
+ */ + if (client->group) { + dma_addr_t phys; + + phys = dma_map_single(nvdec->dev, virt, size, DMA_TO_DEVICE); + + err = dma_mapping_error(nvdec->dev, phys); + if (err < 0) + goto cleanup; + + nvdec->falcon.firmware.phys = phys; + } + + return 0; + +cleanup: + if (!client->group) + dma_free_coherent(nvdec->dev, size, virt, iova); + else + tegra_drm_free(tegra, size, virt, iova); + + return err; +} + + +static __maybe_unused int nvdec_runtime_resume(struct device *dev) +{ + struct nvdec *nvdec = dev_get_drvdata(dev); + int err; + + err = clk_prepare_enable(nvdec->clk); + if (err < 0) + return err; + + usleep_range(10, 20); + + err = nvdec_load_firmware(nvdec); + if (err < 0) + goto disable; + + err = nvdec_boot(nvdec); + if (err < 0) + goto disable; + + return 0; + +disable: + clk_disable_unprepare(nvdec->clk); + return err; +} + +static __maybe_unused int nvdec_runtime_suspend(struct device *dev) +{ + struct nvdec *nvdec = dev_get_drvdata(dev); + + host1x_channel_stop(nvdec->channel); + + clk_disable_unprepare(nvdec->clk); + + return 0; +} + +static int nvdec_open_channel(struct tegra_drm_client *client, + struct tegra_drm_context *context) +{ + struct nvdec *nvdec = to_nvdec(client); + + context->channel = host1x_channel_get(nvdec->channel); + if (!context->channel) + return -ENOMEM; + + return 0; +} + +static void nvdec_close_channel(struct tegra_drm_context *context) +{ + host1x_channel_put(context->channel); +} + +static const struct tegra_drm_client_ops nvdec_ops = { + .open_channel = nvdec_open_channel, + .close_channel = nvdec_close_channel, + .submit = tegra_drm_submit, +}; + +#define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin" + +static const struct nvdec_config nvdec_t210_config = { + .firmware = NVIDIA_TEGRA_210_NVDEC_FIRMWARE, + .version = 0x21, + .supports_sid = false, +}; + +#define NVIDIA_TEGRA_186_NVDEC_FIRMWARE "nvidia/tegra186/nvdec.bin" + +static const struct nvdec_config nvdec_t186_config = { + .firmware = NVIDIA_TEGRA_186_NVDEC_FIRMWARE, + .version = 0x18, + .supports_sid = true, +}; + +#define NVIDIA_TEGRA_194_NVDEC_FIRMWARE "nvidia/tegra194/nvdec.bin" + +static const struct nvdec_config nvdec_t194_config = { + .firmware = NVIDIA_TEGRA_194_NVDEC_FIRMWARE, + .version = 0x19, + .supports_sid = true, +}; + +static const struct of_device_id tegra_nvdec_of_match[] = { + { .compatible = "nvidia,tegra210-nvdec", .data = &nvdec_t210_config }, + { .compatible = "nvidia,tegra186-nvdec", .data = &nvdec_t186_config }, + { .compatible = "nvidia,tegra194-nvdec", .data = &nvdec_t194_config }, + { }, +}; +MODULE_DEVICE_TABLE(of, tegra_nvdec_of_match); + +static int nvdec_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct host1x_syncpt **syncpts; + struct nvdec *nvdec; + u32 host_class; + int err; + + /* inherit DMA mask from host1x parent */ + err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask); + if (err < 0) { + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); + return err; + } + + nvdec = devm_kzalloc(dev, sizeof(*nvdec), GFP_KERNEL); + if (!nvdec) + return -ENOMEM; + + nvdec->config = of_device_get_match_data(dev); + + syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL); + if (!syncpts) + return -ENOMEM; + + nvdec->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(nvdec->regs)) + return PTR_ERR(nvdec->regs); + + nvdec->clk = devm_clk_get(dev, NULL); + if (IS_ERR(nvdec->clk)) { + dev_err(&pdev->dev, "failed to get clock\n"); + return PTR_ERR(nvdec->clk); + } + + err = 
clk_set_rate(nvdec->clk, ULONG_MAX); + if (err < 0) { + dev_err(&pdev->dev, "failed to set clock rate\n"); + return err; + } + + err = of_property_read_u32(dev->of_node, "nvidia,host1x-class", &host_class); + if (err < 0) + host_class = HOST1X_CLASS_NVDEC; + + nvdec->falcon.dev = dev; + nvdec->falcon.regs = nvdec->regs; + + err = falcon_init(&nvdec->falcon); + if (err < 0) + return err; + + platform_set_drvdata(pdev, nvdec); + + INIT_LIST_HEAD(&nvdec->client.base.list); + nvdec->client.base.ops = &nvdec_client_ops; + nvdec->client.base.dev = dev; + nvdec->client.base.class = host_class; + nvdec->client.base.syncpts = syncpts; + nvdec->client.base.num_syncpts = 1; + nvdec->dev = dev; + + INIT_LIST_HEAD(&nvdec->client.list); + nvdec->client.version = nvdec->config->version; + nvdec->client.ops = &nvdec_ops; + + err = host1x_client_register(&nvdec->client.base); + if (err < 0) { + dev_err(dev, "failed to register host1x client: %d\n", err); + goto exit_falcon; + } + + return 0; + +exit_falcon: + falcon_exit(&nvdec->falcon); + + return err; +} + +static int nvdec_remove(struct platform_device *pdev) +{ + struct nvdec *nvdec = platform_get_drvdata(pdev); + int err; + + err = host1x_client_unregister(&nvdec->client.base); + if (err < 0) { + dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", + err); + return err; + } + + falcon_exit(&nvdec->falcon); + + return 0; +} + +static const struct dev_pm_ops nvdec_pm_ops = { + SET_RUNTIME_PM_OPS(nvdec_runtime_suspend, nvdec_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + +struct platform_driver tegra_nvdec_driver = { + .driver = { + .name = "tegra-nvdec", + .of_match_table = tegra_nvdec_of_match, + .pm = &nvdec_pm_ops + }, + .probe = nvdec_probe, + .remove = nvdec_remove, +}; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVDEC_FIRMWARE); +#endif +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) +MODULE_FIRMWARE(NVIDIA_TEGRA_186_NVDEC_FIRMWARE); +#endif +#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) +MODULE_FIRMWARE(NVIDIA_TEGRA_194_NVDEC_FIRMWARE); +#endif diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index 16a1cdc28657..321cb1f13da6 100644 --- a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c @@ -74,7 +74,7 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane) for (i = 0; i < 3; i++) { copy->iova[i] = DMA_MAPPING_ERROR; - copy->sgt[i] = NULL; + copy->map[i] = NULL; } return ©->base; @@ -138,55 +138,37 @@ const struct drm_plane_funcs tegra_plane_funcs = { static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) { - struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev); unsigned int i; int err; for (i = 0; i < state->base.fb->format->num_planes; i++) { struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); - dma_addr_t phys_addr, *phys; - struct sg_table *sgt; + struct host1x_bo_mapping *map; - /* - * If we're not attached to a domain, we already stored the - * physical address when the buffer was allocated. If we're - * part of a group that's shared between all display - * controllers, we've also already mapped the framebuffer - * through the SMMU. In both cases we can short-circuit the - * code below and retrieve the stored IOV address. 
- */ - if (!domain || dc->client.group) - phys = &phys_addr; - else - phys = NULL; - - sgt = host1x_bo_pin(dc->dev, &bo->base, phys); - if (IS_ERR(sgt)) { - err = PTR_ERR(sgt); + map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE, &dc->client.cache); + if (IS_ERR(map)) { + err = PTR_ERR(map); goto unpin; } - if (sgt) { - err = dma_map_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); - if (err) - goto unpin; - + if (!dc->client.group) { /* * The display controller needs contiguous memory, so * fail if the buffer is discontiguous and we fail to * map its SG table to a single contiguous chunk of * I/O virtual memory. */ - if (sgt->nents > 1) { + if (map->chunks > 1) { err = -EINVAL; goto unpin; } - state->iova[i] = sg_dma_address(sgt->sgl); - state->sgt[i] = sgt; + state->iova[i] = map->phys; } else { - state->iova[i] = phys_addr; + state->iova[i] = bo->iova; } + + state->map[i] = map; } return 0; @@ -195,15 +177,9 @@ unpin: dev_err(dc->dev, "failed to map plane %u: %d\n", i, err); while (i--) { - struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); - struct sg_table *sgt = state->sgt[i]; - - if (sgt) - dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); - - host1x_bo_unpin(dc->dev, &bo->base, sgt); + host1x_bo_unpin(state->map[i]); state->iova[i] = DMA_MAPPING_ERROR; - state->sgt[i] = NULL; + state->map[i] = NULL; } return err; @@ -214,15 +190,9 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state) unsigned int i; for (i = 0; i < state->base.fb->format->num_planes; i++) { - struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); - struct sg_table *sgt = state->sgt[i]; - - if (sgt) - dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); - - host1x_bo_unpin(dc->dev, &bo->base, sgt); + host1x_bo_unpin(state->map[i]); state->iova[i] = DMA_MAPPING_ERROR; - state->sgt[i] = NULL; + state->map[i] = NULL; } } @@ -230,11 +200,14 @@ int tegra_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) { struct tegra_dc *dc = to_tegra_dc(state->crtc); + int err; if (!state->fb) return 0; - drm_gem_plane_helper_prepare_fb(plane, state); + err = drm_gem_plane_helper_prepare_fb(plane, state); + if (err < 0) + return err; return tegra_dc_pin(dc, to_tegra_plane_state(state)); } diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h index d9470780c803..dfb20712fbd7 100644 --- a/drivers/gpu/drm/tegra/plane.h +++ b/drivers/gpu/drm/tegra/plane.h @@ -43,7 +43,7 @@ struct tegra_plane_legacy_blending_state { struct tegra_plane_state { struct drm_plane_state base; - struct sg_table *sgt[3]; + struct host1x_bo_mapping *map[3]; dma_addr_t iova[3]; struct tegra_bo_tiling tiling; diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index 606c78a2b988..ff8fce36d2aa 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -17,6 +17,8 @@ struct tegra_rgb { struct tegra_output output; struct tegra_dc *dc; + struct clk *pll_d_out0; + struct clk *pll_d2_out0; struct clk *clk_parent; struct clk *clk; }; @@ -116,13 +118,21 @@ static void tegra_rgb_encoder_enable(struct drm_encoder *encoder) DISP_ORDER_RED_BLUE; tegra_dc_writel(rgb->dc, value, DC_DISP_DISP_INTERFACE_CONTROL); - /* XXX: parameterize? 
*/ - value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE; - tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS); - tegra_dc_commit(rgb->dc); } +static bool tegra_rgb_pll_rate_change_allowed(struct tegra_rgb *rgb) +{ + if (!rgb->pll_d2_out0) + return false; + + if (!clk_is_match(rgb->clk_parent, rgb->pll_d_out0) && + !clk_is_match(rgb->clk_parent, rgb->pll_d2_out0)) + return false; + + return true; +} + static int tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, @@ -151,8 +161,17 @@ tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder, * and hope that the desired frequency can be matched (or at least * matched sufficiently close that the panel will still work). */ - div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2; - pclk = 0; + if (tegra_rgb_pll_rate_change_allowed(rgb)) { + /* + * Set display controller clock to x2 of PCLK in order to + * produce higher resolution pulse positions. + */ + div = 2; + pclk *= 2; + } else { + div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2; + pclk = 0; + } err = tegra_dc_state_setup_clock(dc, crtc_state, rgb->clk_parent, pclk, div); @@ -210,6 +229,22 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc) return err; } + rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0"); + if (IS_ERR(rgb->pll_d_out0)) { + err = PTR_ERR(rgb->pll_d_out0); + dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err); + return err; + } + + if (dc->soc->has_pll_d2_out0) { + rgb->pll_d2_out0 = clk_get_sys(NULL, "pll_d2_out0"); + if (IS_ERR(rgb->pll_d2_out0)) { + err = PTR_ERR(rgb->pll_d2_out0); + dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err); + return err; + } + } + dc->rgb = &rgb->output; return 0; @@ -217,9 +252,15 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc) int tegra_dc_rgb_remove(struct tegra_dc *dc) { + struct tegra_rgb *rgb; + if (!dc->rgb) return 0; + rgb = to_rgb(dc->rgb); + clk_put(rgb->pll_d2_out0); + clk_put(rgb->pll_d_out0); + tegra_output_remove(dc->rgb); dc->rgb = NULL; diff --git a/drivers/gpu/drm/tegra/submit.c b/drivers/gpu/drm/tegra/submit.c index 776f825df52f..6d6dd8c35475 100644 --- a/drivers/gpu/drm/tegra/submit.c +++ b/drivers/gpu/drm/tegra/submit.c @@ -64,33 +64,62 @@ static void gather_bo_put(struct host1x_bo *host_bo) kref_put(&bo->ref, gather_bo_release); } -static struct sg_table * -gather_bo_pin(struct device *dev, struct host1x_bo *host_bo, dma_addr_t *phys) +static struct host1x_bo_mapping * +gather_bo_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction) { - struct gather_bo *bo = container_of(host_bo, struct gather_bo, base); - struct sg_table *sgt; + struct gather_bo *gather = container_of(bo, struct gather_bo, base); + struct host1x_bo_mapping *map; int err; - sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); - if (!sgt) + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) return ERR_PTR(-ENOMEM); - err = dma_get_sgtable(bo->dev, sgt, bo->gather_data, bo->gather_data_dma, - bo->gather_data_words * 4); - if (err) { - kfree(sgt); - return ERR_PTR(err); + kref_init(&map->ref); + map->bo = host1x_bo_get(bo); + map->direction = direction; + map->dev = dev; + + map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL); + if (!map->sgt) { + err = -ENOMEM; + goto free; } - return sgt; + err = dma_get_sgtable(gather->dev, map->sgt, gather->gather_data, gather->gather_data_dma, + gather->gather_data_words * 4); + if (err) + goto free_sgt; + + err = dma_map_sgtable(dev, map->sgt, direction, 0); + if (err) + goto free_sgt; + + map->phys = sg_dma_address(map->sgt->sgl); + map->size = 
gather->gather_data_words * 4; + map->chunks = err; + + return map; + +free_sgt: + sg_free_table(map->sgt); + kfree(map->sgt); +free: + kfree(map); + return ERR_PTR(err); } -static void gather_bo_unpin(struct device *dev, struct sg_table *sgt) +static void gather_bo_unpin(struct host1x_bo_mapping *map) { - if (sgt) { - sg_free_table(sgt); - kfree(sgt); - } + if (!map) + return; + + dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0); + sg_free_table(map->sgt); + kfree(map->sgt); + host1x_bo_put(map->bo); + + kfree(map); } static void *gather_bo_mmap(struct host1x_bo *host_bo) @@ -475,8 +504,8 @@ static void release_job(struct host1x_job *job) kfree(job_data->used_mappings); kfree(job_data); - if (pm_runtime_enabled(client->base.dev)) - pm_runtime_put_autosuspend(client->base.dev); + pm_runtime_mark_last_busy(client->base.dev); + pm_runtime_put_autosuspend(client->base.dev); } int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data, @@ -560,12 +589,10 @@ int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data, } /* Boot engine. */ - if (pm_runtime_enabled(context->client->base.dev)) { - err = pm_runtime_resume_and_get(context->client->base.dev); - if (err < 0) { - SUBMIT_ERR(context, "could not power up engine: %d", err); - goto unpin_job; - } + err = pm_runtime_resume_and_get(context->client->base.dev); + if (err < 0) { + SUBMIT_ERR(context, "could not power up engine: %d", err); + goto unpin_job; } job->user_data = job_data; diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c index 690a339c52ec..9ab9179d2026 100644 --- a/drivers/gpu/drm/tegra/uapi.c +++ b/drivers/gpu/drm/tegra/uapi.c @@ -17,11 +17,7 @@ static void tegra_drm_mapping_release(struct kref *ref) struct tegra_drm_mapping *mapping = container_of(ref, struct tegra_drm_mapping, ref); - if (mapping->sgt) - dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction, - DMA_ATTR_SKIP_CPU_SYNC); - - host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt); + host1x_bo_unpin(mapping->map); host1x_bo_put(mapping->bo); kfree(mapping); @@ -159,6 +155,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f struct drm_tegra_channel_map *args = data; struct tegra_drm_mapping *mapping; struct tegra_drm_context *context; + enum dma_data_direction direction; int err = 0; if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE) @@ -180,68 +177,53 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f kref_init(&mapping->ref); - mapping->dev = context->client->base.dev; mapping->bo = tegra_gem_lookup(file, args->handle); if (!mapping->bo) { err = -EINVAL; - goto unlock; + goto free; } - if (context->client->base.group) { - /* IOMMU domain managed directly using IOMMU API */ - host1x_bo_pin(mapping->dev, mapping->bo, &mapping->iova); - } else { - switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) { - case DRM_TEGRA_CHANNEL_MAP_READ_WRITE: - mapping->direction = DMA_BIDIRECTIONAL; - break; - - case DRM_TEGRA_CHANNEL_MAP_WRITE: - mapping->direction = DMA_FROM_DEVICE; - break; - - case DRM_TEGRA_CHANNEL_MAP_READ: - mapping->direction = DMA_TO_DEVICE; - break; + switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) { + case DRM_TEGRA_CHANNEL_MAP_READ_WRITE: + direction = DMA_BIDIRECTIONAL; + break; - default: - return -EINVAL; - } + case DRM_TEGRA_CHANNEL_MAP_WRITE: + direction = DMA_FROM_DEVICE; + break; - mapping->sgt = host1x_bo_pin(mapping->dev, mapping->bo, NULL); - if (IS_ERR(mapping->sgt)) { - err = PTR_ERR(mapping->sgt); - 
goto put_gem; - } + case DRM_TEGRA_CHANNEL_MAP_READ: + direction = DMA_TO_DEVICE; + break; - err = dma_map_sgtable(mapping->dev, mapping->sgt, mapping->direction, - DMA_ATTR_SKIP_CPU_SYNC); - if (err) - goto unpin; + default: + err = -EINVAL; + goto put_gem; + } - mapping->iova = sg_dma_address(mapping->sgt->sgl); + mapping->map = host1x_bo_pin(context->client->base.dev, mapping->bo, direction, NULL); + if (IS_ERR(mapping->map)) { + err = PTR_ERR(mapping->map); + goto put_gem; } + mapping->iova = mapping->map->phys; mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size; err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX), GFP_KERNEL); if (err < 0) - goto unmap; + goto unpin; mutex_unlock(&fpriv->lock); return 0; -unmap: - if (mapping->sgt) { - dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction, - DMA_ATTR_SKIP_CPU_SYNC); - } unpin: - host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt); + host1x_bo_unpin(mapping->map); put_gem: host1x_bo_put(mapping->bo); +free: kfree(mapping); unlock: mutex_unlock(&fpriv->lock); diff --git a/drivers/gpu/drm/tegra/uapi.h b/drivers/gpu/drm/tegra/uapi.h index 12adad770ad3..92ff1e44ff15 100644 --- a/drivers/gpu/drm/tegra/uapi.h +++ b/drivers/gpu/drm/tegra/uapi.h @@ -27,10 +27,9 @@ struct tegra_drm_file { struct tegra_drm_mapping { struct kref ref; - struct device *dev; + struct host1x_bo_mapping *map; struct host1x_bo *bo; - struct sg_table *sgt; - enum dma_data_direction direction; + dma_addr_t iova; dma_addr_t iova_end; }; diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c index c02010ff2b7f..1e342fa3d27b 100644 --- a/drivers/gpu/drm/tegra/vic.c +++ b/drivers/gpu/drm/tegra/vic.c @@ -5,6 +5,7 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/host1x.h> #include <linux/iommu.h> #include <linux/module.h> @@ -151,9 +152,13 @@ static int vic_init(struct host1x_client *client) goto free_channel; } + pm_runtime_enable(client->dev); + pm_runtime_use_autosuspend(client->dev); + pm_runtime_set_autosuspend_delay(client->dev, 500); + err = tegra_drm_register_client(tegra, drm); if (err < 0) - goto free_syncpt; + goto disable_rpm; /* * Inherit the DMA parameters (such as maximum segment size) from the @@ -163,7 +168,10 @@ static int vic_init(struct host1x_client *client) return 0; -free_syncpt: +disable_rpm: + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + host1x_syncpt_put(client->syncpts[0]); free_channel: host1x_channel_put(vic->channel); @@ -188,10 +196,15 @@ static int vic_exit(struct host1x_client *client) if (err < 0) return err; + pm_runtime_dont_use_autosuspend(client->dev); + pm_runtime_force_suspend(client->dev); + host1x_syncpt_put(client->syncpts[0]); host1x_channel_put(vic->channel); host1x_client_iommu_detach(client); + vic->channel = NULL; + if (client->group) { dma_unmap_single(vic->dev, vic->falcon.firmware.phys, vic->falcon.firmware.size, DMA_TO_DEVICE); @@ -232,12 +245,12 @@ static int vic_load_firmware(struct vic *vic) if (!client->group) { virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL); - - err = dma_mapping_error(vic->dev, iova); - if (err < 0) - return err; + if (!virt) + return -ENOMEM; } else { virt = tegra_drm_alloc(tegra, size, &iova); + if (IS_ERR(virt)) + return PTR_ERR(virt); } vic->falcon.firmware.virt = virt; @@ -315,6 +328,8 @@ static int vic_runtime_suspend(struct device *dev) struct vic *vic = dev_get_drvdata(dev); int err; + 
host1x_channel_stop(vic->channel); + err = reset_control_assert(vic->rst); if (err < 0) return err; @@ -330,27 +345,17 @@ static int vic_open_channel(struct tegra_drm_client *client, struct tegra_drm_context *context) { struct vic *vic = to_vic(client); - int err; - - err = pm_runtime_resume_and_get(vic->dev); - if (err < 0) - return err; context->channel = host1x_channel_get(vic->channel); - if (!context->channel) { - pm_runtime_put(vic->dev); + if (!context->channel) return -ENOMEM; - } return 0; } static void vic_close_channel(struct tegra_drm_context *context) { - struct vic *vic = to_vic(context->client); - host1x_channel_put(context->channel); - pm_runtime_put(vic->dev); } static const struct tegra_drm_client_ops vic_ops = { @@ -441,6 +446,12 @@ static int vic_probe(struct platform_device *pdev) return PTR_ERR(vic->clk); } + err = clk_set_rate(vic->clk, ULONG_MAX); + if (err < 0) { + dev_err(&pdev->dev, "failed to set clock rate\n"); + return err; + } + if (!dev->pm_domain) { vic->rst = devm_reset_control_get(dev, "vic"); if (IS_ERR(vic->rst)) { @@ -476,17 +487,8 @@ static int vic_probe(struct platform_device *pdev) goto exit_falcon; } - pm_runtime_enable(&pdev->dev); - if (!pm_runtime_enabled(&pdev->dev)) { - err = vic_runtime_resume(&pdev->dev); - if (err < 0) - goto unregister_client; - } - return 0; -unregister_client: - host1x_client_unregister(&vic->client.base); exit_falcon: falcon_exit(&vic->falcon); @@ -505,11 +507,6 @@ static int vic_remove(struct platform_device *pdev) return err; } - if (pm_runtime_enabled(&pdev->dev)) - pm_runtime_disable(&pdev->dev); - else - vic_runtime_suspend(&pdev->dev); - falcon_exit(&vic->falcon); return 0; @@ -517,6 +514,8 @@ static int vic_remove(struct platform_device *pdev) static const struct dev_pm_ops vic_pm_ops = { SET_RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; struct platform_driver tegra_vic_driver = { diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 0b7a8be37a9f..d191e87a37b5 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -459,7 +459,7 @@ static struct drm_display_mode simpledrm_mode(unsigned int width, { struct drm_display_mode mode = { SIMPLEDRM_MODE(width, height) }; - mode.clock = 60 /* Hz */ * mode.hdisplay * mode.vdisplay; + mode.clock = mode.hdisplay * mode.vdisplay * 60 / 1000 /* kHz */; drm_mode_set_name(&mode); return mode; diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c index 0037eefe3239..a3ad7c9736ec 100644 --- a/drivers/gpu/drm/ttm/ttm_module.c +++ b/drivers/gpu/drm/ttm/ttm_module.c @@ -68,9 +68,11 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp) #if defined(__i386__) || defined(__x86_64__) if (caching == ttm_write_combined) tmp = pgprot_writecombine(tmp); +#ifndef CONFIG_UML else if (boot_cpu_data.x86 > 3) tmp = pgprot_noncached(tmp); -#endif +#endif /* CONFIG_UML */ +#endif /* __i386__ || __x86_64__ */ #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \ defined(__powerpc__) || defined(__mips__) if (caching == ttm_write_combined) diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index a229da58962a..9300d3354c51 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -1262,7 +1262,6 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { struct vc4_dsi *dsi = host_to_dsi(host); - 
int ret; dsi->lanes = device->lanes; dsi->channel = device->channel; @@ -1297,18 +1296,15 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host, return 0; } - ret = component_add(&dsi->pdev->dev, &vc4_dsi_ops); - if (ret) { - mipi_dsi_host_unregister(&dsi->dsi_host); - return ret; - } - - return 0; + return component_add(&dsi->pdev->dev, &vc4_dsi_ops); } static int vc4_dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { + struct vc4_dsi *dsi = host_to_dsi(host); + + component_del(&dsi->pdev->dev, &vc4_dsi_ops); return 0; } @@ -1686,9 +1682,7 @@ static int vc4_dsi_dev_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; struct vc4_dsi *dsi = dev_get_drvdata(dev); - component_del(&pdev->dev, &vc4_dsi_ops); mipi_dsi_host_unregister(&dsi->dsi_host); - return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 21f410901694..3313b92db531 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -279,7 +279,7 @@ void virtio_gpu_deinit(struct drm_device *dev) flush_work(&vgdev->ctrlq.dequeue_work); flush_work(&vgdev->cursorq.dequeue_work); flush_work(&vgdev->config_changed_work); - vgdev->vdev->config->reset(vgdev->vdev); + virtio_reset_device(vgdev->vdev); vgdev->vdev->config->del_vqs(vgdev->vdev); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index d6b66636a19b..ea3ecdda561d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1140,15 +1140,14 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, struct vmw_private *dev_priv, struct vmw_fence_obj **p_fence, uint32_t *p_handle); -extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, +extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, struct vmw_fpriv *vmw_fp, int ret, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj *fence, uint32_t fence_handle, - int32_t out_fence_fd, - struct sync_file *sync_file); + int32_t out_fence_fd); bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd); /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 0ff28f0e3eb4..d49de4905efa 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3879,17 +3879,17 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, * Also if copying fails, user-space will be unable to signal the fence object * so we wait for it immediately, and then unreference the user-space reference. */ -void +int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, struct vmw_fpriv *vmw_fp, int ret, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj *fence, uint32_t fence_handle, - int32_t out_fence_fd, struct sync_file *sync_file) + int32_t out_fence_fd) { struct drm_vmw_fence_rep fence_rep; if (user_fence_rep == NULL) - return; + return 0; memset(&fence_rep, 0, sizeof(fence_rep)); @@ -3917,19 +3917,13 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, * handle. */ if (unlikely(ret != 0) && (fence_rep.error == 0)) { - if (sync_file) - fput(sync_file->file); - - if (fence_rep.fd != -1) { - put_unused_fd(fence_rep.fd); - fence_rep.fd = -1; - } - ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle); VMW_DEBUG_USER("Fence copy error. Syncing.\n"); (void) vmw_fence_obj_wait(fence, false, false, VMW_FENCE_WAIT_TIMEOUT); } + + return ret ? 
-EFAULT : 0; } /** @@ -4266,16 +4260,23 @@ int vmw_execbuf_process(struct drm_file *file_priv, (void) vmw_fence_obj_wait(fence, false, false, VMW_FENCE_WAIT_TIMEOUT); + } + } + + ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, + user_fence_rep, fence, handle, out_fence_fd); + + if (sync_file) { + if (ret) { + /* usercopy of fence failed, put the file object */ + fput(sync_file->file); + put_unused_fd(out_fence_fd); } else { /* Link the fence with the FD created earlier */ fd_install(out_fence_fd, sync_file->file); } } - vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, - user_fence_rep, fence, handle, out_fence_fd, - sync_file); - /* Don't unreference when handing fence out */ if (unlikely(out_fence != NULL)) { *out_fence = fence; @@ -4293,7 +4294,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, */ vmw_validation_unref_lists(&val_ctx); - return 0; + return ret; out_unlock_binding: mutex_unlock(&dev_priv->binding_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 430f83a1847c..59d6a2dd4c2e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -1082,7 +1082,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, } vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence, - handle, -1, NULL); + handle, -1); vmw_fence_obj_unreference(&fence); return 0; out_no_create: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 4e693e8de2c3..bbd2f4ec08ec 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2501,7 +2501,7 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, if (file_priv) vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, user_fence_rep, fence, - handle, -1, NULL); + handle, -1); if (out_fence) *out_fence = fence; else diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index ff2b308d8651..11c409cbc88e 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -24,6 +24,7 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dma/xilinx_dpdma.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/module.h> @@ -1058,14 +1059,18 @@ static void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer, zynqmp_disp_avbuf_set_format(layer->disp, layer, layer->disp_fmt); /* - * Set slave_id for each DMA channel to indicate they're part of a + * Set pconfig for each DMA channel to indicate they're part of a * video group. */ for (i = 0; i < info->num_planes; i++) { struct zynqmp_disp_layer_dma *dma = &layer->dmas[i]; + struct xilinx_dpdma_peripheral_config pconfig = { + .video_group = true, + }; struct dma_slave_config config = { .direction = DMA_MEM_TO_DEV, - .slave_id = 1, + .peripheral_config = &pconfig, + .peripheral_size = sizeof(pconfig), }; dmaengine_slave_config(dma->chan, &config); |
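The closing zynqmp_disp hunk replaces the deprecated slave_id field with the dmaengine peripheral_config mechanism. A minimal sketch of that pattern in isolation, assuming the DPDMA channel has already been requested (for example with dma_request_chan()); the function name is illustrative:

#include <linux/dmaengine.h>
#include <linux/dma/xilinx_dpdma.h>

static int dpdma_join_video_group_example(struct dma_chan *chan)
{
	/* driver-specific settings now travel in peripheral_config */
	struct xilinx_dpdma_peripheral_config pconfig = {
		.video_group = true,
	};
	struct dma_slave_config config = {
		.direction = DMA_MEM_TO_DEV,
		.peripheral_config = &pconfig,
		.peripheral_size = sizeof(pconfig),
	};

	return dmaengine_slave_config(chan, &config);
}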