Diffstat (limited to 'drivers/gpu'): 249 files changed, 9978 insertions, 4306 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index bd943a71756c..5e1bc630b885 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -173,6 +173,12 @@ config DRM_KMS_CMA_HELPER help Choose this if you need the KMS CMA helper functions +config DRM_GEM_SHMEM_HELPER + bool + depends on DRM + help + Choose this if you need the GEM shmem helper functions + config DRM_VM bool depends on DRM && MMU @@ -329,6 +335,8 @@ source "drivers/gpu/drm/tve200/Kconfig" source "drivers/gpu/drm/xen/Kconfig" +source "drivers/gpu/drm/vboxvideo/Kconfig" + # Keep legacy drivers last menuconfig DRM_LEGACY diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 1ac55c65eac0..e630eccb951c 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -25,6 +25,7 @@ drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_DRM_VM) += drm_vm.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o +drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o drm-$(CONFIG_PCI) += ati_pcigart.o drm-$(CONFIG_DRM_PANEL) += drm_panel.o drm-$(CONFIG_OF) += drm_of.o @@ -109,3 +110,4 @@ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/ obj-$(CONFIG_DRM_PL111) += pl111/ obj-$(CONFIG_DRM_TVE200) += tve200/ obj-$(CONFIG_DRM_XEN) += xen/ +obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 7419ea8a388b..8a0732088640 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -974,6 +974,7 @@ amdgpu_pci_remove(struct pci_dev *pdev) DRM_ERROR("Device removal is currently not supported outside of fbcon\n"); drm_dev_unplug(dev); + drm_dev_put(dev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ead851413c0a..16fcb56c232b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -700,6 +700,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_vm_bo_base *bo_base, *tmp; int r = 0; + vm->bulk_moveable &= list_empty(&vm->evicted); + list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { struct amdgpu_bo *bo = bo_base->bo; @@ -947,10 +949,6 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, if (r) return r; - r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats); - if (r) - goto error_free_pt; - if (vm->use_cpu_for_update) { r = amdgpu_bo_kmap(pt, NULL); if (r) @@ -963,6 +961,10 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, pt->parent = amdgpu_bo_ref(cursor.parent->base.bo); amdgpu_vm_bo_base_init(&entry->base, vm, pt); + + r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats); + if (r) + goto error_free_pt; } return 0; @@ -3033,13 +3035,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) goto error_unreserve; + amdgpu_vm_bo_base_init(&vm->root.base, vm, root); + r = amdgpu_vm_clear_bo(adev, vm, root, adev->vm_manager.root_level, vm->pte_support_ats); if (r) goto error_unreserve; - amdgpu_vm_bo_base_init(&vm->root.base, vm, root); amdgpu_bo_unreserve(vm->root.base.bo); if (pasid) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 5533f6e4f4a4..d0309e8c9d12 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -220,6 +220,7 @@ static const struct soc15_reg_golden 
golden_settings_gc_9_1_rv2[] = static const struct soc15_reg_golden golden_settings_gc_9_x_common[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382) }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 9fc3296592fe..98fd9208877f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -886,7 +886,7 @@ static int gmc_v6_0_sw_init(void *handle) pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n"); } - adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits); + adev->need_swiotlb = drm_need_swiotlb(dma_bits); r = gmc_v6_0_init_microcode(adev); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 761dcfb2fec0..3e9c5034febe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1030,7 +1030,7 @@ static int gmc_v7_0_sw_init(void *handle) pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); pr_warn("amdgpu: No coherent DMA available\n"); } - adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits); + adev->need_swiotlb = drm_need_swiotlb(dma_bits); r = gmc_v7_0_init_microcode(adev); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 34440672f938..29dde64bf2e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1155,7 +1155,7 @@ static int gmc_v8_0_sw_init(void *handle) pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); pr_warn("amdgpu: No coherent DMA available\n"); } - adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits); + adev->need_swiotlb = drm_need_swiotlb(dma_bits); r = gmc_v8_0_init_microcode(adev); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 600259b4e291..53327498efbf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -742,7 +742,7 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) } ring->vm_inv_eng = inv_eng - 1; - change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub])); + vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng); dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", ring->name, ring->vm_inv_eng, ring->funcs->vmhub); @@ -1011,7 +1011,7 @@ static int gmc_v9_0_sw_init(void *handle) pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); printk(KERN_WARNING "amdgpu: No coherent DMA available.\n"); } - adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits); + adev->need_swiotlb = drm_need_swiotlb(dma_bits); if (adev->gmc.xgmi.supported) { r = gfxhub_v1_1_get_xgmi_info(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index c63de945c021..0487e3a4e9e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -500,9 +500,7 @@ static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp) struct amdgpu_device *adev = psp->adev; uint32_t reg; - reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000; - WREG32_SOC15(NBIO, 0, mmPCIE_INDEX2, reg); - reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2); + reg = RREG32_PCIE(smnMP1_FIRMWARE_FLAGS | 0x03b00000); return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? 
true : false; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 99ebcf29dcb0..ed89a101f73f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -461,7 +461,6 @@ static int soc15_asic_reset(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: - case CHIP_VEGA20: soc15_asic_get_baco_capability(adev, &baco_reset); break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 47243165a082..ae90a99909ef 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -323,57 +323,7 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) { - uint64_t addr; - struct cik_mqd *m; - int retval; - - retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd), - mqd_mem_obj); - - if (retval != 0) - return -ENOMEM; - - m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr; - addr = (*mqd_mem_obj)->gpu_addr; - - memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256)); - - m->header = 0xC0310800; - m->compute_pipelinestat_enable = 1; - m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; - - m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE | - PRELOAD_REQ; - m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS | - QUANTUM_DURATION(10); - - m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN; - m->cp_mqd_base_addr_lo = lower_32_bits(addr); - m->cp_mqd_base_addr_hi = upper_32_bits(addr); - - m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE; - - /* - * Pipe Priority - * Identifies the pipe relative priority when this queue is connected - * to the pipeline. The pipe priority is against the GFX pipe and HP3D. - * In KFD we are using a fixed pipe priority set to CS_MEDIUM. 
- * 0 = CS_LOW (typically below GFX) - * 1 = CS_MEDIUM (typically between HP3D and GFX - * 2 = CS_HIGH (typically above HP3D) - */ - m->cp_hqd_pipe_priority = 1; - m->cp_hqd_queue_priority = 15; - - *mqd = m; - if (gart_addr) - *gart_addr = addr; - retval = mm->update_mqd(mm, m, q); - - return retval; + return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q); } static int update_mqd_hiq(struct mqd_manager *mm, void *mqd, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2f26581b93ff..fb27783d7a54 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -886,6 +886,7 @@ static void emulated_link_detect(struct dc_link *link) return; } + /* dc_sink_create returns a new reference */ link->local_sink = sink; edid_status = dm_helpers_read_local_edid( @@ -952,6 +953,8 @@ static int dm_resume(void *handle) if (aconnector->fake_enable && aconnector->dc_link->local_sink) aconnector->fake_enable = false; + if (aconnector->dc_sink) + dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; amdgpu_dm_update_connector_after_detect(aconnector); mutex_unlock(&aconnector->hpd_lock); @@ -1061,6 +1064,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) sink = aconnector->dc_link->local_sink; + if (sink) + dc_sink_retain(sink); /* * Edid mgmt connector gets first update only in mode_valid hook and then @@ -1085,21 +1090,24 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) * to it anymore after disconnect, so on next crtc to connector * reshuffle by UMD we will get into unwanted dc_sink release */ - if (aconnector->dc_sink != aconnector->dc_em_sink) - dc_sink_release(aconnector->dc_sink); + dc_sink_release(aconnector->dc_sink); } aconnector->dc_sink = sink; + dc_sink_retain(aconnector->dc_sink); amdgpu_dm_update_freesync_caps(connector, aconnector->edid); } else { amdgpu_dm_update_freesync_caps(connector, NULL); - if (!aconnector->dc_sink) + if (!aconnector->dc_sink) { aconnector->dc_sink = aconnector->dc_em_sink; - else if (aconnector->dc_sink != aconnector->dc_em_sink) dc_sink_retain(aconnector->dc_sink); + } } mutex_unlock(&dev->mode_config.mutex); + + if (sink) + dc_sink_release(sink); return; } @@ -1107,8 +1115,10 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) * TODO: temporary guard to look for proper fix * if this sink is MST sink, we should not do anything */ - if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + dc_sink_release(sink); return; + } if (aconnector->dc_sink == sink) { /* @@ -1117,6 +1127,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) */ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", aconnector->connector_id); + if (sink) + dc_sink_release(sink); return; } @@ -1138,6 +1150,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) amdgpu_dm_update_freesync_caps(connector, NULL); aconnector->dc_sink = sink; + dc_sink_retain(aconnector->dc_sink); if (sink->dc_edid.length == 0) { aconnector->edid = NULL; drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); @@ -1158,11 +1171,15 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) amdgpu_dm_update_freesync_caps(connector, NULL); drm_connector_update_edid_property(connector, NULL); aconnector->num_modes = 0; + 
dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; } mutex_unlock(&dev->mode_config.mutex); + + if (sink) + dc_sink_release(sink); } static void handle_hpd_irq(void *param) @@ -2977,6 +2994,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, return stream; } else { sink = aconnector->dc_sink; + dc_sink_retain(sink); } stream = dc_create_stream_for_sink(sink); @@ -3042,8 +3060,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, update_stream_signal(stream, sink); finish: - if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON) - dc_sink_release(sink); + dc_sink_release(sink); return stream; } @@ -3301,6 +3318,14 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) dm->backlight_dev = NULL; } #endif + + if (aconnector->dc_em_sink) + dc_sink_release(aconnector->dc_em_sink); + aconnector->dc_em_sink = NULL; + if (aconnector->dc_sink) + dc_sink_release(aconnector->dc_sink); + aconnector->dc_sink = NULL; + drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); drm_connector_unregister(connector); drm_connector_cleanup(connector); @@ -3398,10 +3423,12 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) (edid->extensions + 1) * EDID_LENGTH, &init_params); - if (aconnector->base.force == DRM_FORCE_ON) + if (aconnector->base.force == DRM_FORCE_ON) { aconnector->dc_sink = aconnector->dc_link->local_sink ? aconnector->dc_link->local_sink : aconnector->dc_em_sink; + dc_sink_retain(aconnector->dc_sink); + } } static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index f51d52eb52e6..c4ea3a91f17a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -191,6 +191,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) &init_params); dc_sink->priv = aconnector; + /* dc_link_add_remote_sink returns a new reference */ aconnector->dc_sink = dc_sink; if (aconnector->dc_sink) diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 12d1842079ae..eb62d10bb65c 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -1348,12 +1348,12 @@ void dcn_bw_update_from_pplib(struct dc *dc) struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; bool res; - kernel_fpu_begin(); - /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ res = dm_pp_get_clock_levels_by_type_with_voltage( ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); + kernel_fpu_begin(); + if (res) res = verify_clock_values(&fclks); @@ -1372,9 +1372,13 @@ void dcn_bw_update_from_pplib(struct dc *dc) } else BREAK_TO_DEBUGGER(); + kernel_fpu_end(); + res = dm_pp_get_clock_levels_by_type_with_voltage( ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); + kernel_fpu_begin(); + if (res) res = verify_clock_values(&dcfclks); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 7f5a947ad31d..4eba3c4800b6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -794,6 +794,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) sink->link->dongle_max_pix_clk = 
sink_caps.max_hdmi_pixel_clock; sink->converter_disable_audio = converter_disable_audio; + /* dc_sink_create returns a new reference */ link->local_sink = sink; edid_status = dm_helpers_read_local_edid( @@ -2037,6 +2038,9 @@ static enum dc_status enable_link( break; } + if (status == DC_OK) + pipe_ctx->stream->link->link_status.link_active = true; + return status; } @@ -2060,6 +2064,14 @@ static void disable_link(struct dc_link *link, enum signal_type signal) dp_disable_link_phy_mst(link, signal); } else link->link_enc->funcs->disable_output(link->link_enc, signal); + + if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + /* MST disable link only when no stream use the link */ + if (link->mst_stream_alloc_table.stream_count <= 0) + link->link_status.link_active = false; + } else { + link->link_status.link_active = false; + } } static bool dp_active_dongle_validate_timing( @@ -2623,8 +2635,6 @@ void core_link_enable_stream( } } - stream->link->link_status.link_active = true; - core_dc->hwss.enable_audio_stream(pipe_ctx); /* turn off otg test pattern if enable */ @@ -2659,8 +2669,6 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) core_dc->hwss.disable_stream(pipe_ctx, option); disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); - - pipe_ctx->stream->link->link_status.link_active = false; } void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 94a84bc57c7a..bfd27f10879e 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -724,7 +724,7 @@ static void build_vrr_infopacket_v1(enum signal_type signal, static void build_vrr_infopacket_v2(enum signal_type signal, const struct mod_vrr_params *vrr, - const enum color_transfer_func *app_tf, + enum color_transfer_func app_tf, struct dc_info_packet *infopacket) { unsigned int payload_size = 0; @@ -732,8 +732,7 @@ static void build_vrr_infopacket_v2(enum signal_type signal, build_vrr_infopacket_header_v2(signal, infopacket, &payload_size); build_vrr_infopacket_data(vrr, infopacket); - if (app_tf != NULL) - build_vrr_infopacket_fs2_data(*app_tf, infopacket); + build_vrr_infopacket_fs2_data(app_tf, infopacket); build_vrr_infopacket_checksum(&payload_size, infopacket); @@ -757,7 +756,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, const struct dc_stream_state *stream, const struct mod_vrr_params *vrr, enum vrr_packet_type packet_type, - const enum color_transfer_func *app_tf, + enum color_transfer_func app_tf, struct dc_info_packet *infopacket) { /* SPD info packet for FreeSync diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index 4222e403b151..dcef85994c45 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -145,7 +145,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, const struct dc_stream_state *stream, const struct mod_vrr_params *vrr, enum vrr_packet_type packet_type, - const enum color_transfer_func *app_tf, + enum color_transfer_func app_tf, struct dc_info_packet *infopacket); void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index ce177d7f04cb..6bf48934fdc4 
100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -277,8 +277,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set if (!skip_display_settings) phm_notify_smc_display_config_after_ps_adjustment(hwmgr); - if ((hwmgr->request_dpm_level != hwmgr->dpm_level) && - !phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) + if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) hwmgr->dpm_level = hwmgr->request_dpm_level; if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index 4588bddf8b33..615cf2c09e54 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -489,15 +489,16 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr, } int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, - uint8_t id, uint32_t *frequency) + uint8_t clk_id, uint8_t syspll_id, + uint32_t *frequency) { struct amdgpu_device *adev = hwmgr->adev; struct atom_get_smu_clock_info_parameters_v3_1 parameters; struct atom_get_smu_clock_info_output_parameters_v3_1 *output; uint32_t ix; - parameters.clk_id = id; - parameters.syspll_id = 0; + parameters.clk_id = clk_id; + parameters.syspll_id = syspll_id; parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; parameters.dfsdid = 0; @@ -530,20 +531,23 @@ static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr, boot_values->ulSocClk = 0; boot_values->ulDCEFClk = 0; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, SMU11_SYSPLL0_ID, &frequency)) boot_values->ulSocClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, SMU11_SYSPLL0_ID, &frequency)) boot_values->ulDCEFClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, SMU11_SYSPLL0_ID, &frequency)) boot_values->ulEClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, SMU11_SYSPLL0_ID, &frequency)) boot_values->ulVClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, SMU11_SYSPLL0_ID, &frequency)) boot_values->ulDClk = frequency; + + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL1_0_FCLK_ID, SMU11_SYSPLL1_2_ID, &frequency)) + boot_values->ulFClk = frequency; } static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr, @@ -563,19 +567,19 @@ static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr, boot_values->ulSocClk = 0; boot_values->ulDCEFClk = 0; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, 0, &frequency)) boot_values->ulSocClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency)) + if 
(!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, 0, &frequency)) boot_values->ulDCEFClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, 0, &frequency)) boot_values->ulEClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, 0, &frequency)) boot_values->ulVClk = frequency; - if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency)) + if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, 0, &frequency)) boot_values->ulDClk = frequency; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h index fe9e8ceef50e..b7e2651b570b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h @@ -139,6 +139,7 @@ struct pp_atomfwctrl_bios_boot_up_values { uint32_t ulEClk; uint32_t ulVClk; uint32_t ulDClk; + uint32_t ulFClk; uint16_t usVddc; uint16_t usVddci; uint16_t usMvddc; @@ -236,7 +237,8 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, struct pp_atomfwctrl_smc_dpm_parameters *param); int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, - uint8_t id, uint32_t *frequency); + uint8_t clk_id, uint8_t syspll_id, + uint32_t *frequency); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 48187acac59e..83d3d935f3ac 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3491,14 +3491,14 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_PM_STATUS_94, 0); + ixSMU_PM_STATUS_95, 0); for (i = 0; i < 10; i++) { - mdelay(1); + mdelay(500); smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample); tmp = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_PM_STATUS_94); + ixSMU_PM_STATUS_95); if (tmp != 0) break; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 5479125ff4f6..5c4f701939ea 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -2575,10 +2575,10 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk; data->vbios_boot_state.mem_clock = boot_up_values.ulUClk; pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, - SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk); + SMU9_SYSPLL0_SOCCLK_ID, 0, &boot_up_values.ulSocClk); pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, - SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk); + SMU9_SYSPLL0_DCEFCLK_ID, 0, &boot_up_values.ulDCEFClk); data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; @@ -4407,9 +4407,9 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe return ret; features_to_disable = - (features_enabled ^ new_ppfeature_masks) & features_enabled; + features_enabled & ~new_ppfeature_masks; 
features_to_enable = - (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; + ~features_enabled & new_ppfeature_masks; pr_debug("features_to_disable 0x%llx\n", features_to_disable); pr_debug("features_to_enable 0x%llx\n", features_to_enable); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 6c8e78611c03..bdb48e94eff6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -2009,9 +2009,9 @@ static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe return ret; features_to_disable = - (features_enabled ^ new_ppfeature_masks) & features_enabled; + features_enabled & ~new_ppfeature_masks; features_to_enable = - (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; + ~features_enabled & new_ppfeature_masks; pr_debug("features_to_disable 0x%llx\n", features_to_disable); pr_debug("features_to_enable 0x%llx\n", features_to_enable); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index aad79affb081..9aa7bec1b5fe 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -463,9 +463,9 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr) static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state) { dpm_state->soft_min_level = 0x0; - dpm_state->soft_max_level = 0xffff; + dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_state->hard_min_level = 0x0; - dpm_state->hard_max_level = 0xffff; + dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT; } static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, @@ -711,8 +711,10 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!ret, "[SetupDefaultDpmTable] failed to get fclk dpm levels!", return ret); - } else - dpm_table->count = 0; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100; + } vega20_init_dpm_state(&(dpm_table->dpm_state)); /* save a copy of the default DPM table */ @@ -754,6 +756,7 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.eclock = boot_up_values.ulEClk; data->vbios_boot_state.vclock = boot_up_values.ulVClk; data->vbios_boot_state.dclock = boot_up_values.ulDClk; + data->vbios_boot_state.fclock = boot_up_values.ulFClk; data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; smum_send_msg_to_smc_with_parameter(hwmgr, @@ -780,6 +783,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr) static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg; int ret; @@ -816,6 +821,10 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) "[OverridePcieParameters] Attempt to override pcie params failed!", return ret); + data->pcie_parameters_override = 1; + data->pcie_gen_level1 = pcie_gen; + data->pcie_width_level1 = pcie_width; + return 0; } @@ -979,6 +988,8 @@ static int vega20_od8_set_feature_capabilities( } if (data->smu_features[GNLD_DPM_UCLK].enabled) { + pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] = + data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value; if 
(pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] && pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 && pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 && @@ -2314,32 +2325,8 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) { - struct vega20_hwmgr *data = - (struct vega20_hwmgr *)(hwmgr->backend); - uint32_t soft_min_level, soft_max_level; int ret = 0; - soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); - soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); - data->dpm_table.gfx_table.dpm_state.soft_min_level = - data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; - data->dpm_table.gfx_table.dpm_state.soft_max_level = - data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; - - soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); - soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); - data->dpm_table.mem_table.dpm_state.soft_min_level = - data->dpm_table.mem_table.dpm_levels[soft_min_level].value; - data->dpm_table.mem_table.dpm_state.soft_max_level = - data->dpm_table.mem_table.dpm_levels[soft_max_level].value; - - soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); - soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); - data->dpm_table.soc_table.dpm_state.soft_min_level = - data->dpm_table.soc_table.dpm_levels[soft_min_level].value; - data->dpm_table.soc_table.dpm_state.soft_max_level = - data->dpm_table.soc_table.dpm_levels[soft_max_level].value; - ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); PP_ASSERT_WITH_CODE(!ret, "Failed to upload DPM Bootup Levels!", @@ -2641,9 +2628,8 @@ static int vega20_get_sclks(struct pp_hwmgr *hwmgr, struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); int i, count; - PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled, - "[GetSclks]: gfxclk dpm not enabled!\n", - return -EPERM); + if (!data->smu_features[GNLD_DPM_GFXCLK].enabled) + return -1; count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; clocks->num_levels = count; @@ -2670,9 +2656,8 @@ static int vega20_get_memclocks(struct pp_hwmgr *hwmgr, struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table); int i, count; - PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled, - "[GetMclks]: uclk dpm not enabled!\n", - return -EPERM); + if (!data->smu_features[GNLD_DPM_UCLK].enabled) + return -1; count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; clocks->num_levels = data->mclk_latency_table.count = count; @@ -2696,9 +2681,8 @@ static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr, struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table); int i, count; - PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_DCEFCLK].enabled, - "[GetDcfclocks]: dcefclk dpm not enabled!\n", - return -EPERM); + if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled) + return -1; count = (dpm_table->count > MAX_NUM_CLOCKS) ? 
MAX_NUM_CLOCKS : dpm_table->count; clocks->num_levels = count; @@ -2719,9 +2703,8 @@ static int vega20_get_socclocks(struct pp_hwmgr *hwmgr, struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table); int i, count; - PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_SOCCLK].enabled, - "[GetSocclks]: socclk dpm not enabled!\n", - return -EPERM); + if (!data->smu_features[GNLD_DPM_SOCCLK].enabled) + return -1; count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; clocks->num_levels = count; @@ -2799,7 +2782,6 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, data->od8_settings.od8_settings_array; OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table); - struct pp_clock_levels_with_latency clocks; int32_t input_index, input_clk, input_vol, i; int od8_id; int ret; @@ -2858,11 +2840,6 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, return -EOPNOTSUPP; } - ret = vega20_get_memclocks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Attempt to get memory clk levels failed!", - return ret); - for (i = 0; i < size; i += 2) { if (i + 2 > size) { pr_info("invalid number of input parameters %d\n", @@ -2879,11 +2856,11 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, return -EINVAL; } - if (input_clk < clocks.data[0].clocks_in_khz / 1000 || + if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value || input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) { pr_info("clock freq %d is not within allowed range [%d - %d]\n", input_clk, - clocks.data[0].clocks_in_khz / 1000, + od8_settings[OD8_SETTING_UCLK_FMAX].min_value, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); return -EINVAL; } @@ -3088,9 +3065,9 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe return ret; features_to_disable = - (features_enabled ^ new_ppfeature_masks) & features_enabled; + features_enabled & ~new_ppfeature_masks; features_to_enable = - (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; + ~features_enabled & new_ppfeature_masks; pr_debug("features_to_disable 0x%llx\n", features_to_disable); pr_debug("features_to_enable 0x%llx\n", features_to_enable); @@ -3128,7 +3105,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, &(data->dpm_table.fclk_table); int i, now, size = 0; int ret = 0; - uint32_t gen_speed, lane_width; + uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; switch (type) { case PP_SCLK: @@ -3137,10 +3114,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get current gfx clk Failed!", return ret); - ret = vega20_get_sclks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Attempt to get gfx clk levels Failed!", - return ret); + if (vega20_get_sclks(hwmgr, &clocks)) { + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", + now / 100); + break; + } for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -3154,10 +3132,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get current mclk freq Failed!", return ret); - ret = vega20_get_memclocks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Attempt to get memory clk levels Failed!", - return ret); + if (vega20_get_memclocks(hwmgr, &clocks)) { + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", + now / 100); + break; + } for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -3171,10 +3150,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, 
"Attempt to get current socclk freq Failed!", return ret); - ret = vega20_get_socclocks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Attempt to get soc clk levels Failed!", - return ret); + if (vega20_get_socclocks(hwmgr, &clocks)) { + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", + now / 100); + break; + } for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -3200,10 +3180,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get current dcefclk freq Failed!", return ret); - ret = vega20_get_dcefclocks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Attempt to get dcefclk levels Failed!", - return ret); + if (vega20_get_dcefclocks(hwmgr, &clocks)) { + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", + now / 100); + break; + } for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -3212,28 +3193,36 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, break; case PP_PCIE: - gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & + current_gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; - lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & + current_lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; - for (i = 0; i < NUM_LINK_LEVELS; i++) + for (i = 0; i < NUM_LINK_LEVELS; i++) { + if (i == 1 && data->pcie_parameters_override) { + gen_speed = data->pcie_gen_level1; + lane_width = data->pcie_width_level1; + } else { + gen_speed = pptable->PcieGenSpeed[i]; + lane_width = pptable->PcieLaneCount[i]; + } size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, - (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," : - (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," : - (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," : - (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "", - (pptable->PcieLaneCount[i] == 1) ? "x1" : - (pptable->PcieLaneCount[i] == 2) ? "x2" : - (pptable->PcieLaneCount[i] == 3) ? "x4" : - (pptable->PcieLaneCount[i] == 4) ? "x8" : - (pptable->PcieLaneCount[i] == 5) ? "x12" : - (pptable->PcieLaneCount[i] == 6) ? "x16" : "", + (gen_speed == 0) ? "2.5GT/s," : + (gen_speed == 1) ? "5.0GT/s," : + (gen_speed == 2) ? "8.0GT/s," : + (gen_speed == 3) ? "16.0GT/s," : "", + (lane_width == 1) ? "x1" : + (lane_width == 2) ? "x2" : + (lane_width == 3) ? "x4" : + (lane_width == 4) ? "x8" : + (lane_width == 5) ? "x12" : + (lane_width == 6) ? "x16" : "", pptable->LclkFreq[i], - (gen_speed == pptable->PcieGenSpeed[i]) && - (lane_width == pptable->PcieLaneCount[i]) ? + (current_gen_speed == gen_speed) && + (current_lane_width == lane_width) ? 
"*" : ""); + } break; case OD_SCLK: @@ -3288,13 +3277,8 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, } if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - ret = vega20_get_memclocks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Fail to get memory clk levels!", - return ret); - size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", - clocks.data[0].clocks_in_khz / 1000, + od8_settings[OD8_SETTING_UCLK_FMAX].min_value, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); } @@ -3356,6 +3340,31 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr, return ret; } +static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table); + int ret = 0; + + if (data->smu_features[GNLD_DPM_FCLK].enabled) { + PP_ASSERT_WITH_CODE(dpm_table->count > 0, + "[SetFclkToHightestDpmLevel] Dpm table has no entry!", + return -EINVAL); + PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS, + "[SetFclkToHightestDpmLevel] Dpm table has too many entries!", + return -EINVAL); + + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)), + "[SetFclkToHightestDpmLevel] Set soft min fclk failed!", + return ret); + } + + return ret; +} + static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); @@ -3366,8 +3375,10 @@ static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) ret = vega20_set_uclk_to_highest_dpm_level(hwmgr, &data->dpm_table.mem_table); + if (ret) + return ret; - return ret; + return vega20_set_fclk_to_highest_dpm_level(hwmgr); } static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr) @@ -3461,9 +3472,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* gfxclk */ dpm_table = &(data->dpm_table.gfx_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) { @@ -3485,9 +3496,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* memclk */ dpm_table = &(data->dpm_table.mem_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) { @@ -3526,12 +3537,21 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) if 
(hwmgr->display_config->nb_pstate_switch_disable) dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + /* fclk */ + dpm_table = &(data->dpm_table.fclk_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; + if (hwmgr->display_config->nb_pstate_switch_disable) + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + /* vclk */ dpm_table = &(data->dpm_table.vclk_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { @@ -3548,9 +3568,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* dclk */ dpm_table = &(data->dpm_table.dclk_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { @@ -3567,9 +3587,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* socclk */ dpm_table = &(data->dpm_table.soc_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) { @@ -3586,9 +3606,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* eclk */ dpm_table = &(data->dpm_table.eclk_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index 37f5f5e657da..a5bc758ae097 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h @@ -42,6 +42,8 @@ #define AVFS_CURVE 0 #define OD8_HOTCURVE_TEMPERATURE 85 +#define VG20_CLOCK_MAX_DEFAULT 0xFFFF + typedef uint32_t PP_Clock; enum { @@ -219,6 +221,7 @@ struct vega20_vbios_boot_state { uint32_t eclock; uint32_t dclock; uint32_t vclock; + uint32_t fclock; }; #define DPMTABLE_OD_UPDATE_SCLK 0x00000001 @@ -523,6 +526,10 @@ struct vega20_hwmgr { unsigned long metrics_time; SmuMetrics_t metrics_table; + + bool pcie_parameters_override; + uint32_t pcie_gen_level1; + uint32_t pcie_width_level1; }; #define VEGA20_DPM2_NEAR_TDP_DEC 10 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c index 97f8a1a970c3..7a7f15d0c53a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c @@ -32,6 +32,8 @@ #include "cgs_common.h" #include "vega20_pptable.h" +#define VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE 105 + static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, enum phm_platform_caps cap) { @@ -798,6 +800,17 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable return 0; } +static int override_powerplay_table_fantargettemperature(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; + PPTable_t *ppsmc_pptable = (PPTable_t *)(pptable_information->smc_pptable); + + ppsmc_pptable->FanTargetTemperature = VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE; + + return 0; +} + #define VEGA20_ENGINECLOCK_HARDMAX 198000 static int init_powerplay_table_information( struct pp_hwmgr *hwmgr, @@ -887,6 +900,10 @@ static int init_powerplay_table_information( result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable)); + if (result) + return result; + + result = override_powerplay_table_fantargettemperature(hwmgr); return result; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 52abca065764..2d4cfe14f72e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -2330,6 +2330,7 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case UvdBootLevel: @@ -2339,6 +2340,7 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) case LowSclkInterruptThreshold: return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c index 079fc8e8f709..742b3dc1f6cb 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c @@ -40,10 +40,8 @@ bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; uint32_t mp1_fw_flags; - WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, - (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); - - mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) return true; diff --git 
a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index b7ff7d4d6f44..ba00744c3413 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -49,10 +49,8 @@ static bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; uint32_t mp1_fw_flags; - WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, - (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); - - mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index ab50ad06e271..21725c9b9f5e 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -264,37 +264,17 @@ static bool malidp_verify_afbc_framebuffer_caps(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd) { - const struct drm_format_info *info; - - if ((mode_cmd->modifier[0] >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) { - DRM_DEBUG_KMS("Unknown modifier (not Arm)\n"); - return false; - } - - if (mode_cmd->modifier[0] & - ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) { - DRM_DEBUG_KMS("Unsupported modifiers\n"); - return false; - } - - info = drm_get_format_info(dev, mode_cmd); - if (!info) { - DRM_DEBUG_KMS("Unable to get the format information\n"); + if (malidp_format_mod_supported(dev, mode_cmd->pixel_format, + mode_cmd->modifier[0]) == false) return false; - } - - if (info->num_planes != 1) { - DRM_DEBUG_KMS("AFBC buffers expect one plane\n"); - return false; - } if (mode_cmd->offsets[0] != 0) { DRM_DEBUG_KMS("AFBC buffers' plane offset should be 0\n"); return false; } - switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) { - case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16: + switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) { + case AFBC_SIZE_16X16: if ((mode_cmd->width % 16) || (mode_cmd->height % 16)) { DRM_DEBUG_KMS("AFBC buffers must be aligned to 16 pixels\n"); return false; @@ -318,9 +298,10 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev, struct drm_gem_object *objs = NULL; u32 afbc_superblock_size = 0, afbc_superblock_height = 0; u32 afbc_superblock_width = 0, afbc_size = 0; + int bpp = 0; - switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) { - case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16: + switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) { + case AFBC_SIZE_16X16: afbc_superblock_height = 16; afbc_superblock_width = 16; break; @@ -334,15 +315,19 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev, n_superblocks = (mode_cmd->width / afbc_superblock_width) * (mode_cmd->height / afbc_superblock_height); - afbc_superblock_size = info->cpp[0] * afbc_superblock_width * - afbc_superblock_height; + bpp = malidp_format_get_bpp(info->format); + + afbc_superblock_size = (bpp * afbc_superblock_width * afbc_superblock_height) + / BITS_PER_BYTE; afbc_size = ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT); afbc_size += n_superblocks * ALIGN(afbc_superblock_size, AFBC_SUPERBLK_ALIGNMENT); - if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) { - DRM_DEBUG_KMS("Invalid value of pitch (=%u) should be same as width (=%u) * cpp (=%u)\n", - mode_cmd->pitches[0], mode_cmd->width, info->cpp[0]); + if ((mode_cmd->width * bpp) != (mode_cmd->pitches[0] * BITS_PER_BYTE)) { + 
DRM_DEBUG_KMS("Invalid value of (pitch * BITS_PER_BYTE) (=%u) " + "should be same as width (=%u) * bpp (=%u)\n", + (mode_cmd->pitches[0] * BITS_PER_BYTE), + mode_cmd->width, bpp); return false; } @@ -406,6 +391,7 @@ static int malidp_init(struct drm_device *drm) drm->mode_config.max_height = hwdev->max_line_size; drm->mode_config.funcs = &malidp_mode_config_funcs; drm->mode_config.helper_private = &malidp_mode_config_helpers; + drm->mode_config.allow_fb_modifiers = true; ret = malidp_crtc_init(drm); if (ret) diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h index b76c86f18a56..019a682b2716 100644 --- a/drivers/gpu/drm/arm/malidp_drv.h +++ b/drivers/gpu/drm/arm/malidp_drv.h @@ -90,6 +90,12 @@ struct malidp_crtc_state { int malidp_de_planes_init(struct drm_device *drm); int malidp_crtc_init(struct drm_device *drm); +bool malidp_hw_format_is_linear_only(u32 format); +bool malidp_hw_format_is_afbc_only(u32 format); + +bool malidp_format_mod_supported(struct drm_device *drm, + u32 format, u64 modifier); + #ifdef CONFIG_DEBUG_FS void malidp_error(struct malidp_drm *malidp, struct malidp_error_stats *error_stats, u32 status, diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index b9bed1138fa3..8df12e9a33bb 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -49,11 +49,19 @@ static const struct malidp_format_id malidp500_de_formats[] = { { DRM_FORMAT_YUYV, DE_VIDEO1, 13 }, { DRM_FORMAT_NV12, DE_VIDEO1 | SE_MEMWRITE, 14 }, { DRM_FORMAT_YUV420, DE_VIDEO1, 15 }, + { DRM_FORMAT_XYUV8888, DE_VIDEO1, 16 }, + /* These are supported with AFBC only */ + { DRM_FORMAT_YUV420_8BIT, DE_VIDEO1, 14 }, + { DRM_FORMAT_VUY888, DE_VIDEO1, 16 }, + { DRM_FORMAT_VUY101010, DE_VIDEO1, 17 }, + { DRM_FORMAT_YUV420_10BIT, DE_VIDEO1, 18 } }; #define MALIDP_ID(__group, __format) \ ((((__group) & 0x7) << 3) | ((__format) & 0x7)) +#define AFBC_YUV_422_FORMAT_ID MALIDP_ID(5, 1) + #define MALIDP_COMMON_FORMATS \ /* fourcc, layers supporting the format, internal id */ \ { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 0) }, \ @@ -74,11 +82,25 @@ static const struct malidp_format_id malidp500_de_formats[] = { { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \ { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \ { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \ + /* This is only supported with linear modifier */ \ + { DRM_FORMAT_XYUV8888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) },\ + /* This is only supported with AFBC modifier */ \ + { DRM_FORMAT_VUY888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) }, \ { DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \ + /* This is only supported with linear modifier */ \ { DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \ { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) }, \ + /* This is only supported with AFBC modifier */ \ + { DRM_FORMAT_YUV420_8BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \ { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }, \ - { DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)} + /* This is only supported with linear modifier */ \ + { DRM_FORMAT_XVYU2101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \ + /* This is only supported with AFBC modifier */ \ + { DRM_FORMAT_VUY101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \ + { DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}, \ + /* This is 
only supported with AFBC modifier */ \ + { DRM_FORMAT_YUV420_10BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)}, \ + { DRM_FORMAT_P010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)} static const struct malidp_format_id malidp550_de_formats[] = { MALIDP_COMMON_FORMATS, @@ -94,11 +116,14 @@ static const struct malidp_layer malidp500_layers[] = { * yuv2rgb matrix offset, mmu control register offset, rotation_features */ { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE, - MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY }, + MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY, + MALIDP500_DE_LV_AD_CTRL }, { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE, - MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY }, + MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY, + MALIDP500_DE_LG1_AD_CTRL }, { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE, - MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY }, + MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY, + MALIDP500_DE_LG2_AD_CTRL }, }; static const struct malidp_layer malidp550_layers[] = { @@ -106,13 +131,16 @@ static const struct malidp_layer malidp550_layers[] = { * yuv2rgb matrix offset, mmu control register offset, rotation_features */ { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, - MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY }, + MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY, + MALIDP550_DE_LV1_AD_CTRL }, { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, - MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY }, + MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY, + MALIDP550_DE_LG_AD_CTRL }, { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, - MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY }, + MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY, + MALIDP550_DE_LV2_AD_CTRL }, { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, - MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE }, + MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE, 0 }, }; static const struct malidp_layer malidp650_layers[] = { @@ -122,16 +150,44 @@ static const struct malidp_layer malidp650_layers[] = { */ { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, - MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY }, + MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY, + MALIDP550_DE_LV1_AD_CTRL }, { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE, 0, MALIDP650_DE_LG_MMU_CTRL, - ROTATE_COMPRESSED }, + ROTATE_COMPRESSED, MALIDP550_DE_LG_AD_CTRL }, { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, - MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY }, + MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY, + MALIDP550_DE_LV2_AD_CTRL }, { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE, 0, MALIDP650_DE_LS_MMU_CTRL, - ROTATE_NONE }, + ROTATE_NONE, 0 }, +}; + +const u64 malidp_format_modifiers[] = { + /* All RGB formats (except XRGB, RGBX, XBGR, BGRX) */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE), + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR), + + /* All RGB formats > 16bpp (except XRGB, RGBX, XBGR, BGRX) */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE | AFBC_SPLIT), + + /* All 8 or 10 bit YUV 444 formats. 
*/ + /* In DP550, 10 bit YUV 420 format also supported */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE | AFBC_SPLIT), + + /* YUV 420, 422 P1 8 bit and YUV 444 8 bit/10 bit formats */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE), + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16), + + /* YUV 420, 422 P1 8, 10 bit formats */ + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR | AFBC_SPARSE), + DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR), + + /* All formats */ + DRM_FORMAT_MOD_LINEAR, + + DRM_FORMAT_MOD_INVALID }; #define SE_N_SCALING_COEFFS 96 @@ -324,14 +380,39 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode * malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); } -static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt) +int malidp_format_get_bpp(u32 fmt) +{ + int bpp = drm_format_plane_cpp(fmt, 0) * 8; + + if (bpp == 0) { + switch (fmt) { + case DRM_FORMAT_VUY101010: + bpp = 30; + break; + case DRM_FORMAT_YUV420_10BIT: + bpp = 15; + break; + case DRM_FORMAT_YUV420_8BIT: + bpp = 12; + break; + default: + bpp = 0; + } + } + + return bpp; +} + +static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, + u16 h, u32 fmt, bool has_modifier) { /* * Each layer needs enough rotation memory to fit 8 lines * worth of pixel data. Required size is then: * size = rotated_width * (bpp / 8) * 8; */ - return w * drm_format_plane_cpp(fmt, 0) * 8; + int bpp = malidp_format_get_bpp(fmt); + + return w * bpp; } static void malidp500_se_write_pp_coefftab(struct malidp_hw_device *hwdev, @@ -609,9 +690,9 @@ static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode * malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); } -static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt) +static int malidpx50_get_bytes_per_column(u32 fmt) { - u32 bytes_per_col; + u32 bytes_per_column; switch (fmt) { /* 8 lines at 4 bytes per pixel */ @@ -637,19 +718,77 @@ static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 case DRM_FORMAT_UYVY: case DRM_FORMAT_YUYV: case DRM_FORMAT_X0L0: - case DRM_FORMAT_X0L2: - bytes_per_col = 32; + bytes_per_column = 32; break; /* 16 lines at 1.5 bytes per pixel */ case DRM_FORMAT_NV12: case DRM_FORMAT_YUV420: - bytes_per_col = 24; + /* 8 lines at 3 bytes per pixel */ + case DRM_FORMAT_VUY888: + /* 16 lines at 12 bits per pixel */ + case DRM_FORMAT_YUV420_8BIT: + /* 8 lines at 3 bytes per pixel */ + case DRM_FORMAT_P010: + bytes_per_column = 24; + break; + /* 8 lines at 30 bits per pixel */ + case DRM_FORMAT_VUY101010: + /* 16 lines at 15 bits per pixel */ + case DRM_FORMAT_YUV420_10BIT: + bytes_per_column = 30; break; default: return -EINVAL; } - return w * bytes_per_col; + return bytes_per_column; +} + +static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, + u16 h, u32 fmt, bool has_modifier) +{ + int bytes_per_column = 0; + + switch (fmt) { + /* 8 lines at 15 bits per pixel */ + case DRM_FORMAT_YUV420_10BIT: + bytes_per_column = 15; + break; + /* Uncompressed YUV 420 10 bit single plane cannot be rotated */ + case DRM_FORMAT_X0L2: + if (has_modifier) + bytes_per_column = 8; + else + return -EINVAL; + break; + default: + bytes_per_column = malidpx50_get_bytes_per_column(fmt); + } + + if (bytes_per_column == -EINVAL) + return bytes_per_column; + + return w * bytes_per_column; +} + +static int malidp650_rotmem_required(struct malidp_hw_device *hwdev,
u16 w, + u16 h, u32 fmt, bool has_modifier) +{ + int bytes_per_column = 0; + + switch (fmt) { + /* 16 lines at 2 bytes per pixel */ + case DRM_FORMAT_X0L2: + bytes_per_column = 32; + break; + default: + bytes_per_column = malidpx50_get_bytes_per_column(fmt); + } + + if (bytes_per_column == -EINVAL) + return bytes_per_column; + + return w * bytes_per_column; } static int malidp550_se_set_scaling_coeffs(struct malidp_hw_device *hwdev, @@ -838,7 +977,10 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = { .se_base = MALIDP550_SE_BASE, .dc_base = MALIDP550_DC_BASE, .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, - .features = MALIDP_REGMAP_HAS_CLEARIRQ, + .features = MALIDP_REGMAP_HAS_CLEARIRQ | + MALIDP_DEVICE_AFBC_SUPPORT_SPLIT | + MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT | + MALIDP_DEVICE_AFBC_YUYV_USE_422_P2, .n_layers = ARRAY_SIZE(malidp550_layers), .layers = malidp550_layers, .de_irq_map = { @@ -884,7 +1026,9 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = { .se_base = MALIDP550_SE_BASE, .dc_base = MALIDP550_DC_BASE, .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, - .features = MALIDP_REGMAP_HAS_CLEARIRQ, + .features = MALIDP_REGMAP_HAS_CLEARIRQ | + MALIDP_DEVICE_AFBC_SUPPORT_SPLIT | + MALIDP_DEVICE_AFBC_YUYV_USE_422_P2, .n_layers = ARRAY_SIZE(malidp650_layers), .layers = malidp650_layers, .de_irq_map = { @@ -923,7 +1067,7 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = { .in_config_mode = malidp550_in_config_mode, .set_config_valid = malidp550_set_config_valid, .modeset = malidp550_modeset, - .rotmem_required = malidp550_rotmem_required, + .rotmem_required = malidp650_rotmem_required, .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs, .se_calc_mclk = malidp550_se_calc_mclk, .enable_memwrite = malidp550_enable_memwrite, @@ -933,19 +1077,72 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = { }; u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, - u8 layer_id, u32 format) + u8 layer_id, u32 format, bool has_modifier) { unsigned int i; for (i = 0; i < map->n_pixel_formats; i++) { if (((map->pixel_formats[i].layer & layer_id) == layer_id) && - (map->pixel_formats[i].format == format)) - return map->pixel_formats[i].id; + (map->pixel_formats[i].format == format)) { + /* + * In some DP550 and DP650, DRM_FORMAT_YUYV + AFBC modifier + * is supported by a different h/w format id than + * DRM_FORMAT_YUYV (only). 
+ */ + if (format == DRM_FORMAT_YUYV && + (has_modifier) && + (map->features & MALIDP_DEVICE_AFBC_YUYV_USE_422_P2)) + return AFBC_YUV_422_FORMAT_ID; + else + return map->pixel_formats[i].id; + } } return MALIDP_INVALID_FORMAT_ID; } +bool malidp_hw_format_is_linear_only(u32 format) +{ + switch (format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_RGB888: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_XYUV8888: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_X0L2: + case DRM_FORMAT_X0L0: + return true; + default: + return false; + } +} + +bool malidp_hw_format_is_afbc_only(u32 format) +{ + switch (format) { + case DRM_FORMAT_VUY888: + case DRM_FORMAT_VUY101010: + case DRM_FORMAT_YUV420_8BIT: + case DRM_FORMAT_YUV420_10BIT: + return true; + default: + return false; + } +} + static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq) { u32 base = malidp_get_block_base(hwdev, block); diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h index 40155e2ea9d9..207c3ce52f1a 100644 --- a/drivers/gpu/drm/arm/malidp_hw.h +++ b/drivers/gpu/drm/arm/malidp_hw.h @@ -70,6 +70,8 @@ struct malidp_layer { s16 yuv2rgb_offset; /* offset to the YUV->RGB matrix entries */ u16 mmu_ctrl_offset; /* offset to the MMU control register */ enum rotation_features rot; /* type of rotation supported */ + /* address offset for the AFBC decoder registers */ + u16 afbc_decoder_offset; }; enum malidp_scaling_coeff_set { @@ -93,7 +95,10 @@ struct malidp_se_config { }; /* regmap features */ -#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0) +#define MALIDP_REGMAP_HAS_CLEARIRQ BIT(0) +#define MALIDP_DEVICE_AFBC_SUPPORT_SPLIT BIT(1) +#define MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT BIT(2) +#define MALIDP_DEVICE_AFBC_YUYV_USE_422_P2 BIT(3) struct malidp_hw_regmap { /* address offset of the DE register bank */ @@ -179,7 +184,8 @@ struct malidp_hw { * Calculate the required rotation memory given the active area * and the buffer format. 
*/ - int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt); + int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, + u32 fmt, bool has_modifier); int (*se_set_scaling_coeffs)(struct malidp_hw_device *hwdev, struct malidp_se_config *se_config, @@ -319,7 +325,9 @@ int malidp_se_irq_init(struct drm_device *drm, int irq); void malidp_se_irq_fini(struct malidp_hw_device *hwdev); u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, - u8 layer_id, u32 format); + u8 layer_id, u32 format, bool has_modifier); + +int malidp_format_get_bpp(u32 fmt); static inline u8 malidp_hw_get_pitch_align(struct malidp_hw_device *hwdev, bool rotated) { @@ -388,9 +396,18 @@ static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev) #define MALIDP_GAMMA_LUT_SIZE 4096 -#define AFBC_MOD_VALID_BITS (AFBC_FORMAT_MOD_BLOCK_SIZE_MASK | \ - AFBC_FORMAT_MOD_YTR | AFBC_FORMAT_MOD_SPLIT | \ - AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_CBR | \ - AFBC_FORMAT_MOD_TILED | AFBC_FORMAT_MOD_SC) +#define AFBC_SIZE_MASK AFBC_FORMAT_MOD_BLOCK_SIZE_MASK +#define AFBC_SIZE_16X16 AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 +#define AFBC_YTR AFBC_FORMAT_MOD_YTR +#define AFBC_SPARSE AFBC_FORMAT_MOD_SPARSE +#define AFBC_CBR AFBC_FORMAT_MOD_CBR +#define AFBC_SPLIT AFBC_FORMAT_MOD_SPLIT +#define AFBC_TILED AFBC_FORMAT_MOD_TILED +#define AFBC_SC AFBC_FORMAT_MOD_SC + +#define AFBC_MOD_VALID_BITS (AFBC_SIZE_MASK | AFBC_YTR | AFBC_SPLIT | \ + AFBC_SPARSE | AFBC_CBR | AFBC_TILED | AFBC_SC) + +extern const u64 malidp_format_modifiers[]; #endif /* __MALIDP_HW_H__ */ diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 041a64dc7167..5f102bdaf841 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -141,9 +141,14 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, return -EINVAL; } + if (fb->modifier) { + DRM_DEBUG_KMS("Writeback framebuffer does not support modifiers\n"); + return -EINVAL; + } + mw_state->format = malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE, - fb->format->format); + fb->format->format, !!fb->modifier); if (mw_state->format == MALIDP_INVALID_FORMAT_ID) { struct drm_format_name_buf format_name; @@ -252,8 +257,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm, &mw_state->addrs[0], mw_state->format); - drm_writeback_queue_job(mw_conn, conn_state->writeback_job); - conn_state->writeback_job = NULL; + drm_writeback_queue_job(mw_conn, conn_state); hwdev->hw->enable_memwrite(hwdev, mw_state->addrs, mw_state->pitches, mw_state->n_planes, fb->width, fb->height, mw_state->format, diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index c9a6d3e0cada..d42e0ea9a303 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -52,6 +52,8 @@ #define MALIDP550_LS_ENABLE 0x01c #define MALIDP550_LS_R1_IN_SIZE 0x020 +#define MODIFIERS_COUNT_MAX 15 + /* * This 4-entry look-up-table is used to determine the full 8-bit alpha value * for formats with 1- or 2-bit alpha channels. 
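As an illustrative aside (not part of the patch): the AFBC_* shorthands added to malidp_hw.h above are plain aliases for the generic AFBC modifier bits from <drm/drm_fourcc.h>, so every entry of malidp_format_modifiers[] is an ordinary DRM format modifier that userspace attaches to a framebuffer. A minimal sketch of composing one of the advertised modifiers and checking it against AFBC_MOD_VALID_BITS, assuming the malidp_hw.h definitions from this patch (the example_* helper name is hypothetical):

	#include <linux/types.h>
	#include <drm/drm_fourcc.h>
	#include "malidp_hw.h"	/* for AFBC_MOD_VALID_BITS as defined above (assumed include) */

	/*
	 * Compose the 16x16-superblock, YTR + sparse AFBC modifier advertised for
	 * RGB formats and verify it only carries mode bits the driver accepts.
	 */
	static bool example_afbc_modifier_is_acceptable(void)
	{
		u64 modifier = DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
						       AFBC_FORMAT_MOD_YTR |
						       AFBC_FORMAT_MOD_SPARSE);

		/* The vendor identifier lives in the top byte of the modifier. */
		if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM)
			return false;

		/* Reject any AFBC mode bits the Mali-DP hardware cannot decode. */
		return !(modifier & ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS));
	}

The validation the driver actually performs, including the per-format rules, is malidp_format_mod_supported() in the malidp_planes.c hunk below.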
@@ -145,6 +147,119 @@ static void malidp_plane_atomic_print_state(struct drm_printer *p, drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize); } +bool malidp_format_mod_supported(struct drm_device *drm, + u32 format, u64 modifier) +{ + const struct drm_format_info *info; + const u64 *modifiers; + struct malidp_drm *malidp = drm->dev_private; + const struct malidp_hw_regmap *map = &malidp->dev->hw->map; + + if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID)) + return false; + + /* Some pixel formats are supported without any modifier */ + if (modifier == DRM_FORMAT_MOD_LINEAR) { + /* + * However these pixel formats need to be supported with + * modifiers only + */ + return !malidp_hw_format_is_afbc_only(format); + } + + if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) { + DRM_ERROR("Unknown modifier (not Arm)\n"); + return false; + } + + if (modifier & + ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) { + DRM_DEBUG_KMS("Unsupported modifiers\n"); + return false; + } + + modifiers = malidp_format_modifiers; + + /* SPLIT buffers must use SPARSE layout */ + if (WARN_ON_ONCE((modifier & AFBC_SPLIT) && !(modifier & AFBC_SPARSE))) + return false; + + /* CBR only applies to YUV formats, where YTR should be always 0 */ + if (WARN_ON_ONCE((modifier & AFBC_CBR) && (modifier & AFBC_YTR))) + return false; + + while (*modifiers != DRM_FORMAT_MOD_INVALID) { + if (*modifiers == modifier) + break; + + modifiers++; + } + + /* return false if the modifier was not found */ + if (*modifiers == DRM_FORMAT_MOD_INVALID) { + DRM_DEBUG_KMS("Unsupported modifier\n"); + return false; + } + + info = drm_format_info(format); + + if (info->num_planes != 1) { + DRM_DEBUG_KMS("AFBC buffers expect one plane\n"); + return false; + } + + if (malidp_hw_format_is_linear_only(format) == true) { + DRM_DEBUG_KMS("Given format (0x%x) is supported in linear mode only\n", + format); + return false; + } + + /* + * RGB formats need to provide YTR modifier and YUV formats should not + * provide YTR modifier. + */ + if (!(info->is_yuv) != !!(modifier & AFBC_FORMAT_MOD_YTR)) { + DRM_DEBUG_KMS("AFBC_FORMAT_MOD_YTR is %s for %s formats\n", + info->is_yuv ? "disallowed" : "mandatory", + info->is_yuv ?
"YUV" : "RGB"); + return false; + } + + if (modifier & AFBC_SPLIT) { + if (!info->is_yuv) { + if (drm_format_plane_cpp(format, 0) <= 2) { + DRM_DEBUG_KMS("RGB formats <= 16bpp are not supported with SPLIT\n"); + return false; + } + } + + if ((drm_format_horz_chroma_subsampling(format) != 1) || + (drm_format_vert_chroma_subsampling(format) != 1)) { + if (!(format == DRM_FORMAT_YUV420_10BIT && + (map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) { + DRM_DEBUG_KMS("Formats which are sub-sampled should never be split\n"); + return false; + } + } + } + + if (modifier & AFBC_CBR) { + if ((drm_format_horz_chroma_subsampling(format) == 1) || + (drm_format_vert_chroma_subsampling(format) == 1)) { + DRM_DEBUG_KMS("Formats which are not sub-sampled should not have CBR set\n"); + return false; + } + } + + return true; +} + +static bool malidp_format_mod_supported_per_plane(struct drm_plane *plane, + u32 format, u64 modifier) +{ + return malidp_format_mod_supported(plane->dev, format, modifier); +} + static const struct drm_plane_funcs malidp_de_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -153,6 +268,7 @@ static const struct drm_plane_funcs malidp_de_plane_funcs = { .atomic_duplicate_state = malidp_duplicate_plane_state, .atomic_destroy_state = malidp_destroy_plane_state, .atomic_print_state = malidp_plane_atomic_print_state, + .format_mod_supported = malidp_format_mod_supported_per_plane, }; static int malidp_se_check_scaling(struct malidp_plane *mp, @@ -406,8 +522,8 @@ static int malidp_de_plane_check(struct drm_plane *plane, fb = state->fb; ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map, - mp->layer->id, - fb->format->format); + mp->layer->id, fb->format->format, + !!fb->modifier); if (ms->format == MALIDP_INVALID_FORMAT_ID) return -EINVAL; @@ -415,8 +531,8 @@ static int malidp_de_plane_check(struct drm_plane *plane, for (i = 0; i < ms->n_planes; i++) { u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated); - if ((fb->pitches[i] * drm_format_info_block_height(fb->format, i)) - & (alignment - 1)) { + if (((fb->pitches[i] * drm_format_info_block_height(fb->format, i)) + & (alignment - 1)) && !(fb->modifier)) { DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n", fb->pitches[i], i); return -EINVAL; @@ -469,13 +585,20 @@ static int malidp_de_plane_check(struct drm_plane *plane, return -EINVAL; } + /* SMART layer does not support AFBC */ + if (mp->layer->id == DE_SMART && fb->modifier) { + DRM_ERROR("AFBC framebuffer not supported in SMART layer"); + return -EINVAL; + } + ms->rotmem_size = 0; if (state->rotation & MALIDP_ROTATED_MASK) { int val; val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w, state->crtc_h, - fb->format->format); + fb->format->format, + !!(fb->modifier)); if (val < 0) return val; @@ -592,6 +715,83 @@ static void malidp_de_set_mmu_control(struct malidp_plane *mp, mp->layer->base + mp->layer->mmu_ctrl_offset); } +static void malidp_set_plane_base_addr(struct drm_framebuffer *fb, + struct malidp_plane *mp, + int plane_index) +{ + dma_addr_t paddr; + u16 ptr; + struct drm_plane *plane = &mp->base; + bool afbc = fb->modifier ? true : false; + + ptr = mp->layer->ptr + (plane_index << 4); + + /* + * drm_fb_cma_get_gem_addr() alters the physical base address of the + * framebuffer as per the plane's src_x, src_y co-ordinates (ie to + * take care of source cropping). + * For AFBC, this is not needed as the cropping is handled by _AD_CROP_H + * and _AD_CROP_V registers. 
+ */ + if (!afbc) { + paddr = drm_fb_cma_get_gem_addr(fb, plane->state, + plane_index); + } else { + struct drm_gem_cma_object *obj; + + obj = drm_fb_cma_get_gem_obj(fb, plane_index); + + if (WARN_ON(!obj)) + return; + paddr = obj->paddr; + } + + malidp_hw_write(mp->hwdev, lower_32_bits(paddr), ptr); + malidp_hw_write(mp->hwdev, upper_32_bits(paddr), ptr + 4); +} + +static void malidp_de_set_plane_afbc(struct drm_plane *plane) +{ + struct malidp_plane *mp; + u32 src_w, src_h, val = 0, src_x, src_y; + struct drm_framebuffer *fb = plane->state->fb; + + mp = to_malidp_plane(plane); + + /* no afbc_decoder_offset means AFBC is not supported on this plane */ + if (!mp->layer->afbc_decoder_offset) + return; + + if (!fb->modifier) { + malidp_hw_write(mp->hwdev, 0, mp->layer->afbc_decoder_offset); + return; + } + + /* convert src values from Q16 fixed point to integer */ + src_w = plane->state->src_w >> 16; + src_h = plane->state->src_h >> 16; + src_x = plane->state->src_x >> 16; + src_y = plane->state->src_y >> 16; + + val = ((fb->width - (src_x + src_w)) << MALIDP_AD_CROP_RIGHT_OFFSET) | + src_x; + malidp_hw_write(mp->hwdev, val, + mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_H); + + val = ((fb->height - (src_y + src_h)) << MALIDP_AD_CROP_BOTTOM_OFFSET) | + src_y; + malidp_hw_write(mp->hwdev, val, + mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_V); + + val = MALIDP_AD_EN; + if (fb->modifier & AFBC_FORMAT_MOD_SPLIT) + val |= MALIDP_AD_BS; + if (fb->modifier & AFBC_FORMAT_MOD_YTR) + val |= MALIDP_AD_YTR; + + malidp_hw_write(mp->hwdev, val, mp->layer->afbc_decoder_offset); +} + static void malidp_de_plane_update(struct drm_plane *plane, struct drm_plane_state *old_state) { @@ -602,12 +802,23 @@ static void malidp_de_plane_update(struct drm_plane *plane, u8 plane_alpha = state->alpha >> 8; u32 src_w, src_h, dest_w, dest_h, val; int i; + struct drm_framebuffer *fb = plane->state->fb; mp = to_malidp_plane(plane); - /* convert src values from Q16 fixed point to integer */ - src_w = state->src_w >> 16; - src_h = state->src_h >> 16; + /* + * For AFBC framebuffer, use the framebuffer width and height for + * configuring layer input size register. 
+ */ + if (fb->modifier) { + src_w = fb->width; + src_h = fb->height; + } else { + /* convert src values from Q16 fixed point to integer */ + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + } + dest_w = state->crtc_w; dest_h = state->crtc_h; @@ -615,15 +826,8 @@ static void malidp_de_plane_update(struct drm_plane *plane, val = (val & ~LAYER_FORMAT_MASK) | ms->format; malidp_hw_write(mp->hwdev, val, mp->layer->base); - for (i = 0; i < ms->n_planes; i++) { - /* calculate the offset for the layer's plane registers */ - u16 ptr = mp->layer->ptr + (i << 4); - dma_addr_t fb_addr = drm_fb_cma_get_gem_addr(state->fb, - state, i); - - malidp_hw_write(mp->hwdev, lower_32_bits(fb_addr), ptr); - malidp_hw_write(mp->hwdev, upper_32_bits(fb_addr), ptr + 4); - } + for (i = 0; i < ms->n_planes; i++) + malidp_set_plane_base_addr(fb, mp, i); malidp_de_set_mmu_control(mp, ms); @@ -657,6 +861,8 @@ static void malidp_de_plane_update(struct drm_plane *plane, mp->layer->base + MALIDP550_LS_R1_IN_SIZE); } + malidp_de_set_plane_afbc(plane); + /* first clear the rotation bits */ val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL); val &= ~LAYER_ROT_MASK; @@ -733,7 +939,26 @@ int malidp_de_planes_init(struct drm_device *drm) BIT(DRM_MODE_BLEND_PREMULTI) | BIT(DRM_MODE_BLEND_COVERAGE); u32 *formats; - int ret, i, j, n; + int ret, i = 0, j = 0, n; + u64 supported_modifiers[MODIFIERS_COUNT_MAX]; + const u64 *modifiers; + + modifiers = malidp_format_modifiers; + + if (!(map->features & MALIDP_DEVICE_AFBC_SUPPORT_SPLIT)) { + /* + * Since our hardware does not support SPLIT, so build the list + * of supported modifiers excluding SPLIT ones. + */ + while (*modifiers != DRM_FORMAT_MOD_INVALID) { + if (!(*modifiers & AFBC_SPLIT)) + supported_modifiers[j++] = *modifiers; + + modifiers++; + } + supported_modifiers[j++] = DRM_FORMAT_MOD_INVALID; + modifiers = supported_modifiers; + } formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL); if (!formats) { @@ -758,9 +983,15 @@ int malidp_de_planes_init(struct drm_device *drm) plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; + + /* + * All the layers except smart layer supports AFBC modifiers. + */ ret = drm_universal_plane_init(drm, &plane->base, crtcs, - &malidp_de_plane_funcs, formats, - n, NULL, plane_type, NULL); + &malidp_de_plane_funcs, formats, n, + (id == DE_SMART) ? 
NULL : modifiers, plane_type, + NULL); + if (ret < 0) goto cleanup; diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h index 7ce3e141464d..a0dd6e1676a8 100644 --- a/drivers/gpu/drm/arm/malidp_regs.h +++ b/drivers/gpu/drm/arm/malidp_regs.h @@ -198,10 +198,13 @@ #define MALIDP500_LV_YUV2RGB ((s16)(-0xB8)) #define MALIDP500_DE_LV_BASE 0x00100 #define MALIDP500_DE_LV_PTR_BASE 0x00124 +#define MALIDP500_DE_LV_AD_CTRL 0x00400 #define MALIDP500_DE_LG1_BASE 0x00200 #define MALIDP500_DE_LG1_PTR_BASE 0x0021c +#define MALIDP500_DE_LG1_AD_CTRL 0x0040c #define MALIDP500_DE_LG2_BASE 0x00300 #define MALIDP500_DE_LG2_PTR_BASE 0x0031c +#define MALIDP500_DE_LG2_AD_CTRL 0x00418 #define MALIDP500_SE_BASE 0x00c00 #define MALIDP500_SE_CONTROL 0x00c0c #define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c @@ -228,10 +231,13 @@ #define MALIDP550_LV_YUV2RGB 0x00084 #define MALIDP550_DE_LV1_BASE 0x00100 #define MALIDP550_DE_LV1_PTR_BASE 0x00124 +#define MALIDP550_DE_LV1_AD_CTRL 0x001B8 #define MALIDP550_DE_LV2_BASE 0x00200 #define MALIDP550_DE_LV2_PTR_BASE 0x00224 +#define MALIDP550_DE_LV2_AD_CTRL 0x002B8 #define MALIDP550_DE_LG_BASE 0x00300 #define MALIDP550_DE_LG_PTR_BASE 0x0031c +#define MALIDP550_DE_LG_AD_CTRL 0x00330 #define MALIDP550_DE_LS_BASE 0x00400 #define MALIDP550_DE_LS_PTR_BASE 0x0042c #define MALIDP550_DE_PERF_BASE 0x00500 @@ -258,6 +264,20 @@ #define MALIDP_MMU_CTRL_PX_PS(x) (1 << (8 + (x))) #define MALIDP_MMU_CTRL_PP_NUM_REQ(x) (((x) & 0x7f) << 12) +/* AFBC register offsets relative to MALIDPXXX_DE_LX_AD_CTRL */ +/* The following register offsets are common for DP500, DP550 and DP650 */ +#define MALIDP_AD_CROP_H 0x4 +#define MALIDP_AD_CROP_V 0x8 +#define MALIDP_AD_END_PTR_LOW 0xc +#define MALIDP_AD_END_PTR_HIGH 0x10 + +/* AFBC decoder Registers */ +#define MALIDP_AD_EN BIT(0) +#define MALIDP_AD_YTR BIT(4) +#define MALIDP_AD_BS BIT(8) +#define MALIDP_AD_CROP_RIGHT_OFFSET 16 +#define MALIDP_AD_CROP_BOTTOM_OFFSET 16 + /* * Starting with DP550 the register map blocks has been standardised to the * following layout: diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 9cd82e3631fb..9e7cd6b34106 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -214,20 +214,9 @@ static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *con return MODE_OK; } -static struct drm_encoder * -bochs_connector_best_encoder(struct drm_connector *connector) -{ - int enc_id = connector->encoder_ids[0]; - /* pick the encoder ids */ - if (enc_id) - return drm_encoder_find(connector->dev, NULL, enc_id); - return NULL; -} - static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = { .get_modes = bochs_connector_get_modes, .mode_valid = bochs_connector_mode_valid, - .best_encoder = bochs_connector_best_encoder, }; static const struct drm_connector_funcs bochs_connector_connector_funcs = { diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c index 0805801f4e94..e64736c39a9f 100644 --- a/drivers/gpu/drm/bridge/dumb-vga-dac.c +++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c @@ -234,7 +234,7 @@ static int dumb_vga_remove(struct platform_device *pdev) */ static const struct drm_bridge_timings default_dac_timings = { /* Timing specifications, datasheet page 7 */ - .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE, .setup_time_ps = 500, .hold_time_ps = 1500, }; @@ -245,7 +245,7 @@ static const struct 
drm_bridge_timings default_dac_timings = { */ static const struct drm_bridge_timings ti_ths8134_dac_timings = { /* From timing diagram, datasheet page 9 */ - .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE, /* From datasheet, page 12 */ .setup_time_ps = 3000, /* I guess this means latched input */ @@ -258,7 +258,7 @@ static const struct drm_bridge_timings ti_ths8134_dac_timings = { */ static const struct drm_bridge_timings ti_ths8135_dac_timings = { /* From timing diagram, datasheet page 14 */ - .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE, /* From datasheet, page 16 */ .setup_time_ps = 2000, .hold_time_ps = 500, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c index cf3f0caf9c63..ed7af7518b52 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c @@ -614,7 +614,6 @@ static int snd_dw_hdmi_suspend(struct device *dev) struct snd_dw_hdmi *dw = dev_get_drvdata(dev); snd_power_change_state(dw->card, SNDRV_CTL_POWER_D3cold); - snd_pcm_suspend_all(dw->pcm); return 0; } diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 888980d4bc74..e570c9dee180 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1222,8 +1222,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge) &bus_format, 1); tc->connector.display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH | - DRM_BUS_FLAG_PIXDATA_NEGEDGE | - DRM_BUS_FLAG_SYNC_NEGEDGE; + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE | + DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE; drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder); return 0; diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index 7bfb4f338813..285be4a0f4bd 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -27,10 +27,14 @@ struct tfp410 { struct drm_bridge bridge; struct drm_connector connector; + unsigned int connector_type; struct i2c_adapter *ddc; struct gpio_desc *hpd; struct delayed_work hpd_work; + struct gpio_desc *powerdown; + + struct drm_bridge_timings timings; struct device *dev; }; @@ -126,7 +130,7 @@ static int tfp410_attach(struct drm_bridge *bridge) drm_connector_helper_add(&dvi->connector, &tfp410_con_helper_funcs); ret = drm_connector_init(bridge->dev, &dvi->connector, - &tfp410_con_funcs, DRM_MODE_CONNECTOR_HDMIA); + &tfp410_con_funcs, dvi->connector_type); if (ret) { dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret); return ret; @@ -138,8 +142,24 @@ static int tfp410_attach(struct drm_bridge *bridge) return 0; } +static void tfp410_enable(struct drm_bridge *bridge) +{ + struct tfp410 *dvi = drm_bridge_to_tfp410(bridge); + + gpiod_set_value_cansleep(dvi->powerdown, 0); +} + +static void tfp410_disable(struct drm_bridge *bridge) +{ + struct tfp410 *dvi = drm_bridge_to_tfp410(bridge); + + gpiod_set_value_cansleep(dvi->powerdown, 1); +} + static const struct drm_bridge_funcs tfp410_bridge_funcs = { .attach = tfp410_attach, + .enable = tfp410_enable, + .disable = tfp410_disable, }; static void tfp410_hpd_work_func(struct work_struct *work) @@ -162,6 +182,70 @@ static irqreturn_t tfp410_hpd_irq_thread(int irq, void *arg) return IRQ_HANDLED; } +static const struct drm_bridge_timings tfp410_default_timings = { + .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE + | DRM_BUS_FLAG_DE_HIGH, + 
.setup_time_ps = 1200, + .hold_time_ps = 1300, +}; + +static int tfp410_parse_timings(struct tfp410 *dvi, bool i2c) +{ + struct drm_bridge_timings *timings = &dvi->timings; + struct device_node *ep; + u32 pclk_sample = 0; + s32 deskew = 0; + + /* Start with defaults. */ + *timings = tfp410_default_timings; + + if (i2c) + /* + * In I2C mode timings are configured through the I2C interface. + * As the driver doesn't support I2C configuration yet, we just + * go with the defaults (BSEL=1, DSEL=1, DKEN=0, EDGE=1). + */ + return 0; + + /* + * In non-I2C mode, timings are configured through the BSEL, DSEL, DKEN + * and EDGE pins. They are specified in DT through endpoint properties + * and vendor-specific properties. + */ + ep = of_graph_get_endpoint_by_regs(dvi->dev->of_node, 0, 0); + if (!ep) + return -EINVAL; + + /* Get the sampling edge from the endpoint. */ + of_property_read_u32(ep, "pclk-sample", &pclk_sample); + of_node_put(ep); + + timings->input_bus_flags = DRM_BUS_FLAG_DE_HIGH; + + switch (pclk_sample) { + case 0: + timings->input_bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE + | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE; + break; + case 1: + timings->input_bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE + | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE; + break; + default: + return -EINVAL; + } + + /* Get the setup and hold time from vendor-specific properties. */ + of_property_read_u32(dvi->dev->of_node, "ti,deskew", (u32 *)&deskew); + if (deskew < -4 || deskew > 3) + return -EINVAL; + + timings->setup_time_ps = min(0, 1200 - 350 * deskew); + timings->hold_time_ps = min(0, 1300 + 350 * deskew); + + return 0; +} + static int tfp410_get_connector_properties(struct tfp410 *dvi) { struct device_node *connector_node, *ddc_phandle; @@ -172,6 +256,11 @@ static int tfp410_get_connector_properties(struct tfp410 *dvi) if (!connector_node) return -ENODEV; + if (of_device_is_compatible(connector_node, "hdmi-connector")) + dvi->connector_type = DRM_MODE_CONNECTOR_HDMIA; + else + dvi->connector_type = DRM_MODE_CONNECTOR_DVID; + dvi->hpd = fwnode_get_named_gpiod(&connector_node->fwnode, "hpd-gpios", 0, GPIOD_IN, "hpd"); if (IS_ERR(dvi->hpd)) { @@ -200,7 +289,7 @@ fail: return ret; } -static int tfp410_init(struct device *dev) +static int tfp410_init(struct device *dev, bool i2c) { struct tfp410 *dvi; int ret; @@ -217,12 +306,24 @@ static int tfp410_init(struct device *dev) dvi->bridge.funcs = &tfp410_bridge_funcs; dvi->bridge.of_node = dev->of_node; + dvi->bridge.timings = &dvi->timings; dvi->dev = dev; + ret = tfp410_parse_timings(dvi, i2c); + if (ret) + goto fail; + ret = tfp410_get_connector_properties(dvi); if (ret) goto fail; + dvi->powerdown = devm_gpiod_get_optional(dev, "powerdown", + GPIOD_OUT_HIGH); + if (IS_ERR(dvi->powerdown)) { + dev_err(dev, "failed to parse powerdown gpio\n"); + return PTR_ERR(dvi->powerdown); + } + if (dvi->hpd) { INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func); @@ -264,7 +365,7 @@ static int tfp410_fini(struct device *dev) static int tfp410_probe(struct platform_device *pdev) { - return tfp410_init(&pdev->dev); + return tfp410_init(&pdev->dev, false); } static int tfp410_remove(struct platform_device *pdev) @@ -301,7 +402,7 @@ static int tfp410_i2c_probe(struct i2c_client *client, return -ENXIO; } - return tfp410_init(&client->dev); + return tfp410_init(&client->dev, true); } static int tfp410_i2c_remove(struct i2c_client *client) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 540a77a2ade9..86efd2da37f9 100644 --- 
a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -495,7 +495,7 @@ mode_fixup(struct drm_atomic_state *state) static enum drm_mode_status mode_valid_path(struct drm_connector *connector, struct drm_encoder *encoder, struct drm_crtc *crtc, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { enum drm_mode_status ret; @@ -534,7 +534,7 @@ mode_valid(struct drm_atomic_state *state) struct drm_crtc *crtc = conn_state->crtc; struct drm_crtc_state *crtc_state; enum drm_mode_status mode_status; - struct drm_display_mode *mode; + const struct drm_display_mode *mode; if (!crtc || !encoder) continue; @@ -1752,7 +1752,7 @@ int drm_atomic_helper_commit(struct drm_device *dev, * * NOTE: Commit work has multiple phases, first hardware commit, then * cleanup. We want them to overlap, hence need system_unbound_wq to - * make sure work items don't artifically stall on each another. + * make sure work items don't artificially stall on each another. */ drm_atomic_state_get(state); @@ -1786,7 +1786,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); * * Asynchronous workers need to have sufficient parallelism to be able to run * different atomic commits on different CRTCs in parallel. The simplest way to - * achive this is by running them on the &system_unbound_wq work queue. Note + * achieve this is by running them on the &system_unbound_wq work queue. Note * that drivers are not required to split up atomic commits and run an * individual commit in parallel - userspace is supposed to do that if it cares. * But it might be beneficial to do that for modesets, since those necessarily @@ -2261,10 +2261,21 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done); int drm_atomic_helper_prepare_planes(struct drm_device *dev, struct drm_atomic_state *state) { + struct drm_connector *connector; + struct drm_connector_state *new_conn_state; struct drm_plane *plane; struct drm_plane_state *new_plane_state; int ret, i, j; + for_each_new_connector_in_state(state, connector, new_conn_state, i) { + if (!new_conn_state->writeback_job) + continue; + + ret = drm_writeback_prepare_job(new_conn_state->writeback_job); + if (ret < 0) + return ret; + } + for_each_new_plane_in_state(state, plane, new_plane_state, i) { const struct drm_plane_helper_funcs *funcs; @@ -3039,9 +3050,31 @@ commit: return 0; } -static int __drm_atomic_helper_disable_all(struct drm_device *dev, - struct drm_modeset_acquire_ctx *ctx, - bool clean_old_fbs) +/** + * drm_atomic_helper_disable_all - disable all currently active outputs + * @dev: DRM device + * @ctx: lock acquisition context + * + * Loops through all connectors, finding those that aren't turned off and then + * turns them off by setting their DPMS mode to OFF and deactivating the CRTC + * that they are connected to. + * + * This is used for example in suspend/resume to disable all currently active + * functions when suspending. If you just want to shut down everything at e.g. + * driver unload, look at drm_atomic_helper_shutdown(). + * + * Note that if callers haven't already acquired all modeset locks this might + * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). + * + * Returns: + * 0 on success or a negative error code on failure. + * + * See also: + * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and + * drm_atomic_helper_shutdown(). 
+ */ +int drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) { struct drm_atomic_state *state; struct drm_connector_state *conn_state; @@ -3099,35 +3132,6 @@ free: drm_atomic_state_put(state); return ret; } - -/** - * drm_atomic_helper_disable_all - disable all currently active outputs - * @dev: DRM device - * @ctx: lock acquisition context - * - * Loops through all connectors, finding those that aren't turned off and then - * turns them off by setting their DPMS mode to OFF and deactivating the CRTC - * that they are connected to. - * - * This is used for example in suspend/resume to disable all currently active - * functions when suspending. If you just want to shut down everything at e.g. - * driver unload, look at drm_atomic_helper_shutdown(). - * - * Note that if callers haven't already acquired all modeset locks this might - * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). - * - * Returns: - * 0 on success or a negative error code on failure. - * - * See also: - * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and - * drm_atomic_helper_shutdown(). - */ -int drm_atomic_helper_disable_all(struct drm_device *dev, - struct drm_modeset_acquire_ctx *ctx) -{ - return __drm_atomic_helper_disable_all(dev, ctx, false); -} EXPORT_SYMBOL(drm_atomic_helper_disable_all); /** @@ -3148,7 +3152,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev) DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); - ret = __drm_atomic_helper_disable_all(dev, &ctx, true); + ret = drm_atomic_helper_disable_all(dev, &ctx); if (ret) DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 4985384e51f6..59ffb6b9c745 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -30,6 +30,7 @@ #include <drm/drm_connector.h> #include <drm/drm_atomic.h> #include <drm/drm_device.h> +#include <drm/drm_writeback.h> #include <linux/slab.h> #include <linux/dma-fence.h> @@ -412,6 +413,9 @@ __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state) if (state->commit) drm_crtc_commit_put(state->commit); + + if (state->writeback_job) + drm_writeback_cleanup_job(state->writeback_job); } EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state); diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 0aabd401d3ca..ea797d4c82ee 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -647,28 +647,15 @@ drm_atomic_plane_get_property(struct drm_plane *plane, return 0; } -static struct drm_writeback_job * -drm_atomic_get_writeback_job(struct drm_connector_state *conn_state) -{ - WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK); - - if (!conn_state->writeback_job) - conn_state->writeback_job = - kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL); - - return conn_state->writeback_job; -} - static int drm_atomic_set_writeback_fb_for_connector( struct drm_connector_state *conn_state, struct drm_framebuffer *fb) { - struct drm_writeback_job *job = - drm_atomic_get_writeback_job(conn_state); - if (!job) - return -ENOMEM; + int ret; - drm_framebuffer_assign(&job->fb, fb); + ret = drm_writeback_set_fb(conn_state, fb); + if (ret < 0) + return ret; if (fb) DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n", @@ -746,6 +733,8 @@ static int drm_atomic_connector_set_property(struct 
drm_connector *connector, return -EINVAL; } state->content_protection = val; + } else if (property == connector->colorspace_property) { + state->colorspace = val; } else if (property == config->writeback_fb_id_property) { struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val); int ret = drm_atomic_set_writeback_fb_for_connector(state, fb); @@ -814,6 +803,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector, *val = state->picture_aspect_ratio; } else if (property == config->content_type_property) { *val = state->content_type; + } else if (property == connector->colorspace_property) { + *val = state->colorspace; } else if (property == connector->scaling_mode_property) { *val = state->scaling_mode; } else if (property == connector->content_protection_property) { @@ -1158,19 +1149,17 @@ static int prepare_signaling(struct drm_device *dev, for_each_new_connector_in_state(state, conn, conn_state, i) { struct drm_writeback_connector *wb_conn; - struct drm_writeback_job *job; struct drm_out_fence_state *f; struct dma_fence *fence; s32 __user *fence_ptr; + if (!conn_state->writeback_job) + continue; + fence_ptr = get_out_fence_for_connector(state, conn); if (!fence_ptr) continue; - job = drm_atomic_get_writeback_job(conn_state); - if (!job) - return -ENOMEM; - f = krealloc(*fence_state, sizeof(**fence_state) * (*num_fences + 1), GFP_KERNEL); if (!f) @@ -1192,7 +1181,7 @@ static int prepare_signaling(struct drm_device *dev, return ret; } - job->out_fence = fence; + conn_state->writeback_job->out_fence = fence; } /* diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index dd40eff0911c..2355124849db 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -245,6 +245,7 @@ int drm_connector_init(struct drm_device *dev, INIT_LIST_HEAD(&connector->modes); mutex_init(&connector->mutex); connector->edid_blob_ptr = NULL; + connector->tile_blob_ptr = NULL; connector->status = connector_status_unknown; connector->display_info.panel_orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; @@ -272,6 +273,9 @@ int drm_connector_init(struct drm_device *dev, drm_object_attach_property(&connector->base, config->non_desktop_property, 0); + drm_object_attach_property(&connector->base, + config->tile_property, + 0); if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { drm_object_attach_property(&connector->base, config->prop_crtc_id, 0); @@ -826,6 +830,33 @@ static struct drm_prop_enum_list drm_cp_enum_list[] = { }; DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list) +static const struct drm_prop_enum_list hdmi_colorspaces[] = { + /* For Default case, driver will set the colorspace */ + { DRM_MODE_COLORIMETRY_DEFAULT, "Default" }, + /* Standard Definition Colorimetry based on CEA 861 */ + { DRM_MODE_COLORIMETRY_SMPTE_170M_YCC, "SMPTE_170M_YCC" }, + { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" }, + /* Standard Definition Colorimetry based on IEC 61966-2-4 */ + { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" }, + /* High Definition Colorimetry based on IEC 61966-2-4 */ + { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" }, + /* Colorimetry based on IEC 61966-2-1/Amendment 1 */ + { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" }, + /* Colorimetry based on IEC 61966-2-5 [33] */ + { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" }, + /* Colorimetry based on IEC 61966-2-5 */ + { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" }, + /* Colorimetry based on 
ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" }, + /* Added as part of Additional Colorimetry Extension in 861.G */ + { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" }, + { DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" }, +}; + /** * DOC: standard connector properties * @@ -1548,6 +1579,57 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev) EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); /** + * DOC: standard connector properties + * + * Colorspace: + * drm_mode_create_colorspace_property - create colorspace property + * This property helps select a suitable colorspace based on the sink + * capability. Modern sink devices support wider gamut like BT2020. + * This helps switch to BT2020 mode if the BT2020 encoded video stream + * is being played by the user, same for any other colorspace. Thereby + * giving a good visual experience to users. + * + * The expectation from userspace is that it should parse the EDID + * and get supported colorspaces. Use this property and switch to the + * one supported. Sink supported colorspaces should be retrieved by + * userspace from EDID and driver will not explicitly expose them. + * + * Basically the expectation from userspace is: + * - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink + * colorspace + * - Set this new property to let the sink know what it + * converted the CRTC output to. + * - This property is just to inform sink what colorspace + * source is trying to drive. + * + * Called by a driver the first time it's needed, must be attached to desired + * connectors. + */ +int drm_mode_create_colorspace_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_property *prop; + + if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) { + prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, + "Colorspace", + hdmi_colorspaces, + ARRAY_SIZE(hdmi_colorspaces)); + if (!prop) + return -ENOMEM; + } else { + DRM_DEBUG_KMS("Colorspace property not supported\n"); + return 0; + } + + connector->colorspace_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_mode_create_colorspace_property); + +/** * drm_mode_create_content_type_property - create content type property * @dev: DRM device * @@ -1634,6 +1716,8 @@ EXPORT_SYMBOL(drm_connector_set_path_property); * This looks up the tile information for a connector, and creates a * property for userspace to parse if it exists. The property is of * the form of 8 integers using ':' as a separator. + * This is used for dual port tiled displays with DisplayPort SST + * or DisplayPort MST connectors. * * Returns: * Zero on success, errno on failure. @@ -1677,6 +1761,9 @@ EXPORT_SYMBOL(drm_connector_set_tile_property); * * This function creates a new blob modeset object and assigns its id to the * connector's edid property. + * Since we also parse tile information from EDID's displayID block, we also + * set the connector's tile property here. See drm_connector_set_tile_property() + * for more details. * * Returns: * Zero on success, negative errno on failure. 
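As a usage sketch for the new Colorspace property (not taken from this patch; the example_* helper and the exact include set are assumptions): a connector driver creates the property once at connector init time and attaches it with a default value, after which the value chosen by userspace is carried in the new drm_connector_state.colorspace field handled in the drm_atomic_uapi.c hunk above.

	#include <drm/drm_connector.h>
	#include <drm/drm_mode_object.h>

	/* Hypothetical init-time helper for an HDMI connector driver. */
	static int example_attach_colorspace_property(struct drm_connector *connector)
	{
		int ret;

		/* Creates connector->colorspace_property (HDMI connectors only). */
		ret = drm_mode_create_colorspace_property(connector);
		if (ret)
			return ret;

		/* Non-HDMI connectors leave the property NULL; nothing to attach. */
		if (connector->colorspace_property)
			drm_object_attach_property(&connector->base,
						   connector->colorspace_property,
						   DRM_MODE_COLORIMETRY_DEFAULT);

		return 0;
	}

Userspace then sets the "Colorspace" enum through the regular atomic property interface, and the driver consumes state->colorspace in its modeset/commit path.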
@@ -1718,7 +1805,9 @@ int drm_connector_update_edid_property(struct drm_connector *connector, edid, &connector->base, dev->mode_config.edid_property); - return ret; + if (ret) + return ret; + return drm_connector_set_tile_property(connector); } EXPORT_SYMBOL(drm_connector_update_edid_property); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index dc7ac0c60547..c630ed157994 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3022,7 +3022,6 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ edid = drm_edid_duplicate(port->cached_edid); else { edid = drm_get_edid(connector, &port->aux.ddc); - drm_connector_set_tile_property(connector); } port->has_audio = drm_detect_monitor_audio(edid); drm_dp_mst_topology_put_port(port); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 381581b01d48..50d849d1bc6e 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -286,6 +286,138 @@ void drm_minor_release(struct drm_minor *minor) * Note that the lifetime rules for &drm_device instance has still a lot of * historical baggage. Hence use the reference counting provided by * drm_dev_get() and drm_dev_put() only carefully. + * + * Display driver example + * ~~~~~~~~~~~~~~~~~~~~~~ + * + * The following example shows a typical structure of a DRM display driver. + * The example focus on the probe() function and the other functions that is + * almost always present and serves as a demonstration of devm_drm_dev_init() + * usage with its accompanying drm_driver->release callback. + * + * .. code-block:: c + * + * struct driver_device { + * struct drm_device drm; + * void *userspace_facing; + * struct clk *pclk; + * }; + * + * static void driver_drm_release(struct drm_device *drm) + * { + * struct driver_device *priv = container_of(...); + * + * drm_mode_config_cleanup(drm); + * drm_dev_fini(drm); + * kfree(priv->userspace_facing); + * kfree(priv); + * } + * + * static struct drm_driver driver_drm_driver = { + * [...] + * .release = driver_drm_release, + * }; + * + * static int driver_probe(struct platform_device *pdev) + * { + * struct driver_device *priv; + * struct drm_device *drm; + * int ret; + * + * [ + * devm_kzalloc() can't be used here because the drm_device + * lifetime can exceed the device lifetime if driver unbind + * happens when userspace still has open file descriptors. 
+ * ] + * priv = kzalloc(sizeof(*priv), GFP_KERNEL); + * if (!priv) + * return -ENOMEM; + * + * drm = &priv->drm; + * + * ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver); + * if (ret) { + * kfree(drm); + * return ret; + * } + * + * drm_mode_config_init(drm); + * + * priv->userspace_facing = kzalloc(..., GFP_KERNEL); + * if (!priv->userspace_facing) + * return -ENOMEM; + * + * priv->pclk = devm_clk_get(dev, "PCLK"); + * if (IS_ERR(priv->pclk)) + * return PTR_ERR(priv->pclk); + * + * [ Further setup, display pipeline etc ] + * + * platform_set_drvdata(pdev, drm); + * + * drm_mode_config_reset(drm); + * + * ret = drm_dev_register(drm); + * if (ret) + * return ret; + * + * drm_fbdev_generic_setup(drm, 32); + * + * return 0; + * } + * + * [ This function is called before the devm_ resources are released ] + * static int driver_remove(struct platform_device *pdev) + * { + * struct drm_device *drm = platform_get_drvdata(pdev); + * + * drm_dev_unregister(drm); + * drm_atomic_helper_shutdown(drm) + * + * return 0; + * } + * + * [ This function is called on kernel restart and shutdown ] + * static void driver_shutdown(struct platform_device *pdev) + * { + * drm_atomic_helper_shutdown(platform_get_drvdata(pdev)); + * } + * + * static int __maybe_unused driver_pm_suspend(struct device *dev) + * { + * return drm_mode_config_helper_suspend(dev_get_drvdata(dev)); + * } + * + * static int __maybe_unused driver_pm_resume(struct device *dev) + * { + * drm_mode_config_helper_resume(dev_get_drvdata(dev)); + * + * return 0; + * } + * + * static const struct dev_pm_ops driver_pm_ops = { + * SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume) + * }; + * + * static struct platform_driver driver_driver = { + * .driver = { + * [...] + * .pm = &driver_pm_ops, + * }, + * .probe = driver_probe, + * .remove = driver_remove, + * .shutdown = driver_shutdown, + * }; + * module_platform_driver(driver_driver); + * + * Drivers that want to support device unplugging (USB, DT overlay unload) should + * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect + * regions that is accessing device resources to prevent use after they're + * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one + * shortcoming however, drm_dev_unplug() marks the drm_device as unplugged before + * drm_atomic_helper_shutdown() is called. This means that if the disable code + * paths are protected, they will not run on regular driver module unload, + * possibily leaving the hardware enabled. */ /** @@ -376,11 +508,6 @@ void drm_dev_unplug(struct drm_device *dev) synchronize_srcu(&drm_unplug_srcu); drm_dev_unregister(dev); - - mutex_lock(&drm_global_mutex); - if (dev->open_count == 0) - drm_dev_put(dev); - mutex_unlock(&drm_global_mutex); } EXPORT_SYMBOL(drm_dev_unplug); @@ -457,6 +584,31 @@ static void drm_fs_inode_free(struct inode *inode) } /** + * DOC: component helper usage recommendations + * + * DRM drivers that drive hardware where a logical device consists of a pile of + * independent hardware blocks are recommended to use the :ref:`component helper + * library<component>`. For consistency and better options for code reuse the + * following guidelines apply: + * + * - The entire device initialization procedure should be run from the + * &component_master_ops.master_bind callback, starting with drm_dev_init(), + * then binding all components with component_bind_all() and finishing with + * drm_dev_register(). 
+ * + * - The opaque pointer passed to all components through component_bind_all() + * should point at &struct drm_device of the device instance, not some driver + * specific private structure. + * + * - The component helper fills the niche where further standardization of + * interfaces is not practical. When there already is, or will be, a + * standardized interface like &drm_bridge or &drm_panel, providing its own + * functions to find such components at driver load time, like + * drm_of_find_panel_or_bridge(), then the component helper should not be + * used. + */ + +/** * drm_dev_init - Initialise new DRM device * @dev: DRM device * @driver: DRM driver @@ -501,7 +653,7 @@ int drm_dev_init(struct drm_device *dev, BUG_ON(!parent); kref_init(&dev->ref); - dev->dev = parent; + dev->dev = get_device(parent); dev->driver = driver; /* no per-device feature limits by default */ @@ -571,6 +723,7 @@ err_minors: drm_minor_free(dev, DRM_MINOR_RENDER); drm_fs_inode_free(dev->anon_inode); err_free: + put_device(dev->dev); mutex_destroy(&dev->master_mutex); mutex_destroy(&dev->ctxlist_mutex); mutex_destroy(&dev->clientlist_mutex); @@ -580,6 +733,45 @@ err_free: } EXPORT_SYMBOL(drm_dev_init); +static void devm_drm_dev_init_release(void *data) +{ + drm_dev_put(data); +} + +/** + * devm_drm_dev_init - Resource managed drm_dev_init() + * @parent: Parent device object + * @dev: DRM device + * @driver: DRM driver + * + * Managed drm_dev_init(). The DRM device initialized with this function is + * automatically put on driver detach using drm_dev_put(). You must supply a + * &drm_driver.release callback to control the finalization explicitly. + * + * RETURNS: + * 0 on success, or error code on failure. + */ +int devm_drm_dev_init(struct device *parent, + struct drm_device *dev, + struct drm_driver *driver) +{ + int ret; + + if (WARN_ON(!parent || !driver->release)) + return -EINVAL; + + ret = drm_dev_init(dev, driver, parent); + if (ret) + return ret; + + ret = devm_add_action(parent, devm_drm_dev_init_release, dev); + if (ret) + devm_drm_dev_init_release(dev); + + return ret; +} +EXPORT_SYMBOL(devm_drm_dev_init); + /** * drm_dev_fini - Finalize a dead DRM device * @dev: DRM device @@ -606,6 +798,8 @@ void drm_dev_fini(struct drm_device *dev) drm_minor_free(dev, DRM_MINOR_PRIMARY); drm_minor_free(dev, DRM_MINOR_RENDER); + put_device(dev->dev); + mutex_destroy(&dev->master_mutex); mutex_destroy(&dev->ctxlist_mutex); mutex_destroy(&dev->clientlist_mutex); diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c index bce99f95c1a3..77f4e5ae4197 100644 --- a/drivers/gpu/drm/drm_dsc.c +++ b/drivers/gpu/drm/drm_dsc.c @@ -11,6 +11,7 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/byteorder/generic.h> +#include <drm/drm_print.h> #include <drm/drm_dp_helper.h> #include <drm/drm_dsc.h> @@ -31,75 +32,74 @@ /** * drm_dsc_dp_pps_header_init() - Initializes the PPS Header * for DisplayPort as per the DP 1.4 spec. - * @pps_sdp: Secondary data packet for DSC Picture Parameter Set - * as defined in &struct drm_dsc_pps_infoframe + * @pps_header: Secondary data packet header for DSC Picture + * Parameter Set as defined in &struct dp_sdp_header * * DP 1.4 spec defines the secondary data packet for sending the * picture parameter infoframes from the source to the sink. - * This function populates the pps header defined in - * &struct drm_dsc_pps_infoframe as per the header bytes defined - * in &struct dp_sdp_header. + * This function populates the SDP header defined in + * &struct dp_sdp_header. 
*/ -void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp) +void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header) { - memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header)); + memset(pps_header, 0, sizeof(*pps_header)); - pps_sdp->pps_header.HB1 = DP_SDP_PPS; - pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1; + pps_header->HB1 = DP_SDP_PPS; + pps_header->HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1; } EXPORT_SYMBOL(drm_dsc_dp_pps_header_init); /** - * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe + * drm_dsc_pps_payload_pack() - Populates the DSC PPS * - * @pps_sdp: - * Secondary data packet for DSC Picture Parameter Set. This is defined - * by &struct drm_dsc_pps_infoframe + * @pps_payload: + * Bitwise struct for DSC Picture Parameter Set. This is defined + * by &struct drm_dsc_picture_parameter_set * @dsc_cfg: * DSC Configuration data filled by driver as defined by * &struct drm_dsc_config * - * DSC source device sends a secondary data packet filled with all the - * picture parameter set (PPS) information required by the sink to decode - * the compressed frame. Driver populates the dsC PPS infoframe using the DSC - * configuration parameters in the order expected by the DSC Display Sink - * device. For the DSC, the sink device expects the PPS payload in the big - * endian format for the fields that span more than 1 byte. + * DSC source device sends a picture parameter set (PPS) containing the + * information required by the sink to decode the compressed frame. Driver + * populates the DSC PPS struct using the DSC configuration parameters in + * the order expected by the DSC Display Sink device. For the DSC, the sink + * device expects the PPS payload in big endian format for fields + * that span more than 1 byte. 
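/*
 * Editor's illustration (not part of this patch): with the header and payload
 * helpers split as above, a DP source driver would typically pack both halves
 * of the PPS secondary data packet back to back.  This assumes struct
 * drm_dsc_pps_infoframe still groups a dp_sdp_header with a
 * drm_dsc_picture_parameter_set; the actual SDP write-out is hardware specific.
 */
static void hypothetical_build_dsc_sdp(struct drm_dsc_pps_infoframe *dsc_sdp,
				       const struct drm_dsc_config *dsc_cfg)
{
	drm_dsc_dp_pps_header_init(&dsc_sdp->pps_header);
	drm_dsc_pps_payload_pack(&dsc_sdp->pps_payload, dsc_cfg);
	/* Then program &dsc_sdp into the hardware's SDP/DIP registers. */
}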
*/ -void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp, +void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload, const struct drm_dsc_config *dsc_cfg) { int i; /* Protect against someone accidently changing struct size */ - BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) != + BUILD_BUG_ON(sizeof(*pps_payload) != DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1); - memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload)); + memset(pps_payload, 0, sizeof(*pps_payload)); /* PPS 0 */ - pps_sdp->pps_payload.dsc_version = + pps_payload->dsc_version = dsc_cfg->dsc_version_minor | dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT; /* PPS 1, 2 is 0 */ /* PPS 3 */ - pps_sdp->pps_payload.pps_3 = + pps_payload->pps_3 = dsc_cfg->line_buf_depth | dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT; /* PPS 4 */ - pps_sdp->pps_payload.pps_4 = + pps_payload->pps_4 = ((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >> DSC_PPS_MSB_SHIFT) | dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT | - dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT | + dsc_cfg->simple_422 << DSC_PPS_SIMPLE422_SHIFT | dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT | dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT; /* PPS 5 */ - pps_sdp->pps_payload.bits_per_pixel_low = + pps_payload->bits_per_pixel_low = (dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK); /* @@ -110,103 +110,103 @@ void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp, */ /* PPS 6, 7 */ - pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height); + pps_payload->pic_height = cpu_to_be16(dsc_cfg->pic_height); /* PPS 8, 9 */ - pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width); + pps_payload->pic_width = cpu_to_be16(dsc_cfg->pic_width); /* PPS 10, 11 */ - pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height); + pps_payload->slice_height = cpu_to_be16(dsc_cfg->slice_height); /* PPS 12, 13 */ - pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width); + pps_payload->slice_width = cpu_to_be16(dsc_cfg->slice_width); /* PPS 14, 15 */ - pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size); + pps_payload->chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size); /* PPS 16 */ - pps_sdp->pps_payload.initial_xmit_delay_high = + pps_payload->initial_xmit_delay_high = ((dsc_cfg->initial_xmit_delay & DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >> DSC_PPS_MSB_SHIFT); /* PPS 17 */ - pps_sdp->pps_payload.initial_xmit_delay_low = + pps_payload->initial_xmit_delay_low = (dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK); /* PPS 18, 19 */ - pps_sdp->pps_payload.initial_dec_delay = + pps_payload->initial_dec_delay = cpu_to_be16(dsc_cfg->initial_dec_delay); /* PPS 20 is 0 */ /* PPS 21 */ - pps_sdp->pps_payload.initial_scale_value = + pps_payload->initial_scale_value = dsc_cfg->initial_scale_value; /* PPS 22, 23 */ - pps_sdp->pps_payload.scale_increment_interval = + pps_payload->scale_increment_interval = cpu_to_be16(dsc_cfg->scale_increment_interval); /* PPS 24 */ - pps_sdp->pps_payload.scale_decrement_interval_high = + pps_payload->scale_decrement_interval_high = ((dsc_cfg->scale_decrement_interval & DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >> DSC_PPS_MSB_SHIFT); /* PPS 25 */ - pps_sdp->pps_payload.scale_decrement_interval_low = + pps_payload->scale_decrement_interval_low = (dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK); /* PPS 26[7:0], PPS 27[7:5] RESERVED */ /* PPS 27 */ - pps_sdp->pps_payload.first_line_bpg_offset = + pps_payload->first_line_bpg_offset = 
dsc_cfg->first_line_bpg_offset; /* PPS 28, 29 */ - pps_sdp->pps_payload.nfl_bpg_offset = + pps_payload->nfl_bpg_offset = cpu_to_be16(dsc_cfg->nfl_bpg_offset); /* PPS 30, 31 */ - pps_sdp->pps_payload.slice_bpg_offset = + pps_payload->slice_bpg_offset = cpu_to_be16(dsc_cfg->slice_bpg_offset); /* PPS 32, 33 */ - pps_sdp->pps_payload.initial_offset = + pps_payload->initial_offset = cpu_to_be16(dsc_cfg->initial_offset); /* PPS 34, 35 */ - pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset); + pps_payload->final_offset = cpu_to_be16(dsc_cfg->final_offset); /* PPS 36 */ - pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp; + pps_payload->flatness_min_qp = dsc_cfg->flatness_min_qp; /* PPS 37 */ - pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp; + pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp; /* PPS 38, 39 */ - pps_sdp->pps_payload.rc_model_size = + pps_payload->rc_model_size = cpu_to_be16(DSC_RC_MODEL_SIZE_CONST); /* PPS 40 */ - pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST; + pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST; /* PPS 41 */ - pps_sdp->pps_payload.rc_quant_incr_limit0 = + pps_payload->rc_quant_incr_limit0 = dsc_cfg->rc_quant_incr_limit0; /* PPS 42 */ - pps_sdp->pps_payload.rc_quant_incr_limit1 = + pps_payload->rc_quant_incr_limit1 = dsc_cfg->rc_quant_incr_limit1; /* PPS 43 */ - pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST | + pps_payload->rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST | DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT; /* PPS 44 - 57 */ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) - pps_sdp->pps_payload.rc_buf_thresh[i] = + pps_payload->rc_buf_thresh[i] = dsc_cfg->rc_buf_thresh[i]; /* PPS 58 - 87 */ @@ -215,32 +215,181 @@ void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp, * are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0] */ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { - pps_sdp->pps_payload.rc_range_parameters[i] = + pps_payload->rc_range_parameters[i] = ((dsc_cfg->rc_range_params[i].range_min_qp << DSC_PPS_RC_RANGE_MINQP_SHIFT) | (dsc_cfg->rc_range_params[i].range_max_qp << DSC_PPS_RC_RANGE_MAXQP_SHIFT) | (dsc_cfg->rc_range_params[i].range_bpg_offset)); - pps_sdp->pps_payload.rc_range_parameters[i] = - cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]); + pps_payload->rc_range_parameters[i] = + cpu_to_be16(pps_payload->rc_range_parameters[i]); } /* PPS 88 */ - pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 | + pps_payload->native_422_420 = dsc_cfg->native_422 | dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT; /* PPS 89 */ - pps_sdp->pps_payload.second_line_bpg_offset = + pps_payload->second_line_bpg_offset = dsc_cfg->second_line_bpg_offset; /* PPS 90, 91 */ - pps_sdp->pps_payload.nsl_bpg_offset = + pps_payload->nsl_bpg_offset = cpu_to_be16(dsc_cfg->nsl_bpg_offset); /* PPS 92, 93 */ - pps_sdp->pps_payload.second_line_offset_adj = + pps_payload->second_line_offset_adj = cpu_to_be16(dsc_cfg->second_line_offset_adj); /* PPS 94 - 127 are O */ } -EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack); +EXPORT_SYMBOL(drm_dsc_pps_payload_pack); + +/** + * drm_dsc_compute_rc_parameters() - Write rate control + * parameters to the dsc configuration defined in + * &struct drm_dsc_config in accordance with the DSC 1.2 + * specification. Some configuration fields must be present + * beforehand. 
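/*
 * Editor's note (not part of this patch): a worked example of the 16-bit,
 * 11-fractional-bit fixed point used by the rate-control computation below.
 * With first_line_bpg_offset = 12 and slice_height = 64:
 * nfl_bpg_offset = DIV_ROUND_UP(12 << 11, 64 - 1) = DIV_ROUND_UP(24576, 63)
 * = 391, i.e. roughly 12/63 = 0.19 expressed in units of 1/2048.
 */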
+ * + * @vdsc_cfg: + * DSC Configuration data partially filled by driver + */ +int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) +{ + unsigned long groups_per_line = 0; + unsigned long groups_total = 0; + unsigned long num_extra_mux_bits = 0; + unsigned long slice_bits = 0; + unsigned long hrd_delay = 0; + unsigned long final_scale = 0; + unsigned long rbs_min = 0; + + if (vdsc_cfg->native_420 || vdsc_cfg->native_422) { + /* Number of groups used to code each line of a slice */ + groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width / 2, + DSC_RC_PIXELS_PER_GROUP); + + /* chunksize in Bytes */ + vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width / 2 * + vdsc_cfg->bits_per_pixel, + (8 * 16)); + } else { + /* Number of groups used to code each line of a slice */ + groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width, + DSC_RC_PIXELS_PER_GROUP); + + /* chunksize in Bytes */ + vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width * + vdsc_cfg->bits_per_pixel, + (8 * 16)); + } + + if (vdsc_cfg->convert_rgb) + num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size + + (4 * vdsc_cfg->bits_per_component + 4) + - 2); + else if (vdsc_cfg->native_422) + num_extra_mux_bits = 4 * vdsc_cfg->mux_word_size + + (4 * vdsc_cfg->bits_per_component + 4) + + 3 * (4 * vdsc_cfg->bits_per_component) - 2; + else + num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size + + (4 * vdsc_cfg->bits_per_component + 4) + + 2 * (4 * vdsc_cfg->bits_per_component) - 2; + /* Number of bits in one Slice */ + slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height; + + while ((num_extra_mux_bits > 0) && + ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size)) + num_extra_mux_bits--; + + if (groups_per_line < vdsc_cfg->initial_scale_value - 8) + vdsc_cfg->initial_scale_value = groups_per_line + 8; + + /* scale_decrement_interval calculation according to DSC spec 1.11 */ + if (vdsc_cfg->initial_scale_value > 8) + vdsc_cfg->scale_decrement_interval = groups_per_line / + (vdsc_cfg->initial_scale_value - 8); + else + vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX; + + vdsc_cfg->final_offset = vdsc_cfg->rc_model_size - + (vdsc_cfg->initial_xmit_delay * + vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits; + + if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) { + DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n"); + return -ERANGE; + } + + final_scale = (vdsc_cfg->rc_model_size * 8) / + (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset); + if (vdsc_cfg->slice_height > 1) + /* + * NflBpgOffset is 16 bit value with 11 fractional bits + * hence we multiply by 2^11 for preserving the + * fractional part + */ + vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11), + (vdsc_cfg->slice_height - 1)); + else + vdsc_cfg->nfl_bpg_offset = 0; + + /* 2^16 - 1 */ + if (vdsc_cfg->nfl_bpg_offset > 65535) { + DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n"); + return -ERANGE; + } + + /* Number of groups used to code the entire slice */ + groups_total = groups_per_line * vdsc_cfg->slice_height; + + /* slice_bpg_offset is 16 bit value with 11 fractional bits */ + vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size - + vdsc_cfg->initial_offset + + num_extra_mux_bits) << 11), + groups_total); + + if (final_scale > 9) { + /* + * ScaleIncrementInterval = + * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125)) + * as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value, + * we need 
divide by 2^11 from pstDscCfg values + */ + vdsc_cfg->scale_increment_interval = + (vdsc_cfg->final_offset * (1 << 11)) / + ((vdsc_cfg->nfl_bpg_offset + + vdsc_cfg->slice_bpg_offset) * + (final_scale - 9)); + } else { + /* + * If finalScaleValue is less than or equal to 9, a value of 0 should + * be used to disable the scale increment at the end of the slice + */ + vdsc_cfg->scale_increment_interval = 0; + } + + if (vdsc_cfg->scale_increment_interval > 65535) { + DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n"); + return -ERANGE; + } + + /* + * DSC spec mentions that bits_per_pixel specifies the target + * bits/pixel (bpp) rate that is used by the encoder, + * in steps of 1/16 of a bit per pixel + */ + rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset + + DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay * + vdsc_cfg->bits_per_pixel, 16) + + groups_per_line * vdsc_cfg->first_line_bpg_offset; + + hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel); + vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; + vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; + + return 0; +} +EXPORT_SYMBOL(drm_dsc_compute_rc_parameters); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 990b1909f9d7..fa39592ebc0a 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -193,6 +193,12 @@ static const struct edid_quirk { /* Sony PlayStation VR Headset */ { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP }, + + /* Sensics VR Headsets */ + { "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP }, + + /* OSVR HDK and HDK2 VR Headsets */ + { "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP }, }; /* @@ -4924,6 +4930,76 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, } EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode); +/* HDMI Colorspace Spec Definitions */ +#define FULL_COLORIMETRY_MASK 0x1FF +#define NORMAL_COLORIMETRY_MASK 0x3 +#define EXTENDED_COLORIMETRY_MASK 0x7 +#define EXTENDED_ACE_COLORIMETRY_MASK 0xF + +#define C(x) ((x) << 0) +#define EC(x) ((x) << 2) +#define ACE(x) ((x) << 5) + +#define HDMI_COLORIMETRY_NO_DATA 0x0 +#define HDMI_COLORIMETRY_SMPTE_170M_YCC (C(1) | EC(0) | ACE(0)) +#define HDMI_COLORIMETRY_BT709_YCC (C(2) | EC(0) | ACE(0)) +#define HDMI_COLORIMETRY_XVYCC_601 (C(3) | EC(0) | ACE(0)) +#define HDMI_COLORIMETRY_XVYCC_709 (C(3) | EC(1) | ACE(0)) +#define HDMI_COLORIMETRY_SYCC_601 (C(3) | EC(2) | ACE(0)) +#define HDMI_COLORIMETRY_OPYCC_601 (C(3) | EC(3) | ACE(0)) +#define HDMI_COLORIMETRY_OPRGB (C(3) | EC(4) | ACE(0)) +#define HDMI_COLORIMETRY_BT2020_CYCC (C(3) | EC(5) | ACE(0)) +#define HDMI_COLORIMETRY_BT2020_RGB (C(3) | EC(6) | ACE(0)) +#define HDMI_COLORIMETRY_BT2020_YCC (C(3) | EC(6) | ACE(0)) +#define HDMI_COLORIMETRY_DCI_P3_RGB_D65 (C(3) | EC(7) | ACE(0)) +#define HDMI_COLORIMETRY_DCI_P3_RGB_THEATER (C(3) | EC(7) | ACE(1)) + +static const u32 hdmi_colorimetry_val[] = { + [DRM_MODE_COLORIMETRY_NO_DATA] = HDMI_COLORIMETRY_NO_DATA, + [DRM_MODE_COLORIMETRY_SMPTE_170M_YCC] = HDMI_COLORIMETRY_SMPTE_170M_YCC, + [DRM_MODE_COLORIMETRY_BT709_YCC] = HDMI_COLORIMETRY_BT709_YCC, + [DRM_MODE_COLORIMETRY_XVYCC_601] = HDMI_COLORIMETRY_XVYCC_601, + [DRM_MODE_COLORIMETRY_XVYCC_709] = HDMI_COLORIMETRY_XVYCC_709, + [DRM_MODE_COLORIMETRY_SYCC_601] = HDMI_COLORIMETRY_SYCC_601, + [DRM_MODE_COLORIMETRY_OPYCC_601] = HDMI_COLORIMETRY_OPYCC_601, + [DRM_MODE_COLORIMETRY_OPRGB] = HDMI_COLORIMETRY_OPRGB, + [DRM_MODE_COLORIMETRY_BT2020_CYCC] = HDMI_COLORIMETRY_BT2020_CYCC, + [DRM_MODE_COLORIMETRY_BT2020_RGB] 
= HDMI_COLORIMETRY_BT2020_RGB, + [DRM_MODE_COLORIMETRY_BT2020_YCC] = HDMI_COLORIMETRY_BT2020_YCC, +}; + +#undef C +#undef EC +#undef ACE + +/** + * drm_hdmi_avi_infoframe_colorspace() - fill the HDMI AVI infoframe + * colorspace information + * @frame: HDMI AVI infoframe + * @conn_state: connector state + */ +void +drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state) +{ + u32 colorimetry_val; + u32 colorimetry_index = conn_state->colorspace & FULL_COLORIMETRY_MASK; + + if (colorimetry_index >= ARRAY_SIZE(hdmi_colorimetry_val)) + colorimetry_val = HDMI_COLORIMETRY_NO_DATA; + else + colorimetry_val = hdmi_colorimetry_val[colorimetry_index]; + + frame->colorimetry = colorimetry_val & NORMAL_COLORIMETRY_MASK; + /* + * ToDo: Extend it for ACE formats as well. Modify the infoframe + * structure and extend it in drivers/video/hdmi + */ + frame->extended_colorimetry = (colorimetry_val >> 2) & + EXTENDED_COLORIMETRY_MASK; +} +EXPORT_SYMBOL(drm_hdmi_avi_infoframe_colorspace); + /** * drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe * quantization range information diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 0e9349ff2d16..04d23cb430bf 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -3024,7 +3024,8 @@ static int drm_fbdev_fb_open(struct fb_info *info, int user) { struct drm_fb_helper *fb_helper = info->par; - if (!try_module_get(fb_helper->dev->driver->fops->owner)) + /* No need to take a ref for fbcon because it unbinds on unregister */ + if (user && !try_module_get(fb_helper->dev->driver->fops->owner)) return -ENODEV; return 0; @@ -3034,7 +3035,8 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user) { struct drm_fb_helper *fb_helper = info->par; - module_put(fb_helper->dev->driver->fops->owner); + if (user) + module_put(fb_helper->dev->driver->fops->owner); return 0; } diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 83a5bbca6e7e..9701469a6e93 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp) drm_close_helper(filp); - if (!--dev->open_count) { + if (!--dev->open_count) drm_lastclose(dev); - if (drm_dev_is_unplugged(dev)) - drm_put_dev(dev); - } + mutex_unlock(&drm_global_mutex); drm_minor_release(minor); @@ -579,6 +577,7 @@ put_back_event: file_priv->event_space -= length; list_add(&e->link, &file_priv->event_list); spin_unlock_irq(&dev->event_lock); + wake_up_interruptible(&file_priv->event_wait); break; } diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 9163e807579a..6ea55fb4526d 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -229,13 +229,17 @@ const struct drm_format_info *__drm_format_info(u32 format) { .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_VUY888, .depth = 0, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, { .format = DRM_FORMAT_Y210, 
.depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_Y212, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_Y216, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_Y410, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_Y412, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_Y416, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_Y410, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, + { .format = DRM_FORMAT_Y412, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, + { .format = DRM_FORMAT_Y416, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true }, + { .format = DRM_FORMAT_XVYU2101010, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_XVYU12_16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_XVYU16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1, .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 }, .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true }, @@ -257,6 +261,19 @@ const struct drm_format_info *__drm_format_info(u32 format) { .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2, .char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true}, + { .format = DRM_FORMAT_P210, .depth = 0, + .num_planes = 2, .char_per_block = { 2, 4, 0 }, + .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 2, + .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_VUY101010, .depth = 0, + .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 1, .vsub = 1, + .is_yuv = true }, + { .format = DRM_FORMAT_YUV420_8BIT, .depth = 0, + .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2, + .is_yuv = true }, + { .format = DRM_FORMAT_YUV420_10BIT, .depth = 0, + .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2, + .is_yuv = true }, }; unsigned int i; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index d0b9f6a9953f..388b3742e562 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -171,6 +171,10 @@ void drm_gem_private_object_init(struct drm_device *dev, kref_init(&obj->refcount); obj->handle_count = 0; obj->size = size; + reservation_object_init(&obj->_resv); + if (!obj->resv) + obj->resv = &obj->_resv; + drm_vma_node_reset(&obj->vma_node); } EXPORT_SYMBOL(drm_gem_private_object_init); @@ -688,6 +692,44 @@ drm_gem_object_lookup(struct drm_file *filp, u32 handle) EXPORT_SYMBOL(drm_gem_object_lookup); /** + * drm_gem_reservation_object_wait - Wait on GEM object's reservation's objects + * shared and/or exclusive fences. 
+ * @filep: DRM file private date + * @handle: userspace handle + * @wait_all: if true, wait on all fences, else wait on just exclusive fence + * @timeout: timeout value in jiffies or zero to return immediately + * + * Returns: + * + * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or + * greater than 0 on success. + */ +long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, + bool wait_all, unsigned long timeout) +{ + long ret; + struct drm_gem_object *obj; + + obj = drm_gem_object_lookup(filep, handle); + if (!obj) { + DRM_DEBUG("Failed to look up GEM BO %d\n", handle); + return -EINVAL; + } + + ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all, + true, timeout); + if (ret == 0) + ret = -ETIME; + else if (ret > 0) + ret = 0; + + drm_gem_object_put_unlocked(obj); + + return ret; +} +EXPORT_SYMBOL(drm_gem_reservation_object_wait); + +/** * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl * @dev: drm_device * @data: ioctl data @@ -851,6 +893,7 @@ drm_gem_object_release(struct drm_gem_object *obj) if (obj->filp) fput(obj->filp); + reservation_object_fini(&obj->_resv); drm_gem_free_mmap_offset(obj); } EXPORT_SYMBOL(drm_gem_object_release); @@ -1190,3 +1233,81 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr) obj->dev->driver->gem_prime_vunmap(obj, vaddr); } EXPORT_SYMBOL(drm_gem_vunmap); + +/** + * drm_gem_lock_reservations - Sets up the ww context and acquires + * the lock on an array of GEM objects. + * + * Once you've locked your reservations, you'll want to set up space + * for your shared fences (if applicable), submit your job, then + * drm_gem_unlock_reservations(). + * + * @objs: drm_gem_objects to lock + * @count: Number of objects in @objs + * @acquire_ctx: struct ww_acquire_ctx that will be initialized as + * part of tracking this set of locked reservations. 
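/*
 * Editor's illustration (not part of this patch): the lock / reserve /
 * attach-fence / unlock pattern described in the drm_gem_lock_reservations()
 * comment above.  The submit flow and "out_fence" are placeholders for a
 * driver's real job submission.
 */
static int hypothetical_submit(struct drm_gem_object **objs, int count,
			       struct dma_fence *out_fence)
{
	struct ww_acquire_ctx ctx;
	int i, ret;

	ret = drm_gem_lock_reservations(objs, count, &ctx);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* Make room for the shared fence before queueing the job. */
		ret = reservation_object_reserve_shared(objs[i]->resv, 1);
		if (ret)
			goto out_unlock;
	}

	/* ... queue the job to the hardware here ... */

	for (i = 0; i < count; i++)
		reservation_object_add_shared_fence(objs[i]->resv, out_fence);

out_unlock:
	drm_gem_unlock_reservations(objs, count, &ctx);
	return ret;
}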
+ */ +int +drm_gem_lock_reservations(struct drm_gem_object **objs, int count, + struct ww_acquire_ctx *acquire_ctx) +{ + int contended = -1; + int i, ret; + + ww_acquire_init(acquire_ctx, &reservation_ww_class); + +retry: + if (contended != -1) { + struct drm_gem_object *obj = objs[contended]; + + ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock, + acquire_ctx); + if (ret) { + ww_acquire_done(acquire_ctx); + return ret; + } + } + + for (i = 0; i < count; i++) { + if (i == contended) + continue; + + ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock, + acquire_ctx); + if (ret) { + int j; + + for (j = 0; j < i; j++) + ww_mutex_unlock(&objs[j]->resv->lock); + + if (contended != -1 && contended >= i) + ww_mutex_unlock(&objs[contended]->resv->lock); + + if (ret == -EDEADLK) { + contended = i; + goto retry; + } + + ww_acquire_done(acquire_ctx); + return ret; + } + } + + ww_acquire_done(acquire_ctx); + + return 0; +} +EXPORT_SYMBOL(drm_gem_lock_reservations); + +void +drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, + struct ww_acquire_ctx *acquire_ctx) +{ + int i; + + for (i = 0; i < count; i++) + ww_mutex_unlock(&objs[i]->resv->lock); + + ww_acquire_fini(acquire_ctx); +} +EXPORT_SYMBOL(drm_gem_unlock_reservations); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c new file mode 100644 index 000000000000..3750a982aaf6 --- /dev/null +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -0,0 +1,625 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2018 Noralf Trønnes + */ + +#include <linux/dma-buf.h> +#include <linux/export.h> +#include <linux/mutex.h> +#include <linux/shmem_fs.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +#include <drm/drm_device.h> +#include <drm/drm_drv.h> +#include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_prime.h> +#include <drm/drm_print.h> + +/** + * DOC: overview + * + * This library provides helpers for GEM objects backed by shmem buffers + * allocated using anonymous pageable memory. + */ + +static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { + .free = drm_gem_shmem_free_object, + .print_info = drm_gem_shmem_print_info, + .pin = drm_gem_shmem_pin, + .unpin = drm_gem_shmem_unpin, + .get_sg_table = drm_gem_shmem_get_sg_table, + .vmap = drm_gem_shmem_vmap, + .vunmap = drm_gem_shmem_vunmap, + .vm_ops = &drm_gem_shmem_vm_ops, +}; + +/** + * drm_gem_shmem_create - Allocate an object with the given size + * @dev: DRM device + * @size: Size of the object to allocate + * + * This function creates a shmem GEM object. + * + * Returns: + * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative + * error code on failure. + */ +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size) +{ + struct drm_gem_shmem_object *shmem; + struct drm_gem_object *obj; + int ret; + + size = PAGE_ALIGN(size); + + if (dev->driver->gem_create_object) + obj = dev->driver->gem_create_object(dev, size); + else + obj = kzalloc(sizeof(*shmem), GFP_KERNEL); + if (!obj) + return ERR_PTR(-ENOMEM); + + if (!obj->funcs) + obj->funcs = &drm_gem_shmem_funcs; + + ret = drm_gem_object_init(dev, obj, size); + if (ret) + goto err_free; + + ret = drm_gem_create_mmap_offset(obj); + if (ret) + goto err_release; + + shmem = to_drm_gem_shmem_obj(obj); + mutex_init(&shmem->pages_lock); + mutex_init(&shmem->vmap_lock); + + /* + * Our buffers are kept pinned, so allocating them + * from the MOVABLE zone is a really bad idea, and + * conflicts with CMA. 
See comments above new_inode() + * why this is required _and_ expected if you're + * going to pin these pages. + */ + mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER | + __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + + return shmem; + +err_release: + drm_gem_object_release(obj); +err_free: + kfree(obj); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_create); + +/** + * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object + * @obj: GEM object to free + * + * This function cleans up the GEM object state and frees the memory used to + * store the object itself. + */ +void drm_gem_shmem_free_object(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + WARN_ON(shmem->vmap_use_count); + + if (obj->import_attach) { + shmem->pages_use_count--; + drm_prime_gem_destroy(obj, shmem->sgt); + kvfree(shmem->pages); + } else { + if (shmem->sgt) { + dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl, + shmem->sgt->nents, DMA_BIDIRECTIONAL); + + drm_gem_shmem_put_pages(shmem); + sg_free_table(shmem->sgt); + kfree(shmem->sgt); + } + } + + WARN_ON(shmem->pages_use_count); + + drm_gem_object_release(obj); + mutex_destroy(&shmem->pages_lock); + mutex_destroy(&shmem->vmap_lock); + kfree(shmem); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object); + +static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) +{ + struct drm_gem_object *obj = &shmem->base; + struct page **pages; + + if (shmem->pages_use_count++ > 0) + return 0; + + pages = drm_gem_get_pages(obj); + if (IS_ERR(pages)) { + DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages)); + shmem->pages_use_count = 0; + return PTR_ERR(pages); + } + + shmem->pages = pages; + + return 0; +} + +/* + * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object + * @shmem: shmem GEM object + * + * This function makes sure that backing pages exists for the shmem GEM object + * and increases the use count. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) +{ + int ret; + + ret = mutex_lock_interruptible(&shmem->pages_lock); + if (ret) + return ret; + ret = drm_gem_shmem_get_pages_locked(shmem); + mutex_unlock(&shmem->pages_lock); + + return ret; +} +EXPORT_SYMBOL(drm_gem_shmem_get_pages); + +static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) +{ + struct drm_gem_object *obj = &shmem->base; + + if (WARN_ON_ONCE(!shmem->pages_use_count)) + return; + + if (--shmem->pages_use_count > 0) + return; + + drm_gem_put_pages(obj, shmem->pages, + shmem->pages_mark_dirty_on_put, + shmem->pages_mark_accessed_on_put); + shmem->pages = NULL; +} + +/* + * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object + * @shmem: shmem GEM object + * + * This function decreases the use count and puts the backing pages when use drops to zero. + */ +void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) +{ + mutex_lock(&shmem->pages_lock); + drm_gem_shmem_put_pages_locked(shmem); + mutex_unlock(&shmem->pages_lock); +} +EXPORT_SYMBOL(drm_gem_shmem_put_pages); + +/** + * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object + * @obj: GEM object + * + * This function makes sure the backing pages are pinned in memory while the + * buffer is exported. + * + * Returns: + * 0 on success or a negative error code on failure. 
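/*
 * Editor's illustration (not part of this patch): a driver using the shmem
 * vmap/vunmap helpers added further down in this file to fill a buffer from
 * the CPU.  hypothetical_fill_bo() and "src" are placeholders.
 */
static int hypothetical_fill_bo(struct drm_gem_shmem_object *shmem,
				const void *src, size_t len)
{
	void *vaddr;

	vaddr = drm_gem_shmem_vmap(&shmem->base);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, src, min_t(size_t, len, shmem->base.size));

	drm_gem_shmem_vunmap(&shmem->base, vaddr);

	return 0;
}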
+ */ +int drm_gem_shmem_pin(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + return drm_gem_shmem_get_pages(shmem); +} +EXPORT_SYMBOL(drm_gem_shmem_pin); + +/** + * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object + * @obj: GEM object + * + * This function removes the requirement that the backing pages are pinned in + * memory. + */ +void drm_gem_shmem_unpin(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + drm_gem_shmem_put_pages(shmem); +} +EXPORT_SYMBOL(drm_gem_shmem_unpin); + +static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) +{ + struct drm_gem_object *obj = &shmem->base; + int ret; + + if (shmem->vmap_use_count++ > 0) + return shmem->vaddr; + + ret = drm_gem_shmem_get_pages(shmem); + if (ret) + goto err_zero_use; + + if (obj->import_attach) + shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf); + else + shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL); + + if (!shmem->vaddr) { + DRM_DEBUG_KMS("Failed to vmap pages\n"); + ret = -ENOMEM; + goto err_put_pages; + } + + return shmem->vaddr; + +err_put_pages: + drm_gem_shmem_put_pages(shmem); +err_zero_use: + shmem->vmap_use_count = 0; + + return ERR_PTR(ret); +} + +/* + * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object + * @shmem: shmem GEM object + * + * This function makes sure that a virtual address exists for the buffer backing + * the shmem GEM object. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +void *drm_gem_shmem_vmap(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + void *vaddr; + int ret; + + ret = mutex_lock_interruptible(&shmem->vmap_lock); + if (ret) + return ERR_PTR(ret); + vaddr = drm_gem_shmem_vmap_locked(shmem); + mutex_unlock(&shmem->vmap_lock); + + return vaddr; +} +EXPORT_SYMBOL(drm_gem_shmem_vmap); + +static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) +{ + struct drm_gem_object *obj = &shmem->base; + + if (WARN_ON_ONCE(!shmem->vmap_use_count)) + return; + + if (--shmem->vmap_use_count > 0) + return; + + if (obj->import_attach) + dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr); + else + vunmap(shmem->vaddr); + + shmem->vaddr = NULL; + drm_gem_shmem_put_pages(shmem); +} + +/* + * drm_gem_shmem_vunmap - Unmap a virtual mapping fo a shmem GEM object + * @shmem: shmem GEM object + * + * This function removes the virtual address when use count drops to zero. + */ +void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + mutex_lock(&shmem->vmap_lock); + drm_gem_shmem_vunmap_locked(shmem); + mutex_unlock(&shmem->vmap_lock); +} +EXPORT_SYMBOL(drm_gem_shmem_vunmap); + +struct drm_gem_shmem_object * +drm_gem_shmem_create_with_handle(struct drm_file *file_priv, + struct drm_device *dev, size_t size, + uint32_t *handle) +{ + struct drm_gem_shmem_object *shmem; + int ret; + + shmem = drm_gem_shmem_create(dev, size); + if (IS_ERR(shmem)) + return shmem; + + /* + * Allocate an id of idr table where the obj is registered + * and handle has the id what user can see. + */ + ret = drm_gem_handle_create(file_priv, &shmem->base, handle); + /* drop reference from allocate - handle holds it now. 
*/ + drm_gem_object_put_unlocked(&shmem->base); + if (ret) + return ERR_PTR(ret); + + return shmem; +} +EXPORT_SYMBOL(drm_gem_shmem_create_with_handle); + +/** + * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object + * @file: DRM file structure to create the dumb buffer for + * @dev: DRM device + * @args: IOCTL data + * + * This function computes the pitch of the dumb buffer and rounds it up to an + * integer number of bytes per pixel. Drivers for hardware that doesn't have + * any additional restrictions on the pitch can directly use this function as + * their &drm_driver.dumb_create callback. + * + * For hardware with additional restrictions, drivers can adjust the fields + * set up by userspace before calling into this function. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + struct drm_gem_shmem_object *shmem; + + if (!args->pitch || !args->size) { + args->pitch = min_pitch; + args->size = args->pitch * args->height; + } else { + /* ensure sane minimum values */ + if (args->pitch < min_pitch) + args->pitch = min_pitch; + if (args->size < args->pitch * args->height) + args->size = args->pitch * args->height; + } + + shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle); + + return PTR_ERR_OR_ZERO(shmem); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create); + +static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + loff_t num_pages = obj->size >> PAGE_SHIFT; + struct page *page; + + if (vmf->pgoff > num_pages || WARN_ON_ONCE(!shmem->pages)) + return VM_FAULT_SIGBUS; + + page = shmem->pages[vmf->pgoff]; + + return vmf_insert_page(vma, vmf->address, page); +} + +static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + int ret; + + ret = drm_gem_shmem_get_pages(shmem); + WARN_ON_ONCE(ret != 0); + + drm_gem_vm_open(vma); +} + +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) +{ + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + drm_gem_shmem_put_pages(shmem); + drm_gem_vm_close(vma); +} + +const struct vm_operations_struct drm_gem_shmem_vm_ops = { + .fault = drm_gem_shmem_fault, + .open = drm_gem_shmem_vm_open, + .close = drm_gem_shmem_vm_close, +}; +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops); + +/** + * drm_gem_shmem_mmap - Memory-map a shmem GEM object + * @filp: File object + * @vma: VMA for the area to be mapped + * + * This function implements an augmented version of the GEM DRM file mmap + * operation for shmem objects. Drivers which employ the shmem helpers should + * use this function as their &file_operations.mmap handler in the DRM device file's + * file_operations structure. + * + * Instead of directly referencing this function, drivers should use the + * DEFINE_DRM_GEM_SHMEM_FOPS() macro. + * + * Returns: + * 0 on success or a negative error code on failure. 
+ */ +int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_gem_shmem_object *shmem; + int ret; + + ret = drm_gem_mmap(filp, vma); + if (ret) + return ret; + + shmem = to_drm_gem_shmem_obj(vma->vm_private_data); + + ret = drm_gem_shmem_get_pages(shmem); + if (ret) { + drm_gem_vm_close(vma); + return ret; + } + + /* VM_PFNMAP was set by drm_gem_mmap() */ + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_MIXEDMAP; + + /* Remove the fake offset */ + vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node); + + return 0; +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap); + +/** + * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs + * @p: DRM printer + * @indent: Tab indentation level + * @obj: GEM object + */ +void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj) +{ + const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count); + drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count); + drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr); +} +EXPORT_SYMBOL(drm_gem_shmem_print_info); + +/** + * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned + * pages for a shmem GEM object + * @obj: GEM object + * + * This function exports a scatter/gather table suitable for PRIME usage by + * calling the standard DMA mapping API. + * + * Returns: + * A pointer to the scatter/gather table of pinned pages or NULL on failure. + */ +struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + + return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table); + +/** + * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a + * scatter/gather table for a shmem GEM object. + * @obj: GEM object + * + * This function returns a scatter/gather table suitable for driver usage. If + * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg + * table created. + * + * Returns: + * A pointer to the scatter/gather table of pinned pages or errno on failure. + */ +struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj) +{ + int ret; + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + struct sg_table *sgt; + + if (shmem->sgt) + return shmem->sgt; + + WARN_ON(obj->import_attach); + + ret = drm_gem_shmem_get_pages(shmem); + if (ret) + return ERR_PTR(ret); + + sgt = drm_gem_shmem_get_sg_table(&shmem->base); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + goto err_put_pages; + } + /* Map the pages for use by the h/w. */ + dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); + + shmem->sgt = sgt; + + return sgt; + +err_put_pages: + drm_gem_shmem_put_pages(shmem); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt); + +/** + * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from + * another driver's scatter/gather table of pinned pages + * @dev: Device to import into + * @attach: DMA-BUF attachment + * @sgt: Scatter/gather table of pinned pages + * + * This function imports a scatter/gather table exported via DMA-BUF by + * another driver. Drivers that use the shmem helpers should set this as their + * &drm_driver.gem_prime_import_sg_table callback. 
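/*
 * Editor's illustration (not part of this patch): a plausible minimal wiring of
 * the shmem helpers into a driver, combining the DEFINE_DRM_GEM_SHMEM_FOPS()
 * macro referenced in the mmap comment above with the dumb-buffer and PRIME
 * callbacks documented in this file.  The "hypothetical" names and the exact
 * feature-flag set are assumptions, not mandated by the helper.
 */
DEFINE_DRM_GEM_SHMEM_FOPS(hypothetical_fops);

static struct drm_driver hypothetical_driver = {
	.driver_features	   = DRIVER_GEM | DRIVER_MODESET |
				     DRIVER_PRIME | DRIVER_ATOMIC,
	.fops			   = &hypothetical_fops,
	.dumb_create		   = drm_gem_shmem_dumb_create,
	.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	.gem_prime_mmap		   = drm_gem_prime_mmap,
	/* .name, .desc, .date, .major, .minor as usual */
};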
+ * + * Returns: + * A pointer to a newly created GEM object or an ERR_PTR-encoded negative + * error code on failure. + */ +struct drm_gem_object * +drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + size_t size = PAGE_ALIGN(attach->dmabuf->size); + size_t npages = size >> PAGE_SHIFT; + struct drm_gem_shmem_object *shmem; + int ret; + + shmem = drm_gem_shmem_create(dev, size); + if (IS_ERR(shmem)) + return ERR_CAST(shmem); + + shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); + if (!shmem->pages) { + ret = -ENOMEM; + goto err_free_gem; + } + + ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages); + if (ret < 0) + goto err_free_array; + + shmem->sgt = sgt; + shmem->pages_use_count = 1; /* Permanently pinned from our point of view */ + + DRM_DEBUG_PRIME("size = %zu\n", size); + + return &shmem->base; + +err_free_array: + kvfree(shmem->pages); +err_free_gem: + drm_gem_object_put_unlocked(&shmem->base); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table); diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 67b1fca39aa6..0e3043e08c69 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -185,7 +185,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd, m32.size = map.size; m32.type = map.type; m32.flags = map.flags; - m32.handle = ptr_to_compat(map.handle); + m32.handle = ptr_to_compat((void __user *)map.handle); m32.mtrr = map.mtrr; if (copy_to_user(argp, &m32, sizeof(m32))) return -EFAULT; @@ -216,7 +216,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd, m32.offset = map.offset; m32.mtrr = map.mtrr; - m32.handle = ptr_to_compat(map.handle); + m32.handle = ptr_to_compat((void __user *)map.handle); if (map.handle != compat_ptr(m32.handle)) pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n", map.handle, m32.type, m32.offset); @@ -526,7 +526,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd, if (err) return err; - req32.handle = ptr_to_compat(req.handle); + req32.handle = ptr_to_compat((void __user *)req.handle); if (copy_to_user(argp, &req32, sizeof(req32))) return -EFAULT; diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c index 93e2b30fe1a5..9c5ae825c507 100644 --- a/drivers/gpu/drm/drm_kms_helper_common.c +++ b/drivers/gpu/drm/drm_kms_helper_common.c @@ -39,7 +39,7 @@ MODULE_LICENSE("GPL and additional rights"); /* Backward compatibility for drm_kms_helper.edid_firmware */ static int edid_firmware_set(const char *val, const struct kernel_param *kp) { - DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware intead.\n"); + DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware instead.\n"); return __drm_set_edid_firmware_path(val); } diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index 40c4349cb939..8dbcdc77f6bf 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -35,6 +35,7 @@ #include <linux/highmem.h> #include <linux/export.h> +#include <xen/xen.h> #include <drm/drmP.h> #include "drm_legacy.h" @@ -150,15 +151,27 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev) } EXPORT_SYMBOL(drm_legacy_ioremapfree); -u64 drm_get_max_iomem(void) +bool drm_need_swiotlb(int dma_bits) { struct resource *tmp; resource_size_t max_iomem = 
0; + /* + * Xen paravirtual hosts require swiotlb regardless of requested dma + * transfer size. + * + * NOTE: Really, what it requires is use of the dma_alloc_coherent + * allocator used in ttm_dma_populate() instead of + * ttm_populate_and_map_pages(), which bounce buffers so much in + * Xen it leads to swiotlb buffer exhaustion. + */ + if (xen_pv_domain()) + return true; + for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) { max_iomem = max(max_iomem, tmp->end); } - return max_iomem; + return max_iomem > ((u64)1 << dma_bits); } -EXPORT_SYMBOL(drm_get_max_iomem); +EXPORT_SYMBOL(drm_need_swiotlb); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 869ac6f4671e..56f92a0bba62 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -655,22 +655,22 @@ EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode); * @bus_flags: information about pixelclk, sync and DE polarity will be stored * here * - * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE and - * DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS + * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_DRIVE_(POS|NEG)EDGE + * and DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS * found in @vm */ void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags) { *bus_flags = 0; if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE) - *bus_flags |= DRM_BUS_FLAG_PIXDATA_POSEDGE; + *bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE) - *bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE; + *bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE) - *bus_flags |= DRM_BUS_FLAG_SYNC_POSEDGE; + *bus_flags |= DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE; if (vm->flags & DISPLAY_FLAGS_SYNC_NEGEDGE) - *bus_flags |= DRM_BUS_FLAG_SYNC_NEGEDGE; + *bus_flags |= DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE; if (vm->flags & DISPLAY_FLAGS_DE_LOW) *bus_flags |= DRM_BUS_FLAG_DE_LOW; diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 52e445bb1aa5..521aff99b08a 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -80,6 +80,12 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = { + .width = 1200, + .height = 1920, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct dmi_system_id orientation_data[] = { { /* Acer One 10 (S1003) */ .matches = { @@ -148,6 +154,13 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Lenovo Ideapad D330 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* VIOS LTH17 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"), diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 231e3f6d5f41..dc079efb3b0f 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -504,6 +504,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev, .size = obj->size, .flags = flags, .priv = obj, + .resv = 
obj->resv, }; if (dev->driver->gem_prime_res_obj) diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index e19525af0cce..5329e66598c6 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -731,7 +731,7 @@ cleanup_entries: * * Calculate the timeout in jiffies from an absolute time in sec/nsec. */ -static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec) +signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec) { ktime_t abs_timeout, now; u64 timeout_ns, timeout_jiffies64; @@ -755,6 +755,7 @@ static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec) return timeout_jiffies64 + 1; } +EXPORT_SYMBOL(drm_timeout_abs_to_jiffies); static int drm_syncobj_array_wait(struct drm_device *dev, struct drm_file *file_private, diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index c3301046dfaa..8987501f53b2 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -584,8 +584,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) vma->vm_ops = &drm_vm_ops; break; } - /* fall through to _DRM_FRAME_BUFFER... */ #endif + /* fall through - to _DRM_FRAME_BUFFER... */ case _DRM_FRAME_BUFFER: case _DRM_REGISTERS: offset = drm_core_get_reg_ofs(dev); @@ -610,7 +610,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; vma->vm_page_prot = drm_dma_prot(map->type, vma); - /* fall through to _DRM_SHM */ + /* fall through - to _DRM_SHM */ case _DRM_SHM: vma->vm_ops = &drm_vm_shm_ops; vma->vm_private_data = (void *)map; diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c index c20e6fe00cb3..79ac014701c8 100644 --- a/drivers/gpu/drm/drm_writeback.c +++ b/drivers/gpu/drm/drm_writeback.c @@ -239,14 +239,52 @@ fail: } EXPORT_SYMBOL(drm_writeback_connector_init); +int drm_writeback_set_fb(struct drm_connector_state *conn_state, + struct drm_framebuffer *fb) +{ + WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK); + + if (!conn_state->writeback_job) { + conn_state->writeback_job = + kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL); + if (!conn_state->writeback_job) + return -ENOMEM; + + conn_state->writeback_job->connector = + drm_connector_to_writeback(conn_state->connector); + } + + drm_framebuffer_assign(&conn_state->writeback_job->fb, fb); + return 0; +} + +int drm_writeback_prepare_job(struct drm_writeback_job *job) +{ + struct drm_writeback_connector *connector = job->connector; + const struct drm_connector_helper_funcs *funcs = + connector->base.helper_private; + int ret; + + if (funcs->prepare_writeback_job) { + ret = funcs->prepare_writeback_job(connector, job); + if (ret < 0) + return ret; + } + + job->prepared = true; + return 0; +} +EXPORT_SYMBOL(drm_writeback_prepare_job); + /** * drm_writeback_queue_job - Queue a writeback job for later signalling * @wb_connector: The writeback connector to queue a job on - * @job: The job to queue + * @conn_state: The connector state containing the job to queue * - * This function adds a job to the job_queue for a writeback connector. It - * should be considered to take ownership of the writeback job, and so any other - * references to the job must be cleared after calling this function. + * This function adds the job contained in @conn_state to the job_queue for a + * writeback connector. 
It takes ownership of the writeback job and sets the + * @conn_state->writeback_job to NULL, and so no access to the job may be + * performed by the caller after this function returns. * * Drivers must ensure that for a given writeback connector, jobs are queued in * exactly the same order as they will be completed by the hardware (and @@ -258,16 +296,36 @@ EXPORT_SYMBOL(drm_writeback_connector_init); * See also: drm_writeback_signal_completion() */ void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector, - struct drm_writeback_job *job) + struct drm_connector_state *conn_state) { + struct drm_writeback_job *job; unsigned long flags; + job = conn_state->writeback_job; + conn_state->writeback_job = NULL; + spin_lock_irqsave(&wb_connector->job_lock, flags); list_add_tail(&job->list_entry, &wb_connector->job_queue); spin_unlock_irqrestore(&wb_connector->job_lock, flags); } EXPORT_SYMBOL(drm_writeback_queue_job); +void drm_writeback_cleanup_job(struct drm_writeback_job *job) +{ + struct drm_writeback_connector *connector = job->connector; + const struct drm_connector_helper_funcs *funcs = + connector->base.helper_private; + + if (job->prepared && funcs->cleanup_writeback_job) + funcs->cleanup_writeback_job(connector, job); + + if (job->fb) + drm_framebuffer_put(job->fb); + + kfree(job); +} +EXPORT_SYMBOL(drm_writeback_cleanup_job); + /* * @cleanup_work: deferred cleanup of a writeback job * @@ -280,10 +338,9 @@ static void cleanup_work(struct work_struct *work) struct drm_writeback_job *job = container_of(work, struct drm_writeback_job, cleanup_work); - drm_framebuffer_put(job->fb); - kfree(job); -} + drm_writeback_cleanup_job(job); +} /** * drm_writeback_signal_completion - Signal the completion of a writeback job diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig index 041a77e400d4..21df44b78df3 100644 --- a/drivers/gpu/drm/etnaviv/Kconfig +++ b/drivers/gpu/drm/etnaviv/Kconfig @@ -2,7 +2,6 @@ config DRM_ETNAVIV tristate "ETNAVIV (DRM support for Vivante GPU IP cores)" depends on DRM - depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST) depends on MMU select SHMEM select SYNC_FILE diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h index acb68c698363..4d5d1a77eb2a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h @@ -15,8 +15,6 @@ struct etnaviv_perfmon_request; struct etnaviv_cmdbuf { /* suballocator this cmdbuf is allocated from */ struct etnaviv_cmdbuf_suballoc *suballoc; - /* user context key, must be unique between all active users */ - struct etnaviv_file_private *ctx; /* cmdbuf properties */ int suballoc_offset; void *vaddr; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 18c27f795cf6..9f42f7538236 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -473,7 +473,6 @@ static struct drm_driver etnaviv_drm_driver = { .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, - .gem_prime_res_obj = etnaviv_gem_prime_res_obj, .gem_prime_pin = etnaviv_gem_prime_pin, .gem_prime_unpin = etnaviv_gem_prime_unpin, .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index a6a7ded37ef1..6044ace6bb3e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h 
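/*
 * Editor's illustration (not part of this patch): the new
 * drm_writeback_queue_job() calling convention above, as seen from a driver's
 * atomic commit path and its write-back completion interrupt.  The
 * hypothetical_* functions are placeholders.
 */
static void hypothetical_commit_writeback(struct drm_writeback_connector *wb_conn,
					  struct drm_connector_state *conn_state)
{
	if (!conn_state->writeback_job)
		return;

	/* The core takes the job and clears conn_state->writeback_job. */
	drm_writeback_queue_job(wb_conn, conn_state);

	/* ... program the hardware to write the frame out ... */
}

static void hypothetical_writeback_irq(struct drm_writeback_connector *wb_conn)
{
	/* Completes the oldest queued job and signals its out-fence. */
	drm_writeback_signal_completion(wb_conn, 0);
}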
@@ -60,7 +60,6 @@ void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj); void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int etnaviv_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); -struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj); struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int etnaviv_gem_prime_pin(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index 3fbb4855396c..33854c94cb85 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -215,7 +215,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) mutex_lock(&obj->lock); pages = etnaviv_gem_get_pages(obj); mutex_unlock(&obj->lock); - if (pages) { + if (!IS_ERR(pages)) { int j; iter.hdr->data[0] = bomap - bomap_start; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 5c48915f492d..c60752ef7324 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -397,13 +397,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, } if (op & ETNA_PREP_NOSYNC) { - if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv, + if (!reservation_object_test_signaled_rcu(obj->resv, write)) return -EBUSY; } else { unsigned long remain = etnaviv_timeout_to_jiffies(timeout); - ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv, + ret = reservation_object_wait_timeout_rcu(obj->resv, write, true, remain); if (ret <= 0) return ret == 0 ? -ETIMEDOUT : ret; @@ -459,7 +459,7 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence, static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); - struct reservation_object *robj = etnaviv_obj->resv; + struct reservation_object *robj = obj->resv; struct reservation_object_list *fobj; struct dma_fence *fence; unsigned long off = drm_vma_node_start(&obj->vma_node); @@ -549,8 +549,6 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj) drm_gem_free_mmap_offset(obj); etnaviv_obj->ops->release(etnaviv_obj); - if (etnaviv_obj->resv == &etnaviv_obj->_resv) - reservation_object_fini(&etnaviv_obj->_resv); drm_gem_object_release(obj); kfree(etnaviv_obj); @@ -596,12 +594,8 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags, etnaviv_obj->flags = flags; etnaviv_obj->ops = ops; - if (robj) { - etnaviv_obj->resv = robj; - } else { - etnaviv_obj->resv = &etnaviv_obj->_resv; - reservation_object_init(&etnaviv_obj->_resv); - } + if (robj) + etnaviv_obj->base.resv = robj; mutex_init(&etnaviv_obj->lock); INIT_LIST_HEAD(&etnaviv_obj->vram_list); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index 76079c2291f8..753c458497d0 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -47,10 +47,6 @@ struct etnaviv_gem_object { struct sg_table *sgt; void *vaddr; - /* normally (resv == &_resv) except for imported bo's */ - struct reservation_object *resv; - struct reservation_object _resv; - struct list_head vram_list; /* cache maintenance */ @@ -95,6 +91,7 @@ struct etnaviv_gem_submit_bo { struct etnaviv_gem_submit { struct drm_sched_job sched_job; struct kref refcount; + struct etnaviv_file_private *ctx; struct etnaviv_gpu *gpu; struct 
dma_fence *out_fence, *in_fence; int out_fence_id; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 0566171f8df2..00e8b6a817e3 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -15,7 +15,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) int npages = obj->size >> PAGE_SHIFT; if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */ - return NULL; + return ERR_PTR(-EINVAL); return drm_prime_pages_to_sg(etnaviv_obj->pages, npages); } @@ -139,10 +139,3 @@ fail: return ERR_PTR(ret); } - -struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj) -{ - struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); - - return etnaviv_obj->resv; -} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 30875f8f2933..e054f09ac828 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -108,9 +108,9 @@ out_unlock: static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i) { if (submit->bos[i].flags & BO_LOCKED) { - struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + struct drm_gem_object *obj = &submit->bos[i].obj->base; - ww_mutex_unlock(&etnaviv_obj->resv->lock); + ww_mutex_unlock(&obj->resv->lock); submit->bos[i].flags &= ~BO_LOCKED; } } @@ -122,7 +122,7 @@ static int submit_lock_objects(struct etnaviv_gem_submit *submit, retry: for (i = 0; i < submit->nr_bos; i++) { - struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + struct drm_gem_object *obj = &submit->bos[i].obj->base; if (slow_locked == i) slow_locked = -1; @@ -130,7 +130,7 @@ retry: contended = i; if (!(submit->bos[i].flags & BO_LOCKED)) { - ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock, + ret = ww_mutex_lock_interruptible(&obj->resv->lock, ticket); if (ret == -EALREADY) DRM_ERROR("BO at index %u already on submit list\n", @@ -153,12 +153,12 @@ fail: submit_unlock_object(submit, slow_locked); if (ret == -EDEADLK) { - struct etnaviv_gem_object *etnaviv_obj; + struct drm_gem_object *obj; - etnaviv_obj = submit->bos[contended].obj; + obj = &submit->bos[contended].obj->base; /* we lost out in a seqno race, lock and retry.. 
*/ - ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock, + ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock, ticket); if (!ret) { submit->bos[contended].flags |= BO_LOCKED; @@ -176,7 +176,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit) for (i = 0; i < submit->nr_bos; i++) { struct etnaviv_gem_submit_bo *bo = &submit->bos[i]; - struct reservation_object *robj = bo->obj->resv; + struct reservation_object *robj = bo->obj->base.resv; if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) { ret = reservation_object_reserve_shared(robj, 1); @@ -207,13 +207,13 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit) int i; for (i = 0; i < submit->nr_bos; i++) { - struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; + struct drm_gem_object *obj = &submit->bos[i].obj->base; if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) - reservation_object_add_excl_fence(etnaviv_obj->resv, + reservation_object_add_excl_fence(obj->resv, submit->out_fence); else - reservation_object_add_shared_fence(etnaviv_obj->resv, + reservation_object_add_shared_fence(obj->resv, submit->out_fence); submit_unlock_object(submit, i); @@ -506,7 +506,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret) goto err_submit_objects; - submit->cmdbuf.ctx = file->driver_priv; + submit->ctx = file->driver_priv; submit->exec_state = args->exec_state; submit->flags = args->flags; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c index f1c88d8ad5ba..f794e04be9e6 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c @@ -320,8 +320,8 @@ etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu) domain = &etnaviv_domain->base; domain->dev = gpu->dev; - domain->base = 0; - domain->size = (u64)SZ_1G * 4; + domain->base = SZ_4K; + domain->size = (u64)SZ_1G * 4 - SZ_4K; domain->ops = &etnaviv_iommuv2_ops; ret = etnaviv_iommuv2_init(etnaviv_domain); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c index 9980d81a26e3..4227a4006c34 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c @@ -113,7 +113,7 @@ static const struct etnaviv_pm_domain doms_3d[] = { .name = "PE", .profile_read = VIVS_MC_PROFILE_PE_READ, .profile_config = VIVS_MC_PROFILE_CONFIG0, - .nr_signals = 5, + .nr_signals = 4, .signal = (const struct etnaviv_pm_signal[]) { { "PIXEL_COUNT_KILLED_BY_COLOR_PIPE", @@ -435,7 +435,7 @@ int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu, dom = meta->domains + signal->domain; - if (signal->iter > dom->nr_signals) + if (signal->iter >= dom->nr_signals) return -EINVAL; sig = &dom->signal[signal->iter]; @@ -461,7 +461,7 @@ int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r, dom = meta->domains + r->domain; - if (r->signal > dom->nr_signals) + if (r->signal >= dom->nr_signals) return -EINVAL; return 0; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 67ae26602024..6d24fea1766b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -153,7 +153,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity, mutex_lock(&submit->gpu->fence_lock); ret = drm_sched_job_init(&submit->sched_job, sched_entity, - submit->cmdbuf.ctx); + submit->ctx); if (ret) goto out_unlock; diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 
0573eab0e190..f35e4ab55b27 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -20,6 +20,7 @@ #include "regs-vp.h" #include <linux/kernel.h> +#include <linux/ktime.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/i2c.h> @@ -352,15 +353,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha) mixer_reg_write(ctx, MXR_VIDEO_CFG, val); } -static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) +static bool mixer_is_synced(struct mixer_context *ctx) { - /* block update on vsync */ - mixer_reg_writemask(ctx, MXR_STATUS, enable ? - MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); + u32 base, shadow; + if (ctx->mxr_ver == MXR_VER_16_0_33_0 || + ctx->mxr_ver == MXR_VER_128_0_0_184) + return !(mixer_reg_read(ctx, MXR_CFG) & + MXR_CFG_LAYER_UPDATE_COUNT_MASK); + + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && + vp_reg_read(ctx, VP_SHADOW_UPDATE)) + return false; + + base = mixer_reg_read(ctx, MXR_CFG); + shadow = mixer_reg_read(ctx, MXR_CFG_S); + if (base != shadow) + return false; + + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); + if (base != shadow) + return false; + + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1)); + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1)); + if (base != shadow) + return false; + + return true; +} + +static int mixer_wait_for_sync(struct mixer_context *ctx) +{ + ktime_t timeout = ktime_add_us(ktime_get(), 100000); + + while (!mixer_is_synced(ctx)) { + usleep_range(1000, 2000); + if (ktime_compare(ktime_get(), timeout) > 0) + return -ETIMEDOUT; + } + return 0; +} + +static void mixer_disable_sync(struct mixer_context *ctx) +{ + mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE); +} + +static void mixer_enable_sync(struct mixer_context *ctx) +{ + if (ctx->mxr_ver == MXR_VER_16_0_33_0 || + ctx->mxr_ver == MXR_VER_128_0_0_184) + mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); + mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE); if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) - vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ? - VP_SHADOW_UPDATE_ENABLE : 0); + vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE); } static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height) @@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx, spin_lock_irqsave(&ctx->reg_slock, flags); - vp_reg_write(ctx, VP_SHADOW_UPDATE, 1); /* interlace or progressive scan mode */ val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? 
~0 : 0); vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); @@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx, vp_regs_dump(ctx); } -static void mixer_layer_update(struct mixer_context *ctx) -{ - mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); -} - static void mixer_graph_buffer(struct mixer_context *ctx, struct exynos_drm_plane *plane) { @@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx, mixer_cfg_layer(ctx, win, priority, true); mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha); - /* layer update mandatory for mixer 16.0.33.0 */ - if (ctx->mxr_ver == MXR_VER_16_0_33_0 || - ctx->mxr_ver == MXR_VER_128_0_0_184) - mixer_layer_update(ctx); - spin_unlock_irqrestore(&ctx->reg_slock, flags); mixer_regs_dump(ctx); @@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx) static irqreturn_t mixer_irq_handler(int irq, void *arg) { struct mixer_context *ctx = arg; - u32 val, base, shadow; + u32 val; spin_lock(&ctx->reg_slock); @@ -723,26 +760,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) val &= ~MXR_INT_STATUS_VSYNC; /* interlace scan need to check shadow register */ - if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { - if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && - vp_reg_read(ctx, VP_SHADOW_UPDATE)) - goto out; - - base = mixer_reg_read(ctx, MXR_CFG); - shadow = mixer_reg_read(ctx, MXR_CFG_S); - if (base != shadow) - goto out; - - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); - if (base != shadow) - goto out; - - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1)); - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1)); - if (base != shadow) - goto out; - } + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags) + && !mixer_is_synced(ctx)) + goto out; drm_crtc_handle_vblank(&ctx->crtc->base); } @@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc) static void mixer_atomic_begin(struct exynos_drm_crtc *crtc) { - struct mixer_context *mixer_ctx = crtc->ctx; + struct mixer_context *ctx = crtc->ctx; - if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) + if (!test_bit(MXR_BIT_POWERED, &ctx->flags)) return; - mixer_vsync_set_update(mixer_ctx, false); + if (mixer_wait_for_sync(ctx)) + dev_err(ctx->dev, "timeout waiting for VSYNC\n"); + mixer_disable_sync(ctx); } static void mixer_update_plane(struct exynos_drm_crtc *crtc, @@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc) if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) return; - mixer_vsync_set_update(mixer_ctx, true); + mixer_enable_sync(mixer_ctx); exynos_crtc_handle_event(crtc); } @@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) exynos_drm_pipe_clk_enable(crtc, true); - mixer_vsync_set_update(ctx, false); + mixer_disable_sync(ctx); mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); @@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) mixer_commit(ctx); - mixer_vsync_set_update(ctx, true); + mixer_enable_sync(ctx); set_bit(MXR_BIT_POWERED, &ctx->flags); } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index bf256971063d..83c841b50272 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -94,7 +94,7 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) drm_display_mode_to_videomode(mode, &vm); /* INV_PXCK as default (most display sample data on 
rising edge) */ - if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)) + if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)) pol |= DCU_SYN_POL_INV_PXCK; if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index a9fcb32f4989..bbe1a5d56480 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -761,39 +761,6 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) return ret; } -#if !defined(CONFIG_VGA_CONSOLE) -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - return 0; -} -#elif !defined(CONFIG_DUMMY_CONSOLE) -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - return -ENODEV; -} -#else -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - int ret = 0; - - DRM_INFO("Replacing VGA console driver\n"); - - console_lock(); - if (con_is_bound(&vga_con)) - ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); - if (ret == 0) { - ret = do_unregister_con_driver(&vga_con); - - /* Ignore "already unregistered". */ - if (ret == -ENODEV) - ret = 0; - } - console_unlock(); - - return ret; -} -#endif - static void intel_init_dpio(struct drm_i915_private *dev_priv) { /* @@ -1532,7 +1499,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) goto err_ggtt; } - ret = i915_kick_out_vgacon(dev_priv); + ret = vga_remove_vgacon(pdev); if (ret) { DRM_ERROR("failed to remove conflicting VGA console\n"); goto err_ggtt; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 084016200a1e..46a52da3db29 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -5,6 +5,7 @@ */ #include <linux/irq.h> +#include <linux/pm_runtime.h> #include "i915_pmu.h" #include "intel_ringbuffer.h" #include "i915_drv.h" @@ -474,7 +475,6 @@ static u64 get_rc6(struct drm_i915_private *i915) * counter value. */ spin_lock_irqsave(&i915->pmu.lock, flags); - spin_lock(&kdev->power.lock); /* * After the above branch intel_runtime_pm_get_if_in_use failed @@ -487,16 +487,13 @@ static u64 get_rc6(struct drm_i915_private *i915) * suspended and if not we cannot do better than report the last * known RC6 value. */ - if (kdev->power.runtime_status == RPM_SUSPENDED) { - if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) - i915->pmu.suspended_jiffies_last = - kdev->power.suspended_jiffies; + if (pm_runtime_status_suspended(kdev)) { + val = pm_runtime_suspended_time(kdev); - val = kdev->power.suspended_jiffies - - i915->pmu.suspended_jiffies_last; - val += jiffies - kdev->power.accounting_timestamp; + if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) + i915->pmu.suspended_time_last = val; - val = jiffies_to_nsecs(val); + val -= i915->pmu.suspended_time_last; val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; @@ -506,7 +503,6 @@ static u64 get_rc6(struct drm_i915_private *i915) val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; } - spin_unlock(&kdev->power.lock); spin_unlock_irqrestore(&i915->pmu.lock, flags); } diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index b3728c5f13e7..4fc4f2478301 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -97,9 +97,9 @@ struct i915_pmu { */ struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; /** - * @suspended_jiffies_last: Cached suspend time from PM core. 
+ * @suspended_time_last: Cached suspend time from PM core. */ - unsigned long suspended_jiffies_last; + u64 suspended_time_last; /** * @i915_attr: Memory block holding device attributes. */ diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 8baedcaddbba..2dbe8933b50a 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -154,12 +154,6 @@ static inline u64 ptr_to_u64(const void *ptr) #include <linux/list.h> -static inline int list_is_first(const struct list_head *list, - const struct list_head *head) -{ - return head->next == list; -} - static inline void __list_del_many(struct list_head *head, struct list_head *first) { diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 3a0d72b141eb..b844e8840c6f 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -126,6 +126,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, */ if (new_conn_state->force_audio != old_conn_state->force_audio || new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb || + new_conn_state->base.colorspace != old_conn_state->base.colorspace || new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio || new_conn_state->base.content_type != old_conn_state->base.content_type || new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode) diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c index 66ed3ee5998a..848dd9e728d8 100644 --- a/drivers/gpu/drm/i915/intel_connector.c +++ b/drivers/gpu/drm/i915/intel_connector.c @@ -267,3 +267,11 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector) connector->dev->mode_config.aspect_ratio_property, DRM_MODE_PICTURE_ASPECT_NONE); } + +void +intel_attach_colorspace_property(struct drm_connector *connector) +{ + if (!drm_mode_create_colorspace_property(connector)) + drm_object_attach_property(&connector->base, + connector->colorspace_property, 0); +} diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3371bf96dabd..8576a7f799f2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2667,11 +2667,11 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) case PLANE_CTL_FORMAT_Y216: return DRM_FORMAT_Y216; case PLANE_CTL_FORMAT_Y410: - return DRM_FORMAT_Y410; + return DRM_FORMAT_XVYU2101010; case PLANE_CTL_FORMAT_Y412: - return DRM_FORMAT_Y412; + return DRM_FORMAT_XVYU12_16161616; case PLANE_CTL_FORMAT_Y416: - return DRM_FORMAT_Y416; + return DRM_FORMAT_XVYU16161616; default: case PLANE_CTL_FORMAT_XRGB_8888: if (rgb_order) { @@ -3625,11 +3625,11 @@ static u32 skl_plane_ctl_format(u32 pixel_format) return PLANE_CTL_FORMAT_Y212; case DRM_FORMAT_Y216: return PLANE_CTL_FORMAT_Y216; - case DRM_FORMAT_Y410: + case DRM_FORMAT_XVYU2101010: return PLANE_CTL_FORMAT_Y410; - case DRM_FORMAT_Y412: + case DRM_FORMAT_XVYU12_16161616: return PLANE_CTL_FORMAT_Y412; - case DRM_FORMAT_Y416: + case DRM_FORMAT_XVYU16161616: return PLANE_CTL_FORMAT_Y416; default: MISSING_CASE(pixel_format); @@ -5186,9 +5186,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_Y210: case DRM_FORMAT_Y212: case DRM_FORMAT_Y216: - case DRM_FORMAT_Y410: - case DRM_FORMAT_Y412: - case DRM_FORMAT_Y416: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: break; default: 
DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 6aab82d8d9c5..f8c7b291fdc3 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1899,6 +1899,7 @@ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); void intel_attach_force_audio_property(struct drm_connector *connector); void intel_attach_broadcast_rgb_property(struct drm_connector *connector); void intel_attach_aspect_ratio_property(struct drm_connector *connector); +void intel_attach_colorspace_property(struct drm_connector *connector); /* intel_csr.c */ void intel_csr_ucode_init(struct drm_i915_private *); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 07893ad2ad1f..26767785f14a 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -701,6 +701,8 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder, else frame->colorspace = HDMI_COLORSPACE_RGB; + drm_hdmi_avi_infoframe_colorspace(frame, conn_state); + drm_hdmi_avi_infoframe_quant_range(frame, connector, adjusted_mode, crtc_state->limited_color_range ? @@ -2700,10 +2702,21 @@ static void intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_digital_port *intel_dig_port = + hdmi_to_dig_port(intel_hdmi); intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); intel_attach_aspect_ratio_property(connector); + + /* + * Attach Colorspace property for Non LSPCON based device + * ToDo: This needs to be extended for LSPCON implementation + * as well. Will be implemented separately. 
+ */ + if (!intel_dig_port->lspcon.active) + intel_attach_colorspace_property(connector); + drm_connector_attach_content_type_property(connector); connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index aee4defcb88d..3f2055f70d05 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -1837,9 +1837,9 @@ static const u32 icl_plane_formats[] = { DRM_FORMAT_Y210, DRM_FORMAT_Y212, DRM_FORMAT_Y216, - DRM_FORMAT_Y410, - DRM_FORMAT_Y412, - DRM_FORMAT_Y416, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, }; static const u32 icl_hdr_plane_formats[] = { @@ -1862,9 +1862,9 @@ static const u32 icl_hdr_plane_formats[] = { DRM_FORMAT_Y210, DRM_FORMAT_Y212, DRM_FORMAT_Y216, - DRM_FORMAT_Y410, - DRM_FORMAT_Y412, - DRM_FORMAT_Y416, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, }; static const u32 skl_planar_formats[] = { @@ -1922,9 +1922,9 @@ static const u32 icl_planar_formats[] = { DRM_FORMAT_Y210, DRM_FORMAT_Y212, DRM_FORMAT_Y216, - DRM_FORMAT_Y410, - DRM_FORMAT_Y412, - DRM_FORMAT_Y416, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, }; static const u32 icl_hdr_planar_formats[] = { @@ -1951,9 +1951,9 @@ static const u32 icl_hdr_planar_formats[] = { DRM_FORMAT_Y210, DRM_FORMAT_Y212, DRM_FORMAT_Y216, - DRM_FORMAT_Y410, - DRM_FORMAT_Y412, - DRM_FORMAT_Y416, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, }; static const u64 skl_plane_format_modifiers_noccs[] = { @@ -2101,9 +2101,9 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_Y210: case DRM_FORMAT_Y212: case DRM_FORMAT_Y216: - case DRM_FORMAT_Y410: - case DRM_FORMAT_Y412: - case DRM_FORMAT_Y416: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: if (modifier == I915_FORMAT_MOD_Yf_TILED) return true; /* fall through */ diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c index 23abf03736e7..3f9921ba4a76 100644 --- a/drivers/gpu/drm/i915/intel_vdsc.c +++ b/drivers/gpu/drm/i915/intel_vdsc.c @@ -317,129 +317,6 @@ static int get_column_index_for_rc_params(u8 bits_per_component) } } -static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) -{ - unsigned long groups_per_line = 0; - unsigned long groups_total = 0; - unsigned long num_extra_mux_bits = 0; - unsigned long slice_bits = 0; - unsigned long hrd_delay = 0; - unsigned long final_scale = 0; - unsigned long rbs_min = 0; - - /* Number of groups used to code each line of a slice */ - groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width, - DSC_RC_PIXELS_PER_GROUP); - - /* chunksize in Bytes */ - vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width * - vdsc_cfg->bits_per_pixel, - (8 * 16)); - - if (vdsc_cfg->convert_rgb) - num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size + - (4 * vdsc_cfg->bits_per_component + 4) - - 2); - else - num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size + - (4 * vdsc_cfg->bits_per_component + 4) + - 2 * (4 * vdsc_cfg->bits_per_component) - 2; - /* Number of bits in one Slice */ - slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height; - - while ((num_extra_mux_bits > 0) && - ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size)) - num_extra_mux_bits--; - - if (groups_per_line < vdsc_cfg->initial_scale_value - 8) - vdsc_cfg->initial_scale_value 
= groups_per_line + 8; - - /* scale_decrement_interval calculation according to DSC spec 1.11 */ - if (vdsc_cfg->initial_scale_value > 8) - vdsc_cfg->scale_decrement_interval = groups_per_line / - (vdsc_cfg->initial_scale_value - 8); - else - vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX; - - vdsc_cfg->final_offset = vdsc_cfg->rc_model_size - - (vdsc_cfg->initial_xmit_delay * - vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits; - - if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) { - DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n"); - return -ERANGE; - } - - final_scale = (vdsc_cfg->rc_model_size * 8) / - (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset); - if (vdsc_cfg->slice_height > 1) - /* - * NflBpgOffset is 16 bit value with 11 fractional bits - * hence we multiply by 2^11 for preserving the - * fractional part - */ - vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11), - (vdsc_cfg->slice_height - 1)); - else - vdsc_cfg->nfl_bpg_offset = 0; - - /* 2^16 - 1 */ - if (vdsc_cfg->nfl_bpg_offset > 65535) { - DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n"); - return -ERANGE; - } - - /* Number of groups used to code the entire slice */ - groups_total = groups_per_line * vdsc_cfg->slice_height; - - /* slice_bpg_offset is 16 bit value with 11 fractional bits */ - vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size - - vdsc_cfg->initial_offset + - num_extra_mux_bits) << 11), - groups_total); - - if (final_scale > 9) { - /* - * ScaleIncrementInterval = - * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125)) - * as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value, - * we need divide by 2^11 from pstDscCfg values - */ - vdsc_cfg->scale_increment_interval = - (vdsc_cfg->final_offset * (1 << 11)) / - ((vdsc_cfg->nfl_bpg_offset + - vdsc_cfg->slice_bpg_offset) * - (final_scale - 9)); - } else { - /* - * If finalScaleValue is less than or equal to 9, a value of 0 should - * be used to disable the scale increment at the end of the slice - */ - vdsc_cfg->scale_increment_interval = 0; - } - - if (vdsc_cfg->scale_increment_interval > 65535) { - DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n"); - return -ERANGE; - } - - /* - * DSC spec mentions that bits_per_pixel specifies the target - * bits/pixel (bpp) rate that is used by the encoder, - * in steps of 1/16 of a bit per pixel - */ - rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset + - DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay * - vdsc_cfg->bits_per_pixel, 16) + - groups_per_line * vdsc_cfg->first_line_bpg_offset; - - hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel); - vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; - vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; - - return 0; -} - int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config) { @@ -491,7 +368,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; /* Gen 11 does not support YCbCr */ - vdsc_cfg->enable422 = false; + vdsc_cfg->simple_422 = false; /* Gen 11 does not support VBR */ vdsc_cfg->vbr_enable = false; vdsc_cfg->block_pred_enable = @@ -574,7 +451,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) / (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset); - return 
intel_compute_rc_parameters(vdsc_cfg); + return drm_dsc_compute_rc_parameters(vdsc_cfg); } enum intel_display_power_domain @@ -618,7 +495,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val |= DSC_BLOCK_PREDICTION; if (vdsc_cfg->convert_rgb) pps_val |= DSC_COLOR_SPACE_CONVERSION; - if (vdsc_cfg->enable422) + if (vdsc_cfg->simple_422) pps_val |= DSC_422_ENABLE; if (vdsc_cfg->vbr_enable) pps_val |= DSC_VBR_ENABLE; @@ -1004,10 +881,10 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder, struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */ - drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp); + drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header); /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */ - drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg); + drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg); intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_PPS, &dp_dsc_pps_sdp, diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index c935cbe059a7..3e8bece620df 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -185,7 +185,7 @@ static int compare_of(struct device *dev, void *data) } /* Special case for LDB, one device for two channels */ - if (of_node_cmp(np->name, "lvds-channel") == 0) { + if (of_node_name_eq(np, "lvds-channel")) { np = of_get_parent(np); of_node_put(np); } diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index ec3602ebbc1c..311a20c942eb 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -295,7 +295,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc) sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW); /* Default to driving pixel data on negative clock edges */ sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags & - DRM_BUS_FLAG_PIXDATA_POSEDGE); + DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE); sig_cfg.bus_format = imx_crtc_state->bus_format; sig_cfg.v_to_h_sync = 0; sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin; diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile index 7709f2fbb9f7..d4ea82fc493b 100644 --- a/drivers/gpu/drm/meson/Makefile +++ b/drivers/gpu/drm/meson/Makefile @@ -1,5 +1,5 @@ meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o -meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o meson_overlay.o +meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o obj-$(CONFIG_DRM_MESON) += meson-drm.o obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o diff --git a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c deleted file mode 100644 index 5de11aa7c775..000000000000 --- a/drivers/gpu/drm/meson/meson_canvas.c +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (C) 2016 BayLibre, SAS - * Author: Neil Armstrong <narmstrong@baylibre.com> - * Copyright (C) 2015 Amlogic, Inc. All rights reserved. - * Copyright (C) 2014 Endless Mobile - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include "meson_drv.h" -#include "meson_canvas.h" -#include "meson_registers.h" - -/** - * DOC: Canvas - * - * CANVAS is a memory zone where physical memory frames information - * are stored for the VIU to scanout. - */ - -/* DMC Registers */ -#define DMC_CAV_LUT_DATAL 0x48 /* 0x12 offset in data sheet */ -#define CANVAS_WIDTH_LBIT 29 -#define CANVAS_WIDTH_LWID 3 -#define DMC_CAV_LUT_DATAH 0x4c /* 0x13 offset in data sheet */ -#define CANVAS_WIDTH_HBIT 0 -#define CANVAS_HEIGHT_BIT 9 -#define CANVAS_BLKMODE_BIT 24 -#define CANVAS_ENDIAN_BIT 26 -#define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */ -#define CANVAS_LUT_WR_EN (0x2 << 8) -#define CANVAS_LUT_RD_EN (0x1 << 8) - -void meson_canvas_setup(struct meson_drm *priv, - uint32_t canvas_index, uint32_t addr, - uint32_t stride, uint32_t height, - unsigned int wrap, - unsigned int blkmode, - unsigned int endian) -{ - unsigned int val; - - regmap_write(priv->dmc, DMC_CAV_LUT_DATAL, - (((addr + 7) >> 3)) | - (((stride + 7) >> 3) << CANVAS_WIDTH_LBIT)); - - regmap_write(priv->dmc, DMC_CAV_LUT_DATAH, - ((((stride + 7) >> 3) >> CANVAS_WIDTH_LWID) << - CANVAS_WIDTH_HBIT) | - (height << CANVAS_HEIGHT_BIT) | - (wrap << 22) | - (blkmode << CANVAS_BLKMODE_BIT) | - (endian << CANVAS_ENDIAN_BIT)); - - regmap_write(priv->dmc, DMC_CAV_LUT_ADDR, - CANVAS_LUT_WR_EN | canvas_index); - - /* Force a read-back to make sure everything is flushed. */ - regmap_read(priv->dmc, DMC_CAV_LUT_DATAH, &val); -} diff --git a/drivers/gpu/drm/meson/meson_canvas.h b/drivers/gpu/drm/meson/meson_canvas.h deleted file mode 100644 index 85dbf26e2826..000000000000 --- a/drivers/gpu/drm/meson/meson_canvas.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (C) 2016 BayLibre, SAS - * Author: Neil Armstrong <narmstrong@baylibre.com> - * Copyright (C) 2014 Endless Mobile - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - */ - -/* Canvas LUT Memory */ - -#ifndef __MESON_CANVAS_H -#define __MESON_CANVAS_H - -#define MESON_CANVAS_ID_OSD1 0x4e -#define MESON_CANVAS_ID_VD1_0 0x60 -#define MESON_CANVAS_ID_VD1_1 0x61 -#define MESON_CANVAS_ID_VD1_2 0x62 - -/* Canvas configuration. 
*/ -#define MESON_CANVAS_WRAP_NONE 0x00 -#define MESON_CANVAS_WRAP_X 0x01 -#define MESON_CANVAS_WRAP_Y 0x02 - -#define MESON_CANVAS_BLKMODE_LINEAR 0x00 -#define MESON_CANVAS_BLKMODE_32x32 0x01 -#define MESON_CANVAS_BLKMODE_64x64 0x02 - -#define MESON_CANVAS_ENDIAN_SWAP16 0x1 -#define MESON_CANVAS_ENDIAN_SWAP32 0x3 -#define MESON_CANVAS_ENDIAN_SWAP64 0x7 -#define MESON_CANVAS_ENDIAN_SWAP128 0xf - -void meson_canvas_setup(struct meson_drm *priv, - uint32_t canvas_index, uint32_t addr, - uint32_t stride, uint32_t height, - unsigned int wrap, - unsigned int blkmode, - unsigned int endian); - -#endif /* __MESON_CANVAS_H */ diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 43e29984f8b1..6d9311e254ef 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c @@ -37,7 +37,6 @@ #include "meson_venc.h" #include "meson_vpp.h" #include "meson_viu.h" -#include "meson_canvas.h" #include "meson_registers.h" /* CRTC definition */ @@ -214,13 +213,7 @@ void meson_crtc_irq(struct meson_drm *priv) writel_relaxed(priv->viu.osd_sc_v_ctrl0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0)); - if (priv->canvas) - meson_canvas_config(priv->canvas, priv->canvas_id_osd1, - priv->viu.osd1_addr, priv->viu.osd1_stride, - priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, 0); - else - meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, + meson_canvas_config(priv->canvas, priv->canvas_id_osd1, priv->viu.osd1_addr, priv->viu.osd1_stride, priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, MESON_CANVAS_BLKMODE_LINEAR, 0); @@ -237,61 +230,34 @@ void meson_crtc_irq(struct meson_drm *priv) switch (priv->viu.vd1_planes) { case 3: - if (priv->canvas) - meson_canvas_config(priv->canvas, - priv->canvas_id_vd1_2, - priv->viu.vd1_addr2, - priv->viu.vd1_stride2, - priv->viu.vd1_height2, - MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, - MESON_CANVAS_ENDIAN_SWAP64); - else - meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_2, - priv->viu.vd1_addr2, - priv->viu.vd1_stride2, - priv->viu.vd1_height2, - MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, - MESON_CANVAS_ENDIAN_SWAP64); + meson_canvas_config(priv->canvas, + priv->canvas_id_vd1_2, + priv->viu.vd1_addr2, + priv->viu.vd1_stride2, + priv->viu.vd1_height2, + MESON_CANVAS_WRAP_NONE, + MESON_CANVAS_BLKMODE_LINEAR, + MESON_CANVAS_ENDIAN_SWAP64); /* fallthrough */ case 2: - if (priv->canvas) - meson_canvas_config(priv->canvas, - priv->canvas_id_vd1_1, - priv->viu.vd1_addr1, - priv->viu.vd1_stride1, - priv->viu.vd1_height1, - MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, - MESON_CANVAS_ENDIAN_SWAP64); - else - meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_1, - priv->viu.vd1_addr2, - priv->viu.vd1_stride2, - priv->viu.vd1_height2, - MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, - MESON_CANVAS_ENDIAN_SWAP64); + meson_canvas_config(priv->canvas, + priv->canvas_id_vd1_1, + priv->viu.vd1_addr1, + priv->viu.vd1_stride1, + priv->viu.vd1_height1, + MESON_CANVAS_WRAP_NONE, + MESON_CANVAS_BLKMODE_LINEAR, + MESON_CANVAS_ENDIAN_SWAP64); /* fallthrough */ case 1: - if (priv->canvas) - meson_canvas_config(priv->canvas, - priv->canvas_id_vd1_0, - priv->viu.vd1_addr0, - priv->viu.vd1_stride0, - priv->viu.vd1_height0, - MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, - MESON_CANVAS_ENDIAN_SWAP64); - else - meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0, - priv->viu.vd1_addr2, - priv->viu.vd1_stride2, - priv->viu.vd1_height2, - MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR, - 
MESON_CANVAS_ENDIAN_SWAP64); + meson_canvas_config(priv->canvas, + priv->canvas_id_vd1_0, + priv->viu.vd1_addr0, + priv->viu.vd1_stride0, + priv->viu.vd1_height0, + MESON_CANVAS_WRAP_NONE, + MESON_CANVAS_BLKMODE_LINEAR, + MESON_CANVAS_ENDIAN_SWAP64); }; writel_relaxed(priv->viu.vd1_if0_gen_reg, diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 2281ed3eb774..70f9d7b85e8e 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -48,7 +48,6 @@ #include "meson_vpp.h" #include "meson_viu.h" #include "meson_venc.h" -#include "meson_canvas.h" #include "meson_registers.h" #define DRIVER_NAME "meson" @@ -231,50 +230,31 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) } priv->canvas = meson_canvas_get(dev); - if (!IS_ERR(priv->canvas)) { - ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1); - if (ret) - goto free_drm; - ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - goto free_drm; - } - ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); - goto free_drm; - } - ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2); - if (ret) { - meson_canvas_free(priv->canvas, priv->canvas_id_osd1); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); - meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); - goto free_drm; - } - } else { - priv->canvas = NULL; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc"); - if (!res) { - ret = -EINVAL; - goto free_drm; - } - /* Simply ioremap since it may be a shared register zone */ - regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!regs) { - ret = -EADDRNOTAVAIL; - goto free_drm; - } + if (IS_ERR(priv->canvas)) { + ret = PTR_ERR(priv->canvas); + goto free_drm; + } - priv->dmc = devm_regmap_init_mmio(dev, regs, - &meson_regmap_config); - if (IS_ERR(priv->dmc)) { - dev_err(&pdev->dev, "Couldn't create the DMC regmap\n"); - ret = PTR_ERR(priv->dmc); - goto free_drm; - } + ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1); + if (ret) + goto free_drm; + ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0); + if (ret) { + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); + goto free_drm; + } + ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1); + if (ret) { + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); + goto free_drm; + } + ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2); + if (ret) { + meson_canvas_free(priv->canvas, priv->canvas_id_osd1); + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0); + meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1); + goto free_drm; } priv->vsync_irq = platform_get_irq(pdev, 0); diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h index 4dccf4cd042a..214a7cb18ce2 100644 --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h @@ -29,7 +29,6 @@ struct meson_drm { struct device *dev; void __iomem *io_base; struct regmap *hhi; - struct regmap *dmc; int vsync_irq; struct meson_canvas *canvas; diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c index 691a9fd16b36..b54a22e483b9 100644 --- a/drivers/gpu/drm/meson/meson_overlay.c +++ 
b/drivers/gpu/drm/meson/meson_overlay.c @@ -22,7 +22,6 @@ #include "meson_overlay.h" #include "meson_vpp.h" #include "meson_viu.h" -#include "meson_canvas.h" #include "meson_registers.h" /* VD1_IF0_GEN_REG */ @@ -350,13 +349,6 @@ static void meson_overlay_atomic_update(struct drm_plane *plane, DRM_DEBUG_DRIVER("\n"); - /* Fallback is canvas provider is not available */ - if (!priv->canvas) { - priv->canvas_id_vd1_0 = MESON_CANVAS_ID_VD1_0; - priv->canvas_id_vd1_1 = MESON_CANVAS_ID_VD1_1; - priv->canvas_id_vd1_2 = MESON_CANVAS_ID_VD1_2; - } - interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE; spin_lock_irqsave(&priv->drm->event_lock, flags); diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index 6119a0224278..b7786218cb10 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -38,7 +38,6 @@ #include "meson_plane.h" #include "meson_vpp.h" #include "meson_viu.h" -#include "meson_canvas.h" #include "meson_registers.h" /* OSD_SCI_WH_M1 */ @@ -148,10 +147,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane, (0xFF << OSD_GLOBAL_ALPHA_SHIFT) | OSD_BLK0_ENABLE; - if (priv->canvas) - canvas_id_osd1 = priv->canvas_id_osd1; - else - canvas_id_osd1 = MESON_CANVAS_ID_OSD1; + canvas_id_osd1 = priv->canvas_id_osd1; /* Set up BLK0 to point to the right canvas */ priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) | diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index e46e05f50bad..ac0f3687e09a 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -25,7 +25,6 @@ #include "meson_viu.h" #include "meson_vpp.h" #include "meson_venc.h" -#include "meson_canvas.h" #include "meson_registers.h" /** diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index cf549f1ed403..78c9e5a5e793 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -5,6 +5,7 @@ config DRM_MSM depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST) depends on OF && COMMON_CLK depends on MMU + depends on INTERCONNECT || !INTERCONNECT select QCOM_MDT_LOADER if ARCH_QCOM select REGULATOR select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index ce1b3cc4bf6d..d1662a75c7ec 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -2,6 +2,7 @@ /* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */ #include <linux/clk.h> +#include <linux/interconnect.h> #include <linux/pm_opp.h> #include <soc/qcom/cmd-db.h> @@ -84,6 +85,9 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; int ret; gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); @@ -106,6 +110,12 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); gmu->freq = gmu->gpu_freqs[index]; + + /* + * Eventually we will want to scale the path vote with the frequency but + * for now leave it at max so that the performance is nominal. 
+ */ + icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216)); } void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq) @@ -705,6 +715,8 @@ out: int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) { + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; struct a6xx_gmu *gmu = &a6xx_gpu->gmu; int status, ret; @@ -720,6 +732,9 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) if (ret) goto out; + /* Set the bus quota to a reasonable value for boot */ + icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072)); + a6xx_gmu_irq_enable(gmu); /* Check to see if we are doing a cold or warm boot */ @@ -760,6 +775,8 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) { + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; struct a6xx_gmu *gmu = &a6xx_gpu->gmu; u32 val; @@ -806,6 +823,9 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) /* Tell RPMh to power off the GPU */ a6xx_rpmh_stop(gmu); + /* Remove the bus vote */ + icc_set_bw(gpu->icc_path, 0, 0); + clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); pm_runtime_put_sync(gmu->dev); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 2cfee1a4fe0b..27898475cdf4 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -18,6 +18,7 @@ */ #include <linux/ascii85.h> +#include <linux/interconnect.h> #include <linux/kernel.h> #include <linux/pm_opp.h> #include <linux/slab.h> @@ -747,6 +748,11 @@ static int adreno_get_pwrlevels(struct device *dev, DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); + /* Check for an interconnect path for the bus */ + gpu->icc_path = of_icc_get(dev, NULL); + if (IS_ERR(gpu->icc_path)) + gpu->icc_path = NULL; + return 0; } @@ -787,10 +793,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) { + struct msm_gpu *gpu = &adreno_gpu->base; unsigned int i; for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) release_firmware(adreno_gpu->fw[i]); + icc_put(gpu->icc_path); + msm_gpu_cleanup(&adreno_gpu->base); } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 0bdd93648761..4697d854b827 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1027,7 +1027,6 @@ static struct drm_driver msm_driver = { .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, - .gem_prime_res_obj = msm_gem_prime_res_obj, .gem_prime_pin = msm_gem_prime_pin, .gem_prime_unpin = msm_gem_prime_unpin, .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index c56dade2c1dc..163e24d2ab99 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -292,7 +292,6 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); void *msm_gem_prime_vmap(struct drm_gem_object *obj); void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); -struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj); struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int msm_gem_prime_pin(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/msm/msm_gem.c 
b/drivers/gpu/drm/msm/msm_gem.c index 18ca651ab942..a72c648ba6e7 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -672,14 +672,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass) int msm_gem_sync_object(struct drm_gem_object *obj, struct msm_fence_context *fctx, bool exclusive) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); struct reservation_object_list *fobj; struct dma_fence *fence; int i, ret; - fobj = reservation_object_get_list(msm_obj->resv); + fobj = reservation_object_get_list(obj->resv); if (!fobj || (fobj->shared_count == 0)) { - fence = reservation_object_get_excl(msm_obj->resv); + fence = reservation_object_get_excl(obj->resv); /* don't need to wait on our own fences, since ring is fifo */ if (fence && (fence->context != fctx->context)) { ret = dma_fence_wait(fence, true); @@ -693,7 +692,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj, for (i = 0; i < fobj->shared_count; i++) { fence = rcu_dereference_protected(fobj->shared[i], - reservation_object_held(msm_obj->resv)); + reservation_object_held(obj->resv)); if (fence->context != fctx->context) { ret = dma_fence_wait(fence, true); if (ret) @@ -711,9 +710,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj, WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); msm_obj->gpu = gpu; if (exclusive) - reservation_object_add_excl_fence(msm_obj->resv, fence); + reservation_object_add_excl_fence(obj->resv, fence); else - reservation_object_add_shared_fence(msm_obj->resv, fence); + reservation_object_add_shared_fence(obj->resv, fence); list_del_init(&msm_obj->mm_list); list_add_tail(&msm_obj->mm_list, &gpu->active_list); } @@ -733,13 +732,12 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj) int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); bool write = !!(op & MSM_PREP_WRITE); unsigned long remain = op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); long ret; - ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write, + ret = reservation_object_wait_timeout_rcu(obj->resv, write, true, remain); if (ret == 0) return remain == 0 ? 
-EBUSY : -ETIMEDOUT; @@ -771,7 +769,7 @@ static void describe_fence(struct dma_fence *fence, const char *type, void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - struct reservation_object *robj = msm_obj->resv; + struct reservation_object *robj = obj->resv; struct reservation_object_list *fobj; struct dma_fence *fence; struct msm_gem_vma *vma; @@ -883,9 +881,6 @@ void msm_gem_free_object(struct drm_gem_object *obj) put_pages(obj); } - if (msm_obj->resv == &msm_obj->_resv) - reservation_object_fini(msm_obj->resv); - drm_gem_object_release(obj); mutex_unlock(&msm_obj->lock); @@ -945,12 +940,8 @@ static int msm_gem_new_impl(struct drm_device *dev, msm_obj->flags = flags; msm_obj->madv = MSM_MADV_WILLNEED; - if (resv) { - msm_obj->resv = resv; - } else { - msm_obj->resv = &msm_obj->_resv; - reservation_object_init(msm_obj->resv); - } + if (resv) + msm_obj->base.resv = resv; INIT_LIST_HEAD(&msm_obj->submit_entry); INIT_LIST_HEAD(&msm_obj->vmas); diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 13403c6da6c7..60bb290700ce 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -70,10 +70,3 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj) if (!obj->import_attach) msm_gem_put_pages(obj); } - -struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj) -{ - struct msm_gem_object *msm_obj = to_msm_bo(obj); - - return msm_obj->resv; -} diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 12b983fc0b56..df302521ec74 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -173,7 +173,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace); if (submit->bos[i].flags & BO_LOCKED) - ww_mutex_unlock(&msm_obj->resv->lock); + ww_mutex_unlock(&msm_obj->base.resv->lock); if (backoff && !(submit->bos[i].flags & BO_VALID)) submit->bos[i].iova = 0; @@ -196,7 +196,7 @@ retry: contended = i; if (!(submit->bos[i].flags & BO_LOCKED)) { - ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock, + ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock, &submit->ticket); if (ret) goto fail; @@ -218,7 +218,7 @@ fail: if (ret == -EDEADLK) { struct msm_gem_object *msm_obj = submit->bos[contended].obj; /* we lost out in a seqno race, lock and retry.. */ - ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock, + ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock, &submit->ticket); if (!ret) { submit->bos[contended].flags |= BO_LOCKED; @@ -244,7 +244,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) * strange place to call it. OTOH this is a * convenient can-fail point to hook it in. 
*/ - ret = reservation_object_reserve_shared(msm_obj->resv, + ret = reservation_object_reserve_shared(msm_obj->base.resv, 1); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index ca17086f72c9..6241986bab51 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -19,6 +19,7 @@ #define __MSM_GPU_H__ #include <linux/clk.h> +#include <linux/interconnect.h> #include <linux/regulator/consumer.h> #include "msm_drv.h" @@ -118,6 +119,8 @@ struct msm_gpu { struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk; uint32_t fast_rate; + struct icc_path *icc_path; + /* Hang and Inactivity Detection: */ #define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */ diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c index 0ee1ca8a316a..98e9bda91e80 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c @@ -253,12 +253,12 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) if (!(bus_flags & DRM_BUS_FLAG_DE_LOW)) vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH; /* - * DRM_BUS_FLAG_PIXDATA_ defines are controller centric, + * DRM_BUS_FLAG_PIXDATA_DRIVE_ defines are controller centric, * controllers VDCTRL0_DOTCLK is display centric. * Drive on positive edge -> display samples on falling edge - * DRM_BUS_FLAG_PIXDATA_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING + * DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING */ - if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) + if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE) vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING; writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0); diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 88a52f6b39fe..7dfbbbc1beea 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf, } ret = pm_runtime_get_sync(drm->dev); - if (IS_ERR_VALUE(ret) && ret != -EACCES) + if (ret < 0 && ret != -EACCES) return ret; ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); pm_runtime_put_autosuspend(drm->dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 8be7a83ced9b..40c47d6a7d78 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -100,12 +100,10 @@ static void nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page) { struct nouveau_dmem_chunk *chunk; - struct nouveau_drm *drm; unsigned long idx; chunk = (void *)hmm_devmem_page_get_drvdata(page); idx = page_to_pfn(page) - chunk->pfn_first; - drm = chunk->drm; /* * FIXME: @@ -261,7 +259,7 @@ static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = { .finalize_and_map = nouveau_dmem_fault_finalize_and_map, }; -static int +static vm_fault_t nouveau_dmem_fault(struct hmm_devmem *devmem, struct vm_area_struct *vma, unsigned long addr, @@ -456,11 +454,6 @@ nouveau_dmem_resume(struct nouveau_drm *drm) /* FIXME handle pin failure */ WARN_ON(ret); } - list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) { - ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false); - /* FIXME handle pin failure */ - WARN_ON(ret); - } mutex_unlock(&drm->dmem->mutex); } @@ -479,9 +472,6 @@ nouveau_dmem_suspend(struct nouveau_drm *drm) list_for_each_entry (chunk, &drm->dmem->chunk_full, list) { nouveau_bo_unpin(chunk->bo); } - list_for_each_entry (chunk, 
&drm->dmem->chunk_empty, list) { - nouveau_bo_unpin(chunk->bo); - } mutex_unlock(&drm->dmem->mutex); } @@ -623,7 +613,7 @@ nouveau_dmem_init(struct nouveau_drm *drm) */ drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops, device, size); - if (drm->dmem->devmem == NULL) { + if (IS_ERR(drm->dmem->devmem)) { kfree(drm->dmem); drm->dmem = NULL; return; diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig index a349cb61961e..7b0bcb494b5c 100644 --- a/drivers/gpu/drm/omapdrm/displays/Kconfig +++ b/drivers/gpu/drm/omapdrm/displays/Kconfig @@ -6,23 +6,12 @@ config DRM_OMAP_ENCODER_OPA362 Driver for OPA362 external analog TV amplifier controlled through a GPIO. -config DRM_OMAP_ENCODER_TFP410 - tristate "TFP410 DPI to DVI Encoder" - help - Driver for TFP410 DPI to DVI encoder. - config DRM_OMAP_ENCODER_TPD12S015 tristate "TPD12S015 HDMI ESD protection and level shifter" help Driver for TPD12S015, which offers HDMI ESD protection and level shifting. -config DRM_OMAP_CONNECTOR_DVI - tristate "DVI Connector" - depends on I2C - help - Driver for a generic DVI connector. - config DRM_OMAP_CONNECTOR_HDMI tristate "HDMI Connector" help @@ -33,12 +22,6 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV help Driver for a generic analog TV connector. -config DRM_OMAP_PANEL_DPI - tristate "Generic DPI panel" - depends on BACKLIGHT_CLASS_DEVICE - help - Driver for generic DPI panels. - config DRM_OMAP_PANEL_DSI_CM tristate "Generic DSI Command Mode Panel" depends on BACKLIGHT_CLASS_DEVICE diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile index d99659e1381b..1db34d4fed64 100644 --- a/drivers/gpu/drm/omapdrm/displays/Makefile +++ b/drivers/gpu/drm/omapdrm/displays/Makefile @@ -1,11 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o -obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o -obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o -obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 28a3ce8f88d2..6c0561101874 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -35,50 +35,9 @@ static void tvc_disconnect(struct omap_dss_device *src, { } -static int tvc_enable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - dev_dbg(ddata->dev, "enable\n"); - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return r; -} - -static void tvc_disable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - dev_dbg(ddata->dev, "disable\n"); - - if (!omapdss_device_is_enabled(dssdev)) - return; - - src->ops->disable(src); - - dssdev->state = 
OMAP_DSS_DISPLAY_DISABLED; -} - static const struct omap_dss_device_ops tvc_ops = { .connect = tvc_connect, .disconnect = tvc_disconnect, - - .enable = tvc_enable, - .disable = tvc_disable, }; static int tvc_probe(struct platform_device *pdev) @@ -97,6 +56,7 @@ static int tvc_probe(struct platform_device *pdev) dssdev->ops = &tvc_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_VENC; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); @@ -109,12 +69,9 @@ static int tvc_probe(struct platform_device *pdev) static int __exit tvc_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev = &ddata->dssdev; omapdss_device_unregister(&ddata->dssdev); - tvc_disable(dssdev); - return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c deleted file mode 100644 index 24b14f44248e..000000000000 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Generic DVI Connector driver - * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ - * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - */ - -#include <linux/gpio/consumer.h> -#include <linux/i2c.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> - -#include <drm/drm_edid.h> - -#include "../dss/omapdss.h" - -struct panel_drv_data { - struct omap_dss_device dssdev; - - struct i2c_adapter *i2c_adapter; - - struct gpio_desc *hpd_gpio; - - void (*hpd_cb)(void *cb_data, enum drm_connector_status status); - void *hpd_cb_data; - bool hpd_enabled; - /* mutex for hpd fields above */ - struct mutex hpd_lock; -}; - -#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) - -static int dvic_connect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ - return 0; -} - -static void dvic_disconnect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ -} - -static int dvic_enable(struct omap_dss_device *dssdev) -{ - struct omap_dss_device *src = dssdev->src; - int r; - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; -} - -static void dvic_disable(struct omap_dss_device *dssdev) -{ - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - -static int dvic_ddc_read(struct i2c_adapter *adapter, - unsigned char *buf, u16 count, u8 offset) -{ - int r, retries; - - for (retries = 3; retries > 0; retries--) { - struct i2c_msg msgs[] = { - { - .addr = DDC_ADDR, - .flags = 0, - .len = 1, - .buf = &offset, - }, { - .addr = DDC_ADDR, - .flags = I2C_M_RD, - .len = count, - .buf = buf, - } - }; - - r = i2c_transfer(adapter, msgs, 2); - if (r == 2) - return 0; - - if (r != -EAGAIN) - break; - } - - return r < 0 ? 
r : -EIO; -} - -static int dvic_read_edid(struct omap_dss_device *dssdev, - u8 *edid, int len) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - int r, l, bytes_read; - - l = min(EDID_LENGTH, len); - r = dvic_ddc_read(ddata->i2c_adapter, edid, l, 0); - if (r) - return r; - - bytes_read = l; - - /* if there are extensions, read second block */ - if (len > EDID_LENGTH && edid[0x7e] > 0) { - l = min(EDID_LENGTH, len - EDID_LENGTH); - - r = dvic_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH, - l, EDID_LENGTH); - if (r) - return r; - - bytes_read += l; - } - - return bytes_read; -} - -static bool dvic_detect(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - unsigned char out; - int r; - - if (ddata->hpd_gpio) - return gpiod_get_value_cansleep(ddata->hpd_gpio); - - if (!ddata->i2c_adapter) - return true; - - r = dvic_ddc_read(ddata->i2c_adapter, &out, 1, 0); - - return r == 0; -} - -static void dvic_register_hpd_cb(struct omap_dss_device *dssdev, - void (*cb)(void *cb_data, - enum drm_connector_status status), - void *cb_data) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - mutex_lock(&ddata->hpd_lock); - ddata->hpd_cb = cb; - ddata->hpd_cb_data = cb_data; - mutex_unlock(&ddata->hpd_lock); -} - -static void dvic_unregister_hpd_cb(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - mutex_lock(&ddata->hpd_lock); - ddata->hpd_cb = NULL; - ddata->hpd_cb_data = NULL; - mutex_unlock(&ddata->hpd_lock); -} - -static const struct omap_dss_device_ops dvic_ops = { - .connect = dvic_connect, - .disconnect = dvic_disconnect, - - .enable = dvic_enable, - .disable = dvic_disable, - - .read_edid = dvic_read_edid, - .detect = dvic_detect, - - .register_hpd_cb = dvic_register_hpd_cb, - .unregister_hpd_cb = dvic_unregister_hpd_cb, -}; - -static irqreturn_t dvic_hpd_isr(int irq, void *data) -{ - struct panel_drv_data *ddata = data; - - mutex_lock(&ddata->hpd_lock); - if (ddata->hpd_enabled && ddata->hpd_cb) { - enum drm_connector_status status; - - if (dvic_detect(&ddata->dssdev)) - status = connector_status_connected; - else - status = connector_status_disconnected; - - ddata->hpd_cb(ddata->hpd_cb_data, status); - } - mutex_unlock(&ddata->hpd_lock); - - return IRQ_HANDLED; -} - -static int dvic_probe_of(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct device_node *node = pdev->dev.of_node; - struct device_node *adapter_node; - struct i2c_adapter *adapter; - struct gpio_desc *gpio; - int r; - - gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN); - if (IS_ERR(gpio)) { - dev_err(&pdev->dev, "failed to parse HPD gpio\n"); - return PTR_ERR(gpio); - } - - ddata->hpd_gpio = gpio; - - mutex_init(&ddata->hpd_lock); - - if (ddata->hpd_gpio) { - r = devm_request_threaded_irq(&pdev->dev, - gpiod_to_irq(ddata->hpd_gpio), NULL, dvic_hpd_isr, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - "DVI HPD", ddata); - if (r) - return r; - } - - adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); - if (adapter_node) { - adapter = of_get_i2c_adapter_by_node(adapter_node); - of_node_put(adapter_node); - if (adapter == NULL) { - dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); - return -EPROBE_DEFER; - } - - ddata->i2c_adapter = adapter; - } - - return 0; -} - -static int dvic_probe(struct platform_device *pdev) -{ - struct panel_drv_data *ddata; - struct omap_dss_device *dssdev; - int r; - - ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), 
GFP_KERNEL); - if (!ddata) - return -ENOMEM; - - platform_set_drvdata(pdev, ddata); - - r = dvic_probe_of(pdev); - if (r) - return r; - - dssdev = &ddata->dssdev; - dssdev->ops = &dvic_ops; - dssdev->dev = &pdev->dev; - dssdev->type = OMAP_DISPLAY_TYPE_DVI; - dssdev->owner = THIS_MODULE; - dssdev->of_ports = BIT(0); - - if (ddata->hpd_gpio) - dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT - | OMAP_DSS_DEVICE_OP_HPD; - if (ddata->i2c_adapter) - dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT - | OMAP_DSS_DEVICE_OP_EDID; - - omapdss_display_init(dssdev); - omapdss_device_register(dssdev); - - return 0; -} - -static int __exit dvic_remove(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev = &ddata->dssdev; - - omapdss_device_unregister(&ddata->dssdev); - - dvic_disable(dssdev); - - i2c_put_adapter(ddata->i2c_adapter); - - mutex_destroy(&ddata->hpd_lock); - - return 0; -} - -static const struct of_device_id dvic_of_match[] = { - { .compatible = "omapdss,dvi-connector", }, - {}, -}; - -MODULE_DEVICE_TABLE(of, dvic_of_match); - -static struct platform_driver dvi_connector_driver = { - .probe = dvic_probe, - .remove = __exit_p(dvic_remove), - .driver = { - .name = "connector-dvi", - .of_match_table = dvic_of_match, - .suppress_bind_attrs = true, - }, -}; - -module_platform_driver(dvi_connector_driver); - -MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); -MODULE_DESCRIPTION("Generic DVI Connector driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index e602fa4a50a4..68d6f6e44b03 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -41,44 +41,6 @@ static void hdmic_disconnect(struct omap_dss_device *src, { } -static int hdmic_enable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - dev_dbg(ddata->dev, "enable\n"); - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return r; -} - -static void hdmic_disable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - dev_dbg(ddata->dev, "disable\n"); - - if (!omapdss_device_is_enabled(dssdev)) - return; - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - static bool hdmic_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); @@ -113,9 +75,6 @@ static const struct omap_dss_device_ops hdmic_ops = { .connect = hdmic_connect, .disconnect = hdmic_disconnect, - .enable = hdmic_enable, - .disable = hdmic_disable, - .detect = hdmic_detect, .register_hpd_cb = hdmic_register_hpd_cb, .unregister_hpd_cb = hdmic_unregister_hpd_cb, @@ -181,6 +140,7 @@ static int hdmic_probe(struct platform_device *pdev) dssdev->ops = &hdmic_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_HDMI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); dssdev->ops_flags = ddata->hpd_gpio @@ -196,12 +156,9 @@ static int hdmic_probe(struct platform_device *pdev) static int __exit hdmic_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct 
omap_dss_device *dssdev = &ddata->dssdev; omapdss_device_unregister(&ddata->dssdev); - hdmic_disable(dssdev); - return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 4fefd80f53bb..29a5a130ebd1 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -41,48 +41,20 @@ static void opa362_disconnect(struct omap_dss_device *src, omapdss_device_disconnect(dst, dst->next); } -static int opa362_enable(struct omap_dss_device *dssdev) +static void opa362_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - dev_dbg(dssdev->dev, "enable\n"); - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 1); - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; } static void opa362_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - dev_dbg(dssdev->dev, "disable\n"); - - if (!omapdss_device_is_enabled(dssdev)) - return; if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 0); - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } static const struct omap_dss_device_ops opa362_ops = { @@ -116,7 +88,6 @@ static int opa362_probe(struct platform_device *pdev) dssdev->ops = &opa362_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_VENC; - dssdev->output_type = OMAP_DISPLAY_TYPE_VENC; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(1) | BIT(0); @@ -141,13 +112,7 @@ static int __exit opa362_remove(struct platform_device *pdev) omapdss_device_put(dssdev->next); omapdss_device_unregister(&ddata->dssdev); - WARN_ON(omapdss_device_is_enabled(dssdev)); - if (omapdss_device_is_enabled(dssdev)) - opa362_disable(dssdev); - - WARN_ON(omapdss_device_is_connected(dssdev)); - if (omapdss_device_is_connected(dssdev)) - omapdss_device_disconnect(NULL, dssdev); + opa362_disable(dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c deleted file mode 100644 index f1a748353279..000000000000 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ /dev/null @@ -1,170 +0,0 @@ -/* - * TFP410 DPI-to-DVI encoder driver - * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ - * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. 
- */ - -#include <linux/gpio/consumer.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> - -#include "../dss/omapdss.h" - -struct panel_drv_data { - struct omap_dss_device dssdev; - - struct gpio_desc *pd_gpio; -}; - -#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) - -static int tfp410_connect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ - return omapdss_device_connect(dst->dss, dst, dst->next); -} - -static void tfp410_disconnect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ - omapdss_device_disconnect(dst, dst->next); -} - -static int tfp410_enable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - - if (ddata->pd_gpio) - gpiod_set_value_cansleep(ddata->pd_gpio, 0); - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; -} - -static void tfp410_disable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; - - if (ddata->pd_gpio) - gpiod_set_value_cansleep(ddata->pd_gpio, 0); - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - -static const struct omap_dss_device_ops tfp410_ops = { - .connect = tfp410_connect, - .disconnect = tfp410_disconnect, - .enable = tfp410_enable, - .disable = tfp410_disable, -}; - -static int tfp410_probe(struct platform_device *pdev) -{ - struct panel_drv_data *ddata; - struct omap_dss_device *dssdev; - struct gpio_desc *gpio; - - ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); - if (!ddata) - return -ENOMEM; - - platform_set_drvdata(pdev, ddata); - - /* Powerdown GPIO */ - gpio = devm_gpiod_get_optional(&pdev->dev, "powerdown", GPIOD_OUT_HIGH); - if (IS_ERR(gpio)) { - dev_err(&pdev->dev, "failed to parse powerdown gpio\n"); - return PTR_ERR(gpio); - } - - ddata->pd_gpio = gpio; - - dssdev = &ddata->dssdev; - dssdev->ops = &tfp410_ops; - dssdev->dev = &pdev->dev; - dssdev->type = OMAP_DISPLAY_TYPE_DPI; - dssdev->output_type = OMAP_DISPLAY_TYPE_DVI; - dssdev->owner = THIS_MODULE; - dssdev->of_ports = BIT(1) | BIT(0); - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE - | DRM_BUS_FLAG_PIXDATA_POSEDGE; - - dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1); - if (IS_ERR(dssdev->next)) { - if (PTR_ERR(dssdev->next) != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to find video sink\n"); - return PTR_ERR(dssdev->next); - } - - omapdss_device_register(dssdev); - - return 0; -} - -static int __exit tfp410_remove(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev = &ddata->dssdev; - - if (dssdev->next) - omapdss_device_put(dssdev->next); - omapdss_device_unregister(&ddata->dssdev); - - WARN_ON(omapdss_device_is_enabled(dssdev)); - if (omapdss_device_is_enabled(dssdev)) - tfp410_disable(dssdev); - - WARN_ON(omapdss_device_is_connected(dssdev)); - if (omapdss_device_is_connected(dssdev)) - omapdss_device_disconnect(NULL, dssdev); - - return 0; -} - -static const struct of_device_id tfp410_of_match[] = { - { .compatible = "omapdss,ti,tfp410", }, - {}, -}; - -MODULE_DEVICE_TABLE(of, tfp410_of_match); - -static struct 
platform_driver tfp410_driver = { - .probe = tfp410_probe, - .remove = __exit_p(tfp410_remove), - .driver = { - .name = "tfp410", - .of_match_table = tfp410_of_match, - .suppress_bind_attrs = true, - }, -}; - -module_platform_driver(tfp410_driver); - -MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); -MODULE_DESCRIPTION("TFP410 DPI to DVI encoder driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 94de55fd8884..bc03752d2762 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -62,35 +62,6 @@ static void tpd_disconnect(struct omap_dss_device *src, omapdss_device_disconnect(dst, dst->next); } -static int tpd_enable(struct omap_dss_device *dssdev) -{ - struct omap_dss_device *src = dssdev->src; - int r; - - if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return r; -} - -static void tpd_disable(struct omap_dss_device *dssdev) -{ - struct omap_dss_device *src = dssdev->src; - - if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) - return; - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - static bool tpd_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); @@ -124,8 +95,6 @@ static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev) static const struct omap_dss_device_ops tpd_ops = { .connect = tpd_connect, .disconnect = tpd_disconnect, - .enable = tpd_enable, - .disable = tpd_disable, .detect = tpd_detect, .register_hpd_cb = tpd_register_hpd_cb, .unregister_hpd_cb = tpd_unregister_hpd_cb, @@ -198,7 +167,6 @@ static int tpd_probe(struct platform_device *pdev) dssdev->ops = &tpd_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_HDMI; - dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(1) | BIT(0); dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT @@ -225,14 +193,6 @@ static int __exit tpd_remove(struct platform_device *pdev) omapdss_device_put(dssdev->next); omapdss_device_unregister(&ddata->dssdev); - WARN_ON(omapdss_device_is_enabled(dssdev)); - if (omapdss_device_is_enabled(dssdev)) - tpd_disable(dssdev); - - WARN_ON(omapdss_device_is_connected(dssdev)); - if (omapdss_device_is_connected(dssdev)) - omapdss_device_disconnect(NULL, dssdev); - return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c deleted file mode 100644 index 465120809eb3..000000000000 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Generic MIPI DPI Panel Driver - * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ - * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. 
- */ - -#include <linux/gpio/consumer.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/of.h> -#include <linux/regulator/consumer.h> -#include <linux/backlight.h> - -#include <video/of_display_timing.h> - -#include "../dss/omapdss.h" - -struct panel_drv_data { - struct omap_dss_device dssdev; - - struct videomode vm; - - struct backlight_device *backlight; - - struct gpio_desc *enable_gpio; - struct regulator *vcc_supply; -}; - -#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) - -static int panel_dpi_connect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ - return 0; -} - -static void panel_dpi_disconnect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ -} - -static int panel_dpi_enable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - - r = regulator_enable(ddata->vcc_supply); - if (r) { - src->ops->disable(src); - return r; - } - - gpiod_set_value_cansleep(ddata->enable_gpio, 1); - backlight_enable(ddata->backlight); - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; -} - -static void panel_dpi_disable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; - - backlight_disable(ddata->backlight); - - gpiod_set_value_cansleep(ddata->enable_gpio, 0); - regulator_disable(ddata->vcc_supply); - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - -static void panel_dpi_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - *vm = ddata->vm; -} - -static const struct omap_dss_device_ops panel_dpi_ops = { - .connect = panel_dpi_connect, - .disconnect = panel_dpi_disconnect, - - .enable = panel_dpi_enable, - .disable = panel_dpi_disable, - - .get_timings = panel_dpi_get_timings, -}; - -static int panel_dpi_probe_of(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct device_node *node = pdev->dev.of_node; - int r; - struct display_timing timing; - struct gpio_desc *gpio; - - gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR(gpio)) - return PTR_ERR(gpio); - - ddata->enable_gpio = gpio; - - /* - * Many different panels are supported by this driver and there are - * probably very different needs for their reset pins in regards to - * timing and order relative to the enable gpio. So for now it's just - * ensured that the reset line isn't active. 
- */ - gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(gpio)) - return PTR_ERR(gpio); - - ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc"); - if (IS_ERR(ddata->vcc_supply)) - return PTR_ERR(ddata->vcc_supply); - - ddata->backlight = devm_of_find_backlight(&pdev->dev); - - if (IS_ERR(ddata->backlight)) - return PTR_ERR(ddata->backlight); - - r = of_get_display_timing(node, "panel-timing", &timing); - if (r) { - dev_err(&pdev->dev, "failed to get video timing\n"); - return r; - } - - videomode_from_timing(&timing, &ddata->vm); - - return 0; -} - -static int panel_dpi_probe(struct platform_device *pdev) -{ - struct panel_drv_data *ddata; - struct omap_dss_device *dssdev; - int r; - - ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); - if (ddata == NULL) - return -ENOMEM; - - platform_set_drvdata(pdev, ddata); - - r = panel_dpi_probe_of(pdev); - if (r) - return r; - - dssdev = &ddata->dssdev; - dssdev->dev = &pdev->dev; - dssdev->ops = &panel_dpi_ops; - dssdev->type = OMAP_DISPLAY_TYPE_DPI; - dssdev->owner = THIS_MODULE; - dssdev->of_ports = BIT(0); - drm_bus_flags_from_videomode(&ddata->vm, &dssdev->bus_flags); - - omapdss_display_init(dssdev); - omapdss_device_register(dssdev); - - return 0; -} - -static int __exit panel_dpi_remove(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev = &ddata->dssdev; - - omapdss_device_unregister(dssdev); - - panel_dpi_disable(dssdev); - - return 0; -} - -static const struct of_device_id panel_dpi_of_match[] = { - { .compatible = "omapdss,panel-dpi", }, - {}, -}; - -MODULE_DEVICE_TABLE(of, panel_dpi_of_match); - -static struct platform_driver panel_dpi_driver = { - .probe = panel_dpi_probe, - .remove = __exit_p(panel_dpi_remove), - .driver = { - .name = "panel-dpi", - .of_match_table = panel_dpi_of_match, - .suppress_bind_attrs = true, - }, -}; - -module_platform_driver(panel_dpi_driver); - -MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); -MODULE_DESCRIPTION("Generic MIPI DPI Panel Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 29692a5217c5..741a5e324767 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -24,6 +24,8 @@ #include <linux/of_device.h> #include <linux/regulator/consumer.h> +#include <drm/drm_connector.h> + #include <video/mipi_display.h> #include <video/of_display_timing.h> @@ -41,6 +43,7 @@ struct panel_drv_data { struct omap_dss_device dssdev; + struct omap_dss_device *src; struct videomode vm; @@ -141,7 +144,7 @@ static void hw_guard_wait(struct panel_drv_data *ddata) static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r; u8 buf[1]; @@ -157,14 +160,14 @@ static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data) static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1); } static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; u8 buf[2] = { dcs_cmd, param }; return src->ops->dsi.dcs_write(src, 
ddata->channel, buf, 2); @@ -173,7 +176,7 @@ static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param) static int dsicm_sleep_in(struct panel_drv_data *ddata) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; u8 cmd; int r; @@ -228,7 +231,7 @@ static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3) static int dsicm_set_update_window(struct panel_drv_data *ddata, u16 x, u16 y, u16 w, u16 h) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r; u16 x1 = x; u16 x2 = x + w - 1; @@ -275,7 +278,7 @@ static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata) static int dsicm_enter_ulps(struct panel_drv_data *ddata) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r; if (ddata->ulps_enabled) @@ -309,18 +312,13 @@ err: static int dsicm_exit_ulps(struct panel_drv_data *ddata) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r; if (!ddata->ulps_enabled) return 0; - r = src->ops->enable(src); - if (r) { - dev_err(&ddata->pdev->dev, "failed to enable DSI\n"); - goto err1; - } - + src->ops->enable(src); src->ops->dsi.enable_hs(src, ddata->channel, true); r = _dsicm_enable_te(ddata, true); @@ -347,7 +345,7 @@ err2: enable_irq(gpiod_to_irq(ddata->ext_te_gpio)); ddata->ulps_enabled = false; } -err1: + dsicm_queue_ulps_work(ddata); return r; @@ -366,7 +364,7 @@ static int dsicm_wake_up(struct panel_drv_data *ddata) static int dsicm_bl_update_status(struct backlight_device *dev) { struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r = 0; int level; @@ -414,7 +412,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; u8 errors = 0; int r; @@ -446,7 +444,7 @@ static ssize_t dsicm_hw_revision_show(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; u8 id1, id2, id3; int r; @@ -478,7 +476,7 @@ static ssize_t dsicm_store_ulps(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; unsigned long t; int r; @@ -528,7 +526,7 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; unsigned long t; int r; @@ -603,7 +601,7 @@ static void dsicm_hw_reset(struct panel_drv_data *ddata) static int dsicm_power_on(struct panel_drv_data *ddata) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; u8 id1, id2, id3; int r; struct omap_dss_dsi_config dsi_config = { @@ -649,11 +647,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) goto err_vddi; } - r = src->ops->enable(src); - if (r) { - dev_err(&ddata->pdev->dev, "failed to enable DSI\n"); - 
goto err_vddi; - } + src->ops->enable(src); dsicm_hw_reset(ddata); @@ -722,7 +716,7 @@ err_vpnl: static void dsicm_power_off(struct panel_drv_data *ddata) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r; src->ops->dsi.disable_video_output(src, ddata->channel); @@ -776,6 +770,7 @@ static int dsicm_connect(struct omap_dss_device *src, return r; } + ddata->src = src; return 0; } @@ -785,28 +780,17 @@ static void dsicm_disconnect(struct omap_dss_device *src, struct panel_drv_data *ddata = to_panel_data(dst); src->ops->dsi.release_vc(src, ddata->channel); + ddata->src = NULL; } -static int dsicm_enable(struct omap_dss_device *dssdev) +static void dsicm_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; int r; - dev_dbg(&ddata->pdev->dev, "enable\n"); - mutex_lock(&ddata->lock); - if (!omapdss_device_is_connected(dssdev)) { - r = -ENODEV; - goto err; - } - - if (omapdss_device_is_enabled(dssdev)) { - r = 0; - goto err; - } - src->ops->dsi.bus_lock(src); r = dsicm_power_on(ddata); @@ -816,27 +800,22 @@ static int dsicm_enable(struct omap_dss_device *dssdev) if (r) goto err; - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - mutex_unlock(&ddata->lock); dsicm_bl_power(ddata, true); - return 0; + return; err: - dev_dbg(&ddata->pdev->dev, "enable failed\n"); + dev_dbg(&ddata->pdev->dev, "enable failed (%d)\n", r); mutex_unlock(&ddata->lock); - return r; } static void dsicm_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; int r; - dev_dbg(&ddata->pdev->dev, "disable\n"); - dsicm_bl_power(ddata, false); mutex_lock(&ddata->lock); @@ -845,23 +824,19 @@ static void dsicm_disable(struct omap_dss_device *dssdev) src->ops->dsi.bus_lock(src); - if (omapdss_device_is_enabled(dssdev)) { - r = dsicm_wake_up(ddata); - if (!r) - dsicm_power_off(ddata); - } + r = dsicm_wake_up(ddata); + if (!r) + dsicm_power_off(ddata); src->ops->dsi.bus_unlock(src); - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; - mutex_unlock(&ddata->lock); } static void dsicm_framedone_cb(int err, void *data) { struct panel_drv_data *ddata = data; - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err); src->ops->dsi.bus_unlock(src); @@ -870,7 +845,7 @@ static void dsicm_framedone_cb(int err, void *data) static irqreturn_t dsicm_te_isr(int irq, void *data) { struct panel_drv_data *ddata = data; - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int old; int r; @@ -896,7 +871,7 @@ static void dsicm_te_timeout_work_callback(struct work_struct *work) { struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, te_timeout_work.work); - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n"); @@ -908,7 +883,7 @@ static int dsicm_update(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; int r; dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); @@ -954,7 +929,7 @@ err: static int dsicm_sync(struct omap_dss_device *dssdev) { struct 
panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; dev_dbg(&ddata->pdev->dev, "sync\n"); @@ -970,7 +945,7 @@ static int dsicm_sync(struct omap_dss_device *dssdev) static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r; if (enable) @@ -990,7 +965,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable) static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; int r; mutex_lock(&ddata->lock); @@ -1041,7 +1016,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; int r; int first = 1; int plen; @@ -1123,7 +1098,7 @@ static void dsicm_ulps_work(struct work_struct *work) struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, ulps_work.work); struct omap_dss_device *dssdev = &ddata->dssdev; - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; mutex_lock(&ddata->lock); @@ -1140,29 +1115,32 @@ static void dsicm_ulps_work(struct work_struct *work) mutex_unlock(&ddata->lock); } -static void dsicm_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int dsicm_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + connector->display_info.width_mm = ddata->width_mm; + connector->display_info.height_mm = ddata->height_mm; + + return omapdss_display_get_modes(connector, &ddata->vm); } static int dsicm_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + struct drm_display_mode *mode) { struct panel_drv_data *ddata = to_panel_data(dssdev); int ret = 0; - if (vm->hactive != ddata->vm.hactive) + if (mode->hdisplay != ddata->vm.hactive) ret = -EINVAL; - if (vm->vactive != ddata->vm.vactive) + if (mode->vdisplay != ddata->vm.vactive) ret = -EINVAL; if (ret) { dev_warn(dssdev->dev, "wrong resolution: %d x %d", - vm->hactive, vm->vactive); + mode->hdisplay, mode->vdisplay); dev_warn(dssdev->dev, "panel resolution: %d x %d", ddata->vm.hactive, ddata->vm.vactive); } @@ -1170,15 +1148,6 @@ static int dsicm_check_timings(struct omap_dss_device *dssdev, return ret; } -static void dsicm_get_size(struct omap_dss_device *dssdev, - unsigned int *width, unsigned int *height) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - *width = ddata->width_mm; - *height = ddata->height_mm; -} - static const struct omap_dss_device_ops dsicm_ops = { .connect = dsicm_connect, .disconnect = dsicm_disconnect, @@ -1186,7 +1155,7 @@ static const struct omap_dss_device_ops dsicm_ops = { .enable = dsicm_enable, .disable = dsicm_disable, - .get_timings = dsicm_get_timings, + .get_modes = dsicm_get_modes, .check_timings = dsicm_check_timings, }; @@ -1194,8 +1163,6 @@ static const struct omap_dss_driver dsicm_dss_driver = { .update = dsicm_update, .sync = dsicm_sync, - .get_size = dsicm_get_size, - .enable_te = dsicm_enable_te, .get_te = dsicm_get_te, @@ -1305,8 +1272,10 @@ static int dsicm_probe(struct platform_device *pdev) dssdev->ops = &dsicm_ops; dssdev->driver = 
&dsicm_dss_driver; dssdev->type = OMAP_DISPLAY_TYPE_DSI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; @@ -1385,8 +1354,9 @@ static int __exit dsicm_remove(struct platform_device *pdev) omapdss_device_unregister(dssdev); - dsicm_disable(dssdev); - omapdss_device_disconnect(dssdev->src, dssdev); + if (omapdss_device_is_enabled(dssdev)) + dsicm_disable(dssdev); + omapdss_device_disconnect(ddata->src, dssdev); sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index f6ef8ff964dd..99f2350d462c 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -123,52 +123,28 @@ static void lb035q02_disconnect(struct omap_dss_device *src, { } -static int lb035q02_enable(struct omap_dss_device *dssdev) +static void lb035q02_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 1); - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; } static void lb035q02_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 0); - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void lb035q02_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int lb035q02_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + return omapdss_display_get_modes(connector, &ddata->vm); } static const struct omap_dss_device_ops lb035q02_ops = { @@ -178,7 +154,7 @@ static const struct omap_dss_device_ops lb035q02_ops = { .enable = lb035q02_enable, .disable = lb035q02_disable, - .get_timings = lb035q02_get_timings, + .get_modes = lb035q02_get_modes, }; static int lb035q02_probe_of(struct spi_device *spi) @@ -221,16 +197,19 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) dssdev->dev = &spi->dev; dssdev->ops = &lb035q02_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; /* * Note: According to the panel documentation: * DE is active LOW * DATA needs to be driven on the FALLING edge */ - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE - | DRM_BUS_FLAG_PIXDATA_POSEDGE; + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE + | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index f445de6369f7..c2409815a204 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ 
b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -118,50 +118,26 @@ static void nec_8048_disconnect(struct omap_dss_device *src, { } -static int nec_8048_enable(struct omap_dss_device *dssdev) +static void nec_8048_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; gpiod_set_value_cansleep(ddata->res_gpio, 1); - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; } static void nec_8048_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; gpiod_set_value_cansleep(ddata->res_gpio, 0); - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void nec_8048_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int nec_8048_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + return omapdss_display_get_modes(connector, &ddata->vm); } static const struct omap_dss_device_ops nec_8048_ops = { @@ -171,7 +147,7 @@ static const struct omap_dss_device_ops nec_8048_ops = { .enable = nec_8048_enable, .disable = nec_8048_disable, - .get_timings = nec_8048_get_timings, + .get_modes = nec_8048_get_modes, }; static int nec_8048_probe(struct spi_device *spi) @@ -216,10 +192,13 @@ static int nec_8048_probe(struct spi_device *spi) dssdev->dev = &spi->dev; dssdev->ops = &nec_8048_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE - | DRM_BUS_FLAG_PIXDATA_POSEDGE; + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE + | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 64b1369cb274..9c545de430f6 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -62,29 +62,22 @@ static void sharp_ls_disconnect(struct omap_dss_device *src, { } -static int sharp_ls_enable(struct omap_dss_device *dssdev) +static void sharp_ls_pre_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; int r; - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - if (ddata->vcc) { r = regulator_enable(ddata->vcc); - if (r != 0) - return r; + if (r) + dev_err(dssdev->dev, "%s: failed to enable regulator\n", + __func__); } +} - r = src->ops->enable(src); - if (r) { - regulator_disable(ddata->vcc); - return r; - } +static void sharp_ls_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); /* wait couple of vsyncs until enabling the LCD */ msleep(50); @@ -94,19 +87,11 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev) if (ddata->ini_gpio) gpiod_set_value_cansleep(ddata->ini_gpio, 1); - - dssdev->state 
= OMAP_DSS_DISPLAY_ACTIVE; - - return 0; } static void sharp_ls_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; if (ddata->ini_gpio) gpiod_set_value_cansleep(ddata->ini_gpio, 0); @@ -115,33 +100,35 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev) gpiod_set_value_cansleep(ddata->resb_gpio, 0); /* wait at least 5 vsyncs after disabling the LCD */ - msleep(100); +} - src->ops->disable(src); +static void sharp_ls_post_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); if (ddata->vcc) regulator_disable(ddata->vcc); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void sharp_ls_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int sharp_ls_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + return omapdss_display_get_modes(connector, &ddata->vm); } static const struct omap_dss_device_ops sharp_ls_ops = { .connect = sharp_ls_connect, .disconnect = sharp_ls_disconnect, + .pre_enable = sharp_ls_pre_enable, .enable = sharp_ls_enable, .disable = sharp_ls_disable, + .post_disable = sharp_ls_post_disable, - .get_timings = sharp_ls_get_timings, + .get_modes = sharp_ls_get_modes, }; static int sharp_ls_get_gpio_of(struct device *dev, int index, int val, @@ -220,15 +207,18 @@ static int sharp_ls_probe(struct platform_device *pdev) dssdev->dev = &pdev->dev; dssdev->ops = &sharp_ls_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; /* * Note: According to the panel documentation: * DATA needs to be driven on the FALLING edge */ - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE - | DRM_BUS_FLAG_PIXDATA_POSEDGE; + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE + | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); @@ -243,7 +233,10 @@ static int __exit sharp_ls_remove(struct platform_device *pdev) omapdss_device_unregister(dssdev); - sharp_ls_disable(dssdev); + if (omapdss_device_is_enabled(dssdev)) { + sharp_ls_disable(dssdev); + sharp_ls_post_disable(dssdev); + } return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index e04663856b31..2038def14ba1 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -516,17 +516,9 @@ static void acx565akm_disconnect(struct omap_dss_device *src, static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; dev_dbg(&ddata->spi->dev, "%s\n", __func__); - r = src->ops->enable(src); - if (r) { - pr_err("%s sdi enable failed\n", __func__); - return r; - } - /*FIXME tweak me */ msleep(50); @@ -562,7 +554,6 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) static void acx565akm_panel_power_off(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; dev_dbg(dssdev->dev, "%s\n", __func__); @@ -585,56 +576,32 @@ static void 
acx565akm_panel_power_off(struct omap_dss_device *dssdev) /* FIXME need to tweak this delay */ msleep(100); - - src->ops->disable(src); } -static int acx565akm_enable(struct omap_dss_device *dssdev) +static void acx565akm_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - int r; - - dev_dbg(dssdev->dev, "%s\n", __func__); - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; mutex_lock(&ddata->mutex); - r = acx565akm_panel_power_on(dssdev); + acx565akm_panel_power_on(dssdev); mutex_unlock(&ddata->mutex); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; } static void acx565akm_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - dev_dbg(dssdev->dev, "%s\n", __func__); - - if (!omapdss_device_is_enabled(dssdev)) - return; - mutex_lock(&ddata->mutex); acx565akm_panel_power_off(dssdev); mutex_unlock(&ddata->mutex); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void acx565akm_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int acx565akm_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + return omapdss_display_get_modes(connector, &ddata->vm); } static const struct omap_dss_device_ops acx565akm_ops = { @@ -644,7 +611,7 @@ static const struct omap_dss_device_ops acx565akm_ops = { .enable = acx565akm_enable, .disable = acx565akm_disable, - .get_timings = acx565akm_get_timings, + .get_modes = acx565akm_get_modes, }; static int acx565akm_probe(struct spi_device *spi) @@ -739,10 +706,13 @@ static int acx565akm_probe(struct spi_device *spi) dssdev->dev = &spi->dev; dssdev->ops = &acx565akm_ops; dssdev->type = OMAP_DISPLAY_TYPE_SDI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE - | DRM_BUS_FLAG_PIXDATA_POSEDGE; + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE + | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); @@ -766,7 +736,8 @@ static int acx565akm_remove(struct spi_device *spi) omapdss_device_unregister(dssdev); - acx565akm_disable(dssdev); + if (omapdss_device_is_enabled(dssdev)) + acx565akm_disable(dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 7ddc8c574a61..2ad161e33106 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -35,6 +35,8 @@ struct panel_drv_data { struct videomode vm; + struct backlight_device *backlight; + struct spi_device *spi_dev; }; @@ -169,24 +171,12 @@ static void td028ttec1_panel_disconnect(struct omap_dss_device *src, { } -static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) +static void td028ttec1_panel_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; + int r = 0; - dev_dbg(dssdev->dev, "td028ttec1_panel_enable() - state %d\n", - dssdev->state); + 
dev_dbg(dssdev->dev, "%s: state %d\n", __func__, dssdev->state); /* three times command zero */ r |= jbt_ret_write_0(ddata, 0x00); @@ -197,8 +187,8 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) usleep_range(1000, 2000); if (r) { - dev_warn(dssdev->dev, "transfer error\n"); - goto transfer_err; + dev_warn(dssdev->dev, "%s: transfer error\n", __func__); + return; } /* deep standby out */ @@ -268,20 +258,17 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) r |= jbt_ret_write_0(ddata, JBT_REG_DISPLAY_ON); - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - -transfer_err: + if (r) + dev_err(dssdev->dev, "%s: write error\n", __func__); - return r ? -EIO : 0; + backlight_enable(ddata->backlight); } static void td028ttec1_panel_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - if (!omapdss_device_is_enabled(dssdev)) - return; + backlight_disable(ddata->backlight); dev_dbg(dssdev->dev, "td028ttec1_panel_disable()\n"); @@ -289,18 +276,14 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev) jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0x8002); jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN); jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00); - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int td028ttec1_panel_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + return omapdss_display_get_modes(connector, &ddata->vm); } static const struct omap_dss_device_ops td028ttec1_ops = { @@ -310,7 +293,7 @@ static const struct omap_dss_device_ops td028ttec1_ops = { .enable = td028ttec1_panel_enable, .disable = td028ttec1_panel_disable, - .get_timings = td028ttec1_panel_get_timings, + .get_modes = td028ttec1_panel_get_modes, }; static int td028ttec1_panel_probe(struct spi_device *spi) @@ -334,6 +317,10 @@ static int td028ttec1_panel_probe(struct spi_device *spi) if (ddata == NULL) return -ENOMEM; + ddata->backlight = devm_of_find_backlight(&spi->dev); + if (IS_ERR(ddata->backlight)) + return PTR_ERR(ddata->backlight); + dev_set_drvdata(&spi->dev, ddata); ddata->spi_dev = spi; @@ -344,15 +331,18 @@ static int td028ttec1_panel_probe(struct spi_device *spi) dssdev->dev = &spi->dev; dssdev->ops = &td028ttec1_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; /* * Note: According to the panel documentation: * SYNC needs to be driven on the FALLING edge */ - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE - | DRM_BUS_FLAG_PIXDATA_NEGEDGE; + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE + | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 8440fcb744d9..0b692fc7e5ea 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -320,22 +320,11 @@ static void tpo_td043_disconnect(struct omap_dss_device *src, { } -static int tpo_td043_enable(struct omap_dss_device *dssdev) +static void tpo_td043_enable(struct omap_dss_device 
*dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; int r; - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - /* * If we are resuming from system suspend, SPI clocks might not be * enabled yet, so we'll program the LCD from SPI PM resume callback. @@ -343,38 +332,27 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev) if (!ddata->spi_suspended) { r = tpo_td043_power_on(ddata); if (r) { - src->ops->disable(src); - return r; + dev_err(&ddata->spi->dev, "%s: power on failed (%d)\n", + __func__, r); + return; } } - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; } static void tpo_td043_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; - - src->ops->disable(src); if (!ddata->spi_suspended) tpo_td043_power_off(ddata); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void tpo_td043_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int tpo_td043_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + return omapdss_display_get_modes(connector, &ddata->vm); } static const struct omap_dss_device_ops tpo_td043_ops = { @@ -384,7 +362,7 @@ static const struct omap_dss_device_ops tpo_td043_ops = { .enable = tpo_td043_enable, .disable = tpo_td043_disable, - .get_timings = tpo_td043_get_timings, + .get_modes = tpo_td043_get_modes, }; static int tpo_td043_probe(struct spi_device *spi) @@ -442,15 +420,18 @@ static int tpo_td043_probe(struct spi_device *spi) dssdev->dev = &spi->dev; dssdev->ops = &tpo_td043_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; /* * Note: According to the panel documentation: * SYNC needs to be driven on the FALLING edge */ - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE - | DRM_BUS_FLAG_PIXDATA_NEGEDGE; + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE + | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); @@ -467,7 +448,8 @@ static int tpo_td043_remove(struct spi_device *spi) omapdss_device_unregister(dssdev); - tpo_td043_disable(dssdev); + if (omapdss_device_is_enabled(dssdev)) + tpo_td043_disable(dssdev); sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group); diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 472f56e3de70..f8dad99013e8 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -19,6 +19,7 @@ #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_graph.h> +#include <linux/platform_device.h> #include "dss.h" #include "omapdss.h" @@ -112,13 +113,12 @@ void omapdss_device_put(struct omap_dss_device *dssdev) } EXPORT_SYMBOL(omapdss_device_put); -struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, - unsigned int port) +struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node) { struct omap_dss_device *dssdev; list_for_each_entry(dssdev, &omapdss_devices_list, list) { - if (dssdev->dev->of_node == src && dssdev->of_ports & BIT(port)) + if 
(dssdev->dev->of_node == node) return omapdss_device_get(dssdev); } @@ -126,13 +126,10 @@ struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, } /* - * Search for the next device starting at @from. The type argument specfies - * which device types to consider when searching. Searching for multiple types - * is supported by and'ing their type flags. Release the reference to the @from - * device, and acquire a reference to the returned device if found. + * Search for the next output device starting at @from. Release the reference to + * the @from device, and acquire a reference to the returned device if found. */ -struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, - enum omap_dss_device_type type) +struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from) { struct omap_dss_device *dssdev; struct list_head *list; @@ -160,15 +157,8 @@ struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, goto done; } - /* - * Accept display entities if the display type is requested, - * and output entities if the output type is requested. - */ - if ((type & OMAP_DSS_DEVICE_TYPE_DISPLAY) && - !dssdev->output_type) - goto done; - if ((type & OMAP_DSS_DEVICE_TYPE_OUTPUT) && dssdev->id && - dssdev->next) + if (dssdev->id && + (dssdev->next || dssdev->bridge || dssdev->panel)) goto done; } @@ -183,7 +173,12 @@ done: mutex_unlock(&omapdss_devices_lock); return dssdev; } -EXPORT_SYMBOL(omapdss_device_get_next); +EXPORT_SYMBOL(omapdss_device_next_output); + +static bool omapdss_device_is_connected(struct omap_dss_device *dssdev) +{ + return dssdev->dss; +} int omapdss_device_connect(struct dss_device *dss, struct omap_dss_device *src, @@ -191,7 +186,19 @@ int omapdss_device_connect(struct dss_device *dss, { int ret; - dev_dbg(dst->dev, "connect\n"); + dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n", + src ? dev_name(src->dev) : "NULL", + dst ? dev_name(dst->dev) : "NULL"); + + if (!dst) { + /* + * The destination is NULL when the source is connected to a + * bridge or panel instead of a DSS device. Stop here, we will + * attach the bridge or panel later when we will have a DRM + * encoder. + */ + return src && (src->bridge || src->panel) ? 0 : -EINVAL; + } if (omapdss_device_is_connected(dst)) return -EBUSY; @@ -204,12 +211,6 @@ int omapdss_device_connect(struct dss_device *dss, return ret; } - if (src) { - WARN_ON(src->dst); - dst->src = src; - src->dst = dst; - } - return 0; } EXPORT_SYMBOL_GPL(omapdss_device_connect); @@ -217,19 +218,20 @@ EXPORT_SYMBOL_GPL(omapdss_device_connect); void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { - dev_dbg(dst->dev, "disconnect\n"); + struct dss_device *dss = src ? src->dss : dst->dss; - if (!dst->id && !omapdss_device_is_connected(dst)) { - WARN_ON(dst->output_type); + dev_dbg(&dss->pdev->dev, "disconnect(%s, %s)\n", + src ? dev_name(src->dev) : "NULL", + dst ? 
dev_name(dst->dev) : "NULL"); + + if (!dst) { + WARN_ON(!src->bridge && !src->panel); return; } - if (src) { - if (WARN_ON(dst != src->dst)) - return; - - dst->src = NULL; - src->dst = NULL; + if (!dst->id && !omapdss_device_is_connected(dst)) { + WARN_ON(!dst->display); + return; } WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED); @@ -239,6 +241,58 @@ void omapdss_device_disconnect(struct omap_dss_device *src, } EXPORT_SYMBOL_GPL(omapdss_device_disconnect); +void omapdss_device_pre_enable(struct omap_dss_device *dssdev) +{ + if (!dssdev) + return; + + omapdss_device_pre_enable(dssdev->next); + + if (dssdev->ops->pre_enable) + dssdev->ops->pre_enable(dssdev); +} +EXPORT_SYMBOL_GPL(omapdss_device_pre_enable); + +void omapdss_device_enable(struct omap_dss_device *dssdev) +{ + if (!dssdev) + return; + + if (dssdev->ops->enable) + dssdev->ops->enable(dssdev); + + omapdss_device_enable(dssdev->next); + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; +} +EXPORT_SYMBOL_GPL(omapdss_device_enable); + +void omapdss_device_disable(struct omap_dss_device *dssdev) +{ + if (!dssdev) + return; + + omapdss_device_disable(dssdev->next); + + if (dssdev->ops->disable) + dssdev->ops->disable(dssdev); +} +EXPORT_SYMBOL_GPL(omapdss_device_disable); + +void omapdss_device_post_disable(struct omap_dss_device *dssdev) +{ + if (!dssdev) + return; + + if (dssdev->ops->post_disable) + dssdev->ops->post_disable(dssdev); + + omapdss_device_post_disable(dssdev->next); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} +EXPORT_SYMBOL_GPL(omapdss_device_post_disable); + /* ----------------------------------------------------------------------------- * Components Handling */ @@ -249,6 +303,7 @@ struct omapdss_comp_node { struct list_head list; struct device_node *node; bool dss_core_component; + const char *compat; }; static bool omapdss_list_contains(const struct device_node *node) @@ -266,13 +321,20 @@ static bool omapdss_list_contains(const struct device_node *node) static void omapdss_walk_device(struct device *dev, struct device_node *node, bool dss_core) { + struct omapdss_comp_node *comp; struct device_node *n; - struct omapdss_comp_node *comp = devm_kzalloc(dev, sizeof(*comp), - GFP_KERNEL); + const char *compat; + int ret; + ret = of_property_read_string(node, "compatible", &compat); + if (ret < 0) + return; + + comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL); if (comp) { comp->node = node; comp->dss_core_component = dss_core; + comp->compat = compat; list_add(&comp->list, &omapdss_comp_list); } @@ -312,12 +374,8 @@ void omapdss_gather_components(struct device *dev) omapdss_walk_device(dev, dev->of_node, true); - for_each_available_child_of_node(dev->of_node, child) { - if (!of_find_property(child, "compatible", NULL)) - continue; - + for_each_available_child_of_node(dev->of_node, child) omapdss_walk_device(dev, child, true); - } } EXPORT_SYMBOL(omapdss_gather_components); @@ -325,6 +383,8 @@ static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp) { if (comp->dss_core_component) return true; + if (!strstarts(comp->compat, "omapdss,")) + return true; if (omapdss_device_is_registered(comp->node)) return true; diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index 34b2a4ef63a4..e93f61a567a8 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -23,6 +23,9 @@ #include <linux/kernel.h> #include <linux/of.h> +#include <drm/drm_connector.h> +#include <drm/drm_modes.h> + #include "omapdss.h" static int disp_num_counter; @@ 
-39,8 +42,6 @@ void omapdss_display_init(struct omap_dss_device *dssdev) if (id < 0) id = disp_num_counter++; - dssdev->alias_id = id; - /* Use 'label' property for name, if it exists */ of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name); @@ -58,3 +59,22 @@ struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output) return omapdss_device_get(output); } EXPORT_SYMBOL_GPL(omapdss_display_get); + +int omapdss_display_get_modes(struct drm_connector *connector, + const struct videomode *vm) +{ + struct drm_display_mode *mode; + + mode = drm_mode_create(connector->dev); + if (!mode) + return 0; + + drm_display_mode_from_videomode(vm, mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + + return 1; +} +EXPORT_SYMBOL_GPL(omapdss_display_get_modes); diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index ca4f3c4c6318..cc78dfa07f04 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -47,8 +47,8 @@ struct dpi_data { struct mutex lock; - struct videomode vm; struct dss_lcd_mgr_config mgr_config; + unsigned long pixelclock; int data_lines; struct omap_dss_device output; @@ -347,16 +347,15 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req, static int dpi_set_mode(struct dpi_data *dpi) { - const struct videomode *vm = &dpi->vm; int lck_div = 0, pck_div = 0; unsigned long fck = 0; int r = 0; if (dpi->pll) r = dpi_set_pll_clk(dpi, dpi->output.dispc_channel, - vm->pixelclock, &fck, &lck_div, &pck_div); + dpi->pixelclock, &fck, &lck_div, &pck_div); else - r = dpi_set_dispc_clk(dpi, vm->pixelclock, &fck, + r = dpi_set_dispc_clk(dpi, dpi->pixelclock, &fck, &lck_div, &pck_div); if (r) return r; @@ -378,7 +377,7 @@ static void dpi_config_lcd_manager(struct dpi_data *dpi) dss_mgr_set_lcd_config(&dpi->output, &dpi->mgr_config); } -static int dpi_display_enable(struct omap_dss_device *dssdev) +static void dpi_display_enable(struct omap_dss_device *dssdev) { struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); struct omap_dss_device *out = &dpi->output; @@ -386,12 +385,6 @@ static int dpi_display_enable(struct omap_dss_device *dssdev) mutex_lock(&dpi->lock); - if (!out->dispc_channel_connected) { - DSSERR("failed to enable display: no output/manager\n"); - r = -ENODEV; - goto err_no_out_mgr; - } - if (dpi->vdds_dsi_reg) { r = regulator_enable(dpi->vdds_dsi_reg); if (r) @@ -426,7 +419,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev) mutex_unlock(&dpi->lock); - return 0; + return; err_mgr_enable: err_set_mode: @@ -439,9 +432,7 @@ err_get_dispc: if (dpi->vdds_dsi_reg) regulator_disable(dpi->vdds_dsi_reg); err_reg_enable: -err_no_out_mgr: mutex_unlock(&dpi->lock); - return r; } static void dpi_display_disable(struct omap_dss_device *dssdev) @@ -467,7 +458,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev) } static void dpi_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) + const struct drm_display_mode *mode) { struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); @@ -475,13 +466,13 @@ static void dpi_set_timings(struct omap_dss_device *dssdev, mutex_lock(&dpi->lock); - dpi->vm = *vm; + dpi->pixelclock = mode->clock * 1000; mutex_unlock(&dpi->lock); } static int dpi_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + struct drm_display_mode *mode) { struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); int lck_div, 
pck_div; @@ -490,20 +481,20 @@ static int dpi_check_timings(struct omap_dss_device *dssdev, struct dpi_clk_calc_ctx ctx; bool ok; - if (vm->hactive % 8 != 0) + if (mode->hdisplay % 8 != 0) return -EINVAL; - if (vm->pixelclock == 0) + if (mode->clock == 0) return -EINVAL; if (dpi->pll) { - ok = dpi_pll_clk_calc(dpi, vm->pixelclock, &ctx); + ok = dpi_pll_clk_calc(dpi, mode->clock * 1000, &ctx); if (!ok) return -EINVAL; fck = ctx.pll_cinfo.clkout[ctx.clkout_idx]; } else { - ok = dpi_dss_clk_calc(dpi, vm->pixelclock, &ctx); + ok = dpi_dss_clk_calc(dpi, mode->clock * 1000, &ctx); if (!ok) return -EINVAL; @@ -515,7 +506,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev, pck = fck / lck_div / pck_div; - vm->pixelclock = pck; + mode->clock = pck / 1000; return 0; } @@ -596,23 +587,15 @@ static int dpi_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { struct dpi_data *dpi = dpi_get_data_from_dssdev(dst); - int r; dpi_init_pll(dpi); - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - return r; - - dst->dispc_channel_connected = true; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } static void dpi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { - dst->dispc_channel_connected = false; - omapdss_device_disconnect(dst, dst->next); } @@ -651,25 +634,15 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) out->dev = &dpi->pdev->dev; out->id = OMAP_DSS_OUTPUT_DPI; - out->output_type = OMAP_DISPLAY_TYPE_DPI; + out->type = OMAP_DISPLAY_TYPE_DPI; out->dispc_channel = dpi_get_channel(dpi); out->of_ports = BIT(port_num); out->ops = &dpi_ops; out->owner = THIS_MODULE; - out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); - if (IS_ERR(out->next)) { - if (PTR_ERR(out->next) != -EPROBE_DEFER) - dev_err(out->dev, "failed to find video sink\n"); - return PTR_ERR(out->next); - } - - r = omapdss_output_validate(out); - if (r) { - omapdss_device_put(out->next); - out->next = NULL; + r = omapdss_device_init_output(out); + if (r < 0) return r; - } omapdss_device_register(out); @@ -681,9 +654,8 @@ static void dpi_uninit_output_port(struct device_node *port) struct dpi_data *dpi = port->data; struct omap_dss_device *out = &dpi->output; - if (out->next) - omapdss_device_put(out->next); omapdss_device_unregister(out); + omapdss_device_cleanup_output(out); } static const struct soc_device_attribute dpi_soc_devices[] = { diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 64fb788b6647..5202862d89b5 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1342,12 +1342,9 @@ static int dsi_pll_enable(struct dss_pll *pll) */ dsi_enable_scp_clk(dsi); - if (!dsi->vdds_dsi_enabled) { - r = regulator_enable(dsi->vdds_dsi_reg); - if (r) - goto err0; - dsi->vdds_dsi_enabled = true; - } + r = regulator_enable(dsi->vdds_dsi_reg); + if (r) + goto err0; /* XXX PLL does not come out of reset without this... 
*/ dispc_pck_free_enable(dsi->dss->dispc, 1); @@ -1372,36 +1369,25 @@ static int dsi_pll_enable(struct dss_pll *pll) return 0; err1: - if (dsi->vdds_dsi_enabled) { - regulator_disable(dsi->vdds_dsi_reg); - dsi->vdds_dsi_enabled = false; - } + regulator_disable(dsi->vdds_dsi_reg); err0: dsi_disable_scp_clk(dsi); dsi_runtime_put(dsi); return r; } -static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes) +static void dsi_pll_disable(struct dss_pll *pll) { + struct dsi_data *dsi = container_of(pll, struct dsi_data, pll); + dsi_pll_power(dsi, DSI_PLL_POWER_OFF); - if (disconnect_lanes) { - WARN_ON(!dsi->vdds_dsi_enabled); - regulator_disable(dsi->vdds_dsi_reg); - dsi->vdds_dsi_enabled = false; - } + + regulator_disable(dsi->vdds_dsi_reg); dsi_disable_scp_clk(dsi); dsi_runtime_put(dsi); - DSSDBG("PLL uninit done\n"); -} - -static void dsi_pll_disable(struct dss_pll *pll) -{ - struct dsi_data *dsi = container_of(pll, struct dsi_data, pll); - - dsi_pll_uninit(dsi, true); + DSSDBG("PLL disable done\n"); } static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) @@ -3753,19 +3739,13 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel) { struct dsi_data *dsi = to_dsi_data(dssdev); int bpp = dsi_get_pixel_size(dsi->pix_fmt); - struct omap_dss_device *out = &dsi->output; u8 data_type; u16 word_count; int r; - if (!out->dispc_channel_connected) { - DSSERR("failed to enable display: no output/manager\n"); - return -ENODEV; - } - r = dsi_display_init_dispc(dsi); if (r) - goto err_init_dispc; + return r; if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { switch (dsi->pix_fmt) { @@ -3814,7 +3794,6 @@ err_mgr_enable: } err_pix_fmt: dsi_display_uninit_dispc(dsi); -err_init_dispc: return r; } @@ -4096,11 +4075,11 @@ static int dsi_display_init_dsi(struct dsi_data *dsi) r = dss_pll_enable(&dsi->pll); if (r) - goto err0; + return r; r = dsi_configure_dsi_clocks(dsi); if (r) - goto err1; + goto err0; dss_select_dsi_clk_source(dsi->dss, dsi->module_id, dsi->module_id == 0 ? 
@@ -4108,6 +4087,14 @@ static int dsi_display_init_dsi(struct dsi_data *dsi) DSSDBG("PLL OK\n"); + if (!dsi->vdds_dsi_enabled) { + r = regulator_enable(dsi->vdds_dsi_reg); + if (r) + goto err1; + + dsi->vdds_dsi_enabled = true; + } + r = dsi_cio_init(dsi); if (r) goto err2; @@ -4136,10 +4123,13 @@ static int dsi_display_init_dsi(struct dsi_data *dsi) err3: dsi_cio_uninit(dsi); err2: - dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK); + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; err1: - dss_pll_disable(&dsi->pll); + dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK); err0: + dss_pll_disable(&dsi->pll); + return r; } @@ -4158,13 +4148,18 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes, dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK); dsi_cio_uninit(dsi); - dsi_pll_uninit(dsi, disconnect_lanes); + dss_pll_disable(&dsi->pll); + + if (disconnect_lanes) { + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; + } } -static int dsi_display_enable(struct omap_dss_device *dssdev) +static void dsi_display_enable(struct omap_dss_device *dssdev) { struct dsi_data *dsi = to_dsi_data(dssdev); - int r = 0; + int r; DSSDBG("dsi_display_enable\n"); @@ -4184,14 +4179,13 @@ static int dsi_display_enable(struct omap_dss_device *dssdev) mutex_unlock(&dsi->lock); - return 0; + return; err_init_dsi: dsi_runtime_put(dsi); err_get_dsi: mutex_unlock(&dsi->lock); DSSDBG("dsi_display_enable FAILED\n"); - return r; } static void dsi_display_disable(struct omap_dss_device *dssdev, @@ -4888,21 +4882,12 @@ static int dsi_get_clocks(struct dsi_data *dsi) static int dsi_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { - int r; - - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - return r; - - dst->dispc_channel_connected = true; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } static void dsi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { - dst->dispc_channel_connected = false; - omapdss_device_disconnect(dst, dst->next); } @@ -5138,29 +5123,19 @@ static int dsi_init_output(struct dsi_data *dsi) out->id = dsi->module_id == 0 ? OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2; - out->output_type = OMAP_DISPLAY_TYPE_DSI; + out->type = OMAP_DISPLAY_TYPE_DSI; out->name = dsi->module_id == 0 ? 
"dsi.0" : "dsi.1"; out->dispc_channel = dsi_get_channel(dsi); out->ops = &dsi_ops; out->owner = THIS_MODULE; out->of_ports = BIT(0); - out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE + out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | DRM_BUS_FLAG_DE_HIGH - | DRM_BUS_FLAG_SYNC_NEGEDGE; - - out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); - if (IS_ERR(out->next)) { - if (PTR_ERR(out->next) != -EPROBE_DEFER) - dev_err(out->dev, "failed to find video sink\n"); - return PTR_ERR(out->next); - } + | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE; - r = omapdss_output_validate(out); - if (r) { - omapdss_device_put(out->next); - out->next = NULL; + r = omapdss_device_init_output(out); + if (r < 0) return r; - } omapdss_device_register(out); @@ -5171,9 +5146,8 @@ static void dsi_uninit_output(struct dsi_data *dsi) { struct omap_dss_device *out = &dsi->output; - if (out->next) - omapdss_device_put(out->next); omapdss_device_unregister(out); + omapdss_device_cleanup_output(out); } static int dsi_probe_of(struct dsi_data *dsi) diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c index 0422597ac6b0..b2094055c5fc 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss-of.c +++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c @@ -12,71 +12,25 @@ * more details. */ -#include <linux/device.h> #include <linux/err.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> -#include <linux/seq_file.h> #include "omapdss.h" -static struct device_node * -dss_of_port_get_parent_device(struct device_node *port) -{ - struct device_node *np; - int i; - - if (!port) - return NULL; - - np = of_get_parent(port); - - for (i = 0; i < 2 && np; ++i) { - struct property *prop; - - prop = of_find_property(np, "compatible", NULL); - - if (prop) - return np; - - np = of_get_next_parent(np); - } - - return NULL; -} - struct omap_dss_device * omapdss_of_find_connected_device(struct device_node *node, unsigned int port) { - struct device_node *src_node; - struct device_node *src_port; - struct device_node *ep; - struct omap_dss_device *src; - u32 port_number = 0; + struct device_node *remote_node; + struct omap_dss_device *dssdev; - /* Get the endpoint... */ - ep = of_graph_get_endpoint_by_regs(node, port, 0); - if (!ep) + remote_node = of_graph_get_remote_node(node, port, 0); + if (!remote_node) return NULL; - /* ... and its remote port... */ - src_port = of_graph_get_remote_port(ep); - of_node_put(ep); - if (!src_port) - return NULL; - - /* ... and the remote port's number and parent... */ - of_property_read_u32(src_port, "reg", &port_number); - src_node = dss_of_port_get_parent_device(src_port); - of_node_put(src_port); - if (!src_node) - return ERR_PTR(-EINVAL); - - /* ... and finally the connected device. */ - src = omapdss_find_device_by_port(src_node, port_number); - of_node_put(src_node); + dssdev = omapdss_find_device_by_node(remote_node); + of_node_put(remote_node); - return src ? src : ERR_PTR(-EPROBE_DEFER); + return dssdev ? 
dssdev : ERR_PTR(-EPROBE_DEFER); } EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 7553c7fc1c45..55e68863ef15 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1560,7 +1560,7 @@ static void dss_shutdown(struct platform_device *pdev) DSSDBG("shutdown\n"); - for_each_dss_display(dssdev) { + for_each_dss_output(dssdev) { if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) dssdev->ops->disable(dssdev); } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index aabdda394c9c..6339e2756b34 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -249,15 +249,15 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi) } static void hdmi_display_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) + const struct drm_display_mode *mode) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); mutex_lock(&hdmi->lock); - hdmi->cfg.vm = *vm; + drm_display_mode_to_videomode(mode, &hdmi->cfg.vm); - dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock); + dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000); mutex_unlock(&hdmi->lock); } @@ -312,26 +312,20 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd) hdmi_wp_audio_enable(&hd->wp, false); } -static int hdmi_display_enable(struct omap_dss_device *dssdev) +static void hdmi_display_enable(struct omap_dss_device *dssdev) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); unsigned long flags; - int r = 0; + int r; DSSDBG("ENTER hdmi_display_enable\n"); mutex_lock(&hdmi->lock); - if (!dssdev->dispc_channel_connected) { - DSSERR("failed to enable display: no output/manager\n"); - r = -ENODEV; - goto err0; - } - r = hdmi_power_on_full(hdmi); if (r) { DSSERR("failed to power on device\n"); - goto err0; + goto done; } if (hdmi->audio_configured) { @@ -351,12 +345,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev) hdmi->display_enabled = true; spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags); +done: mutex_unlock(&hdmi->lock); - return 0; - -err0: - mutex_unlock(&hdmi->lock); - return r; } static void hdmi_display_disable(struct omap_dss_device *dssdev) @@ -417,21 +407,12 @@ void hdmi4_core_disable(struct hdmi_core_data *core) static int hdmi_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { - int r; - - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - return r; - - dst->dispc_channel_connected = true; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } static void hdmi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { - dst->dispc_channel_connected = false; - omapdss_device_disconnect(dst, dst->next); } @@ -698,7 +679,7 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi) out->dev = &hdmi->pdev->dev; out->id = OMAP_DSS_OUTPUT_HDMI; - out->output_type = OMAP_DISPLAY_TYPE_HDMI; + out->type = OMAP_DISPLAY_TYPE_HDMI; out->name = "hdmi.0"; out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; out->ops = &hdmi_ops; @@ -706,19 +687,9 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi) out->of_ports = BIT(0); out->ops_flags = OMAP_DSS_DEVICE_OP_EDID; - out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); - if (IS_ERR(out->next)) { - if (PTR_ERR(out->next) != -EPROBE_DEFER) - dev_err(out->dev, "failed to find video sink\n"); - return PTR_ERR(out->next); - } - - r = omapdss_output_validate(out); - if (r) { - 
omapdss_device_put(out->next); - out->next = NULL; + r = omapdss_device_init_output(out); + if (r < 0) return r; - } omapdss_device_register(out); @@ -729,9 +700,8 @@ static void hdmi4_uninit_output(struct omap_hdmi *hdmi) { struct omap_dss_device *out = &hdmi->output; - if (out->next) - omapdss_device_put(out->next); omapdss_device_unregister(out); + omapdss_device_cleanup_output(out); } static int hdmi4_probe_of(struct omap_hdmi *hdmi) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 9e8556f67a29..2955bbad13bb 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -248,15 +248,15 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi) } static void hdmi_display_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) + const struct drm_display_mode *mode) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); mutex_lock(&hdmi->lock); - hdmi->cfg.vm = *vm; + drm_display_mode_to_videomode(mode, &hdmi->cfg.vm); - dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock); + dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000); mutex_unlock(&hdmi->lock); } @@ -320,26 +320,20 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd) REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2); } -static int hdmi_display_enable(struct omap_dss_device *dssdev) +static void hdmi_display_enable(struct omap_dss_device *dssdev) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); unsigned long flags; - int r = 0; + int r; DSSDBG("ENTER hdmi_display_enable\n"); mutex_lock(&hdmi->lock); - if (!dssdev->dispc_channel_connected) { - DSSERR("failed to enable display: no output/manager\n"); - r = -ENODEV; - goto err0; - } - r = hdmi_power_on_full(hdmi); if (r) { DSSERR("failed to power on device\n"); - goto err0; + goto done; } if (hdmi->audio_configured) { @@ -359,12 +353,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev) hdmi->display_enabled = true; spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags); +done: mutex_unlock(&hdmi->lock); - return 0; - -err0: - mutex_unlock(&hdmi->lock); - return r; } static void hdmi_display_disable(struct omap_dss_device *dssdev) @@ -422,21 +412,12 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi) static int hdmi_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { - int r; - - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - return r; - - dst->dispc_channel_connected = true; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } static void hdmi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { - dst->dispc_channel_connected = false; - omapdss_device_disconnect(dst, dst->next); } @@ -682,7 +663,7 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi) out->dev = &hdmi->pdev->dev; out->id = OMAP_DSS_OUTPUT_HDMI; - out->output_type = OMAP_DISPLAY_TYPE_HDMI; + out->type = OMAP_DISPLAY_TYPE_HDMI; out->name = "hdmi.0"; out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; out->ops = &hdmi_ops; @@ -690,19 +671,9 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi) out->of_ports = BIT(0); out->ops_flags = OMAP_DSS_DEVICE_OP_EDID; - out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); - if (IS_ERR(out->next)) { - if (PTR_ERR(out->next) != -EPROBE_DEFER) - dev_err(out->dev, "failed to find video sink\n"); - return PTR_ERR(out->next); - } - - r = omapdss_output_validate(out); - if (r) { - omapdss_device_put(out->next); - out->next = NULL; + r = 
omapdss_device_init_output(out); + if (r < 0) return r; - } omapdss_device_register(out); @@ -713,9 +684,8 @@ static void hdmi5_uninit_output(struct omap_hdmi *hdmi) { struct omap_dss_device *out = &hdmi->output; - if (out->next) - omapdss_device_put(out->next); omapdss_device_unregister(out); + omapdss_device_cleanup_output(out); } static int hdmi5_probe_of(struct omap_hdmi *hdmi) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c index 3bfb95d230e0..2b41c75ce988 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c @@ -184,6 +184,22 @@ static const struct of_device_id omapdss_of_match[] __initconst = { {}, }; +static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = { + { .compatible = "composite-video-connector" }, + { .compatible = "hdmi-connector" }, + { .compatible = "lgphilips,lb035q02" }, + { .compatible = "nec,nl8048hl11" }, + { .compatible = "panel-dsi-cm" }, + { .compatible = "sharp,ls037v7dw01" }, + { .compatible = "sony,acx565akm" }, + { .compatible = "svideo-connector" }, + { .compatible = "ti,opa362" }, + { .compatible = "ti,tpd12s015" }, + { .compatible = "toppoly,td028ttec1" }, + { .compatible = "tpo,td028ttec1" }, + { .compatible = "tpo,td043mtea1" }, +}; + static int __init omapdss_boot_init(void) { struct device_node *dss, *child; @@ -210,7 +226,7 @@ static int __init omapdss_boot_init(void) n = list_first_entry(&dss_conv_list, struct dss_conv_node, list); - if (!n->root) + if (of_match_node(omapdss_of_fixups_whitelist, n->node)) omapdss_omapify_node(n->node); list_del(&n->list); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 33e15cb77efa..0c734d1f89e1 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -19,7 +19,6 @@ #define __OMAP_DRM_DSS_H #include <linux/list.h> -#include <linux/kobject.h> #include <linux/device.h> #include <linux/interrupt.h> #include <video/videomode.h> @@ -68,6 +67,7 @@ struct dss_lcd_mgr_config; struct snd_aes_iec958; struct snd_cea_861_aud_if; struct hdmi_avi_infoframe; +struct drm_connector; enum omap_display_type { OMAP_DISPLAY_TYPE_NONE = 0, @@ -360,15 +360,15 @@ struct omap_dss_device_ops { void (*disconnect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); - int (*enable)(struct omap_dss_device *dssdev); + void (*pre_enable)(struct omap_dss_device *dssdev); + void (*enable)(struct omap_dss_device *dssdev); void (*disable)(struct omap_dss_device *dssdev); + void (*post_disable)(struct omap_dss_device *dssdev); int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*get_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); + struct drm_display_mode *mode); void (*set_timings)(struct omap_dss_device *dssdev, - const struct videomode *vm); + const struct drm_display_mode *mode); bool (*detect)(struct omap_dss_device *dssdev); @@ -380,6 +380,9 @@ struct omap_dss_device_ops { int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); + int (*get_modes)(struct omap_dss_device *dssdev, + struct drm_connector *connector); + union { const struct omapdss_hdmi_ops hdmi; const struct omapdss_dsi_ops dsi; @@ -390,42 +393,40 @@ struct omap_dss_device_ops { * enum omap_dss_device_ops_flag - Indicates which device ops are supported * @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection * @OMAP_DSS_DEVICE_OP_HPD: The device supports all 
hot-plug-related operations - * @OMAP_DSS_DEVICE_OP_EDID: The device supports readind EDID + * @OMAP_DSS_DEVICE_OP_EDID: The device supports reading EDID + * @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes */ enum omap_dss_device_ops_flag { OMAP_DSS_DEVICE_OP_DETECT = BIT(0), OMAP_DSS_DEVICE_OP_HPD = BIT(1), OMAP_DSS_DEVICE_OP_EDID = BIT(2), -}; - -enum omap_dss_device_type { - OMAP_DSS_DEVICE_TYPE_OUTPUT = (1 << 0), - OMAP_DSS_DEVICE_TYPE_DISPLAY = (1 << 1), + OMAP_DSS_DEVICE_OP_MODES = BIT(3), }; struct omap_dss_device { - struct kobject kobj; struct device *dev; struct module *owner; struct dss_device *dss; - struct omap_dss_device *src; - struct omap_dss_device *dst; struct omap_dss_device *next; + struct drm_bridge *bridge; + struct drm_panel *panel; struct list_head list; - unsigned int alias_id; - + /* + * DSS type that this device generates (for DSS internal devices) or + * requires (for external encoders, connectors and panels). Must be a + * non-zero (different than OMAP_DISPLAY_TYPE_NONE) value. + */ enum omap_display_type type; + /* - * DSS output type that this device generates (for DSS internal devices) - * or requires (for external encoders). Must be OMAP_DISPLAY_TYPE_NONE - * for display devices (connectors and panels) and to non-zero value for - * all other devices. + * True if the device is a display (panel or connector) at the end of + * the pipeline, false otherwise. */ - enum omap_display_type output_type; + bool display; const char *name; @@ -434,9 +435,6 @@ struct omap_dss_device { unsigned long ops_flags; u32 bus_flags; - /* helper variable for driver suspend/resume */ - bool activate_after_resume; - enum omap_display_caps caps; enum omap_dss_display_state state; @@ -445,7 +443,6 @@ struct omap_dss_device { /* DISPC channel for this output */ enum omap_channel dispc_channel; - bool dispc_channel_connected; /* output instance */ enum omap_dss_output_id id; @@ -465,9 +462,6 @@ struct omap_dss_driver { int (*memory_read)(struct omap_dss_device *dssdev, void *buf, size_t size, u16 x, u16 y, u16 w, u16 h); - - void (*get_size)(struct omap_dss_device *dssdev, - unsigned int *width, unsigned int *height); }; struct dss_device *omapdss_get_dss(void); @@ -477,32 +471,35 @@ static inline bool omapdss_is_initialized(void) return !!omapdss_get_dss(); } -#define for_each_dss_display(d) \ - while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_DISPLAY)) != NULL) void omapdss_display_init(struct omap_dss_device *dssdev); struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output); +int omapdss_display_get_modes(struct drm_connector *connector, + const struct videomode *vm); void omapdss_device_register(struct omap_dss_device *dssdev); void omapdss_device_unregister(struct omap_dss_device *dssdev); struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev); void omapdss_device_put(struct omap_dss_device *dssdev); -struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, - unsigned int port); -struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, - enum omap_dss_device_type type); +struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node); int omapdss_device_connect(struct dss_device *dss, struct omap_dss_device *src, struct omap_dss_device *dst); void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst); +void omapdss_device_pre_enable(struct omap_dss_device *dssdev); +void omapdss_device_enable(struct omap_dss_device *dssdev); +void 
omapdss_device_disable(struct omap_dss_device *dssdev); +void omapdss_device_post_disable(struct omap_dss_device *dssdev); int omap_dss_get_num_overlay_managers(void); int omap_dss_get_num_overlays(void); #define for_each_dss_output(d) \ - while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_OUTPUT)) != NULL) -int omapdss_output_validate(struct omap_dss_device *out); + while ((d = omapdss_device_next_output(d)) != NULL) +struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from); +int omapdss_device_init_output(struct omap_dss_device *out); +void omapdss_device_cleanup_output(struct omap_dss_device *out); typedef void (*omap_dispc_isr_t) (void *arg, u32 mask); int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask); @@ -511,11 +508,6 @@ int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask); int omapdss_compat_init(void); void omapdss_compat_uninit(void); -static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev) -{ - return dssdev->src; -} - static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev) { return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE; diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 18505bc70f7e..10a9ee5cdc61 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -20,20 +20,48 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/of.h> +#include <linux/of_graph.h> + +#include <drm/drm_panel.h> #include "dss.h" #include "omapdss.h" -int omapdss_output_validate(struct omap_dss_device *out) +int omapdss_device_init_output(struct omap_dss_device *out) { - if (out->next && out->output_type != out->next->type) { + struct device_node *remote_node; + + remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0); + if (!remote_node) { + dev_dbg(out->dev, "failed to find video sink\n"); + return 0; + } + + out->next = omapdss_find_device_by_node(remote_node); + out->bridge = of_drm_find_bridge(remote_node); + out->panel = of_drm_find_panel(remote_node); + if (IS_ERR(out->panel)) + out->panel = NULL; + + of_node_put(remote_node); + + if (out->next && out->type != out->next->type) { dev_err(out->dev, "output type and display type don't match\n"); + omapdss_device_put(out->next); + out->next = NULL; return -EINVAL; } - return 0; + return out->next || out->bridge || out->panel ? 
0 : -EPROBE_DEFER; +} +EXPORT_SYMBOL(omapdss_device_init_output); + +void omapdss_device_cleanup_output(struct omap_dss_device *out) +{ + if (out->next) + omapdss_device_put(out->next); } -EXPORT_SYMBOL(omapdss_output_validate); +EXPORT_SYMBOL(omapdss_device_cleanup_output); int dss_install_mgr_ops(struct dss_device *dss, const struct dss_mgr_ops *mgr_ops, diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index b2fe2387037a..7aae52984fed 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -37,7 +37,7 @@ struct sdi_device { struct regulator *vdds_sdi_reg; struct dss_lcd_mgr_config mgr_config; - struct videomode vm; + unsigned long pixelclock; int datapairs; struct omap_dss_device output; @@ -129,27 +129,22 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi) dss_mgr_set_lcd_config(&sdi->output, &sdi->mgr_config); } -static int sdi_display_enable(struct omap_dss_device *dssdev) +static void sdi_display_enable(struct omap_dss_device *dssdev) { struct sdi_device *sdi = dssdev_to_sdi(dssdev); struct dispc_clock_info dispc_cinfo; unsigned long fck; int r; - if (!sdi->output.dispc_channel_connected) { - DSSERR("failed to enable display: no output/manager\n"); - return -ENODEV; - } - r = regulator_enable(sdi->vdds_sdi_reg); if (r) - goto err_reg_enable; + return; r = dispc_runtime_get(sdi->dss->dispc); if (r) goto err_get_dispc; - r = sdi_calc_clock_div(sdi, sdi->vm.pixelclock, &fck, &dispc_cinfo); + r = sdi_calc_clock_div(sdi, sdi->pixelclock, &fck, &dispc_cinfo); if (r) goto err_calc_clock_div; @@ -185,7 +180,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_mgr_enable; - return 0; + return; err_mgr_enable: dss_sdi_disable(sdi->dss); @@ -195,8 +190,6 @@ err_calc_clock_div: dispc_runtime_put(sdi->dss->dispc); err_get_dispc: regulator_disable(sdi->vdds_sdi_reg); -err_reg_enable: - return r; } static void sdi_display_disable(struct omap_dss_device *dssdev) @@ -213,36 +206,37 @@ static void sdi_display_disable(struct omap_dss_device *dssdev) } static void sdi_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) + const struct drm_display_mode *mode) { struct sdi_device *sdi = dssdev_to_sdi(dssdev); - sdi->vm = *vm; + sdi->pixelclock = mode->clock * 1000; } static int sdi_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + struct drm_display_mode *mode) { struct sdi_device *sdi = dssdev_to_sdi(dssdev); struct dispc_clock_info dispc_cinfo; + unsigned long pixelclock = mode->clock * 1000; unsigned long fck; unsigned long pck; int r; - if (vm->pixelclock == 0) + if (pixelclock == 0) return -EINVAL; - r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo); + r = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo); if (r) return r; pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div; - if (pck != vm->pixelclock) { + if (pck != pixelclock) { DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n", - vm->pixelclock, pck); + pixelclock, pck); - vm->pixelclock = pck; + mode->clock = pck / 1000; } return 0; @@ -251,21 +245,12 @@ static int sdi_check_timings(struct omap_dss_device *dssdev, static int sdi_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { - int r; - - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - return r; - - dst->dispc_channel_connected = true; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } static void sdi_disconnect(struct omap_dss_device *src, struct omap_dss_device 
*dst) { - dst->dispc_channel_connected = false; - omapdss_device_disconnect(dst, dst->next); } @@ -287,29 +272,19 @@ static int sdi_init_output(struct sdi_device *sdi) out->dev = &sdi->pdev->dev; out->id = OMAP_DSS_OUTPUT_SDI; - out->output_type = OMAP_DISPLAY_TYPE_SDI; + out->type = OMAP_DISPLAY_TYPE_SDI; out->name = "sdi.0"; out->dispc_channel = OMAP_DSS_CHANNEL_LCD; /* We have SDI only on OMAP3, where it's on port 1 */ out->of_ports = BIT(1); out->ops = &sdi_ops; out->owner = THIS_MODULE; - out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE /* 15.5.9.1.2 */ - | DRM_BUS_FLAG_SYNC_POSEDGE; - - out->next = omapdss_of_find_connected_device(out->dev->of_node, 1); - if (IS_ERR(out->next)) { - if (PTR_ERR(out->next) != -EPROBE_DEFER) - dev_err(out->dev, "failed to find video sink\n"); - return PTR_ERR(out->next); - } + out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */ + | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE; - r = omapdss_output_validate(out); - if (r) { - omapdss_device_put(out->next); - out->next = NULL; + r = omapdss_device_init_output(out); + if (r < 0) return r; - } omapdss_device_register(out); @@ -318,9 +293,8 @@ static int sdi_init_output(struct sdi_device *sdi) static void sdi_uninit_output(struct sdi_device *sdi) { - if (sdi->output.next) - omapdss_device_put(sdi->output.next); omapdss_device_unregister(&sdi->output); + omapdss_device_cleanup_output(&sdi->output); } int sdi_init_port(struct dss_device *dss, struct platform_device *pdev, diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index b5f52727f8b1..da43b865d973 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -267,63 +267,40 @@ enum venc_videomode { VENC_MODE_NTSC, }; -static const struct videomode omap_dss_pal_vm = { - .hactive = 720, - .vactive = 574, - .pixelclock = 13500000, - .hsync_len = 64, - .hfront_porch = 12, - .hback_porch = 68, - .vsync_len = 5, - .vfront_porch = 5, - .vback_porch = 41, - - .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW | - DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH | - DISPLAY_FLAGS_PIXDATA_POSEDGE | - DISPLAY_FLAGS_SYNC_NEGEDGE, +static const struct drm_display_mode omap_dss_pal_mode = { + .hdisplay = 720, + .hsync_start = 732, + .hsync_end = 796, + .htotal = 864, + .vdisplay = 574, + .vsync_start = 579, + .vsync_end = 584, + .vtotal = 625, + .clock = 13500, + + .flags = DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_NHSYNC | + DRM_MODE_FLAG_NVSYNC, }; -static const struct videomode omap_dss_ntsc_vm = { - .hactive = 720, - .vactive = 482, - .pixelclock = 13500000, - .hsync_len = 64, - .hfront_porch = 16, - .hback_porch = 58, - .vsync_len = 6, - .vfront_porch = 6, - .vback_porch = 31, - - .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW | - DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH | - DISPLAY_FLAGS_PIXDATA_POSEDGE | - DISPLAY_FLAGS_SYNC_NEGEDGE, +static const struct drm_display_mode omap_dss_ntsc_mode = { + .hdisplay = 720, + .hsync_start = 736, + .hsync_end = 800, + .htotal = 858, + .vdisplay = 482, + .vsync_start = 488, + .vsync_end = 494, + .vtotal = 525, + .clock = 13500, + + .flags = DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_NHSYNC | + DRM_MODE_FLAG_NVSYNC, }; -static enum venc_videomode venc_get_videomode(const struct videomode *vm) -{ - if (!(vm->flags & DISPLAY_FLAGS_INTERLACED)) - return VENC_MODE_UNKNOWN; - - if (vm->pixelclock == omap_dss_pal_vm.pixelclock && - vm->hactive == omap_dss_pal_vm.hactive && - vm->vactive == omap_dss_pal_vm.vactive) - return VENC_MODE_PAL; - - if 
(vm->pixelclock == omap_dss_ntsc_vm.pixelclock && - vm->hactive == omap_dss_ntsc_vm.hactive && - vm->vactive == omap_dss_ntsc_vm.vactive) - return VENC_MODE_NTSC; - - return VENC_MODE_UNKNOWN; -} - struct venc_device { struct platform_device *pdev; void __iomem *base; struct mutex venc_lock; - u32 wss_data; struct regulator *vdda_dac_reg; struct dss_device *dss; @@ -331,7 +308,7 @@ struct venc_device { struct clk *tv_dac_clk; - struct videomode vm; + const struct venc_config *config; enum omap_dss_venc_type type; bool invert_polarity; bool requires_tv_dac_clk; @@ -367,8 +344,7 @@ static void venc_write_config(struct venc_device *venc, venc_write_reg(venc, VENC_BLACK_LEVEL, config->black_level); venc_write_reg(venc, VENC_BLANK_LEVEL, config->blank_level); venc_write_reg(venc, VENC_M_CONTROL, config->m_control); - venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | - venc->wss_data); + venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data); venc_write_reg(venc, VENC_S_CARR, config->s_carr); venc_write_reg(venc, VENC_L21__WC_CTL, config->l21__wc_ctl); venc_write_reg(venc, VENC_SAVID__EAVID, config->savid__eavid); @@ -452,18 +428,6 @@ static void venc_runtime_put(struct venc_device *venc) WARN_ON(r < 0 && r != -ENOSYS); } -static const struct venc_config *venc_timings_to_config(const struct videomode *vm) -{ - switch (venc_get_videomode(vm)) { - default: - WARN_ON_ONCE(1); - case VENC_MODE_PAL: - return &venc_config_pal_trm; - case VENC_MODE_NTSC: - return &venc_config_ntsc_trm; - } -} - static int venc_power_on(struct venc_device *venc) { u32 l; @@ -474,7 +438,7 @@ static int venc_power_on(struct venc_device *venc) goto err0; venc_reset(venc); - venc_write_config(venc, venc_timings_to_config(&venc->vm)); + venc_write_config(venc, venc->config); dss_set_venc_output(venc->dss, venc->type); dss_set_dac_pwrdn_bgz(venc->dss, 1); @@ -524,33 +488,17 @@ static void venc_power_off(struct venc_device *venc) venc_runtime_put(venc); } -static int venc_display_enable(struct omap_dss_device *dssdev) +static void venc_display_enable(struct omap_dss_device *dssdev) { struct venc_device *venc = dssdev_to_venc(dssdev); - int r; DSSDBG("venc_display_enable\n"); mutex_lock(&venc->venc_lock); - if (!dssdev->dispc_channel_connected) { - DSSERR("Failed to enable display: no output/manager\n"); - r = -ENODEV; - goto err0; - } - - r = venc_power_on(venc); - if (r) - goto err0; - - venc->wss_data = 0; + venc_power_on(venc); mutex_unlock(&venc->venc_lock); - - return 0; -err0: - mutex_unlock(&venc->venc_lock); - return r; } static void venc_display_disable(struct omap_dss_device *dssdev) @@ -566,30 +514,70 @@ static void venc_display_disable(struct omap_dss_device *dssdev) mutex_unlock(&venc->venc_lock); } -static void venc_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int venc_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { - struct venc_device *venc = dssdev_to_venc(dssdev); + static const struct drm_display_mode *modes[] = { + &omap_dss_pal_mode, + &omap_dss_ntsc_mode, + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(modes); ++i) { + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, modes[i]); + if (!mode) + return i; + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + } - mutex_lock(&venc->venc_lock); - *vm = venc->vm; - mutex_unlock(&venc->venc_lock); + return ARRAY_SIZE(modes); +} + +static enum venc_videomode 
venc_get_videomode(const struct drm_display_mode *mode) +{ + if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) + return VENC_MODE_UNKNOWN; + + if (mode->clock == omap_dss_pal_mode.clock && + mode->hdisplay == omap_dss_pal_mode.hdisplay && + mode->vdisplay == omap_dss_pal_mode.vdisplay) + return VENC_MODE_PAL; + + if (mode->clock == omap_dss_ntsc_mode.clock && + mode->hdisplay == omap_dss_ntsc_mode.hdisplay && + mode->vdisplay == omap_dss_ntsc_mode.vdisplay) + return VENC_MODE_NTSC; + + return VENC_MODE_UNKNOWN; } static void venc_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) + const struct drm_display_mode *mode) { struct venc_device *venc = dssdev_to_venc(dssdev); + enum venc_videomode venc_mode = venc_get_videomode(mode); DSSDBG("venc_set_timings\n"); mutex_lock(&venc->venc_lock); - /* Reset WSS data when the TV standard changes. */ - if (memcmp(&venc->vm, vm, sizeof(*vm))) - venc->wss_data = 0; + switch (venc_mode) { + default: + WARN_ON_ONCE(1); + /* Fall-through */ + case VENC_MODE_PAL: + venc->config = &venc_config_pal_trm; + break; - venc->vm = *vm; + case VENC_MODE_NTSC: + venc->config = &venc_config_ntsc_trm; + break; + } dispc_set_tv_pclk(venc->dss->dispc, 13500000); @@ -597,22 +585,26 @@ static void venc_set_timings(struct omap_dss_device *dssdev, } static int venc_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + struct drm_display_mode *mode) { DSSDBG("venc_check_timings\n"); - switch (venc_get_videomode(vm)) { + switch (venc_get_videomode(mode)) { case VENC_MODE_PAL: - *vm = omap_dss_pal_vm; - return 0; + drm_mode_copy(mode, &omap_dss_pal_mode); + break; case VENC_MODE_NTSC: - *vm = omap_dss_ntsc_vm; - return 0; + drm_mode_copy(mode, &omap_dss_ntsc_mode); + break; default: return -EINVAL; } + + drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); + drm_mode_set_name(mode); + return 0; } static int venc_dump_regs(struct seq_file *s, void *p) @@ -695,21 +687,12 @@ static int venc_get_clocks(struct venc_device *venc) static int venc_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { - int r; - - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - return r; - - dst->dispc_channel_connected = true; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } static void venc_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { - dst->dispc_channel_connected = false; - omapdss_device_disconnect(dst, dst->next); } @@ -721,8 +704,9 @@ static const struct omap_dss_device_ops venc_ops = { .disable = venc_display_disable, .check_timings = venc_check_timings, - .get_timings = venc_get_timings, .set_timings = venc_set_timings, + + .get_modes = venc_get_modes, }; /* ----------------------------------------------------------------------------- @@ -776,26 +760,17 @@ static int venc_init_output(struct venc_device *venc) out->dev = &venc->pdev->dev; out->id = OMAP_DSS_OUTPUT_VENC; - out->output_type = OMAP_DISPLAY_TYPE_VENC; + out->type = OMAP_DISPLAY_TYPE_VENC; out->name = "venc.0"; out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; out->ops = &venc_ops; out->owner = THIS_MODULE; out->of_ports = BIT(0); + out->ops_flags = OMAP_DSS_DEVICE_OP_MODES; - out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); - if (IS_ERR(out->next)) { - if (PTR_ERR(out->next) != -EPROBE_DEFER) - dev_err(out->dev, "failed to find video sink\n"); - return PTR_ERR(out->next); - } - - r = omapdss_output_validate(out); - if (r) { - omapdss_device_put(out->next); - out->next = NULL; + r = 
omapdss_device_init_output(out); + if (r < 0) return r; - } omapdss_device_register(out); @@ -804,9 +779,8 @@ static int venc_init_output(struct venc_device *venc) static void venc_uninit_output(struct venc_device *venc) { - if (venc->output.next) - omapdss_device_put(venc->output.next); omapdss_device_unregister(&venc->output); + omapdss_device_cleanup_output(&venc->output); } static int venc_probe_of(struct venc_device *venc) @@ -878,8 +852,7 @@ static int venc_probe(struct platform_device *pdev) mutex_init(&venc->venc_lock); - venc->wss_data = 0; - venc->vm = omap_dss_pal_vm; + venc->config = &venc_config_pal_trm; venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0); venc->base = devm_ioremap_resource(&pdev->dev, venc_mem); diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 9da94d10782a..5967283934e1 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -17,6 +17,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> +#include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> #include "omap_drv.h" @@ -30,24 +31,27 @@ struct omap_connector { struct drm_connector base; struct omap_dss_device *output; - struct omap_dss_device *display; struct omap_dss_device *hpd; bool hdmi_mode; }; static void omap_connector_hpd_notify(struct drm_connector *connector, - struct omap_dss_device *src, enum drm_connector_status status) { - if (status == connector_status_disconnected) { - /* - * If the source is an HDMI encoder, notify it of disconnection. - * This is required to let the HDMI encoder reset any internal - * state related to connection status, such as the CEC address. - */ - if (src && src->type == OMAP_DISPLAY_TYPE_HDMI && - src->ops->hdmi.lost_hotplug) - src->ops->hdmi.lost_hotplug(src); + struct omap_connector *omap_connector = to_omap_connector(connector); + struct omap_dss_device *dssdev; + + if (status != connector_status_disconnected) + return; + + /* + * Notify all devics in the pipeline of disconnection. This is required + * to let the HDMI encoders reset their internal state related to + * connection status, such as the CEC address. + */ + for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) { + if (dssdev->ops && dssdev->ops->hdmi.lost_hotplug) + dssdev->ops->hdmi.lost_hotplug(dssdev); } } @@ -67,7 +71,7 @@ static void omap_connector_hpd_cb(void *cb_data, if (old_status == status) return; - omap_connector_hpd_notify(connector, omap_connector->hpd, status); + omap_connector_hpd_notify(connector, status); drm_kms_helper_hotplug_event(dev); } @@ -103,20 +107,20 @@ omap_connector_find_device(struct drm_connector *connector, enum omap_dss_device_ops_flag op) { struct omap_connector *omap_connector = to_omap_connector(connector); - struct omap_dss_device *dssdev; + struct omap_dss_device *dssdev = NULL; + struct omap_dss_device *d; - for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) { - if (dssdev->ops_flags & op) - return dssdev; + for (d = omap_connector->output; d; d = d->next) { + if (d->ops_flags & op) + dssdev = d; } - return NULL; + return dssdev; } static enum drm_connector_status omap_connector_detect( struct drm_connector *connector, bool force) { - struct omap_connector *omap_connector = to_omap_connector(connector); struct omap_dss_device *dssdev; enum drm_connector_status status; @@ -128,13 +132,12 @@ static enum drm_connector_status omap_connector_detect( ? 
connector_status_connected : connector_status_disconnected; - omap_connector_hpd_notify(connector, dssdev->src, status); + omap_connector_hpd_notify(connector, status); } else { - switch (omap_connector->display->type) { - case OMAP_DISPLAY_TYPE_DPI: - case OMAP_DISPLAY_TYPE_DBI: - case OMAP_DISPLAY_TYPE_SDI: - case OMAP_DISPLAY_TYPE_DSI: + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_DPI: + case DRM_MODE_CONNECTOR_LVDS: + case DRM_MODE_CONNECTOR_DSI: status = connector_status_connected; break; default: @@ -143,7 +146,7 @@ static enum drm_connector_status omap_connector_detect( } } - VERB("%s: %d (force=%d)", omap_connector->display->name, status, force); + VERB("%s: %d (force=%d)", connector->name, status, force); return status; } @@ -152,7 +155,7 @@ static void omap_connector_destroy(struct drm_connector *connector) { struct omap_connector *omap_connector = to_omap_connector(connector); - DBG("%s", omap_connector->display->name); + DBG("%s", connector->name); if (omap_connector->hpd) { struct omap_dss_device *hpd = omap_connector->hpd; @@ -166,7 +169,6 @@ static void omap_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); omapdss_device_put(omap_connector->output); - omapdss_device_put(omap_connector->display); kfree(omap_connector); } @@ -212,10 +214,8 @@ static int omap_connector_get_modes(struct drm_connector *connector) { struct omap_connector *omap_connector = to_omap_connector(connector); struct omap_dss_device *dssdev; - struct drm_display_mode *mode; - struct videomode vm = {0}; - DBG("%s", omap_connector->display->name); + DBG("%s", connector->name); /* * If display exposes EDID, then we parse that in the normal way to @@ -227,89 +227,71 @@ static int omap_connector_get_modes(struct drm_connector *connector) return omap_connector_get_modes_edid(connector, dssdev); /* - * Otherwise we have either a fixed resolution panel or an output that - * doesn't support modes discovery (e.g. DVI or VGA with the DDC bus - * unconnected, or analog TV). Start by querying the size. + * Otherwise if the display pipeline reports modes (e.g. with a fixed + * resolution panel or an analog TV output), query it. */ - dssdev = omap_connector->display; - if (dssdev->driver && dssdev->driver->get_size) - dssdev->driver->get_size(dssdev, - &connector->display_info.width_mm, - &connector->display_info.height_mm); + dssdev = omap_connector_find_device(connector, + OMAP_DSS_DEVICE_OP_MODES); + if (dssdev) + return dssdev->ops->get_modes(dssdev, connector); /* - * Iterate over the pipeline to find the first device that can provide - * timing information. If we can't find any, we just let the KMS core - * add the default modes. + * Otherwise if the display pipeline uses a drm_panel, we delegate the + * operation to the panel API. */ - for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) { - if (dssdev->ops->get_timings) - break; - } - if (!dssdev) - return 0; + if (omap_connector->output->panel) + return drm_panel_get_modes(omap_connector->output->panel); - /* Add a single mode corresponding to the fixed panel timings. */ - mode = drm_mode_create(connector->dev); - if (!mode) - return 0; + /* + * We can't retrieve modes, which can happen for instance for a DVI or + * VGA output with the DDC bus unconnected. The KMS core will add the + * default modes. 
+ */ + return 0; +} - dssdev->ops->get_timings(dssdev, &vm); +enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + int ret; - drm_display_mode_from_videomode(&vm, mode); + drm_mode_copy(adjusted_mode, mode); - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_set_name(mode); - drm_mode_probed_add(connector, mode); + for (; dssdev; dssdev = dssdev->next) { + if (!dssdev->ops->check_timings) + continue; + + ret = dssdev->ops->check_timings(dssdev, adjusted_mode); + if (ret) + return MODE_BAD; + } - return 1; + return MODE_OK; } -static int omap_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct omap_connector *omap_connector = to_omap_connector(connector); - enum omap_channel channel = omap_connector->output->dispc_channel; - struct omap_drm_private *priv = connector->dev->dev_private; - struct omap_dss_device *dssdev; - struct videomode vm = {0}; - struct drm_device *dev = connector->dev; - struct drm_display_mode *new_mode; - int r, ret = MODE_BAD; - - drm_display_mode_to_videomode(mode, &vm); - mode->vrefresh = drm_mode_vrefresh(mode); + struct drm_display_mode new_mode = { { 0 } }; + enum drm_mode_status status; - r = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm); - if (r) + status = omap_connector_mode_fixup(omap_connector->output, mode, + &new_mode); + if (status != MODE_OK) goto done; - for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) { - if (!dssdev->ops->check_timings) - continue; - - r = dssdev->ops->check_timings(dssdev, &vm); - if (r) - goto done; - } - - /* check if vrefresh is still valid */ - new_mode = drm_mode_duplicate(dev, mode); - if (!new_mode) - return MODE_BAD; - - new_mode->clock = vm.pixelclock / 1000; - new_mode->vrefresh = 0; - if (mode->vrefresh == drm_mode_vrefresh(new_mode)) - ret = MODE_OK; - drm_mode_destroy(dev, new_mode); + /* Check if vrefresh is still valid. */ + if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(&new_mode)) + status = MODE_NOCLOCK; done: DBG("connector: mode %s: " DRM_MODE_FMT, - (ret == MODE_OK) ? "valid" : "invalid", + (status == MODE_OK) ? 
"valid" : "invalid", DRM_MODE_ARG(mode)); - return ret; + return status; } static const struct drm_connector_funcs omap_connector_funcs = { @@ -326,9 +308,16 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = { .mode_valid = omap_connector_mode_valid, }; -static int omap_connector_get_type(struct omap_dss_device *display) +static int omap_connector_get_type(struct omap_dss_device *output) { - switch (display->type) { + struct omap_dss_device *display; + enum omap_display_type type; + + display = omapdss_display_get(output); + type = display->type; + omapdss_device_put(display); + + switch (type) { case OMAP_DISPLAY_TYPE_HDMI: return DRM_MODE_CONNECTOR_HDMIA; case OMAP_DISPLAY_TYPE_DVI: @@ -351,28 +340,26 @@ static int omap_connector_get_type(struct omap_dss_device *display) /* initialize connector */ struct drm_connector *omap_connector_init(struct drm_device *dev, struct omap_dss_device *output, - struct omap_dss_device *display, struct drm_encoder *encoder) { struct drm_connector *connector = NULL; struct omap_connector *omap_connector; struct omap_dss_device *dssdev; - DBG("%s", display->name); + DBG("%s", output->name); omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL); if (!omap_connector) goto fail; omap_connector->output = omapdss_device_get(output); - omap_connector->display = omapdss_device_get(display); connector = &omap_connector->base; connector->interlace_allowed = 1; connector->doublescan_allowed = 0; drm_connector_init(dev, connector, &omap_connector_funcs, - omap_connector_get_type(display)); + omap_connector_get_type(output)); drm_connector_helper_add(connector, &omap_connector_helper_funcs); /* diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h index 854099801649..608085219336 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.h +++ b/drivers/gpu/drm/omapdrm/omap_connector.h @@ -22,6 +22,8 @@ #include <linux/types.h> +enum drm_mode_status; + struct drm_connector; struct drm_device; struct drm_encoder; @@ -29,12 +31,12 @@ struct omap_dss_device; struct drm_connector *omap_connector_init(struct drm_device *dev, struct omap_dss_device *output, - struct omap_dss_device *display, struct drm_encoder *encoder); -struct drm_encoder *omap_connector_attached_encoder( - struct drm_connector *connector); bool omap_connector_get_hdmi_mode(struct drm_connector *connector); void omap_connector_enable_hpd(struct drm_connector *connector); void omap_connector_disable_hpd(struct drm_connector *connector); +enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); #endif /* __OMAPDRM_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index d99e24dcc0bf..5a29bf01c0e8 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -128,7 +128,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) if (WARN_ON(omap_crtc->enabled == enable)) return; - if (omap_crtc->pipe->output->output_type == OMAP_DISPLAY_TYPE_HDMI) { + if (omap_crtc->pipe->output->type == OMAP_DISPLAY_TYPE_HDMI) { priv->dispc_ops->mgr_enable(priv->dispc, channel, enable); omap_crtc->enabled = enable; return; @@ -390,6 +390,15 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) { struct omap_drm_private *priv = crtc->dev->dev_private; + struct omap_crtc *omap_crtc = 
to_omap_crtc(crtc); + struct videomode vm = {0}; + int r; + + drm_display_mode_to_videomode(mode, &vm); + r = priv->dispc_ops->mgr_check_timings(priv->dispc, omap_crtc->channel, + &vm); + if (r) + return r; /* Check for bandwidth limit */ if (priv->max_bandwidth) { @@ -657,7 +666,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, &omap_crtc_funcs, NULL); if (ret < 0) { dev_err(dev->dev, "%s(): could not init crtc for: %s\n", - __func__, pipe->display->name); + __func__, pipe->output->name); kfree(omap_crtc); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index f8292278f57d..1b9b6f5e48e1 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -23,6 +23,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_panel.h> #include "omap_dmm_tiler.h" #include "omap_drv.h" @@ -137,12 +138,13 @@ static void omap_disconnect_pipelines(struct drm_device *ddev) for (i = 0; i < priv->num_pipes; i++) { struct omap_drm_pipeline *pipe = &priv->pipes[i]; + if (pipe->output->panel) + drm_panel_detach(pipe->output->panel); + omapdss_device_disconnect(NULL, pipe->output); omapdss_device_put(pipe->output); - omapdss_device_put(pipe->display); pipe->output = NULL; - pipe->display = NULL; } memset(&priv->channels, 0, sizeof(priv->channels)); @@ -150,33 +152,17 @@ static void omap_disconnect_pipelines(struct drm_device *ddev) priv->num_pipes = 0; } -static int omap_compare_pipes(const void *a, const void *b) -{ - const struct omap_drm_pipeline *pipe1 = a; - const struct omap_drm_pipeline *pipe2 = b; - - if (pipe1->display->alias_id > pipe2->display->alias_id) - return 1; - else if (pipe1->display->alias_id < pipe2->display->alias_id) - return -1; - return 0; -} - static int omap_connect_pipelines(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; struct omap_dss_device *output = NULL; - unsigned int i; int r; - if (!omapdss_stack_is_ready()) - return -EPROBE_DEFER; - for_each_dss_output(output) { r = omapdss_device_connect(priv->dss, NULL, output); if (r == -EPROBE_DEFER) { omapdss_device_put(output); - goto cleanup; + return r; } else if (r) { dev_warn(output->dev, "could not connect output %s\n", output->name); @@ -185,7 +171,6 @@ static int omap_connect_pipelines(struct drm_device *ddev) pipe = &priv->pipes[priv->num_pipes++]; pipe->output = omapdss_device_get(output); - pipe->display = omapdss_display_get(output); if (priv->num_pipes == ARRAY_SIZE(priv->pipes)) { /* To balance the 'for_each_dss_output' loop */ @@ -195,36 +180,19 @@ static int omap_connect_pipelines(struct drm_device *ddev) } } - /* Sort the list by DT aliases */ - sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]), - omap_compare_pipes, NULL); - - /* - * Populate the pipeline lookup table by DISPC channel. Only one display - * is allowed per channel. 
- */ - for (i = 0; i < priv->num_pipes; ++i) { - struct omap_drm_pipeline *pipe = &priv->pipes[i]; - enum omap_channel channel = pipe->output->dispc_channel; - - if (WARN_ON(priv->channels[channel] != NULL)) { - r = -EINVAL; - goto cleanup; - } - - priv->channels[channel] = pipe; - } - return 0; +} -cleanup: - /* - * if we are deferring probe, we disconnect the devices we previously - * connected - */ - omap_disconnect_pipelines(ddev); +static int omap_compare_pipelines(const void *a, const void *b) +{ + const struct omap_drm_pipeline *pipe1 = a; + const struct omap_drm_pipeline *pipe2 = b; - return r; + if (pipe1->alias_id > pipe2->alias_id) + return 1; + else if (pipe1->alias_id < pipe2->alias_id) + return -1; + return 0; } static int omap_modeset_init_properties(struct drm_device *dev) @@ -240,6 +208,30 @@ static int omap_modeset_init_properties(struct drm_device *dev) return 0; } +static int omap_display_id(struct omap_dss_device *output) +{ + struct device_node *node = NULL; + + if (output->next) { + struct omap_dss_device *display; + + display = omapdss_display_get(output); + node = display->dev->of_node; + omapdss_device_put(display); + } else if (output->bridge) { + struct drm_bridge *bridge = output->bridge; + + while (bridge->next) + bridge = bridge->next; + + node = bridge->of_node; + } else if (output->panel) { + node = output->panel->dev->of_node; + } + + return node ? of_alias_get_id(node, "display") : -ENODEV; +} + static int omap_modeset_init(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; @@ -249,6 +241,9 @@ static int omap_modeset_init(struct drm_device *dev) int ret; u32 plane_crtc_mask; + if (!omapdss_stack_is_ready()) + return -EPROBE_DEFER; + drm_mode_config_init(dev); ret = omap_modeset_init_properties(dev); @@ -263,6 +258,10 @@ static int omap_modeset_init(struct drm_device *dev) * configuration does not match the expectations or exceeds * the available resources, the configuration is rejected. */ + ret = omap_connect_pipelines(dev); + if (ret < 0) + return ret; + if (priv->num_pipes > num_mgrs || priv->num_pipes > num_ovls) { dev_err(dev->dev, "%s(): Too many connected displays\n", __func__); @@ -288,33 +287,75 @@ static int omap_modeset_init(struct drm_device *dev) priv->planes[priv->num_planes++] = plane; } - /* Create the CRTCs, encoders and connectors. */ + /* + * Create the encoders, attach the bridges and get the pipeline alias + * IDs. + */ for (i = 0; i < priv->num_pipes; i++) { struct omap_drm_pipeline *pipe = &priv->pipes[i]; - struct omap_dss_device *display = pipe->display; - struct drm_connector *connector; - struct drm_encoder *encoder; - struct drm_crtc *crtc; + int id; - encoder = omap_encoder_init(dev, pipe->output, display); - if (!encoder) + pipe->encoder = omap_encoder_init(dev, pipe->output); + if (!pipe->encoder) return -ENOMEM; - connector = omap_connector_init(dev, pipe->output, display, - encoder); - if (!connector) - return -ENOMEM; + if (pipe->output->bridge) { + ret = drm_bridge_attach(pipe->encoder, + pipe->output->bridge, NULL); + if (ret < 0) + return ret; + } + + id = omap_display_id(pipe->output); + pipe->alias_id = id >= 0 ? id : i; + } + + /* Sort the pipelines by DT aliases. */ + sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]), + omap_compare_pipelines, NULL); + + /* + * Populate the pipeline lookup table by DISPC channel. Only one display + * is allowed per channel. 
+ */ + for (i = 0; i < priv->num_pipes; ++i) { + struct omap_drm_pipeline *pipe = &priv->pipes[i]; + enum omap_channel channel = pipe->output->dispc_channel; + + if (WARN_ON(priv->channels[channel] != NULL)) + return -EINVAL; + + priv->channels[channel] = pipe; + } + + /* Create the connectors and CRTCs. */ + for (i = 0; i < priv->num_pipes; i++) { + struct omap_drm_pipeline *pipe = &priv->pipes[i]; + struct drm_encoder *encoder = pipe->encoder; + struct drm_crtc *crtc; + + if (!pipe->output->bridge) { + pipe->connector = omap_connector_init(dev, pipe->output, + encoder); + if (!pipe->connector) + return -ENOMEM; + + drm_connector_attach_encoder(pipe->connector, encoder); + + if (pipe->output->panel) { + ret = drm_panel_attach(pipe->output->panel, + pipe->connector); + if (ret < 0) + return ret; + } + } crtc = omap_crtc_init(dev, pipe, priv->planes[i]); if (IS_ERR(crtc)) return PTR_ERR(crtc); - drm_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = 1 << i; - pipe->crtc = crtc; - pipe->encoder = encoder; - pipe->connector = connector; } DBG("registered %u planes, %u crtcs/encoders/connectors\n", @@ -351,10 +392,12 @@ static int omap_modeset_init(struct drm_device *dev) static void omap_modeset_enable_external_hpd(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; - int i; + unsigned int i; - for (i = 0; i < priv->num_pipes; i++) - omap_connector_enable_hpd(priv->pipes[i].connector); + for (i = 0; i < priv->num_pipes; i++) { + if (priv->pipes[i].connector) + omap_connector_enable_hpd(priv->pipes[i].connector); + } } /* @@ -363,10 +406,12 @@ static void omap_modeset_enable_external_hpd(struct drm_device *ddev) static void omap_modeset_disable_external_hpd(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; - int i; + unsigned int i; - for (i = 0; i < priv->num_pipes; i++) - omap_connector_disable_hpd(priv->pipes[i].connector); + for (i = 0; i < priv->num_pipes; i++) { + if (priv->pipes[i].connector) + omap_connector_disable_hpd(priv->pipes[i].connector); + } } /* @@ -551,10 +596,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) omap_crtc_pre_init(priv); - ret = omap_connect_pipelines(ddev); - if (ret) - goto err_crtc_uninit; - soc = soc_device_match(omapdrm_soc_devices); priv->omaprev = soc ? 
(unsigned int)soc->data : 0; priv->wq = alloc_ordered_workqueue("omapdrm", 0); @@ -612,7 +653,6 @@ err_gem_deinit: omap_gem_deinit(ddev); destroy_workqueue(priv->wq); omap_disconnect_pipelines(ddev); -err_crtc_uninit: omap_crtc_pre_uninit(priv); drm_dev_put(ddev); return ret; @@ -685,54 +725,12 @@ static int pdev_remove(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -static int omap_drm_suspend_all_displays(struct drm_device *ddev) -{ - struct omap_drm_private *priv = ddev->dev_private; - int i; - - for (i = 0; i < priv->num_pipes; i++) { - struct omap_dss_device *display = priv->pipes[i].display; - - if (display->state == OMAP_DSS_DISPLAY_ACTIVE) { - display->ops->disable(display); - display->activate_after_resume = true; - } else { - display->activate_after_resume = false; - } - } - - return 0; -} - -static int omap_drm_resume_all_displays(struct drm_device *ddev) -{ - struct omap_drm_private *priv = ddev->dev_private; - int i; - - for (i = 0; i < priv->num_pipes; i++) { - struct omap_dss_device *display = priv->pipes[i].display; - - if (display->activate_after_resume) { - display->ops->enable(display); - display->activate_after_resume = false; - } - } - - return 0; -} - static int omap_drm_suspend(struct device *dev) { struct omap_drm_private *priv = dev_get_drvdata(dev); struct drm_device *drm_dev = priv->ddev; - drm_kms_helper_poll_disable(drm_dev); - - drm_modeset_lock_all(drm_dev); - omap_drm_suspend_all_displays(drm_dev); - drm_modeset_unlock_all(drm_dev); - - return 0; + return drm_mode_config_helper_suspend(drm_dev); } static int omap_drm_resume(struct device *dev) @@ -740,11 +738,7 @@ static int omap_drm_resume(struct device *dev) struct omap_drm_private *priv = dev_get_drvdata(dev); struct drm_device *drm_dev = priv->ddev; - drm_modeset_lock_all(drm_dev); - omap_drm_resume_all_displays(drm_dev); - drm_modeset_unlock_all(drm_dev); - - drm_kms_helper_poll_enable(drm_dev); + drm_mode_config_helper_resume(drm_dev); return omap_gem_resume(drm_dev); } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 0c57d2814c51..3cca45cb25f3 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -49,7 +49,7 @@ struct omap_drm_pipeline { struct drm_encoder *encoder; struct drm_connector *connector; struct omap_dss_device *output; - struct omap_dss_device *display; + unsigned int alias_id; }; struct omap_drm_private { diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 0d85b3a35767..40512419642b 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -20,6 +20,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_edid.h> +#include <drm/drm_panel.h> #include "omap_drv.h" @@ -37,7 +38,6 @@ struct omap_encoder { struct drm_encoder base; struct omap_dss_device *output; - struct omap_dss_device *display; }; static void omap_encoder_destroy(struct drm_encoder *encoder) @@ -52,22 +52,43 @@ static const struct drm_encoder_funcs omap_encoder_funcs = { .destroy = omap_encoder_destroy, }; -static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder, +static void omap_encoder_update_videomode_flags(struct videomode *vm, + u32 bus_flags) +{ + if (!(vm->flags & (DISPLAY_FLAGS_DE_LOW | + DISPLAY_FLAGS_DE_HIGH))) { + if (bus_flags & DRM_BUS_FLAG_DE_LOW) + vm->flags |= DISPLAY_FLAGS_DE_LOW; + else if (bus_flags & DRM_BUS_FLAG_DE_HIGH) + vm->flags |= DISPLAY_FLAGS_DE_HIGH; + } + + if (!(vm->flags & 
(DISPLAY_FLAGS_PIXDATA_POSEDGE | + DISPLAY_FLAGS_PIXDATA_NEGEDGE))) { + if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE) + vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; + else if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) + vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE; + } + + if (!(vm->flags & (DISPLAY_FLAGS_SYNC_POSEDGE | + DISPLAY_FLAGS_SYNC_NEGEDGE))) { + if (bus_flags & DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE) + vm->flags |= DISPLAY_FLAGS_SYNC_POSEDGE; + else if (bus_flags & DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE) + vm->flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; + } +} + +static void omap_encoder_hdmi_mode_set(struct drm_connector *connector, + struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { - struct drm_device *dev = encoder->dev; struct omap_encoder *omap_encoder = to_omap_encoder(encoder); struct omap_dss_device *dssdev = omap_encoder->output; - struct drm_connector *connector; bool hdmi_mode; - hdmi_mode = false; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - hdmi_mode = omap_connector_get_hdmi_mode(connector); - break; - } - } + hdmi_mode = omap_connector_get_hdmi_mode(connector); if (dssdev->ops->hdmi.set_hdmi_mode) dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode); @@ -88,8 +109,18 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); + struct omap_dss_device *output = omap_encoder->output; struct omap_dss_device *dssdev; + struct drm_device *dev = encoder->dev; + struct drm_connector *connector; + struct drm_bridge *bridge; struct videomode vm = { 0 }; + u32 bus_flags; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) + break; + } drm_display_mode_to_videomode(adjusted_mode, &vm); @@ -102,66 +133,102 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, * * A better solution is to use DRM's bus-flags through the whole driver. */ - for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { - unsigned long bus_flags = dssdev->bus_flags; - - if (!(vm.flags & (DISPLAY_FLAGS_DE_LOW | - DISPLAY_FLAGS_DE_HIGH))) { - if (bus_flags & DRM_BUS_FLAG_DE_LOW) - vm.flags |= DISPLAY_FLAGS_DE_LOW; - else if (bus_flags & DRM_BUS_FLAG_DE_HIGH) - vm.flags |= DISPLAY_FLAGS_DE_HIGH; - } - - if (!(vm.flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE | - DISPLAY_FLAGS_PIXDATA_NEGEDGE))) { - if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) - vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; - else if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) - vm.flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE; - } - - if (!(vm.flags & (DISPLAY_FLAGS_SYNC_POSEDGE | - DISPLAY_FLAGS_SYNC_NEGEDGE))) { - if (bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE) - vm.flags |= DISPLAY_FLAGS_SYNC_POSEDGE; - else if (bus_flags & DRM_BUS_FLAG_SYNC_NEGEDGE) - vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; - } + for (dssdev = output; dssdev; dssdev = dssdev->next) + omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags); + + for (bridge = output->bridge; bridge; bridge = bridge->next) { + if (!bridge->timings) + continue; + + bus_flags = bridge->timings->input_bus_flags; + omap_encoder_update_videomode_flags(&vm, bus_flags); } + bus_flags = connector->display_info.bus_flags; + omap_encoder_update_videomode_flags(&vm, bus_flags); + /* Set timings for all devices in the display pipeline. 
*/ - dss_mgr_set_timings(omap_encoder->output, &vm); + dss_mgr_set_timings(output, &vm); - for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { + for (dssdev = output; dssdev; dssdev = dssdev->next) { if (dssdev->ops->set_timings) - dssdev->ops->set_timings(dssdev, &vm); + dssdev->ops->set_timings(dssdev, adjusted_mode); } /* Set the HDMI mode and HDMI infoframe if applicable. */ - if (omap_encoder->output->output_type == OMAP_DISPLAY_TYPE_HDMI) - omap_encoder_hdmi_mode_set(encoder, adjusted_mode); + if (output->type == OMAP_DISPLAY_TYPE_HDMI) + omap_encoder_hdmi_mode_set(connector, encoder, adjusted_mode); } static void omap_encoder_disable(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->display; + struct omap_dss_device *dssdev = omap_encoder->output; + struct drm_device *dev = encoder->dev; + + dev_dbg(dev->dev, "disable(%s)\n", dssdev->name); + + /* Disable the panel if present. */ + if (dssdev->panel) { + drm_panel_disable(dssdev->panel); + drm_panel_unprepare(dssdev->panel); + } + + /* + * Disable the chain of external devices, starting at the one at the + * internal encoder's output. + */ + omapdss_device_disable(dssdev->next); + + /* + * Disable the internal encoder. This will disable the DSS output. The + * DSI is treated as an exception as DSI pipelines still use the legacy + * flow where the pipeline output controls the encoder. + */ + if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) { + dssdev->ops->disable(dssdev); + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; + } - dssdev->ops->disable(dssdev); + /* + * Perform the post-disable operations on the chain of external devices + * to complete the display pipeline disable. + */ + omapdss_device_post_disable(dssdev->next); } static void omap_encoder_enable(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->display; - int r; - - r = dssdev->ops->enable(dssdev); - if (r) - dev_err(encoder->dev->dev, - "Failed to enable display '%s': %d\n", - dssdev->name, r); + struct omap_dss_device *dssdev = omap_encoder->output; + struct drm_device *dev = encoder->dev; + + dev_dbg(dev->dev, "enable(%s)\n", dssdev->name); + + /* Prepare the chain of external devices for pipeline enable. */ + omapdss_device_pre_enable(dssdev->next); + + /* + * Enable the internal encoder. This will enable the DSS output. The + * DSI is treated as an exception as DSI pipelines still use the legacy + * flow where the pipeline output controls the encoder. + */ + if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) { + dssdev->ops->enable(dssdev); + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + } + + /* + * Enable the chain of external devices, starting at the one at the + * internal encoder's output. + */ + omapdss_device_enable(dssdev->next); + + /* Enable the panel if present. 
*/ + if (dssdev->panel) { + drm_panel_prepare(dssdev->panel); + drm_panel_enable(dssdev->panel); + } } static int omap_encoder_atomic_check(struct drm_encoder *encoder, @@ -169,35 +236,17 @@ static int omap_encoder_atomic_check(struct drm_encoder *encoder, struct drm_connector_state *conn_state) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - enum omap_channel channel = omap_encoder->output->dispc_channel; - struct drm_device *dev = encoder->dev; - struct omap_drm_private *priv = dev->dev_private; - struct omap_dss_device *dssdev; - struct videomode vm = { 0 }; - int ret; - - drm_display_mode_to_videomode(&crtc_state->mode, &vm); - - ret = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm); - if (ret) - goto done; - - for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { - if (!dssdev->ops->check_timings) - continue; - - ret = dssdev->ops->check_timings(dssdev, &vm); - if (ret) - goto done; + enum drm_mode_status status; + + status = omap_connector_mode_fixup(omap_encoder->output, + &crtc_state->mode, + &crtc_state->adjusted_mode); + if (status != MODE_OK) { + dev_err(encoder->dev->dev, "invalid timings: %d\n", status); + return -EINVAL; } - drm_display_mode_from_videomode(&vm, &crtc_state->adjusted_mode); - -done: - if (ret) - dev_err(dev->dev, "invalid timings: %d\n", ret); - - return ret; + return 0; } static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = { @@ -209,8 +258,7 @@ static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = { /* initialize encoder */ struct drm_encoder *omap_encoder_init(struct drm_device *dev, - struct omap_dss_device *output, - struct omap_dss_device *display) + struct omap_dss_device *output) { struct drm_encoder *encoder = NULL; struct omap_encoder *omap_encoder; @@ -220,7 +268,6 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev, goto fail; omap_encoder->output = output; - omap_encoder->display = display; encoder = &omap_encoder->base; diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.h b/drivers/gpu/drm/omapdrm/omap_encoder.h index a7b5dde63ecb..4aefb3142886 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.h +++ b/drivers/gpu/drm/omapdrm/omap_encoder.h @@ -25,7 +25,6 @@ struct drm_encoder; struct omap_dss_device; struct drm_encoder *omap_encoder_init(struct drm_device *dev, - struct omap_dss_device *output, - struct omap_dss_device *display); + struct omap_dss_device *output); #endif /* __OMAPDRM_ENCODER_H__ */ diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 3e070153ef21..f53f817356db 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -149,6 +149,15 @@ config DRM_PANEL_RAYDIUM_RM68200 Say Y here if you want to enable support for Raydium RM68200 720x1280 DSI video mode panel. +config DRM_PANEL_RONBO_RB070D30 + tristate "Ronbo Electronics RB070D30 panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Ronbo Electronics + RB070D30 1024x600 DSI panel. 
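The new DRM_PANEL_RONBO_RB070D30 entry builds a drm_panel provider (the panel-ronbo-rb070d30.c driver added further down in this series). For context, here is a minimal sketch, not taken from this patch, of how a display driver consumes such a panel through the generic drm_panel hooks; it mirrors the ordering used by the omap_encoder enable/disable changes earlier in this diff. The drm_panel_* helpers are the in-tree API, while the example_* wrappers and the assumption that the caller already holds a struct drm_panel pointer are purely illustrative:

	#include <drm/drm_panel.h>

	/* Power up: prepare() handles supplies and reset, enable() starts the video signal and backlight. */
	static void example_panel_power_on(struct drm_panel *panel)
	{
		drm_panel_prepare(panel);
		drm_panel_enable(panel);
	}

	/* Power down in the reverse order. */
	static void example_panel_power_off(struct drm_panel *panel)
	{
		drm_panel_disable(panel);
		drm_panel_unprepare(panel);
	}

This split is what lets the RB070D30 driver below keep its regulator and GPIO sequencing in prepare()/unprepare() while the DSI sleep-mode commands and backlight handling live in enable()/disable().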
+ config DRM_PANEL_SAMSUNG_S6D16D0 tristate "Samsung S6D16D0 DSI video mode panel" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index e7ab71968bbf..7834947a53b0 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o +obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c index b428c4678106..078fa2c0eef8 100644 --- a/drivers/gpu/drm/panel/panel-arm-versatile.c +++ b/drivers/gpu/drm/panel/panel-arm-versatile.c @@ -191,7 +191,7 @@ static const struct versatile_panel_type versatile_panels[] = { .vrefresh = 390, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, }, /* * Sanyo ALR252RGT 240x320 portrait display found on the @@ -215,7 +215,7 @@ static const struct versatile_panel_type versatile_panels[] = { .vrefresh = 116, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, .ib2 = true, }, }; diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c index bd38bf4f1ba6..35497ff08391 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c @@ -412,11 +412,11 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili) if (ili->conf->dclk_active_high) { reg = ILI9322_POL_DCLK; connector->display_info.bus_flags |= - DRM_BUS_FLAG_PIXDATA_POSEDGE; + DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; } else { reg = 0; connector->display_info.bus_flags |= - DRM_BUS_FLAG_PIXDATA_NEGEDGE; + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; } if (ili->conf->de_active_high) { reg |= ILI9322_POL_DE; diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c new file mode 100644 index 000000000000..3c15764f0c03 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018-2019, Bridge Systems BV + * Copyright (C) 2018-2019, Bootlin + * Copyright (C) 2017, Free Electrons + * + * This file based on panel-ilitek-ili9881c.c + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/fb.h> +#include <linux/kernel.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> + +#include <linux/gpio/consumer.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_connector.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_print.h> + +struct rb070d30_panel { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct backlight_device *backlight; + struct regulator *supply; + + struct { + struct gpio_desc *power; + struct gpio_desc *reset; + 
struct gpio_desc *updn; + struct gpio_desc *shlr; + } gpios; +}; + +static inline struct rb070d30_panel *panel_to_rb070d30_panel(struct drm_panel *panel) +{ + return container_of(panel, struct rb070d30_panel, panel); +} + +static int rb070d30_panel_prepare(struct drm_panel *panel) +{ + struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); + int ret; + + ret = regulator_enable(ctx->supply); + if (ret < 0) { + DRM_DEV_ERROR(&ctx->dsi->dev, "Failed to enable supply: %d\n", ret); + return ret; + } + + msleep(20); + gpiod_set_value(ctx->gpios.power, 1); + msleep(20); + gpiod_set_value(ctx->gpios.reset, 1); + msleep(20); + return 0; +} + +static int rb070d30_panel_unprepare(struct drm_panel *panel) +{ + struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); + + gpiod_set_value(ctx->gpios.reset, 0); + gpiod_set_value(ctx->gpios.power, 0); + regulator_disable(ctx->supply); + + return 0; +} + +static int rb070d30_panel_enable(struct drm_panel *panel) +{ + struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); + int ret; + + ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi); + if (ret) + return ret; + + ret = backlight_enable(ctx->backlight); + if (ret) + goto out; + + return 0; + +out: + mipi_dsi_dcs_enter_sleep_mode(ctx->dsi); + return ret; +} + +static int rb070d30_panel_disable(struct drm_panel *panel) +{ + struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); + + backlight_disable(ctx->backlight); + return mipi_dsi_dcs_enter_sleep_mode(ctx->dsi); +} + +/* Default timings */ +static const struct drm_display_mode default_mode = { + .clock = 51206, + .hdisplay = 1024, + .hsync_start = 1024 + 160, + .hsync_end = 1024 + 160 + 80, + .htotal = 1024 + 160 + 80 + 80, + .vdisplay = 600, + .vsync_start = 600 + 12, + .vsync_end = 600 + 12 + 10, + .vtotal = 600 + 12 + 10 + 13, + .vrefresh = 60, + + .width_mm = 154, + .height_mm = 85, +}; + +static int rb070d30_panel_get_modes(struct drm_panel *panel) +{ + struct drm_connector *connector = panel->connector; + struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); + struct drm_display_mode *mode; + static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + + mode = drm_mode_duplicate(panel->drm, &default_mode); + if (!mode) { + DRM_DEV_ERROR(&ctx->dsi->dev, + "Failed to add mode " DRM_MODE_FMT "\n", + DRM_MODE_ARG(&default_mode)); + return -EINVAL; + } + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_probed_add(connector, mode); + + panel->connector->display_info.bpc = 8; + panel->connector->display_info.width_mm = mode->width_mm; + panel->connector->display_info.height_mm = mode->height_mm; + drm_display_info_set_bus_formats(&connector->display_info, + &bus_format, 1); + + return 1; +} + +static const struct drm_panel_funcs rb070d30_panel_funcs = { + .get_modes = rb070d30_panel_get_modes, + .prepare = rb070d30_panel_prepare, + .enable = rb070d30_panel_enable, + .disable = rb070d30_panel_disable, + .unprepare = rb070d30_panel_unprepare, +}; + +static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi) +{ + struct rb070d30_panel *ctx; + int ret; + + ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->supply = devm_regulator_get(&dsi->dev, "vcc-lcd"); + if (IS_ERR(ctx->supply)) + return PTR_ERR(ctx->supply); + + mipi_dsi_set_drvdata(dsi, ctx); + ctx->dsi = dsi; + + drm_panel_init(&ctx->panel); + ctx->panel.dev = &dsi->dev; + ctx->panel.funcs = &rb070d30_panel_funcs; + + ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", 
GPIOD_OUT_LOW); + if (IS_ERR(ctx->gpios.reset)) { + DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n"); + return PTR_ERR(ctx->gpios.reset); + } + + ctx->gpios.power = devm_gpiod_get(&dsi->dev, "power", GPIOD_OUT_LOW); + if (IS_ERR(ctx->gpios.power)) { + DRM_DEV_ERROR(&dsi->dev, "Couldn't get our power GPIO\n"); + return PTR_ERR(ctx->gpios.power); + } + + /* + * We don't change the state of that GPIO later on but we need + * to force it into a low state. + */ + ctx->gpios.updn = devm_gpiod_get(&dsi->dev, "updn", GPIOD_OUT_LOW); + if (IS_ERR(ctx->gpios.updn)) { + DRM_DEV_ERROR(&dsi->dev, "Couldn't get our updn GPIO\n"); + return PTR_ERR(ctx->gpios.updn); + } + + /* + * We don't change the state of that GPIO later on but we need + * to force it into a low state. + */ + ctx->gpios.shlr = devm_gpiod_get(&dsi->dev, "shlr", GPIOD_OUT_LOW); + if (IS_ERR(ctx->gpios.shlr)) { + DRM_DEV_ERROR(&dsi->dev, "Couldn't get our shlr GPIO\n"); + return PTR_ERR(ctx->gpios.shlr); + } + + ctx->backlight = devm_of_find_backlight(&dsi->dev); + if (IS_ERR(ctx->backlight)) { + DRM_DEV_ERROR(&dsi->dev, "Couldn't get our backlight\n"); + return PTR_ERR(ctx->backlight); + } + + ret = drm_panel_add(&ctx->panel); + if (ret < 0) + return ret; + + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->lanes = 4; + + return mipi_dsi_attach(dsi); +} + +static int rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi) +{ + struct rb070d30_panel *ctx = mipi_dsi_get_drvdata(dsi); + + mipi_dsi_detach(dsi); + drm_panel_remove(&ctx->panel); + + return 0; +} + +static const struct of_device_id rb070d30_panel_of_match[] = { + { .compatible = "ronbo,rb070d30" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, rb070d30_panel_of_match); + +static struct mipi_dsi_driver rb070d30_panel_driver = { + .probe = rb070d30_panel_dsi_probe, + .remove = rb070d30_panel_dsi_remove, + .driver = { + .name = "panel-ronbo-rb070d30", + .of_match_table = rb070d30_panel_of_match, + }, +}; +module_mipi_dsi_driver(rb070d30_panel_driver); + +MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>"); +MODULE_AUTHOR("Konstantin Sudakov <k.sudakov@integrasources.com>"); +MODULE_DESCRIPTION("Ronbo RB070D30 Panel Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c index 2d99e28ff117..bdcc5d80823d 100644 --- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c +++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c @@ -328,7 +328,7 @@ static const struct seiko_panel_desc seiko_43wvf1g = { .height = 57, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, }; static const struct of_device_id platform_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 9e8218f6a3f2..8fee7a8b29d9 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -914,7 +914,7 @@ static const struct panel_desc cdtech_s043wq26h_ct7 = { .width = 95, .height = 54, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode cdtech_s070wv95_ct16_mode = { @@ -1034,7 +1034,7 @@ static const struct panel_desc dataimage_scf0700c48ggu18 = { .height = 91, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | 
DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct display_timing dlc_dlc0700yzg_1_timing = { @@ -1119,7 +1119,7 @@ static const struct panel_desc edt_et057090dhu = { .height = 86, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X18, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, }; static const struct drm_display_mode edt_etm0700g0dh6_mode = { @@ -1145,7 +1145,7 @@ static const struct panel_desc edt_etm0700g0dh6 = { .height = 91, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X18, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, }; static const struct panel_desc edt_etm0700g0bdh6 = { @@ -1157,7 +1157,7 @@ static const struct panel_desc edt_etm0700g0bdh6 = { .height = 91, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X18, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = { @@ -1311,7 +1311,7 @@ static const struct panel_desc innolux_at043tn24 = { .height = 54, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode innolux_at070tn92_mode = { @@ -1818,7 +1818,7 @@ static const struct panel_desc nec_nl4827hc19_05b = { .height = 54, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode netron_dy_e231732_mode = { @@ -1867,8 +1867,8 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = { .height = 54, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE | - DRM_BUS_FLAG_SYNC_POSEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | + DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE, }; static const struct display_timing nlt_nl192108ac18_02d_timing = { @@ -2029,7 +2029,33 @@ static const struct panel_desc ortustech_com43h4m85ulc = { .height = 93, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, +}; + +static const struct drm_display_mode osddisplays_osd070t1718_19ts_mode = { + .clock = 33000, + .hdisplay = 800, + .hsync_start = 800 + 210, + .hsync_end = 800 + 210 + 30, + .htotal = 800 + 210 + 30 + 16, + .vdisplay = 480, + .vsync_start = 480 + 22, + .vsync_end = 480 + 22 + 13, + .vtotal = 480 + 22 + 13 + 10, + .vrefresh = 60, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static const struct panel_desc osddisplays_osd070t1718_19ts = { + .modes = &osddisplays_osd070t1718_19ts_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 152, + .height = 91, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode pda_91_00156_a0_mode = { @@ -2398,7 +2424,7 @@ static const struct panel_desc toshiba_lt089ac29000 = { .height = 116, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | 
DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode tpk_f07a_0102_mode = { @@ -2421,7 +2447,7 @@ static const struct panel_desc tpk_f07a_0102 = { .width = 152, .height = 91, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode tpk_f10a_0102_mode = { @@ -2737,6 +2763,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "ortustech,com43h4m85ulc", .data = &ortustech_com43h4m85ulc, }, { + .compatible = "osddisplays,osd070t1718-19ts", + .data = &osddisplays_osd070t1718_19ts, + }, { .compatible = "pda,91-00156-a0", .data = &pda_91_00156_a0, }, { diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c index 5a9f8f4d5d24..25f00cfc1af4 100644 --- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c +++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c @@ -118,7 +118,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vtotal = 480 + 10 + 1 + 35, .vrefresh = 60, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, { .name = "640x480 RGB", @@ -135,7 +135,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vtotal = 480 + 18 + 1 + 27, .vrefresh = 60, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, { .name = "480x272 RGB", @@ -152,7 +152,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vtotal = 272 + 2 + 1 + 12, .vrefresh = 60, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, { .name = "480x640 RGB", @@ -169,7 +169,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vtotal = 640 + 4 + 1 + 8, .vrefresh = 60, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, { .name = "400x240 RGB", @@ -186,7 +186,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vtotal = 240 + 2 + 1 + 20, .vrefresh = 60, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, }; diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index 754f6b25f265..0c5d391f0a8f 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -188,7 +188,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe, tim2 |= TIM2_IOE; if (connector->display_info.bus_flags & - DRM_BUS_FLAG_PIXDATA_NEGEDGE) + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) tim2 |= TIM2_IPC; } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 08c725544a2f..8b319ebbb0fb 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -535,7 +535,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, { struct qxl_device *qdev = plane->dev->dev_private; struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]); - struct qxl_bo *bo_old, *primary; + struct qxl_bo *primary; struct drm_clip_rect norect = { .x1 = 0, .y1 = 0, @@ -544,12 +544,6 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, }; uint32_t dumb_shadow_offset = 0; - if (old_state->fb) { - bo_old = gem_to_qxl_bo(old_state->fb->obj[0]); - } else { - bo_old = NULL; - } - primary = bo->shadow ? 
bo->shadow : bo; if (!primary->is_primary) { diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index bb81e310eb6d..578d867a81d5 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -79,6 +79,10 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto free_dev; + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl"); + if (ret) + goto disable_pci; + ret = qxl_device_init(qdev, &qxl_driver, pdev); if (ret) goto disable_pci; @@ -94,7 +98,6 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto modeset_cleanup; - drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl"); drm_fbdev_generic_setup(&qdev->ddev, 32); return 0; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 53f29a115104..0a9312ea250a 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1388,7 +1388,7 @@ int radeon_device_init(struct radeon_device *rdev, pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); pr_warn("radeon: No coherent DMA available\n"); } - rdev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits); + rdev->need_swiotlb = drm_need_swiotlb(dma_bits); /* Registers mapping */ /* TODO: block userspace mapping of io register */ diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 7c36e2777a15..1529849e217e 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -36,3 +36,7 @@ config DRM_RCAR_VSP depends on VIDEO_RENESAS_VSP1=y || (VIDEO_RENESAS_VSP1 && DRM_RCAR_DU=m) help Enable support to expose the R-Car VSP Compositor as KMS planes. + +config DRM_RCAR_WRITEBACK + bool + default y if ARM64 diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index 2a3b8d7972b5..6c2ed9c46467 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -4,7 +4,7 @@ rcar-du-drm-y := rcar_du_crtc.o \ rcar_du_encoder.o \ rcar_du_group.o \ rcar_du_kms.o \ - rcar_du_plane.o + rcar_du_plane.o \ rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \ rcar_du_of_lvds_r8a7790.dtb.o \ @@ -13,6 +13,7 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \ rcar_du_of_lvds_r8a7795.dtb.o \ rcar_du_of_lvds_r8a7796.dtb.o rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o +rcar-du-drm-$(CONFIG_DRM_RCAR_WRITEBACK) += rcar_du_writeback.o obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 4cdea14d552f..834432cafda8 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -648,8 +648,13 @@ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc, rstate->outputs = 0; drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) { - struct rcar_du_encoder *renc = to_rcar_encoder(encoder); + struct rcar_du_encoder *renc; + /* Skip the writeback encoder. 
*/ + if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) + continue; + + renc = to_rcar_encoder(encoder); rstate->outputs |= BIT(renc->output); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index bcb35b0b7612..92f7d5f3ff80 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -15,6 +15,7 @@ #include <linux/wait.h> #include <drm/drm_crtc.h> +#include <drm/drm_writeback.h> #include <media/vsp1.h> @@ -27,7 +28,7 @@ struct rcar_du_vsp; * @clock: the CRTC functional clock * @extclock: external pixel dot clock (optional) * @mmio_offset: offset of the CRTC registers in the DU MMIO block - * @index: CRTC software and hardware index + * @index: CRTC hardware index * @initialized: whether the CRTC has been initialized and clocks enabled * @dsysr: cached value of the DSYSR register * @vblank_enable: whether vblank events are enabled on this CRTC @@ -39,6 +40,7 @@ struct rcar_du_vsp; * @group: CRTC group this CRTC belongs to * @vsp: VSP feeding video to this CRTC * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC + * @writeback: the writeback connector */ struct rcar_du_crtc { struct drm_crtc crtc; @@ -65,9 +67,12 @@ struct rcar_du_crtc { const char *const *sources; unsigned int sources_count; + + struct drm_writeback_connector writeback; }; -#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc) +#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc) +#define wb_to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, writeback) /** * struct rcar_du_crtc_state - Driver-specific CRTC state diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 3b7d50a8fb9b..f8f7fff34dff 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -26,6 +26,7 @@ #include "rcar_du_kms.h" #include "rcar_du_regs.h" #include "rcar_du_vsp.h" +#include "rcar_du_writeback.h" /* ----------------------------------------------------------------------------- * Format helpers @@ -34,60 +35,70 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { { .fourcc = DRM_FORMAT_RGB565, + .v4l2 = V4L2_PIX_FMT_RGB565, .bpp = 16, .planes = 1, .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP, .edf = PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_ARGB1555, + .v4l2 = V4L2_PIX_FMT_ARGB555, .bpp = 16, .planes = 1, .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB, .edf = PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_XRGB1555, + .v4l2 = V4L2_PIX_FMT_XRGB555, .bpp = 16, .planes = 1, .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB, .edf = PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_XRGB8888, + .v4l2 = V4L2_PIX_FMT_XBGR32, .bpp = 32, .planes = 1, .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP, .edf = PnDDCR4_EDF_RGB888, }, { .fourcc = DRM_FORMAT_ARGB8888, + .v4l2 = V4L2_PIX_FMT_ABGR32, .bpp = 32, .planes = 1, .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP, .edf = PnDDCR4_EDF_ARGB8888, }, { .fourcc = DRM_FORMAT_UYVY, + .v4l2 = V4L2_PIX_FMT_UYVY, .bpp = 16, .planes = 1, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_YUYV, + .v4l2 = V4L2_PIX_FMT_YUYV, .bpp = 16, .planes = 1, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_NV12, + .v4l2 = V4L2_PIX_FMT_NV12M, .bpp = 12, .planes = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_NV21, + .v4l2 = V4L2_PIX_FMT_NV21M, .bpp = 12, .planes = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = 
PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_NV16, + .v4l2 = V4L2_PIX_FMT_NV16M, .bpp = 16, .planes = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, @@ -99,62 +110,77 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { */ { .fourcc = DRM_FORMAT_RGB332, + .v4l2 = V4L2_PIX_FMT_RGB332, .bpp = 8, .planes = 1, }, { .fourcc = DRM_FORMAT_ARGB4444, + .v4l2 = V4L2_PIX_FMT_ARGB444, .bpp = 16, .planes = 1, }, { .fourcc = DRM_FORMAT_XRGB4444, + .v4l2 = V4L2_PIX_FMT_XRGB444, .bpp = 16, .planes = 1, }, { .fourcc = DRM_FORMAT_BGR888, + .v4l2 = V4L2_PIX_FMT_RGB24, .bpp = 24, .planes = 1, }, { .fourcc = DRM_FORMAT_RGB888, + .v4l2 = V4L2_PIX_FMT_BGR24, .bpp = 24, .planes = 1, }, { .fourcc = DRM_FORMAT_BGRA8888, + .v4l2 = V4L2_PIX_FMT_ARGB32, .bpp = 32, .planes = 1, }, { .fourcc = DRM_FORMAT_BGRX8888, + .v4l2 = V4L2_PIX_FMT_XRGB32, .bpp = 32, .planes = 1, }, { .fourcc = DRM_FORMAT_YVYU, + .v4l2 = V4L2_PIX_FMT_YVYU, .bpp = 16, .planes = 1, }, { .fourcc = DRM_FORMAT_NV61, + .v4l2 = V4L2_PIX_FMT_NV61M, .bpp = 16, .planes = 2, }, { .fourcc = DRM_FORMAT_YUV420, + .v4l2 = V4L2_PIX_FMT_YUV420M, .bpp = 12, .planes = 3, }, { .fourcc = DRM_FORMAT_YVU420, + .v4l2 = V4L2_PIX_FMT_YVU420M, .bpp = 12, .planes = 3, }, { .fourcc = DRM_FORMAT_YUV422, + .v4l2 = V4L2_PIX_FMT_YUV422M, .bpp = 16, .planes = 3, }, { .fourcc = DRM_FORMAT_YVU422, + .v4l2 = V4L2_PIX_FMT_YVU422M, .bpp = 16, .planes = 3, }, { .fourcc = DRM_FORMAT_YUV444, + .v4l2 = V4L2_PIX_FMT_YUV444M, .bpp = 24, .planes = 3, }, { .fourcc = DRM_FORMAT_YVU444, + .v4l2 = V4L2_PIX_FMT_YVU444M, .bpp = 24, .planes = 3, }, @@ -639,6 +665,17 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) encoder->possible_clones = (1 << num_encoders) - 1; } + /* Create the writeback connectors. */ + if (rcdu->info->gen >= 3) { + for (i = 0; i < rcdu->num_crtcs; ++i) { + struct rcar_du_crtc *rcrtc = &rcdu->crtcs[i]; + + ret = rcar_du_writeback_init(rcdu, rcrtc); + if (ret < 0) + return ret; + } + } + /* * Initialize the default DPAD0 source to the index of the first DU * channel that can be connected to DPAD0. 
The exact value doesn't diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h index e171527abdaa..0346504d8c59 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h @@ -19,6 +19,7 @@ struct rcar_du_device; struct rcar_du_format_info { u32 fourcc; + u32 v4l2; unsigned int bpp; unsigned int planes; unsigned int pnmr; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index 0878accbd134..4c8bed34838c 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -10,6 +10,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> @@ -26,16 +27,19 @@ #include "rcar_du_drv.h" #include "rcar_du_kms.h" #include "rcar_du_vsp.h" +#include "rcar_du_writeback.h" -static void rcar_du_vsp_complete(void *private, bool completed, u32 crc) +static void rcar_du_vsp_complete(void *private, unsigned int status, u32 crc) { struct rcar_du_crtc *crtc = private; if (crtc->vblank_enable) drm_crtc_handle_vblank(&crtc->crtc); - if (completed) + if (status & VSP1_DU_STATUS_COMPLETE) rcar_du_crtc_finish_page_flip(crtc); + if (status & VSP1_DU_STATUS_WRITEBACK) + rcar_du_writeback_complete(crtc); drm_crtc_add_crc_entry(&crtc->crtc, false, 0, &crc); } @@ -107,11 +111,12 @@ void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) state = to_rcar_crtc_state(crtc->crtc.state); cfg.crc = state->crc; + rcar_du_writeback_setup(crtc, &cfg.writeback); + vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe, &cfg); } -/* Keep the two tables in sync. */ -static const u32 formats_kms[] = { +static const u32 rcar_du_vsp_formats[] = { DRM_FORMAT_RGB332, DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB4444, @@ -139,40 +144,13 @@ static const u32 formats_kms[] = { DRM_FORMAT_YVU444, }; -static const u32 formats_v4l2[] = { - V4L2_PIX_FMT_RGB332, - V4L2_PIX_FMT_ARGB444, - V4L2_PIX_FMT_XRGB444, - V4L2_PIX_FMT_ARGB555, - V4L2_PIX_FMT_XRGB555, - V4L2_PIX_FMT_RGB565, - V4L2_PIX_FMT_RGB24, - V4L2_PIX_FMT_BGR24, - V4L2_PIX_FMT_ARGB32, - V4L2_PIX_FMT_XRGB32, - V4L2_PIX_FMT_ABGR32, - V4L2_PIX_FMT_XBGR32, - V4L2_PIX_FMT_UYVY, - V4L2_PIX_FMT_YUYV, - V4L2_PIX_FMT_YVYU, - V4L2_PIX_FMT_NV12M, - V4L2_PIX_FMT_NV21M, - V4L2_PIX_FMT_NV16M, - V4L2_PIX_FMT_NV61M, - V4L2_PIX_FMT_YUV420M, - V4L2_PIX_FMT_YVU420M, - V4L2_PIX_FMT_YUV422M, - V4L2_PIX_FMT_YVU422M, - V4L2_PIX_FMT_YUV444M, - V4L2_PIX_FMT_YVU444M, -}; - static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane) { struct rcar_du_vsp_plane_state *state = to_rcar_vsp_plane_state(plane->plane.state); struct rcar_du_crtc *crtc = to_rcar_crtc(state->state.crtc); struct drm_framebuffer *fb = plane->plane.state->fb; + const struct rcar_du_format_info *format; struct vsp1_du_atomic_config cfg = { .pixelformat = 0, .pitch = fb->pitches[0], @@ -195,37 +173,23 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane) cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl) + fb->offsets[i]; - for (i = 0; i < ARRAY_SIZE(formats_kms); ++i) { - if (formats_kms[i] == state->format->fourcc) { - cfg.pixelformat = formats_v4l2[i]; - break; - } - } + format = rcar_du_format_info(state->format->fourcc); + cfg.pixelformat = format->v4l2; vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe, plane->index, &cfg); } -static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane, - 
struct drm_plane_state *state) +int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb, + struct sg_table sg_tables[3]) { - struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state); - struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp; struct rcar_du_device *rcdu = vsp->dev; unsigned int i; int ret; - /* - * There's no need to prepare (and unprepare) the framebuffer when the - * plane is not visible, as it will not be displayed. - */ - if (!state->visible) - return 0; - - for (i = 0; i < rstate->format->planes; ++i) { - struct drm_gem_cma_object *gem = - drm_fb_cma_get_gem_obj(state->fb, i); - struct sg_table *sgt = &rstate->sg_tables[i]; + for (i = 0; i < fb->format->num_planes; ++i) { + struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i); + struct sg_table *sgt = &sg_tables[i]; ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr, gem->paddr, gem->base.size); @@ -240,15 +204,11 @@ static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane, } } - ret = drm_gem_fb_prepare_fb(plane, state); - if (ret) - goto fail; - return 0; fail: while (i--) { - struct sg_table *sgt = &rstate->sg_tables[i]; + struct sg_table *sgt = &sg_tables[i]; vsp1_du_unmap_sg(vsp->vsp, sgt); sg_free_table(sgt); @@ -257,24 +217,52 @@ fail: return ret; } -static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *state) +static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state) { struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state); struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp; - unsigned int i; + int ret; + /* + * There's no need to prepare (and unprepare) the framebuffer when the + * plane is not visible, as it will not be displayed. 
+ */ if (!state->visible) - return; + return 0; + + ret = rcar_du_vsp_map_fb(vsp, state->fb, rstate->sg_tables); + if (ret < 0) + return ret; + + return drm_gem_fb_prepare_fb(plane, state); +} + +void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb, + struct sg_table sg_tables[3]) +{ + unsigned int i; - for (i = 0; i < rstate->format->planes; ++i) { - struct sg_table *sgt = &rstate->sg_tables[i]; + for (i = 0; i < fb->format->num_planes; ++i) { + struct sg_table *sgt = &sg_tables[i]; vsp1_du_unmap_sg(vsp->vsp, sgt); sg_free_table(sgt); } } +static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state); + struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp; + + if (!state->visible) + return; + + rcar_du_vsp_unmap_fb(vsp, state->fb, rstate->sg_tables); +} + static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { @@ -395,8 +383,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs, &rcar_du_vsp_plane_funcs, - formats_kms, - ARRAY_SIZE(formats_kms), + rcar_du_vsp_formats, + ARRAY_SIZE(rcar_du_vsp_formats), NULL, type, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h index db232037f24a..9b4724159378 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h @@ -12,8 +12,10 @@ #include <drm/drm_plane.h> +struct drm_framebuffer; struct rcar_du_format_info; struct rcar_du_vsp; +struct sg_table; struct rcar_du_vsp_plane { struct drm_plane plane; @@ -60,6 +62,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc); void rcar_du_vsp_disable(struct rcar_du_crtc *crtc); void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc); void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc); +int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb, + struct sg_table sg_tables[3]); +void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb, + struct sg_table sg_tables[3]); #else static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, @@ -71,6 +77,17 @@ static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { }; static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { }; static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { }; static inline void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) { }; +static inline int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, + struct drm_framebuffer *fb, + struct sg_table sg_tables[3]) +{ + return -ENXIO; +} +static inline void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, + struct drm_framebuffer *fb, + struct sg_table sg_tables[3]) +{ +} #endif #endif /* __RCAR_DU_VSP_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c new file mode 100644 index 000000000000..989a0be94131 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * rcar_du_writeback.c -- R-Car Display Unit Writeback Support + * + * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + */ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_device.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_writeback.h> + +#include "rcar_du_crtc.h" 
+#include "rcar_du_drv.h" +#include "rcar_du_kms.h" + +/** + * struct rcar_du_wb_conn_state - Driver-specific writeback connector state + * @state: base DRM connector state + * @format: format of the writeback framebuffer + */ +struct rcar_du_wb_conn_state { + struct drm_connector_state state; + const struct rcar_du_format_info *format; +}; + +#define to_rcar_wb_conn_state(s) \ + container_of(s, struct rcar_du_wb_conn_state, state) + +/** + * struct rcar_du_wb_job - Driver-private data for writeback jobs + * @sg_tables: scatter-gather tables for the framebuffer memory + */ +struct rcar_du_wb_job { + struct sg_table sg_tables[3]; +}; + +static int rcar_du_wb_conn_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + return drm_add_modes_noedid(connector, dev->mode_config.max_width, + dev->mode_config.max_height); +} + +static int rcar_du_wb_prepare_job(struct drm_writeback_connector *connector, + struct drm_writeback_job *job) +{ + struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector); + struct rcar_du_wb_job *rjob; + int ret; + + if (!job->fb) + return 0; + + rjob = kzalloc(sizeof(*rjob), GFP_KERNEL); + if (!rjob) + return -ENOMEM; + + /* Map the framebuffer to the VSP. */ + ret = rcar_du_vsp_map_fb(rcrtc->vsp, job->fb, rjob->sg_tables); + if (ret < 0) { + kfree(rjob); + return ret; + } + + job->priv = rjob; + return 0; +} + +static void rcar_du_wb_cleanup_job(struct drm_writeback_connector *connector, + struct drm_writeback_job *job) +{ + struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector); + struct rcar_du_wb_job *rjob = job->priv; + + if (!job->fb) + return; + + rcar_du_vsp_unmap_fb(rcrtc->vsp, job->fb, rjob->sg_tables); + kfree(rjob); +} + +static const struct drm_connector_helper_funcs rcar_du_wb_conn_helper_funcs = { + .get_modes = rcar_du_wb_conn_get_modes, + .prepare_writeback_job = rcar_du_wb_prepare_job, + .cleanup_writeback_job = rcar_du_wb_cleanup_job, +}; + +static struct drm_connector_state * +rcar_du_wb_conn_duplicate_state(struct drm_connector *connector) +{ + struct rcar_du_wb_conn_state *copy; + + if (WARN_ON(!connector->state)) + return NULL; + + copy = kzalloc(sizeof(*copy), GFP_KERNEL); + if (!copy) + return NULL; + + __drm_atomic_helper_connector_duplicate_state(connector, ©->state); + + return ©->state; +} + +static void rcar_du_wb_conn_destroy_state(struct drm_connector *connector, + struct drm_connector_state *state) +{ + __drm_atomic_helper_connector_destroy_state(state); + kfree(to_rcar_wb_conn_state(state)); +} + +static void rcar_du_wb_conn_reset(struct drm_connector *connector) +{ + struct rcar_du_wb_conn_state *state; + + if (connector->state) { + rcar_du_wb_conn_destroy_state(connector, connector->state); + connector->state = NULL; + } + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state == NULL) + return; + + __drm_atomic_helper_connector_reset(connector, &state->state); +} + +static const struct drm_connector_funcs rcar_du_wb_conn_funcs = { + .reset = rcar_du_wb_conn_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = rcar_du_wb_conn_duplicate_state, + .atomic_destroy_state = rcar_du_wb_conn_destroy_state, +}; + +static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct rcar_du_wb_conn_state *wb_state = + to_rcar_wb_conn_state(conn_state); + const struct drm_display_mode *mode = &crtc_state->mode; + struct drm_device *dev = 
encoder->dev; + struct drm_framebuffer *fb; + + if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + return 0; + + fb = conn_state->writeback_job->fb; + + /* + * Verify that the framebuffer format is supported and that its size + * matches the current mode. + */ + if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) { + dev_dbg(dev->dev, "%s: invalid framebuffer size %ux%u\n", + __func__, fb->width, fb->height); + return -EINVAL; + } + + wb_state->format = rcar_du_format_info(fb->format->format); + if (wb_state->format == NULL) { + dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__, + fb->format->format); + return -EINVAL; + } + + return 0; +} + +static const struct drm_encoder_helper_funcs rcar_du_wb_enc_helper_funcs = { + .atomic_check = rcar_du_wb_enc_atomic_check, +}; + +/* + * Only RGB formats are currently supported as the VSP outputs RGB to the DU + * and can't convert to YUV separately for writeback. + */ +static const u32 writeback_formats[] = { + DRM_FORMAT_RGB332, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB8888, +}; + +int rcar_du_writeback_init(struct rcar_du_device *rcdu, + struct rcar_du_crtc *rcrtc) +{ + struct drm_writeback_connector *wb_conn = &rcrtc->writeback; + + wb_conn->encoder.possible_crtcs = 1 << drm_crtc_index(&rcrtc->crtc); + drm_connector_helper_add(&wb_conn->base, + &rcar_du_wb_conn_helper_funcs); + + return drm_writeback_connector_init(rcdu->ddev, wb_conn, + &rcar_du_wb_conn_funcs, + &rcar_du_wb_enc_helper_funcs, + writeback_formats, + ARRAY_SIZE(writeback_formats)); +} + +void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc, + struct vsp1_du_writeback_config *cfg) +{ + struct rcar_du_wb_conn_state *wb_state; + struct drm_connector_state *state; + struct rcar_du_wb_job *rjob; + struct drm_framebuffer *fb; + unsigned int i; + + state = rcrtc->writeback.base.state; + if (!state || !state->writeback_job || !state->writeback_job->fb) + return; + + fb = state->writeback_job->fb; + rjob = state->writeback_job->priv; + wb_state = to_rcar_wb_conn_state(state); + + cfg->pixelformat = wb_state->format->v4l2; + cfg->pitch = fb->pitches[0]; + + for (i = 0; i < wb_state->format->planes; ++i) + cfg->mem[i] = sg_dma_address(rjob->sg_tables[i].sgl) + + fb->offsets[i]; + + drm_writeback_queue_job(&rcrtc->writeback, state); +} + +void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc) +{ + drm_writeback_signal_completion(&rcrtc->writeback, 0); +} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.h b/drivers/gpu/drm/rcar-du/rcar_du_writeback.h new file mode 100644 index 000000000000..fa87ebf8d21f --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * rcar_du_writeback.h -- R-Car Display Unit Writeback Support + * + * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + */ + +#ifndef __RCAR_DU_WRITEBACK_H__ +#define __RCAR_DU_WRITEBACK_H__ + +#include <drm/drm_plane.h> + +struct rcar_du_crtc; +struct rcar_du_device; +struct vsp1_du_atomic_pipe_config; + +#ifdef CONFIG_DRM_RCAR_WRITEBACK +int rcar_du_writeback_init(struct rcar_du_device *rcdu, + struct rcar_du_crtc *rcrtc); +void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc, + struct vsp1_du_writeback_config *cfg); +void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc); +#else 
+static inline int rcar_du_writeback_init(struct rcar_du_device *rcdu, + struct rcar_du_crtc *rcrtc) +{ + return -ENXIO; +} +static inline void +rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc, + struct vsp1_du_writeback_config *cfg) +{ +} +static inline void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc) +{ +} +#endif + +#endif /* __RCAR_DU_WRITEBACK_H__ */ diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c index fbed2c90fd51..286a0eeefcb6 100644 --- a/drivers/gpu/drm/selftests/test-drm_mm.c +++ b/drivers/gpu/drm/selftests/test-drm_mm.c @@ -1615,7 +1615,7 @@ static int igt_topdown(void *ignored) DRM_RND_STATE(prng, random_seed); const unsigned int count = 8192; unsigned int size; - unsigned long *bitmap = NULL; + unsigned long *bitmap; struct drm_mm mm; struct drm_mm_node *nodes, *node, *next; unsigned int *order, n, m, o = 0; @@ -1631,8 +1631,7 @@ static int igt_topdown(void *ignored) if (!nodes) goto err; - bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long), - GFP_KERNEL); + bitmap = bitmap_zalloc(count, GFP_KERNEL); if (!bitmap) goto err_nodes; @@ -1717,7 +1716,7 @@ out: drm_mm_takedown(&mm); kfree(order); err_bitmap: - kfree(bitmap); + bitmap_free(bitmap); err_nodes: vfree(nodes); err: @@ -1745,8 +1744,7 @@ static int igt_bottomup(void *ignored) if (!nodes) goto err; - bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long), - GFP_KERNEL); + bitmap = bitmap_zalloc(count, GFP_KERNEL); if (!bitmap) goto err_nodes; @@ -1818,7 +1816,7 @@ out: drm_mm_takedown(&mm); kfree(order); err_bitmap: - kfree(bitmap); + bitmap_free(bitmap); err_nodes: vfree(nodes); err: diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig index 35367ada3bc1..d15b10de1da6 100644 --- a/drivers/gpu/drm/stm/Kconfig +++ b/drivers/gpu/drm/stm/Kconfig @@ -6,7 +6,7 @@ config DRM_STM select DRM_KMS_CMA_HELPER select DRM_PANEL_BRIDGE select VIDEOMODE_HELPERS - select FB_PROVIDE_GET_FB_UNMAPPED_AREA + select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB help Enable support for the on-chip display controller on diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 4c0d51f73237..ee59da4a0172 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -720,33 +720,22 @@ static int sun4i_backend_free_sat(struct device *dev) { */ static int sun4i_backend_of_get_id(struct device_node *node) { - struct device_node *port, *ep; - int ret = -EINVAL; + struct device_node *ep, *remote; + struct of_endpoint of_ep; - /* input is port 0 */ - port = of_graph_get_port_by_id(node, 0); - if (!port) + /* Input port is 0, and we want the first endpoint. 
*/ + ep = of_graph_get_endpoint_by_regs(node, 0, -1); + if (!ep) return -EINVAL; - /* try finding an upstream endpoint */ - for_each_available_child_of_node(port, ep) { - struct device_node *remote; - u32 reg; - - remote = of_graph_get_remote_endpoint(ep); - if (!remote) - continue; - - ret = of_property_read_u32(remote, "reg", ®); - if (ret) - continue; - - ret = reg; - } - - of_node_put(port); + remote = of_graph_get_remote_endpoint(ep); + of_node_put(ep); + if (!remote) + return -EINVAL; - return ret; + of_graph_parse_endpoint(remote, &of_ep); + of_node_put(remote); + return of_ep.id; } /* TODO: This needs to take multiple pipelines into account */ diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c index 147b97ed1a09..3a3ba99fed22 100644 --- a/drivers/gpu/drm/sun4i/sun4i_lvds.c +++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c @@ -20,7 +20,7 @@ struct sun4i_lvds { struct drm_connector connector; struct drm_encoder encoder; - struct sun4i_tcon *tcon; + struct drm_panel *panel; }; static inline struct sun4i_lvds * @@ -41,9 +41,8 @@ static int sun4i_lvds_get_modes(struct drm_connector *connector) { struct sun4i_lvds *lvds = drm_connector_to_sun4i_lvds(connector); - struct sun4i_tcon *tcon = lvds->tcon; - return drm_panel_get_modes(tcon->panel); + return drm_panel_get_modes(lvds->panel); } static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = { @@ -54,9 +53,8 @@ static void sun4i_lvds_connector_destroy(struct drm_connector *connector) { struct sun4i_lvds *lvds = drm_connector_to_sun4i_lvds(connector); - struct sun4i_tcon *tcon = lvds->tcon; - drm_panel_detach(tcon->panel); + drm_panel_detach(lvds->panel); drm_connector_cleanup(connector); } @@ -71,26 +69,24 @@ static const struct drm_connector_funcs sun4i_lvds_con_funcs = { static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder) { struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder); - struct sun4i_tcon *tcon = lvds->tcon; DRM_DEBUG_DRIVER("Enabling LVDS output\n"); - if (tcon->panel) { - drm_panel_prepare(tcon->panel); - drm_panel_enable(tcon->panel); + if (lvds->panel) { + drm_panel_prepare(lvds->panel); + drm_panel_enable(lvds->panel); } } static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder) { struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder); - struct sun4i_tcon *tcon = lvds->tcon; DRM_DEBUG_DRIVER("Disabling LVDS output\n"); - if (tcon->panel) { - drm_panel_disable(tcon->panel); - drm_panel_unprepare(tcon->panel); + if (lvds->panel) { + drm_panel_disable(lvds->panel); + drm_panel_unprepare(lvds->panel); } } @@ -113,11 +109,10 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon) lvds = devm_kzalloc(drm->dev, sizeof(*lvds), GFP_KERNEL); if (!lvds) return -ENOMEM; - lvds->tcon = tcon; encoder = &lvds->encoder; ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0, - &tcon->panel, &bridge); + &lvds->panel, &bridge); if (ret) { dev_info(drm->dev, "No panel or bridge found... 
LVDS output disabled\n"); return 0; @@ -138,7 +133,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon) /* The LVDS encoder can only work with the TCON channel 0 */ lvds->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc); - if (tcon->panel) { + if (lvds->panel) { drm_connector_helper_add(&lvds->connector, &sun4i_lvds_con_helper_funcs); ret = drm_connector_init(drm, &lvds->connector, @@ -152,7 +147,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon) drm_connector_attach_encoder(&lvds->connector, &lvds->encoder); - ret = drm_panel_attach(tcon->panel, &lvds->connector); + ret = drm_panel_attach(lvds->panel, &lvds->connector); if (ret) { dev_err(drm->dev, "Couldn't attach our panel\n"); goto err_cleanup_connector; diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index cae19e7bbeaa..d9e2502b49fa 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -27,6 +27,8 @@ struct sun4i_rgb { struct drm_encoder encoder; struct sun4i_tcon *tcon; + struct drm_panel *panel; + struct drm_bridge *bridge; }; static inline struct sun4i_rgb * @@ -47,11 +49,18 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector) { struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); - struct sun4i_tcon *tcon = rgb->tcon; - return drm_panel_get_modes(tcon->panel); + return drm_panel_get_modes(rgb->panel); } +/* + * VESA DMT defines a tolerance of 0.5% on the pixel clock, while the + * CVT spec reuses that tolerance in its examples, so it looks to be a + * good default tolerance for the EDID-based modes. Define it to 5 per + * mille to avoid floating point operations. + */ +#define SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE 5 + static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc, const struct drm_display_mode *mode) { @@ -59,8 +68,9 @@ static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc, struct sun4i_tcon *tcon = rgb->tcon; u32 hsync = mode->hsync_end - mode->hsync_start; u32 vsync = mode->vsync_end - mode->vsync_start; - unsigned long rate = mode->clock * 1000; - long rounded_rate; + unsigned long long rate = mode->clock * 1000; + unsigned long long lowest, highest; + unsigned long long rounded_rate; DRM_DEBUG_DRIVER("Validating modes...\n"); @@ -92,15 +102,39 @@ static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc, DRM_DEBUG_DRIVER("Vertical parameters OK\n"); + /* + * TODO: We should use the struct display_timing if available + * and / or trying to stretch the timings within that + * tolerancy to take care of panels that we wouldn't be able + * to have a exact match for. + */ + if (rgb->panel) { + DRM_DEBUG_DRIVER("RGB panel used, skipping clock rate checks"); + goto out; + } + + /* + * That shouldn't ever happen unless something is really wrong, but it + * doesn't harm to check. 
+ */ + if (!rgb->bridge) + goto out; + tcon->dclk_min_div = 6; tcon->dclk_max_div = 127; rounded_rate = clk_round_rate(tcon->dclk, rate); - if (rounded_rate < rate) + + lowest = rate * (1000 - SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE); + do_div(lowest, 1000); + if (rounded_rate < lowest) return MODE_CLOCK_LOW; - if (rounded_rate > rate) + highest = rate * (1000 + SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE); + do_div(highest, 1000); + if (rounded_rate > highest) return MODE_CLOCK_HIGH; +out: DRM_DEBUG_DRIVER("Clock rate OK\n"); return MODE_OK; @@ -114,9 +148,8 @@ static void sun4i_rgb_connector_destroy(struct drm_connector *connector) { struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); - struct sun4i_tcon *tcon = rgb->tcon; - drm_panel_detach(tcon->panel); + drm_panel_detach(rgb->panel); drm_connector_cleanup(connector); } @@ -131,26 +164,24 @@ static const struct drm_connector_funcs sun4i_rgb_con_funcs = { static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder) { struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); - struct sun4i_tcon *tcon = rgb->tcon; DRM_DEBUG_DRIVER("Enabling RGB output\n"); - if (tcon->panel) { - drm_panel_prepare(tcon->panel); - drm_panel_enable(tcon->panel); + if (rgb->panel) { + drm_panel_prepare(rgb->panel); + drm_panel_enable(rgb->panel); } } static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) { struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); - struct sun4i_tcon *tcon = rgb->tcon; DRM_DEBUG_DRIVER("Disabling RGB output\n"); - if (tcon->panel) { - drm_panel_disable(tcon->panel); - drm_panel_unprepare(tcon->panel); + if (rgb->panel) { + drm_panel_disable(rgb->panel); + drm_panel_unprepare(rgb->panel); } } @@ -172,7 +203,6 @@ static struct drm_encoder_funcs sun4i_rgb_enc_funcs = { int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) { struct drm_encoder *encoder; - struct drm_bridge *bridge; struct sun4i_rgb *rgb; int ret; @@ -183,7 +213,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) encoder = &rgb->encoder; ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0, - &tcon->panel, &bridge); + &rgb->panel, &rgb->bridge); if (ret) { dev_info(drm->dev, "No panel or bridge found... 
RGB output disabled\n"); return 0; @@ -204,7 +234,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) /* The RGB encoder can only work with the TCON channel 0 */ rgb->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc); - if (tcon->panel) { + if (rgb->panel) { drm_connector_helper_add(&rgb->connector, &sun4i_rgb_con_helper_funcs); ret = drm_connector_init(drm, &rgb->connector, @@ -218,15 +248,15 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) drm_connector_attach_encoder(&rgb->connector, &rgb->encoder); - ret = drm_panel_attach(tcon->panel, &rgb->connector); + ret = drm_panel_attach(rgb->panel, &rgb->connector); if (ret) { dev_err(drm->dev, "Couldn't attach our panel\n"); goto err_cleanup_connector; } } - if (bridge) { - ret = drm_bridge_attach(encoder, bridge, NULL); + if (rgb->bridge) { + ret = drm_bridge_attach(encoder, rgb->bridge, NULL); if (ret) { dev_err(drm->dev, "Couldn't attach our bridge\n"); goto err_cleanup_connector; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 7136fc91c603..fa92e992a282 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -341,8 +341,8 @@ static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon, u32 block_space, start_delay; u32 tcon_div; - tcon->dclk_min_div = 4; - tcon->dclk_max_div = 127; + tcon->dclk_min_div = SUN6I_DSI_TCON_DIV; + tcon->dclk_max_div = SUN6I_DSI_TCON_DIV; sun4i_tcon0_mode_set_common(tcon, mode); @@ -561,10 +561,10 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, * Following code is a way to avoid quirks all around TCON * and DOTCLOCK drivers. */ - if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) + if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE) clk_set_phase(tcon->dclk, 240); - if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) + if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) clk_set_phase(tcon->dclk, 0); regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG, diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index b5214d71610f..84cfb1952ff7 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h @@ -257,8 +257,6 @@ struct sun4i_tcon { struct reset_control *lcd_rst; struct reset_control *lvds_rst; - struct drm_panel *panel; - /* Platform adjustments */ const struct sun4i_tcon_quirks *quirks; diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 318994cd1b85..6ff585055a07 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -24,7 +24,9 @@ #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> +#include "sun4i_crtc.h" #include "sun4i_drv.h" +#include "sun4i_tcon.h" #include "sun6i_mipi_dsi.h" #include <video/mipi_display.h> @@ -33,6 +35,8 @@ #define SUN6I_DSI_CTL_EN BIT(0) #define SUN6I_DSI_BASIC_CTL_REG 0x00c +#define SUN6I_DSI_BASIC_CTL_TRAIL_INV(n) (((n) & 0xf) << 4) +#define SUN6I_DSI_BASIC_CTL_TRAIL_FILL BIT(3) #define SUN6I_DSI_BASIC_CTL_HBP_DIS BIT(2) #define SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS BIT(1) #define SUN6I_DSI_BASIC_CTL_VIDEO_BURST BIT(0) @@ -153,6 +157,8 @@ #define SUN6I_DSI_CMD_TX_REG(n) (0x300 + (n) * 0x04) +#define SUN6I_DSI_SYNC_POINT 40 + enum sun6i_dsi_start_inst { DSI_START_LPRX, DSI_START_LPTX, @@ -358,7 +364,54 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi, static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi, struct 
drm_display_mode *mode) { - return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1; + u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100); + u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start; + + if (delay > mode->vtotal) + delay = delay % mode->vtotal; + + return max_t(u16, delay, 1); +} + +static u16 sun6i_dsi_get_line_num(struct sun6i_dsi *dsi, + struct drm_display_mode *mode) +{ + struct mipi_dsi_device *device = dsi->device; + unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8; + + return mode->htotal * Bpp / device->lanes; +} + +static u16 sun6i_dsi_get_drq_edge0(struct sun6i_dsi *dsi, + struct drm_display_mode *mode, + u16 line_num, u16 edge1) +{ + u16 edge0 = edge1; + + edge0 += (mode->hdisplay + 40) * SUN6I_DSI_TCON_DIV / 8; + + if (edge0 > line_num) + return edge0 - line_num; + + return 1; +} + +static u16 sun6i_dsi_get_drq_edge1(struct sun6i_dsi *dsi, + struct drm_display_mode *mode, + u16 line_num) +{ + struct mipi_dsi_device *device = dsi->device; + unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8; + unsigned int hbp = mode->htotal - mode->hsync_end; + u16 edge1; + + edge1 = SUN6I_DSI_SYNC_POINT; + edge1 += (mode->hdisplay + hbp + 20) * Bpp / device->lanes; + + if (edge1 > line_num) + return line_num; + + return edge1; } static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi, @@ -367,7 +420,23 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi, struct mipi_dsi_device *device = dsi->device; u32 val = 0; - if ((mode->hsync_end - mode->hdisplay) > 20) { + if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { + u16 line_num = sun6i_dsi_get_line_num(dsi, mode); + u16 edge0, edge1; + + edge1 = sun6i_dsi_get_drq_edge1(dsi, mode, line_num); + edge0 = sun6i_dsi_get_drq_edge0(dsi, mode, line_num, edge1); + + regmap_write(dsi->regs, SUN6I_DSI_BURST_DRQ_REG, + SUN6I_DSI_BURST_DRQ_EDGE0(edge0) | + SUN6I_DSI_BURST_DRQ_EDGE1(edge1)); + + regmap_write(dsi->regs, SUN6I_DSI_BURST_LINE_REG, + SUN6I_DSI_BURST_LINE_NUM(line_num) | + SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT)); + + val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE; + } else if ((mode->hsync_end - mode->hdisplay) > 20) { /* Maaaaaagic */ u16 drq = (mode->hsync_end - mode->hdisplay) - 20; @@ -384,8 +453,19 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi, static void sun6i_dsi_setup_inst_loop(struct sun6i_dsi *dsi, struct drm_display_mode *mode) { + struct mipi_dsi_device *device = dsi->device; u16 delay = 50 - 1; + if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { + delay = (mode->htotal - mode->hdisplay) * 150; + delay /= (mode->clock / 1000) * 8; + delay -= 50; + } + + regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_SEL_REG, + 2 << (4 * DSI_INST_ID_LP11) | + 3 << (4 * DSI_INST_ID_DLY)); + regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(0), SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) | SUN6I_DSI_INST_LOOP_NUM_N1(delay)); @@ -451,49 +531,68 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, { struct mipi_dsi_device *device = dsi->device; unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8; - u16 hbp, hfp, hsa, hblk, vblk; + u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0; + u32 basic_ctl = 0; size_t bytes; u8 *buffer; /* Do all timing calculations up front to allocate buffer space */ - /* - * A sync period is composed of a blanking packet (4 bytes + - * payload + 2 bytes) and a sync event packet (4 bytes). 
Its - * minimal size is therefore 10 bytes - */ -#define HSA_PACKET_OVERHEAD 10 - hsa = max((unsigned int)HSA_PACKET_OVERHEAD, - (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD); + if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { + hblk = mode->hdisplay * Bpp; + basic_ctl = SUN6I_DSI_BASIC_CTL_VIDEO_BURST | + SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS | + SUN6I_DSI_BASIC_CTL_HBP_DIS; - /* - * The backporch is set using a blanking packet (4 bytes + - * payload + 2 bytes). Its minimal size is therefore 6 bytes - */ + if (device->lanes == 4) + basic_ctl |= SUN6I_DSI_BASIC_CTL_TRAIL_FILL | + SUN6I_DSI_BASIC_CTL_TRAIL_INV(0xc); + } else { + /* + * A sync period is composed of a blanking packet (4 + * bytes + payload + 2 bytes) and a sync event packet + * (4 bytes). Its minimal size is therefore 10 bytes + */ +#define HSA_PACKET_OVERHEAD 10 + hsa = max((unsigned int)HSA_PACKET_OVERHEAD, + (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD); + + /* + * The backporch is set using a blanking packet (4 + * bytes + payload + 2 bytes). Its minimal size is + * therefore 6 bytes + */ #define HBP_PACKET_OVERHEAD 6 - hbp = max((unsigned int)HBP_PACKET_OVERHEAD, - (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD); - - /* - * The frontporch is set using a blanking packet (4 bytes + - * payload + 2 bytes). Its minimal size is therefore 6 bytes - */ + hbp = max((unsigned int)HBP_PACKET_OVERHEAD, + (mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD); + + /* + * The frontporch is set using a blanking packet (4 + * bytes + payload + 2 bytes). Its minimal size is + * therefore 6 bytes + */ #define HFP_PACKET_OVERHEAD 6 - hfp = max((unsigned int)HFP_PACKET_OVERHEAD, - (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD); - - /* - * hblk seems to be the line + porches length. - */ - hblk = mode->htotal * Bpp - hsa; - - /* - * And I'm not entirely sure what vblk is about. The driver in - * Allwinner BSP is using a rather convoluted calculation - * there only for 4 lanes. However, using 0 (the !4 lanes - * case) even with a 4 lanes screen seems to work... - */ - vblk = 0; + hfp = max((unsigned int)HFP_PACKET_OVERHEAD, + (mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD); + + /* + * The blanking is set using a sync event (4 bytes) + * and a blanking packet (4 bytes + payload + 2 + * bytes). Its minimal size is therefore 10 bytes. + */ +#define HBLK_PACKET_OVERHEAD 10 + hblk = max((unsigned int)HBLK_PACKET_OVERHEAD, + (mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp - + HBLK_PACKET_OVERHEAD); + + /* + * And I'm not entirely sure what vblk is about. The driver in + * Allwinner BSP is using a rather convoluted calculation + * there only for 4 lanes. However, using 0 (the !4 lanes + * case) even with a 4 lanes screen seems to work... + */ + vblk = 0; + } /* How many bytes do we need to send all payloads? 
*/ bytes = max_t(size_t, max(max(hfp, hblk), max(hsa, hbp)), vblk); @@ -501,7 +600,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, if (WARN_ON(!buffer)) return; - regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0); + regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, basic_ctl); regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSS_REG, sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_START, @@ -526,8 +625,8 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE0_REG, SUN6I_DSI_BASIC_SIZE0_VSA(mode->vsync_end - mode->vsync_start) | - SUN6I_DSI_BASIC_SIZE0_VBP(mode->vsync_start - - mode->vdisplay)); + SUN6I_DSI_BASIC_SIZE0_VBP(mode->vtotal - + mode->vsync_end)); regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE1_REG, SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) | diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h index a07090579f84..5c3ad5be0690 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h @@ -13,6 +13,8 @@ #include <drm/drm_encoder.h> #include <drm/drm_mipi_dsi.h> +#define SUN6I_DSI_TCON_DIV 4 + struct sun6i_dsi { struct drm_connector connector; struct drm_encoder encoder; diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index 30a2eff55687..fd20a928cf4d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -325,38 +325,22 @@ static struct regmap_config sun8i_mixer_regmap_config = { static int sun8i_mixer_of_get_id(struct device_node *node) { - struct device_node *port, *ep; - int ret = -EINVAL; + struct device_node *ep, *remote; + struct of_endpoint of_ep; - /* output is port 1 */ - port = of_graph_get_port_by_id(node, 1); - if (!port) + /* Output port is 1, and we want the first endpoint. 
*/ + ep = of_graph_get_endpoint_by_regs(node, 1, -1); + if (!ep) return -EINVAL; - /* try to find downstream endpoint */ - for_each_available_child_of_node(port, ep) { - struct device_node *remote; - u32 reg; - - remote = of_graph_get_remote_endpoint(ep); - if (!remote) - continue; - - ret = of_property_read_u32(remote, "reg", ®); - if (!ret) { - of_node_put(remote); - of_node_put(ep); - of_node_put(port); - - return reg; - } - - of_node_put(remote); - } - - of_node_put(port); + remote = of_graph_get_remote_endpoint(ep); + of_node_put(ep); + if (!remote) + return -EINVAL; - return ret; + of_graph_parse_endpoint(remote, &of_ep); + of_node_put(remote); + return of_ep.id; } static int sun8i_mixer_bind(struct device *dev, struct device *master, @@ -554,6 +538,7 @@ static int sun8i_mixer_remove(struct platform_device *pdev) static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = { .ccsc = 0, .scaler_mask = 0xf, + .scanline_yuv = 2048, .ui_num = 3, .vi_num = 1, }; @@ -561,6 +546,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = { static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = { .ccsc = 1, .scaler_mask = 0x3, + .scanline_yuv = 2048, .ui_num = 1, .vi_num = 1, }; @@ -569,6 +555,7 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = { .ccsc = 0, .mod_rate = 432000000, .scaler_mask = 0xf, + .scanline_yuv = 2048, .ui_num = 3, .vi_num = 1, }; @@ -577,6 +564,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = { .ccsc = 0, .mod_rate = 297000000, .scaler_mask = 0xf, + .scanline_yuv = 2048, .ui_num = 3, .vi_num = 1, }; @@ -585,6 +573,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = { .ccsc = 1, .mod_rate = 297000000, .scaler_mask = 0x3, + .scanline_yuv = 2048, .ui_num = 1, .vi_num = 1, }; @@ -593,6 +582,7 @@ static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = { .vi_num = 2, .ui_num = 1, .scaler_mask = 0x3, + .scanline_yuv = 2048, .ccsc = 0, .mod_rate = 150000000, }; @@ -601,6 +591,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = { .ccsc = 0, .mod_rate = 297000000, .scaler_mask = 0xf, + .scanline_yuv = 4096, .ui_num = 3, .vi_num = 1, }; @@ -609,6 +600,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = { .ccsc = 1, .mod_rate = 297000000, .scaler_mask = 0x3, + .scanline_yuv = 2048, .ui_num = 1, .vi_num = 1, }; @@ -618,6 +610,7 @@ static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = { .is_de3 = true, .mod_rate = 600000000, .scaler_mask = 0xf, + .scanline_yuv = 4096, .ui_num = 3, .vi_num = 1, }; diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index 913d14ce68b0..80e084caa084 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -159,6 +159,7 @@ struct de2_fmt_info { * @mod_rate: module clock rate that needs to be set in order to have * a functional block. * @is_de3: true, if this is next gen display engine 3.0, false otherwise. + * @scaline_yuv: size of a scanline for VI scaler for YUV formats. 
*/ struct sun8i_mixer_cfg { int vi_num; @@ -167,6 +168,7 @@ struct sun8i_mixer_cfg { int ccsc; unsigned long mod_rate; unsigned int is_de3 : 1; + unsigned int scanline_yuv; }; struct sun8i_mixer { diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c index 8a0616238467..bb8e026d6405 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c @@ -80,6 +80,8 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, u32 bld_base, ch_base; u32 outsize, insize; u32 hphase, vphase; + u32 hn = 0, hm = 0; + u32 vn = 0, vm = 0; bool subsampled; DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n", @@ -137,12 +139,41 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, subsampled = format->hsub > 1 || format->vsub > 1; if (insize != outsize || subsampled || hphase || vphase) { - u32 hscale, vscale; + unsigned int scanline, required; + struct drm_display_mode *mode; + u32 hscale, vscale, fps; + u64 ability; DRM_DEBUG_DRIVER("HW scaling is enabled\n"); - hscale = state->src_w / state->crtc_w; - vscale = state->src_h / state->crtc_h; + mode = &plane->state->crtc->state->mode; + fps = (mode->clock * 1000) / (mode->vtotal * mode->htotal); + ability = clk_get_rate(mixer->mod_clk); + /* BSP algorithm assumes 80% efficiency of VI scaler unit */ + ability *= 80; + do_div(ability, mode->vdisplay * fps * max(src_w, dst_w)); + + required = src_h * 100 / dst_h; + + if (ability < required) { + DRM_DEBUG_DRIVER("Using vertical coarse scaling\n"); + vm = src_h; + vn = (u32)ability * dst_h / 100; + src_h = vn; + } + + /* it seems that every RGB scaler has buffer for 2048 pixels */ + scanline = subsampled ? mixer->cfg->scanline_yuv : 2048; + + if (src_w > scanline) { + DRM_DEBUG_DRIVER("Using horizontal coarse scaling\n"); + hm = src_w; + hn = scanline; + src_w = hn; + } + + hscale = (src_w << 16) / dst_w; + vscale = (src_h << 16) / dst_h; sun8i_vi_scaler_setup(mixer, channel, src_w, src_h, dst_w, dst_h, hscale, vscale, hphase, vphase, @@ -153,6 +184,23 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel, sun8i_vi_scaler_enable(mixer, channel, false); } + regmap_write(mixer->engine.regs, + SUN8I_MIXER_CHAN_VI_HDS_Y(ch_base), + SUN8I_MIXER_CHAN_VI_DS_N(hn) | + SUN8I_MIXER_CHAN_VI_DS_M(hm)); + regmap_write(mixer->engine.regs, + SUN8I_MIXER_CHAN_VI_HDS_UV(ch_base), + SUN8I_MIXER_CHAN_VI_DS_N(hn) | + SUN8I_MIXER_CHAN_VI_DS_M(hm)); + regmap_write(mixer->engine.regs, + SUN8I_MIXER_CHAN_VI_VDS_Y(ch_base), + SUN8I_MIXER_CHAN_VI_DS_N(vn) | + SUN8I_MIXER_CHAN_VI_DS_M(vm)); + regmap_write(mixer->engine.regs, + SUN8I_MIXER_CHAN_VI_VDS_UV(ch_base), + SUN8I_MIXER_CHAN_VI_DS_N(vn) | + SUN8I_MIXER_CHAN_VI_DS_M(vm)); + /* Set base coordinates */ DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n", state->dst.x1, state->dst.y1); diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h index 8a5e6d01c85d..a223a4839f45 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h @@ -24,6 +24,14 @@ ((base) + 0x30 * (layer) + 0x18 + 4 * (plane)) #define SUN8I_MIXER_CHAN_VI_OVL_SIZE(base) \ ((base) + 0xe8) +#define SUN8I_MIXER_CHAN_VI_HDS_Y(base) \ + ((base) + 0xf0) +#define SUN8I_MIXER_CHAN_VI_HDS_UV(base) \ + ((base) + 0xf4) +#define SUN8I_MIXER_CHAN_VI_VDS_Y(base) \ + ((base) + 0xf8) +#define SUN8I_MIXER_CHAN_VI_VDS_UV(base) \ + ((base) + 0xfc) #define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN BIT(0) /* 
RGB mode should be set for RGB formats and cleared for YCbCr */ @@ -33,6 +41,9 @@ #define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24) #define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(x) ((x) << 24) +#define SUN8I_MIXER_CHAN_VI_DS_N(x) ((x) << 16) +#define SUN8I_MIXER_CHAN_VI_DS_M(x) ((x) << 0) + struct sun8i_mixer; struct sun8i_vi_layer { diff --git a/drivers/gpu/drm/tinydrm/core/Makefile b/drivers/gpu/drm/tinydrm/core/Makefile index fb221e6f8885..6f8f764560e0 100644 --- a/drivers/gpu/drm/tinydrm/core/Makefile +++ b/drivers/gpu/drm/tinydrm/core/Makefile @@ -1,3 +1,3 @@ -tinydrm-y := tinydrm-core.o tinydrm-pipe.o tinydrm-helpers.o +tinydrm-y := tinydrm-pipe.o tinydrm-helpers.o obj-$(CONFIG_DRM_TINYDRM) += tinydrm.o diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c deleted file mode 100644 index 554abd5d3b53..000000000000 --- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright (C) 2016 Noralf Trønnes - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <drm/drm_atomic.h> -#include <drm/drm_atomic_helper.h> -#include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> -#include <drm/drm_probe_helper.h> -#include <drm/drm_print.h> -#include <drm/tinydrm/tinydrm.h> -#include <linux/device.h> -#include <linux/dma-buf.h> -#include <linux/module.h> - -/** - * DOC: overview - * - * This library provides driver helpers for very simple display hardware. - * - * It is based on &drm_simple_display_pipe coupled with a &drm_connector which - * has only one fixed &drm_display_mode. The framebuffers are backed by the - * cma helper and have support for framebuffer flushing (dirty). - * fbdev support is also included. - * - */ - -/** - * DOC: core - * - * The driver allocates &tinydrm_device, initializes it using - * devm_tinydrm_init(), sets up the pipeline using tinydrm_display_pipe_init() - * and registers the DRM device using devm_tinydrm_register(). - */ - -static const struct drm_mode_config_funcs tinydrm_mode_config_funcs = { - .fb_create = drm_gem_fb_create_with_dirty, - .atomic_check = drm_atomic_helper_check, - .atomic_commit = drm_atomic_helper_commit, -}; - -static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev, - struct drm_driver *driver) -{ - struct drm_device *drm; - - /* - * We don't embed drm_device, because that prevent us from using - * devm_kzalloc() to allocate tinydrm_device in the driver since - * drm_dev_put() frees the structure. The devm_ functions provide - * for easy error handling. 
- */ - drm = drm_dev_alloc(driver, parent); - if (IS_ERR(drm)) - return PTR_ERR(drm); - - tdev->drm = drm; - drm->dev_private = tdev; - drm_mode_config_init(drm); - drm->mode_config.funcs = &tinydrm_mode_config_funcs; - drm->mode_config.allow_fb_modifiers = true; - - return 0; -} - -static void tinydrm_fini(struct tinydrm_device *tdev) -{ - drm_mode_config_cleanup(tdev->drm); - tdev->drm->dev_private = NULL; - drm_dev_put(tdev->drm); -} - -static void devm_tinydrm_release(void *data) -{ - tinydrm_fini(data); -} - -/** - * devm_tinydrm_init - Initialize tinydrm device - * @parent: Parent device object - * @tdev: tinydrm device - * @driver: DRM driver - * - * This function initializes @tdev, the underlying DRM device and it's - * mode_config. Resources will be automatically freed on driver detach (devres) - * using drm_mode_config_cleanup() and drm_dev_put(). - * - * Returns: - * Zero on success, negative error code on failure. - */ -int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev, - struct drm_driver *driver) -{ - int ret; - - ret = tinydrm_init(parent, tdev, driver); - if (ret) - return ret; - - ret = devm_add_action(parent, devm_tinydrm_release, tdev); - if (ret) - tinydrm_fini(tdev); - - return ret; -} -EXPORT_SYMBOL(devm_tinydrm_init); - -static int tinydrm_register(struct tinydrm_device *tdev) -{ - struct drm_device *drm = tdev->drm; - int ret; - - ret = drm_dev_register(tdev->drm, 0); - if (ret) - return ret; - - ret = drm_fbdev_generic_setup(drm, 0); - if (ret) - DRM_ERROR("Failed to initialize fbdev: %d\n", ret); - - return 0; -} - -static void tinydrm_unregister(struct tinydrm_device *tdev) -{ - drm_atomic_helper_shutdown(tdev->drm); - drm_dev_unregister(tdev->drm); -} - -static void devm_tinydrm_register_release(void *data) -{ - tinydrm_unregister(data); -} - -/** - * devm_tinydrm_register - Register tinydrm device - * @tdev: tinydrm device - * - * This function registers the underlying DRM device and fbdev. - * These resources will be automatically unregistered on driver detach (devres) - * and the display pipeline will be disabled. - * - * Returns: - * Zero on success, negative error code on failure. - */ -int devm_tinydrm_register(struct tinydrm_device *tdev) -{ - struct device *dev = tdev->drm->dev; - int ret; - - ret = tinydrm_register(tdev); - if (ret) - return ret; - - ret = devm_add_action(dev, devm_tinydrm_register_release, tdev); - if (ret) - tinydrm_unregister(tdev); - - return ret; -} -EXPORT_SYMBOL(devm_tinydrm_register); - -/** - * tinydrm_shutdown - Shutdown tinydrm - * @tdev: tinydrm device - * - * This function makes sure that the display pipeline is disabled. - * Used by drivers in their shutdown callback to turn off the display - * on machine shutdown and reboot. 
- */ -void tinydrm_shutdown(struct tinydrm_device *tdev) -{ - drm_atomic_helper_shutdown(tdev->drm); -} -EXPORT_SYMBOL(tinydrm_shutdown); - -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c index 2737b6fdadc8..d7b38dfb6438 100644 --- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c +++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c @@ -365,3 +365,5 @@ int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz, EXPORT_SYMBOL(tinydrm_spi_transfer); #endif /* CONFIG_SPI */ + +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c index bb5b1c1e21ba..bb8a7ed8ddf6 100644 --- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c +++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c @@ -13,7 +13,7 @@ #include <drm/drm_modes.h> #include <drm/drm_probe_helper.h> #include <drm/drm_print.h> -#include <drm/tinydrm/tinydrm.h> +#include <drm/drm_simple_kms_helper.h> struct tinydrm_connector { struct drm_connector base; @@ -129,7 +129,8 @@ static int tinydrm_rotate_mode(struct drm_display_mode *mode, /** * tinydrm_display_pipe_init - Initialize display pipe - * @tdev: tinydrm device + * @drm: DRM device + * @pipe: Display pipe * @funcs: Display pipe functions * @connector_type: Connector type * @formats: Array of supported formats (DRM_FORMAT\_\*) @@ -143,16 +144,15 @@ static int tinydrm_rotate_mode(struct drm_display_mode *mode, * Returns: * Zero on success, negative error code on failure. */ -int -tinydrm_display_pipe_init(struct tinydrm_device *tdev, - const struct drm_simple_display_pipe_funcs *funcs, - int connector_type, - const uint32_t *formats, - unsigned int format_count, - const struct drm_display_mode *mode, - unsigned int rotation) +int tinydrm_display_pipe_init(struct drm_device *drm, + struct drm_simple_display_pipe *pipe, + const struct drm_simple_display_pipe_funcs *funcs, + int connector_type, + const uint32_t *formats, + unsigned int format_count, + const struct drm_display_mode *mode, + unsigned int rotation) { - struct drm_device *drm = tdev->drm; struct drm_display_mode mode_copy; struct drm_connector *connector; int ret; @@ -177,7 +177,7 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev, if (IS_ERR(connector)) return PTR_ERR(connector); - return drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats, + return drm_simple_display_pipe_init(drm, pipe, funcs, formats, format_count, modifiers, connector); } EXPORT_SYMBOL(tinydrm_display_pipe_init); diff --git a/drivers/gpu/drm/tinydrm/hx8357d.c b/drivers/gpu/drm/tinydrm/hx8357d.c index 8bbd0beafc6a..fab961dded87 100644 --- a/drivers/gpu/drm/tinydrm/hx8357d.c +++ b/drivers/gpu/drm/tinydrm/hx8357d.c @@ -16,7 +16,9 @@ #include <linux/property.h> #include <linux/spi/spi.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_modeset_helper.h> @@ -46,16 +48,18 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); u8 addr_mode; - int ret; + int ret, idx; + + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; DRM_DEBUG_KMS("\n"); ret = mipi_dbi_poweron_conditional_reset(mipi); if (ret < 0) - 
return; + goto out_exit; if (ret == 1) goto out_enable; @@ -171,6 +175,8 @@ out_enable: } mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); mipi_dbi_enable_flush(mipi, crtc_state, plane_state); +out_exit: + drm_dev_exit(idx); } static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = { @@ -181,7 +187,7 @@ static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = { }; static const struct drm_display_mode yx350hv15_mode = { - TINYDRM_MODE(320, 480, 60, 75), + DRM_SIMPLE_MODE(320, 480, 60, 75), }; DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops); @@ -189,6 +195,7 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops); static struct drm_driver hx8357d_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &hx8357d_fops, + .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "hx8357d", @@ -213,15 +220,25 @@ MODULE_DEVICE_TABLE(spi, hx8357d_id); static int hx8357d_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct drm_device *drm; struct mipi_dbi *mipi; struct gpio_desc *dc; u32 rotation = 0; int ret; - mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + mipi = kzalloc(sizeof(*mipi), GFP_KERNEL); if (!mipi) return -ENOMEM; + drm = &mipi->drm; + ret = devm_drm_dev_init(dev, drm, &hx8357d_driver); + if (ret) { + kfree(mipi); + return ret; + } + + drm_mode_config_init(drm); + dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW); if (IS_ERR(dc)) { DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n"); @@ -238,21 +255,36 @@ static int hx8357d_probe(struct spi_device *spi) if (ret) return ret; - ret = mipi_dbi_init(&spi->dev, mipi, &hx8357d_pipe_funcs, - &hx8357d_driver, &yx350hv15_mode, rotation); + ret = mipi_dbi_init(mipi, &hx8357d_pipe_funcs, &yx350hv15_mode, rotation); if (ret) return ret; - spi_set_drvdata(spi, mipi); + drm_mode_config_reset(drm); - return devm_tinydrm_register(&mipi->tinydrm); + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + spi_set_drvdata(spi, drm); + + drm_fbdev_generic_setup(drm, 32); + + return 0; } -static void hx8357d_shutdown(struct spi_device *spi) +static int hx8357d_remove(struct spi_device *spi) { - struct mipi_dbi *mipi = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); - tinydrm_shutdown(&mipi->tinydrm); + return 0; +} + +static void hx8357d_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); } static struct spi_driver hx8357d_spi_driver = { @@ -262,6 +294,7 @@ static struct spi_driver hx8357d_spi_driver = { }, .id_table = hx8357d_id, .probe = hx8357d_probe, + .remove = hx8357d_remove, .shutdown = hx8357d_shutdown, }; module_spi_driver(hx8357d_spi_driver); diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c index 43a3b68d90a2..e9116ef4b5bc 100644 --- a/drivers/gpu/drm/tinydrm/ili9225.c +++ b/drivers/gpu/drm/tinydrm/ili9225.c @@ -20,9 +20,11 @@ #include <linux/spi/spi.h> #include <video/mipi_display.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> @@ -81,20 +83,22 @@ static inline int ili9225_command(struct mipi_dbi *mipi, u8 cmd, u16 data) static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) { struct drm_gem_cma_object *cma_obj = 
drm_fb_cma_get_gem_obj(fb, 0); - struct tinydrm_device *tdev = fb->dev->dev_private; - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev); unsigned int height = rect->y2 - rect->y1; unsigned int width = rect->x2 - rect->x1; bool swap = mipi->swap_bytes; u16 x_start, y_start; u16 x1, x2, y1, y2; - int ret = 0; + int idx, ret = 0; bool full; void *tr; if (!mipi->enabled) return; + if (!drm_dev_enter(fb->dev, &idx)) + return; + full = width == fb->width && height == fb->height; DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect)); @@ -157,6 +161,8 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) err_msg: if (ret) dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret); + + drm_dev_exit(idx); } static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe, @@ -181,19 +187,21 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); struct drm_framebuffer *fb = plane_state->fb; - struct device *dev = tdev->drm->dev; + struct device *dev = pipe->crtc.dev->dev; struct drm_rect rect = { .x1 = 0, .x2 = fb->width, .y1 = 0, .y2 = fb->height, }; - int ret; + int ret, idx; u8 am_id; + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; + DRM_DEBUG_KMS("\n"); mipi_dbi_hw_reset(mipi); @@ -207,7 +215,7 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe, ret = ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0000); if (ret) { DRM_DEV_ERROR(dev, "Error sending command %d\n", ret); - return; + goto out_exit; } ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0000); ili9225_command(mipi, ILI9225_POWER_CONTROL_3, 0x0000); @@ -280,15 +288,23 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe, mipi->enabled = true; ili9225_fb_dirty(fb, &rect); +out_exit: + drm_dev_exit(idx); } static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); DRM_DEBUG_KMS("\n"); + /* + * This callback is not protected by drm_dev_enter/exit since we want to + * turn off the display on regular driver unload. It's highly unlikely + * that the underlying SPI controller is gone should this be called after + * unplug. 
+ */ + if (!mipi->enabled) return; @@ -301,7 +317,7 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe) mipi->enabled = false; } -static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par, +static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, size_t num) { struct spi_device *spi = mipi->spi; @@ -311,11 +327,11 @@ static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par, gpiod_set_value_cansleep(mipi->dc, 0); speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1); - ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1); + ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1); if (ret || !num) return ret; - if (cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes) + if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes) bpw = 16; gpiod_set_value_cansleep(mipi->dc, 1); @@ -332,7 +348,7 @@ static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = { }; static const struct drm_display_mode ili9225_mode = { - TINYDRM_MODE(176, 220, 35, 44), + DRM_SIMPLE_MODE(176, 220, 35, 44), }; DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops); @@ -341,6 +357,7 @@ static struct drm_driver ili9225_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &ili9225_fops, + .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .name = "ili9225", .desc = "Ilitek ILI9225", @@ -364,15 +381,25 @@ MODULE_DEVICE_TABLE(spi, ili9225_id); static int ili9225_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct drm_device *drm; struct mipi_dbi *mipi; struct gpio_desc *rs; u32 rotation = 0; int ret; - mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + mipi = kzalloc(sizeof(*mipi), GFP_KERNEL); if (!mipi) return -ENOMEM; + drm = &mipi->drm; + ret = devm_drm_dev_init(dev, drm, &ili9225_driver); + if (ret) { + kfree(mipi); + return ret; + } + + drm_mode_config_init(drm); + mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(mipi->reset)) { DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n"); @@ -394,21 +421,36 @@ static int ili9225_probe(struct spi_device *spi) /* override the command function set in mipi_dbi_spi_init() */ mipi->command = ili9225_dbi_command; - ret = mipi_dbi_init(&spi->dev, mipi, &ili9225_pipe_funcs, - &ili9225_driver, &ili9225_mode, rotation); + ret = mipi_dbi_init(mipi, &ili9225_pipe_funcs, &ili9225_mode, rotation); if (ret) return ret; - spi_set_drvdata(spi, mipi); + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + spi_set_drvdata(spi, drm); + + drm_fbdev_generic_setup(drm, 32); - return devm_tinydrm_register(&mipi->tinydrm); + return 0; } -static void ili9225_shutdown(struct spi_device *spi) +static int ili9225_remove(struct spi_device *spi) { - struct mipi_dbi *mipi = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); + + return 0; +} - tinydrm_shutdown(&mipi->tinydrm); +static void ili9225_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); } static struct spi_driver ili9225_spi_driver = { @@ -419,6 +461,7 @@ static struct spi_driver ili9225_spi_driver = { }, .id_table = ili9225_id, .probe = ili9225_probe, + .remove = ili9225_remove, .shutdown = ili9225_shutdown, }; module_spi_driver(ili9225_spi_driver); diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c index 713bb2dd7e04..d15f85e837ae 100644 --- a/drivers/gpu/drm/tinydrm/ili9341.c +++ 
b/drivers/gpu/drm/tinydrm/ili9341.c @@ -15,7 +15,9 @@ #include <linux/property.h> #include <linux/spi/spi.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_modeset_helper.h> @@ -52,16 +54,18 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); u8 addr_mode; - int ret; + int ret, idx; + + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; DRM_DEBUG_KMS("\n"); ret = mipi_dbi_poweron_conditional_reset(mipi); if (ret < 0) - return; + goto out_exit; if (ret == 1) goto out_enable; @@ -127,6 +131,8 @@ out_enable: addr_mode |= ILI9341_MADCTL_BGR; mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); mipi_dbi_enable_flush(mipi, crtc_state, plane_state); +out_exit: + drm_dev_exit(idx); } static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = { @@ -137,7 +143,7 @@ static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = { }; static const struct drm_display_mode yx240qv29_mode = { - TINYDRM_MODE(240, 320, 37, 49), + DRM_SIMPLE_MODE(240, 320, 37, 49), }; DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops); @@ -145,6 +151,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops); static struct drm_driver ili9341_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &ili9341_fops, + .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "ili9341", @@ -169,15 +176,25 @@ MODULE_DEVICE_TABLE(spi, ili9341_id); static int ili9341_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct drm_device *drm; struct mipi_dbi *mipi; struct gpio_desc *dc; u32 rotation = 0; int ret; - mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + mipi = kzalloc(sizeof(*mipi), GFP_KERNEL); if (!mipi) return -ENOMEM; + drm = &mipi->drm; + ret = devm_drm_dev_init(dev, drm, &ili9341_driver); + if (ret) { + kfree(mipi); + return ret; + } + + drm_mode_config_init(drm); + mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(mipi->reset)) { DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n"); @@ -200,21 +217,36 @@ static int ili9341_probe(struct spi_device *spi) if (ret) return ret; - ret = mipi_dbi_init(&spi->dev, mipi, &ili9341_pipe_funcs, - &ili9341_driver, &yx240qv29_mode, rotation); + ret = mipi_dbi_init(mipi, &ili9341_pipe_funcs, &yx240qv29_mode, rotation); if (ret) return ret; - spi_set_drvdata(spi, mipi); + drm_mode_config_reset(drm); - return devm_tinydrm_register(&mipi->tinydrm); + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + spi_set_drvdata(spi, drm); + + drm_fbdev_generic_setup(drm, 32); + + return 0; } -static void ili9341_shutdown(struct spi_device *spi) +static int ili9341_remove(struct spi_device *spi) { - struct mipi_dbi *mipi = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); - tinydrm_shutdown(&mipi->tinydrm); + return 0; +} + +static void ili9341_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); } static struct spi_driver ili9341_spi_driver = { @@ -224,6 +256,7 @@ static struct spi_driver ili9341_spi_driver = { }, .id_table = ili9341_id, 
.probe = ili9341_probe, + .remove = ili9341_remove, .shutdown = ili9341_shutdown, }; module_spi_driver(ili9341_spi_driver); diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c index 82a92ec9ae3c..c6dc31084a4e 100644 --- a/drivers/gpu/drm/tinydrm/mi0283qt.c +++ b/drivers/gpu/drm/tinydrm/mi0283qt.c @@ -17,7 +17,9 @@ #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_modeset_helper.h> @@ -54,16 +56,18 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); u8 addr_mode; - int ret; + int ret, idx; + + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; DRM_DEBUG_KMS("\n"); ret = mipi_dbi_poweron_conditional_reset(mipi); if (ret < 0) - return; + goto out_exit; if (ret == 1) goto out_enable; @@ -135,6 +139,8 @@ out_enable: addr_mode |= ILI9341_MADCTL_BGR; mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); mipi_dbi_enable_flush(mipi, crtc_state, plane_state); +out_exit: + drm_dev_exit(idx); } static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = { @@ -145,7 +151,7 @@ static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = { }; static const struct drm_display_mode mi0283qt_mode = { - TINYDRM_MODE(320, 240, 58, 43), + DRM_SIMPLE_MODE(320, 240, 58, 43), }; DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops); @@ -154,6 +160,7 @@ static struct drm_driver mi0283qt_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &mi0283qt_fops, + .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "mi0283qt", @@ -178,15 +185,25 @@ MODULE_DEVICE_TABLE(spi, mi0283qt_id); static int mi0283qt_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct drm_device *drm; struct mipi_dbi *mipi; struct gpio_desc *dc; u32 rotation = 0; int ret; - mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + mipi = kzalloc(sizeof(*mipi), GFP_KERNEL); if (!mipi) return -ENOMEM; + drm = &mipi->drm; + ret = devm_drm_dev_init(dev, drm, &mi0283qt_driver); + if (ret) { + kfree(mipi); + return ret; + } + + drm_mode_config_init(drm); + mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(mipi->reset)) { DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n"); @@ -213,35 +230,46 @@ static int mi0283qt_probe(struct spi_device *spi) if (ret) return ret; - ret = mipi_dbi_init(&spi->dev, mipi, &mi0283qt_pipe_funcs, - &mi0283qt_driver, &mi0283qt_mode, rotation); + ret = mipi_dbi_init(mipi, &mi0283qt_pipe_funcs, &mi0283qt_mode, rotation); + if (ret) + return ret; + + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); if (ret) return ret; - spi_set_drvdata(spi, mipi); + spi_set_drvdata(spi, drm); - return devm_tinydrm_register(&mipi->tinydrm); + drm_fbdev_generic_setup(drm, 32); + + return 0; } -static void mi0283qt_shutdown(struct spi_device *spi) +static int mi0283qt_remove(struct spi_device *spi) { - struct mipi_dbi *mipi = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); - tinydrm_shutdown(&mipi->tinydrm); + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); 
+ + return 0; } -static int __maybe_unused mi0283qt_pm_suspend(struct device *dev) +static void mi0283qt_shutdown(struct spi_device *spi) { - struct mipi_dbi *mipi = dev_get_drvdata(dev); + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); +} - return drm_mode_config_helper_suspend(mipi->tinydrm.drm); +static int __maybe_unused mi0283qt_pm_suspend(struct device *dev) +{ + return drm_mode_config_helper_suspend(dev_get_drvdata(dev)); } static int __maybe_unused mi0283qt_pm_resume(struct device *dev) { - struct mipi_dbi *mipi = dev_get_drvdata(dev); - - drm_mode_config_helper_resume(mipi->tinydrm.drm); + drm_mode_config_helper_resume(dev_get_drvdata(dev)); return 0; } @@ -259,6 +287,7 @@ static struct spi_driver mi0283qt_spi_driver = { }, .id_table = mi0283qt_id, .probe = mi0283qt_probe, + .remove = mi0283qt_remove, .shutdown = mi0283qt_shutdown, }; module_spi_driver(mi0283qt_spi_driver); diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c index 918f77c7de34..869c8f56da3b 100644 --- a/drivers/gpu/drm/tinydrm/mipi-dbi.c +++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c @@ -153,16 +153,42 @@ EXPORT_SYMBOL(mipi_dbi_command_read); */ int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len) { + u8 *cmdbuf; int ret; + /* SPI requires dma-safe buffers */ + cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL); + if (!cmdbuf) + return -ENOMEM; + mutex_lock(&mipi->cmdlock); - ret = mipi->command(mipi, cmd, data, len); + ret = mipi->command(mipi, cmdbuf, data, len); mutex_unlock(&mipi->cmdlock); + kfree(cmdbuf); + return ret; } EXPORT_SYMBOL(mipi_dbi_command_buf); +/* This should only be used by mipi_dbi_command() */ +int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len) +{ + u8 *buf; + int ret; + + buf = kmemdup(data, len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = mipi_dbi_command_buf(mipi, cmd, buf, len); + + kfree(buf); + + return ret; +} +EXPORT_SYMBOL(mipi_dbi_command_stackbuf); + /** * mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary * @dst: The destination buffer @@ -216,18 +242,20 @@ EXPORT_SYMBOL(mipi_dbi_buf_copy); static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) { struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); - struct tinydrm_device *tdev = fb->dev->dev_private; - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev); unsigned int height = rect->y2 - rect->y1; unsigned int width = rect->x2 - rect->x1; bool swap = mipi->swap_bytes; - int ret = 0; + int idx, ret = 0; bool full; void *tr; if (!mipi->enabled) return; + if (!drm_dev_enter(fb->dev, &idx)) + return; + full = width == fb->width && height == fb->height; DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect)); @@ -254,6 +282,8 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) err_msg: if (ret) dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret); + + drm_dev_exit(idx); } /** @@ -308,19 +338,29 @@ void mipi_dbi_enable_flush(struct mipi_dbi *mipi, .y1 = 0, .y2 = fb->height, }; + int idx; + + if (!drm_dev_enter(&mipi->drm, &idx)) + return; mipi->enabled = true; mipi_dbi_fb_dirty(fb, &rect); backlight_enable(mipi->backlight); + + drm_dev_exit(idx); } EXPORT_SYMBOL(mipi_dbi_enable_flush); static void mipi_dbi_blank(struct mipi_dbi *mipi) { - struct drm_device *drm = mipi->tinydrm.drm; + struct drm_device *drm = &mipi->drm; u16 height = drm->mode_config.min_height; u16 width = 
drm->mode_config.min_width; size_t len = width * height * 2; + int idx; + + if (!drm_dev_enter(drm, &idx)) + return; memset(mipi->tx_buf, 0, len); @@ -330,6 +370,8 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi) (height >> 8) & 0xFF, (height - 1) & 0xFF); mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, (u8 *)mipi->tx_buf, len); + + drm_dev_exit(idx); } /** @@ -342,8 +384,10 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi) */ void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); + + if (!mipi->enabled) + return; DRM_DEBUG_KMS("\n"); @@ -359,6 +403,12 @@ void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe) } EXPORT_SYMBOL(mipi_dbi_pipe_disable); +static const struct drm_mode_config_funcs mipi_dbi_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + static const uint32_t mipi_dbi_formats[] = { DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -366,31 +416,27 @@ static const uint32_t mipi_dbi_formats[] = { /** * mipi_dbi_init - MIPI DBI initialization - * @dev: Parent device * @mipi: &mipi_dbi structure to initialize - * @pipe_funcs: Display pipe functions - * @driver: DRM driver + * @funcs: Display pipe functions * @mode: Display mode * @rotation: Initial rotation in degrees Counter Clock Wise * - * This function initializes a &mipi_dbi structure and it's underlying - * @tinydrm_device. It also sets up the display pipeline. + * This function sets up a &drm_simple_display_pipe with a &drm_connector that + * has one fixed &drm_display_mode which is rotated according to @rotation. + * This mode is used to set the mode config min/max width/height properties. + * Additionally &mipi_dbi.tx_buf is allocated. * * Supported formats: Native RGB565 and emulated XRGB8888. * - * Objects created by this function will be automatically freed on driver - * detach (devres). - * * Returns: * Zero on success, negative error code on failure. 
*/ -int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi, - const struct drm_simple_display_pipe_funcs *pipe_funcs, - struct drm_driver *driver, +int mipi_dbi_init(struct mipi_dbi *mipi, + const struct drm_simple_display_pipe_funcs *funcs, const struct drm_display_mode *mode, unsigned int rotation) { size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16); - struct tinydrm_device *tdev = &mipi->tinydrm; + struct drm_device *drm = &mipi->drm; int ret; if (!mipi->command) @@ -398,16 +444,12 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi, mutex_init(&mipi->cmdlock); - mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL); + mipi->tx_buf = devm_kmalloc(drm->dev, bufsize, GFP_KERNEL); if (!mipi->tx_buf) return -ENOMEM; - ret = devm_tinydrm_init(dev, tdev, driver); - if (ret) - return ret; - /* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */ - ret = tinydrm_display_pipe_init(tdev, pipe_funcs, + ret = tinydrm_display_pipe_init(drm, &mipi->pipe, funcs, DRM_MODE_CONNECTOR_VIRTUAL, mipi_dbi_formats, ARRAY_SIZE(mipi_dbi_formats), mode, @@ -415,21 +457,40 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi, if (ret) return ret; - drm_plane_enable_fb_damage_clips(&tdev->pipe.plane); + drm_plane_enable_fb_damage_clips(&mipi->pipe.plane); - tdev->drm->mode_config.preferred_depth = 16; + drm->mode_config.funcs = &mipi_dbi_mode_config_funcs; + drm->mode_config.preferred_depth = 16; mipi->rotation = rotation; - drm_mode_config_reset(tdev->drm); - DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n", - tdev->drm->mode_config.preferred_depth, rotation); + drm->mode_config.preferred_depth, rotation); return 0; } EXPORT_SYMBOL(mipi_dbi_init); /** + * mipi_dbi_release - DRM driver release helper + * @drm: DRM device + * + * This function finalizes and frees &mipi_dbi. + * + * Drivers can use this as their &drm_driver->release callback. + */ +void mipi_dbi_release(struct drm_device *drm) +{ + struct mipi_dbi *dbi = drm_to_mipi_dbi(drm); + + DRM_DEBUG_DRIVER("\n"); + + drm_mode_config_cleanup(drm); + drm_dev_fini(drm); + kfree(dbi); +} +EXPORT_SYMBOL(mipi_dbi_release); + +/** * mipi_dbi_hw_reset - Hardware reset of controller * @mipi: MIPI DBI structure * @@ -481,7 +542,7 @@ EXPORT_SYMBOL(mipi_dbi_display_is_on); static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond) { - struct device *dev = mipi->tinydrm.drm->dev; + struct device *dev = mipi->drm.dev; int ret; if (mipi->regulator) { @@ -774,18 +835,18 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc, return 0; } -static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd, +static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd, u8 *parameters, size_t num) { - unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8; + unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 
16 : 8; int ret; - if (mipi_dbi_command_is_read(mipi, cmd)) + if (mipi_dbi_command_is_read(mipi, *cmd)) return -ENOTSUPP; - MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num); + MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num); - ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8); + ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8); if (ret || !num) return ret; @@ -794,7 +855,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd, /* MIPI DBI Type C Option 3 */ -static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, +static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd, u8 *data, size_t len) { struct spi_device *spi = mipi->spi; @@ -803,7 +864,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, struct spi_transfer tr[2] = { { .speed_hz = speed_hz, - .tx_buf = &cmd, + .tx_buf = cmd, .len = 1, }, { .speed_hz = speed_hz, @@ -821,8 +882,8 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, * Support non-standard 24-bit and 32-bit Nokia read commands which * start with a dummy clock, so we need to read an extra byte. */ - if (cmd == MIPI_DCS_GET_DISPLAY_ID || - cmd == MIPI_DCS_GET_DISPLAY_STATUS) { + if (*cmd == MIPI_DCS_GET_DISPLAY_ID || + *cmd == MIPI_DCS_GET_DISPLAY_STATUS) { if (!(len == 3 || len == 4)) return -EINVAL; @@ -852,7 +913,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd, data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7)); } - MIPI_DBI_DEBUG_COMMAND(cmd, data, len); + MIPI_DBI_DEBUG_COMMAND(*cmd, data, len); err_free: kfree(buf); @@ -860,7 +921,7 @@ err_free: return ret; } -static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd, +static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, size_t num) { struct spi_device *spi = mipi->spi; @@ -868,18 +929,18 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd, u32 speed_hz; int ret; - if (mipi_dbi_command_is_read(mipi, cmd)) + if (mipi_dbi_command_is_read(mipi, *cmd)) return mipi_dbi_typec3_command_read(mipi, cmd, par, num); - MIPI_DBI_DEBUG_COMMAND(cmd, par, num); + MIPI_DBI_DEBUG_COMMAND(*cmd, par, num); gpiod_set_value_cansleep(mipi->dc, 0); speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1); - ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1); + ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1); if (ret || !num) return ret; - if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes) + if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes) bpw = 16; gpiod_set_value_cansleep(mipi->dc, 1); @@ -926,7 +987,7 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi, * Even though it's not the SPI device that does DMA (the master does), * the dma mask is necessary for the dma_alloc_wc() in * drm_gem_cma_create(). The dma_addr returned will be a physical - * adddress which might be different from the bus address, but this is + * address which might be different from the bus address, but this is * not a problem since the address will not be used. 
* The virtual address is used in the transfer and the SPI core * re-maps it on the SPI master device using the DMA streaming API @@ -976,11 +1037,16 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file, u8 val, cmd = 0, parameters[64]; char *buf, *pos, *token; unsigned int i; - int ret; + int ret, idx; + + if (!drm_dev_enter(&mipi->drm, &idx)) + return -ENODEV; buf = memdup_user_nul(ubuf, count); - if (IS_ERR(buf)) - return PTR_ERR(buf); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto err_exit; + } /* strip trailing whitespace */ for (i = count - 1; i > 0; i--) @@ -1016,6 +1082,8 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file, err_free: kfree(buf); +err_exit: + drm_dev_exit(idx); return ret < 0 ? ret : count; } @@ -1024,8 +1092,11 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused) { struct mipi_dbi *mipi = m->private; u8 cmd, val[4]; + int ret, idx; size_t len; - int ret; + + if (!drm_dev_enter(&mipi->drm, &idx)) + return -ENODEV; for (cmd = 0; cmd < 255; cmd++) { if (!mipi_dbi_command_is_read(mipi, cmd)) @@ -1056,6 +1127,8 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused) seq_printf(m, "%*phN\n", (int)len, val); } + drm_dev_exit(idx); + return 0; } @@ -1088,8 +1161,7 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = { */ int mipi_dbi_debugfs_init(struct drm_minor *minor) { - struct tinydrm_device *tdev = minor->dev->dev_private; - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(minor->dev); umode_t mode = S_IFREG | S_IWUSR; if (mipi->read_commands) diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c index b037c6540cf3..3f3632457079 100644 --- a/drivers/gpu/drm/tinydrm/repaper.c +++ b/drivers/gpu/drm/tinydrm/repaper.c @@ -26,14 +26,16 @@ #include <linux/spi/spi.h> #include <linux/thermal.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_rect.h> #include <drm/drm_vblank.h> -#include <drm/tinydrm/tinydrm.h> +#include <drm/drm_simple_kms_helper.h> #include <drm/tinydrm/tinydrm-helpers.h> #define REPAPER_RID_G2_COG_ID 0x12 @@ -59,7 +61,8 @@ enum repaper_epd_border_byte { }; struct repaper_epd { - struct tinydrm_device tinydrm; + struct drm_device drm; + struct drm_simple_display_pipe pipe; struct spi_device *spi; struct gpio_desc *panel_on; @@ -88,10 +91,9 @@ struct repaper_epd { bool partial; }; -static inline struct repaper_epd * -epd_from_tinydrm(struct tinydrm_device *tdev) +static inline struct repaper_epd *drm_to_epd(struct drm_device *drm) { - return container_of(tdev, struct repaper_epd, tinydrm); + return container_of(drm, struct repaper_epd, drm); } static int repaper_spi_transfer(struct spi_device *spi, u8 header, @@ -529,11 +531,16 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb) { struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); struct dma_buf_attachment *import_attach = cma_obj->base.import_attach; - struct tinydrm_device *tdev = fb->dev->dev_private; - struct repaper_epd *epd = epd_from_tinydrm(tdev); + struct repaper_epd *epd = drm_to_epd(fb->dev); struct drm_rect clip; + int idx, ret = 0; u8 *buf = NULL; - int ret = 0; + + if (!epd->enabled) + return 0; + + if (!drm_dev_enter(fb->dev, &idx)) + return -ENODEV; /* repaper can't do partial updates */ 
clip.x1 = 0; @@ -541,17 +548,16 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb) clip.y1 = 0; clip.y2 = fb->height; - if (!epd->enabled) - return 0; - repaper_get_temperature(epd); DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id, epd->factored_stage_time); buf = kmalloc_array(fb->width, fb->height, GFP_KERNEL); - if (!buf) - return -ENOMEM; + if (!buf) { + ret = -ENOMEM; + goto out_exit; + } if (import_attach) { ret = dma_buf_begin_cpu_access(import_attach->dmabuf, @@ -620,6 +626,8 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb) out_free: kfree(buf); +out_exit: + drm_dev_exit(idx); return ret; } @@ -645,12 +653,14 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct repaper_epd *epd = epd_from_tinydrm(tdev); + struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev); struct spi_device *spi = epd->spi; struct device *dev = &spi->dev; bool dc_ok = false; - int i, ret; + int i, ret, idx; + + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; DRM_DEBUG_DRIVER("\n"); @@ -689,7 +699,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, if (!i) { DRM_DEV_ERROR(dev, "timeout waiting for panel to become ready.\n"); power_off(epd); - return; + goto out_exit; } repaper_read_id(spi); @@ -700,7 +710,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, else dev_err(dev, "wrong COG ID 0x%02x\n", ret); power_off(epd); - return; + goto out_exit; } /* Disable OE */ @@ -713,7 +723,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, else DRM_DEV_ERROR(dev, "panel is reported broken\n"); power_off(epd); - return; + goto out_exit; } /* Power saving mode */ @@ -753,7 +763,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, if (ret < 0) { DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret); power_off(epd); - return; + goto out_exit; } if (ret & 0x40) { @@ -765,7 +775,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, if (!dc_ok) { DRM_DEV_ERROR(dev, "dc/dc failed\n"); power_off(epd); - return; + goto out_exit; } /* @@ -776,15 +786,26 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, epd->enabled = true; epd->partial = false; +out_exit: + drm_dev_exit(idx); } static void repaper_pipe_disable(struct drm_simple_display_pipe *pipe) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct repaper_epd *epd = epd_from_tinydrm(tdev); + struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev); struct spi_device *spi = epd->spi; unsigned int line; + /* + * This callback is not protected by drm_dev_enter/exit since we want to + * turn off the display on regular driver unload. It's highly unlikely + * that the underlying SPI controller is gone should this be called after + * unplug. 
+ */ + + if (!epd->enabled) + return; + DRM_DEBUG_DRIVER("\n"); epd->enabled = false; @@ -855,33 +876,50 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = { .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, }; +static const struct drm_mode_config_funcs repaper_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static void repaper_release(struct drm_device *drm) +{ + struct repaper_epd *epd = drm_to_epd(drm); + + DRM_DEBUG_DRIVER("\n"); + + drm_mode_config_cleanup(drm); + drm_dev_fini(drm); + kfree(epd); +} + static const uint32_t repaper_formats[] = { DRM_FORMAT_XRGB8888, }; static const struct drm_display_mode repaper_e1144cs021_mode = { - TINYDRM_MODE(128, 96, 29, 22), + DRM_SIMPLE_MODE(128, 96, 29, 22), }; static const u8 repaper_e1144cs021_cs[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x00 }; static const struct drm_display_mode repaper_e1190cs021_mode = { - TINYDRM_MODE(144, 128, 36, 32), + DRM_SIMPLE_MODE(144, 128, 36, 32), }; static const u8 repaper_e1190cs021_cs[] = { 0x00, 0x00, 0x00, 0x03, 0xfc, 0x00, 0x00, 0xff }; static const struct drm_display_mode repaper_e2200cs021_mode = { - TINYDRM_MODE(200, 96, 46, 22), + DRM_SIMPLE_MODE(200, 96, 46, 22), }; static const u8 repaper_e2200cs021_cs[] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0xff, 0xe0, 0x00 }; static const struct drm_display_mode repaper_e2271cs021_mode = { - TINYDRM_MODE(264, 176, 57, 38), + DRM_SIMPLE_MODE(264, 176, 57, 38), }; static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f, @@ -893,6 +931,7 @@ static struct drm_driver repaper_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &repaper_fops, + .release = repaper_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .name = "repaper", .desc = "Pervasive Displays RePaper e-ink panels", @@ -925,11 +964,11 @@ static int repaper_probe(struct spi_device *spi) const struct spi_device_id *spi_id; const struct of_device_id *match; struct device *dev = &spi->dev; - struct tinydrm_device *tdev; enum repaper_model model; const char *thermal_zone; struct repaper_epd *epd; size_t line_buffer_size; + struct drm_device *drm; int ret; match = of_match_device(repaper_of_match, dev); @@ -949,10 +988,21 @@ static int repaper_probe(struct spi_device *spi) } } - epd = devm_kzalloc(dev, sizeof(*epd), GFP_KERNEL); + epd = kzalloc(sizeof(*epd), GFP_KERNEL); if (!epd) return -ENOMEM; + drm = &epd->drm; + + ret = devm_drm_dev_init(dev, drm, &repaper_driver); + if (ret) { + kfree(epd); + return ret; + } + + drm_mode_config_init(drm); + drm->mode_config.funcs = &repaper_mode_config_funcs; + epd->spi = spi; epd->panel_on = devm_gpiod_get(dev, "panel-on", GPIOD_OUT_LOW); @@ -1063,32 +1113,41 @@ static int repaper_probe(struct spi_device *spi) if (!epd->current_frame) return -ENOMEM; - tdev = &epd->tinydrm; - - ret = devm_tinydrm_init(dev, tdev, &repaper_driver); - if (ret) - return ret; - - ret = tinydrm_display_pipe_init(tdev, &repaper_pipe_funcs, + ret = tinydrm_display_pipe_init(drm, &epd->pipe, &repaper_pipe_funcs, DRM_MODE_CONNECTOR_VIRTUAL, repaper_formats, ARRAY_SIZE(repaper_formats), mode, 0); if (ret) return ret; - drm_mode_config_reset(tdev->drm); - spi_set_drvdata(spi, tdev); + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + spi_set_drvdata(spi, drm); DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000); - return devm_tinydrm_register(tdev); + 
drm_fbdev_generic_setup(drm, 32); + + return 0; } -static void repaper_shutdown(struct spi_device *spi) +static int repaper_remove(struct spi_device *spi) { - struct tinydrm_device *tdev = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); + + return 0; +} - tinydrm_shutdown(tdev); +static void repaper_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); } static struct spi_driver repaper_spi_driver = { @@ -1099,6 +1158,7 @@ static struct spi_driver repaper_spi_driver = { }, .id_table = repaper_id, .probe = repaper_probe, + .remove = repaper_remove, .shutdown = repaper_shutdown, }; module_spi_driver(repaper_spi_driver); diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c index 01a8077954b3..d99957bac532 100644 --- a/drivers/gpu/drm/tinydrm/st7586.c +++ b/drivers/gpu/drm/tinydrm/st7586.c @@ -17,9 +17,11 @@ #include <linux/spi/spi.h> #include <video/mipi_display.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_rect.h> @@ -116,14 +118,15 @@ static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb, static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) { - struct tinydrm_device *tdev = fb->dev->dev_private; - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); - int start, end; - int ret = 0; + struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev); + int start, end, idx, ret = 0; if (!mipi->enabled) return; + if (!drm_dev_enter(fb->dev, &idx)) + return; + /* 3 pixels per byte, so grow clip to nearest multiple of 3 */ rect->x1 = rounddown(rect->x1, 3); rect->x2 = roundup(rect->x2, 3); @@ -151,6 +154,8 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) err_msg: if (ret) dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret); + + drm_dev_exit(idx); } static void st7586_pipe_update(struct drm_simple_display_pipe *pipe, @@ -175,8 +180,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); struct drm_framebuffer *fb = plane_state->fb; struct drm_rect rect = { .x1 = 0, @@ -184,14 +188,17 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe, .y1 = 0, .y2 = fb->height, }; - int ret; + int idx, ret; u8 addr_mode; + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; + DRM_DEBUG_KMS("\n"); ret = mipi_dbi_poweron_reset(mipi); if (ret) - return; + goto out_exit; mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f); mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00); @@ -244,12 +251,20 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe, st7586_fb_dirty(fb, &rect); mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON); +out_exit: + drm_dev_exit(idx); } static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); + + /* + * This callback is not protected by drm_dev_enter/exit since we want to + * turn off the display on regular 
driver unload. It's highly unlikely + * that the underlying SPI controller is gone should this be called after + * unplug. + */ DRM_DEBUG_KMS("\n"); @@ -264,46 +279,6 @@ static const u32 st7586_formats[] = { DRM_FORMAT_XRGB8888, }; -static int st7586_init(struct device *dev, struct mipi_dbi *mipi, - const struct drm_simple_display_pipe_funcs *pipe_funcs, - struct drm_driver *driver, const struct drm_display_mode *mode, - unsigned int rotation) -{ - size_t bufsize = (mode->vdisplay + 2) / 3 * mode->hdisplay; - struct tinydrm_device *tdev = &mipi->tinydrm; - int ret; - - mutex_init(&mipi->cmdlock); - - mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL); - if (!mipi->tx_buf) - return -ENOMEM; - - ret = devm_tinydrm_init(dev, tdev, driver); - if (ret) - return ret; - - ret = tinydrm_display_pipe_init(tdev, pipe_funcs, - DRM_MODE_CONNECTOR_VIRTUAL, - st7586_formats, - ARRAY_SIZE(st7586_formats), - mode, rotation); - if (ret) - return ret; - - drm_plane_enable_fb_damage_clips(&tdev->pipe.plane); - - tdev->drm->mode_config.preferred_depth = 32; - mipi->rotation = rotation; - - drm_mode_config_reset(tdev->drm); - - DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n", - tdev->drm->mode_config.preferred_depth, rotation); - - return 0; -} - static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = { .enable = st7586_pipe_enable, .disable = st7586_pipe_disable, @@ -311,8 +286,14 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = { .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, }; +static const struct drm_mode_config_funcs st7586_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + static const struct drm_display_mode st7586_mode = { - TINYDRM_MODE(178, 128, 37, 27), + DRM_SIMPLE_MODE(178, 128, 37, 27), }; DEFINE_DRM_GEM_CMA_FOPS(st7586_fops); @@ -321,6 +302,7 @@ static struct drm_driver st7586_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &st7586_fops, + .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7586", @@ -345,15 +327,35 @@ MODULE_DEVICE_TABLE(spi, st7586_id); static int st7586_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct drm_device *drm; struct mipi_dbi *mipi; struct gpio_desc *a0; u32 rotation = 0; + size_t bufsize; int ret; - mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + mipi = kzalloc(sizeof(*mipi), GFP_KERNEL); if (!mipi) return -ENOMEM; + drm = &mipi->drm; + ret = devm_drm_dev_init(dev, drm, &st7586_driver); + if (ret) { + kfree(mipi); + return ret; + } + + drm_mode_config_init(drm); + drm->mode_config.preferred_depth = 32; + drm->mode_config.funcs = &st7586_mode_config_funcs; + + mutex_init(&mipi->cmdlock); + + bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay; + mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL); + if (!mipi->tx_buf) + return -ENOMEM; + mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(mipi->reset)) { DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n"); @@ -367,6 +369,7 @@ static int st7586_probe(struct spi_device *spi) } device_property_read_u32(dev, "rotation", &rotation); + mipi->rotation = rotation; ret = mipi_dbi_spi_init(spi, mipi, a0); if (ret) @@ -384,21 +387,44 @@ static int st7586_probe(struct spi_device *spi) */ mipi->swap_bytes = true; - ret = st7586_init(&spi->dev, mipi, &st7586_pipe_funcs, &st7586_driver, - &st7586_mode, 
rotation); + ret = tinydrm_display_pipe_init(drm, &mipi->pipe, &st7586_pipe_funcs, + DRM_MODE_CONNECTOR_VIRTUAL, + st7586_formats, ARRAY_SIZE(st7586_formats), + &st7586_mode, rotation); if (ret) return ret; - spi_set_drvdata(spi, mipi); + drm_plane_enable_fb_damage_clips(&mipi->pipe.plane); - return devm_tinydrm_register(&mipi->tinydrm); + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + spi_set_drvdata(spi, drm); + + DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n", + drm->mode_config.preferred_depth, rotation); + + drm_fbdev_generic_setup(drm, 32); + + return 0; } -static void st7586_shutdown(struct spi_device *spi) +static int st7586_remove(struct spi_device *spi) { - struct mipi_dbi *mipi = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); + + return 0; +} - tinydrm_shutdown(&mipi->tinydrm); +static void st7586_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); } static struct spi_driver st7586_spi_driver = { @@ -409,6 +435,7 @@ static struct spi_driver st7586_spi_driver = { }, .id_table = st7586_id, .probe = st7586_probe, + .remove = st7586_remove, .shutdown = st7586_shutdown, }; module_spi_driver(st7586_spi_driver); diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c index 3bab9a9569a6..022e9849b95b 100644 --- a/drivers/gpu/drm/tinydrm/st7735r.c +++ b/drivers/gpu/drm/tinydrm/st7735r.c @@ -14,7 +14,9 @@ #include <linux/spi/spi.h> #include <video/mipi_display.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/tinydrm/mipi-dbi.h> @@ -41,16 +43,18 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { - struct tinydrm_device *tdev = pipe_to_tinydrm(pipe); - struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev); - int ret; + struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev); + int ret, idx; u8 addr_mode; + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; + DRM_DEBUG_KMS("\n"); ret = mipi_dbi_poweron_reset(mipi); if (ret) - return; + goto out_exit; msleep(150); @@ -101,6 +105,8 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe, msleep(20); mipi_dbi_enable_flush(mipi, crtc_state, plane_state); +out_exit: + drm_dev_exit(idx); } static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = { @@ -111,7 +117,7 @@ static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = { }; static const struct drm_display_mode jd_t18003_t01_mode = { - TINYDRM_MODE(128, 160, 28, 35), + DRM_SIMPLE_MODE(128, 160, 28, 35), }; DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops); @@ -120,6 +126,7 @@ static struct drm_driver st7735r_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .fops = &st7735r_fops, + .release = mipi_dbi_release, DRM_GEM_CMA_VMAP_DRIVER_OPS, .debugfs_init = mipi_dbi_debugfs_init, .name = "st7735r", @@ -144,15 +151,25 @@ MODULE_DEVICE_TABLE(spi, st7735r_id); static int st7735r_probe(struct spi_device *spi) { struct device *dev = &spi->dev; + struct drm_device *drm; struct mipi_dbi *mipi; struct gpio_desc *dc; u32 rotation = 0; int ret; - mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL); + mipi = kzalloc(sizeof(*mipi), GFP_KERNEL); if (!mipi) return -ENOMEM; + drm = 
&mipi->drm; + ret = devm_drm_dev_init(dev, drm, &st7735r_driver); + if (ret) { + kfree(mipi); + return ret; + } + + drm_mode_config_init(drm); + mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(mipi->reset)) { DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n"); @@ -178,21 +195,36 @@ static int st7735r_probe(struct spi_device *spi) /* Cannot read from Adafruit 1.8" display via SPI */ mipi->read_commands = NULL; - ret = mipi_dbi_init(&spi->dev, mipi, &jd_t18003_t01_pipe_funcs, - &st7735r_driver, &jd_t18003_t01_mode, rotation); + ret = mipi_dbi_init(mipi, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation); if (ret) return ret; - spi_set_drvdata(spi, mipi); + drm_mode_config_reset(drm); - return devm_tinydrm_register(&mipi->tinydrm); + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + spi_set_drvdata(spi, drm); + + drm_fbdev_generic_setup(drm, 32); + + return 0; } -static void st7735r_shutdown(struct spi_device *spi) +static int st7735r_remove(struct spi_device *spi) { - struct mipi_dbi *mipi = spi_get_drvdata(spi); + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); - tinydrm_shutdown(&mipi->tinydrm); + return 0; +} + +static void st7735r_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); } static struct spi_driver st7735r_spi_driver = { @@ -203,6 +235,7 @@ static struct spi_driver st7735r_spi_driver = { }, .id_table = st7735r_id, .probe = st7735r_probe, + .remove = st7735r_remove, .shutdown = st7735r_shutdown, }; module_spi_driver(st7735r_spi_driver); diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c index e8723a2412a6..d775d10dbe6a 100644 --- a/drivers/gpu/drm/tve200/tve200_display.c +++ b/drivers/gpu/drm/tve200/tve200_display.c @@ -149,7 +149,8 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe, /* Vsync IRQ at start of Vsync at first */ ctrl1 |= TVE200_VSTSTYPE_VSYNC; - if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) + if (connector->display_info.bus_flags & + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) ctrl1 |= TVE200_CTRL_TVCLKP; if ((mode->hdisplay == 352 && mode->vdisplay == 240) || /* SIF(525) */ diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 22cd2d13e272..53b7b8c04bc6 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -107,6 +107,7 @@ static void udl_usb_disconnect(struct usb_interface *interface) udl_fbdev_unplug(dev); udl_drop_usb(dev); drm_dev_unplug(dev); + drm_dev_put(dev); } /* diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index d5a23295dd80..bb7b58407039 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, *offset = drm_vma_node_offset_addr(&gobj->base.vma_node); out: - drm_gem_object_put(&gobj->base); + drm_gem_object_put_unlocked(&gobj->base); unlock: mutex_unlock(&udl->gem_lock); return ret; diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig index 1552bf552c94..75a74c45f109 100644 --- a/drivers/gpu/drm/v3d/Kconfig +++ b/drivers/gpu/drm/v3d/Kconfig @@ -5,6 +5,7 @@ config DRM_V3D depends on COMMON_CLK depends on MMU select DRM_SCHED + select DRM_GEM_SHMEM_HELPER help Choose this option if you have a system that has a Broadcom V3D 3.x or newer GPU, such as BCM7268. 
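
For orientation, the tinydrm driver hunks above all converge on one shape: the driver struct embeds a drm_device whose lifetime is tied to the SPI device via devm_drm_dev_init(), hardware paths are bracketed with drm_dev_enter()/drm_dev_exit(), and remove() does drm_dev_unplug() followed by drm_atomic_helper_shutdown(). The sketch below condenses that pattern into a hypothetical "mydrv" panel driver; every mydrv_* name and the panel geometry are invented for illustration, and only the helper calls that appear in the hunks above are used.

#include <linux/spi/spi.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>

static void mydrv_pipe_enable(struct drm_simple_display_pipe *pipe,
			      struct drm_crtc_state *crtc_state,
			      struct drm_plane_state *plane_state)
{
	struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
	int idx;

	/* Fails once drm_dev_unplug() has run, so gone hardware is never touched. */
	if (!drm_dev_enter(pipe->crtc.dev, &idx))
		return;

	/* A real driver would send its controller init sequence here. */
	mipi_dbi_enable_flush(mipi, crtc_state, plane_state);

	drm_dev_exit(idx);
}

static const struct drm_simple_display_pipe_funcs mydrv_pipe_funcs = {
	.enable = mydrv_pipe_enable,
	.disable = mipi_dbi_pipe_disable,
	.update = mipi_dbi_pipe_update,
	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};

static const struct drm_display_mode mydrv_mode = {
	DRM_SIMPLE_MODE(240, 320, 37, 49),	/* invented panel geometry */
};

DEFINE_DRM_GEM_CMA_FOPS(mydrv_fops);

static struct drm_driver mydrv_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
	.fops = &mydrv_fops,
	.release = mipi_dbi_release,	/* frees the kzalloc'ed mipi_dbi */
	DRM_GEM_CMA_VMAP_DRIVER_OPS,
	.name = "mydrv",
	.desc = "hypothetical MIPI DBI panel",
};

static int mydrv_probe(struct spi_device *spi)
{
	struct drm_device *drm;
	struct mipi_dbi *mipi;
	int ret;

	/* kzalloc(), not devm: the ->release callback owns the free. */
	mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
	if (!mipi)
		return -ENOMEM;

	drm = &mipi->drm;
	ret = devm_drm_dev_init(&spi->dev, drm, &mydrv_driver);
	if (ret) {
		kfree(mipi);
		return ret;
	}

	drm_mode_config_init(drm);

	/* NULL D/C gpio means 9-bit Type C1 transfers; most panels pass a gpio_desc. */
	ret = mipi_dbi_spi_init(spi, mipi, NULL);
	if (ret)
		return ret;

	ret = mipi_dbi_init(mipi, &mydrv_pipe_funcs, &mydrv_mode, 0);
	if (ret)
		return ret;

	drm_mode_config_reset(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	spi_set_drvdata(spi, drm);
	drm_fbdev_generic_setup(drm, 32);

	return 0;
}

static int mydrv_remove(struct spi_device *spi)
{
	struct drm_device *drm = spi_get_drvdata(spi);

	drm_dev_unplug(drm);	/* later drm_dev_enter() calls will fail */
	drm_atomic_helper_shutdown(drm);

	return 0;
}

The matching spi_driver registration (id tables, .shutdown, module_spi_driver()) is omitted here; it follows the same lines the hx8357d, ili9225, ili9341, mi0283qt, st7586 and st7735r hunks above add.
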
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index a08766d39eab..c0219ebb4284 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -25,162 +25,6 @@ #include "v3d_drv.h" #include "uapi/drm/v3d_drm.h" -/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps - * it for DMA. - */ -static int -v3d_bo_get_pages(struct v3d_bo *bo) -{ - struct drm_gem_object *obj = &bo->base; - struct drm_device *dev = obj->dev; - int npages = obj->size >> PAGE_SHIFT; - int ret = 0; - - mutex_lock(&bo->lock); - if (bo->pages_refcount++ != 0) - goto unlock; - - if (!obj->import_attach) { - bo->pages = drm_gem_get_pages(obj); - if (IS_ERR(bo->pages)) { - ret = PTR_ERR(bo->pages); - goto unlock; - } - - bo->sgt = drm_prime_pages_to_sg(bo->pages, npages); - if (IS_ERR(bo->sgt)) { - ret = PTR_ERR(bo->sgt); - goto put_pages; - } - - /* Map the pages for use by the GPU. */ - dma_map_sg(dev->dev, bo->sgt->sgl, - bo->sgt->nents, DMA_BIDIRECTIONAL); - } else { - bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL); - if (!bo->pages) - goto put_pages; - - drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages, - NULL, npages); - - /* Note that dma-bufs come in mapped. */ - } - - mutex_unlock(&bo->lock); - - return 0; - -put_pages: - drm_gem_put_pages(obj, bo->pages, true, true); - bo->pages = NULL; -unlock: - bo->pages_refcount--; - mutex_unlock(&bo->lock); - return ret; -} - -static void -v3d_bo_put_pages(struct v3d_bo *bo) -{ - struct drm_gem_object *obj = &bo->base; - - mutex_lock(&bo->lock); - if (--bo->pages_refcount == 0) { - if (!obj->import_attach) { - dma_unmap_sg(obj->dev->dev, bo->sgt->sgl, - bo->sgt->nents, DMA_BIDIRECTIONAL); - sg_free_table(bo->sgt); - kfree(bo->sgt); - drm_gem_put_pages(obj, bo->pages, true, true); - } else { - kfree(bo->pages); - } - } - mutex_unlock(&bo->lock); -} - -static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev, - size_t unaligned_size) -{ - struct v3d_dev *v3d = to_v3d_dev(dev); - struct drm_gem_object *obj; - struct v3d_bo *bo; - size_t size = roundup(unaligned_size, PAGE_SIZE); - int ret; - - if (size == 0) - return ERR_PTR(-EINVAL); - - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (!bo) - return ERR_PTR(-ENOMEM); - obj = &bo->base; - - INIT_LIST_HEAD(&bo->vmas); - INIT_LIST_HEAD(&bo->unref_head); - mutex_init(&bo->lock); - - ret = drm_gem_object_init(dev, obj, size); - if (ret) - goto free_bo; - - spin_lock(&v3d->mm_lock); - ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node, - obj->size >> PAGE_SHIFT, - GMP_GRANULARITY >> PAGE_SHIFT, 0, 0); - spin_unlock(&v3d->mm_lock); - if (ret) - goto free_obj; - - return bo; - -free_obj: - drm_gem_object_release(obj); -free_bo: - kfree(bo); - return ERR_PTR(ret); -} - -struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, - size_t unaligned_size) -{ - struct v3d_dev *v3d = to_v3d_dev(dev); - struct drm_gem_object *obj; - struct v3d_bo *bo; - int ret; - - bo = v3d_bo_create_struct(dev, unaligned_size); - if (IS_ERR(bo)) - return bo; - obj = &bo->base; - - bo->resv = &bo->_resv; - reservation_object_init(bo->resv); - - ret = v3d_bo_get_pages(bo); - if (ret) - goto free_mm; - - v3d_mmu_insert_ptes(bo); - - mutex_lock(&v3d->bo_lock); - v3d->bo_stats.num_allocated++; - v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT; - mutex_unlock(&v3d->bo_lock); - - return bo; - -free_mm: - spin_lock(&v3d->mm_lock); - drm_mm_remove_node(&bo->node); - spin_unlock(&v3d->mm_lock); - - drm_gem_object_release(obj); - kfree(bo); - 
return ERR_PTR(ret); -} - /* Called DRM core on the last userspace/kernel unreference of the * BO. */ @@ -189,92 +33,116 @@ void v3d_free_object(struct drm_gem_object *obj) struct v3d_dev *v3d = to_v3d_dev(obj->dev); struct v3d_bo *bo = to_v3d_bo(obj); + v3d_mmu_remove_ptes(bo); + mutex_lock(&v3d->bo_lock); v3d->bo_stats.num_allocated--; v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT; mutex_unlock(&v3d->bo_lock); - reservation_object_fini(&bo->_resv); - - v3d_bo_put_pages(bo); - - if (obj->import_attach) - drm_prime_gem_destroy(obj, bo->sgt); - - v3d_mmu_remove_ptes(bo); spin_lock(&v3d->mm_lock); drm_mm_remove_node(&bo->node); spin_unlock(&v3d->mm_lock); - mutex_destroy(&bo->lock); + /* GPU execution may have dirtied any pages in the BO. */ + bo->base.pages_mark_dirty_on_put = true; - drm_gem_object_release(obj); - kfree(bo); + drm_gem_shmem_free_object(obj); } -struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj) +static const struct drm_gem_object_funcs v3d_gem_funcs = { + .free = v3d_free_object, + .print_info = drm_gem_shmem_print_info, + .pin = drm_gem_shmem_pin, + .unpin = drm_gem_shmem_unpin, + .get_sg_table = drm_gem_shmem_get_sg_table, + .vmap = drm_gem_shmem_vmap, + .vunmap = drm_gem_shmem_vunmap, + .vm_ops = &drm_gem_shmem_vm_ops, +}; + +/* gem_create_object function for allocating a BO struct and doing + * early setup. + */ +struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size) { - struct v3d_bo *bo = to_v3d_bo(obj); + struct v3d_bo *bo; + struct drm_gem_object *obj; - return bo->resv; -} + if (size == 0) + return NULL; -static void -v3d_set_mmap_vma_flags(struct vm_area_struct *vma) -{ - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags |= VM_MIXEDMAP; - vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); -} + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return NULL; + obj = &bo->base.base; -vm_fault_t v3d_gem_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_gem_object *obj = vma->vm_private_data; - struct v3d_bo *bo = to_v3d_bo(obj); - pfn_t pfn; - pgoff_t pgoff; + obj->funcs = &v3d_gem_funcs; - /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV); + INIT_LIST_HEAD(&bo->unref_head); - return vmf_insert_mixed(vma, vmf->address, pfn); + return &bo->base.base; } -int v3d_mmap(struct file *filp, struct vm_area_struct *vma) +static int +v3d_bo_create_finish(struct drm_gem_object *obj) { + struct v3d_dev *v3d = to_v3d_dev(obj->dev); + struct v3d_bo *bo = to_v3d_bo(obj); + struct sg_table *sgt; int ret; - ret = drm_gem_mmap(filp, vma); + /* So far we pin the BO in the MMU for its lifetime, so use + * shmem's helper for getting a lifetime sgt. + */ + sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base); + if (IS_ERR(sgt)) + return PTR_ERR(sgt); + + spin_lock(&v3d->mm_lock); + /* Allocate the object's space in the GPU's page tables. + * Inserting PTEs will happen later, but the offset is for the + * lifetime of the BO. + */ + ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node, + obj->size >> PAGE_SHIFT, + GMP_GRANULARITY >> PAGE_SHIFT, 0, 0); + spin_unlock(&v3d->mm_lock); if (ret) return ret; - v3d_set_mmap_vma_flags(vma); + /* Track stats for /debug/dri/n/bo_stats. 
*/ + mutex_lock(&v3d->bo_lock); + v3d->bo_stats.num_allocated++; + v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT; + mutex_unlock(&v3d->bo_lock); - return ret; + v3d_mmu_insert_ptes(bo); + + return 0; } -int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, + size_t unaligned_size) { + struct drm_gem_shmem_object *shmem_obj; + struct v3d_bo *bo; int ret; - ret = drm_gem_mmap_obj(obj, obj->size, vma); - if (ret < 0) - return ret; - - v3d_set_mmap_vma_flags(vma); + shmem_obj = drm_gem_shmem_create(dev, unaligned_size); + if (!shmem_obj) + return NULL; + bo = to_v3d_bo(&shmem_obj->base); - return 0; -} + ret = v3d_bo_create_finish(&shmem_obj->base); + if (ret) + goto free_obj; -struct sg_table * -v3d_prime_get_sg_table(struct drm_gem_object *obj) -{ - struct v3d_bo *bo = to_v3d_bo(obj); - int npages = obj->size >> PAGE_SHIFT; + return bo; - return drm_prime_pages_to_sg(bo->pages, npages); +free_obj: + drm_gem_shmem_free_object(&shmem_obj->base); + return ERR_PTR(ret); } struct drm_gem_object * @@ -283,20 +151,17 @@ v3d_prime_import_sg_table(struct drm_device *dev, struct sg_table *sgt) { struct drm_gem_object *obj; - struct v3d_bo *bo; - - bo = v3d_bo_create_struct(dev, attach->dmabuf->size); - if (IS_ERR(bo)) - return ERR_CAST(bo); - obj = &bo->base; - - bo->resv = attach->dmabuf->resv; + int ret; - bo->sgt = sgt; - obj->import_attach = attach; - v3d_bo_get_pages(bo); + obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt); + if (IS_ERR(obj)) + return obj; - v3d_mmu_insert_ptes(bo); + ret = v3d_bo_create_finish(obj); + if (ret) { + drm_gem_shmem_free_object(obj); + return ERR_PTR(ret); + } return obj; } @@ -319,8 +184,8 @@ int v3d_create_bo_ioctl(struct drm_device *dev, void *data, args->offset = bo->node.start << PAGE_SHIFT; - ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle); - drm_gem_object_put_unlocked(&bo->base); + ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); + drm_gem_object_put_unlocked(&bo->base.base); return ret; } @@ -330,7 +195,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, { struct drm_v3d_mmap_bo *args = data; struct drm_gem_object *gem_obj; - int ret; if (args->flags != 0) { DRM_INFO("unknown mmap_bo flags: %d\n", args->flags); @@ -343,12 +207,10 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, return -ENOENT; } - ret = drm_gem_create_mmap_offset(gem_obj); - if (ret == 0) - args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); + args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); drm_gem_object_put_unlocked(gem_obj); - return ret; + return 0; } int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c index eb2b2d2f8553..a24af2d2f574 100644 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -187,6 +187,11 @@ static int v3d_measure_clock(struct seq_file *m, void *unused) uint32_t cycles; int core = 0; int measure_ms = 1000; + int ret; + + ret = pm_runtime_get_sync(v3d->dev); + if (ret < 0) + return ret; if (v3d->ver >= 40) { V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3, @@ -210,6 +215,9 @@ static int v3d_measure_clock(struct seq_file *m, void *unused) cycles / (measure_ms * 1000), (cycles / (measure_ms * 100)) % 10); + pm_runtime_mark_last_busy(v3d->dev); + pm_runtime_put_autosuspend(v3d->dev); + return 0; } diff --git a/drivers/gpu/drm/v3d/v3d_drv.c 
b/drivers/gpu/drm/v3d/v3d_drv.c index f0afcec72c34..d600628bb5c1 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -7,9 +7,9 @@ * This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs. * For V3D 2.x support, see the VC4 driver. * - * Currently only single-core rendering using the binner and renderer - * is supported. The TFU (texture formatting unit) and V3D 4.x's CSD - * (compute shader dispatch) are not yet supported. + * Currently only single-core rendering using the binner and renderer, + * along with TFU (texture formatting unit) rendering, is supported. + * V3D 4.x's CSD (compute shader dispatch) is not yet supported. */ #include <linux/clk.h> @@ -19,6 +19,7 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/reset.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fb_helper.h> @@ -160,17 +161,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file) kfree(v3d_priv); } -static const struct file_operations v3d_drm_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - .mmap = v3d_mmap, - .poll = drm_poll, - .read = drm_read, - .compat_ioctl = drm_compat_ioctl, - .llseek = noop_llseek, -}; +DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops); /* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP * protection between clients. Note that render nodes would be @@ -188,12 +179,6 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), }; -static const struct vm_operations_struct v3d_vm_ops = { - .fault = v3d_gem_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static struct drm_driver v3d_drm_driver = { .driver_features = (DRIVER_GEM | DRIVER_RENDER | @@ -207,17 +192,11 @@ static struct drm_driver v3d_drm_driver = { .debugfs_init = v3d_debugfs_init, #endif - .gem_free_object_unlocked = v3d_free_object, - .gem_vm_ops = &v3d_vm_ops, - + .gem_create_object = v3d_create_object, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_import = drm_gem_prime_import, - .gem_prime_export = drm_gem_prime_export, - .gem_prime_res_obj = v3d_prime_res_obj, - .gem_prime_get_sg_table = v3d_prime_get_sg_table, .gem_prime_import_sg_table = v3d_prime_import_sg_table, - .gem_prime_mmap = v3d_prime_mmap, + .gem_prime_mmap = drm_gem_prime_mmap, .ioctls = v3d_drm_ioctls, .num_ioctls = ARRAY_SIZE(v3d_drm_ioctls), @@ -265,10 +244,6 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) v3d->pdev = pdev; drm = &v3d->drm; - ret = map_regs(v3d, &v3d->bridge_regs, "bridge"); - if (ret) - goto dev_free; - ret = map_regs(v3d, &v3d->hub_regs, "hub"); if (ret) goto dev_free; @@ -283,6 +258,22 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); WARN_ON(v3d->cores > 1); /* multicore not yet implemented */ + v3d->reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(v3d->reset)) { + ret = PTR_ERR(v3d->reset); + + if (ret == -EPROBE_DEFER) + goto dev_free; + + v3d->reset = NULL; + ret = map_regs(v3d, &v3d->bridge_regs, "bridge"); + if (ret) { + dev_err(dev, + "Failed to get reset control or bridge regs\n"); + goto dev_free; + } + } + if (v3d->ver < 41) { ret = map_regs(v3d, &v3d->gca_regs, "gca"); if (ret) @@ -312,14 +303,18 @@ static int v3d_platform_drm_probe(struct platform_device
*pdev) if (ret) goto dev_destroy; - v3d_irq_init(v3d); + ret = v3d_irq_init(v3d); + if (ret) + goto gem_destroy; ret = drm_dev_register(drm, 0); if (ret) - goto gem_destroy; + goto irq_disable; return 0; +irq_disable: + v3d_irq_disable(v3d); gem_destroy: v3d_gem_destroy(drm); dev_destroy: diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index fdda3037f7af..7b0fe6240f7d 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0+ /* Copyright (C) 2015-2018 Broadcom */ -#include <linux/reservation.h> #include <linux/mm_types.h> #include <drm/drmP.h> #include <drm/drm_encoder.h> #include <drm/drm_gem.h> +#include <drm/drm_gem_shmem_helper.h> #include <drm/gpu_scheduler.h> #include "uapi/drm/v3d_drm.h" @@ -34,6 +34,7 @@ struct v3d_dev { * and revision. */ int ver; + bool single_irq_line; struct device *dev; struct platform_device *pdev; @@ -42,6 +43,7 @@ struct v3d_dev { void __iomem *bridge_regs; void __iomem *gca_regs; struct clk *clk; + struct reset_control *reset; /* Virtual and DMA addresses of the single shared page table. */ volatile u32 *pt; @@ -109,34 +111,15 @@ struct v3d_file_priv { struct drm_sched_entity sched_entity[V3D_MAX_QUEUES]; }; -/* Tracks a mapping of a BO into a per-fd address space */ -struct v3d_vma { - struct v3d_page_table *pt; - struct list_head list; /* entry in v3d_bo.vmas */ -}; - struct v3d_bo { - struct drm_gem_object base; - - struct mutex lock; + struct drm_gem_shmem_object base; struct drm_mm_node node; - u32 pages_refcount; - struct page **pages; - struct sg_table *sgt; - void *vaddr; - - struct list_head vmas; /* list of v3d_vma */ - /* List entry for the BO's position in * v3d_exec_info->unref_list */ struct list_head unref_head; - - /* normally (resv == &_resv) except for imported bo's */ - struct reservation_object *resv; - struct reservation_object _resv; }; static inline struct v3d_bo * @@ -270,6 +253,7 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) } /* v3d_bo.c */ +struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size); void v3d_free_object(struct drm_gem_object *gem_obj); struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, size_t size); @@ -279,11 +263,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -vm_fault_t v3d_gem_fault(struct vm_fault *vmf); -int v3d_mmap(struct file *filp, struct vm_area_struct *vma); -struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj); -int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); -struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); @@ -310,7 +289,7 @@ void v3d_reset(struct v3d_dev *v3d); void v3d_invalidate_caches(struct v3d_dev *v3d); /* v3d_irq.c */ -void v3d_irq_init(struct v3d_dev *v3d); +int v3d_irq_init(struct v3d_dev *v3d); void v3d_irq_enable(struct v3d_dev *v3d); void v3d_irq_disable(struct v3d_dev *v3d); void v3d_irq_reset(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 803f31467ec1..b84d89c7b3fb 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -6,6 +6,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include 
<linux/pm_runtime.h> +#include <linux/reset.h> #include <linux/device.h> #include <linux/io.h> #include <linux/sched/signal.h> @@ -24,7 +25,8 @@ v3d_init_core(struct v3d_dev *v3d, int core) * type. If you want the default behavior, you can still put * "2" in the indirect texture state's output_type field. */ - V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT); + if (v3d->ver < 40) + V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT); /* Whenever we flush the L2T cache, we always want to flush * the whole thing. @@ -69,7 +71,7 @@ v3d_idle_gca(struct v3d_dev *v3d) } static void -v3d_reset_v3d(struct v3d_dev *v3d) +v3d_reset_by_bridge(struct v3d_dev *v3d) { int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION); @@ -89,6 +91,15 @@ v3d_reset_v3d(struct v3d_dev *v3d) V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT); V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0); } +} + +static void +v3d_reset_v3d(struct v3d_dev *v3d) +{ + if (v3d->reset) + reset_control_reset(v3d->reset); + else + v3d_reset_by_bridge(v3d); v3d_init_hw_state(v3d); } @@ -190,7 +201,8 @@ v3d_attach_object_fences(struct v3d_bo **bos, int bo_count, for (i = 0; i < bo_count; i++) { /* XXX: Use shared fences for read-only objects. */ - reservation_object_add_excl_fence(bos[i]->resv, fence); + reservation_object_add_excl_fence(bos[i]->base.base.resv, + fence); } } @@ -199,12 +211,8 @@ v3d_unlock_bo_reservations(struct v3d_bo **bos, int bo_count, struct ww_acquire_ctx *acquire_ctx) { - int i; - - for (i = 0; i < bo_count; i++) - ww_mutex_unlock(&bos[i]->resv->lock); - - ww_acquire_fini(acquire_ctx); + drm_gem_unlock_reservations((struct drm_gem_object **)bos, bo_count, + acquire_ctx); } /* Takes the reservation lock on all the BOs being referenced, so that @@ -219,58 +227,19 @@ v3d_lock_bo_reservations(struct v3d_bo **bos, int bo_count, struct ww_acquire_ctx *acquire_ctx) { - int contended_lock = -1; int i, ret; - ww_acquire_init(acquire_ctx, &reservation_ww_class); - -retry: - if (contended_lock != -1) { - struct v3d_bo *bo = bos[contended_lock]; - - ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, - acquire_ctx); - if (ret) { - ww_acquire_done(acquire_ctx); - return ret; - } - } - - for (i = 0; i < bo_count; i++) { - if (i == contended_lock) - continue; - - ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock, - acquire_ctx); - if (ret) { - int j; - - for (j = 0; j < i; j++) - ww_mutex_unlock(&bos[j]->resv->lock); - - if (contended_lock != -1 && contended_lock >= i) { - struct v3d_bo *bo = bos[contended_lock]; - - ww_mutex_unlock(&bo->resv->lock); - } - - if (ret == -EDEADLK) { - contended_lock = i; - goto retry; - } - - ww_acquire_done(acquire_ctx); - return ret; - } - } - - ww_acquire_done(acquire_ctx); + ret = drm_gem_lock_reservations((struct drm_gem_object **)bos, + bo_count, acquire_ctx); + if (ret) + return ret; /* Reserve space for our shared (read-only) fence references, * before we commit the CL to the hardware. 
*/ for (i = 0; i < bo_count; i++) { - ret = reservation_object_reserve_shared(bos[i]->resv, 1); + ret = reservation_object_reserve_shared(bos[i]->base.base.resv, + 1); if (ret) { v3d_unlock_bo_reservations(bos, bo_count, acquire_ctx); @@ -378,11 +347,11 @@ v3d_exec_cleanup(struct kref *ref) dma_fence_put(exec->render_done_fence); for (i = 0; i < exec->bo_count; i++) - drm_gem_object_put_unlocked(&exec->bo[i]->base); + drm_gem_object_put_unlocked(&exec->bo[i]->base.base); kvfree(exec->bo); list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) { - drm_gem_object_put_unlocked(&bo->base); + drm_gem_object_put_unlocked(&bo->base.base); } pm_runtime_mark_last_busy(v3d->dev); @@ -409,7 +378,7 @@ v3d_tfu_job_cleanup(struct kref *ref) for (i = 0; i < ARRAY_SIZE(job->bo); i++) { if (job->bo[i]) - drm_gem_object_put_unlocked(&job->bo[i]->base); + drm_gem_object_put_unlocked(&job->bo[i]->base.base); } pm_runtime_mark_last_busy(v3d->dev); @@ -429,8 +398,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data, { int ret; struct drm_v3d_wait_bo *args = data; - struct drm_gem_object *gem_obj; - struct v3d_bo *bo; ktime_t start = ktime_get(); u64 delta_ns; unsigned long timeout_jiffies = @@ -439,21 +406,8 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data, if (args->pad != 0) return -EINVAL; - gem_obj = drm_gem_object_lookup(file_priv, args->handle); - if (!gem_obj) { - DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); - return -EINVAL; - } - bo = to_v3d_bo(gem_obj); - - ret = reservation_object_wait_timeout_rcu(bo->resv, - true, true, - timeout_jiffies); - - if (ret == 0) - ret = -ETIME; - else if (ret > 0) - ret = 0; + ret = drm_gem_reservation_object_wait(file_priv, args->handle, + true, timeout_jiffies); /* Decrement the user's timeout, in case we got interrupted * such that the ioctl will be restarted. @@ -468,8 +422,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data, if (ret == -ETIME && args->timeout_ns) ret = -EAGAIN; - drm_gem_object_put_unlocked(gem_obj); - return ret; } diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index 69338da70ddc..b4d6ae81186d 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -27,6 +27,9 @@ V3D_HUB_INT_MMU_CAP | \ V3D_HUB_INT_TFUC)) +static irqreturn_t +v3d_hub_irq(int irq, void *arg); + static void v3d_overflow_mem_work(struct work_struct *work) { @@ -34,12 +37,14 @@ v3d_overflow_mem_work(struct work_struct *work) container_of(work, struct v3d_dev, overflow_mem_work); struct drm_device *dev = &v3d->drm; struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024); + struct drm_gem_object *obj; unsigned long irqflags; if (IS_ERR(bo)) { DRM_ERROR("Couldn't allocate binner overflow mem\n"); return; } + obj = &bo->base.base; /* We lost a race, and our work task came in after the bin job * completed and exited. 
This can happen because the HW @@ -56,15 +61,15 @@ v3d_overflow_mem_work(struct work_struct *work) goto out; } - drm_gem_object_get(&bo->base); + drm_gem_object_get(obj); list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list); spin_unlock_irqrestore(&v3d->job_lock, irqflags); V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT); - V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size); + V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size); out: - drm_gem_object_put_unlocked(&bo->base); + drm_gem_object_put_unlocked(obj); } static irqreturn_t @@ -112,6 +117,12 @@ v3d_irq(int irq, void *arg) if (intsts & V3D_INT_GMPV) dev_err(v3d->dev, "GMP violation\n"); + /* V3D 4.2 wires the hub and core IRQs together, so if we & + * didn't see the common one then check hub for MMU IRQs. + */ + if (v3d->single_irq_line && status == IRQ_NONE) + return v3d_hub_irq(irq, arg); + return status; } @@ -156,10 +167,10 @@ v3d_hub_irq(int irq, void *arg) return status; } -void +int v3d_irq_init(struct v3d_dev *v3d) { - int ret, core; + int irq1, ret, core; INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work); @@ -170,16 +181,37 @@ v3d_irq_init(struct v3d_dev *v3d) V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); - ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0), - v3d_hub_irq, IRQF_SHARED, - "v3d_hub", v3d); - ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1), - v3d_irq, IRQF_SHARED, - "v3d_core0", v3d); - if (ret) - dev_err(v3d->dev, "IRQ setup failed: %d\n", ret); + irq1 = platform_get_irq(v3d->pdev, 1); + if (irq1 == -EPROBE_DEFER) + return irq1; + if (irq1 > 0) { + ret = devm_request_irq(v3d->dev, irq1, + v3d_irq, IRQF_SHARED, + "v3d_core0", v3d); + if (ret) + goto fail; + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0), + v3d_hub_irq, IRQF_SHARED, + "v3d_hub", v3d); + if (ret) + goto fail; + } else { + v3d->single_irq_line = true; + + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0), + v3d_irq, IRQF_SHARED, + "v3d", v3d); + if (ret) + goto fail; + } v3d_irq_enable(v3d); + return 0; + +fail: + if (ret != -EPROBE_DEFER) + dev_err(v3d->dev, "IRQ setup failed: %d\n", ret); + return ret; } void diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c index b00f97c31b70..7a21f1787ab1 100644 --- a/drivers/gpu/drm/v3d/v3d_mmu.c +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -83,13 +83,14 @@ int v3d_mmu_set_page_table(struct v3d_dev *v3d) void v3d_mmu_insert_ptes(struct v3d_bo *bo) { - struct v3d_dev *v3d = to_v3d_dev(bo->base.dev); + struct drm_gem_shmem_object *shmem_obj = &bo->base; + struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev); u32 page = bo->node.start; u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; unsigned int count; struct scatterlist *sgl; - for_each_sg(bo->sgt->sgl, sgl, bo->sgt->nents, count) { + for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) { u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT; u32 pte = page_prot | page_address; u32 i; @@ -102,7 +103,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo) } WARN_ON_ONCE(page - bo->node.start != - bo->base.size >> V3D_MMU_PAGE_SHIFT); + shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT); if (v3d_mmu_flush_all(v3d)) dev_err(v3d->dev, "MMU flush timeout\n"); @@ -110,8 +111,8 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo) void v3d_mmu_remove_ptes(struct v3d_bo *bo) { - struct v3d_dev *v3d = to_v3d_dev(bo->base.dev); - u32 npages = bo->base.size >> V3D_MMU_PAGE_SHIFT; + struct v3d_dev *v3d = 
to_v3d_dev(bo->base.base.dev); + u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT; u32 page; for (page = bo->node.start; page < bo->node.start + npages; page++) diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h index 6ccdee9d47bd..8e88af237610 100644 --- a/drivers/gpu/drm/v3d/v3d_regs.h +++ b/drivers/gpu/drm/v3d/v3d_regs.h @@ -216,6 +216,8 @@ # define V3D_IDENT2_BCG_INT BIT(28) #define V3D_CTL_MISCCFG 0x00018 +# define V3D_CTL_MISCCFG_QRMAXCNT_MASK V3D_MASK(3, 1) +# define V3D_CTL_MISCCFG_QRMAXCNT_SHIFT 1 # define V3D_MISCCFG_OVRTMUOUT BIT(0) #define V3D_CTL_L2CACTL 0x00020 diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 4704b2df3688..d0c68b7c8b41 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -231,20 +231,17 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) mutex_lock(&v3d->reset_lock); /* block scheduler */ - for (q = 0; q < V3D_MAX_QUEUES; q++) { - struct drm_gpu_scheduler *sched = &v3d->queue[q].sched; - - drm_sched_stop(sched); + for (q = 0; q < V3D_MAX_QUEUES; q++) + drm_sched_stop(&v3d->queue[q].sched); - if(sched_job) - drm_sched_increase_karma(sched_job); - } + if (sched_job) + drm_sched_increase_karma(sched_job); /* get the GPU back into the init state */ v3d_reset(v3d); for (q = 0; q < V3D_MAX_QUEUES; q++) - drm_sched_resubmit_jobs(sched_job->sched); + drm_sched_resubmit_jobs(&v3d->queue[q].sched); /* Unblock schedulers and restart their jobs. */ for (q = 0; q < V3D_MAX_QUEUES; q++) { diff --git a/drivers/gpu/drm/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig new file mode 100644 index 000000000000..1f4182e2e980 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/Kconfig @@ -0,0 +1,15 @@ +config DRM_VBOXVIDEO + tristate "Virtual Box Graphics Card" + depends on DRM && X86 && PCI + select DRM_KMS_HELPER + select DRM_TTM + select GENERIC_ALLOCATOR + help + This is a KMS driver for the virtual Graphics Card used in + Virtual Box virtual machines. + + Although it is possible to build this driver built-in to the + kernel, it is advised to build it as a module, so that it can + be updated independently of the kernel. Select M to build this + driver as a module and add support for these devices via drm/kms + interfaces. diff --git a/drivers/gpu/drm/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile new file mode 100644 index 000000000000..1224f313af0c --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \ + vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \ + vbox_mode.o vbox_prime.o vbox_ttm.o + +obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c new file mode 100644 index 000000000000..361d3193258e --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: MIT +/* Copyright (C) 2006-2017 Oracle Corporation */ + +#include <linux/vbox_err.h> +#include "vbox_drv.h" +#include "vboxvideo_guest.h" +#include "vboxvideo_vbe.h" +#include "hgsmi_channels.h" +#include "hgsmi_ch_setup.h" + +/** + * Inform the host of the location of the host flags in VRAM via an HGSMI cmd. + * Return: 0 or negative errno value. + * @ctx: The context of the guest heap to use. + * @location: The offset chosen for the flags within guest VRAM. 
+ */ +int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location) +{ + struct hgsmi_buffer_location *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI, + HGSMI_CC_HOST_FLAGS_LOCATION); + if (!p) + return -ENOMEM; + + p->buf_location = location; + p->buf_len = sizeof(struct hgsmi_host_flags); + + hgsmi_buffer_submit(ctx, p); + hgsmi_buffer_free(ctx, p); + + return 0; +} + +/** + * Notify the host of HGSMI-related guest capabilities via an HGSMI command. + * Return: 0 or negative errno value. + * @ctx: The context of the guest heap to use. + * @caps: The capabilities to report, see vbva_caps. + */ +int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps) +{ + struct vbva_caps *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS); + if (!p) + return -ENOMEM; + + p->rc = VERR_NOT_IMPLEMENTED; + p->caps = caps; + + hgsmi_buffer_submit(ctx, p); + + WARN_ON_ONCE(p->rc < 0); + + hgsmi_buffer_free(ctx, p); + + return 0; +} + +int hgsmi_test_query_conf(struct gen_pool *ctx) +{ + u32 value = 0; + int ret; + + ret = hgsmi_query_conf(ctx, U32_MAX, &value); + if (ret) + return ret; + + return value == U32_MAX ? 0 : -EIO; +} + +/** + * Query the host for an HGSMI configuration parameter via an HGSMI command. + * Return: 0 or negative errno value. + * @ctx: The context containing the heap used. + * @index: The index of the parameter to query. + * @value_ret: Where to store the value of the parameter on success. + */ +int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret) +{ + struct vbva_conf32 *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, + VBVA_QUERY_CONF32); + if (!p) + return -ENOMEM; + + p->index = index; + p->value = U32_MAX; + + hgsmi_buffer_submit(ctx, p); + + *value_ret = p->value; + + hgsmi_buffer_free(ctx, p); + + return 0; +} + +/** + * Pass the host a new mouse pointer shape via an HGSMI command. + * Return: 0 or negative errno value. + * @ctx: The context containing the heap to be used. + * @flags: Cursor flags. + * @hot_x: Horizontal position of the hot spot. + * @hot_y: Vertical position of the hot spot. + * @width: Width in pixels of the cursor. + * @height: Height in pixels of the cursor. + * @pixels: Pixel data, @see VMMDevReqMousePointer for the format. + * @len: Size in bytes of the pixel data. + */ +int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags, + u32 hot_x, u32 hot_y, u32 width, u32 height, + u8 *pixels, u32 len) +{ + struct vbva_mouse_pointer_shape *p; + u32 pixel_len = 0; + int rc; + + if (flags & VBOX_MOUSE_POINTER_SHAPE) { + /* + * Size of the pointer data: + * sizeof (AND mask) + sizeof (XOR_MASK) + */ + pixel_len = ((((width + 7) / 8) * height + 3) & ~3) + + width * 4 * height; + if (pixel_len > len) + return -EINVAL; + + /* + * If shape is supplied, then always create the pointer visible. 
+ * See comments in 'vboxUpdatePointerShape' + */ + flags |= VBOX_MOUSE_POINTER_VISIBLE; + } + + p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA, + VBVA_MOUSE_POINTER_SHAPE); + if (!p) + return -ENOMEM; + + p->result = VINF_SUCCESS; + p->flags = flags; + p->hot_x = hot_x; + p->hot_y = hot_y; + p->width = width; + p->height = height; + if (pixel_len) + memcpy(p->data, pixels, pixel_len); + + hgsmi_buffer_submit(ctx, p); + + switch (p->result) { + case VINF_SUCCESS: + rc = 0; + break; + case VERR_NO_MEMORY: + rc = -ENOMEM; + break; + case VERR_NOT_SUPPORTED: + rc = -EBUSY; + break; + default: + rc = -EINVAL; + } + + hgsmi_buffer_free(ctx, p); + + return rc; +} + +/** + * Report the guest cursor position. The host may wish to use this information + * to re-position its own cursor (though this is currently unlikely). The + * current host cursor position is returned. + * Return: 0 or negative errno value. + * @ctx: The context containing the heap used. + * @report_position: Are we reporting a position? + * @x: Guest cursor X position. + * @y: Guest cursor Y position. + * @x_host: Host cursor X position is stored here. Optional. + * @y_host: Host cursor Y position is stored here. Optional. + */ +int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position, + u32 x, u32 y, u32 *x_host, u32 *y_host) +{ + struct vbva_cursor_position *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, + VBVA_CURSOR_POSITION); + if (!p) + return -ENOMEM; + + p->report_position = report_position; + p->x = x; + p->y = y; + + hgsmi_buffer_submit(ctx, p); + + *x_host = p->x; + *y_host = p->y; + + hgsmi_buffer_free(ctx, p); + + return 0; +} diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h b/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h new file mode 100644 index 000000000000..4e93418d6a13 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2006-2017 Oracle Corporation */ + +#ifndef __HGSMI_CH_SETUP_H__ +#define __HGSMI_CH_SETUP_H__ + +/* + * Tell the host the location of hgsmi_host_flags structure, where the host + * can write information about pending buffers, etc, and which can be quickly + * polled by the guest without a need for port IO. + */ +#define HGSMI_CC_HOST_FLAGS_LOCATION 0 + +struct hgsmi_buffer_location { + u32 buf_location; + u32 buf_len; +} __packed; + +/* HGSMI setup and configuration data structures. */ + +#define HGSMIHOSTFLAGS_COMMANDS_PENDING 0x01u +#define HGSMIHOSTFLAGS_IRQ 0x02u +#define HGSMIHOSTFLAGS_VSYNC 0x10u +#define HGSMIHOSTFLAGS_HOTPLUG 0x20u +#define HGSMIHOSTFLAGS_CURSOR_CAPABILITIES 0x40u + +struct hgsmi_host_flags { + u32 host_flags; + u32 reserved[3]; +} __packed; + +#endif diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_channels.h b/drivers/gpu/drm/vboxvideo/hgsmi_channels.h new file mode 100644 index 000000000000..9b83f4ff3faf --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/hgsmi_channels.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2006-2017 Oracle Corporation */ + +#ifndef __HGSMI_CHANNELS_H__ +#define __HGSMI_CHANNELS_H__ + +/* + * Each channel has an 8 bit identifier. There are a number of predefined + * (hardcoded) channels. + * + * HGSMI_CH_HGSMI channel can be used to map a string channel identifier + * to a free 16 bit numerical value. Values are allocated in range + * [HGSMI_CH_STRING_FIRST;HGSMI_CH_STRING_LAST].
+ */ + +/* A reserved channel value */ +#define HGSMI_CH_RESERVED 0x00 +/* HGCMI: setup and configuration */ +#define HGSMI_CH_HGSMI 0x01 +/* Graphics: VBVA */ +#define HGSMI_CH_VBVA 0x02 +/* Graphics: Seamless with a single guest region */ +#define HGSMI_CH_SEAMLESS 0x03 +/* Graphics: Seamless with separate host windows */ +#define HGSMI_CH_SEAMLESS2 0x04 +/* Graphics: OpenGL HW acceleration */ +#define HGSMI_CH_OPENGL 0x05 + +/* The first channel index to be used for string mappings (inclusive) */ +#define HGSMI_CH_STRING_FIRST 0x20 +/* The last channel index for string mappings (inclusive) */ +#define HGSMI_CH_STRING_LAST 0xff + +#endif diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_defs.h b/drivers/gpu/drm/vboxvideo/hgsmi_defs.h new file mode 100644 index 000000000000..6c8df1cdb087 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/hgsmi_defs.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2006-2017 Oracle Corporation */ + +#ifndef __HGSMI_DEFS_H__ +#define __HGSMI_DEFS_H__ + +/* Buffer sequence type mask. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_MASK 0x03 +/* Single buffer, not a part of a sequence. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_SINGLE 0x00 +/* The first buffer in a sequence. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_START 0x01 +/* A middle buffer in a sequence. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE 0x02 +/* The last buffer in a sequence. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_END 0x03 + +/* 16 bytes buffer header. */ +struct hgsmi_buffer_header { + u32 data_size; /* Size of data that follows the header. */ + u8 flags; /* HGSMI_BUFFER_HEADER_F_* */ + u8 channel; /* The channel the data must be routed to. */ + u16 channel_info; /* Opaque to the HGSMI, used by the channel. */ + + union { + /* Opaque placeholder to make the union 8 bytes. */ + u8 header_data[8]; + + /* HGSMI_BUFFER_HEADER_F_SEQ_SINGLE */ + struct { + u32 reserved1; /* A reserved field, initialize to 0. */ + u32 reserved2; /* A reserved field, initialize to 0. */ + } buffer; + + /* HGSMI_BUFFER_HEADER_F_SEQ_START */ + struct { + /* Must be the same for all buffers in the sequence. */ + u32 sequence_number; + /* The total size of the sequence. */ + u32 sequence_size; + } sequence_start; + + /* + * HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE and + * HGSMI_BUFFER_HEADER_F_SEQ_END + */ + struct { + /* Must be the same for all buffers in the sequence. */ + u32 sequence_number; + /* Data offset in the entire sequence. */ + u32 sequence_offset; + } sequence_continue; + } u; +} __packed; + +/* 8 bytes buffer tail. */ +struct hgsmi_buffer_tail { + /* Reserved, must be initialized to 0. */ + u32 reserved; + /* + * One-at-a-Time Hash: http://www.burtleburtle.net/bob/hash/doobs.html + * Over the header, offset and for first 4 bytes of the tail. + */ + u32 checksum; +} __packed; + +/* + * The size of the array of channels. Array indexes are u8. + * Note: the value must not be changed. + */ +#define HGSMI_NUMBER_OF_CHANNELS 0x100 + +#endif diff --git a/drivers/gpu/drm/vboxvideo/modesetting.c b/drivers/gpu/drm/vboxvideo/modesetting.c new file mode 100644 index 000000000000..7580b9002379 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/modesetting.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: MIT +/* Copyright (C) 2006-2017 Oracle Corporation */ + +#include <linux/vbox_err.h> +#include "vbox_drv.h" +#include "vboxvideo_guest.h" +#include "vboxvideo_vbe.h" +#include "hgsmi_channels.h" + +/** + * Set a video mode via an HGSMI request. 
The views must have been + * initialised first using @a VBoxHGSMISendViewInfo and if the mode is being + * set on the first display then it must be set first using registers. + * @ctx: The context containing the heap to use. + * @display: The screen number. + * @origin_x: The horizontal displacement relative to the first screen. + * @origin_y: The vertical displacement relative to the first screen. + * @start_offset: The offset of the visible area of the framebuffer + * relative to the framebuffer start. + * @pitch: The offset in bytes between the starts of two adjacent + * scan lines in video RAM. + * @width: The mode width. + * @height: The mode height. + * @bpp: The colour depth of the mode. + * @flags: Flags. + */ +void hgsmi_process_display_info(struct gen_pool *ctx, u32 display, + s32 origin_x, s32 origin_y, u32 start_offset, + u32 pitch, u32 width, u32 height, + u16 bpp, u16 flags) +{ + struct vbva_infoscreen *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, + VBVA_INFO_SCREEN); + if (!p) + return; + + p->view_index = display; + p->origin_x = origin_x; + p->origin_y = origin_y; + p->start_offset = start_offset; + p->line_size = pitch; + p->width = width; + p->height = height; + p->bits_per_pixel = bpp; + p->flags = flags; + + hgsmi_buffer_submit(ctx, p); + hgsmi_buffer_free(ctx, p); +} + +/** + * Report the rectangle relative to which absolute pointer events should be + * expressed. This information remains valid until the next VBVA resize event + * for any screen, at which time it is reset to the bounding rectangle of all + * virtual screens. + * Return: 0 or negative errno value. + * @ctx: The context containing the heap to use. + * @origin_x: Upper left X co-ordinate relative to the first screen. + * @origin_y: Upper left Y co-ordinate relative to the first screen. + * @width: Rectangle width. + * @height: Rectangle height. + */ +int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y, + u32 width, u32 height) +{ + struct vbva_report_input_mapping *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, + VBVA_REPORT_INPUT_MAPPING); + if (!p) + return -ENOMEM; + + p->x = origin_x; + p->y = origin_y; + p->cx = width; + p->cy = height; + + hgsmi_buffer_submit(ctx, p); + hgsmi_buffer_free(ctx, p); + + return 0; +} + +/** + * Get most recent video mode hints. + * Return: 0 or negative errno value. + * @ctx: The context containing the heap to use. + * @screens: The number of screens to query hints for, starting at 0. + * @hints: Array of vbva_modehint structures for receiving the hints.
+ */ +int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens, + struct vbva_modehint *hints) +{ + struct vbva_query_mode_hints *p; + size_t size; + + if (WARN_ON(!hints)) + return -EINVAL; + + size = screens * sizeof(struct vbva_modehint); + p = hgsmi_buffer_alloc(ctx, sizeof(*p) + size, HGSMI_CH_VBVA, + VBVA_QUERY_MODE_HINTS); + if (!p) + return -ENOMEM; + + p->hints_queried_count = screens; + p->hint_structure_guest_size = sizeof(struct vbva_modehint); + p->rc = VERR_NOT_SUPPORTED; + + hgsmi_buffer_submit(ctx, p); + + if (p->rc < 0) { + hgsmi_buffer_free(ctx, p); + return -EIO; + } + + memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints), size); + hgsmi_buffer_free(ctx, p); + + return 0; +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c new file mode 100644 index 000000000000..fb6a0f0b8167 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_drv.c + * Copyright 2012 Red Hat Inc. + * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ +#include <linux/console.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/vt_kern.h> + +#include <drm/drm_crtc_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_file.h> +#include <drm/drm_ioctl.h> + +#include "vbox_drv.h" + +static int vbox_modeset = -1; + +MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); +module_param_named(modeset, vbox_modeset, int, 0400); + +static struct drm_driver driver; + +static const struct pci_device_id pciidlist[] = { + { PCI_DEVICE(0x80ee, 0xbeef) }, + { } +}; +MODULE_DEVICE_TABLE(pci, pciidlist); + +static struct drm_fb_helper_funcs vbox_fb_helper_funcs = { + .fb_probe = vboxfb_create, +}; + +static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct vbox_private *vbox; + int ret = 0; + + if (!vbox_check_supported(VBE_DISPI_ID_HGSMI)) + return -ENODEV; + + vbox = kzalloc(sizeof(*vbox), GFP_KERNEL); + if (!vbox) + return -ENOMEM; + + ret = drm_dev_init(&vbox->ddev, &driver, &pdev->dev); + if (ret) { + kfree(vbox); + return ret; + } + + vbox->ddev.pdev = pdev; + vbox->ddev.dev_private = vbox; + pci_set_drvdata(pdev, vbox); + mutex_init(&vbox->hw_mutex); + + ret = pci_enable_device(pdev); + if (ret) + goto err_dev_put; + + ret = vbox_hw_init(vbox); + if (ret) + goto err_pci_disable; + + ret = vbox_mm_init(vbox); + if (ret) + goto err_hw_fini; + + ret = vbox_mode_init(vbox); + if (ret) + goto err_mm_fini; + + ret = vbox_irq_init(vbox); + if (ret) + goto err_mode_fini; + + ret = drm_fb_helper_fbdev_setup(&vbox->ddev, &vbox->fb_helper, + &vbox_fb_helper_funcs, 32, + vbox->num_crtcs); + if (ret) + goto err_irq_fini; + + ret = drm_dev_register(&vbox->ddev, 0); + if (ret) + goto err_fbdev_fini; + + return 0; + +err_fbdev_fini: + vbox_fbdev_fini(vbox); +err_irq_fini: + vbox_irq_fini(vbox); +err_mode_fini: + vbox_mode_fini(vbox); +err_mm_fini: + vbox_mm_fini(vbox); +err_hw_fini: + vbox_hw_fini(vbox); +err_pci_disable: + pci_disable_device(pdev); +err_dev_put: + drm_dev_put(&vbox->ddev); + return ret; +} + +static void vbox_pci_remove(struct pci_dev *pdev) +{ + struct vbox_private *vbox = pci_get_drvdata(pdev); + + drm_dev_unregister(&vbox->ddev); + vbox_fbdev_fini(vbox); + vbox_irq_fini(vbox); + vbox_mode_fini(vbox); + vbox_mm_fini(vbox); + vbox_hw_fini(vbox); + drm_dev_put(&vbox->ddev); +} + 
+#ifdef CONFIG_PM_SLEEP +static int vbox_pm_suspend(struct device *dev) +{ + struct vbox_private *vbox = dev_get_drvdata(dev); + int error; + + error = drm_mode_config_helper_suspend(&vbox->ddev); + if (error) + return error; + + pci_save_state(vbox->ddev.pdev); + pci_disable_device(vbox->ddev.pdev); + pci_set_power_state(vbox->ddev.pdev, PCI_D3hot); + + return 0; +} + +static int vbox_pm_resume(struct device *dev) +{ + struct vbox_private *vbox = dev_get_drvdata(dev); + + if (pci_enable_device(vbox->ddev.pdev)) + return -EIO; + + return drm_mode_config_helper_resume(&vbox->ddev); +} + +static int vbox_pm_freeze(struct device *dev) +{ + struct vbox_private *vbox = dev_get_drvdata(dev); + + return drm_mode_config_helper_suspend(&vbox->ddev); +} + +static int vbox_pm_thaw(struct device *dev) +{ + struct vbox_private *vbox = dev_get_drvdata(dev); + + return drm_mode_config_helper_resume(&vbox->ddev); +} + +static int vbox_pm_poweroff(struct device *dev) +{ + struct vbox_private *vbox = dev_get_drvdata(dev); + + return drm_mode_config_helper_suspend(&vbox->ddev); +} + +static const struct dev_pm_ops vbox_pm_ops = { + .suspend = vbox_pm_suspend, + .resume = vbox_pm_resume, + .freeze = vbox_pm_freeze, + .thaw = vbox_pm_thaw, + .poweroff = vbox_pm_poweroff, + .restore = vbox_pm_resume, +}; +#endif + +static struct pci_driver vbox_pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = vbox_pci_probe, + .remove = vbox_pci_remove, +#ifdef CONFIG_PM_SLEEP + .driver.pm = &vbox_pm_ops, +#endif +}; + +static const struct file_operations vbox_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, + .mmap = vbox_mmap, + .poll = drm_poll, + .read = drm_read, +}; + +static struct drm_driver driver = { + .driver_features = + DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, + + .lastclose = drm_fb_helper_lastclose, + + .fops = &vbox_fops, + .irq_handler = vbox_irq_handler, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, + + .gem_free_object_unlocked = vbox_gem_free_object, + .dumb_create = vbox_dumb_create, + .dumb_map_offset = vbox_dumb_mmap_offset, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_pin = vbox_gem_prime_pin, + .gem_prime_unpin = vbox_gem_prime_unpin, + .gem_prime_get_sg_table = vbox_gem_prime_get_sg_table, + .gem_prime_import_sg_table = vbox_gem_prime_import_sg_table, + .gem_prime_vmap = vbox_gem_prime_vmap, + .gem_prime_vunmap = vbox_gem_prime_vunmap, + .gem_prime_mmap = vbox_gem_prime_mmap, +}; + +static int __init vbox_init(void) +{ +#ifdef CONFIG_VGA_CONSOLE + if (vgacon_text_force() && vbox_modeset == -1) + return -EINVAL; +#endif + + if (vbox_modeset == 0) + return -EINVAL; + + return pci_register_driver(&vbox_pci_driver); +} + +static void __exit vbox_exit(void) +{ + pci_unregister_driver(&vbox_pci_driver); +} + +module_init(vbox_init); +module_exit(vbox_exit); + +MODULE_AUTHOR("Oracle Corporation"); +MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h new file mode 100644 index 000000000000..0ecd0a44176e --- /dev/null +++ 
b/drivers/gpu/drm/vboxvideo/vbox_drv.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_drv.h + * Copyright 2012 Red Hat Inc. + * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ +#ifndef __VBOX_DRV_H__ +#define __VBOX_DRV_H__ + +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/irqreturn.h> +#include <linux/string.h> + +#include <drm/drm_encoder.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_gem.h> + +#include <drm/ttm/ttm_bo_api.h> +#include <drm/ttm/ttm_bo_driver.h> +#include <drm/ttm/ttm_placement.h> +#include <drm/ttm/ttm_memory.h> +#include <drm/ttm/ttm_module.h> + +#include "vboxvideo_guest.h" +#include "vboxvideo_vbe.h" +#include "hgsmi_ch_setup.h" + +#define DRIVER_NAME "vboxvideo" +#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card" +#define DRIVER_DATE "20130823" + +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +#define VBOX_MAX_CURSOR_WIDTH 64 +#define VBOX_MAX_CURSOR_HEIGHT 64 +#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT) +#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8) + +#define VBOX_MAX_SCREENS 32 + +#define GUEST_HEAP_OFFSET(vbox) ((vbox)->full_vram_size - \ + VBVA_ADAPTER_INFORMATION_SIZE) +#define GUEST_HEAP_SIZE VBVA_ADAPTER_INFORMATION_SIZE +#define GUEST_HEAP_USABLE_SIZE (VBVA_ADAPTER_INFORMATION_SIZE - \ + sizeof(struct hgsmi_host_flags)) +#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE + +struct vbox_framebuffer { + struct drm_framebuffer base; + struct drm_gem_object *obj; +}; + +struct vbox_private { + /* Must be first; or we must define our own release callback */ + struct drm_device ddev; + struct drm_fb_helper fb_helper; + struct vbox_framebuffer afb; + + u8 __iomem *guest_heap; + u8 __iomem *vbva_buffers; + struct gen_pool *guest_pool; + struct vbva_buf_ctx *vbva_info; + bool any_pitch; + u32 num_crtcs; + /* Amount of available VRAM, including space used for buffers. */ + u32 full_vram_size; + /* Amount of available VRAM, not including space used for buffers. */ + u32 available_vram_size; + /* Array of structures for receiving mode hints. */ + struct vbva_modehint *last_mode_hints; + + int fb_mtrr; + + struct { + struct ttm_bo_device bdev; + } ttm; + + struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */ + struct work_struct hotplug_work; + u32 input_mapping_width; + u32 input_mapping_height; + /* + * Is user-space using an X.Org-style layout of one large frame-buffer + * encompassing all screen ones or is the fbdev console active? + */ + bool single_framebuffer; + u8 cursor_data[CURSOR_DATA_SIZE]; +}; + +#undef CURSOR_PIXEL_COUNT +#undef CURSOR_DATA_SIZE + +struct vbox_gem_object; + +struct vbox_connector { + struct drm_connector base; + char name[32]; + struct vbox_crtc *vbox_crtc; + struct { + u32 width; + u32 height; + bool disconnected; + } mode_hint; +}; + +struct vbox_crtc { + struct drm_crtc base; + bool disconnected; + unsigned int crtc_id; + u32 fb_offset; + bool cursor_enabled; + u32 x_hint; + u32 y_hint; + /* + * When setting a mode we not only pass the mode to the hypervisor, + * but also information on how to map / translate input coordinates + * for the emulated USB tablet. This input-mapping may change when + * the mode on *another* crtc changes. 
+ * + * This means that sometimes we must do a modeset on other crtc-s than + * the one being changed to update the input-mapping. Including crtc-s + * which may be disabled inside the guest (shown as a black window + * on the host unless closed by the user). + * + * With atomic modesetting the mode-info of disabled crtcs gets zeroed, + * yet we need it when updating the input-map to avoid resizing the + * window as a side effect of a mode_set on another crtc. Therefore we + * cache the info of the last mode below. + */ + u32 width; + u32 height; + u32 x; + u32 y; +}; + +struct vbox_encoder { + struct drm_encoder base; +}; + +#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base) +#define to_vbox_connector(x) container_of(x, struct vbox_connector, base) +#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base) +#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base) + +bool vbox_check_supported(u16 id); +int vbox_hw_init(struct vbox_private *vbox); +void vbox_hw_fini(struct vbox_private *vbox); + +int vbox_mode_init(struct vbox_private *vbox); +void vbox_mode_fini(struct vbox_private *vbox); + +void vbox_report_caps(struct vbox_private *vbox); + +void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb, + struct drm_clip_rect *rects, + unsigned int num_rects); + +int vbox_framebuffer_init(struct vbox_private *vbox, + struct vbox_framebuffer *vbox_fb, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj); + +int vboxfb_create(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes); +void vbox_fbdev_fini(struct vbox_private *vbox); + +struct vbox_bo { + struct ttm_buffer_object bo; + struct ttm_placement placement; + struct ttm_bo_kmap_obj kmap; + struct drm_gem_object gem; + struct ttm_place placements[3]; + int pin_count; +}; + +#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem) + +static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo) +{ + return container_of(bo, struct vbox_bo, bo); +} + +#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base) + +static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo) +{ + return bo->bo.offset; +} + +int vbox_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + +void vbox_gem_free_object(struct drm_gem_object *obj); +int vbox_dumb_mmap_offset(struct drm_file *file, + struct drm_device *dev, + u32 handle, u64 *offset); + +#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT) + +int vbox_mm_init(struct vbox_private *vbox); +void vbox_mm_fini(struct vbox_private *vbox); + +int vbox_bo_create(struct vbox_private *vbox, int size, int align, + u32 flags, struct vbox_bo **pvboxbo); + +int vbox_gem_create(struct vbox_private *vbox, + u32 size, bool iskernel, struct drm_gem_object **obj); + +int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag); +int vbox_bo_unpin(struct vbox_bo *bo); + +static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait) +{ + int ret; + + ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL); + if (ret) { + if (ret != -ERESTARTSYS && ret != -EBUSY) + DRM_ERROR("reserve failed %p\n", bo); + return ret; + } + return 0; +} + +static inline void vbox_bo_unreserve(struct vbox_bo *bo) +{ + ttm_bo_unreserve(&bo->bo); +} + +void vbox_ttm_placement(struct vbox_bo *bo, int domain); +int vbox_bo_push_sysram(struct vbox_bo *bo); +int vbox_mmap(struct file *filp, struct vm_area_struct *vma); +void *vbox_bo_kmap(struct vbox_bo *bo); +void vbox_bo_kunmap(struct
vbox_bo *bo); + +/* vbox_prime.c */ +int vbox_gem_prime_pin(struct drm_gem_object *obj); +void vbox_gem_prime_unpin(struct drm_gem_object *obj); +struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *vbox_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *table); +void *vbox_gem_prime_vmap(struct drm_gem_object *obj); +void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int vbox_gem_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *area); + +/* vbox_irq.c */ +int vbox_irq_init(struct vbox_private *vbox); +void vbox_irq_fini(struct vbox_private *vbox); +void vbox_report_hotplug(struct vbox_private *vbox); +irqreturn_t vbox_irq_handler(int irq, void *arg); + +/* vbox_hgsmi.c */ +void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size, + u8 channel, u16 channel_info); +void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf); +int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf); + +static inline void vbox_write_ioport(u16 index, u16 data) +{ + outw(index, VBE_DISPI_IOPORT_INDEX); + outw(data, VBE_DISPI_IOPORT_DATA); +} + +#endif diff --git a/drivers/gpu/drm/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c new file mode 100644 index 000000000000..83a04afd1766 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_fb.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_fb.c + * Copyright 2012 Red Hat Inc. + * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com, + */ +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/fb.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/sysrq.h> +#include <linux/tty.h> + +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> + +#include "vbox_drv.h" +#include "vboxvideo.h" + +#ifdef CONFIG_DRM_KMS_FB_HELPER +static struct fb_deferred_io vbox_defio = { + .delay = HZ / 30, + .deferred_io = drm_fb_helper_deferred_io, +}; +#endif + +static struct fb_ops vboxfb_ops = { + .owner = THIS_MODULE, + DRM_FB_HELPER_DEFAULT_OPS, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, +}; + +int vboxfb_create(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct vbox_private *vbox = + container_of(helper, struct vbox_private, fb_helper); + struct pci_dev *pdev = vbox->ddev.pdev; + struct drm_mode_fb_cmd2 mode_cmd; + struct drm_framebuffer *fb; + struct fb_info *info; + struct drm_gem_object *gobj; + struct vbox_bo *bo; + int size, ret; + u64 gpu_addr; + u32 pitch; + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8); + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + mode_cmd.pitches[0] = pitch; + + size = pitch * mode_cmd.height; + + ret = vbox_gem_create(vbox, size, true, &gobj); + if (ret) { + DRM_ERROR("failed to create fbcon backing object %d\n", ret); + return ret; + } + + ret = vbox_framebuffer_init(vbox, &vbox->afb, &mode_cmd, gobj); + if (ret) + return ret; + + bo = gem_to_vbox_bo(gobj); + + ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM); + if 
(ret) + return ret; + + info = drm_fb_helper_alloc_fbi(helper); + if (IS_ERR(info)) + return PTR_ERR(info); + + info->screen_size = size; + info->screen_base = (char __iomem *)vbox_bo_kmap(bo); + if (IS_ERR(info->screen_base)) + return PTR_ERR(info->screen_base); + + info->par = helper; + + fb = &vbox->afb.base; + helper->fb = fb; + + strcpy(info->fix.id, "vboxdrmfb"); + + info->fbops = &vboxfb_ops; + + /* + * This seems to be done for safety checking that the framebuffer + * is not registered twice by different drivers. + */ + info->apertures->ranges[0].base = pci_resource_start(pdev, 0); + info->apertures->ranges[0].size = pci_resource_len(pdev, 0); + + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); + drm_fb_helper_fill_var(info, helper, sizes->fb_width, + sizes->fb_height); + + gpu_addr = vbox_bo_gpu_offset(bo); + info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr; + info->fix.smem_len = vbox->available_vram_size - gpu_addr; + +#ifdef CONFIG_DRM_KMS_FB_HELPER + info->fbdefio = &vbox_defio; + fb_deferred_io_init(info); +#endif + + info->pixmap.flags = FB_PIXMAP_SYSTEM; + + DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height); + + return 0; +} + +void vbox_fbdev_fini(struct vbox_private *vbox) +{ + struct vbox_framebuffer *afb = &vbox->afb; + +#ifdef CONFIG_DRM_KMS_FB_HELPER + if (vbox->fb_helper.fbdev && vbox->fb_helper.fbdev->fbdefio) + fb_deferred_io_cleanup(vbox->fb_helper.fbdev); +#endif + + drm_fb_helper_unregister_fbi(&vbox->fb_helper); + + if (afb->obj) { + struct vbox_bo *bo = gem_to_vbox_bo(afb->obj); + + vbox_bo_kunmap(bo); + + if (bo->pin_count) + vbox_bo_unpin(bo); + + drm_gem_object_put_unlocked(afb->obj); + afb->obj = NULL; + } + drm_fb_helper_fini(&vbox->fb_helper); + + drm_framebuffer_unregister_private(&afb->base); + drm_framebuffer_cleanup(&afb->base); +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c b/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c new file mode 100644 index 000000000000..94b60654a012 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2017 Oracle Corporation + * Authors: Hans de Goede <hdegoede@redhat.com> + */ + +#include "vbox_drv.h" +#include "vboxvideo_vbe.h" +#include "hgsmi_defs.h" + +/* One-at-a-Time Hash from http://www.burtleburtle.net/bob/hash/doobs.html */ +static u32 hgsmi_hash_process(u32 hash, const u8 *data, int size) +{ + while (size--) { + hash += *data++; + hash += (hash << 10); + hash ^= (hash >> 6); + } + + return hash; +} + +static u32 hgsmi_hash_end(u32 hash) +{ + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + + return hash; +} + +/* Not really a checksum but that is the naming used in all vbox code */ +static u32 hgsmi_checksum(u32 offset, + const struct hgsmi_buffer_header *header, + const struct hgsmi_buffer_tail *tail) +{ + u32 checksum; + + checksum = hgsmi_hash_process(0, (u8 *)&offset, sizeof(offset)); + checksum = hgsmi_hash_process(checksum, (u8 *)header, sizeof(*header)); + /* 4 -> Do not checksum the checksum itself */ + checksum = hgsmi_hash_process(checksum, (u8 *)tail, 4); + + return hgsmi_hash_end(checksum); +} + +void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size, + u8 channel, u16 channel_info) +{ + struct hgsmi_buffer_header *h; + struct hgsmi_buffer_tail *t; + size_t total_size; + dma_addr_t offset; + + total_size = size + sizeof(*h) + sizeof(*t); + h = gen_pool_dma_alloc(guest_pool, total_size, &offset); + if (!h) + return NULL; + + t = (struct 
hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size); + + h->flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE; + h->data_size = size; + h->channel = channel; + h->channel_info = channel_info; + memset(&h->u.header_data, 0, sizeof(h->u.header_data)); + + t->reserved = 0; + t->checksum = hgsmi_checksum(offset, h, t); + + return (u8 *)h + sizeof(*h); +} + +void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf) +{ + struct hgsmi_buffer_header *h = + (struct hgsmi_buffer_header *)((u8 *)buf - sizeof(*h)); + size_t total_size = h->data_size + sizeof(*h) + + sizeof(struct hgsmi_buffer_tail); + + gen_pool_free(guest_pool, (unsigned long)h, total_size); +} + +int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf) +{ + phys_addr_t offset; + + offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf - + sizeof(struct hgsmi_buffer_header)); + outl(offset, VGA_PORT_HGSMI_GUEST); + /* Make the compiler aware that the host has changed memory. */ + mb(); + + return 0; +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c new file mode 100644 index 000000000000..16a1e29f5292 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2016-2017 Oracle Corporation + * This file is based on qxl_irq.c + * Copyright 2013 Red Hat Inc. + * Authors: Dave Airlie + * Alon Levy + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ + +#include <linux/pci.h> +#include <drm/drm_irq.h> +#include <drm/drm_probe_helper.h> + +#include "vbox_drv.h" +#include "vboxvideo.h" + +static void vbox_clear_irq(void) +{ + outl((u32)~0, VGA_PORT_HGSMI_HOST); +} + +static u32 vbox_get_flags(struct vbox_private *vbox) +{ + return readl(vbox->guest_heap + HOST_FLAGS_OFFSET); +} + +void vbox_report_hotplug(struct vbox_private *vbox) +{ + schedule_work(&vbox->hotplug_work); +} + +irqreturn_t vbox_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *)arg; + struct vbox_private *vbox = (struct vbox_private *)dev->dev_private; + u32 host_flags = vbox_get_flags(vbox); + + if (!(host_flags & HGSMIHOSTFLAGS_IRQ)) + return IRQ_NONE; + + /* + * Due to a bug in the initial host implementation of hot-plug irqs, + * the hot-plug and cursor capability flags were never cleared. + * Fortunately we can tell when they would have been set by checking + * that the VSYNC flag is not set. + */ + if (host_flags & + (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES) && + !(host_flags & HGSMIHOSTFLAGS_VSYNC)) + vbox_report_hotplug(vbox); + + vbox_clear_irq(); + + return IRQ_HANDLED; +} + +/* + * Check that the position hints provided by the host are suitable for GNOME + * shell (i.e. all screens disjoint and hints for all enabled screens) and if + * not replace them with default ones. Providing valid hints improves the + * chances that we will get a known screen layout for pointer mapping. 
+ */ +static void validate_or_set_position_hints(struct vbox_private *vbox) +{ + struct vbva_modehint *hintsi, *hintsj; + bool valid = true; + u16 currentx = 0; + int i, j; + + for (i = 0; i < vbox->num_crtcs; ++i) { + for (j = 0; j < i; ++j) { + hintsi = &vbox->last_mode_hints[i]; + hintsj = &vbox->last_mode_hints[j]; + + if (hintsi->enabled && hintsj->enabled) { + if (hintsi->dx >= 0xffff || + hintsi->dy >= 0xffff || + hintsj->dx >= 0xffff || + hintsj->dy >= 0xffff || + (hintsi->dx < + hintsj->dx + (hintsj->cx & 0x8fff) && + hintsi->dx + (hintsi->cx & 0x8fff) > + hintsj->dx) || + (hintsi->dy < + hintsj->dy + (hintsj->cy & 0x8fff) && + hintsi->dy + (hintsi->cy & 0x8fff) > + hintsj->dy)) + valid = false; + } + } + } + if (!valid) + for (i = 0; i < vbox->num_crtcs; ++i) { + if (vbox->last_mode_hints[i].enabled) { + vbox->last_mode_hints[i].dx = currentx; + vbox->last_mode_hints[i].dy = 0; + currentx += + vbox->last_mode_hints[i].cx & 0x8fff; + } + } +} + +/* Query the host for the most recent video mode hints. */ +static void vbox_update_mode_hints(struct vbox_private *vbox) +{ + struct drm_connector_list_iter conn_iter; + struct drm_device *dev = &vbox->ddev; + struct drm_connector *connector; + struct vbox_connector *vbox_conn; + struct vbva_modehint *hints; + u16 flags; + bool disconnected; + unsigned int crtc_id; + int ret; + + ret = hgsmi_get_mode_hints(vbox->guest_pool, vbox->num_crtcs, + vbox->last_mode_hints); + if (ret) { + DRM_ERROR("vboxvideo: hgsmi_get_mode_hints failed: %d\n", ret); + return; + } + + validate_or_set_position_hints(vbox); + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + vbox_conn = to_vbox_connector(connector); + + hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id]; + if (hints->magic != VBVAMODEHINT_MAGIC) + continue; + + disconnected = !(hints->enabled); + crtc_id = vbox_conn->vbox_crtc->crtc_id; + vbox_conn->mode_hint.width = hints->cx; + vbox_conn->mode_hint.height = hints->cy; + vbox_conn->vbox_crtc->x_hint = hints->dx; + vbox_conn->vbox_crtc->y_hint = hints->dy; + vbox_conn->mode_hint.disconnected = disconnected; + + if (vbox_conn->vbox_crtc->disconnected == disconnected) + continue; + + if (disconnected) + flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED; + else + flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_BLANK; + + hgsmi_process_display_info(vbox->guest_pool, crtc_id, 0, 0, 0, + hints->cx * 4, hints->cx, + hints->cy, 0, flags); + + vbox_conn->vbox_crtc->disconnected = disconnected; + } + drm_connector_list_iter_end(&conn_iter); + drm_modeset_unlock(&dev->mode_config.connection_mutex); +} + +static void vbox_hotplug_worker(struct work_struct *work) +{ + struct vbox_private *vbox = container_of(work, struct vbox_private, + hotplug_work); + + vbox_update_mode_hints(vbox); + drm_kms_helper_hotplug_event(&vbox->ddev); +} + +int vbox_irq_init(struct vbox_private *vbox) +{ + INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker); + vbox_update_mode_hints(vbox); + + return drm_irq_install(&vbox->ddev, vbox->ddev.pdev->irq); +} + +void vbox_irq_fini(struct vbox_private *vbox) +{ + drm_irq_uninstall(&vbox->ddev); + flush_work(&vbox->hotplug_work); +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c new file mode 100644 index 000000000000..f4d02de5518a --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_main.c @@ -0,0 +1,361 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 
2013-2017 Oracle Corporation + * This file is based on ast_main.c + * Copyright 2012 Red Hat Inc. + * Authors: Dave Airlie <airlied@redhat.com>, + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ + +#include <linux/vbox_err.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_crtc_helper.h> + +#include "vbox_drv.h" +#include "vboxvideo_guest.h" +#include "vboxvideo_vbe.h" + +static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb) +{ + struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb); + + if (vbox_fb->obj) + drm_gem_object_put_unlocked(vbox_fb->obj); + + drm_framebuffer_cleanup(fb); + kfree(fb); +} + +void vbox_report_caps(struct vbox_private *vbox) +{ + u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION | + VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY; + + /* The host only accepts VIDEO_MODE_HINTS if it is sent separately. */ + hgsmi_send_caps_info(vbox->guest_pool, caps); + caps |= VBVACAPS_VIDEO_MODE_HINTS; + hgsmi_send_caps_info(vbox->guest_pool, caps); +} + +/* Send information about dirty rectangles to VBVA. */ +void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb, + struct drm_clip_rect *rects, + unsigned int num_rects) +{ + struct vbox_private *vbox = fb->dev->dev_private; + struct drm_display_mode *mode; + struct drm_crtc *crtc; + int crtc_x, crtc_y; + unsigned int i; + + mutex_lock(&vbox->hw_mutex); + list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) { + if (crtc->primary->state->fb != fb) + continue; + + mode = &crtc->state->mode; + crtc_x = crtc->primary->state->src_x >> 16; + crtc_y = crtc->primary->state->src_y >> 16; + + for (i = 0; i < num_rects; ++i) { + struct vbva_cmd_hdr cmd_hdr; + unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id; + + if (rects[i].x1 > crtc_x + mode->hdisplay || + rects[i].y1 > crtc_y + mode->vdisplay || + rects[i].x2 < crtc_x || + rects[i].y2 < crtc_y) + continue; + + cmd_hdr.x = (s16)rects[i].x1; + cmd_hdr.y = (s16)rects[i].y1; + cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1; + cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1; + + if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id], + vbox->guest_pool)) + continue; + + vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool, + &cmd_hdr, sizeof(cmd_hdr)); + vbva_buffer_end_update(&vbox->vbva_info[crtc_id]); + } + } + mutex_unlock(&vbox->hw_mutex); +} + +static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int flags, unsigned int color, + struct drm_clip_rect *rects, + unsigned int num_rects) +{ + vbox_framebuffer_dirty_rectangles(fb, rects, num_rects); + + return 0; +} + +static const struct drm_framebuffer_funcs vbox_fb_funcs = { + .destroy = vbox_user_framebuffer_destroy, + .dirty = vbox_user_framebuffer_dirty, +}; + +int vbox_framebuffer_init(struct vbox_private *vbox, + struct vbox_framebuffer *vbox_fb, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj) +{ + int ret; + + drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd); + vbox_fb->obj = obj; + ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs); + if (ret) { + DRM_ERROR("framebuffer init failed %d\n", ret); + return ret; + } + + return 0; +} + +static int vbox_accel_init(struct vbox_private *vbox) +{ + struct vbva_buffer *vbva; + unsigned int i; + + vbox->vbva_info = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs, + sizeof(*vbox->vbva_info), GFP_KERNEL); + if (!vbox->vbva_info) + return -ENOMEM; + + /* Take a command buffer for each screen from the end of 
usable VRAM. */ + vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE; + + vbox->vbva_buffers = pci_iomap_range(vbox->ddev.pdev, 0, + vbox->available_vram_size, + vbox->num_crtcs * + VBVA_MIN_BUFFER_SIZE); + if (!vbox->vbva_buffers) + return -ENOMEM; + + for (i = 0; i < vbox->num_crtcs; ++i) { + vbva_setup_buffer_context(&vbox->vbva_info[i], + vbox->available_vram_size + + i * VBVA_MIN_BUFFER_SIZE, + VBVA_MIN_BUFFER_SIZE); + vbva = (void __force *)vbox->vbva_buffers + + i * VBVA_MIN_BUFFER_SIZE; + if (!vbva_enable(&vbox->vbva_info[i], + vbox->guest_pool, vbva, i)) { + /* very old host or driver error. */ + DRM_ERROR("vboxvideo: vbva_enable failed\n"); + } + } + + return 0; +} + +static void vbox_accel_fini(struct vbox_private *vbox) +{ + unsigned int i; + + for (i = 0; i < vbox->num_crtcs; ++i) + vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i); + + pci_iounmap(vbox->ddev.pdev, vbox->vbva_buffers); +} + +/* Do we support the 4.3 plus mode hint reporting interface? */ +static bool have_hgsmi_mode_hints(struct vbox_private *vbox) +{ + u32 have_hints, have_cursor; + int ret; + + ret = hgsmi_query_conf(vbox->guest_pool, + VBOX_VBVA_CONF32_MODE_HINT_REPORTING, + &have_hints); + if (ret) + return false; + + ret = hgsmi_query_conf(vbox->guest_pool, + VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING, + &have_cursor); + if (ret) + return false; + + return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS; +} + +bool vbox_check_supported(u16 id) +{ + u16 dispi_id; + + vbox_write_ioport(VBE_DISPI_INDEX_ID, id); + dispi_id = inw(VBE_DISPI_IOPORT_DATA); + + return dispi_id == id; +} + +int vbox_hw_init(struct vbox_private *vbox) +{ + int ret = -ENOMEM; + + vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA); + vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX); + + DRM_INFO("VRAM %08x\n", vbox->full_vram_size); + + /* Map guest-heap at end of vram */ + vbox->guest_heap = + pci_iomap_range(vbox->ddev.pdev, 0, GUEST_HEAP_OFFSET(vbox), + GUEST_HEAP_SIZE); + if (!vbox->guest_heap) + return -ENOMEM; + + /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */ + vbox->guest_pool = gen_pool_create(4, -1); + if (!vbox->guest_pool) + goto err_unmap_guest_heap; + + ret = gen_pool_add_virt(vbox->guest_pool, + (unsigned long)vbox->guest_heap, + GUEST_HEAP_OFFSET(vbox), + GUEST_HEAP_USABLE_SIZE, -1); + if (ret) + goto err_destroy_guest_pool; + + ret = hgsmi_test_query_conf(vbox->guest_pool); + if (ret) { + DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n"); + goto err_destroy_guest_pool; + } + + /* Reduce available VRAM size to reflect the guest heap. */ + vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox); + /* Linux drm represents monitors as a 32-bit array. 
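+ * Each DRM connector and encoder exposes the CRTCs it can drive through a u32 possible_crtcs bitmask, so no more than 32 CRTCs can usefully be advertised.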
*/ + hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT, + &vbox->num_crtcs); + vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS); + + if (!have_hgsmi_mode_hints(vbox)) { + ret = -ENOTSUPP; + goto err_destroy_guest_pool; + } + + vbox->last_mode_hints = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs, + sizeof(struct vbva_modehint), + GFP_KERNEL); + if (!vbox->last_mode_hints) { + ret = -ENOMEM; + goto err_destroy_guest_pool; + } + + ret = vbox_accel_init(vbox); + if (ret) + goto err_destroy_guest_pool; + + return 0; + +err_destroy_guest_pool: + gen_pool_destroy(vbox->guest_pool); +err_unmap_guest_heap: + pci_iounmap(vbox->ddev.pdev, vbox->guest_heap); + return ret; +} + +void vbox_hw_fini(struct vbox_private *vbox) +{ + vbox_accel_fini(vbox); + gen_pool_destroy(vbox->guest_pool); + pci_iounmap(vbox->ddev.pdev, vbox->guest_heap); +} + +int vbox_gem_create(struct vbox_private *vbox, + u32 size, bool iskernel, struct drm_gem_object **obj) +{ + struct vbox_bo *vboxbo; + int ret; + + *obj = NULL; + + size = roundup(size, PAGE_SIZE); + if (size == 0) + return -EINVAL; + + ret = vbox_bo_create(vbox, size, 0, 0, &vboxbo); + if (ret) { + if (ret != -ERESTARTSYS) + DRM_ERROR("failed to allocate GEM object\n"); + return ret; + } + + *obj = &vboxbo->gem; + + return 0; +} + +int vbox_dumb_create(struct drm_file *file, + struct drm_device *dev, struct drm_mode_create_dumb *args) +{ + struct vbox_private *vbox = + container_of(dev, struct vbox_private, ddev); + struct drm_gem_object *gobj; + u32 handle; + int ret; + + args->pitch = args->width * ((args->bpp + 7) / 8); + args->size = args->pitch * args->height; + + ret = vbox_gem_create(vbox, args->size, false, &gobj); + if (ret) + return ret; + + ret = drm_gem_handle_create(file, gobj, &handle); + drm_gem_object_put_unlocked(gobj); + if (ret) + return ret; + + args->handle = handle; + + return 0; +} + +void vbox_gem_free_object(struct drm_gem_object *obj) +{ + struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj); + + ttm_bo_put(&vbox_bo->bo); +} + +static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo) +{ + return drm_vma_node_offset_addr(&bo->bo.vma_node); +} + +int +vbox_dumb_mmap_offset(struct drm_file *file, + struct drm_device *dev, + u32 handle, u64 *offset) +{ + struct drm_gem_object *obj; + int ret; + struct vbox_bo *bo; + + mutex_lock(&dev->struct_mutex); + obj = drm_gem_object_lookup(file, handle); + if (!obj) { + ret = -ENOENT; + goto out_unlock; + } + + bo = gem_to_vbox_bo(obj); + *offset = vbox_bo_mmap_offset(bo); + + drm_gem_object_put(obj); + ret = 0; + +out_unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c new file mode 100644 index 000000000000..620a6e38f71f --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -0,0 +1,940 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_mode.c + * Copyright 2012 Red Hat Inc. + * Parts based on xf86-video-ast + * Copyright (c) 2005 ASPEED Technology Inc. 
+ * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ +#include <linux/export.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "hgsmi_channels.h" +#include "vbox_drv.h" +#include "vboxvideo.h" + +/* + * Set a graphics mode. Poke any required values into registers, do an HGSMI + * mode set and tell the host we support advanced graphics functions. + */ +static void vbox_do_modeset(struct drm_crtc *crtc) +{ + struct drm_framebuffer *fb = crtc->primary->state->fb; + struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); + struct vbox_private *vbox; + int width, height, bpp, pitch; + u16 flags; + s32 x_offset, y_offset; + + vbox = crtc->dev->dev_private; + width = vbox_crtc->width ? vbox_crtc->width : 640; + height = vbox_crtc->height ? vbox_crtc->height : 480; + bpp = fb ? fb->format->cpp[0] * 8 : 32; + pitch = fb ? fb->pitches[0] : width * bpp / 8; + x_offset = vbox->single_framebuffer ? vbox_crtc->x : vbox_crtc->x_hint; + y_offset = vbox->single_framebuffer ? vbox_crtc->y : vbox_crtc->y_hint; + + /* + * This is the old way of setting graphics modes. It assumed one screen + * and a frame-buffer at the start of video RAM. On older versions of + * VirtualBox, certain parts of the code still assume that the first + * screen is programmed this way, so try to fake it. + */ + if (vbox_crtc->crtc_id == 0 && fb && + vbox_crtc->fb_offset / pitch < 0xffff - crtc->y && + vbox_crtc->fb_offset % (bpp / 8) == 0) { + vbox_write_ioport(VBE_DISPI_INDEX_XRES, width); + vbox_write_ioport(VBE_DISPI_INDEX_YRES, height); + vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp); + vbox_write_ioport(VBE_DISPI_INDEX_BPP, bpp); + vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED); + vbox_write_ioport( + VBE_DISPI_INDEX_X_OFFSET, + vbox_crtc->fb_offset % pitch / bpp * 8 + vbox_crtc->x); + vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET, + vbox_crtc->fb_offset / pitch + vbox_crtc->y); + } + + flags = VBVA_SCREEN_F_ACTIVE; + flags |= (fb && crtc->state->enable) ? 0 : VBVA_SCREEN_F_BLANK; + flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0; + hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id, + x_offset, y_offset, + vbox_crtc->x * bpp / 8 + + vbox_crtc->y * pitch, + pitch, width, height, bpp, flags); +} + +static int vbox_set_view(struct drm_crtc *crtc) +{ + struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); + struct vbox_private *vbox = crtc->dev->dev_private; + struct vbva_infoview *p; + + /* + * Tell the host about the view. This design originally targeted the + * Windows XP driver architecture and assumed that each screen would + * have a dedicated frame buffer with the command buffer following it, + * the whole being a "view". The host works out which screen a command + * buffer belongs to by checking whether it is in the first view, then + * whether it is in the second and so on. The first match wins. We + * cheat around this by making the first view be the managed memory + * plus the first command buffer, the second the same plus the second + * buffer and so on. 
+ */ + p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p), + HGSMI_CH_VBVA, VBVA_INFO_VIEW); + if (!p) + return -ENOMEM; + + p->view_index = vbox_crtc->crtc_id; + p->view_offset = vbox_crtc->fb_offset; + p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset + + vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE; + p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset; + + hgsmi_buffer_submit(vbox->guest_pool, p); + hgsmi_buffer_free(vbox->guest_pool, p); + + return 0; +} + +/* + * Try to map the layout of virtual screens to the range of the input device. + * Return true if we need to re-set the crtc modes due to screen offset + * changes. + */ +static bool vbox_set_up_input_mapping(struct vbox_private *vbox) +{ + struct drm_crtc *crtci; + struct drm_connector *connectori; + struct drm_framebuffer *fb, *fb1 = NULL; + bool single_framebuffer = true; + bool old_single_framebuffer = vbox->single_framebuffer; + u16 width = 0, height = 0; + + /* + * Are we using an X.Org-style single large frame-buffer for all crtcs? + * If so then screen layout can be deduced from the crtc offsets. + * Same fall-back if this is the fbdev frame-buffer. + */ + list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) { + fb = crtci->primary->state->fb; + if (!fb) + continue; + + if (!fb1) { + fb1 = fb; + if (to_vbox_framebuffer(fb1) == &vbox->afb) + break; + } else if (fb != fb1) { + single_framebuffer = false; + } + } + if (!fb1) + return false; + + if (single_framebuffer) { + vbox->single_framebuffer = true; + vbox->input_mapping_width = fb1->width; + vbox->input_mapping_height = fb1->height; + return old_single_framebuffer != vbox->single_framebuffer; + } + /* Otherwise calculate the total span of all screens. */ + list_for_each_entry(connectori, &vbox->ddev.mode_config.connector_list, + head) { + struct vbox_connector *vbox_connector = + to_vbox_connector(connectori); + struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc; + + width = max_t(u16, width, vbox_crtc->x_hint + + vbox_connector->mode_hint.width); + height = max_t(u16, height, vbox_crtc->y_hint + + vbox_connector->mode_hint.height); + } + + vbox->single_framebuffer = false; + vbox->input_mapping_width = width; + vbox->input_mapping_height = height; + + return old_single_framebuffer != vbox->single_framebuffer; +} + +static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y) +{ + struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj); + struct vbox_private *vbox = crtc->dev->dev_private; + struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); + bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state); + + mutex_lock(&vbox->hw_mutex); + + if (crtc->state->enable) { + vbox_crtc->width = crtc->state->mode.hdisplay; + vbox_crtc->height = crtc->state->mode.vdisplay; + } + + vbox_crtc->x = x; + vbox_crtc->y = y; + vbox_crtc->fb_offset = vbox_bo_gpu_offset(bo); + + /* vbox_do_modeset() checks vbox->single_framebuffer so update it now */ + if (needs_modeset && vbox_set_up_input_mapping(vbox)) { + struct drm_crtc *crtci; + + list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, + head) { + if (crtci == crtc) + continue; + vbox_do_modeset(crtci); + } + } + + vbox_set_view(crtc); + vbox_do_modeset(crtc); + + if (needs_modeset) + hgsmi_update_input_mapping(vbox->guest_pool, 0, 0, + vbox->input_mapping_width, + vbox->input_mapping_height); + + mutex_unlock(&vbox->hw_mutex); +} + +static void vbox_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_crtc_state 
*old_crtc_state) +{ +} + +static void vbox_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ +} + +static void vbox_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct drm_pending_vblank_event *event; + unsigned long flags; + + if (crtc->state && crtc->state->event) { + event = crtc->state->event; + crtc->state->event = NULL; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } +} + +static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = { + .atomic_enable = vbox_crtc_atomic_enable, + .atomic_disable = vbox_crtc_atomic_disable, + .atomic_flush = vbox_crtc_atomic_flush, +}; + +static void vbox_crtc_destroy(struct drm_crtc *crtc) +{ + drm_crtc_cleanup(crtc); + kfree(crtc); +} + +static const struct drm_crtc_funcs vbox_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + /* .gamma_set = vbox_crtc_gamma_set, */ + .destroy = vbox_crtc_destroy, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static int vbox_primary_atomic_check(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct drm_crtc_state *crtc_state = NULL; + + if (new_state->crtc) { + crtc_state = drm_atomic_get_existing_crtc_state( + new_state->state, new_state->crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + } + + return drm_atomic_helper_check_plane_state(new_state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true); +} + +static void vbox_primary_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_crtc *crtc = plane->state->crtc; + struct drm_framebuffer *fb = plane->state->fb; + + vbox_crtc_set_base_and_mode(crtc, fb, + plane->state->src_x >> 16, + plane->state->src_y >> 16); +} + +static void vbox_primary_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_crtc *crtc = old_state->crtc; + + /* vbox_do_modeset checks plane->state->fb and will disable if NULL */ + vbox_crtc_set_base_and_mode(crtc, old_state->fb, + old_state->src_x >> 16, + old_state->src_y >> 16); +} + +static int vbox_primary_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct vbox_bo *bo; + int ret; + + if (!new_state->fb) + return 0; + + bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj); + ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM); + if (ret) + DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret); + + return ret; +} + +static void vbox_primary_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct vbox_bo *bo; + + if (!old_state->fb) + return; + + bo = gem_to_vbox_bo(to_vbox_framebuffer(old_state->fb)->obj); + vbox_bo_unpin(bo); +} + +static int vbox_cursor_atomic_check(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct drm_crtc_state *crtc_state = NULL; + u32 width = new_state->crtc_w; + u32 height = new_state->crtc_h; + int ret; + + if (new_state->crtc) { + crtc_state = drm_atomic_get_existing_crtc_state( + new_state->state, new_state->crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + } + + ret = drm_atomic_helper_check_plane_state(new_state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true); + if 
(ret) + return ret; + + if (!new_state->fb) + return 0; + + if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT || + width == 0 || height == 0) + return -EINVAL; + + return 0; +} + +/* + * Copy the ARGB image and generate the mask, which is needed in case the host + * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set + * if the corresponding alpha value in the ARGB image is greater than 0xF0. + */ +static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height, + size_t mask_size) +{ + size_t line_size = (width + 7) / 8; + u32 i, j; + + memcpy(dst + mask_size, src, width * height * 4); + for (i = 0; i < height; ++i) + for (j = 0; j < width; ++j) + if (((u32 *)src)[i * width + j] > 0xf0000000) + dst[i * line_size + j / 8] |= (0x80 >> (j % 8)); +} + +static void vbox_cursor_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct vbox_private *vbox = + container_of(plane->dev, struct vbox_private, ddev); + struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc); + struct drm_framebuffer *fb = plane->state->fb; + struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj); + u32 width = plane->state->crtc_w; + u32 height = plane->state->crtc_h; + size_t data_size, mask_size; + u32 flags; + u8 *src; + + /* + * VirtualBox uses the host windowing system to draw the cursor so + * moves are a no-op, we only need to upload new cursor sprites. + */ + if (fb == old_state->fb) + return; + + mutex_lock(&vbox->hw_mutex); + + vbox_crtc->cursor_enabled = true; + + /* pinning is done in prepare/cleanup framebuffer */ + src = vbox_bo_kmap(bo); + if (IS_ERR(src)) { + mutex_unlock(&vbox->hw_mutex); + DRM_WARN("Could not kmap cursor bo, skipping update\n"); + return; + } + + /* + * The mask must be calculated based on the alpha + * channel, one bit per ARGB word, and must be 32-bit + * padded. 
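+ * For example, a 32x32 ARGB cursor needs (32 + 7) / 8 * 32 = 128 mask bytes, already a multiple of four, so the computation below gives mask_size = 128 and data_size = 32 * 32 * 4 + 128 = 4224 bytes.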
+ */ + mask_size = ((width + 7) / 8 * height + 3) & ~3; + data_size = width * height * 4 + mask_size; + + copy_cursor_image(src, vbox->cursor_data, width, height, mask_size); + vbox_bo_kunmap(bo); + + flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE | + VBOX_MOUSE_POINTER_ALPHA; + hgsmi_update_pointer_shape(vbox->guest_pool, flags, + min_t(u32, max(fb->hot_x, 0), width), + min_t(u32, max(fb->hot_y, 0), height), + width, height, vbox->cursor_data, data_size); + + mutex_unlock(&vbox->hw_mutex); +} + +static void vbox_cursor_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct vbox_private *vbox = + container_of(plane->dev, struct vbox_private, ddev); + struct vbox_crtc *vbox_crtc = to_vbox_crtc(old_state->crtc); + bool cursor_enabled = false; + struct drm_crtc *crtci; + + mutex_lock(&vbox->hw_mutex); + + vbox_crtc->cursor_enabled = false; + + list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) { + if (to_vbox_crtc(crtci)->cursor_enabled) + cursor_enabled = true; + } + + if (!cursor_enabled) + hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0, + 0, 0, NULL, 0); + + mutex_unlock(&vbox->hw_mutex); +} + +static int vbox_cursor_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct vbox_bo *bo; + + if (!new_state->fb) + return 0; + + bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj); + return vbox_bo_pin(bo, TTM_PL_FLAG_SYSTEM); +} + +static void vbox_cursor_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct vbox_bo *bo; + + if (!plane->state->fb) + return; + + bo = gem_to_vbox_bo(to_vbox_framebuffer(plane->state->fb)->obj); + vbox_bo_unpin(bo); +} + +static const u32 vbox_cursor_plane_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = { + .atomic_check = vbox_cursor_atomic_check, + .atomic_update = vbox_cursor_atomic_update, + .atomic_disable = vbox_cursor_atomic_disable, + .prepare_fb = vbox_cursor_prepare_fb, + .cleanup_fb = vbox_cursor_cleanup_fb, +}; + +static const struct drm_plane_funcs vbox_cursor_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_primary_helper_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static const u32 vbox_primary_plane_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, +}; + +static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = { + .atomic_check = vbox_primary_atomic_check, + .atomic_update = vbox_primary_atomic_update, + .atomic_disable = vbox_primary_atomic_disable, + .prepare_fb = vbox_primary_prepare_fb, + .cleanup_fb = vbox_primary_cleanup_fb, +}; + +static const struct drm_plane_funcs vbox_primary_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_primary_helper_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static struct drm_plane *vbox_create_plane(struct vbox_private *vbox, + unsigned int possible_crtcs, + enum drm_plane_type type) +{ + const struct drm_plane_helper_funcs *helper_funcs = NULL; + const struct drm_plane_funcs *funcs; + struct drm_plane *plane; + const u32 
*formats; + int num_formats; + int err; + + if (type == DRM_PLANE_TYPE_PRIMARY) { + funcs = &vbox_primary_plane_funcs; + formats = vbox_primary_plane_formats; + helper_funcs = &vbox_primary_helper_funcs; + num_formats = ARRAY_SIZE(vbox_primary_plane_formats); + } else if (type == DRM_PLANE_TYPE_CURSOR) { + funcs = &vbox_cursor_plane_funcs; + formats = vbox_cursor_plane_formats; + helper_funcs = &vbox_cursor_helper_funcs; + num_formats = ARRAY_SIZE(vbox_cursor_plane_formats); + } else { + return ERR_PTR(-EINVAL); + } + + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) + return ERR_PTR(-ENOMEM); + + err = drm_universal_plane_init(&vbox->ddev, plane, possible_crtcs, + funcs, formats, num_formats, + NULL, type, NULL); + if (err) + goto free_plane; + + drm_plane_helper_add(plane, helper_funcs); + + return plane; + +free_plane: + kfree(plane); + return ERR_PTR(-EINVAL); +} + +static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i) +{ + struct vbox_private *vbox = + container_of(dev, struct vbox_private, ddev); + struct drm_plane *cursor = NULL; + struct vbox_crtc *vbox_crtc; + struct drm_plane *primary; + u32 caps = 0; + int ret; + + ret = hgsmi_query_conf(vbox->guest_pool, + VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps); + if (ret) + return ERR_PTR(ret); + + vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL); + if (!vbox_crtc) + return ERR_PTR(-ENOMEM); + + primary = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_PRIMARY); + if (IS_ERR(primary)) { + ret = PTR_ERR(primary); + goto free_mem; + } + + if ((caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) { + cursor = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_CURSOR); + if (IS_ERR(cursor)) { + ret = PTR_ERR(cursor); + goto clean_primary; + } + } else { + DRM_WARN("VirtualBox host is too old, no cursor support\n"); + } + + vbox_crtc->crtc_id = i; + + ret = drm_crtc_init_with_planes(dev, &vbox_crtc->base, primary, cursor, + &vbox_crtc_funcs, NULL); + if (ret) + goto clean_cursor; + + drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256); + drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs); + + return vbox_crtc; + +clean_cursor: + if (cursor) { + drm_plane_cleanup(cursor); + kfree(cursor); + } +clean_primary: + drm_plane_cleanup(primary); + kfree(primary); +free_mem: + kfree(vbox_crtc); + return ERR_PTR(ret); +} + +static void vbox_encoder_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_funcs vbox_enc_funcs = { + .destroy = vbox_encoder_destroy, +}; + +static struct drm_encoder *vbox_encoder_init(struct drm_device *dev, + unsigned int i) +{ + struct vbox_encoder *vbox_encoder; + + vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL); + if (!vbox_encoder) + return NULL; + + drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs, + DRM_MODE_ENCODER_DAC, NULL); + + vbox_encoder->base.possible_crtcs = 1 << i; + return &vbox_encoder->base; +} + +/* + * Generate EDID data with a mode-unique serial number for the virtual + * monitor to try to persuade Unity that different modes correspond to + * different monitors and it should not try to force the same resolution on + * them. 
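+ * The requested width and height are packed into the serial number bytes (offsets 12 to 15) below, so every mode gets a distinct serial, and the final byte is chosen so that all 128 EDID bytes sum to zero modulo 256.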
+ */ +static void vbox_set_edid(struct drm_connector *connector, int width, + int height) +{ + enum { EDID_SIZE = 128 }; + unsigned char edid[EDID_SIZE] = { + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */ + 0x58, 0x58, /* manufacturer (VBX) */ + 0x00, 0x00, /* product code */ + 0x00, 0x00, 0x00, 0x00, /* serial number goes here */ + 0x01, /* week of manufacture */ + 0x00, /* year of manufacture */ + 0x01, 0x03, /* EDID version */ + 0x80, /* capabilities - digital */ + 0x00, /* horiz. res in cm, zero for projectors */ + 0x00, /* vert. res in cm */ + 0x78, /* display gamma (120 == 2.2). */ + 0xEE, /* features (standby, suspend, off, RGB, std */ + /* colour space, preferred timing mode) */ + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54, + /* chromaticity for standard colour space. */ + 0x00, 0x00, 0x00, /* no default timings */ + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, /* no standard timings */ + 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02, + 0x02, 0x02, + /* descriptor block 1 goes below */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* descriptor block 2, monitor ranges */ + 0x00, 0x00, 0x00, 0xFD, 0x00, + 0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20, + 0x20, 0x20, + /* 0-200Hz vertical, 0-200KHz horizontal, 1000MHz pixel clock */ + 0x20, + /* descriptor block 3, monitor name */ + 0x00, 0x00, 0x00, 0xFC, 0x00, + 'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r', + '\n', + /* descriptor block 4: dummy data */ + 0x00, 0x00, 0x00, 0x10, 0x00, + 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, + 0x00, /* number of extensions */ + 0x00 /* checksum goes here */ + }; + int clock = (width + 6) * (height + 6) * 60 / 10000; + unsigned int i, sum = 0; + + edid[12] = width & 0xff; + edid[13] = width >> 8; + edid[14] = height & 0xff; + edid[15] = height >> 8; + edid[54] = clock & 0xff; + edid[55] = clock >> 8; + edid[56] = width & 0xff; + edid[58] = (width >> 4) & 0xf0; + edid[59] = height & 0xff; + edid[61] = (height >> 4) & 0xf0; + for (i = 0; i < EDID_SIZE - 1; ++i) + sum += edid[i]; + edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF; + drm_connector_update_edid_property(connector, (struct edid *)edid); +} + +static int vbox_get_modes(struct drm_connector *connector) +{ + struct vbox_connector *vbox_connector = NULL; + struct drm_display_mode *mode = NULL; + struct vbox_private *vbox = NULL; + unsigned int num_modes = 0; + int preferred_width, preferred_height; + + vbox_connector = to_vbox_connector(connector); + vbox = connector->dev->dev_private; + + hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) + + HOST_FLAGS_OFFSET); + if (vbox_connector->vbox_crtc->crtc_id == 0) + vbox_report_caps(vbox); + + num_modes = drm_add_modes_noedid(connector, 2560, 1600); + preferred_width = vbox_connector->mode_hint.width ? + vbox_connector->mode_hint.width : 1024; + preferred_height = vbox_connector->mode_hint.height ? 
+ vbox_connector->mode_hint.height : 768; + mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height, + 60, false, false, false); + if (mode) { + mode->type |= DRM_MODE_TYPE_PREFERRED; + drm_mode_probed_add(connector, mode); + ++num_modes; + } + vbox_set_edid(connector, preferred_width, preferred_height); + + if (vbox_connector->vbox_crtc->x_hint != -1) + drm_object_property_set_value(&connector->base, + vbox->ddev.mode_config.suggested_x_property, + vbox_connector->vbox_crtc->x_hint); + else + drm_object_property_set_value(&connector->base, + vbox->ddev.mode_config.suggested_x_property, 0); + + if (vbox_connector->vbox_crtc->y_hint != -1) + drm_object_property_set_value(&connector->base, + vbox->ddev.mode_config.suggested_y_property, + vbox_connector->vbox_crtc->y_hint); + else + drm_object_property_set_value(&connector->base, + vbox->ddev.mode_config.suggested_y_property, 0); + + return num_modes; +} + +static void vbox_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +static enum drm_connector_status +vbox_connector_detect(struct drm_connector *connector, bool force) +{ + struct vbox_connector *vbox_connector; + + vbox_connector = to_vbox_connector(connector); + + return vbox_connector->mode_hint.disconnected ? + connector_status_disconnected : connector_status_connected; +} + +static int vbox_fill_modes(struct drm_connector *connector, u32 max_x, + u32 max_y) +{ + struct vbox_connector *vbox_connector; + struct drm_device *dev; + struct drm_display_mode *mode, *iterator; + + vbox_connector = to_vbox_connector(connector); + dev = vbox_connector->base.dev; + list_for_each_entry_safe(mode, iterator, &connector->modes, head) { + list_del(&mode->head); + drm_mode_destroy(dev, mode); + } + + return drm_helper_probe_single_connector_modes(connector, max_x, max_y); +} + +static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = { + .get_modes = vbox_get_modes, +}; + +static const struct drm_connector_funcs vbox_connector_funcs = { + .detect = vbox_connector_detect, + .fill_modes = vbox_fill_modes, + .destroy = vbox_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int vbox_connector_init(struct drm_device *dev, + struct vbox_crtc *vbox_crtc, + struct drm_encoder *encoder) +{ + struct vbox_connector *vbox_connector; + struct drm_connector *connector; + + vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL); + if (!vbox_connector) + return -ENOMEM; + + connector = &vbox_connector->base; + vbox_connector->vbox_crtc = vbox_crtc; + + drm_connector_init(dev, connector, &vbox_connector_funcs, + DRM_MODE_CONNECTOR_VGA); + drm_connector_helper_add(connector, &vbox_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + drm_mode_create_suggested_offset_properties(dev); + drm_object_attach_property(&connector->base, + dev->mode_config.suggested_x_property, 0); + drm_object_attach_property(&connector->base, + dev->mode_config.suggested_y_property, 0); + + drm_connector_attach_encoder(connector, encoder); + + return 0; +} + +static struct drm_framebuffer *vbox_user_framebuffer_create( + struct drm_device *dev, + struct drm_file *filp, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct vbox_private *vbox = + container_of(dev, struct 
vbox_private, ddev); + struct drm_gem_object *obj; + struct vbox_framebuffer *vbox_fb; + int ret = -ENOMEM; + + obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]); + if (!obj) + return ERR_PTR(-ENOENT); + + vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL); + if (!vbox_fb) + goto err_unref_obj; + + ret = vbox_framebuffer_init(vbox, vbox_fb, mode_cmd, obj); + if (ret) + goto err_free_vbox_fb; + + return &vbox_fb->base; + +err_free_vbox_fb: + kfree(vbox_fb); +err_unref_obj: + drm_gem_object_put_unlocked(obj); + return ERR_PTR(ret); +} + +static const struct drm_mode_config_funcs vbox_mode_funcs = { + .fb_create = vbox_user_framebuffer_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +int vbox_mode_init(struct vbox_private *vbox) +{ + struct drm_device *dev = &vbox->ddev; + struct drm_encoder *encoder; + struct vbox_crtc *vbox_crtc; + unsigned int i; + int ret; + + drm_mode_config_init(dev); + + dev->mode_config.funcs = (void *)&vbox_mode_funcs; + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.preferred_depth = 24; + dev->mode_config.max_width = VBE_DISPI_MAX_XRES; + dev->mode_config.max_height = VBE_DISPI_MAX_YRES; + + for (i = 0; i < vbox->num_crtcs; ++i) { + vbox_crtc = vbox_crtc_init(dev, i); + if (IS_ERR(vbox_crtc)) { + ret = PTR_ERR(vbox_crtc); + goto err_drm_mode_cleanup; + } + encoder = vbox_encoder_init(dev, i); + if (!encoder) { + ret = -ENOMEM; + goto err_drm_mode_cleanup; + } + ret = vbox_connector_init(dev, vbox_crtc, encoder); + if (ret) + goto err_drm_mode_cleanup; + } + + drm_mode_config_reset(dev); + return 0; + +err_drm_mode_cleanup: + drm_mode_config_cleanup(dev); + return ret; +} + +void vbox_mode_fini(struct vbox_private *vbox) +{ + drm_mode_config_cleanup(&vbox->ddev); +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_prime.c b/drivers/gpu/drm/vboxvideo/vbox_prime.c new file mode 100644 index 000000000000..d61985b0c6eb --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_prime.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2017 Oracle Corporation + * Copyright 2017 Canonical + * Authors: Andreas Pokorny + */ + +#include "vbox_drv.h" + +/* + * Based on qxl_prime.c: + * Empty Implementations as there should not be any other driver for a virtual + * device that might share buffers with vboxvideo + */ + +int vbox_gem_prime_pin(struct drm_gem_object *obj) +{ + WARN_ONCE(1, "not implemented"); + return -ENOSYS; +} + +void vbox_gem_prime_unpin(struct drm_gem_object *obj) +{ + WARN_ONCE(1, "not implemented"); +} + +struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + WARN_ONCE(1, "not implemented"); + return ERR_PTR(-ENOSYS); +} + +struct drm_gem_object *vbox_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *table) +{ + WARN_ONCE(1, "not implemented"); + return ERR_PTR(-ENOSYS); +} + +void *vbox_gem_prime_vmap(struct drm_gem_object *obj) +{ + WARN_ONCE(1, "not implemented"); + return ERR_PTR(-ENOSYS); +} + +void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + WARN_ONCE(1, "not implemented"); +} + +int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area) +{ + WARN_ONCE(1, "not implemented"); + return -ENOSYS; +} diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c new file mode 100644 index 000000000000..30f270027acf --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c @@ -0,0 +1,394 @@ +// 
SPDX-License-Identifier: MIT +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_ttm.c + * Copyright 2012 Red Hat Inc. + * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com> + */ +#include <linux/pci.h> +#include <drm/drm_file.h> +#include <drm/ttm/ttm_page_alloc.h> +#include "vbox_drv.h" + +static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd) +{ + return container_of(bd, struct vbox_private, ttm.bdev); +} + +static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo) +{ + struct vbox_bo *bo; + + bo = container_of(tbo, struct vbox_bo, bo); + + drm_gem_object_release(&bo->gem); + kfree(bo); +} + +static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo) +{ + if (bo->destroy == &vbox_bo_ttm_destroy) + return true; + + return false; +} + +static int +vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type, + struct ttm_mem_type_manager *man) +{ + switch (type) { + case TTM_PL_SYSTEM: + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_CACHED; + break; + case TTM_PL_VRAM: + man->func = &ttm_bo_manager_func; + man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type); + return -EINVAL; + } + + return 0; +} + +static void +vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) +{ + struct vbox_bo *vboxbo = vbox_bo(bo); + + if (!vbox_ttm_bo_is_vbox_bo(bo)) + return; + + vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM); + *pl = vboxbo->placement; +} + +static int vbox_bo_verify_access(struct ttm_buffer_object *bo, + struct file *filp) +{ + return 0; +} + +static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct vbox_private *vbox = vbox_bdev(bdev); + + mem->bus.addr = NULL; + mem->bus.offset = 0; + mem->bus.size = mem->num_pages << PAGE_SHIFT; + mem->bus.base = 0; + mem->bus.is_iomem = false; + if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) + return -EINVAL; + switch (mem->mem_type) { + case TTM_PL_SYSTEM: + /* system memory */ + return 0; + case TTM_PL_VRAM: + mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.base = pci_resource_start(vbox->ddev.pdev, 0); + mem->bus.is_iomem = true; + break; + default: + return -EINVAL; + } + return 0; +} + +static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ +} + +static void vbox_ttm_backend_destroy(struct ttm_tt *tt) +{ + ttm_tt_fini(tt); + kfree(tt); +} + +static struct ttm_backend_func vbox_tt_backend_func = { + .destroy = &vbox_ttm_backend_destroy, +}; + +static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo, + u32 page_flags) +{ + struct ttm_tt *tt; + + tt = kzalloc(sizeof(*tt), GFP_KERNEL); + if (!tt) + return NULL; + + tt->func = &vbox_tt_backend_func; + if (ttm_tt_init(tt, bo, page_flags)) { + kfree(tt); + return NULL; + } + + return tt; +} + +static struct ttm_bo_driver vbox_bo_driver = { + .ttm_tt_create = vbox_ttm_tt_create, + .init_mem_type = vbox_bo_init_mem_type, + .eviction_valuable = ttm_bo_eviction_valuable, + .evict_flags = vbox_bo_evict_flags, + .verify_access = vbox_bo_verify_access, + .io_mem_reserve = &vbox_ttm_io_mem_reserve, + .io_mem_free = &vbox_ttm_io_mem_free, +}; + +int 
vbox_mm_init(struct vbox_private *vbox) +{ + int ret; + struct drm_device *dev = &vbox->ddev; + struct ttm_bo_device *bdev = &vbox->ttm.bdev; + + ret = ttm_bo_device_init(&vbox->ttm.bdev, + &vbox_bo_driver, + dev->anon_inode->i_mapping, + DRM_FILE_PAGE_OFFSET, true); + if (ret) { + DRM_ERROR("Error initialising bo driver; %d\n", ret); + return ret; + } + + ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, + vbox->available_vram_size >> PAGE_SHIFT); + if (ret) { + DRM_ERROR("Failed ttm VRAM init: %d\n", ret); + goto err_device_release; + } + +#ifdef DRM_MTRR_WC + vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0), + DRM_MTRR_WC); +#else + vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); +#endif + return 0; + +err_device_release: + ttm_bo_device_release(&vbox->ttm.bdev); + return ret; +} + +void vbox_mm_fini(struct vbox_private *vbox) +{ +#ifdef DRM_MTRR_WC + drm_mtrr_del(vbox->fb_mtrr, + pci_resource_start(vbox->ddev.pdev, 0), + pci_resource_len(vbox->ddev.pdev, 0), DRM_MTRR_WC); +#else + arch_phys_wc_del(vbox->fb_mtrr); +#endif + ttm_bo_device_release(&vbox->ttm.bdev); +} + +void vbox_ttm_placement(struct vbox_bo *bo, int domain) +{ + unsigned int i; + u32 c = 0; + + bo->placement.placement = bo->placements; + bo->placement.busy_placement = bo->placements; + + if (domain & TTM_PL_FLAG_VRAM) + bo->placements[c++].flags = + TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; + if (domain & TTM_PL_FLAG_SYSTEM) + bo->placements[c++].flags = + TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + if (!c) + bo->placements[c++].flags = + TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + + bo->placement.num_placement = c; + bo->placement.num_busy_placement = c; + + for (i = 0; i < c; ++i) { + bo->placements[i].fpfn = 0; + bo->placements[i].lpfn = 0; + } +} + +int vbox_bo_create(struct vbox_private *vbox, int size, int align, + u32 flags, struct vbox_bo **pvboxbo) +{ + struct vbox_bo *vboxbo; + size_t acc_size; + int ret; + + vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL); + if (!vboxbo) + return -ENOMEM; + + ret = drm_gem_object_init(&vbox->ddev, &vboxbo->gem, size); + if (ret) + goto err_free_vboxbo; + + vboxbo->bo.bdev = &vbox->ttm.bdev; + + vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); + + acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size, + sizeof(struct vbox_bo)); + + ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size, + ttm_bo_type_device, &vboxbo->placement, + align >> PAGE_SHIFT, false, acc_size, + NULL, NULL, vbox_bo_ttm_destroy); + if (ret) + goto err_free_vboxbo; + + *pvboxbo = vboxbo; + + return 0; + +err_free_vboxbo: + kfree(vboxbo); + return ret; +} + +int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag) +{ + struct ttm_operation_ctx ctx = { false, false }; + int i, ret; + + if (bo->pin_count) { + bo->pin_count++; + return 0; + } + + ret = vbox_bo_reserve(bo, false); + if (ret) + return ret; + + vbox_ttm_placement(bo, pl_flag); + + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; + + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); + if (ret == 0) + bo->pin_count = 1; + + vbox_bo_unreserve(bo); + + return ret; +} + +int vbox_bo_unpin(struct vbox_bo *bo) +{ + struct ttm_operation_ctx ctx = { false, false }; + int i, ret; + + if (!bo->pin_count) { + DRM_ERROR("unpin bad %p\n", bo); + return 0; + } + bo->pin_count--; + if (bo->pin_count) + return 0; + + ret = vbox_bo_reserve(bo, false); + if (ret) { + DRM_ERROR("Error %d reserving 
bo, leaving it pinned\n", ret); + return ret; + } + + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; + + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); + + vbox_bo_unreserve(bo); + + return ret; +} + +/* + * Move a vbox-owned buffer object to system memory if no one else has it + * pinned. The caller must have pinned it previously, and this call will + * release the caller's pin. + */ +int vbox_bo_push_sysram(struct vbox_bo *bo) +{ + struct ttm_operation_ctx ctx = { false, false }; + int i, ret; + + if (!bo->pin_count) { + DRM_ERROR("unpin bad %p\n", bo); + return 0; + } + bo->pin_count--; + if (bo->pin_count) + return 0; + + if (bo->kmap.virtual) { + ttm_bo_kunmap(&bo->kmap); + bo->kmap.virtual = NULL; + } + + vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); + + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; + + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); + if (ret) { + DRM_ERROR("pushing to system memory failed\n"); + return ret; + } + + return 0; +} + +int vbox_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_file *file_priv; + struct vbox_private *vbox; + + if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) + return -EINVAL; + + file_priv = filp->private_data; + vbox = file_priv->minor->dev->dev_private; + + return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev); +} + +void *vbox_bo_kmap(struct vbox_bo *bo) +{ + int ret; + + if (bo->kmap.virtual) + return bo->kmap.virtual; + + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); + if (ret) { + DRM_ERROR("Error kmapping bo: %d\n", ret); + return NULL; + } + + return bo->kmap.virtual; +} + +void vbox_bo_kunmap(struct vbox_bo *bo) +{ + if (bo->kmap.virtual) { + ttm_bo_kunmap(&bo->kmap); + bo->kmap.virtual = NULL; + } +} diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h new file mode 100644 index 000000000000..0592004f71aa --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h @@ -0,0 +1,442 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2006-2016 Oracle Corporation */ + +#ifndef __VBOXVIDEO_H__ +#define __VBOXVIDEO_H__ + +#define VBOX_VIDEO_MAX_SCREENS 64 + +/* + * The last 4096 bytes of the guest VRAM contain the generic info for all + * DualView chunks: sizes and offsets of chunks. This is filled by miniport. + * + * Last 4096 bytes of each chunk contain chunk specific data: framebuffer info, + * etc. This is used exclusively by the corresponding instance of a display + * driver. + * + * The VRAM layout: + * Last 4096 bytes - Adapter information area. + * 4096 bytes aligned miniport heap (value specified in the config rounded up). + * Slack - what is left after dividing the VRAM. + * 4096 bytes aligned framebuffers: + * last 4096 bytes of each framebuffer is the display information area. + * + * The Virtual Graphics Adapter information in the guest VRAM is stored by the + * guest video driver using structures prepended by VBOXVIDEOINFOHDR. + * + * When the guest driver writes dword 0 to the VBE_DISPI_INDEX_VBOX_VIDEO + * the host starts to process the info. The first element at the start of + * the 4096 bytes region should normally be a LINK that points to the + * actual information chain. That way the guest driver can have some + * fixed layout of the information memory block and just rewrite + * the link to point to the relevant memory chain. + * + * The processing stops at the END element. 
+ * + * The host can access the memory only when the port IO is processed. + * All data that will be needed later must be copied from these 4096 bytes. + * But other VRAM can be used by the host until the mode is disabled. + * + * The guest driver writes dword 0xffffffff to the VBE_DISPI_INDEX_VBOX_VIDEO + * to disable the mode. + * + * VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information + * from the host and issue commands to the host. + * + * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, then the + * following operations with the VBE data register can be performed: + * + * Operation Result + * write 16 bit value NOP + * read 16 bit value count of monitors + * write 32 bit value set the vbox cmd value and the cmd is processed by the host + * read 32 bit value result of the last vbox command is returned + */ + +struct vbva_cmd_hdr { + s16 x; + s16 y; + u16 w; + u16 h; +} __packed; + +/* + * The VBVA ring buffer is suitable for transferring large (< 2GB) amounts of + * data. For example, big bitmaps which do not fit in the buffer. + * + * The guest starts writing to the buffer by initializing a record entry in the + * records queue. VBVA_F_RECORD_PARTIAL indicates that the record is being + * written. As data is written to the ring buffer, the guest increases + * free_offset. + * + * The host reads the records on flushes and processes all completed records. + * When the host encounters a situation where only a partial record is present and + * len_and_flags & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE - + * VBVA_RING_BUFFER_THRESHOLD, the host fetches all record data and updates + * data_offset. After that, on each flush the host continues fetching the data + * until the record is completed. + */ + +#define VBVA_RING_BUFFER_SIZE (4194304 - 1024) +#define VBVA_RING_BUFFER_THRESHOLD (4096) + +#define VBVA_MAX_RECORDS (64) + +#define VBVA_F_MODE_ENABLED 0x00000001u +#define VBVA_F_MODE_VRDP 0x00000002u +#define VBVA_F_MODE_VRDP_RESET 0x00000004u +#define VBVA_F_MODE_VRDP_ORDER_MASK 0x00000008u + +#define VBVA_F_STATE_PROCESSING 0x00010000u + +#define VBVA_F_RECORD_PARTIAL 0x80000000u + +struct vbva_record { + u32 len_and_flags; +} __packed; + +/* + * The minimum HGSMI heap size is PAGE_SIZE (4096 bytes) and is a restriction of + * the runtime heapsimple API. Use a minimum of 2 pages here, because the info + * area may also contain other data (for example the hgsmi_host_flags structure). + */ +#define VBVA_ADAPTER_INFORMATION_SIZE 65536 +#define VBVA_MIN_BUFFER_SIZE 65536 + +/* The value for port IO to let the adapter interpret the adapter memory. */ +#define VBOX_VIDEO_DISABLE_ADAPTER_MEMORY 0xFFFFFFFF + +/* The value for port IO to let the adapter interpret the adapter memory. */ +#define VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY 0x00000000 + +/* + * The value for port IO to let the adapter interpret the display memory. + * The display number is encoded in the low 16 bits. + */ +#define VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE 0x00010000 + +struct vbva_host_flags { + u32 host_events; + u32 supported_orders; +} __packed; + +struct vbva_buffer { + struct vbva_host_flags host_flags; + + /* The offset where the data starts in the buffer. */ + u32 data_offset; + /* The offset where the next data must be placed in the buffer. */ + u32 free_offset; + + /* The queue of record descriptions. */ + struct vbva_record records[VBVA_MAX_RECORDS]; + u32 record_first_index; + u32 record_free_index; + + /* Space to leave free when large partial records are transferred. 
*/ + u32 partial_write_tresh; + + u32 data_len; + /* variable size for the rest of the vbva_buffer area in VRAM. */ + u8 data[0]; +} __packed; + +#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024) + +/* guest->host commands */ +#define VBVA_QUERY_CONF32 1 +#define VBVA_SET_CONF32 2 +#define VBVA_INFO_VIEW 3 +#define VBVA_INFO_HEAP 4 +#define VBVA_FLUSH 5 +#define VBVA_INFO_SCREEN 6 +#define VBVA_ENABLE 7 +#define VBVA_MOUSE_POINTER_SHAPE 8 +/* informs host about HGSMI caps. see vbva_caps below */ +#define VBVA_INFO_CAPS 12 +/* configures scanline, see VBVASCANLINECFG below */ +#define VBVA_SCANLINE_CFG 13 +/* requests scanline info, see VBVASCANLINEINFO below */ +#define VBVA_SCANLINE_INFO 14 +/* inform host about VBVA Command submission */ +#define VBVA_CMDVBVA_SUBMIT 16 +/* inform host about VBVA Command submission */ +#define VBVA_CMDVBVA_FLUSH 17 +/* G->H DMA command */ +#define VBVA_CMDVBVA_CTL 18 +/* Query most recent mode hints sent */ +#define VBVA_QUERY_MODE_HINTS 19 +/* + * Report the guest virtual desktop position and size for mapping host and + * guest pointer positions. + */ +#define VBVA_REPORT_INPUT_MAPPING 20 +/* Report the guest cursor position and query the host position. */ +#define VBVA_CURSOR_POSITION 21 + +/* host->guest commands */ +#define VBVAHG_EVENT 1 +#define VBVAHG_DISPLAY_CUSTOM 2 + +/* vbva_conf32::index */ +#define VBOX_VBVA_CONF32_MONITOR_COUNT 0 +#define VBOX_VBVA_CONF32_HOST_HEAP_SIZE 1 +/* + * Returns VINF_SUCCESS if the host can report mode hints via VBVA. + * Set value to VERR_NOT_SUPPORTED before calling. + */ +#define VBOX_VBVA_CONF32_MODE_HINT_REPORTING 2 +/* + * Returns VINF_SUCCESS if the host can report guest cursor enabled status via + * VBVA. Set value to VERR_NOT_SUPPORTED before calling. + */ +#define VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING 3 +/* + * Returns the currently available host cursor capabilities. Available if + * VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING returns success. + */ +#define VBOX_VBVA_CONF32_CURSOR_CAPABILITIES 4 +/* Returns the supported flags in vbva_infoscreen.flags. */ +#define VBOX_VBVA_CONF32_SCREEN_FLAGS 5 +/* Returns the max size of VBVA record. */ +#define VBOX_VBVA_CONF32_MAX_RECORD_SIZE 6 + +struct vbva_conf32 { + u32 index; + u32 value; +} __packed; + +/* Reserved for historical reasons. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED0 BIT(0) +/* + * Guest cursor capability: can the host show a hardware cursor at the host + * pointer location? + */ +#define VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE BIT(1) +/* Reserved for historical reasons. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED2 BIT(2) +/* Reserved for historical reasons. Must always be unset. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED3 BIT(3) +/* Reserved for historical reasons. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED4 BIT(4) +/* Reserved for historical reasons. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED5 BIT(5) + +struct vbva_infoview { + /* Index of the screen, assigned by the guest. */ + u32 view_index; + + /* The screen offset in VRAM, the framebuffer starts here. */ + u32 view_offset; + + /* The size of the VRAM memory that can be used for the view. */ + u32 view_size; + + /* The recommended maximum size of the VRAM memory for the screen. 
*/ + u32 max_screen_size; +} __packed; + +struct vbva_flush { + u32 reserved; +} __packed; + +/* vbva_infoscreen.flags */ +#define VBVA_SCREEN_F_NONE 0x0000 +#define VBVA_SCREEN_F_ACTIVE 0x0001 +/* + * The virtual monitor has been disabled by the guest and should be removed + * by the host and ignored for purposes of pointer position calculation. + */ +#define VBVA_SCREEN_F_DISABLED 0x0002 +/* + * The virtual monitor has been blanked by the guest and should be blacked + * out by the host using width, height, etc values from the vbva_infoscreen + * request. + */ +#define VBVA_SCREEN_F_BLANK 0x0004 +/* + * The virtual monitor has been blanked by the guest and should be blacked + * out by the host using the previous mode values for width. height, etc. + */ +#define VBVA_SCREEN_F_BLANK2 0x0008 + +struct vbva_infoscreen { + /* Which view contains the screen. */ + u32 view_index; + + /* Physical X origin relative to the primary screen. */ + s32 origin_x; + + /* Physical Y origin relative to the primary screen. */ + s32 origin_y; + + /* Offset of visible framebuffer relative to the framebuffer start. */ + u32 start_offset; + + /* The scan line size in bytes. */ + u32 line_size; + + /* Width of the screen. */ + u32 width; + + /* Height of the screen. */ + u32 height; + + /* Color depth. */ + u16 bits_per_pixel; + + /* VBVA_SCREEN_F_* */ + u16 flags; +} __packed; + +/* vbva_enable.flags */ +#define VBVA_F_NONE 0x00000000 +#define VBVA_F_ENABLE 0x00000001 +#define VBVA_F_DISABLE 0x00000002 +/* extended VBVA to be used with WDDM */ +#define VBVA_F_EXTENDED 0x00000004 +/* vbva offset is absolute VRAM offset */ +#define VBVA_F_ABSOFFSET 0x00000008 + +struct vbva_enable { + u32 flags; + u32 offset; + s32 result; +} __packed; + +struct vbva_enable_ex { + struct vbva_enable base; + u32 screen_id; +} __packed; + +struct vbva_mouse_pointer_shape { + /* The host result. */ + s32 result; + + /* VBOX_MOUSE_POINTER_* bit flags. */ + u32 flags; + + /* X coordinate of the hot spot. */ + u32 hot_X; + + /* Y coordinate of the hot spot. */ + u32 hot_y; + + /* Width of the pointer in pixels. */ + u32 width; + + /* Height of the pointer in scanlines. */ + u32 height; + + /* Pointer data. + * + * The data consists of 1 bpp AND mask followed by 32 bpp XOR (color) + * mask. + * + * For pointers without alpha channel the XOR mask pixels are 32 bit + * values: (lsb)BGR0(msb). For pointers with alpha channel the XOR mask + * consists of (lsb)BGRA(msb) 32 bit values. + * + * Guest driver must create the AND mask for pointers with alpha chan., + * so if host does not support alpha, the pointer could be displayed as + * a normal color pointer. The AND mask can be constructed from alpha + * values. For example alpha value >= 0xf0 means bit 0 in the AND mask. + * + * The AND mask is 1 bpp bitmap with byte aligned scanlines. Size of AND + * mask, therefore, is and_len = (width + 7) / 8 * height. The padding + * bits at the end of any scanline are undefined. + * + * The XOR mask follows the AND mask on the next 4 bytes aligned offset: + * u8 *xor = and + (and_len + 3) & ~3 + * Bytes in the gap between the AND and the XOR mask are undefined. + * XOR mask scanlines have no gap between them and size of XOR mask is: + * xor_len = width * 4 * height. + * + * Preallocate 4 bytes for accessing actual data as p->data. 
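+ * As a worked example of the layout above: a 9x9 pointer has and_len = (9 + 7) / 8 * 9 = 18 bytes, so the XOR mask starts at the 4 byte aligned offset (18 + 3) & ~3 = 20 and takes xor_len = 9 * 4 * 9 = 324 bytes.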
+ */ + u8 data[4]; +} __packed; + +/* pointer is visible */ +#define VBOX_MOUSE_POINTER_VISIBLE 0x0001 +/* pointer has alpha channel */ +#define VBOX_MOUSE_POINTER_ALPHA 0x0002 +/* pointerData contains new pointer shape */ +#define VBOX_MOUSE_POINTER_SHAPE 0x0004 + +/* + * The guest driver can handle asynch guest cmd completion by reading the + * command offset from io port. + */ +#define VBVACAPS_COMPLETEGCMD_BY_IOREAD 0x00000001 +/* the guest driver can handle video adapter IRQs */ +#define VBVACAPS_IRQ 0x00000002 +/* The guest can read video mode hints sent via VBVA. */ +#define VBVACAPS_VIDEO_MODE_HINTS 0x00000004 +/* The guest can switch to a software cursor on demand. */ +#define VBVACAPS_DISABLE_CURSOR_INTEGRATION 0x00000008 +/* The guest does not depend on host handling the VBE registers. */ +#define VBVACAPS_USE_VBVA_ONLY 0x00000010 + +struct vbva_caps { + s32 rc; + u32 caps; +} __packed; + +/* Query the most recent mode hints received from the host. */ +struct vbva_query_mode_hints { + /* The maximum number of screens to return hints for. */ + u16 hints_queried_count; + /* The size of the mode hint structures directly following this one. */ + u16 hint_structure_guest_size; + /* Return code for the operation. Initialise to VERR_NOT_SUPPORTED. */ + s32 rc; +} __packed; + +/* + * Structure in which a mode hint is returned. The guest allocates an array + * of these immediately after the vbva_query_mode_hints structure. + * To accommodate future extensions, the vbva_query_mode_hints structure + * specifies the size of the vbva_modehint structures allocated by the guest, + * and the host only fills out structure elements which fit into that size. The + * host should fill any unused members (e.g. dx, dy) or structure space on the + * end with ~0. The whole structure can legally be set to ~0 to skip a screen. + */ +struct vbva_modehint { + u32 magic; + u32 cx; + u32 cy; + u32 bpp; /* Which has never been used... */ + u32 display; + u32 dx; /* X offset into the virtual frame-buffer. */ + u32 dy; /* Y offset into the virtual frame-buffer. */ + u32 enabled; /* Not flags. Add new members for new flags. */ +} __packed; + +#define VBVAMODEHINT_MAGIC 0x0801add9u + +/* + * Report the rectangle relative to which absolute pointer events should be + * expressed. This information remains valid until the next VBVA resize event + * for any screen, at which time it is reset to the bounding rectangle of all + * virtual screens and must be re-set. + */ +struct vbva_report_input_mapping { + s32 x; /* Upper left X co-ordinate relative to the first screen. */ + s32 y; /* Upper left Y co-ordinate relative to the first screen. */ + u32 cx; /* Rectangle width. */ + u32 cy; /* Rectangle height. */ +} __packed; + +/* + * Report the guest cursor position and query the host one. The host may wish + * to use the guest information to re-position its own cursor (though this is + * currently unlikely). + */ +struct vbva_cursor_position { + u32 report_position; /* Are we reporting a position? 
*/ + u32 x; /* Guest cursor X position */ + u32 y; /* Guest cursor Y position */ +} __packed; + +#endif diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h new file mode 100644 index 000000000000..55fcee3a6470 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2006-2016 Oracle Corporation */ + +#ifndef __VBOXVIDEO_GUEST_H__ +#define __VBOXVIDEO_GUEST_H__ + +#include <linux/genalloc.h> +#include "vboxvideo.h" + +/* + * Structure grouping the context needed for sending graphics acceleration + * information to the host via VBVA. Each screen has its own VBVA buffer. + */ +struct vbva_buf_ctx { + /* Offset of the buffer in the VRAM section for the screen */ + u32 buffer_offset; + /* Length of the buffer in bytes */ + u32 buffer_length; + /* Set if we wrote to the buffer faster than the host could read it */ + bool buffer_overflow; + /* VBVA record that we are currently preparing for the host, or NULL */ + struct vbva_record *record; + /* + * Pointer to the VBVA buffer mapped into the current address space. + * Will be NULL if VBVA is not enabled. + */ + struct vbva_buffer *vbva; +}; + +int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location); +int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps); +int hgsmi_test_query_conf(struct gen_pool *ctx); +int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret); +int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags, + u32 hot_x, u32 hot_y, u32 width, u32 height, + u8 *pixels, u32 len); +int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position, + u32 x, u32 y, u32 *x_host, u32 *y_host); + +bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + struct vbva_buffer *vbva, s32 screen); +void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + s32 screen); +bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx, + struct gen_pool *ctx); +void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx); +bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + const void *p, u32 len); +void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx, + u32 buffer_offset, u32 buffer_length); + +void hgsmi_process_display_info(struct gen_pool *ctx, u32 display, + s32 origin_x, s32 origin_y, u32 start_offset, + u32 pitch, u32 width, u32 height, + u16 bpp, u16 flags); +int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y, + u32 width, u32 height); +int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens, + struct vbva_modehint *hints); + +#endif diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h b/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h new file mode 100644 index 000000000000..427235869297 --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2006-2016 Oracle Corporation */ + +#ifndef __VBOXVIDEO_VBE_H__ +#define __VBOXVIDEO_VBE_H__ + +/* GUEST <-> HOST Communication API */ + +#define VBE_DISPI_BANK_ADDRESS 0xA0000 +#define VBE_DISPI_BANK_SIZE_KB 64 + +#define VBE_DISPI_MAX_XRES 16384 +#define VBE_DISPI_MAX_YRES 16384 +#define VBE_DISPI_MAX_BPP 32 + +#define VBE_DISPI_IOPORT_INDEX 0x01CE +#define VBE_DISPI_IOPORT_DATA 0x01CF + +#define VBE_DISPI_IOPORT_DAC_WRITE_INDEX 0x03C8 +#define VBE_DISPI_IOPORT_DAC_DATA 0x03C9 + +#define VBE_DISPI_INDEX_ID 0x0 +#define VBE_DISPI_INDEX_XRES 0x1 +#define VBE_DISPI_INDEX_YRES 0x2 
+#define VBE_DISPI_INDEX_BPP 0x3 +#define VBE_DISPI_INDEX_ENABLE 0x4 +#define VBE_DISPI_INDEX_BANK 0x5 +#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6 +#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7 +#define VBE_DISPI_INDEX_X_OFFSET 0x8 +#define VBE_DISPI_INDEX_Y_OFFSET 0x9 +#define VBE_DISPI_INDEX_VBOX_VIDEO 0xa +#define VBE_DISPI_INDEX_FB_BASE_HI 0xb + +#define VBE_DISPI_ID0 0xB0C0 +#define VBE_DISPI_ID1 0xB0C1 +#define VBE_DISPI_ID2 0xB0C2 +#define VBE_DISPI_ID3 0xB0C3 +#define VBE_DISPI_ID4 0xB0C4 + +#define VBE_DISPI_ID_VBOX_VIDEO 0xBE00 +/* The VBOX interface id. Indicates support for VBVA shared memory interface. */ +#define VBE_DISPI_ID_HGSMI 0xBE01 +#define VBE_DISPI_ID_ANYX 0xBE02 + +#define VBE_DISPI_DISABLED 0x00 +#define VBE_DISPI_ENABLED 0x01 +#define VBE_DISPI_GETCAPS 0x02 +#define VBE_DISPI_8BIT_DAC 0x20 + +#define VGA_PORT_HGSMI_HOST 0x3b0 +#define VGA_PORT_HGSMI_GUEST 0x3d0 + +#endif diff --git a/drivers/gpu/drm/vboxvideo/vbva_base.c b/drivers/gpu/drm/vboxvideo/vbva_base.c new file mode 100644 index 000000000000..36bc9824ec3f --- /dev/null +++ b/drivers/gpu/drm/vboxvideo/vbva_base.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: MIT +/* Copyright (C) 2006-2017 Oracle Corporation */ + +#include <linux/vbox_err.h> +#include "vbox_drv.h" +#include "vboxvideo_guest.h" +#include "hgsmi_channels.h" + +/* + * There is a hardware ring buffer in the graphics device video RAM, formerly + * in the VBox VMMDev PCI memory space. + * All graphics commands go there serialized by vbva_buffer_begin_update. + * and vbva_buffer_end_update. + * + * free_offset is writing position. data_offset is reading position. + * free_offset == data_offset means buffer is empty. + * There must be always gap between data_offset and free_offset when data + * are in the buffer. + * Guest only changes free_offset, host changes data_offset. + */ + +static u32 vbva_buffer_available(const struct vbva_buffer *vbva) +{ + s32 diff = vbva->data_offset - vbva->free_offset; + + return diff > 0 ? diff : vbva->data_len + diff; +} + +static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx, + const void *p, u32 len, u32 offset) +{ + struct vbva_buffer *vbva = vbva_ctx->vbva; + u32 bytes_till_boundary = vbva->data_len - offset; + u8 *dst = &vbva->data[offset]; + s32 diff = len - bytes_till_boundary; + + if (diff <= 0) { + /* Chunk will not cross buffer boundary. */ + memcpy(dst, p, len); + } else { + /* Chunk crosses buffer boundary. 
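To make the wrap-around case concrete with illustrative numbers (not taken from the patch): if data_len is 1024, offset is 1000 and len is 100, then bytes_till_boundary is 24 and diff is 76, so the first memcpy below fills the 24 bytes up to the end of the ring and the second copies the remaining 76 bytes to data[0].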
*/ + memcpy(dst, p, bytes_till_boundary); + memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff); + } +} + +static void vbva_buffer_flush(struct gen_pool *ctx) +{ + struct vbva_flush *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH); + if (!p) + return; + + p->reserved = 0; + + hgsmi_buffer_submit(ctx, p); + hgsmi_buffer_free(ctx, p); +} + +bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + const void *p, u32 len) +{ + struct vbva_record *record; + struct vbva_buffer *vbva; + u32 available; + + vbva = vbva_ctx->vbva; + record = vbva_ctx->record; + + if (!vbva || vbva_ctx->buffer_overflow || + !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL)) + return false; + + available = vbva_buffer_available(vbva); + + while (len > 0) { + u32 chunk = len; + + if (chunk >= available) { + vbva_buffer_flush(ctx); + available = vbva_buffer_available(vbva); + } + + if (chunk >= available) { + if (WARN_ON(available <= vbva->partial_write_tresh)) { + vbva_ctx->buffer_overflow = true; + return false; + } + chunk = available - vbva->partial_write_tresh; + } + + vbva_buffer_place_data_at(vbva_ctx, p, chunk, + vbva->free_offset); + + vbva->free_offset = (vbva->free_offset + chunk) % + vbva->data_len; + record->len_and_flags += chunk; + available -= chunk; + len -= chunk; + p += chunk; + } + + return true; +} + +static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx, + struct gen_pool *ctx, s32 screen, bool enable) +{ + struct vbva_enable_ex *p; + bool ret; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE); + if (!p) + return false; + + p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE; + p->base.offset = vbva_ctx->buffer_offset; + p->base.result = VERR_NOT_SUPPORTED; + if (screen >= 0) { + p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET; + p->screen_id = screen; + } + + hgsmi_buffer_submit(ctx, p); + + if (enable) + ret = p->base.result >= 0; + else + ret = true; + + hgsmi_buffer_free(ctx, p); + + return ret; +} + +bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + struct vbva_buffer *vbva, s32 screen) +{ + bool ret = false; + + memset(vbva, 0, sizeof(*vbva)); + vbva->partial_write_tresh = 256; + vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer); + vbva_ctx->vbva = vbva; + + ret = vbva_inform_host(vbva_ctx, ctx, screen, true); + if (!ret) + vbva_disable(vbva_ctx, ctx, screen); + + return ret; +} + +void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + s32 screen) +{ + vbva_ctx->buffer_overflow = false; + vbva_ctx->record = NULL; + vbva_ctx->vbva = NULL; + + vbva_inform_host(vbva_ctx, ctx, screen, false); +} + +bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx, + struct gen_pool *ctx) +{ + struct vbva_record *record; + u32 next; + + if (!vbva_ctx->vbva || + !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED)) + return false; + + WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record); + + next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS; + + /* Flush if all slots in the records queue are used */ + if (next == vbva_ctx->vbva->record_first_index) + vbva_buffer_flush(ctx); + + /* If even after flush there is no place then fail the request */ + if (next == vbva_ctx->vbva->record_first_index) + return false; + + record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index]; + record->len_and_flags = VBVA_F_RECORD_PARTIAL; + vbva_ctx->vbva->record_free_index = next; + /* Remember which record we are using. 
*/ + vbva_ctx->record = record; + + return true; +} + +void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx) +{ + struct vbva_record *record = vbva_ctx->record; + + WARN_ON(!vbva_ctx->vbva || !record || + !(record->len_and_flags & VBVA_F_RECORD_PARTIAL)); + + /* Mark the record completed. */ + record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL; + + vbva_ctx->buffer_overflow = false; + vbva_ctx->record = NULL; +} + +void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx, + u32 buffer_offset, u32 buffer_length) +{ + vbva_ctx->buffer_offset = buffer_offset; + vbva_ctx->buffer_length = buffer_length; +} diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 8dcce7182bb7..92e3f98d8478 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -201,8 +201,6 @@ static void vc4_bo_destroy(struct vc4_bo *bo) bo->validated_shader = NULL; } - reservation_object_fini(&bo->_resv); - drm_gem_cma_free_object(obj); } @@ -427,8 +425,6 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size) vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++; vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size; mutex_unlock(&vc4->bo_lock); - bo->resv = &bo->_resv; - reservation_object_init(bo->resv); return &bo->base.base; } @@ -684,13 +680,6 @@ static void vc4_bo_cache_time_timer(struct timer_list *t) schedule_work(&vc4->bo_cache.time_work); } -struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj) -{ - struct vc4_bo *bo = to_vc4_bo(obj); - - return bo->resv; -} - struct dma_buf * vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) { @@ -822,14 +811,12 @@ vc4_prime_import_sg_table(struct drm_device *dev, struct sg_table *sgt) { struct drm_gem_object *obj; - struct vc4_bo *bo; obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt); if (IS_ERR(obj)) return obj; - bo = to_vc4_bo(obj); - bo->resv = attach->dmabuf->resv; + obj->resv = attach->dmabuf->resv; return obj; } diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 730008d3da76..64c964b7c577 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -834,6 +834,14 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) drm_crtc_send_vblank_event(crtc, vc4_crtc->event); vc4_crtc->event = NULL; drm_crtc_vblank_put(crtc); + + /* Wait for the page flip to unmask the underrun to ensure that + * the display list was updated by the hardware. Before that + * happens, the HVS will be using the previous display list with + * the CRTC and encoder already reconfigured, leading to + * underruns. This can be seen when reconfiguring the CRTC. 
+ */ + vc4_hvs_unmask_underrun(dev, vc4_crtc->channel); } spin_unlock_irqrestore(&dev->event_lock, flags); } diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c index 7a0003de71ab..59cdad89f844 100644 --- a/drivers/gpu/drm/vc4/vc4_debugfs.c +++ b/drivers/gpu/drm/vc4/vc4_debugfs.c @@ -23,6 +23,7 @@ static const struct drm_info_list vc4_debugfs_list[] = { {"vec_regs", vc4_vec_debugfs_regs, 0}, {"txp_regs", vc4_txp_debugfs_regs, 0}, {"hvs_regs", vc4_hvs_debugfs_regs, 0}, + {"hvs_underrun", vc4_hvs_debugfs_underrun, 0}, {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0}, {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1}, {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2}, @@ -35,6 +36,15 @@ static const struct drm_info_list vc4_debugfs_list[] = { int vc4_debugfs_init(struct drm_minor *minor) { + struct vc4_dev *vc4 = to_vc4_dev(minor->dev); + struct dentry *dentry; + + dentry = debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR, + minor->debugfs_root, + &vc4->load_tracker_enabled); + if (!dentry) + return -ENOMEM; + return drm_debugfs_create_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES, minor->debugfs_root, minor); } diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 5fcd2f0da7f7..4daf44fd4548 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -200,7 +200,6 @@ static struct drm_driver vc4_drm_driver = { .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = drm_gem_prime_import, .gem_prime_export = vc4_prime_export, - .gem_prime_res_obj = vc4_prime_res_obj, .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, .gem_prime_import_sg_table = vc4_prime_import_sg_table, .gem_prime_vmap = vc4_prime_vmap, @@ -287,7 +286,7 @@ static int vc4_drm_bind(struct device *dev) vc4_kms_load(drm); - drm_fbdev_generic_setup(drm, 32); + drm_fbdev_generic_setup(drm, 16); return 0; @@ -312,6 +311,7 @@ static void vc4_drm_unbind(struct device *dev) drm_mode_config_cleanup(drm); + drm_atomic_private_obj_fini(&vc4->load_tracker); drm_atomic_private_obj_fini(&vc4->ctm_manager); drm_dev_put(drm); diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 2c635f001c71..7a3c093e7443 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -7,7 +7,6 @@ */ #include <linux/mm_types.h> -#include <linux/reservation.h> #include <drm/drmP.h> #include <drm/drm_util.h> #include <drm/drm_encoder.h> @@ -185,10 +184,20 @@ struct vc4_dev { /* Bitmask of the current bin_alloc used for overflow memory. */ uint32_t bin_alloc_overflow; + /* Incremented when an underrun error happened after an atomic commit. + * This is particularly useful to detect when a specific modeset is too + * demanding in term of memory or HVS bandwidth which is hard to guess + * at atomic check time. + */ + atomic_t underrun; + struct work_struct overflow_mem_work; int power_refcount; + /* Set to true when the load tracker is active. */ + bool load_tracker_enabled; + /* Mutex controlling the power refcount. 
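Both debugfs entries added earlier in this patch tie back to these fields: the hvs_underrun entry prints the underrun counter via vc4_hvs_debugfs_underrun(), and the hvs_load_tracker boolean file created in vc4_debugfs_init() flips load_tracker_enabled at run time. Assuming debugfs is mounted in the usual place, they would typically appear under /sys/kernel/debug/dri/<minor>/.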
*/ struct mutex power_lock; @@ -201,6 +210,7 @@ struct vc4_dev { struct drm_modeset_lock ctm_state_lock; struct drm_private_obj ctm_manager; + struct drm_private_obj load_tracker; }; static inline struct vc4_dev * @@ -240,10 +250,6 @@ struct vc4_bo { */ struct vc4_validated_shader_info *validated_shader; - /* normally (resv == &_resv) except for imported bo's */ - struct reservation_object *resv; - struct reservation_object _resv; - /* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i * for user-allocated labels. */ @@ -376,6 +382,16 @@ struct vc4_plane_state { * when async update is not possible. */ bool dlist_initialized; + + /* Load of this plane on the HVS block. The load is expressed in HVS + * cycles/sec. + */ + u64 hvs_load; + + /* Memory bandwidth needed for this plane. This is expressed in + * bytes/sec. + */ + u64 membus_load; }; static inline struct vc4_plane_state * @@ -685,7 +701,6 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); vm_fault_t vc4_fault(struct vm_fault *vmf); int vc4_mmap(struct file *filp, struct vm_area_struct *vma); -struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj); int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, @@ -773,6 +788,9 @@ void vc4_irq_reset(struct drm_device *dev); extern struct platform_driver vc4_hvs_driver; void vc4_hvs_dump_state(struct drm_device *dev); int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused); +int vc4_hvs_debugfs_underrun(struct seq_file *m, void *unused); +void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel); +void vc4_hvs_mask_underrun(struct drm_device *dev, int channel); /* vc4_kms.c */ int vc4_kms_load(struct drm_device *dev); diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index aea2b8dfec17..5ee5bf7fedf7 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -536,7 +536,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) bo = to_vc4_bo(&exec->bo[i]->base); bo->seqno = seqno; - reservation_object_add_shared_fence(bo->resv, exec->fence); + reservation_object_add_shared_fence(bo->base.base.resv, exec->fence); } list_for_each_entry(bo, &exec->unref_list, unref_head) { @@ -547,7 +547,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) bo = to_vc4_bo(&exec->rcl_write_bo[i]->base); bo->write_seqno = seqno; - reservation_object_add_excl_fence(bo->resv, exec->fence); + reservation_object_add_excl_fence(bo->base.base.resv, exec->fence); } } @@ -559,7 +559,7 @@ vc4_unlock_bo_reservations(struct drm_device *dev, int i; for (i = 0; i < exec->bo_count; i++) { - struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base); + struct drm_gem_object *bo = &exec->bo[i]->base; ww_mutex_unlock(&bo->resv->lock); } @@ -581,13 +581,13 @@ vc4_lock_bo_reservations(struct drm_device *dev, { int contended_lock = -1; int i, ret; - struct vc4_bo *bo; + struct drm_gem_object *bo; ww_acquire_init(acquire_ctx, &reservation_ww_class); retry: if (contended_lock != -1) { - bo = to_vc4_bo(&exec->bo[contended_lock]->base); + bo = &exec->bo[contended_lock]->base; ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, acquire_ctx); if (ret) { @@ -600,19 +600,19 @@ retry: if (i == contended_lock) continue; - bo = to_vc4_bo(&exec->bo[i]->base); + bo = &exec->bo[i]->base; ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx); if (ret) { int j; for (j = 
0; j < i; j++) { - bo = to_vc4_bo(&exec->bo[j]->base); + bo = &exec->bo[j]->base; ww_mutex_unlock(&bo->resv->lock); } if (contended_lock != -1 && contended_lock >= i) { - bo = to_vc4_bo(&exec->bo[contended_lock]->base); + bo = &exec->bo[contended_lock]->base; ww_mutex_unlock(&bo->resv->lock); } @@ -633,7 +633,7 @@ retry: * before we commit the CL to the hardware. */ for (i = 0; i < exec->bo_count; i++) { - bo = to_vc4_bo(&exec->bo[i]->base); + bo = &exec->bo[i]->base; ret = reservation_object_reserve_shared(bo->resv, 1); if (ret) { diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 5d8c749c9749..918e71256ecc 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -22,6 +22,7 @@ * each CRTC. */ +#include <drm/drm_atomic_helper.h> #include <linux/component.h> #include "vc4_drv.h" #include "vc4_regs.h" @@ -102,6 +103,18 @@ int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused) return 0; } + +int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct drm_printer p = drm_seq_file_printer(m); + + drm_printf(&p, "%d\n", atomic_read(&vc4->underrun)); + + return 0; +} #endif /* The filter kernel is composed of dwords each containing 3 9-bit @@ -166,6 +179,67 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs, return 0; } +void vc4_hvs_mask_underrun(struct drm_device *dev, int channel) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + u32 dispctrl = HVS_READ(SCALER_DISPCTRL); + + dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel); + + HVS_WRITE(SCALER_DISPCTRL, dispctrl); +} + +void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + u32 dispctrl = HVS_READ(SCALER_DISPCTRL); + + dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel); + + HVS_WRITE(SCALER_DISPSTAT, + SCALER_DISPSTAT_EUFLOW(channel)); + HVS_WRITE(SCALER_DISPCTRL, dispctrl); +} + +static void vc4_hvs_report_underrun(struct drm_device *dev) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + + atomic_inc(&vc4->underrun); + DRM_DEV_ERROR(dev->dev, "HVS underrun\n"); +} + +static irqreturn_t vc4_hvs_irq_handler(int irq, void *data) +{ + struct drm_device *dev = data; + struct vc4_dev *vc4 = to_vc4_dev(dev); + irqreturn_t irqret = IRQ_NONE; + int channel; + u32 control; + u32 status; + + status = HVS_READ(SCALER_DISPSTAT); + control = HVS_READ(SCALER_DISPCTRL); + + for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) { + /* Interrupt masking is not always honored, so check it here. */ + if (status & SCALER_DISPSTAT_EUFLOW(channel) && + control & SCALER_DISPCTRL_DSPEISLUR(channel)) { + vc4_hvs_mask_underrun(dev, channel); + vc4_hvs_report_underrun(dev); + + irqret = IRQ_HANDLED; + } + } + + /* Clear every per-channel interrupt flag. */ + HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) | + SCALER_DISPSTAT_IRQMASK(1) | + SCALER_DISPSTAT_IRQMASK(2)); + + return irqret; +} + static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); @@ -219,15 +293,36 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) dispctrl = HVS_READ(SCALER_DISPCTRL); dispctrl |= SCALER_DISPCTRL_ENABLE; + dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) | + SCALER_DISPCTRL_DISPEIRQ(1) | + SCALER_DISPCTRL_DISPEIRQ(2); /* Set DSP3 (PV1) to use HVS channel 2, which would otherwise * be unused. 
*/ dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK; + dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ | + SCALER_DISPCTRL_SLVWREIRQ | + SCALER_DISPCTRL_SLVRDEIRQ | + SCALER_DISPCTRL_DSPEIEOF(0) | + SCALER_DISPCTRL_DSPEIEOF(1) | + SCALER_DISPCTRL_DSPEIEOF(2) | + SCALER_DISPCTRL_DSPEIEOLN(0) | + SCALER_DISPCTRL_DSPEIEOLN(1) | + SCALER_DISPCTRL_DSPEIEOLN(2) | + SCALER_DISPCTRL_DSPEISLUR(0) | + SCALER_DISPCTRL_DSPEISLUR(1) | + SCALER_DISPCTRL_DSPEISLUR(2) | + SCALER_DISPCTRL_SCLEIRQ); dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); HVS_WRITE(SCALER_DISPCTRL, dispctrl); + ret = devm_request_irq(dev, platform_get_irq(pdev, 0), + vc4_hvs_irq_handler, 0, "vc4 hvs", drm); + if (ret) + return ret; + return 0; } diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 91b8c72ff361..5160cad25fce 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -34,6 +34,18 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv) return container_of(priv, struct vc4_ctm_state, base); } +struct vc4_load_tracker_state { + struct drm_private_state base; + u64 hvs_load; + u64 membus_load; +}; + +static struct vc4_load_tracker_state * +to_vc4_load_tracker_state(struct drm_private_state *priv) +{ + return container_of(priv, struct vc4_load_tracker_state, base); +} + static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state, struct drm_private_obj *manager) { @@ -138,6 +150,16 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_crtc *vc4_crtc; + int i; + + for (i = 0; i < dev->mode_config.num_crtc; i++) { + if (!state->crtcs[i].ptr || !state->crtcs[i].commit) + continue; + + vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr); + vc4_hvs_mask_underrun(dev, vc4_crtc->channel); + } drm_atomic_helper_wait_for_fences(dev, state, false); @@ -385,6 +407,85 @@ vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) return 0; } +static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state) +{ + struct drm_plane_state *old_plane_state, *new_plane_state; + struct vc4_dev *vc4 = to_vc4_dev(state->dev); + struct vc4_load_tracker_state *load_state; + struct drm_private_state *priv_state; + struct drm_plane *plane; + int i; + + priv_state = drm_atomic_get_private_obj_state(state, + &vc4->load_tracker); + if (IS_ERR(priv_state)) + return PTR_ERR(priv_state); + + load_state = to_vc4_load_tracker_state(priv_state); + for_each_oldnew_plane_in_state(state, plane, old_plane_state, + new_plane_state, i) { + struct vc4_plane_state *vc4_plane_state; + + if (old_plane_state->fb && old_plane_state->crtc) { + vc4_plane_state = to_vc4_plane_state(old_plane_state); + load_state->membus_load -= vc4_plane_state->membus_load; + load_state->hvs_load -= vc4_plane_state->hvs_load; + } + + if (new_plane_state->fb && new_plane_state->crtc) { + vc4_plane_state = to_vc4_plane_state(new_plane_state); + load_state->membus_load += vc4_plane_state->membus_load; + load_state->hvs_load += vc4_plane_state->hvs_load; + } + } + + /* Don't check the load when the tracker is disabled. */ + if (!vc4->load_tracker_enabled) + return 0; + + /* The absolute limit is 2Gbyte/sec, but let's take a margin to let + * the system work when other blocks are accessing the memory. + */ + if (load_state->membus_load > SZ_1G + SZ_512M) + return -ENOSPC; + + /* HVS clock is supposed to run @ 250Mhz, let's take a margin and + * consider the maximum number of cycles is 240M. 
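Concretely, SZ_1G + SZ_512M is 1,610,612,736 bytes/sec, i.e. a 1.5 GiB/s budget kept safely below the 2 Gbyte/sec absolute limit mentioned above, and the 240000000 cycles/sec cap applied just below leaves about 4% headroom under a 250 MHz HVS clock.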
+ */ + if (load_state->hvs_load > 240000000ULL) + return -ENOSPC; + + return 0; +} + +static struct drm_private_state * +vc4_load_tracker_duplicate_state(struct drm_private_obj *obj) +{ + struct vc4_load_tracker_state *state; + + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct vc4_load_tracker_state *load_state; + + load_state = to_vc4_load_tracker_state(state); + kfree(load_state); +} + +static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = { + .atomic_duplicate_state = vc4_load_tracker_duplicate_state, + .atomic_destroy_state = vc4_load_tracker_destroy_state, +}; + static int vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { @@ -394,7 +495,11 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) if (ret < 0) return ret; - return drm_atomic_helper_check(dev, state); + ret = drm_atomic_helper_check(dev, state); + if (ret) + return ret; + + return vc4_load_tracker_atomic_check(state); } static const struct drm_mode_config_funcs vc4_mode_funcs = { @@ -407,8 +512,14 @@ int vc4_kms_load(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_ctm_state *ctm_state; + struct vc4_load_tracker_state *load_state; int ret; + /* Start with the load tracker enabled. Can be disabled through the + * debugfs load_tracker file. + */ + vc4->load_tracker_enabled = true; + sema_init(&vc4->async_modeset, 1); /* Set support for vblank irq fast disable, before drm_vblank_init() */ @@ -436,6 +547,15 @@ int vc4_kms_load(struct drm_device *dev) drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base, &vc4_ctm_state_funcs); + load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); + if (!load_state) { + drm_atomic_private_obj_fini(&vc4->ctm_manager); + return -ENOMEM; + } + + drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base, + &vc4_load_tracker_state_funcs); + drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index d098337c10e9..4d918d3e4858 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -488,6 +488,61 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state, } } +static void vc4_plane_calc_load(struct drm_plane_state *state) +{ + unsigned int hvs_load_shift, vrefresh, i; + struct drm_framebuffer *fb = state->fb; + struct vc4_plane_state *vc4_state; + struct drm_crtc_state *crtc_state; + unsigned int vscale_factor; + + vc4_state = to_vc4_plane_state(state); + crtc_state = drm_atomic_get_existing_crtc_state(state->state, + state->crtc); + vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode); + + /* The HVS is able to process 2 pixels/cycle when scaling the source, + * 4 pixels/cycle otherwise. + * Alpha blending step seems to be pipelined and it's always operating + * at 4 pixels/cycle, so the limiting aspect here seems to be the + * scaler block. + * HVS load is expressed in clk-cycles/sec (AKA Hz). 
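As a worked example with illustrative figures (not taken from the patch): a single unscaled 1920x1080 ARGB8888 plane on a 60 Hz CRTC has one format plane, cpp = 4 and vscale_factor = 1, so the computation below yields membus_load = 1920 * 1080 * 4 * 60, roughly 498 Mbytes/sec, and with hvs_load_shift = 2, hvs_load = (1920 * 1080 * 60) >> 2, roughly 31.1 Mcycles/sec, comfortably inside the 1.5 GiB/s and 240 Mcycles/sec budgets enforced by vc4_load_tracker_atomic_check() above.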
+ */ + if (vc4_state->x_scaling[0] != VC4_SCALING_NONE || + vc4_state->x_scaling[1] != VC4_SCALING_NONE || + vc4_state->y_scaling[0] != VC4_SCALING_NONE || + vc4_state->y_scaling[1] != VC4_SCALING_NONE) + hvs_load_shift = 1; + else + hvs_load_shift = 2; + + vc4_state->membus_load = 0; + vc4_state->hvs_load = 0; + for (i = 0; i < fb->format->num_planes; i++) { + /* Even if the bandwidth/plane required for a single frame is + * + * vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh + * + * when downscaling, we have to read more pixels per line in + * the time frame reserved for a single line, so the bandwidth + * demand can be punctually higher. To account for that, we + * calculate the down-scaling factor and multiply the plane + * load by this number. We're likely over-estimating the read + * demand, but that's better than under-estimating it. + */ + vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i], + vc4_state->crtc_h); + vc4_state->membus_load += vc4_state->src_w[i] * + vc4_state->src_h[i] * vscale_factor * + fb->format->cpp[i]; + vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w; + } + + vc4_state->hvs_load *= vrefresh; + vc4_state->hvs_load >>= hvs_load_shift; + vc4_state->membus_load *= vrefresh; +} + static int vc4_plane_allocate_lbm(struct drm_plane_state *state) { struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev); @@ -875,6 +930,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane, */ vc4_state->dlist_initialized = 1; + vc4_plane_calc_load(state); + return 0; } @@ -1082,7 +1139,7 @@ static int vc4_prepare_fb(struct drm_plane *plane, bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base); - fence = reservation_object_get_excl_rcu(bo->resv); + fence = reservation_object_get_excl_rcu(bo->base.base.resv); drm_atomic_set_fence_for_plane(state, fence); if (plane->state->fb == state->fb) diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 931088014272..c0c5fadaf7e3 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -212,11 +212,11 @@ #define PV_HACT_ACT 0x30 +#define SCALER_CHANNELS_COUNT 3 + #define SCALER_DISPCTRL 0x00000000 /* Global register for clock gating the HVS */ # define SCALER_DISPCTRL_ENABLE BIT(31) -# define SCALER_DISPCTRL_DSP2EISLUR BIT(15) -# define SCALER_DISPCTRL_DSP1EISLUR BIT(14) # define SCALER_DISPCTRL_DSP3_MUX_MASK VC4_MASK(19, 18) # define SCALER_DISPCTRL_DSP3_MUX_SHIFT 18 @@ -224,45 +224,25 @@ * SCALER_DISPSTAT_IRQDISP0. Note that short frame contributions are * always enabled. */ -# define SCALER_DISPCTRL_DSP0EISLUR BIT(13) -# define SCALER_DISPCTRL_DSP2EIEOLN BIT(12) -# define SCALER_DISPCTRL_DSP2EIEOF BIT(11) -# define SCALER_DISPCTRL_DSP1EIEOLN BIT(10) -# define SCALER_DISPCTRL_DSP1EIEOF BIT(9) +# define SCALER_DISPCTRL_DSPEISLUR(x) BIT(13 + (x)) /* Enables Display 0 end-of-line-N contribution to * SCALER_DISPSTAT_IRQDISP0 */ -# define SCALER_DISPCTRL_DSP0EIEOLN BIT(8) +# define SCALER_DISPCTRL_DSPEIEOLN(x) BIT(8 + ((x) * 2)) /* Enables Display 0 EOF contribution to SCALER_DISPSTAT_IRQDISP0 */ -# define SCALER_DISPCTRL_DSP0EIEOF BIT(7) +# define SCALER_DISPCTRL_DSPEIEOF(x) BIT(7 + ((x) * 2)) # define SCALER_DISPCTRL_SLVRDEIRQ BIT(6) # define SCALER_DISPCTRL_SLVWREIRQ BIT(5) # define SCALER_DISPCTRL_DMAEIRQ BIT(4) -# define SCALER_DISPCTRL_DISP2EIRQ BIT(3) -# define SCALER_DISPCTRL_DISP1EIRQ BIT(2) /* Enables interrupt generation on the enabled EOF/EOLN/EISLUR * bits and short frames.. 
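The parameterised macros introduced above can be checked against the per-display defines they replace in this hunk: SCALER_DISPCTRL_DSPEISLUR(x) = BIT(13 + x) gives bits 13/14/15 for displays 0/1/2, matching the removed DSP0/1/2EISLUR; SCALER_DISPCTRL_DSPEIEOLN(x) = BIT(8 + 2 * x) gives bits 8/10/12; and SCALER_DISPCTRL_DSPEIEOF(x) = BIT(7 + 2 * x) gives bits 7/9/11, matching the removed DSP0/1/2EIEOLN and DSP0/1/2EIEOF values.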
*/ -# define SCALER_DISPCTRL_DISP0EIRQ BIT(1) +# define SCALER_DISPCTRL_DISPEIRQ(x) BIT(1 + (x)) /* Enables interrupt generation on scaler profiler interrupt. */ # define SCALER_DISPCTRL_SCLEIRQ BIT(0) #define SCALER_DISPSTAT 0x00000004 -# define SCALER_DISPSTAT_COBLOW2 BIT(29) -# define SCALER_DISPSTAT_EOLN2 BIT(28) -# define SCALER_DISPSTAT_ESFRAME2 BIT(27) -# define SCALER_DISPSTAT_ESLINE2 BIT(26) -# define SCALER_DISPSTAT_EUFLOW2 BIT(25) -# define SCALER_DISPSTAT_EOF2 BIT(24) - -# define SCALER_DISPSTAT_COBLOW1 BIT(21) -# define SCALER_DISPSTAT_EOLN1 BIT(20) -# define SCALER_DISPSTAT_ESFRAME1 BIT(19) -# define SCALER_DISPSTAT_ESLINE1 BIT(18) -# define SCALER_DISPSTAT_EUFLOW1 BIT(17) -# define SCALER_DISPSTAT_EOF1 BIT(16) - # define SCALER_DISPSTAT_RESP_MASK VC4_MASK(15, 14) # define SCALER_DISPSTAT_RESP_SHIFT 14 # define SCALER_DISPSTAT_RESP_OKAY 0 @@ -270,23 +250,26 @@ # define SCALER_DISPSTAT_RESP_SLVERR 2 # define SCALER_DISPSTAT_RESP_DECERR 3 -# define SCALER_DISPSTAT_COBLOW0 BIT(13) +# define SCALER_DISPSTAT_COBLOW(x) BIT(13 + ((x) * 8)) /* Set when the DISPEOLN line is done compositing. */ -# define SCALER_DISPSTAT_EOLN0 BIT(12) +# define SCALER_DISPSTAT_EOLN(x) BIT(12 + ((x) * 8)) /* Set when VSTART is seen but there are still pixels in the current * output line. */ -# define SCALER_DISPSTAT_ESFRAME0 BIT(11) +# define SCALER_DISPSTAT_ESFRAME(x) BIT(11 + ((x) * 8)) /* Set when HSTART is seen but there are still pixels in the current * output line. */ -# define SCALER_DISPSTAT_ESLINE0 BIT(10) +# define SCALER_DISPSTAT_ESLINE(x) BIT(10 + ((x) * 8)) /* Set when the the downstream tries to read from the display FIFO * while it's empty. */ -# define SCALER_DISPSTAT_EUFLOW0 BIT(9) +# define SCALER_DISPSTAT_EUFLOW(x) BIT(9 + ((x) * 8)) /* Set when the display mode changes from RUN to EOF */ -# define SCALER_DISPSTAT_EOF0 BIT(8) +# define SCALER_DISPSTAT_EOF(x) BIT(8 + ((x) * 8)) + +# define SCALER_DISPSTAT_IRQMASK(x) VC4_MASK(13 + ((x) * 8), \ + 8 + ((x) * 8)) /* Set on AXI invalid DMA ID error. */ # define SCALER_DISPSTAT_DMA_ERROR BIT(7) @@ -298,12 +281,10 @@ * SCALER_DISPSTAT_RESP_ERROR is not SCALER_DISPSTAT_RESP_OKAY. */ # define SCALER_DISPSTAT_IRQDMA BIT(4) -# define SCALER_DISPSTAT_IRQDISP2 BIT(3) -# define SCALER_DISPSTAT_IRQDISP1 BIT(2) /* Set when any of the EOF/EOLN/ESFRAME/ESLINE bits are set and their * corresponding interrupt bit is enabled in DISPCTRL. */ -# define SCALER_DISPSTAT_IRQDISP0 BIT(1) +# define SCALER_DISPSTAT_IRQDISP(x) BIT(1 + (x)) /* On read, the profiler interrupt. On write, clear *all* interrupt bits. */ # define SCALER_DISPSTAT_IRQSCL BIT(0) diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index aa279b5b0de7..cc2888dd7171 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -249,7 +249,6 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn, struct drm_connector_state *conn_state) { struct drm_crtc_state *crtc_state; - struct drm_gem_cma_object *gem; struct drm_framebuffer *fb; int i; @@ -275,8 +274,6 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn, if (i == ARRAY_SIZE(drm_fmts)) return -EINVAL; - gem = drm_fb_cma_get_gem_obj(fb, 0); - /* Pitch must be aligned on 16 bytes. 
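In the check that follows, GENMASK(3, 0) evaluates to 0xf, so any framebuffer pitch that is not a multiple of 16 bytes is rejected with -EINVAL.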
*/ if (fb->pitches[0] & GENMASK(3, 0)) return -EINVAL; @@ -327,7 +324,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn, TXP_WRITE(TXP_DST_CTRL, ctrl); - drm_writeback_queue_job(&txp->connector, conn_state->writeback_job); + drm_writeback_queue_job(&txp->connector, conn_state); } static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = { diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index b996ac1d4fcc..7c2893181ba4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -205,10 +205,10 @@ static struct drm_driver driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = virtio_gpu_debugfs_init, #endif + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, - .gem_prime_pin = virtgpu_gem_prime_pin, - .gem_prime_unpin = virtgpu_gem_prime_unpin, + .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, .gem_prime_vmap = virtgpu_gem_prime_vmap, .gem_prime_vunmap = virtgpu_gem_prime_vunmap, .gem_prime_mmap = virtgpu_gem_prime_mmap, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 3238fdf58eb4..86a264cee362 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -352,8 +352,7 @@ void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo); int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); /* virtgpu_prime.c */ -int virtgpu_gem_prime_pin(struct drm_gem_object *obj); -void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); +struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index c59ec34c80a5..22ef151410e0 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -28,15 +28,16 @@ * device that might share buffers with virtgpu */ -int virtgpu_gem_prime_pin(struct drm_gem_object *obj) +struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) { - WARN_ONCE(1, "not implemented"); - return -ENODEV; -} + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); -void virtgpu_gem_prime_unpin(struct drm_gem_object *obj) -{ - WARN_ONCE(1, "not implemented"); + if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages) + /* should not happen */ + return ERR_PTR(-EINVAL); + + return drm_prime_pages_to_sg(bo->tbo.ttm->pages, + bo->tbo.ttm->num_pages); } void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) @@ -56,7 +57,10 @@ void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) } int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *area) + struct vm_area_struct *vma) { - return -ENODEV; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + + bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start; + return drm_gem_prime_mmap(obj, vma); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index b913a56f3426..2a9112515f46 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info) 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, DRM_MODE_FLAG_NHSYNC | 
DRM_MODE_FLAG_PVSYNC) }; - struct drm_display_mode *old_mode; struct drm_display_mode *mode; int ret; - old_mode = par->set_mode; mode = drm_mode_duplicate(vmw_priv->dev, &new_mode); if (!mode) { DRM_ERROR("Could not create new fb mode.\n"); @@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info) mode->vdisplay = var->yres; vmw_guess_mode_timing(mode); - if (old_mode && drm_mode_equal(old_mode, mode)) { - drm_mode_destroy(vmw_priv->dev, mode); - mode = old_mode; - old_mode = NULL; - } else if (!vmw_kms_validate_mode_vram(vmw_priv, + if (!vmw_kms_validate_mode_vram(vmw_priv, mode->hdisplay * DIV_ROUND_UP(var->bits_per_pixel, 8), mode->vdisplay)) { @@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info) schedule_delayed_work(&par->local_work, 0); out_unlock: - if (old_mode) - drm_mode_destroy(vmw_priv->dev, old_mode); + if (par->set_mode) + drm_mode_destroy(vmw_priv->dev, par->set_mode); par->set_mode = mode; mutex_unlock(&par->bo_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index b93c558dd86e..7da752ca1c34 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); if (id < 0) - return id; + return (id != -ENOMEM ? 0 : id); spin_lock(&gman->lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 31786b200afc..a3357ff7540d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -311,7 +311,13 @@ static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter) static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter) { - return sg_page_iter_dma_address(&viter->iter); + /* + * FIXME: This driver wrongly mixes DMA and CPU SG list iteration and + * needs revision. 
See + * https://lore.kernel.org/lkml/20190104223531.GA1705@ziepe.ca/ + */ + return sg_page_iter_dma_address( + container_of(&viter->iter, struct sg_dma_page_iter, base)); } diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 3e78a832d7f9..84aa4d61dc42 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -582,6 +582,7 @@ static void xen_drm_drv_fini(struct xen_drm_front_info *front_info) drm_kms_helper_poll_fini(dev); drm_dev_unplug(dev); + drm_dev_put(dev); front_info->drm_info = NULL; diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c index 163fadb8a33a..d047a6867c59 100644 --- a/drivers/gpu/ipu-v3/ipu-cpmem.c +++ b/drivers/gpu/ipu-v3/ipu-cpmem.c @@ -277,9 +277,10 @@ void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off) } EXPORT_SYMBOL_GPL(ipu_cpmem_set_uv_offset); -void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride) +void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride, + u32 pixelformat) { - u32 ilo, sly; + u32 ilo, sly, sluv; if (stride < 0) { stride = -stride; @@ -290,9 +291,30 @@ void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride) sly = (stride * 2) - 1; + switch (pixelformat) { + case V4L2_PIX_FMT_YUV420: + case V4L2_PIX_FMT_YVU420: + sluv = stride / 2 - 1; + break; + case V4L2_PIX_FMT_NV12: + sluv = stride - 1; + break; + case V4L2_PIX_FMT_YUV422P: + sluv = stride - 1; + break; + case V4L2_PIX_FMT_NV16: + sluv = stride * 2 - 1; + break; + default: + sluv = 0; + break; + } + ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1); ipu_ch_param_write_field(ch, IPU_FIELD_ILO, ilo); ipu_ch_param_write_field(ch, IPU_FIELD_SLY, sly); + if (sluv) + ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, sluv); }; EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan); diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c index aa0e30a2ba18..d1e575571a8d 100644 --- a/drivers/gpu/ipu-v3/ipu-csi.c +++ b/drivers/gpu/ipu-v3/ipu-csi.c @@ -325,12 +325,21 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code, return 0; } +/* translate alternate field mode based on given standard */ +static inline enum v4l2_field +ipu_csi_translate_field(enum v4l2_field field, v4l2_std_id std) +{ + return (field != V4L2_FIELD_ALTERNATE) ? field : + ((std & V4L2_STD_525_60) ? + V4L2_FIELD_SEQ_BT : V4L2_FIELD_SEQ_TB); +} + /* * Fill a CSI bus config struct from mbus_config and mbus_framefmt. */ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg, - struct v4l2_mbus_config *mbus_cfg, - struct v4l2_mbus_framefmt *mbus_fmt) + const struct v4l2_mbus_config *mbus_cfg, + const struct v4l2_mbus_framefmt *mbus_fmt) { int ret; @@ -374,22 +383,76 @@ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg, return 0; } +static int +ipu_csi_set_bt_interlaced_codes(struct ipu_csi *csi, + const struct v4l2_mbus_framefmt *infmt, + const struct v4l2_mbus_framefmt *outfmt, + v4l2_std_id std) +{ + enum v4l2_field infield, outfield; + bool swap_fields; + + /* get translated field type of input and output */ + infield = ipu_csi_translate_field(infmt->field, std); + outfield = ipu_csi_translate_field(outfmt->field, std); + + /* + * Write the H-V-F codes the CSI will match against the + * incoming data for start/end of active and blanking + * field intervals. 
If input and output field types are + * sequential but not the same (one is SEQ_BT and the other + * is SEQ_TB), swap the F-bit so that the CSI will capture + * field 1 lines before field 0 lines. + */ + swap_fields = (V4L2_FIELD_IS_SEQUENTIAL(infield) && + V4L2_FIELD_IS_SEQUENTIAL(outfield) && + infield != outfield); + + if (!swap_fields) { + /* + * Field0BlankEnd = 110, Field0BlankStart = 010 + * Field0ActiveEnd = 100, Field0ActiveStart = 000 + * Field1BlankEnd = 111, Field1BlankStart = 011 + * Field1ActiveEnd = 101, Field1ActiveStart = 001 + */ + ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN, + CSI_CCIR_CODE_1); + ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2); + } else { + dev_dbg(csi->ipu->dev, "capture field swap\n"); + + /* same as above but with F-bit inverted */ + ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN, + CSI_CCIR_CODE_1); + ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2); + } + + ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3); + + return 0; +} + + int ipu_csi_init_interface(struct ipu_csi *csi, - struct v4l2_mbus_config *mbus_cfg, - struct v4l2_mbus_framefmt *mbus_fmt) + const struct v4l2_mbus_config *mbus_cfg, + const struct v4l2_mbus_framefmt *infmt, + const struct v4l2_mbus_framefmt *outfmt) { struct ipu_csi_bus_config cfg; unsigned long flags; u32 width, height, data = 0; + v4l2_std_id std; int ret; - ret = fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt); + ret = fill_csi_bus_cfg(&cfg, mbus_cfg, infmt); if (ret < 0) return ret; /* set default sensor frame width and height */ - width = mbus_fmt->width; - height = mbus_fmt->height; + width = infmt->width; + height = infmt->height; + if (infmt->field == V4L2_FIELD_ALTERNATE) + height *= 2; /* Set the CSI_SENS_CONF register remaining fields */ data |= cfg.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT | @@ -416,42 +479,22 @@ int ipu_csi_init_interface(struct ipu_csi *csi, ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3); break; case IPU_CSI_CLK_MODE_CCIR656_INTERLACED: - if (mbus_fmt->width == 720 && mbus_fmt->height == 576) { - /* - * PAL case - * - * Field0BlankEnd = 0x6, Field0BlankStart = 0x2, - * Field0ActiveEnd = 0x4, Field0ActiveStart = 0 - * Field1BlankEnd = 0x7, Field1BlankStart = 0x3, - * Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1 - */ - height = 625; /* framelines for PAL */ - - ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN, - CSI_CCIR_CODE_1); - ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2); - ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3); - } else if (mbus_fmt->width == 720 && mbus_fmt->height == 480) { - /* - * NTSC case - * - * Field0BlankEnd = 0x7, Field0BlankStart = 0x3, - * Field0ActiveEnd = 0x5, Field0ActiveStart = 0x1 - * Field1BlankEnd = 0x6, Field1BlankStart = 0x2, - * Field1ActiveEnd = 0x4, Field1ActiveStart = 0 - */ - height = 525; /* framelines for NTSC */ - - ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN, - CSI_CCIR_CODE_1); - ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2); - ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3); + if (width == 720 && height == 480) { + std = V4L2_STD_NTSC; + height = 525; + } else if (width == 720 && height == 576) { + std = V4L2_STD_PAL; + height = 625; } else { dev_err(csi->ipu->dev, - "Unsupported CCIR656 interlaced video mode\n"); - spin_unlock_irqrestore(&csi->lock, flags); - return -EINVAL; + "Unsupported interlaced video mode\n"); + ret = -EINVAL; + goto out_unlock; } + + ret = ipu_csi_set_bt_interlaced_codes(csi, infmt, outfmt, std); + if (ret) + goto out_unlock; break; case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR: case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR: 
@@ -476,9 +519,10 @@ int ipu_csi_init_interface(struct ipu_csi *csi, dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n", ipu_csi_read(csi, CSI_ACT_FRM_SIZE)); +out_unlock: spin_unlock_irqrestore(&csi->lock, flags); - return 0; + return ret; } EXPORT_SYMBOL_GPL(ipu_csi_init_interface); diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index dc8e039bfab5..f2f3ef8af271 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -48,6 +48,8 @@ #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/screen_info.h> +#include <linux/vt.h> +#include <linux/console.h> #include <linux/uaccess.h> @@ -168,6 +170,53 @@ void vga_set_default_device(struct pci_dev *pdev) vga_default = pci_dev_get(pdev); } +/** + * vga_remove_vgacon - deactivete vga console + * + * Unbind and unregister vgacon in case pdev is the default vga + * device. Can be called by gpu drivers on initialization to make + * sure vga register access done by vgacon will not disturb the + * device. + * + * @pdev: pci device. + */ +#if !defined(CONFIG_VGA_CONSOLE) +int vga_remove_vgacon(struct pci_dev *pdev) +{ + return 0; +} +#elif !defined(CONFIG_DUMMY_CONSOLE) +int vga_remove_vgacon(struct pci_dev *pdev) +{ + return -ENODEV; +} +#else +int vga_remove_vgacon(struct pci_dev *pdev) +{ + int ret = 0; + + if (pdev != vga_default) + return 0; + vgaarb_info(&pdev->dev, "deactivate vga console\n"); + + console_lock(); + if (con_is_bound(&vga_con)) + ret = do_take_over_console(&dummy_con, 0, + MAX_NR_CONSOLES - 1, 1); + if (ret == 0) { + ret = do_unregister_con_driver(&vga_con); + + /* Ignore "already unregistered". */ + if (ret == -ENODEV) + ret = 0; + } + console_unlock(); + + return ret; +} +#endif +EXPORT_SYMBOL(vga_remove_vgacon); + static inline void vga_irq_set_state(struct vga_device *vgadev, bool state) { if (vgadev->irq_set_state) |