author | Dave Airlie <airlied@redhat.com> | 2022-02-24 22:30:17 +0300 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2022-02-24 22:50:18 +0300 |
commit | 54f43c17d681f6d9523fcfaeefc9df77993802e1 (patch) | |
tree | a1b3807e26d05b9b17aea43b7725caf3757522ca /drivers | |
parent | 7f44571b53fd07e36ae4d2537a6fb40d79b39462 (diff) | |
parent | f915686bd97a9c234602426e6d132b74a112a8d6 (diff) | |
download | linux-54f43c17d681f6d9523fcfaeefc9df77993802e1.tar.xz |
Merge tag 'drm-misc-next-2022-02-23' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for v5.18:
UAPI Changes:
Cross-subsystem Changes:
- Split out panel-lvds and lvds dt bindings.
- Put yes/no, on/off, and enabled/disabled string helpers in linux/string_helpers.h
  and use them in drivers and TOMOYO (see the first sketch after this list).
- Clarify that dma_fence_chain and dma_fence_array should never include each other.
- Flatten chains in syncobjs (see the second sketch after this list).
- Don't double-add pages in fbdev deferred I/O when a page is already enlisted.
- Don't sort deferred-I/O pages by default in fbdev.
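
For the string_helpers item above, the change is mechanical: open-coded `cond ? "yes" : "no"` ternaries (one is visible in the atom.c hunk below) become calls to the shared helpers from linux/string_helpers.h. A minimal sketch of the intended usage; only str_yes_no() appears in this diff, and the companion helper name is recalled from the same header, so treat it as an assumption:

```c
#include <linux/string_helpers.h>
#include <linux/printk.h>

/* Before: pr_debug(" taken: %s\n", execute ? "yes" : "no"); */
static void report_state(bool execute, bool enabled)
{
	pr_debug(" taken: %s\n", str_yes_no(execute));
	/* str_enabled_disabled() is the assumed companion helper. */
	pr_debug(" state: %s\n", str_enabled_disabled(enabled));
}
```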
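The container rule from the two dma-buf items above is enforced further down by WARN_ON()s added in dma-fence-array.c and dma-fence-chain.c: callers must flatten before they aggregate. A rough sketch of what that looks like, assuming the dma_fence_chain_contained() helper introduced in this series; flatten_chain() is a made-up name and error handling is trimmed:

```c
#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/slab.h>

/* Walk a fence chain and collect the contained fences into a flat array. */
static struct dma_fence *flatten_chain(struct dma_fence *chain_head,
				       u64 context, unsigned int seqno)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;
	struct dma_fence *f;
	unsigned int count = 0, i = 0;

	dma_fence_chain_for_each(f, chain_head)
		++count;

	fences = kmalloc_array(count, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	dma_fence_chain_for_each(f, chain_head)
		fences[i++] = dma_fence_get(dma_fence_chain_contained(f));

	/* No collected fence is a container, so the new WARN_ON stays quiet. */
	array = dma_fence_array_create(count, fences, context, seqno, false);
	return array ? &array->base : NULL;
}
```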
Core Changes:
- Fix missing pm_runtime_put_sync in bridge.
- Set modifier support to only linear fb modifier if drivers don't
advertise support.
- As a result, we remove allow_fb_modifiers.
- Add missing clear for EDID Deep Color Modes in drm_reset_display_info.
- Assorted documentation updates.
- Warn once in drm_clflush if there is no arch support.
- Add missing select for dp helper in drm_panel_edp.
- Assorted small fixes.
- Improve fb-helper's clipping handling.
- Don't dump shmem mmaps in a core dump.
- Add accounting to the ttm resource manager, and use it in amdgpu (see the sketch after this list).
- Allow querying the detected eDP panel through debugfs.
- Add helpers for converting XRGB8888 to 8-bit and 1-bit grayscale.
- Improve drm's buddy allocator.
- Add selftests for the buddy allocator.
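
The ttm resource manager accounting item above replaces per-driver atomic usage counters (the amdgpu hunks below delete amdgpu_vram_mgr_usage(), amdgpu_gtt_mgr_usage() and friends) with a generic, per-manager counter. A minimal sketch of the new query side; report_vram_usage() is a made-up helper name, and note that man->size is in bytes rather than pages after this series:

```c
#include <drm/ttm/ttm_resource.h>
#include <linux/printk.h>

/* Query usage from the generic manager instead of a driver-private counter. */
static void report_vram_usage(struct ttm_resource_manager *man)
{
	pr_info("VRAM: %llu / %llu bytes used\n",
		ttm_resource_manager_usage(man), man->size);
}
```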
Driver Changes:
- Add support for nomodeset to many drm drivers.
- Use drm_module_*_driver() in many drm drivers (see the sketch after this list).
- Assorted small fixes to bridge/lt9611, v3d, vc4, vmwgfx, mxsfb, nouveau,
bridge/dw-hdmi, panfrost, lima, ingenic, sprd, bridge/anx7625, ti-sn65dsi86.
- Add bridge/it6505.
- Create DP and DVI-I connectors in ast.
- Assorted nouveau backlight fixes.
- Rework amdgpu reset handling (see the sketch after the diffstat).
- Add dt bindings for ingenic,jz4780-dw-hdmi.
- Support reading edid through aux channel in ingenic.
- Add a drm driver for Solomon SSD130x OLED displays.
- Add simple support for the Sharp LQ140M1JW46 panel.
- Add more panels to nt35560.
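
For the nomodeset / drm_module_*_driver items above, the pattern in most of the converted drivers is a one-line change at module-registration time. A sketch under the assumption that the driver is PCI-based; the foo_ names are placeholders:

```c
#include <drm/drm_module.h>
#include <linux/pci.h>

static struct pci_driver foo_pci_driver = {
	.name = "foo",
	/* .id_table, .probe, .remove, ... omitted in this sketch */
};

/*
 * Before: module_pci_driver(foo_pci_driver);
 * After:  the DRM wrapper, which skips registration when modesetting
 *         drivers are disabled (e.g. via nomodeset).
 */
drm_module_pci_driver(foo_pci_driver);
```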
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/686ec871-e77f-c230-22e5-9e3bb80f064a@linux.intel.com
Diffstat (limited to 'drivers')
168 files changed, 8047 insertions, 1688 deletions
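
Much of the diffstat above is the amdgpu reset rework referenced in the Driver Changes list: the per-device in_gpu_reset flag and reset_sem move into an amdgpu_reset_domain, and every recovery request is funnelled through that domain's single-threaded workqueue, serializing resets across all schedulers of a device (or all devices of an XGMI hive). A condensed, readable version of the pattern from the amdgpu_device.c hunk below; identifiers are shortened for illustration:

```c
#include <linux/workqueue.h>
#include "amdgpu.h"
#include "amdgpu_reset.h"

struct recover_work {
	struct work_struct base;
	struct amdgpu_device *adev;
	struct amdgpu_job *job;
	int ret;
};

static void do_recover(struct work_struct *work)
{
	struct recover_work *w = container_of(work, struct recover_work, base);

	w->ret = amdgpu_device_gpu_recover_imp(w->adev, w->job);
}

int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
{
	struct recover_work work = { .adev = adev, .job = job };

	INIT_WORK(&work.base, do_recover);

	/* Single-threaded wq: at most one reset runs per domain at a time. */
	if (!amdgpu_reset_domain_schedule(adev->reset_domain, &work.base))
		return -EAGAIN;

	flush_work(&work.base);
	return work.ret;
}
```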
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index 3e07f961e2f3..cb1bacb5a42b 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -176,6 +176,20 @@ struct dma_fence_array *dma_fence_array_create(int num_fences, array->base.error = PENDING_ERROR; + /* + * dma_fence_array objects should never contain any other fence + * containers or otherwise we run into recursion and potential kernel + * stack overflow on operations on the dma_fence_array. + * + * The correct way of handling this is to flatten out the array by the + * caller instead. + * + * Enforce this here by checking that we don't create a dma_fence_array + * with any container inside. + */ + while (num_fences--) + WARN_ON(dma_fence_is_container(fences[num_fences])); + return array; } EXPORT_SYMBOL(dma_fence_array_create); diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c index 1b4cb3e5cec9..06f8ef97c6e8 100644 --- a/drivers/dma-buf/dma-fence-chain.c +++ b/drivers/dma-buf/dma-fence-chain.c @@ -148,8 +148,7 @@ static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) dma_fence_get(&head->base); dma_fence_chain_for_each(fence, &head->base) { - struct dma_fence_chain *chain = to_dma_fence_chain(fence); - struct dma_fence *f = chain ? chain->fence : fence; + struct dma_fence *f = dma_fence_chain_contained(fence); dma_fence_get(f); if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) { @@ -165,8 +164,7 @@ static bool dma_fence_chain_enable_signaling(struct dma_fence *fence) static bool dma_fence_chain_signaled(struct dma_fence *fence) { dma_fence_chain_for_each(fence, fence) { - struct dma_fence_chain *chain = to_dma_fence_chain(fence); - struct dma_fence *f = chain ? chain->fence : fence; + struct dma_fence *f = dma_fence_chain_contained(fence); if (!dma_fence_is_signaled(f)) { dma_fence_put(fence); @@ -254,5 +252,14 @@ void dma_fence_chain_init(struct dma_fence_chain *chain, dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->lock, context, seqno); + + /* + * Chaining dma_fence_chain container together is only allowed through + * the prev fence and not through the contained fence. + * + * The correct way of handling this is to flatten out the fence + * structure into a dma_fence_array by the caller instead. + */ + WARN_ON(dma_fence_is_chain(fence)); } EXPORT_SYMBOL(dma_fence_chain_init); diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 6dd9a40b55d4..b51416405e86 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -256,6 +256,11 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) dma_resv_assert_held(obj); + /* Drivers should not add containers here, instead add each fence + * individually. + */ + WARN_ON(dma_fence_is_container(fence)); + fobj = dma_resv_shared_list(obj); count = fobj->shared_count; @@ -323,12 +328,8 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) } EXPORT_SYMBOL(dma_resv_add_excl_fence); -/** - * dma_resv_iter_restart_unlocked - restart the unlocked iterator - * @cursor: The dma_resv_iter object to restart - * - * Restart the unlocked iteration by initializing the cursor object. - */ +/* Restart the iterator by initializing all the necessary fields, but not the + * relation to the dma_resv object. 
*/ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) { cursor->seq = read_seqcount_begin(&cursor->obj->seq); @@ -344,14 +345,7 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) cursor->is_restarted = true; } -/** - * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj - * @cursor: cursor to record the current position - * - * Return all the fences in the dma_resv object which are not yet signaled. - * The returned fence has an extra local reference so will stay alive. - * If a concurrent modify is detected the whole iteration is started over again. - */ +/* Walk to the next not signaled fence and grab a reference to it */ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) { struct dma_resv *obj = cursor->obj; @@ -387,6 +381,12 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj. * @cursor: the cursor with the current position * + * Subsequent fences are iterated with dma_resv_iter_next_unlocked(). + * + * Beware that the iterator can be restarted. Code which accumulates statistics + * or similar needs to check for this with dma_resv_iter_is_restarted(). For + * this reason prefer the locked dma_resv_iter_first() whenver possible. + * * Returns the first fence from an unlocked dma_resv obj. */ struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor) @@ -406,6 +406,10 @@ EXPORT_SYMBOL(dma_resv_iter_first_unlocked); * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj. * @cursor: the cursor with the current position * + * Beware that the iterator can be restarted. Code which accumulates statistics + * or similar needs to check for this with dma_resv_iter_is_restarted(). For + * this reason prefer the locked dma_resv_iter_next() whenver possible. + * * Returns the next fence from an unlocked dma_resv obj. */ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor) @@ -431,6 +435,8 @@ EXPORT_SYMBOL(dma_resv_iter_next_unlocked); * dma_resv_iter_first - first fence from a locked dma_resv object * @cursor: cursor to record the current position * + * Subsequent fences are iterated with dma_resv_iter_next_unlocked(). + * * Return the first fence in the dma_resv object while holding the * &dma_resv.lock. 
*/ diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index dfdd3ec5f793..f1422bee3dcc 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -71,6 +71,7 @@ config DRM_DEBUG_SELFTEST select DRM_DP_HELPER select DRM_LIB_RANDOM select DRM_KMS_HELPER + select DRM_BUDDY select DRM_EXPORT_FOR_TESTS if m default n help @@ -403,6 +404,8 @@ source "drivers/gpu/drm/xlnx/Kconfig" source "drivers/gpu/drm/gud/Kconfig" +source "drivers/gpu/drm/solomon/Kconfig" + source "drivers/gpu/drm/sprd/Kconfig" config DRM_HYPERV diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 8675c2af7ae1..c2ef5f9fce54 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -132,4 +132,5 @@ obj-$(CONFIG_DRM_TIDSS) += tidss/ obj-y += xlnx/ obj-y += gud/ obj-$(CONFIG_DRM_HYPERV) += hyperv/ +obj-y += solomon/ obj-$(CONFIG_DRM_SPRD) += sprd/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 566303c9942f..8685784f24a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -815,6 +815,8 @@ struct ip_discovery_top; #define AMDGPU_RESET_MAGIC_NUM 64 #define AMDGPU_MAX_DF_PERFMONS 4 #define AMDGPU_PRODUCT_NAME_LEN 64 +struct amdgpu_reset_domain; + struct amdgpu_device { struct device *dev; struct pci_dev *pdev; @@ -1050,9 +1052,7 @@ struct amdgpu_device { bool in_s4; bool in_s0ix; - atomic_t in_gpu_reset; enum pp_mp1_state mp1_state; - struct rw_semaphore reset_sem; struct amdgpu_doorbell_index doorbell_index; struct mutex notifier_lock; @@ -1100,6 +1100,8 @@ struct amdgpu_device { struct list_head ras_list; struct ip_discovery_top *ip_top; + + struct amdgpu_reset_domain *reset_domain; }; static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) @@ -1293,6 +1295,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev); bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev); int amdgpu_device_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job); +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev, + struct amdgpu_job *job); void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); int amdgpu_device_pci_reset(struct amdgpu_device *adev); bool amdgpu_device_need_post(struct amdgpu_device *adev); @@ -1479,8 +1483,6 @@ static inline bool amdgpu_is_tmz(struct amdgpu_device *adev) return adev->gmc.tmz_enabled; } -static inline int amdgpu_in_reset(struct amdgpu_device *adev) -{ - return atomic_read(&adev->in_gpu_reset); -} +int amdgpu_in_reset(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 10b9e99c8941..e762e45b7b85 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -312,7 +312,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, } total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size); - used_vram = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr); + used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager); free_vram = used_vram >= total_vram ? 
0 : total_vram - used_vram; spin_lock(&adev->mm_stats.lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index db8a8710333e..ae0c83237608 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -37,6 +37,8 @@ #include "amdgpu_fw_attestation.h" #include "amdgpu_umr.h" +#include "amdgpu_reset.h" + #if defined(CONFIG_DEBUG_FS) /** @@ -1284,7 +1286,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) } /* Avoid accidently unparking the sched thread during GPU reset */ - r = down_write_killable(&adev->reset_sem); + r = down_write_killable(&adev->reset_domain->sem); if (r) return r; @@ -1313,7 +1315,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) kthread_unpark(ring->sched.thread); } - up_write(&adev->reset_sem); + up_write(&adev->reset_domain->sem); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); @@ -1522,7 +1524,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) return -ENOMEM; /* Avoid accidently unparking the sched thread during GPU reset */ - r = down_read_killable(&adev->reset_sem); + r = down_read_killable(&adev->reset_domain->sem); if (r) goto pro_end; @@ -1565,7 +1567,7 @@ failure: /* restart the scheduler */ kthread_unpark(ring->sched.thread); - up_read(&adev->reset_sem); + up_read(&adev->reset_domain->sem); ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 0cfccea722f8..ca8e76771ea1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -426,10 +426,10 @@ bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev) * the lock. 
*/ if (in_task()) { - if (down_read_trylock(&adev->reset_sem)) - up_read(&adev->reset_sem); + if (down_read_trylock(&adev->reset_domain->sem)) + up_read(&adev->reset_domain->sem); else - lockdep_assert_held(&adev->reset_sem); + lockdep_assert_held(&adev->reset_domain->sem); } #endif return false; @@ -455,9 +455,9 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, if ((reg * 4) < adev->rmmio_size) { if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) && - down_read_trylock(&adev->reset_sem)) { + down_read_trylock(&adev->reset_domain->sem)) { ret = amdgpu_kiq_rreg(adev, reg); - up_read(&adev->reset_sem); + up_read(&adev->reset_domain->sem); } else { ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); } @@ -540,9 +540,9 @@ void amdgpu_device_wreg(struct amdgpu_device *adev, if ((reg * 4) < adev->rmmio_size) { if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) && - down_read_trylock(&adev->reset_sem)) { + down_read_trylock(&adev->reset_domain->sem)) { amdgpu_kiq_wreg(adev, reg, v); - up_read(&adev->reset_sem); + up_read(&adev->reset_domain->sem); } else { writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); } @@ -2331,6 +2331,49 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev) return r; } +static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) +{ + long timeout; + int r, i; + + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { + struct amdgpu_ring *ring = adev->rings[i]; + + /* No need to setup the GPU scheduler for rings that don't need it */ + if (!ring || ring->no_scheduler) + continue; + + switch (ring->funcs->type) { + case AMDGPU_RING_TYPE_GFX: + timeout = adev->gfx_timeout; + break; + case AMDGPU_RING_TYPE_COMPUTE: + timeout = adev->compute_timeout; + break; + case AMDGPU_RING_TYPE_SDMA: + timeout = adev->sdma_timeout; + break; + default: + timeout = adev->video_timeout; + break; + } + + r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, + ring->num_hw_submission, amdgpu_job_hang_limit, + timeout, adev->reset_domain->wq, + ring->sched_score, ring->name, + adev->dev); + if (r) { + DRM_ERROR("Failed to create scheduler on ring %s.\n", + ring->name); + return r; + } + } + + return 0; +} + + /** * amdgpu_device_ip_init - run init for hardware IPs * @@ -2442,8 +2485,28 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (r) goto init_failed; - if (adev->gmc.xgmi.num_physical_nodes > 1) - amdgpu_xgmi_add_device(adev); + /** + * In case of XGMI grab extra reference for reset domain for this device + */ + if (adev->gmc.xgmi.num_physical_nodes > 1) { + if (amdgpu_xgmi_add_device(adev) == 0) { + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + + if (!hive->reset_domain || + !amdgpu_reset_get_reset_domain(hive->reset_domain)) { + r = -ENOENT; + goto init_failed; + } + + /* Drop the early temporary reset domain we created for device */ + amdgpu_reset_put_reset_domain(adev->reset_domain); + adev->reset_domain = hive->reset_domain; + } + } + + r = amdgpu_device_init_schedulers(adev); + if (r) + goto init_failed; /* Don't init kfd if whole hive need to be reset during init */ if (!adev->gmc.xgmi.pending_reset) @@ -3543,8 +3606,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->mn_lock); mutex_init(&adev->virt.vf_errors.lock); hash_init(adev->mn_hash); - atomic_set(&adev->in_gpu_reset, 0); - init_rwsem(&adev->reset_sem); mutex_init(&adev->psp.mutex); mutex_init(&adev->notifier_lock); mutex_init(&adev->pm.stable_pstate_ctx_lock); @@ -3630,6 +3691,15 @@ int amdgpu_device_init(struct 
amdgpu_device *adev, return r; } + /* + * Reset domain needs to be present early, before XGMI hive discovered + * (if any) and intitialized to use reset sem and in_gpu reset flag + * early on during init. + */ + adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE ,"amdgpu-reset-dev"); + if (!adev->reset_domain) + return -ENOMEM; + /* early init functions */ r = amdgpu_device_ip_early_init(adev); if (r) @@ -4006,6 +4076,9 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) if (adev->mman.discovery_bin) amdgpu_discovery_fini(adev); + amdgpu_reset_put_reset_domain(adev->reset_domain); + adev->reset_domain = NULL; + kfree(adev->pci_state); } @@ -4817,17 +4890,8 @@ end: return r; } -static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, - struct amdgpu_hive_info *hive) +static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev) { - if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0) - return false; - - if (hive) { - down_write_nest_lock(&adev->reset_sem, &hive->hive_lock); - } else { - down_write(&adev->reset_sem); - } switch (amdgpu_asic_reset_method(adev)) { case AMD_RESET_METHOD_MODE1: @@ -4840,56 +4904,12 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, adev->mp1_state = PP_MP1_STATE_NONE; break; } - - return true; } -static void amdgpu_device_unlock_adev(struct amdgpu_device *adev) +static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev) { amdgpu_vf_error_trans_all(adev); adev->mp1_state = PP_MP1_STATE_NONE; - atomic_set(&adev->in_gpu_reset, 0); - up_write(&adev->reset_sem); -} - -/* - * to lockup a list of amdgpu devices in a hive safely, if not a hive - * with multiple nodes, it will be similar as amdgpu_device_lock_adev. - * - * unlock won't require roll back. - */ -static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive) -{ - struct amdgpu_device *tmp_adev = NULL; - - if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) { - if (!hive) { - dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes"); - return -ENODEV; - } - list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { - if (!amdgpu_device_lock_adev(tmp_adev, hive)) - goto roll_back; - } - } else if (!amdgpu_device_lock_adev(adev, hive)) - return -EAGAIN; - - return 0; -roll_back: - if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) { - /* - * if the lockup iteration break in the middle of a hive, - * it may means there may has a race issue, - * or a hive device locked up independently. - * we may be in trouble and may not, so will try to roll back - * the lock and give out a warnning. - */ - dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock"); - list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) { - amdgpu_device_unlock_adev(tmp_adev); - } - } - return -EAGAIN; } static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev) @@ -5023,7 +5043,7 @@ retry: } /** - * amdgpu_device_gpu_recover - reset the asic and recover scheduler + * amdgpu_device_gpu_recover_imp - reset the asic and recover scheduler * * @adev: amdgpu_device pointer * @job: which job trigger hang @@ -5033,7 +5053,7 @@ retry: * Returns 0 for success or an error on failure. 
*/ -int amdgpu_device_gpu_recover(struct amdgpu_device *adev, +int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev, struct amdgpu_job *job) { struct list_head device_list, *device_list_handle = NULL; @@ -5067,26 +5087,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, dev_info(adev->dev, "GPU %s begin!\n", need_emergency_restart ? "jobs stop":"reset"); - /* - * Here we trylock to avoid chain of resets executing from - * either trigger by jobs on different adevs in XGMI hive or jobs on - * different schedulers for same device while this TO handler is running. - * We always reset all schedulers for device and all devices for XGMI - * hive so that should take care of them too. - */ if (!amdgpu_sriov_vf(adev)) hive = amdgpu_get_xgmi_hive(adev); - if (hive) { - if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) { - DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress", - job ? job->base.id : -1, hive->hive_id); - amdgpu_put_xgmi_hive(hive); - if (job && job->vm) - drm_sched_increase_karma(&job->base); - return 0; - } + if (hive) mutex_lock(&hive->hive_lock); - } reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; @@ -5095,22 +5099,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); /* - * lock the device before we try to operate the linked list - * if didn't get the device lock, don't touch the linked list since - * others may iterating it. - */ - r = amdgpu_device_lock_hive_adev(adev, hive); - if (r) { - dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress", - job ? job->base.id : -1); - - /* even we skipped this reset, still need to set the job to guilty */ - if (job && job->vm) - drm_sched_increase_karma(&job->base); - goto skip_recovery; - } - - /* * Build list of devices to reset. * In case we are in XGMI hive mode, resort the device list * to put adev in the 1st position. @@ -5127,8 +5115,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, device_list_handle = &device_list; } + /* We need to lock reset domain only once both for XGMI and single device */ + tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, + reset_list); + amdgpu_device_lock_reset_domain(tmp_adev->reset_domain); + /* block all schedulers and reset given job's ring */ list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + + amdgpu_device_set_mp1_state(tmp_adev); + /* * Try to put the audio codec into suspend state * before gpu reset started. 
@@ -5280,21 +5276,55 @@ skip_sched_resume: if (audio_suspended) amdgpu_device_resume_display_audio(tmp_adev); - amdgpu_device_unlock_adev(tmp_adev); + + amdgpu_device_unset_mp1_state(tmp_adev); } -skip_recovery: + tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, + reset_list); + amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); + if (hive) { - atomic_set(&hive->in_reset, 0); mutex_unlock(&hive->hive_lock); amdgpu_put_xgmi_hive(hive); } - if (r && r != -EAGAIN) + if (r) dev_info(adev->dev, "GPU reset end with ret = %d\n", r); return r; } +struct amdgpu_recover_work_struct { + struct work_struct base; + struct amdgpu_device *adev; + struct amdgpu_job *job; + int ret; +}; + +static void amdgpu_device_queue_gpu_recover_work(struct work_struct *work) +{ + struct amdgpu_recover_work_struct *recover_work = container_of(work, struct amdgpu_recover_work_struct, base); + + recover_work->ret = amdgpu_device_gpu_recover_imp(recover_work->adev, recover_work->job); +} +/* + * Serialize gpu recover into reset domain single threaded wq + */ +int amdgpu_device_gpu_recover(struct amdgpu_device *adev, + struct amdgpu_job *job) +{ + struct amdgpu_recover_work_struct work = {.adev = adev, .job = job}; + + INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work); + + if (!amdgpu_reset_domain_schedule(adev->reset_domain, &work.base)) + return -EAGAIN; + + flush_work(&work.base); + + return work.ret; +} + /** * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot * @@ -5482,20 +5512,6 @@ int amdgpu_device_baco_exit(struct drm_device *dev) return 0; } -static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (!ring || !ring->sched.thread) - continue; - - cancel_delayed_work_sync(&ring->sched.work_tdr); - } -} - /** * amdgpu_pci_error_detected - Called when a PCI error is detected. * @pdev: PCI device struct @@ -5526,14 +5542,11 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta /* Fatal error, prepare for slot reset */ case pci_channel_io_frozen: /* - * Cancel and wait for all TDRs in progress if failing to - * set adev->in_gpu_reset in amdgpu_device_lock_adev - * - * Locking adev->reset_sem will prevent any external access + * Locking adev->reset_domain->sem will prevent any external access * to GPU during PCI error recovery */ - while (!amdgpu_device_lock_adev(adev, NULL)) - amdgpu_cancel_all_tdr(adev); + amdgpu_device_lock_reset_domain(adev->reset_domain); + amdgpu_device_set_mp1_state(adev); /* * Block any work scheduling as we do for regular GPU reset @@ -5640,7 +5653,8 @@ out: DRM_INFO("PCIe error recovery succeeded\n"); } else { DRM_ERROR("PCIe error recovery failed, err:%d", r); - amdgpu_device_unlock_adev(adev); + amdgpu_device_unset_mp1_state(adev); + amdgpu_device_unlock_reset_domain(adev->reset_domain); } return r ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; @@ -5677,7 +5691,8 @@ void amdgpu_pci_resume(struct pci_dev *pdev) drm_sched_start(&ring->sched, true); } - amdgpu_device_unlock_adev(adev); + amdgpu_device_unset_mp1_state(adev); + amdgpu_device_unlock_reset_domain(adev->reset_domain); } bool amdgpu_device_cache_pci_state(struct pci_dev *pdev) @@ -5754,6 +5769,11 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, amdgpu_asic_invalidate_hdp(adev, ring); } +int amdgpu_in_reset(struct amdgpu_device *adev) +{ + return atomic_read(&adev->reset_domain->in_gpu_reset); + } + /** * amdgpu_device_halt() - bring hardware to some kind of halt state * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index ec4c9ef5f795..9e5fc4cdb8ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -961,7 +961,7 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb) int ret; unsigned int i, block_width, block_height, block_size_log2; - if (!rfb->base.dev->mode_config.allow_fb_modifiers) + if (rfb->base.dev->mode_config.fb_modifiers_not_supported) return 0; for (i = 0; i < format_info->num_planes; ++i) { @@ -1148,7 +1148,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, if (ret) return ret; - if (!dev->mode_config.allow_fb_modifiers) { + if (dev->mode_config.fb_modifiers_not_supported) { drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI, "GFX9+ requires FB check based on format modifier\n"); ret = check_tiling_flags_gfx6(rfb); @@ -1156,7 +1156,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, return ret; } - if (dev->mode_config.allow_fb_modifiers && + if (!dev->mode_config.fb_modifiers_not_supported && !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) { ret = convert_tiling_flags_to_modifier(rfb); if (ret) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 45977a72b5dd..5d13ed376ab4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -446,24 +446,18 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, * for the requested ring. * * @ring: ring to init the fence driver on - * @num_hw_submission: number of entries on the hardware queue - * @sched_score: optional score atomic shared with other schedulers * * Init the fence driver for the requested ring (all asics). * Helper function for amdgpu_fence_driver_init(). 
*/ -int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, - unsigned num_hw_submission, - atomic_t *sched_score) +int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - long timeout; - int r; if (!adev) return -EINVAL; - if (!is_power_of_2(num_hw_submission)) + if (!is_power_of_2(ring->num_hw_submission)) return -EINVAL; ring->fence_drv.cpu_addr = NULL; @@ -474,41 +468,14 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0); - ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1; + ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1; spin_lock_init(&ring->fence_drv.lock); - ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *), + ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *), GFP_KERNEL); + if (!ring->fence_drv.fences) return -ENOMEM; - /* No need to setup the GPU scheduler for rings that don't need it */ - if (ring->no_scheduler) - return 0; - - switch (ring->funcs->type) { - case AMDGPU_RING_TYPE_GFX: - timeout = adev->gfx_timeout; - break; - case AMDGPU_RING_TYPE_COMPUTE: - timeout = adev->compute_timeout; - break; - case AMDGPU_RING_TYPE_SDMA: - timeout = adev->sdma_timeout; - break; - default: - timeout = adev->video_timeout; - break; - } - - r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, - num_hw_submission, amdgpu_job_hang_limit, - timeout, NULL, sched_score, ring->name); - if (r) { - DRM_ERROR("Failed to create scheduler on ring %s.\n", - ring->name); - return r; - } - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 899a47011a67..dd78402e3cb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -60,7 +60,7 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev, struct ttm_resource_manager *man; man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); - return sysfs_emit(buf, "%llu\n", man->size * PAGE_SIZE); + return sysfs_emit(buf, "%llu\n", man->size); } /** @@ -77,8 +77,9 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); + struct ttm_resource_manager *man = &adev->mman.gtt_mgr.manager; - return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr)); + return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man)); } static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO, @@ -130,20 +131,17 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, struct amdgpu_gtt_node *node; int r; - if (!(place->flags & TTM_PL_FLAG_TEMPORARY) && - atomic64_add_return(num_pages, &mgr->used) > man->size) { - atomic64_sub(num_pages, &mgr->used); - return -ENOSPC; - } - node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL); - if (!node) { - r = -ENOMEM; - goto err_out; - } + if (!node) + return -ENOMEM; node->tbo = tbo; ttm_resource_init(tbo, place, &node->base.base); + if (!(place->flags & TTM_PL_FLAG_TEMPORARY) && + ttm_resource_manager_usage(man) > man->size) { + r = -ENOSPC; + goto err_free; + } if (place->lpfn) { spin_lock(&mgr->lock); @@ -169,11 +167,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, err_free: ttm_resource_fini(man, &node->base.base); kfree(node); - -err_out: - if (!(place->flags & TTM_PL_FLAG_TEMPORARY)) - atomic64_sub(num_pages, &mgr->used); - return r; } @@ -196,26 
+189,11 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, drm_mm_remove_node(&node->base.mm_nodes[0]); spin_unlock(&mgr->lock); - if (!(res->placement & TTM_PL_FLAG_TEMPORARY)) - atomic64_sub(res->num_pages, &mgr->used); - ttm_resource_fini(man, res); kfree(node); } /** - * amdgpu_gtt_mgr_usage - return usage of GTT domain - * - * @mgr: amdgpu_gtt_mgr pointer - * - * Return how many bytes are used in the GTT domain - */ -uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr) -{ - return atomic64_read(&mgr->used) * PAGE_SIZE; -} - -/** * amdgpu_gtt_mgr_recover - re-init gart * * @mgr: amdgpu_gtt_mgr pointer @@ -255,9 +233,6 @@ static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man, spin_lock(&mgr->lock); drm_mm_print(&mgr->mm, printer); spin_unlock(&mgr->lock); - - drm_printf(printer, "man size:%llu pages, gtt used:%llu pages\n", - man->size, atomic64_read(&mgr->used)); } static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = { @@ -283,14 +258,12 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) man->use_tt = true; man->func = &amdgpu_gtt_mgr_func; - ttm_resource_manager_init(man, &adev->mman.bdev, - gtt_size >> PAGE_SHIFT); + ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size); start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; drm_mm_init(&mgr->mm, start, size); spin_lock_init(&mgr->lock); - atomic64_set(&mgr->used, 0); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager); ttm_resource_manager_set_used(man, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 4870e093213d..d970336d2261 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -64,10 +64,9 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) ti.process_name, ti.tgid, ti.task_name, ti.pid); if (amdgpu_device_should_recover_gpu(ring->adev)) { - r = amdgpu_device_gpu_recover(ring->adev, job); + r = amdgpu_device_gpu_recover_imp(ring->adev, job); if (r) DRM_ERROR("GPU Recovery Failed: %d\n", r); - } else { drm_sched_suspend_timeout(&ring->sched); if (amdgpu_sriov_vf(adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 9f985bd463be..6b626c293e72 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -604,13 +604,13 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ui64 = atomic64_read(&adev->num_vram_cpu_page_faults); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VRAM_USAGE: - ui64 = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr); + ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VIS_VRAM_USAGE: ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_GTT_USAGE: - ui64 = amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr); + ui64 = ttm_resource_manager_usage(&adev->mman.gtt_mgr.manager); return copy_to_user(out, &ui64, min(size, 8u)) ? 
-EFAULT : 0; case AMDGPU_INFO_GDS_CONFIG: { struct drm_amdgpu_info_gds gds_info; @@ -642,14 +642,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) case AMDGPU_INFO_MEMORY: { struct drm_amdgpu_memory_info mem; struct ttm_resource_manager *gtt_man = - ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + &adev->mman.gtt_mgr.manager; + struct ttm_resource_manager *vram_man = + &adev->mman.vram_mgr.manager; + memset(&mem, 0, sizeof(mem)); mem.vram.total_heap_size = adev->gmc.real_vram_size; mem.vram.usable_heap_size = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size) - AMDGPU_VM_RESERVED_VRAM; mem.vram.heap_usage = - amdgpu_vram_mgr_usage(&adev->mman.vram_mgr); + ttm_resource_manager_usage(vram_man); mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; mem.cpu_accessible_vram.total_heap_size = @@ -667,8 +670,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) mem.gtt.total_heap_size *= PAGE_SIZE; mem.gtt.usable_heap_size = mem.gtt.total_heap_size - atomic64_read(&adev->gart_pin_size); - mem.gtt.heap_usage = - amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr); + mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man); mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4; return copy_to_user(out, &mem, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 23c9a60693ee..25731719c627 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -451,7 +451,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, if (domain & AMDGPU_GEM_DOMAIN_GTT) { man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); - if (size < (man->size << PAGE_SHIFT)) + if (size < man->size) return true; else goto fail; @@ -460,7 +460,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, if (domain & AMDGPU_GEM_DOMAIN_VRAM) { man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); - if (size < (man->size << PAGE_SHIFT)) + if (size < man->size) return true; else goto fail; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c index 0d85c2096ab5..e8adfd0a570a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c @@ -25,12 +25,6 @@ #include "amdgpu.h" -static inline struct amdgpu_preempt_mgr * -to_preempt_mgr(struct ttm_resource_manager *man) -{ - return container_of(man, struct amdgpu_preempt_mgr, manager); -} - /** * DOC: mem_info_preempt_used * @@ -45,10 +39,9 @@ static ssize_t mem_info_preempt_used_show(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - struct ttm_resource_manager *man; + struct ttm_resource_manager *man = &adev->mman.preempt_mgr; - man = ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_PREEMPT); - return sysfs_emit(buf, "%llu\n", amdgpu_preempt_mgr_usage(man)); + return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man)); } static DEVICE_ATTR_RO(mem_info_preempt_used); @@ -68,16 +61,12 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man, const struct ttm_place *place, struct ttm_resource **res) { - struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man); - *res = kzalloc(sizeof(**res), GFP_KERNEL); if (!*res) return -ENOMEM; ttm_resource_init(tbo, place, *res); (*res)->start = AMDGPU_BO_INVALID_OFFSET; - - atomic64_add((*res)->num_pages, &mgr->used); return 0; } @@ -92,49 +81,13 @@ static int 
amdgpu_preempt_mgr_new(struct ttm_resource_manager *man, static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man, struct ttm_resource *res) { - struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man); - - atomic64_sub(res->num_pages, &mgr->used); ttm_resource_fini(man, res); kfree(res); } -/** - * amdgpu_preempt_mgr_usage - return usage of PREEMPT domain - * - * @man: TTM memory type manager - * - * Return how many bytes are used in the GTT domain - */ -uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man) -{ - struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man); - s64 result = atomic64_read(&mgr->used); - - return (result > 0 ? result : 0) * PAGE_SIZE; -} - -/** - * amdgpu_preempt_mgr_debug - dump VRAM table - * - * @man: TTM memory type manager - * @printer: DRM printer to use - * - * Dump the table content using printk. - */ -static void amdgpu_preempt_mgr_debug(struct ttm_resource_manager *man, - struct drm_printer *printer) -{ - struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man); - - drm_printf(printer, "man size:%llu pages, preempt used:%lld pages\n", - man->size, (u64)atomic64_read(&mgr->used)); -} - static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = { .alloc = amdgpu_preempt_mgr_new, .free = amdgpu_preempt_mgr_del, - .debug = amdgpu_preempt_mgr_debug }; /** @@ -146,8 +99,7 @@ static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = { */ int amdgpu_preempt_mgr_init(struct amdgpu_device *adev) { - struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr; - struct ttm_resource_manager *man = &mgr->manager; + struct ttm_resource_manager *man = &adev->mman.preempt_mgr; int ret; man->use_tt = true; @@ -155,16 +107,13 @@ int amdgpu_preempt_mgr_init(struct amdgpu_device *adev) ttm_resource_manager_init(man, &adev->mman.bdev, (1 << 30)); - atomic64_set(&mgr->used, 0); - ret = device_create_file(adev->dev, &dev_attr_mem_info_preempt_used); if (ret) { DRM_ERROR("Failed to create device file mem_info_preempt_used\n"); return ret; } - ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, - &mgr->manager); + ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, man); ttm_resource_manager_set_used(man, true); return 0; } @@ -179,8 +128,7 @@ int amdgpu_preempt_mgr_init(struct amdgpu_device *adev) */ void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev) { - struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr; - struct ttm_resource_manager *man = &mgr->manager; + struct ttm_resource_manager *man = &adev->mman.preempt_mgr; int ret; ttm_resource_manager_set_used(man, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 2b844a5aafdb..a44f2eeed6ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -31,6 +31,8 @@ #include <linux/debugfs.h> #include <linux/uaccess.h> +#include "amdgpu_reset.h" + #define EEPROM_I2C_MADDR_VEGA20 0x0 #define EEPROM_I2C_MADDR_ARCTURUS 0x40000 #define EEPROM_I2C_MADDR_ARCTURUS_D342 0x0 @@ -193,12 +195,12 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control) __encode_table_header_to_buf(&control->tbl_hdr, buf); /* i2c may be unstable in gpu reset */ - down_read(&adev->reset_sem); + down_read(&adev->reset_domain->sem); res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus, control->i2c_address + control->ras_header_offset, buf, RAS_TABLE_HEADER_SIZE); - up_read(&adev->reset_sem); + up_read(&adev->reset_domain->sem); if (res < 0) { 
DRM_ERROR("Failed to write EEPROM table header:%d", res); @@ -390,13 +392,13 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control, int res; /* i2c may be unstable in gpu reset */ - down_read(&adev->reset_sem); + down_read(&adev->reset_domain->sem); buf_size = num * RAS_TABLE_RECORD_SIZE; res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus, control->i2c_address + RAS_INDEX_TO_OFFSET(control, fri), buf, buf_size); - up_read(&adev->reset_sem); + up_read(&adev->reset_domain->sem); if (res < 0) { DRM_ERROR("Writing %d EEPROM table records error:%d", num, res); @@ -550,12 +552,12 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) goto Out; } - down_read(&adev->reset_sem); + down_read(&adev->reset_domain->sem); res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus, control->i2c_address + control->ras_record_offset, buf, buf_size); - up_read(&adev->reset_sem); + up_read(&adev->reset_domain->sem); if (res < 0) { DRM_ERROR("EEPROM failed reading records:%d\n", res); @@ -645,13 +647,13 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, int res; /* i2c may be unstable in gpu reset */ - down_read(&adev->reset_sem); + down_read(&adev->reset_domain->sem); buf_size = num * RAS_TABLE_RECORD_SIZE; res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus, control->i2c_address + RAS_INDEX_TO_OFFSET(control, fri), buf, buf_size); - up_read(&adev->reset_sem); + up_read(&adev->reset_domain->sem); if (res < 0) { DRM_ERROR("Reading %d EEPROM table records error:%d", num, res); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 02afd4115675..248d64158721 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -96,3 +96,59 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev, return reset_handler->restore_hwcontext(adev->reset_cntl, reset_context); } + + +void amdgpu_reset_destroy_reset_domain(struct kref *ref) +{ + struct amdgpu_reset_domain *reset_domain = container_of(ref, + struct amdgpu_reset_domain, + refcount); + if (reset_domain->wq) + destroy_workqueue(reset_domain->wq); + + kvfree(reset_domain); +} + +struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type, + char *wq_name) +{ + struct amdgpu_reset_domain *reset_domain; + + reset_domain = kvzalloc(sizeof(struct amdgpu_reset_domain), GFP_KERNEL); + if (!reset_domain) { + DRM_ERROR("Failed to allocate amdgpu_reset_domain!"); + return NULL; + } + + reset_domain->type = type; + kref_init(&reset_domain->refcount); + + reset_domain->wq = create_singlethread_workqueue(wq_name); + if (!reset_domain->wq) { + DRM_ERROR("Failed to allocate wq for amdgpu_reset_domain!"); + amdgpu_reset_put_reset_domain(reset_domain); + return NULL; + + } + + atomic_set(&reset_domain->in_gpu_reset, 0); + init_rwsem(&reset_domain->sem); + + return reset_domain; +} + +void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain) +{ + atomic_set(&reset_domain->in_gpu_reset, 1); + down_write(&reset_domain->sem); +} + + +void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain) +{ + atomic_set(&reset_domain->in_gpu_reset, 0); + up_write(&reset_domain->sem); +} + + + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index e00d38d9160a..1949dbe28a86 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -70,6 +70,21 @@ 
struct amdgpu_reset_control { void (*async_reset)(struct work_struct *work); }; + +enum amdgpu_reset_domain_type { + SINGLE_DEVICE, + XGMI_HIVE +}; + +struct amdgpu_reset_domain { + struct kref refcount; + struct workqueue_struct *wq; + enum amdgpu_reset_domain_type type; + struct rw_semaphore sem; + atomic_t in_gpu_reset; +}; + + int amdgpu_reset_init(struct amdgpu_device *adev); int amdgpu_reset_fini(struct amdgpu_device *adev); @@ -82,4 +97,29 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev, int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl, struct amdgpu_reset_handler *handler); +struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type, + char *wq_name); + +void amdgpu_reset_destroy_reset_domain(struct kref *ref); + +static inline bool amdgpu_reset_get_reset_domain(struct amdgpu_reset_domain *domain) +{ + return kref_get_unless_zero(&domain->refcount) != 0; +} + +static inline void amdgpu_reset_put_reset_domain(struct amdgpu_reset_domain *domain) +{ + kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain); +} + +static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *domain, + struct work_struct *work) +{ + return queue_work(domain->wq, work); +} + +void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain); + +void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index ab2351ba9574..35bcb6dc1816 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -191,8 +191,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->adev = adev; ring->idx = adev->num_rings++; adev->rings[ring->idx] = ring; - r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission, - sched_score); + ring->num_hw_submission = sched_hw_submission; + ring->sched_score = sched_score; + r = amdgpu_fence_driver_init_ring(ring); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index fae7d185ad0d..48365da213dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -114,9 +114,7 @@ struct amdgpu_fence_driver { void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring); void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); -int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, - unsigned num_hw_submission, - atomic_t *sched_score); +int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, struct amdgpu_irq_src *irq_src, unsigned irq_type); @@ -251,6 +249,8 @@ struct amdgpu_ring { bool has_compute_vm_bug; bool no_scheduler; int hw_prio; + unsigned num_hw_submission; + atomic_t *sched_score; }; #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index f7d8487799b2..40e06745fae9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -261,10 +261,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, dma_resv_for_each_fence(&cursor, resv, true, f) { dma_fence_chain_for_each(f, f) { - struct dma_fence_chain *chain = to_dma_fence_chain(f); + struct dma_fence *tmp = dma_fence_chain_contained(f); - if 
(amdgpu_sync_test_fence(adev, mode, owner, chain ? - chain->fence : f)) { + if (amdgpu_sync_test_fence(adev, mode, owner, tmp)) { r = amdgpu_sync_fence(sync, f); dma_fence_put(f); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 414a22dddc78..4b9ee6e27f74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1941,7 +1941,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) size = adev->gmc.real_vram_size; else size = adev->gmc.visible_vram_size; - man->size = size >> PAGE_SHIFT; + man->size = size; adev->mman.buffer_funcs_enabled = enable; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 0e4ecc77db3f..9120ae80ef52 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -44,7 +44,6 @@ struct amdgpu_vram_mgr { spinlock_t lock; struct list_head reservations_pending; struct list_head reserved_pages; - atomic64_t usage; atomic64_t vis_usage; }; @@ -52,12 +51,6 @@ struct amdgpu_gtt_mgr { struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; - atomic64_t used; -}; - -struct amdgpu_preempt_mgr { - struct ttm_resource_manager manager; - atomic64_t used; }; struct amdgpu_mman { @@ -76,7 +69,7 @@ struct amdgpu_mman { struct amdgpu_vram_mgr vram_mgr; struct amdgpu_gtt_mgr gtt_mgr; - struct amdgpu_preempt_mgr preempt_mgr; + struct ttm_resource_manager preempt_mgr; uint64_t stolen_vga_size; struct amdgpu_bo *stolen_vga_memory; @@ -118,7 +111,6 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev); void amdgpu_vram_mgr_fini(struct amdgpu_device *adev); bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem); -uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr); void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr); uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man); @@ -133,7 +125,6 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, void amdgpu_vram_mgr_free_sgt(struct device *dev, enum dma_data_direction dir, struct sg_table *sgt); -uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr); uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr); int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, uint64_t start, uint64_t size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 5656bf7d9267..a025f080aa6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -575,8 +575,10 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) vf2pf_info->driver_cert = 0; vf2pf_info->os_info.all = 0; - vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr) >> 20; - vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20; + vf2pf_info->fb_usage = + ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20; + vf2pf_info->fb_vis_usage = + amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20; vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20; vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index fce9a13a6ba1..0a7611648573 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -96,9 +96,9 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev, { struct drm_device 
*ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); + struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager; - return sysfs_emit(buf, "%llu\n", - amdgpu_vram_mgr_usage(&adev->mman.vram_mgr)); + return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man)); } /** @@ -253,7 +253,9 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man) vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node); atomic64_add(vis_usage, &mgr->vis_usage); - atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage); + spin_lock(&man->bdev->lru_lock); + man->usage += rsv->mm_node.size << PAGE_SHIFT; + spin_unlock(&man->bdev->lru_lock); list_move(&rsv->node, &mgr->reserved_pages); } } @@ -378,19 +380,13 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, lpfn = place->lpfn; if (!lpfn) - lpfn = man->size; + lpfn = man->size >> PAGE_SHIFT; max_bytes = adev->gmc.mc_vram_size; if (tbo->type != ttm_bo_type_kernel) max_bytes -= AMDGPU_VM_RESERVED_VRAM; - /* bail out quickly if there's likely not enough VRAM for this BO */ mem_bytes = tbo->base.size; - if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) { - r = -ENOSPC; - goto error_sub; - } - if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { pages_per_node = ~0ul; num_nodes = 1; @@ -408,13 +404,17 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, node = kvmalloc(struct_size(node, mm_nodes, num_nodes), GFP_KERNEL | __GFP_ZERO); - if (!node) { - r = -ENOMEM; - goto error_sub; - } + if (!node) + return -ENOMEM; ttm_resource_init(tbo, place, &node->base); + /* bail out quickly if there's likely not enough VRAM for this BO */ + if (ttm_resource_manager_usage(man) > max_bytes) { + r = -ENOSPC; + goto error_fini; + } + mode = DRM_MM_INSERT_BEST; if (place->flags & TTM_PL_FLAG_TOPDOWN) mode = DRM_MM_INSERT_HIGH; @@ -472,11 +472,10 @@ error_free: while (i--) drm_mm_remove_node(&node->mm_nodes[i]); spin_unlock(&mgr->lock); +error_fini: ttm_resource_fini(man, &node->base); kvfree(node); -error_sub: - atomic64_sub(mem_bytes, &mgr->usage); return r; } @@ -494,7 +493,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res); struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); - uint64_t usage = 0, vis_usage = 0; + uint64_t vis_usage = 0; unsigned i, pages; spin_lock(&mgr->lock); @@ -503,13 +502,11 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, struct drm_mm_node *mm = &node->mm_nodes[i]; drm_mm_remove_node(mm); - usage += mm->size << PAGE_SHIFT; vis_usage += amdgpu_vram_mgr_vis_size(adev, mm); } amdgpu_vram_mgr_do_reserve(man); spin_unlock(&mgr->lock); - atomic64_sub(usage, &mgr->usage); atomic64_sub(vis_usage, &mgr->vis_usage); ttm_resource_fini(man, res); @@ -628,18 +625,6 @@ void amdgpu_vram_mgr_free_sgt(struct device *dev, } /** - * amdgpu_vram_mgr_usage - how many bytes are used in this domain - * - * @mgr: amdgpu_vram_mgr pointer - * - * Returns how many bytes are used in this domain. 
- */ -uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr) -{ - return atomic64_read(&mgr->usage); -} - -/** * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part * * @mgr: amdgpu_vram_mgr pointer @@ -664,13 +649,12 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man, { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); + drm_printf(printer, " vis usage:%llu\n", + amdgpu_vram_mgr_vis_usage(mgr)); + spin_lock(&mgr->lock); drm_mm_print(&mgr->mm, printer); spin_unlock(&mgr->lock); - - drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n", - man->size, amdgpu_vram_mgr_usage(mgr) >> 20, - amdgpu_vram_mgr_vis_usage(mgr) >> 20); } static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { @@ -692,11 +676,11 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) struct ttm_resource_manager *man = &mgr->manager; ttm_resource_manager_init(man, &adev->mman.bdev, - adev->gmc.real_vram_size >> PAGE_SHIFT); + adev->gmc.real_vram_size); man->func = &amdgpu_vram_mgr_func; - drm_mm_init(&mgr->mm, 0, man->size); + drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT); spin_lock_init(&mgr->lock); INIT_LIST_HEAD(&mgr->reservations_pending); INIT_LIST_HEAD(&mgr->reserved_pages); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 77b65434ccc2..91817a31f3e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -32,6 +32,8 @@ #include "wafl/wafl2_4_0_0_smn.h" #include "wafl/wafl2_4_0_0_sh_mask.h" +#include "amdgpu_reset.h" + #define smnPCS_XGMI23_PCS_ERROR_STATUS 0x11a01210 #define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c #define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210 @@ -227,6 +229,9 @@ static void amdgpu_xgmi_hive_release(struct kobject *kobj) struct amdgpu_hive_info *hive = container_of( kobj, struct amdgpu_hive_info, kobj); + amdgpu_reset_put_reset_domain(hive->reset_domain); + hive->reset_domain = NULL; + mutex_destroy(&hive->hive_lock); kfree(hive); } @@ -398,15 +403,35 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) goto pro_end; } + /** + * Avoid recreating reset domain when hive is reconstructed for the case + * of reset the devices in the XGMI hive during probe for SRIOV + * See https://www.spinics.net/lists/amd-gfx/msg58836.html + */ + if (adev->reset_domain->type != XGMI_HIVE) { + hive->reset_domain = amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive"); + if (!hive->reset_domain) { + dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n"); + ret = -ENOMEM; + kobject_put(&hive->kobj); + kfree(hive); + hive = NULL; + goto pro_end; + } + } else { + amdgpu_reset_get_reset_domain(adev->reset_domain); + hive->reset_domain = adev->reset_domain; + } + hive->hive_id = adev->gmc.xgmi.hive_id; INIT_LIST_HEAD(&hive->device_list); INIT_LIST_HEAD(&hive->node); mutex_init(&hive->hive_lock); - atomic_set(&hive->in_reset, 0); atomic_set(&hive->number_devices, 0); task_barrier_init(&hive->tb); hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN; hive->hi_req_gpu = NULL; + /* * hive pstate on boot is high in vega20 so we have to go to low * pstate on after boot. 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index 0afca51c3c0c..b4a705545657 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -33,7 +33,6 @@ struct amdgpu_hive_info { struct list_head node; atomic_t number_devices; struct mutex hive_lock; - atomic_t in_reset; int hi_req_count; struct amdgpu_device *hi_req_gpu; struct task_barrier tb; @@ -42,6 +41,8 @@ struct amdgpu_hive_info { AMDGPU_XGMI_PSTATE_MAX_VEGA20, AMDGPU_XGMI_PSTATE_UNKNOWN } pstate; + + struct amdgpu_reset_domain *reset_domain; }; struct amdgpu_pcs_ras_field { diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 6fa2229b7229..1c5d9388ad0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -25,6 +25,8 @@ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/string_helpers.h> + #include <asm/unaligned.h> #include <drm/drm_util.h> @@ -740,7 +742,7 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) break; } if (arg != ATOM_COND_ALWAYS) - SDEBUG(" taken: %s\n", execute ? "yes" : "no"); + SDEBUG(" taken: %s\n", str_yes_no(execute)); SDEBUG(" target: 0x%04X\n", target); if (execute) { if (ctx->last_jump == (ctx->start + target)) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 5d5205870861..288fce7dc0ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2798,6 +2798,8 @@ static int dce_v10_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.preferred_depth = 24; adev_to_drm(adev)->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; + adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; r = amdgpu_display_modeset_create_props(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 4d812b22c54f..cbe5250b31cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2916,6 +2916,8 @@ static int dce_v11_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.preferred_depth = 24; adev_to_drm(adev)->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; + adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; r = amdgpu_display_modeset_create_props(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index b90bc2adf778..982855e6cf52 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2674,6 +2674,7 @@ static int dce_v6_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; adev_to_drm(adev)->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; r = amdgpu_display_modeset_create_props(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 7c1379b02f94..84440741c60b 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2695,6 +2695,8 @@ static int dce_v8_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.preferred_depth = 24; adev_to_drm(adev)->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; + 
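Aside: the dce_v*_0_sw_init() hunks above opt these legacy display paths out of the core's new default by setting mode_config.fb_modifiers_not_supported, since they never advertise format modifiers and the core would otherwise report linear-only modifier support on their behalf. The same flag in a hypothetical driver's mode-config setup (the foo_* name and limits are made up):

#include <drm/drm_device.h>
#include <drm/drm_mode_config.h>

static int foo_mode_config_init(struct drm_device *drm)
{
	int ret;

	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	drm->mode_config.max_width = 16384;
	drm->mode_config.max_height = 16384;

	/*
	 * This driver passes no modifier list to drm_universal_plane_init(),
	 * so tell the core not to expose IN_FORMATS/modifier support at all.
	 */
	drm->mode_config.fb_modifiers_not_supported = true;

	return 0;
}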
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; r = amdgpu_display_modeset_create_props(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index e7add2020d48..3dcd82b49481 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -48,6 +48,8 @@ #include "athub_v2_0.h" #include "athub_v2_1.h" +#include "amdgpu_reset.h" + #if 0 static const struct soc15_reg_golden golden_settings_navi10_hdp[] = { @@ -328,7 +330,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, */ if (adev->gfx.kiq.ring.sched.ready && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && - down_read_trylock(&adev->reset_sem)) { + down_read_trylock(&adev->reset_domain->sem)) { struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; const unsigned eng = 17; u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); @@ -338,7 +340,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, 1 << vmid); - up_read(&adev->reset_sem); + up_read(&adev->reset_domain->sem); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 412e44af1608..df35f0252eea 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -62,6 +62,8 @@ #include "amdgpu_ras.h" #include "amdgpu_xgmi.h" +#include "amdgpu_reset.h" + /* add these here since we already include dce12 headers and these are for DCN */ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2 @@ -787,13 +789,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, */ if (adev->gfx.kiq.ring.sched.ready && (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && - down_read_trylock(&adev->reset_sem)) { + down_read_trylock(&adev->reset_domain->sem)) { uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng; uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, 1 << vmid); - up_read(&adev->reset_sem); + up_read(&adev->reset_domain->sem); return; } @@ -900,7 +902,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (amdgpu_in_reset(adev)) return -EIO; - if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) { + if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) { /* Vega20+XGMI caches PTEs in TC and TLB. Add a * heavy-weight TLB flush (type 2), which flushes * both. 
Due to a race condition with concurrent @@ -927,7 +929,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (r) { amdgpu_ring_undo(ring); spin_unlock(&adev->gfx.kiq.ring_lock); - up_read(&adev->reset_sem); + up_read(&adev->reset_domain->sem); return -ETIME; } @@ -936,10 +938,10 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); if (r < 1) { dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r); - up_read(&adev->reset_sem); + up_read(&adev->reset_domain->sem); return -ETIME; } - up_read(&adev->reset_sem); + up_read(&adev->reset_domain->sem); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 56da5ab82987..b81acf59870c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -32,6 +32,8 @@ #include "soc15_common.h" #include "mxgpu_ai.h" +#include "amdgpu_reset.h" + static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev) { WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2); @@ -257,10 +259,10 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. */ - if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0) + if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0) return; - down_write(&adev->reset_sem); + down_write(&adev->reset_domain->sem); amdgpu_virt_fini_data_exchange(adev); @@ -275,14 +277,14 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) } while (timeout > 1); flr_done: - atomic_set(&adev->in_gpu_reset, 0); - up_write(&adev->reset_sem); + atomic_set(&adev->reset_domain->in_gpu_reset, 0); + up_write(&adev->reset_domain->sem); /* Trigger recovery for world switch failure if no TDR */ if (amdgpu_device_should_recover_gpu(adev) && (!amdgpu_device_has_job_running(adev) || adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) - amdgpu_device_gpu_recover(adev, NULL); + amdgpu_device_gpu_recover_imp(adev, NULL); } static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev, @@ -307,8 +309,11 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev, switch (event) { case IDH_FLR_NOTIFICATION: - if (amdgpu_sriov_runtime(adev)) - schedule_work(&adev->virt.flr_work); + if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) + WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain, + &adev->virt.flr_work), + "Failed to queue work! at %s", + __func__); break; case IDH_QUERY_ALIVE: xgpu_ai_mailbox_send_ack(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 477d0dde19c5..22c10b97ea81 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -31,6 +31,8 @@ #include "soc15_common.h" #include "mxgpu_nv.h" +#include "amdgpu_reset.h" + static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev) { WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2); @@ -281,10 +283,10 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. 
*/ - if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0) + if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0) return; - down_write(&adev->reset_sem); + down_write(&adev->reset_domain->sem); amdgpu_virt_fini_data_exchange(adev); @@ -299,8 +301,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) } while (timeout > 1); flr_done: - atomic_set(&adev->in_gpu_reset, 0); - up_write(&adev->reset_sem); + atomic_set(&adev->reset_domain->in_gpu_reset, 0); + up_write(&adev->reset_domain->sem); /* Trigger recovery for world switch failure if no TDR */ if (amdgpu_device_should_recover_gpu(adev) @@ -309,7 +311,7 @@ flr_done: adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT || adev->compute_timeout == MAX_SCHEDULE_TIMEOUT || adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) - amdgpu_device_gpu_recover(adev, NULL); + amdgpu_device_gpu_recover_imp(adev, NULL); } static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev, @@ -337,8 +339,11 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev, switch (event) { case IDH_FLR_NOTIFICATION: - if (amdgpu_sriov_runtime(adev)) - schedule_work(&adev->virt.flr_work); + if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) + WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain, + &adev->virt.flr_work), + "Failed to queue work! at %s", + __func__); break; /* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore * it byfar since that polling thread will handle it, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index a642c04cf17d..7b63d30b9b79 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -42,6 +42,8 @@ #include "smu/smu_7_1_3_d.h" #include "mxgpu_vi.h" +#include "amdgpu_reset.h" + /* VI golden setting */ static const u32 xgpu_fiji_mgcg_cgcg_init[] = { mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff, @@ -521,7 +523,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) /* Trigger recovery due to world switch failure */ if (amdgpu_device_should_recover_gpu(adev)) - amdgpu_device_gpu_recover(adev, NULL); + amdgpu_device_gpu_recover_imp(adev, NULL); } static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev, @@ -550,8 +552,11 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev, r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION); /* only handle FLR_NOTIFY now */ - if (!r) - schedule_work(&adev->virt.flr_work); + if (!r && !amdgpu_in_reset(adev)) + WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain, + &adev->virt.flr_work), + "Failed to queue work! 
at %s", + __func__); } return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 493be0c49777..b94d215fa5c5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7899,6 +7899,9 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, if (res) return res; + if (modifiers == NULL) + adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true; + res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, &dm_plane_funcs, formats, num_formats, modifiers, plane->type, NULL); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index bdea177fae55..0e8c5e6b2a5b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -23,6 +23,7 @@ * */ +#include <linux/string_helpers.h> #include <linux/uaccess.h> #include "dc.h" @@ -49,11 +50,6 @@ struct dmub_debugfs_trace_entry { uint32_t param1; }; -static inline const char *yesno(bool v) -{ - return v ? "yes" : "no"; -} - /* parse_write_buffer_into_params - Helper function to parse debugfs write buffer into an array * * Function takes in attributes passed to debugfs write entry @@ -857,12 +853,12 @@ static int psr_capability_show(struct seq_file *m, void *data) if (!(link->connector_signal & SIGNAL_TYPE_EDP)) return -ENODEV; - seq_printf(m, "Sink support: %s", yesno(link->dpcd_caps.psr_caps.psr_version != 0)); + seq_printf(m, "Sink support: %s", str_yes_no(link->dpcd_caps.psr_caps.psr_version != 0)); if (link->dpcd_caps.psr_caps.psr_version) seq_printf(m, " [0x%02x]", link->dpcd_caps.psr_caps.psr_version); seq_puts(m, "\n"); - seq_printf(m, "Driver support: %s", yesno(link->psr_settings.psr_feature_enabled)); + seq_printf(m, "Driver support: %s", str_yes_no(link->psr_settings.psr_feature_enabled)); if (link->psr_settings.psr_version) seq_printf(m, " [0x%02x]", link->psr_settings.psr_version); seq_puts(m, "\n"); @@ -1211,8 +1207,8 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data) drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); - seq_printf(m, "FEC_Sink_Support: %s\n", yesno(is_fec_supported)); - seq_printf(m, "DSC_Sink_Support: %s\n", yesno(is_dsc_supported)); + seq_printf(m, "FEC_Sink_Support: %s\n", str_yes_no(is_fec_supported)); + seq_printf(m, "DSC_Sink_Support: %s\n", str_yes_no(is_dsc_supported)); return ret; } diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 8e3e98f13db4..4f9b0a9f13e3 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -273,6 +273,9 @@ static int __init armada_drm_init(void) { int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + ret = platform_driver_register(&armada_lcd_platform_driver); if (ret) return ret; diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 65f172807a0d..13f496473b9e 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -20,6 +20,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_module.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include <drm/drm_vblank.h> @@ -359,7 +360,7 @@ static struct platform_driver aspeed_gfx_platform_driver = { }, }; 
-module_platform_driver(aspeed_gfx_platform_driver); +drm_module_platform_driver(aspeed_gfx_platform_driver); MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>"); MODULE_DESCRIPTION("ASPEED BMC DRM/KMS driver"); diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c index cd93c44f2662..204c926a18ea 100644 --- a/drivers/gpu/drm/ast/ast_dp501.c +++ b/drivers/gpu/drm/ast/ast_dp501.c @@ -272,64 +272,6 @@ static bool ast_launch_m68k(struct drm_device *dev) return true; } -u8 ast_get_dp501_max_clk(struct drm_device *dev) -{ - struct ast_private *ast = to_ast_private(dev); - u32 boot_address, offset, data; - u8 linkcap[4], linkrate, linklanes, maxclk = 0xff; - u32 *plinkcap; - - if (ast->config_mode == ast_use_p2a) { - boot_address = get_fw_base(ast); - - /* validate FW version */ - offset = AST_DP501_GBL_VERSION; - data = ast_mindwm(ast, boot_address + offset); - if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */ - return maxclk; - - /* Read Link Capability */ - offset = AST_DP501_LINKRATE; - plinkcap = (u32 *)linkcap; - *plinkcap = ast_mindwm(ast, boot_address + offset); - if (linkcap[2] == 0) { - linkrate = linkcap[0]; - linklanes = linkcap[1]; - data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes); - if (data > 0xff) - data = 0xff; - maxclk = (u8)data; - } - } else { - if (!ast->dp501_fw_buf) - return AST_DP501_DEFAULT_DCLK; /* 1024x768 as default */ - - /* dummy read */ - offset = 0x0000; - data = readl(ast->dp501_fw_buf + offset); - - /* validate FW version */ - offset = AST_DP501_GBL_VERSION; - data = readl(ast->dp501_fw_buf + offset); - if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */ - return maxclk; - - /* Read Link Capability */ - offset = AST_DP501_LINKRATE; - plinkcap = (u32 *)linkcap; - *plinkcap = readl(ast->dp501_fw_buf + offset); - if (linkcap[2] == 0) { - linkrate = linkcap[0]; - linklanes = linkcap[1]; - data = (linkrate == 0x0a) ? 
(90 * linklanes) : (54 * linklanes); - if (data > 0xff) - data = 0xff; - maxclk = (u8)data; - } - } - return maxclk; -} - bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata) { struct ast_private *ast = to_ast_private(dev); diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 9c8d56b0a41b..a19315b2f7e5 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -69,7 +69,6 @@ enum ast_chip { enum ast_tx_chip { AST_TX_NONE, AST_TX_SIL164, - AST_TX_ITE66121, AST_TX_DP501, }; @@ -130,15 +129,26 @@ struct ast_i2c_chan { struct i2c_algo_bit_data bit; }; -struct ast_connector { +struct ast_vga_connector { struct drm_connector base; struct ast_i2c_chan *i2c; }; -static inline struct ast_connector * -to_ast_connector(struct drm_connector *connector) +static inline struct ast_vga_connector * +to_ast_vga_connector(struct drm_connector *connector) { - return container_of(connector, struct ast_connector, base); + return container_of(connector, struct ast_vga_connector, base); +} + +struct ast_sil164_connector { + struct drm_connector base; + struct ast_i2c_chan *i2c; +}; + +static inline struct ast_sil164_connector * +to_ast_sil164_connector(struct drm_connector *connector) +{ + return container_of(connector, struct ast_sil164_connector, base); } /* @@ -161,8 +171,20 @@ struct ast_private { struct drm_plane primary_plane; struct ast_cursor_plane cursor_plane; struct drm_crtc crtc; - struct drm_encoder encoder; - struct ast_connector connector; + union { + struct { + struct drm_encoder encoder; + struct ast_vga_connector vga_connector; + } vga; + struct { + struct drm_encoder encoder; + struct ast_sil164_connector sil164_connector; + } sil164; + struct { + struct drm_encoder encoder; + struct drm_connector connector; + } dp501; + } output; bool support_wide_screen; enum { @@ -172,7 +194,6 @@ struct ast_private { } config_mode; enum ast_tx_chip tx_chip_type; - u8 dp501_maxclk; u8 *dp501_fw_addr; const struct firmware *dp501_fw; /* dp501 fw */ }; diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 2c7115a4d81f..45b56b39ad47 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -40,6 +40,7 @@ #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_vram_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> @@ -1005,6 +1006,71 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) } } +static enum drm_mode_status +ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) +{ + struct ast_private *ast = to_ast_private(crtc->dev); + enum drm_mode_status status; + uint32_t jtemp; + + if (ast->support_wide_screen) { + if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050)) + return MODE_OK; + if ((mode->hdisplay == 1280) && (mode->vdisplay == 800)) + return MODE_OK; + if ((mode->hdisplay == 1440) && (mode->vdisplay == 900)) + return MODE_OK; + if ((mode->hdisplay == 1360) && (mode->vdisplay == 768)) + return MODE_OK; + if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) + return MODE_OK; + + if ((ast->chip == AST2100) || (ast->chip == AST2200) || + (ast->chip == AST2300) || (ast->chip == AST2400) || + (ast->chip == AST2500)) { + if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080)) + return MODE_OK; + + if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) { + jtemp = ast_get_index_reg_mask(ast, 
AST_IO_CRTC_PORT, 0xd1, 0xff); + if (jtemp & 0x01) + return MODE_NOMODE; + else + return MODE_OK; + } + } + } + + status = MODE_NOMODE; + + switch (mode->hdisplay) { + case 640: + if (mode->vdisplay == 480) + status = MODE_OK; + break; + case 800: + if (mode->vdisplay == 600) + status = MODE_OK; + break; + case 1024: + if (mode->vdisplay == 768) + status = MODE_OK; + break; + case 1280: + if (mode->vdisplay == 1024) + status = MODE_OK; + break; + case 1600: + if (mode->vdisplay == 1200) + status = MODE_OK; + break; + default: + break; + } + + return status; +} + static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { @@ -1107,6 +1173,7 @@ ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = { + .mode_valid = ast_crtc_helper_mode_valid, .atomic_check = ast_crtc_helper_atomic_check, .atomic_flush = ast_crtc_helper_atomic_flush, .atomic_enable = ast_crtc_helper_atomic_enable, @@ -1187,128 +1254,229 @@ static int ast_crtc_init(struct drm_device *dev) } /* - * Encoder + * VGA Connector */ -static int ast_encoder_init(struct drm_device *dev) +static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) { - struct ast_private *ast = to_ast_private(dev); - struct drm_encoder *encoder = &ast->encoder; + struct ast_vga_connector *ast_vga_connector = to_ast_vga_connector(connector); + struct edid *edid; + int count; + + if (!ast_vga_connector->i2c) + goto err_drm_connector_update_edid_property; + + edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter); + if (!edid) + goto err_drm_connector_update_edid_property; + + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + return count; + +err_drm_connector_update_edid_property: + drm_connector_update_edid_property(connector, NULL); + return 0; +} + +static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = { + .get_modes = ast_vga_connector_helper_get_modes, +}; + +static const struct drm_connector_funcs ast_vga_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ast_vga_connector_init(struct drm_device *dev, + struct ast_vga_connector *ast_vga_connector) +{ + struct drm_connector *connector = &ast_vga_connector->base; + int ret; + + ast_vga_connector->i2c = ast_i2c_create(dev); + if (!ast_vga_connector->i2c) + drm_err(dev, "failed to add ddc bus for connector\n"); + + if (ast_vga_connector->i2c) + ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs, + DRM_MODE_CONNECTOR_VGA, + &ast_vga_connector->i2c->adapter); + else + ret = drm_connector_init(dev, connector, &ast_vga_connector_funcs, + DRM_MODE_CONNECTOR_VGA); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + return 0; +} + +static int ast_vga_output_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.vga.encoder; + struct ast_vga_connector *ast_vga_connector = &ast->output.vga.vga_connector; + struct drm_connector *connector = &ast_vga_connector->base; int ret; ret = 
drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC); if (ret) return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); - encoder->possible_crtcs = 1; + ret = ast_vga_connector_init(dev, ast_vga_connector); + if (ret) + return ret; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; return 0; } /* - * Connector + * SIL164 Connector */ -static int ast_get_modes(struct drm_connector *connector) +static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector) +{ + struct ast_sil164_connector *ast_sil164_connector = to_ast_sil164_connector(connector); + struct edid *edid; + int count; + + if (!ast_sil164_connector->i2c) + goto err_drm_connector_update_edid_property; + + edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter); + if (!edid) + goto err_drm_connector_update_edid_property; + + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + return count; + +err_drm_connector_update_edid_property: + drm_connector_update_edid_property(connector, NULL); + return 0; +} + +static const struct drm_connector_helper_funcs ast_sil164_connector_helper_funcs = { + .get_modes = ast_sil164_connector_helper_get_modes, +}; + +static const struct drm_connector_funcs ast_sil164_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ast_sil164_connector_init(struct drm_device *dev, + struct ast_sil164_connector *ast_sil164_connector) { - struct ast_connector *ast_connector = to_ast_connector(connector); - struct ast_private *ast = to_ast_private(connector->dev); - struct edid *edid = NULL; - bool flags = false; + struct drm_connector *connector = &ast_sil164_connector->base; int ret; - if (ast->tx_chip_type == AST_TX_DP501) { - ast->dp501_maxclk = 0xff; - edid = kmalloc(128, GFP_KERNEL); - if (!edid) - return -ENOMEM; + ast_sil164_connector->i2c = ast_i2c_create(dev); + if (!ast_sil164_connector->i2c) + drm_err(dev, "failed to add ddc bus for connector\n"); - flags = ast_dp501_read_edid(connector->dev, (u8 *)edid); - if (flags) - ast->dp501_maxclk = ast_get_dp501_max_clk(connector->dev); - else - kfree(edid); - } - if (!flags && ast_connector->i2c) - edid = drm_get_edid(connector, &ast_connector->i2c->adapter); - if (edid) { - drm_connector_update_edid_property(&ast_connector->base, edid); - ret = drm_add_edid_modes(connector, edid); - kfree(edid); + if (ast_sil164_connector->i2c) + ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs, + DRM_MODE_CONNECTOR_DVII, + &ast_sil164_connector->i2c->adapter); + else + ret = drm_connector_init(dev, connector, &ast_sil164_connector_funcs, + DRM_MODE_CONNECTOR_DVII); + if (ret) return ret; - } - drm_connector_update_edid_property(&ast_connector->base, NULL); + + drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + return 0; } -static enum drm_mode_status ast_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static int ast_sil164_output_init(struct ast_private *ast) { - struct ast_private *ast = to_ast_private(connector->dev); - int flags = MODE_NOMODE; - uint32_t jtemp; + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc 
= &ast->crtc; + struct drm_encoder *encoder = &ast->output.sil164.encoder; + struct ast_sil164_connector *ast_sil164_connector = &ast->output.sil164.sil164_connector; + struct drm_connector *connector = &ast_sil164_connector->base; + int ret; - if (ast->support_wide_screen) { - if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050)) - return MODE_OK; - if ((mode->hdisplay == 1280) && (mode->vdisplay == 800)) - return MODE_OK; - if ((mode->hdisplay == 1440) && (mode->vdisplay == 900)) - return MODE_OK; - if ((mode->hdisplay == 1360) && (mode->vdisplay == 768)) - return MODE_OK; - if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) - return MODE_OK; + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); - if ((ast->chip == AST2100) || (ast->chip == AST2200) || - (ast->chip == AST2300) || (ast->chip == AST2400) || - (ast->chip == AST2500)) { - if ((mode->hdisplay == 1920) && (mode->vdisplay == 1080)) - return MODE_OK; + ret = ast_sil164_connector_init(dev, ast_sil164_connector); + if (ret) + return ret; - if ((mode->hdisplay == 1920) && (mode->vdisplay == 1200)) { - jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); - if (jtemp & 0x01) - return MODE_NOMODE; - else - return MODE_OK; - } - } - } - switch (mode->hdisplay) { - case 640: - if (mode->vdisplay == 480) - flags = MODE_OK; - break; - case 800: - if (mode->vdisplay == 600) - flags = MODE_OK; - break; - case 1024: - if (mode->vdisplay == 768) - flags = MODE_OK; - break; - case 1280: - if (mode->vdisplay == 1024) - flags = MODE_OK; - break; - case 1600: - if (mode->vdisplay == 1200) - flags = MODE_OK; - break; - default: - return flags; - } + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; - return flags; + return 0; } -static const struct drm_connector_helper_funcs ast_connector_helper_funcs = { - .get_modes = ast_get_modes, - .mode_valid = ast_mode_valid, +/* + * DP501 Connector + */ + +static int ast_dp501_connector_helper_get_modes(struct drm_connector *connector) +{ + void *edid; + bool succ; + int count; + + edid = kmalloc(EDID_LENGTH, GFP_KERNEL); + if (!edid) + goto err_drm_connector_update_edid_property; + + succ = ast_dp501_read_edid(connector->dev, edid); + if (!succ) + goto err_kfree; + + drm_connector_update_edid_property(connector, edid); + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + return count; + +err_kfree: + kfree(edid); +err_drm_connector_update_edid_property: + drm_connector_update_edid_property(connector, NULL); + return 0; +} + +static const struct drm_connector_helper_funcs ast_dp501_connector_helper_funcs = { + .get_modes = ast_dp501_connector_helper_get_modes, }; -static const struct drm_connector_funcs ast_connector_funcs = { +static const struct drm_connector_funcs ast_dp501_connector_funcs = { .reset = drm_atomic_helper_connector_reset, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = drm_connector_cleanup, @@ -1316,33 +1484,45 @@ static const struct drm_connector_funcs ast_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int ast_connector_init(struct drm_device *dev) +static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector *connector) { - struct ast_private *ast = to_ast_private(dev); - struct ast_connector *ast_connector = &ast->connector; - struct drm_connector *connector = &ast_connector->base; - struct drm_encoder *encoder = &ast->encoder; - - 
ast_connector->i2c = ast_i2c_create(dev); - if (!ast_connector->i2c) - drm_err(dev, "failed to add ddc bus for connector\n"); + int ret; - if (ast_connector->i2c) - drm_connector_init_with_ddc(dev, connector, &ast_connector_funcs, - DRM_MODE_CONNECTOR_VGA, - &ast_connector->i2c->adapter); - else - drm_connector_init(dev, connector, &ast_connector_funcs, - DRM_MODE_CONNECTOR_VGA); + ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + if (ret) + return ret; - drm_connector_helper_add(connector, &ast_connector_helper_funcs); + drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs); connector->interlace_allowed = 0; connector->doublescan_allowed = 0; connector->polled = DRM_CONNECTOR_POLL_CONNECT; - drm_connector_attach_encoder(connector, encoder); + return 0; +} + +static int ast_dp501_output_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.dp501.encoder; + struct drm_connector *connector = &ast->output.dp501.connector; + int ret; + + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = ast_dp501_connector_init(dev, connector); + if (ret) + return ret; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; return 0; } @@ -1351,8 +1531,7 @@ static int ast_connector_init(struct drm_device *dev) * Mode config */ -static const struct drm_mode_config_helper_funcs -ast_mode_config_helper_funcs = { +static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = { .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, }; @@ -1404,8 +1583,20 @@ int ast_mode_config_init(struct ast_private *ast) return ret; ast_crtc_init(dev); - ast_encoder_init(dev); - ast_connector_init(dev); + + switch (ast->tx_chip_type) { + case AST_TX_NONE: + ret = ast_vga_output_init(ast); + break; + case AST_TX_SIL164: + ret = ast_sil164_output_init(ast); + break; + case AST_TX_DP501: + ret = ast_dp501_output_init(ast); + break; + } + if (ret) + return ret; drm_mode_config_reset(dev); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 1656d27b78b6..651e3c109360 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -22,6 +22,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_module.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -833,7 +834,7 @@ static struct platform_driver atmel_hlcdc_dc_platform_driver = { .of_match_table = atmel_hlcdc_dc_of_match, }, }; -module_platform_driver(atmel_hlcdc_dc_platform_driver); +drm_module_platform_driver(atmel_hlcdc_dc_platform_driver); MODULE_AUTHOR("Jean-Jacques Hiblot <jjhiblot@traphandler.com>"); MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>"); diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index fcd93f1aec90..c86f5be4dfe0 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -75,6 +75,14 @@ config DRM_DISPLAY_CONNECTOR on ARM-based platforms. Saying Y here when this driver is not needed will not cause any issue. 
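Aside: the ast_mode.c rework above repeats one pattern per TX chip: create an encoder, restrict it to the single CRTC, create the matching connector, then attach the two. Distilled into a minimal sketch under assumed names (bar_*, VGA case only, no DDC fallback handling):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

static const struct drm_connector_funcs bar_connector_funcs = {
	.reset			= drm_atomic_helper_connector_reset,
	.fill_modes		= drm_helper_probe_single_connector_modes,
	.destroy		= drm_connector_cleanup,
	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
};

static int bar_vga_output_init(struct drm_device *dev, struct drm_crtc *crtc,
			       struct drm_encoder *encoder,
			       struct drm_connector *connector)
{
	int ret;

	/* DAC encoder feeding the one CRTC this hardware has */
	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
	if (ret)
		return ret;
	encoder->possible_crtcs = drm_crtc_mask(crtc);

	ret = drm_connector_init(dev, connector, &bar_connector_funcs,
				 DRM_MODE_CONNECTOR_VGA);
	if (ret)
		return ret;
	connector->polled = DRM_CONNECTOR_POLL_CONNECT;

	return drm_connector_attach_encoder(connector, encoder);
}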
+config DRM_ITE_IT6505 + tristate "ITE IT6505 DisplayPort bridge" + depends on OF + select DRM_KMS_HELPER + select EXTCON + help + ITE IT6505 DisplayPort bridge chip driver. + config DRM_LONTIUM_LT8912B tristate "Lontium LT8912B DSI/HDMI bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index f2c73683cfcb..425844c30495 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o +obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig index 319ba0df57be..cc0aa6572d98 100644 --- a/drivers/gpu/drm/bridge/analogix/Kconfig +++ b/drivers/gpu/drm/bridge/analogix/Kconfig @@ -32,6 +32,8 @@ config DRM_ANALOGIX_ANX7625 tristate "Analogix Anx7625 MIPI to DP interface support" depends on DRM depends on OF + select DRM_DP_AUX_BUS + select DRM_DP_HELPER select DRM_MIPI_DSI help ANX7625 is an ultra-low power 4K mobile HD transmitter diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 76662fce4ce6..633618bafd75 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -24,6 +24,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_crtc_helper.h> +#include <drm/dp/drm_dp_aux_bus.h> #include <drm/dp/drm_dp_helper.h> #include <drm/drm_edid.h> #include <drm/drm_hdcp.h> @@ -129,6 +130,23 @@ static int anx7625_reg_write(struct anx7625_data *ctx, return ret; } +static int anx7625_reg_block_write(struct anx7625_data *ctx, + struct i2c_client *client, + u8 reg_addr, u8 len, u8 *buf) +{ + int ret; + struct device *dev = &client->dev; + + i2c_access_workaround(ctx, client); + + ret = i2c_smbus_write_i2c_block_data(client, reg_addr, len, buf); + if (ret < 0) + dev_err(dev, "write i2c block failed id=%x\n:%x", + client->addr, reg_addr); + + return ret; +} + static int anx7625_write_or(struct anx7625_data *ctx, struct i2c_client *client, u8 offset, u8 mask) @@ -214,25 +232,28 @@ static int wait_aux_op_finish(struct anx7625_data *ctx) return 0; } -static int anx7625_aux_dpcd_read(struct anx7625_data *ctx, - u32 address, u8 len, u8 *buf) +static int anx7625_aux_trans(struct anx7625_data *ctx, u8 op, u32 address, + u8 len, u8 *buf) { struct device *dev = &ctx->client->dev; int ret; u8 addrh, addrm, addrl; u8 cmd; + bool is_write = !(op & DP_AUX_I2C_READ); - if (len > MAX_DPCD_BUFFER_SIZE) { + if (len > DP_AUX_MAX_PAYLOAD_BYTES) { dev_err(dev, "exceed aux buffer len.\n"); return -EINVAL; } + if (!len) + return len; + addrl = address & 0xFF; addrm = (address >> 8) & 0xFF; addrh = (address >> 16) & 0xFF; - cmd = DPCD_CMD(len, DPCD_READ); - cmd = ((len - 1) << 4) | 0x09; + cmd = DPCD_CMD(len, op); /* Set command and length */ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, @@ -246,6 +267,9 @@ static int anx7625_aux_dpcd_read(struct anx7625_data *ctx, ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, AP_AUX_ADDR_19_16, addrh); + if (is_write) + ret |= anx7625_reg_block_write(ctx, ctx->i2c.rx_p0_client, + AP_AUX_BUFF_START, len, buf); /* 
Enable aux access */ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client, AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN); @@ -255,14 +279,17 @@ static int anx7625_aux_dpcd_read(struct anx7625_data *ctx, return -EIO; } - usleep_range(2000, 2100); - ret = wait_aux_op_finish(ctx); - if (ret) { + if (ret < 0) { dev_err(dev, "aux IO error: wait aux op finish.\n"); return ret; } + /* Write done */ + if (is_write) + return len; + + /* Read done, read out dpcd data */ ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_BUFF_START, len, buf); if (ret < 0) { @@ -270,7 +297,7 @@ static int anx7625_aux_dpcd_read(struct anx7625_data *ctx, return -EIO; } - return 0; + return len; } static int anx7625_video_mute_control(struct anx7625_data *ctx, @@ -845,7 +872,7 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx) } /* Read downstream capability */ - anx7625_aux_dpcd_read(ctx, 0x68028, 1, &bcap); + anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, 0x68028, 1, &bcap); if (!(bcap & 0x01)) { pr_warn("downstream not support HDCP 1.4, cap(%x).\n", bcap); return 0; @@ -918,6 +945,7 @@ static void anx7625_dp_stop(struct anx7625_data *ctx) { struct device *dev = &ctx->client->dev; int ret; + u8 data; DRM_DEV_DEBUG_DRIVER(dev, "stop dp output\n"); @@ -929,6 +957,11 @@ static void anx7625_dp_stop(struct anx7625_data *ctx) ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, 0x08, 0x7f); ret |= anx7625_video_mute_control(ctx, 1); + + dev_dbg(dev, "notify downstream enter into standby\n"); + /* Downstream monitor enter into standby mode */ + data = 2; + ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data); if (ret < 0) DRM_DEV_ERROR(dev, "IO error : mute video fail\n"); @@ -1076,7 +1109,8 @@ static int segments_edid_read(struct anx7625_data *ctx, static int sp_tx_edid_read(struct anx7625_data *ctx, u8 *pedid_blocks_buf) { - u8 offset, edid_pos; + u8 offset; + int edid_pos; int count, blocks_num; u8 pblock_buf[MAX_DPCD_BUFFER_SIZE]; u8 i, j; @@ -1627,11 +1661,56 @@ static int anx7625_parse_dt(struct device *dev, return 0; } +static bool anx7625_of_panel_on_aux_bus(struct device *dev) +{ + struct device_node *bus, *panel; + + bus = of_get_child_by_name(dev->of_node, "aux-bus"); + if (!bus) + return false; + + panel = of_get_child_by_name(bus, "panel"); + of_node_put(bus); + if (!panel) + return false; + of_node_put(panel); + + return true; +} + static inline struct anx7625_data *bridge_to_anx7625(struct drm_bridge *bridge) { return container_of(bridge, struct anx7625_data, bridge); } +static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) +{ + struct anx7625_data *ctx = container_of(aux, struct anx7625_data, aux); + struct device *dev = &ctx->client->dev; + u8 request = msg->request & ~DP_AUX_I2C_MOT; + int ret = 0; + + pm_runtime_get_sync(dev); + msg->reply = 0; + switch (request) { + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: + break; + default: + ret = -EINVAL; + } + if (!ret) + ret = anx7625_aux_trans(ctx, msg->request, msg->address, + msg->size, msg->buffer); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; +} + static struct edid *anx7625_get_edid(struct anx7625_data *ctx) { struct device *dev = &ctx->client->dev; @@ -2038,6 +2117,13 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge, return -ENODEV; } + ctx->aux.drm_dev = bridge->dev; + err = drm_dp_aux_register(&ctx->aux); + if (err) { + dev_err(dev, "failed to register aux channel: %d\n", err); + 
return err; + } + if (ctx->pdata.panel_bridge) { err = drm_bridge_attach(bridge->encoder, ctx->pdata.panel_bridge, @@ -2051,6 +2137,13 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge, return 0; } +static void anx7625_bridge_detach(struct drm_bridge *bridge) +{ + struct anx7625_data *ctx = bridge_to_anx7625(bridge); + + drm_dp_aux_unregister(&ctx->aux); +} + static enum drm_mode_status anx7625_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, @@ -2316,6 +2409,7 @@ static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge, static const struct drm_bridge_funcs anx7625_bridge_funcs = { .attach = anx7625_bridge_attach, + .detach = anx7625_bridge_detach, .mode_valid = anx7625_bridge_mode_valid, .mode_set = anx7625_bridge_mode_set, .atomic_check = anx7625_bridge_atomic_check, @@ -2473,6 +2567,12 @@ static const struct dev_pm_ops anx7625_pm_ops = { anx7625_runtime_pm_resume, NULL) }; +static void anx7625_runtime_disable(void *data) +{ + pm_runtime_dont_use_autosuspend(data); + pm_runtime_disable(data); +} + static int anx7625_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -2487,7 +2587,7 @@ static int anx7625_i2c_probe(struct i2c_client *client, return -ENODEV; } - platform = kzalloc(sizeof(*platform), GFP_KERNEL); + platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); if (!platform) { DRM_DEV_ERROR(dev, "fail to allocate driver data\n"); return -ENOMEM; @@ -2495,13 +2595,6 @@ static int anx7625_i2c_probe(struct i2c_client *client, pdata = &platform->pdata; - ret = anx7625_parse_dt(dev, pdata); - if (ret) { - if (ret != -EPROBE_DEFER) - DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret); - goto free_platform; - } - platform->client = client; i2c_set_clientdata(client, platform); @@ -2524,7 +2617,7 @@ static int anx7625_i2c_probe(struct i2c_client *client, if (!platform->hdcp_workqueue) { dev_err(dev, "fail to create work queue\n"); ret = -ENOMEM; - goto free_platform; + return ret; } platform->pdata.intp_irq = client->irq; @@ -2549,6 +2642,19 @@ static int anx7625_i2c_probe(struct i2c_client *client, } } + platform->aux.name = "anx7625-aux"; + platform->aux.dev = dev; + platform->aux.transfer = anx7625_aux_transfer; + drm_dp_aux_init(&platform->aux); + devm_of_dp_aux_populate_ep_devices(&platform->aux); + + ret = anx7625_parse_dt(dev, pdata); + if (ret) { + if (ret != -EPROBE_DEFER) + DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret); + return ret; + } + if (anx7625_register_i2c_dummy_clients(platform, client) != 0) { ret = -ENOMEM; DRM_DEV_ERROR(dev, "fail to reserve I2C bus.\n"); @@ -2556,6 +2662,12 @@ static int anx7625_i2c_probe(struct i2c_client *client, } pm_runtime_enable(dev); + pm_runtime_set_autosuspend_delay(dev, 1000); + pm_runtime_use_autosuspend(dev); + pm_suspend_ignore_children(dev, true); + ret = devm_add_action_or_reset(dev, anx7625_runtime_disable, dev); + if (ret) + return ret; if (!platform->pdata.low_power_mode) { anx7625_disable_pd_protocol(platform); @@ -2568,7 +2680,8 @@ static int anx7625_i2c_probe(struct i2c_client *client, platform->bridge.funcs = &anx7625_bridge_funcs; platform->bridge.of_node = client->dev.of_node; - platform->bridge.ops = DRM_BRIDGE_OP_EDID; + if (!anx7625_of_panel_on_aux_bus(&client->dev)) + platform->bridge.ops |= DRM_BRIDGE_OP_EDID; if (!platform->pdata.panel_bridge) platform->bridge.ops |= DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_DETECT; @@ -2609,9 +2722,6 @@ free_hdcp_wq: if (platform->hdcp_workqueue) destroy_workqueue(platform->hdcp_workqueue); 
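Aside: the anx7625 probe path above moves to device-managed allocation (devm_kzalloc()) and registers a teardown action for runtime PM with devm_add_action_or_reset(), which is why the free_platform label and the explicit kfree() calls disappear further down. The same pattern in isolation, with hypothetical baz_* names:

#include <linux/device.h>
#include <linux/pm_runtime.h>

struct baz_ctx {
	struct device *dev;
};

static void baz_runtime_disable(void *data)
{
	pm_runtime_dont_use_autosuspend(data);
	pm_runtime_disable(data);
}

static int baz_probe(struct device *dev)
{
	struct baz_ctx *ctx;
	int ret;

	/* Freed automatically when the device is unbound; no kfree() needed. */
	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->dev = dev;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_use_autosuspend(dev);

	/*
	 * Runs baz_runtime_disable() on unbind, or immediately if this call
	 * itself fails, so later error paths stay balanced without labels.
	 */
	ret = devm_add_action_or_reset(dev, baz_runtime_disable, dev);
	if (ret)
		return ret;

	return 0;
}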
-free_platform: - kfree(platform); - return ret; } @@ -2638,7 +2748,6 @@ static int anx7625_i2c_remove(struct i2c_client *client) if (platform->pdata.audio_en) anx7625_unregister_audio(platform); - kfree(platform); return 0; } diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h index 56165f5b254c..edbbfe410a56 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.h +++ b/drivers/gpu/drm/bridge/analogix/anx7625.h @@ -242,8 +242,6 @@ #define AP_AUX_COMMAND 0x27 /* com+len */ #define LENGTH_SHIFT 4 -#define DPCD_READ 0x09 -#define DPCD_WRITE 0x08 #define DPCD_CMD(len, cmd) ((((len) - 1) << LENGTH_SHIFT) | (cmd)) /* Bit 0&1: 3D video structure */ @@ -474,6 +472,7 @@ struct anx7625_data { u8 bridge_attached; struct drm_connector *connector; struct mipi_dsi_device *dsi; + struct drm_dp_aux aux; }; #endif /* __ANX7625_H__ */ diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c new file mode 100644 index 000000000000..fb16a176822d --- /dev/null +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -0,0 +1,3352 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/extcon.h> +#include <linux/fs.h> +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/types.h> +#include <linux/wait.h> + +#include <crypto/hash.h> + +#include <drm/dp/drm_dp_helper.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_edid.h> +#include <drm/drm_hdcp.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> + +#include <sound/hdmi-codec.h> + +#define REG_IC_VER 0x04 + +#define REG_RESET_CTRL 0x05 +#define VIDEO_RESET BIT(0) +#define AUDIO_RESET BIT(1) +#define ALL_LOGIC_RESET BIT(2) +#define AUX_RESET BIT(3) +#define HDCP_RESET BIT(4) + +#define INT_STATUS_01 0x06 +#define INT_MASK_01 0x09 +#define INT_HPD_CHANGE 0 +#define INT_RECEIVE_HPD_IRQ 1 +#define INT_SCDT_CHANGE 2 +#define INT_HDCP_FAIL 3 +#define INT_HDCP_DONE 4 +#define BIT_OFFSET(x) (((x) - INT_STATUS_01) * BITS_PER_BYTE) +#define BIT_INT_HPD INT_HPD_CHANGE +#define BIT_INT_HPD_IRQ INT_RECEIVE_HPD_IRQ +#define BIT_INT_SCDT INT_SCDT_CHANGE +#define BIT_INT_HDCP_FAIL INT_HDCP_FAIL +#define BIT_INT_HDCP_DONE INT_HDCP_DONE + +#define INT_STATUS_02 0x07 +#define INT_MASK_02 0x0A +#define INT_AUX_CMD_FAIL 0 +#define INT_HDCP_KSV_CHECK 1 +#define INT_AUDIO_FIFO_ERROR 2 +#define BIT_INT_AUX_CMD_FAIL (BIT_OFFSET(0x07) + INT_AUX_CMD_FAIL) +#define BIT_INT_HDCP_KSV_CHECK (BIT_OFFSET(0x07) + INT_HDCP_KSV_CHECK) +#define BIT_INT_AUDIO_FIFO_ERROR (BIT_OFFSET(0x07) + INT_AUDIO_FIFO_ERROR) + +#define INT_STATUS_03 0x08 +#define INT_MASK_03 0x0B +#define INT_LINK_TRAIN_FAIL 4 +#define INT_VID_FIFO_ERROR 5 +#define INT_IO_LATCH_FIFO_OVERFLOW 7 +#define BIT_INT_LINK_TRAIN_FAIL (BIT_OFFSET(0x08) + INT_LINK_TRAIN_FAIL) +#define BIT_INT_VID_FIFO_ERROR (BIT_OFFSET(0x08) + INT_VID_FIFO_ERROR) +#define BIT_INT_IO_FIFO_OVERFLOW (BIT_OFFSET(0x08) + INT_IO_LATCH_FIFO_OVERFLOW) + +#define REG_SYSTEM_STS 0x0D +#define INT_STS BIT(0) +#define HPD_STS BIT(1) +#define VIDEO_STB BIT(2) + +#define REG_LINK_TRAIN_STS 0x0E 
+#define LINK_STATE_CR BIT(2) +#define LINK_STATE_EQ BIT(3) +#define LINK_STATE_NORP BIT(4) + +#define REG_BANK_SEL 0x0F +#define REG_CLK_CTRL0 0x10 +#define M_PCLK_DELAY 0x03 + +#define REG_AUX_OPT 0x11 +#define AUX_AUTO_RST BIT(0) +#define AUX_FIX_FREQ BIT(3) + +#define REG_DATA_CTRL0 0x12 +#define VIDEO_LATCH_EDGE BIT(4) +#define ENABLE_PCLK_COUNTER BIT(7) + +#define REG_PCLK_COUNTER_VALUE 0x13 + +#define REG_501_FIFO_CTRL 0x15 +#define RST_501_FIFO BIT(1) + +#define REG_TRAIN_CTRL0 0x16 +#define FORCE_LBR BIT(0) +#define LANE_COUNT_MASK 0x06 +#define LANE_SWAP BIT(3) +#define SPREAD_AMP_5 BIT(4) +#define FORCE_CR_DONE BIT(5) +#define FORCE_EQ_DONE BIT(6) + +#define REG_TRAIN_CTRL1 0x17 +#define AUTO_TRAIN BIT(0) +#define MANUAL_TRAIN BIT(1) +#define FORCE_RETRAIN BIT(2) + +#define REG_AUX_CTRL 0x23 +#define CLR_EDID_FIFO BIT(0) +#define AUX_USER_MODE BIT(1) +#define AUX_NO_SEGMENT_WR BIT(6) +#define AUX_EN_FIFO_READ BIT(7) + +#define REG_AUX_ADR_0_7 0x24 +#define REG_AUX_ADR_8_15 0x25 +#define REG_AUX_ADR_16_19 0x26 +#define REG_AUX_OUT_DATA0 0x27 + +#define REG_AUX_CMD_REQ 0x2B +#define AUX_BUSY BIT(5) + +#define REG_AUX_DATA_0_7 0x2C +#define REG_AUX_DATA_8_15 0x2D +#define REG_AUX_DATA_16_23 0x2E +#define REG_AUX_DATA_24_31 0x2F + +#define REG_AUX_DATA_FIFO 0x2F + +#define REG_AUX_ERROR_STS 0x9F +#define M_AUX_REQ_FAIL 0x03 + +#define REG_HDCP_CTRL1 0x38 +#define HDCP_CP_ENABLE BIT(0) + +#define REG_HDCP_TRIGGER 0x39 +#define HDCP_TRIGGER_START BIT(0) +#define HDCP_TRIGGER_CPIRQ BIT(1) +#define HDCP_TRIGGER_KSV_DONE BIT(4) +#define HDCP_TRIGGER_KSV_FAIL BIT(5) + +#define REG_HDCP_CTRL2 0x3A +#define HDCP_AN_SEL BIT(0) +#define HDCP_AN_GEN BIT(1) +#define HDCP_HW_HPDIRQ_ACT BIT(2) +#define HDCP_EN_M0_READ BIT(5) + +#define REG_M0_0_7 0x4C +#define REG_AN_0_7 0x4C +#define REG_SP_CTRL0 0x58 +#define REG_IP_CTRL1 0x59 +#define REG_IP_CTRL2 0x5A + +#define REG_LINK_DRV 0x5C +#define DRV_HS BIT(1) + +#define REG_DRV_LN_DATA_SEL 0x5D + +#define REG_AUX 0x5E + +#define REG_VID_BUS_CTRL0 0x60 +#define IN_DDR BIT(2) +#define DDR_CD (0x01 << 6) + +#define REG_VID_BUS_CTRL1 0x61 +#define TX_FIFO_RESET BIT(1) + +#define REG_INPUT_CTRL 0xA0 +#define INPUT_HSYNC_POL BIT(0) +#define INPUT_VSYNC_POL BIT(2) +#define INPUT_INTERLACED BIT(4) + +#define REG_INPUT_HTOTAL 0xA1 +#define REG_INPUT_HACTIVE_START 0xA3 +#define REG_INPUT_HACTIVE_WIDTH 0xA5 +#define REG_INPUT_HFRONT_PORCH 0xA7 +#define REG_INPUT_HSYNC_WIDTH 0xA9 +#define REG_INPUT_VTOTAL 0xAB +#define REG_INPUT_VACTIVE_START 0xAD +#define REG_INPUT_VACTIVE_WIDTH 0xAF +#define REG_INPUT_VFRONT_PORCH 0xB1 +#define REG_INPUT_VSYNC_WIDTH 0xB3 + +#define REG_AUDIO_SRC_CTRL 0xB8 +#define M_AUDIO_I2S_EN 0x0F +#define EN_I2S0 BIT(0) +#define EN_I2S1 BIT(1) +#define EN_I2S2 BIT(2) +#define EN_I2S3 BIT(3) +#define AUDIO_FIFO_RESET BIT(7) + +#define REG_AUDIO_FMT 0xB9 +#define REG_AUDIO_FIFO_SEL 0xBA + +#define REG_AUDIO_CTRL0 0xBB +#define AUDIO_FULL_PKT BIT(4) +#define AUDIO_16B_BOUND BIT(5) + +#define REG_AUDIO_CTRL1 0xBC +#define REG_AUDIO_INPUT_FREQ 0xBE + +#define REG_IEC958_STS0 0xBF +#define REG_IEC958_STS1 0xC0 +#define REG_IEC958_STS2 0xC1 +#define REG_IEC958_STS3 0xC2 +#define REG_IEC958_STS4 0xC3 + +#define REG_HPD_IRQ_TIME 0xC9 +#define REG_AUX_DEBUG_MODE 0xCA +#define REG_AUX_OPT2 0xCB +#define REG_HDCP_OPT 0xCE +#define REG_USER_DRV_PRE 0xCF + +#define REG_DATA_MUTE_CTRL 0xD3 +#define ENABLE_ENHANCED_FRAME BIT(0) +#define ENABLE_AUTO_VIDEO_FIFO_RESET BIT(1) +#define EN_VID_MUTE BIT(4) +#define EN_AUD_MUTE BIT(5) + +#define 
REG_TIME_STMP_CTRL 0xD4 +#define EN_ENHANCE_VID_STMP BIT(0) +#define EN_ENHANCE_AUD_STMP BIT(2) +#define M_STAMP_STEP 0x30 +#define EN_SSC_GAT BIT(6) + +#define REG_INFOFRAME_CTRL 0xE8 +#define EN_AVI_PKT BIT(0) +#define EN_AUD_PKT BIT(1) +#define EN_MPG_PKT BIT(2) +#define EN_GEN_PKT BIT(3) +#define EN_VID_TIME_STMP BIT(4) +#define EN_AUD_TIME_STMP BIT(5) +#define EN_VID_CTRL_PKT (EN_AVI_PKT | EN_VID_TIME_STMP) +#define EN_AUD_CTRL_PKT (EN_AUD_PKT | EN_AUD_TIME_STMP) + +#define REG_AUDIO_N_0_7 0xDE +#define REG_AUDIO_N_8_15 0xDF +#define REG_AUDIO_N_16_23 0xE0 + +#define REG_AVI_INFO_DB1 0xE9 +#define REG_AVI_INFO_DB2 0xEA +#define REG_AVI_INFO_DB3 0xEB +#define REG_AVI_INFO_DB4 0xEC +#define REG_AVI_INFO_DB5 0xED +#define REG_AVI_INFO_SUM 0xF6 + +#define REG_AUD_INFOFRAM_DB1 0xF7 +#define REG_AUD_INFOFRAM_DB2 0xF8 +#define REG_AUD_INFOFRAM_DB3 0xF9 +#define REG_AUD_INFOFRAM_DB4 0xFA +#define REG_AUD_INFOFRAM_SUM 0xFB + +/* the following six registers are in bank1 */ +#define REG_DRV_0_DB_800_MV 0x7E +#define REG_PRE_0_DB_800_MV 0x7F +#define REG_PRE_3P5_DB_800_MV 0x81 +#define REG_SSC_CTRL0 0x88 +#define REG_SSC_CTRL1 0x89 +#define REG_SSC_CTRL2 0x8A + +#define RBR DP_LINK_BW_1_62 +#define HBR DP_LINK_BW_2_7 +#define HBR2 DP_LINK_BW_5_4 +#define HBR3 DP_LINK_BW_8_1 + +#define DPCD_V_1_1 0x11 +#define MISC_VERB 0xF0 +#define MISC_VERC 0x70 +#define I2S_INPUT_FORMAT_STANDARD 0 +#define I2S_INPUT_FORMAT_32BIT 1 +#define I2S_INPUT_LEFT_JUSTIFIED 0 +#define I2S_INPUT_RIGHT_JUSTIFIED 1 +#define I2S_DATA_1T_DELAY 0 +#define I2S_DATA_NO_DELAY 1 +#define I2S_WS_LEFT_CHANNEL 0 +#define I2S_WS_RIGHT_CHANNEL 1 +#define I2S_DATA_MSB_FIRST 0 +#define I2S_DATA_LSB_FIRST 1 +#define WORD_LENGTH_16BIT 0 +#define WORD_LENGTH_18BIT 1 +#define WORD_LENGTH_20BIT 2 +#define WORD_LENGTH_24BIT 3 +#define DEBUGFS_DIR_NAME "it6505-debugfs" +#define READ_BUFFER_SIZE 200 + +/* Vendor option */ +#define HDCP_DESIRED 1 +#define MAX_LANE_COUNT 4 +#define MAX_LINK_RATE HBR +#define AUTO_TRAIN_RETRY 3 +#define MAX_HDCP_DOWN_STREAM_COUNT 10 +#define MAX_CR_LEVEL 0x03 +#define MAX_EQ_LEVEL 0x03 +#define AUX_WAIT_TIMEOUT_MS 15 +#define AUX_FIFO_MAX_SIZE 32 +#define PIXEL_CLK_DELAY 1 +#define PIXEL_CLK_INVERSE 0 +#define ADJUST_PHASE_THRESHOLD 80000 +#define DPI_PIXEL_CLK_MAX 95000 +#define HDCP_SHA1_FIFO_LEN (MAX_HDCP_DOWN_STREAM_COUNT * 5 + 10) +#define DEFAULT_PWR_ON 0 +#define DEFAULT_DRV_HOLD 0 + +#define AUDIO_SELECT I2S +#define AUDIO_TYPE LPCM +#define AUDIO_SAMPLE_RATE SAMPLE_RATE_48K +#define AUDIO_CHANNEL_COUNT 2 +#define I2S_INPUT_FORMAT I2S_INPUT_FORMAT_32BIT +#define I2S_JUSTIFIED I2S_INPUT_LEFT_JUSTIFIED +#define I2S_DATA_DELAY I2S_DATA_1T_DELAY +#define I2S_WS_CHANNEL I2S_WS_LEFT_CHANNEL +#define I2S_DATA_SEQUENCE I2S_DATA_MSB_FIRST +#define AUDIO_WORD_LENGTH WORD_LENGTH_24BIT + +enum aux_cmd_type { + CMD_AUX_NATIVE_READ = 0x0, + CMD_AUX_NATIVE_WRITE = 0x5, + CMD_AUX_I2C_EDID_READ = 0xB, +}; + +enum aux_cmd_reply { + REPLY_ACK, + REPLY_NACK, + REPLY_DEFER, +}; + +enum link_train_status { + LINK_IDLE, + LINK_BUSY, + LINK_OK, +}; + +enum hdcp_state { + HDCP_AUTH_IDLE, + HDCP_AUTH_GOING, + HDCP_AUTH_DONE, +}; + +struct it6505_platform_data { + struct regulator *pwr18; + struct regulator *ovdd; + struct gpio_desc *gpiod_reset; +}; + +enum it6505_audio_select { + I2S = 0, + SPDIF, +}; + +enum it6505_audio_sample_rate { + SAMPLE_RATE_24K = 0x6, + SAMPLE_RATE_32K = 0x3, + SAMPLE_RATE_48K = 0x2, + SAMPLE_RATE_96K = 0xA, + SAMPLE_RATE_192K = 0xE, + SAMPLE_RATE_44_1K = 0x0, + SAMPLE_RATE_88_2K = 0x8, + 
SAMPLE_RATE_176_4K = 0xC, +}; + +enum it6505_audio_type { + LPCM = 0, + NLPCM, + DSS, +}; + +struct it6505_audio_data { + enum it6505_audio_select select; + enum it6505_audio_sample_rate sample_rate; + enum it6505_audio_type type; + u8 word_length; + u8 channel_count; + u8 i2s_input_format; + u8 i2s_justified; + u8 i2s_data_delay; + u8 i2s_ws_channel; + u8 i2s_data_sequence; +}; + +struct it6505_audio_sample_rate_map { + enum it6505_audio_sample_rate rate; + int sample_rate_value; +}; + +struct it6505_drm_dp_link { + unsigned char revision; + unsigned int rate; + unsigned int num_lanes; + unsigned long capabilities; +}; + +struct debugfs_entries { + char *name; + const struct file_operations *fops; +}; + +struct it6505 { + struct drm_dp_aux aux; + struct drm_bridge bridge; + struct i2c_client *client; + struct it6505_drm_dp_link link; + struct it6505_platform_data pdata; + /* + * Mutex protects extcon and interrupt functions from interfering + * each other. + */ + struct mutex extcon_lock; + struct mutex mode_lock; /* used to bridge_detect */ + struct mutex aux_lock; /* used to aux data transfers */ + struct regmap *regmap; + struct drm_display_mode source_output_mode; + struct drm_display_mode video_info; + struct notifier_block event_nb; + struct extcon_dev *extcon; + struct work_struct extcon_wq; + enum drm_connector_status connector_status; + enum link_train_status link_state; + struct work_struct link_works; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + u8 lane_count; + u8 link_rate_bw_code; + u8 sink_count; + bool step_train; + bool branch_device; + bool enable_ssc; + bool lane_swap_disabled; + bool lane_swap; + bool powered; + bool hpd_state; + u32 afe_setting; + enum hdcp_state hdcp_status; + struct delayed_work hdcp_work; + struct work_struct hdcp_wait_ksv_list; + struct completion wait_edid_complete; + u8 auto_train_retry; + bool hdcp_desired; + bool is_repeater; + u8 hdcp_down_stream_count; + u8 bksvs[DRM_HDCP_KSV_LEN]; + u8 sha1_input[HDCP_SHA1_FIFO_LEN]; + bool enable_enhanced_frame; + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + struct delayed_work delayed_audio; + struct it6505_audio_data audio; + struct dentry *debugfs; + + /* it6505 driver hold option */ + bool enable_drv_hold; +}; + +struct it6505_step_train_para { + u8 voltage_swing[MAX_LANE_COUNT]; + u8 pre_emphasis[MAX_LANE_COUNT]; +}; + +/* + * Vendor option afe settings for different platforms + * 0: without FPC cable + * 1: with FPC cable + */ + +static const u8 afe_setting_table[][3] = { + {0x82, 0x00, 0x45}, + {0x93, 0x2A, 0x85} +}; + +static const struct it6505_audio_sample_rate_map audio_sample_rate_map[] = { + {SAMPLE_RATE_24K, 24000}, + {SAMPLE_RATE_32K, 32000}, + {SAMPLE_RATE_48K, 48000}, + {SAMPLE_RATE_96K, 96000}, + {SAMPLE_RATE_192K, 192000}, + {SAMPLE_RATE_44_1K, 44100}, + {SAMPLE_RATE_88_2K, 88200}, + {SAMPLE_RATE_176_4K, 176400}, +}; + +static const struct regmap_range it6505_bridge_volatile_ranges[] = { + { .range_min = 0, .range_max = 0xFF }, +}; + +static const struct regmap_access_table it6505_bridge_volatile_table = { + .yes_ranges = it6505_bridge_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(it6505_bridge_volatile_ranges), +}; + +static const struct regmap_config it6505_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .volatile_table = &it6505_bridge_volatile_table, + .cache_type = REGCACHE_NONE, +}; + +static int it6505_read(struct it6505 *it6505, unsigned int reg_addr) +{ + unsigned int value; + int err; + struct device *dev = &it6505->client->dev; + + err = 
regmap_read(it6505->regmap, reg_addr, &value); + if (err < 0) { + dev_err(dev, "read failed reg[0x%x] err: %d", reg_addr, err); + return err; + } + + return value; +} + +static int it6505_write(struct it6505 *it6505, unsigned int reg_addr, + unsigned int reg_val) +{ + int err; + struct device *dev = &it6505->client->dev; + + err = regmap_write(it6505->regmap, reg_addr, reg_val); + + if (err < 0) { + dev_err(dev, "write failed reg[0x%x] = 0x%x err = %d", + reg_addr, reg_val, err); + return err; + } + + return 0; +} + +static int it6505_set_bits(struct it6505 *it6505, unsigned int reg, + unsigned int mask, unsigned int value) +{ + int err; + struct device *dev = &it6505->client->dev; + + err = regmap_update_bits(it6505->regmap, reg, mask, value); + if (err < 0) { + dev_err(dev, "write reg[0x%x] = 0x%x mask = 0x%x failed err %d", + reg, value, mask, err); + return err; + } + + return 0; +} + +static void it6505_debug_print(struct it6505 *it6505, unsigned int reg, + const char *prefix) +{ + struct device *dev = &it6505->client->dev; + int val; + + if (likely(!(__drm_debug & DRM_UT_DRIVER))) + return; + + val = it6505_read(it6505, reg); + if (val < 0) + DRM_DEV_DEBUG_DRIVER(dev, "%s reg[%02x] read error (%d)", + prefix, reg, val); + else + DRM_DEV_DEBUG_DRIVER(dev, "%s reg[%02x] = 0x%02x", prefix, reg, + val); +} + +static int it6505_dpcd_read(struct it6505 *it6505, unsigned long offset) +{ + u8 value; + int ret; + struct device *dev = &it6505->client->dev; + + ret = drm_dp_dpcd_readb(&it6505->aux, offset, &value); + if (ret < 0) { + dev_err(dev, "DPCD read failed [0x%lx] ret: %d", offset, ret); + return ret; + } + return value; +} + +static int it6505_dpcd_write(struct it6505 *it6505, unsigned long offset, + u8 datain) +{ + int ret; + struct device *dev = &it6505->client->dev; + + ret = drm_dp_dpcd_writeb(&it6505->aux, offset, datain); + if (ret < 0) { + dev_err(dev, "DPCD write failed [0x%lx] ret: %d", offset, ret); + return ret; + } + return 0; +} + +static int it6505_get_dpcd(struct it6505 *it6505, int offset, u8 *dpcd, int num) +{ + int ret; + struct device *dev = &it6505->client->dev; + + ret = drm_dp_dpcd_read(&it6505->aux, offset, dpcd, num); + + if (ret < 0) + return ret; + + DRM_DEV_DEBUG_DRIVER(dev, "ret = %d DPCD[0x%x] = 0x%*ph", ret, offset, + num, dpcd); + + return 0; +} + +static void it6505_dump(struct it6505 *it6505) +{ + unsigned int i, j; + u8 regs[16]; + struct device *dev = &it6505->client->dev; + + for (i = 0; i <= 0xff; i += 16) { + for (j = 0; j < 16; j++) + regs[j] = it6505_read(it6505, i + j); + + DRM_DEV_DEBUG_DRIVER(dev, "[0x%02x] = %16ph", i, regs); + } +} + +static bool it6505_get_sink_hpd_status(struct it6505 *it6505) +{ + int reg_0d; + + reg_0d = it6505_read(it6505, REG_SYSTEM_STS); + + if (reg_0d < 0) + return false; + + return reg_0d & HPD_STS; +} + +static int it6505_read_word(struct it6505 *it6505, unsigned int reg) +{ + int val0, val1; + + val0 = it6505_read(it6505, reg); + if (val0 < 0) + return val0; + + val1 = it6505_read(it6505, reg + 1); + if (val1 < 0) + return val1; + + return (val1 << 8) | val0; +} + +static void it6505_calc_video_info(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + int hsync_pol, vsync_pol, interlaced; + int htotal, hdes, hdew, hfph, hsyncw; + int vtotal, vdes, vdew, vfph, vsyncw; + int rddata, i, pclk, sum = 0; + + usleep_range(10000, 15000); + rddata = it6505_read(it6505, REG_INPUT_CTRL); + hsync_pol = rddata & INPUT_HSYNC_POL; + vsync_pol = (rddata & INPUT_VSYNC_POL) >> 2; + interlaced = (rddata & 
INPUT_INTERLACED) >> 4; + + htotal = it6505_read_word(it6505, REG_INPUT_HTOTAL) & 0x1FFF; + hdes = it6505_read_word(it6505, REG_INPUT_HACTIVE_START) & 0x1FFF; + hdew = it6505_read_word(it6505, REG_INPUT_HACTIVE_WIDTH) & 0x1FFF; + hfph = it6505_read_word(it6505, REG_INPUT_HFRONT_PORCH) & 0x1FFF; + hsyncw = it6505_read_word(it6505, REG_INPUT_HSYNC_WIDTH) & 0x1FFF; + + vtotal = it6505_read_word(it6505, REG_INPUT_VTOTAL) & 0xFFF; + vdes = it6505_read_word(it6505, REG_INPUT_VACTIVE_START) & 0xFFF; + vdew = it6505_read_word(it6505, REG_INPUT_VACTIVE_WIDTH) & 0xFFF; + vfph = it6505_read_word(it6505, REG_INPUT_VFRONT_PORCH) & 0xFFF; + vsyncw = it6505_read_word(it6505, REG_INPUT_VSYNC_WIDTH) & 0xFFF; + + DRM_DEV_DEBUG_DRIVER(dev, "hsync_pol:%d, vsync_pol:%d, interlaced:%d", + hsync_pol, vsync_pol, interlaced); + DRM_DEV_DEBUG_DRIVER(dev, "hactive_start:%d, vactive_start:%d", + hdes, vdes); + + for (i = 0; i < 10; i++) { + it6505_set_bits(it6505, REG_DATA_CTRL0, ENABLE_PCLK_COUNTER, + ENABLE_PCLK_COUNTER); + usleep_range(10000, 15000); + it6505_set_bits(it6505, REG_DATA_CTRL0, ENABLE_PCLK_COUNTER, + 0x00); + rddata = it6505_read_word(it6505, REG_PCLK_COUNTER_VALUE) & + 0xFFF; + + sum += rddata; + } + + if (sum == 0) { + DRM_DEV_DEBUG_DRIVER(dev, "calc video timing error"); + return; + } + + sum /= 10; + pclk = 13500 * 2048 / sum; + it6505->video_info.clock = pclk; + it6505->video_info.hdisplay = hdew; + it6505->video_info.hsync_start = hdew + hfph; + it6505->video_info.hsync_end = hdew + hfph + hsyncw; + it6505->video_info.htotal = htotal; + it6505->video_info.vdisplay = vdew; + it6505->video_info.vsync_start = vdew + vfph; + it6505->video_info.vsync_end = vdew + vfph + vsyncw; + it6505->video_info.vtotal = vtotal; + + DRM_DEV_DEBUG_DRIVER(dev, DRM_MODE_FMT, + DRM_MODE_ARG(&it6505->video_info)); +} + +static int it6505_drm_dp_link_probe(struct drm_dp_aux *aux, + struct it6505_drm_dp_link *link) +{ + u8 values[3]; + int err; + + memset(link, 0, sizeof(*link)); + + err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values)); + if (err < 0) + return err; + + link->revision = values[0]; + link->rate = drm_dp_bw_code_to_link_rate(values[1]); + link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK; + + if (values[2] & DP_ENHANCED_FRAME_CAP) + link->capabilities = DP_ENHANCED_FRAME_CAP; + + return 0; +} + +static int it6505_drm_dp_link_power_up(struct drm_dp_aux *aux, + struct it6505_drm_dp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < DPCD_V_1_1) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + /* + * According to the DP 1.1 specification, a "Sink Device must exit the + * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink + * Control Field" (register 0x600). 
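 * For illustration, a minimal sketch of the matching power-down step,
 * using the same drm_dp_dpcd_* helpers and the standard DP_SET_POWER
 * defines (D3 instead of D0):
 *
 *	drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
 *	value &= ~DP_SET_POWER_MASK;
 *	value |= DP_SET_POWER_D3;
 *	drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
 *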
+ */ + usleep_range(1000, 2000); + + return 0; +} + +static void it6505_clear_int(struct it6505 *it6505) +{ + it6505_write(it6505, INT_STATUS_01, 0xFF); + it6505_write(it6505, INT_STATUS_02, 0xFF); + it6505_write(it6505, INT_STATUS_03, 0xFF); +} + +static void it6505_int_mask_enable(struct it6505 *it6505) +{ + it6505_write(it6505, INT_MASK_01, BIT(INT_HPD_CHANGE) | + BIT(INT_RECEIVE_HPD_IRQ) | BIT(INT_SCDT_CHANGE) | + BIT(INT_HDCP_FAIL) | BIT(INT_HDCP_DONE)); + + it6505_write(it6505, INT_MASK_02, BIT(INT_AUX_CMD_FAIL) | + BIT(INT_HDCP_KSV_CHECK) | BIT(INT_AUDIO_FIFO_ERROR)); + + it6505_write(it6505, INT_MASK_03, BIT(INT_LINK_TRAIN_FAIL) | + BIT(INT_VID_FIFO_ERROR) | BIT(INT_IO_LATCH_FIFO_OVERFLOW)); +} + +static void it6505_int_mask_disable(struct it6505 *it6505) +{ + it6505_write(it6505, INT_MASK_01, 0x00); + it6505_write(it6505, INT_MASK_02, 0x00); + it6505_write(it6505, INT_MASK_03, 0x00); +} + +static void it6505_lane_termination_on(struct it6505 *it6505) +{ + int regcf; + + regcf = it6505_read(it6505, REG_USER_DRV_PRE); + + if (regcf == MISC_VERB) + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x80, 0x00); + + if (regcf == MISC_VERC) { + if (it6505->lane_swap) { + switch (it6505->lane_count) { + case 1: + case 2: + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, + 0x0C, 0x08); + break; + default: + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, + 0x0C, 0x0C); + break; + } + } else { + switch (it6505->lane_count) { + case 1: + case 2: + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, + 0x0C, 0x04); + break; + default: + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, + 0x0C, 0x0C); + break; + } + } + } +} + +static void it6505_lane_termination_off(struct it6505 *it6505) +{ + int regcf; + + regcf = it6505_read(it6505, REG_USER_DRV_PRE); + + if (regcf == MISC_VERB) + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x80, 0x80); + + if (regcf == MISC_VERC) + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x0C, 0x00); +} + +static void it6505_lane_power_on(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_LINK_DRV, 0xF1, + (it6505->lane_swap ? 
+ GENMASK(7, 8 - it6505->lane_count) : + GENMASK(3 + it6505->lane_count, 4)) | + 0x01); +} + +static void it6505_lane_power_off(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_LINK_DRV, 0xF0, 0x00); +} + +static void it6505_lane_off(struct it6505 *it6505) +{ + it6505_lane_power_off(it6505); + it6505_lane_termination_off(it6505); +} + +static void it6505_aux_termination_on(struct it6505 *it6505) +{ + int regcf; + + regcf = it6505_read(it6505, REG_USER_DRV_PRE); + + if (regcf == MISC_VERB) + it6505_lane_termination_on(it6505); + + if (regcf == MISC_VERC) + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x80, 0x80); +} + +static void it6505_aux_power_on(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_AUX, 0x02, 0x02); +} + +static void it6505_aux_on(struct it6505 *it6505) +{ + it6505_aux_power_on(it6505); + it6505_aux_termination_on(it6505); +} + +static void it6505_aux_reset(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_RESET_CTRL, AUX_RESET, AUX_RESET); + it6505_set_bits(it6505, REG_RESET_CTRL, AUX_RESET, 0x00); +} + +static void it6505_reset_logic(struct it6505 *it6505) +{ + regmap_write(it6505->regmap, REG_RESET_CTRL, ALL_LOGIC_RESET); + usleep_range(1000, 1500); +} + +static bool it6505_aux_op_finished(struct it6505 *it6505) +{ + int reg2b = it6505_read(it6505, REG_AUX_CMD_REQ); + + if (reg2b < 0) + return false; + + return (reg2b & AUX_BUSY) == 0; +} + +static int it6505_aux_wait(struct it6505 *it6505) +{ + int status; + unsigned long timeout; + struct device *dev = &it6505->client->dev; + + timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1; + + while (!it6505_aux_op_finished(it6505)) { + if (time_after(jiffies, timeout)) { + dev_err(dev, "Timed out waiting AUX to finish"); + return -ETIMEDOUT; + } + usleep_range(1000, 2000); + } + + status = it6505_read(it6505, REG_AUX_ERROR_STS); + if (status < 0) { + dev_err(dev, "Failed to read AUX channel: %d", status); + return status; + } + + return 0; +} + +static ssize_t it6505_aux_operation(struct it6505 *it6505, + enum aux_cmd_type cmd, + unsigned int address, u8 *buffer, + size_t size, enum aux_cmd_reply *reply) +{ + int i, ret; + bool aux_write_check = false; + + if (!it6505_get_sink_hpd_status(it6505)) + return -EIO; + + /* set AUX user mode */ + it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, AUX_USER_MODE); + +aux_op_start: + if (cmd == CMD_AUX_I2C_EDID_READ) { + /* AUX EDID FIFO has max length of AUX_FIFO_MAX_SIZE bytes. */ + size = min_t(size_t, size, AUX_FIFO_MAX_SIZE); + /* Enable AUX FIFO read back and clear FIFO */ + it6505_set_bits(it6505, REG_AUX_CTRL, + AUX_EN_FIFO_READ | CLR_EDID_FIFO, + AUX_EN_FIFO_READ | CLR_EDID_FIFO); + + it6505_set_bits(it6505, REG_AUX_CTRL, + AUX_EN_FIFO_READ | CLR_EDID_FIFO, + AUX_EN_FIFO_READ); + } else { + /* The DP AUX transmit buffer has 4 bytes. 
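 * Larger native transfers are therefore split into 4-byte chunks by
 * it6505_aux_do_transfer() further below; a minimal sketch of that
 * pattern:
 *
 *	for (i = 0; i < size; i += 4)
 *		it6505_aux_operation(it6505, cmd, address + i, buffer + i,
 *				     min_t(size_t, size - i, 4), &reply);
 *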
*/ + size = min_t(size_t, size, 4); + it6505_set_bits(it6505, REG_AUX_CTRL, AUX_NO_SEGMENT_WR, + AUX_NO_SEGMENT_WR); + } + + /* Start Address[7:0] */ + it6505_write(it6505, REG_AUX_ADR_0_7, (address >> 0) & 0xFF); + /* Start Address[15:8] */ + it6505_write(it6505, REG_AUX_ADR_8_15, (address >> 8) & 0xFF); + /* WriteNum[3:0]+StartAdr[19:16] */ + it6505_write(it6505, REG_AUX_ADR_16_19, + ((address >> 16) & 0x0F) | ((size - 1) << 4)); + + if (cmd == CMD_AUX_NATIVE_WRITE) + regmap_bulk_write(it6505->regmap, REG_AUX_OUT_DATA0, buffer, + size); + + /* Aux Fire */ + it6505_write(it6505, REG_AUX_CMD_REQ, cmd); + + ret = it6505_aux_wait(it6505); + if (ret < 0) + goto aux_op_err; + + ret = it6505_read(it6505, REG_AUX_ERROR_STS); + if (ret < 0) + goto aux_op_err; + + switch ((ret >> 6) & 0x3) { + case 0: + *reply = REPLY_ACK; + break; + case 1: + *reply = REPLY_DEFER; + ret = -EAGAIN; + goto aux_op_err; + case 2: + *reply = REPLY_NACK; + ret = -EIO; + goto aux_op_err; + case 3: + ret = -ETIMEDOUT; + goto aux_op_err; + } + + /* Read back Native Write data */ + if (cmd == CMD_AUX_NATIVE_WRITE) { + aux_write_check = true; + cmd = CMD_AUX_NATIVE_READ; + goto aux_op_start; + } + + if (cmd == CMD_AUX_I2C_EDID_READ) { + for (i = 0; i < size; i++) { + ret = it6505_read(it6505, REG_AUX_DATA_FIFO); + if (ret < 0) + goto aux_op_err; + buffer[i] = ret; + } + } else { + for (i = 0; i < size; i++) { + ret = it6505_read(it6505, REG_AUX_DATA_0_7 + i); + if (ret < 0) + goto aux_op_err; + + if (aux_write_check && buffer[size - 1 - i] != ret) { + ret = -EINVAL; + goto aux_op_err; + } + + buffer[size - 1 - i] = ret; + } + } + + ret = i; + +aux_op_err: + if (cmd == CMD_AUX_I2C_EDID_READ) { + /* clear AUX FIFO */ + it6505_set_bits(it6505, REG_AUX_CTRL, + AUX_EN_FIFO_READ | CLR_EDID_FIFO, + AUX_EN_FIFO_READ | CLR_EDID_FIFO); + it6505_set_bits(it6505, REG_AUX_CTRL, + AUX_EN_FIFO_READ | CLR_EDID_FIFO, 0x00); + } + + /* Leave AUX user mode */ + it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, 0); + + return ret; +} + +static ssize_t it6505_aux_do_transfer(struct it6505 *it6505, + enum aux_cmd_type cmd, + unsigned int address, u8 *buffer, + size_t size, enum aux_cmd_reply *reply) +{ + int i, ret_size, ret = 0, request_size; + + mutex_lock(&it6505->aux_lock); + for (i = 0; i < size; i += 4) { + request_size = min((int)size - i, 4); + ret_size = it6505_aux_operation(it6505, cmd, address + i, + buffer + i, request_size, + reply); + if (ret_size < 0) { + ret = ret_size; + goto aux_op_err; + } + + ret += ret_size; + } + +aux_op_err: + mutex_unlock(&it6505->aux_lock); + return ret; +} + +static ssize_t it6505_aux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) +{ + struct it6505 *it6505 = container_of(aux, struct it6505, aux); + u8 cmd; + bool is_i2c = !(msg->request & DP_AUX_NATIVE_WRITE); + int ret; + enum aux_cmd_reply reply; + + /* IT6505 doesn't support arbitrary I2C read / write. 
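 * EDID is instead read through the chip's AUX FIFO command
 * (CMD_AUX_I2C_EDID_READ) via it6505_get_edid_block() below, the kind
 * of block-read helper normally handed to drm_do_get_edid(); a sketch,
 * assuming a connector is at hand:
 *
 *	edid = drm_do_get_edid(connector, it6505_get_edid_block, it6505);
 *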
*/ + if (is_i2c) + return -EINVAL; + + switch (msg->request) { + case DP_AUX_NATIVE_READ: + cmd = CMD_AUX_NATIVE_READ; + break; + case DP_AUX_NATIVE_WRITE: + cmd = CMD_AUX_NATIVE_WRITE; + break; + default: + return -EINVAL; + } + + ret = it6505_aux_do_transfer(it6505, cmd, msg->address, msg->buffer, + msg->size, &reply); + if (ret < 0) + return ret; + + switch (reply) { + case REPLY_ACK: + msg->reply = DP_AUX_NATIVE_REPLY_ACK; + break; + case REPLY_NACK: + msg->reply = DP_AUX_NATIVE_REPLY_NACK; + break; + case REPLY_DEFER: + msg->reply = DP_AUX_NATIVE_REPLY_DEFER; + break; + } + + return ret; +} + +static int it6505_get_edid_block(void *data, u8 *buf, unsigned int block, + size_t len) +{ + struct it6505 *it6505 = data; + struct device *dev = &it6505->client->dev; + enum aux_cmd_reply reply; + int offset, ret, aux_retry = 100; + + it6505_aux_reset(it6505); + DRM_DEV_DEBUG_DRIVER(dev, "block number = %d", block); + + for (offset = 0; offset < EDID_LENGTH;) { + ret = it6505_aux_do_transfer(it6505, CMD_AUX_I2C_EDID_READ, + block * EDID_LENGTH + offset, + buf + offset, 8, &reply); + + if (ret < 0 && ret != -EAGAIN) + return ret; + + switch (reply) { + case REPLY_ACK: + DRM_DEV_DEBUG_DRIVER(dev, "[0x%02x]: %8ph", offset, + buf + offset); + offset += 8; + aux_retry = 100; + break; + case REPLY_NACK: + return -EIO; + case REPLY_DEFER: + msleep(20); + if (!(--aux_retry)) + return -EIO; + } + } + + return 0; +} + +static void it6505_variable_config(struct it6505 *it6505) +{ + it6505->link_rate_bw_code = HBR; + it6505->lane_count = MAX_LANE_COUNT; + it6505->link_state = LINK_IDLE; + it6505->hdcp_desired = HDCP_DESIRED; + it6505->auto_train_retry = AUTO_TRAIN_RETRY; + it6505->audio.select = AUDIO_SELECT; + it6505->audio.sample_rate = AUDIO_SAMPLE_RATE; + it6505->audio.channel_count = AUDIO_CHANNEL_COUNT; + it6505->audio.type = AUDIO_TYPE; + it6505->audio.i2s_input_format = I2S_INPUT_FORMAT; + it6505->audio.i2s_justified = I2S_JUSTIFIED; + it6505->audio.i2s_data_delay = I2S_DATA_DELAY; + it6505->audio.i2s_ws_channel = I2S_WS_CHANNEL; + it6505->audio.i2s_data_sequence = I2S_DATA_SEQUENCE; + it6505->audio.word_length = AUDIO_WORD_LENGTH; + memset(it6505->sha1_input, 0, sizeof(it6505->sha1_input)); + memset(it6505->bksvs, 0, sizeof(it6505->bksvs)); +} + +static int it6505_send_video_infoframe(struct it6505 *it6505, + struct hdmi_avi_infoframe *frame) +{ + u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; + int err; + struct device *dev = &it6505->client->dev; + + err = hdmi_avi_infoframe_pack(frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(dev, "Failed to pack AVI infoframe: %d", err); + return err; + } + + err = it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AVI_PKT, 0x00); + if (err) + return err; + + err = regmap_bulk_write(it6505->regmap, REG_AVI_INFO_DB1, + buffer + HDMI_INFOFRAME_HEADER_SIZE, + frame->length); + if (err) + return err; + + err = it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AVI_PKT, + EN_AVI_PKT); + if (err) + return err; + + return 0; +} + +static void it6505_get_extcon_property(struct it6505 *it6505) +{ + int err; + union extcon_property_value property; + struct device *dev = &it6505->client->dev; + + if (it6505->extcon && !it6505->lane_swap_disabled) { + err = extcon_get_property(it6505->extcon, EXTCON_DISP_DP, + EXTCON_PROP_USB_TYPEC_POLARITY, + &property); + if (err) { + dev_err(dev, "get property fail!"); + return; + } + it6505->lane_swap = property.intval; + } +} + +static void it6505_clk_phase_adjustment(struct it6505 *it6505, + const struct 
drm_display_mode *mode) +{ + int clock = mode->clock; + + it6505_set_bits(it6505, REG_CLK_CTRL0, M_PCLK_DELAY, + clock < ADJUST_PHASE_THRESHOLD ? PIXEL_CLK_DELAY : 0); + it6505_set_bits(it6505, REG_DATA_CTRL0, VIDEO_LATCH_EDGE, + PIXEL_CLK_INVERSE << 4); +} + +static void it6505_link_reset_step_train(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_TRAIN_CTRL0, + FORCE_CR_DONE | FORCE_EQ_DONE, 0x00); + it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); +} + +static void it6505_init(struct it6505 *it6505) +{ + it6505_write(it6505, REG_AUX_OPT, AUX_AUTO_RST | AUX_FIX_FREQ); + it6505_write(it6505, REG_AUX_CTRL, AUX_NO_SEGMENT_WR); + it6505_write(it6505, REG_HDCP_CTRL2, HDCP_AN_SEL | HDCP_HW_HPDIRQ_ACT); + it6505_write(it6505, REG_VID_BUS_CTRL0, IN_DDR | DDR_CD); + it6505_write(it6505, REG_VID_BUS_CTRL1, 0x01); + it6505_write(it6505, REG_AUDIO_CTRL0, AUDIO_16B_BOUND); + + /* chip internal setting, don't modify */ + it6505_write(it6505, REG_HPD_IRQ_TIME, 0xF5); + it6505_write(it6505, REG_AUX_DEBUG_MODE, 0x4D); + it6505_write(it6505, REG_AUX_OPT2, 0x17); + it6505_write(it6505, REG_HDCP_OPT, 0x60); + it6505_write(it6505, REG_DATA_MUTE_CTRL, + EN_VID_MUTE | EN_AUD_MUTE | ENABLE_AUTO_VIDEO_FIFO_RESET); + it6505_write(it6505, REG_TIME_STMP_CTRL, + EN_SSC_GAT | EN_ENHANCE_VID_STMP | EN_ENHANCE_AUD_STMP); + it6505_write(it6505, REG_INFOFRAME_CTRL, 0x00); + it6505_write(it6505, REG_BANK_SEL, 0x01); + it6505_write(it6505, REG_DRV_0_DB_800_MV, + afe_setting_table[it6505->afe_setting][0]); + it6505_write(it6505, REG_PRE_0_DB_800_MV, + afe_setting_table[it6505->afe_setting][1]); + it6505_write(it6505, REG_PRE_3P5_DB_800_MV, + afe_setting_table[it6505->afe_setting][2]); + it6505_write(it6505, REG_SSC_CTRL0, 0x9E); + it6505_write(it6505, REG_SSC_CTRL1, 0x1C); + it6505_write(it6505, REG_SSC_CTRL2, 0x42); + it6505_write(it6505, REG_BANK_SEL, 0x00); +} + +static void it6505_video_disable(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, EN_VID_MUTE); + it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_VID_CTRL_PKT, 0x00); + it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET); +} + +static void it6505_video_reset(struct it6505 *it6505) +{ + it6505_link_reset_step_train(it6505); + it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, EN_VID_MUTE); + it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_VID_CTRL_PKT, 0x00); + it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET); + it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, RST_501_FIFO); + it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, 0x00); + it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, 0x00); +} + +static void it6505_update_video_parameter(struct it6505 *it6505, + const struct drm_display_mode *mode) +{ + it6505_clk_phase_adjustment(it6505, mode); + it6505_video_disable(it6505); +} + +static bool it6505_audio_input(struct it6505 *it6505) +{ + int reg05, regbe; + + reg05 = it6505_read(it6505, REG_RESET_CTRL); + it6505_set_bits(it6505, REG_RESET_CTRL, AUDIO_RESET, 0x00); + usleep_range(3000, 4000); + regbe = it6505_read(it6505, REG_AUDIO_INPUT_FREQ); + it6505_write(it6505, REG_RESET_CTRL, reg05); + + return regbe != 0xFF; +} + +static void it6505_setup_audio_channel_status(struct it6505 *it6505) +{ + enum it6505_audio_sample_rate sample_rate = it6505->audio.sample_rate; + u8 audio_word_length_map[] = { 0x02, 0x04, 0x03, 0x0B }; + + /* Channel Status */ + it6505_write(it6505, REG_IEC958_STS0, it6505->audio.type << 1); + it6505_write(it6505, 
REG_IEC958_STS1, 0x00); + it6505_write(it6505, REG_IEC958_STS2, 0x00); + it6505_write(it6505, REG_IEC958_STS3, sample_rate); + it6505_write(it6505, REG_IEC958_STS4, (~sample_rate << 4) | + audio_word_length_map[it6505->audio.word_length]); +} + +static void it6505_setup_audio_format(struct it6505 *it6505) +{ + /* I2S MODE */ + it6505_write(it6505, REG_AUDIO_FMT, + (it6505->audio.word_length << 5) | + (it6505->audio.i2s_data_sequence << 4) | + (it6505->audio.i2s_ws_channel << 3) | + (it6505->audio.i2s_data_delay << 2) | + (it6505->audio.i2s_justified << 1) | + it6505->audio.i2s_input_format); + if (it6505->audio.select == SPDIF) { + it6505_write(it6505, REG_AUDIO_FIFO_SEL, 0x00); + /* 0x30 = 128*FS */ + it6505_set_bits(it6505, REG_AUX_OPT, 0xF0, 0x30); + } else { + it6505_write(it6505, REG_AUDIO_FIFO_SEL, 0xE4); + } + + it6505_write(it6505, REG_AUDIO_CTRL0, 0x20); + it6505_write(it6505, REG_AUDIO_CTRL1, 0x00); +} + +static void it6505_enable_audio_source(struct it6505 *it6505) +{ + unsigned int audio_source_count; + + audio_source_count = BIT(DIV_ROUND_UP(it6505->audio.channel_count, 2)) + - 1; + + audio_source_count |= it6505->audio.select << 4; + + it6505_write(it6505, REG_AUDIO_SRC_CTRL, audio_source_count); +} + +static void it6505_enable_audio_infoframe(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + u8 audio_info_ca[] = { 0x00, 0x00, 0x01, 0x03, 0x07, 0x0B, 0x0F, 0x1F }; + + DRM_DEV_DEBUG_DRIVER(dev, "infoframe channel_allocation:0x%02x", + audio_info_ca[it6505->audio.channel_count - 1]); + + it6505_write(it6505, REG_AUD_INFOFRAM_DB1, it6505->audio.channel_count + - 1); + it6505_write(it6505, REG_AUD_INFOFRAM_DB2, 0x00); + it6505_write(it6505, REG_AUD_INFOFRAM_DB3, + audio_info_ca[it6505->audio.channel_count - 1]); + it6505_write(it6505, REG_AUD_INFOFRAM_DB4, 0x00); + it6505_write(it6505, REG_AUD_INFOFRAM_SUM, 0x00); + + /* Enable Audio InfoFrame */ + it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AUD_CTRL_PKT, + EN_AUD_CTRL_PKT); +} + +static void it6505_disable_audio(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_AUD_MUTE, EN_AUD_MUTE); + it6505_set_bits(it6505, REG_AUDIO_SRC_CTRL, M_AUDIO_I2S_EN, 0x00); + it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AUD_CTRL_PKT, 0x00); + it6505_set_bits(it6505, REG_RESET_CTRL, AUDIO_RESET, AUDIO_RESET); +} + +static void it6505_enable_audio(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + int regbe; + + DRM_DEV_DEBUG_DRIVER(dev, "start"); + it6505_disable_audio(it6505); + + it6505_setup_audio_channel_status(it6505); + it6505_setup_audio_format(it6505); + it6505_enable_audio_source(it6505); + it6505_enable_audio_infoframe(it6505); + + it6505_write(it6505, REG_AUDIO_N_0_7, 0x00); + it6505_write(it6505, REG_AUDIO_N_8_15, 0x80); + it6505_write(it6505, REG_AUDIO_N_16_23, 0x00); + + it6505_set_bits(it6505, REG_AUDIO_SRC_CTRL, AUDIO_FIFO_RESET, + AUDIO_FIFO_RESET); + it6505_set_bits(it6505, REG_AUDIO_SRC_CTRL, AUDIO_FIFO_RESET, 0x00); + it6505_set_bits(it6505, REG_RESET_CTRL, AUDIO_RESET, 0x00); + regbe = it6505_read(it6505, REG_AUDIO_INPUT_FREQ); + DRM_DEV_DEBUG_DRIVER(dev, "regbe:0x%02x audio input fs: %d.%d kHz", + regbe, 6750 / regbe, (6750 % regbe) * 10 / regbe); + it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_AUD_MUTE, 0x00); +} + +static bool it6505_use_step_train_check(struct it6505 *it6505) +{ + if (it6505->link.revision >= 0x12) + return it6505->dpcd[DP_TRAINING_AUX_RD_INTERVAL] >= 0x01; + + return true; +} + +static void it6505_parse_link_capabilities(struct it6505 
*it6505) +{ + struct device *dev = &it6505->client->dev; + struct it6505_drm_dp_link *link = &it6505->link; + int bcaps; + + if (it6505->dpcd[0] == 0) { + it6505_aux_on(it6505); + it6505_get_dpcd(it6505, DP_DPCD_REV, it6505->dpcd, + ARRAY_SIZE(it6505->dpcd)); + } + + DRM_DEV_DEBUG_DRIVER(dev, "DPCD Rev.: %d.%d", + link->revision >> 4, link->revision & 0x0F); + + DRM_DEV_DEBUG_DRIVER(dev, "Sink max link rate: %d.%02d Gbps per lane", + link->rate / 100000, link->rate / 1000 % 100); + + it6505->link_rate_bw_code = drm_dp_link_rate_to_bw_code(link->rate); + DRM_DEV_DEBUG_DRIVER(dev, "link rate bw code:0x%02x", + it6505->link_rate_bw_code); + it6505->link_rate_bw_code = min_t(int, it6505->link_rate_bw_code, + MAX_LINK_RATE); + + it6505->lane_count = link->num_lanes; + DRM_DEV_DEBUG_DRIVER(dev, "Sink support %d lanes training", + it6505->lane_count); + it6505->lane_count = min_t(int, it6505->lane_count, MAX_LANE_COUNT); + + it6505->branch_device = drm_dp_is_branch(it6505->dpcd); + DRM_DEV_DEBUG_DRIVER(dev, "Sink %sbranch device", + it6505->branch_device ? "" : "Not "); + + it6505->enable_enhanced_frame = link->capabilities; + DRM_DEV_DEBUG_DRIVER(dev, "Sink %sSupport Enhanced Framing", + it6505->enable_enhanced_frame ? "" : "Not "); + + it6505->enable_ssc = (it6505->dpcd[DP_MAX_DOWNSPREAD] & + DP_MAX_DOWNSPREAD_0_5); + DRM_DEV_DEBUG_DRIVER(dev, "Maximum Down-Spread: %s, %ssupport SSC!", + it6505->enable_ssc ? "0.5" : "0", + it6505->enable_ssc ? "" : "Not "); + + it6505->step_train = it6505_use_step_train_check(it6505); + if (it6505->step_train) + DRM_DEV_DEBUG_DRIVER(dev, "auto train fail, will step train"); + + bcaps = it6505_dpcd_read(it6505, DP_AUX_HDCP_BCAPS); + DRM_DEV_DEBUG_DRIVER(dev, "bcaps:0x%02x", bcaps); + if (bcaps & DP_BCAPS_HDCP_CAPABLE) { + it6505->is_repeater = (bcaps & DP_BCAPS_REPEATER_PRESENT); + DRM_DEV_DEBUG_DRIVER(dev, "Support HDCP! Downstream is %s!", + it6505->is_repeater ? "repeater" : + "receiver"); + } else { + DRM_DEV_DEBUG_DRIVER(dev, "Sink not support HDCP!"); + it6505->hdcp_desired = false; + } + DRM_DEV_DEBUG_DRIVER(dev, "HDCP %s", + it6505->hdcp_desired ? "desired" : "undesired"); +} + +static void it6505_setup_ssc(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_TRAIN_CTRL0, SPREAD_AMP_5, + it6505->enable_ssc ? SPREAD_AMP_5 : 0x00); + if (it6505->enable_ssc) { + it6505_write(it6505, REG_BANK_SEL, 0x01); + it6505_write(it6505, REG_SSC_CTRL0, 0x9E); + it6505_write(it6505, REG_SSC_CTRL1, 0x1C); + it6505_write(it6505, REG_SSC_CTRL2, 0x42); + it6505_write(it6505, REG_BANK_SEL, 0x00); + it6505_write(it6505, REG_SP_CTRL0, 0x07); + it6505_write(it6505, REG_IP_CTRL1, 0x29); + it6505_write(it6505, REG_IP_CTRL2, 0x03); + /* Stamp Interrupt Step */ + it6505_set_bits(it6505, REG_TIME_STMP_CTRL, M_STAMP_STEP, + 0x10); + it6505_dpcd_write(it6505, DP_DOWNSPREAD_CTRL, + DP_SPREAD_AMP_0_5); + } else { + it6505_dpcd_write(it6505, DP_DOWNSPREAD_CTRL, 0x00); + it6505_set_bits(it6505, REG_TIME_STMP_CTRL, M_STAMP_STEP, + 0x00); + } +} + +static inline void it6505_link_rate_setup(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_LBR, + (it6505->link_rate_bw_code == RBR) ? FORCE_LBR : 0x00); + it6505_set_bits(it6505, REG_LINK_DRV, DRV_HS, + (it6505->link_rate_bw_code == RBR) ? 0x00 : DRV_HS); +} + +static void it6505_lane_count_setup(struct it6505 *it6505) +{ + it6505_get_extcon_property(it6505); + it6505_set_bits(it6505, REG_TRAIN_CTRL0, LANE_SWAP, + it6505->lane_swap ? 
LANE_SWAP : 0x00); + it6505_set_bits(it6505, REG_TRAIN_CTRL0, LANE_COUNT_MASK, + (it6505->lane_count - 1) << 1); +} + +static void it6505_link_training_setup(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + if (it6505->enable_enhanced_frame) + it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, + ENABLE_ENHANCED_FRAME, ENABLE_ENHANCED_FRAME); + + it6505_link_rate_setup(it6505); + it6505_lane_count_setup(it6505); + it6505_setup_ssc(it6505); + DRM_DEV_DEBUG_DRIVER(dev, + "%s, %d lanes, %sable ssc, %sable enhanced frame", + it6505->link_rate_bw_code != RBR ? "HBR" : "RBR", + it6505->lane_count, + it6505->enable_ssc ? "en" : "dis", + it6505->enable_enhanced_frame ? "en" : "dis"); +} + +static bool it6505_link_start_auto_train(struct it6505 *it6505) +{ + int timeout = 500, link_training_state; + bool state = false; + + mutex_lock(&it6505->aux_lock); + it6505_set_bits(it6505, REG_TRAIN_CTRL0, + FORCE_CR_DONE | FORCE_EQ_DONE, 0x00); + it6505_write(it6505, REG_TRAIN_CTRL1, FORCE_RETRAIN); + it6505_write(it6505, REG_TRAIN_CTRL1, AUTO_TRAIN); + + while (timeout > 0) { + usleep_range(1000, 2000); + link_training_state = it6505_read(it6505, REG_LINK_TRAIN_STS); + + if (link_training_state > 0 && + (link_training_state & LINK_STATE_NORP)) { + state = true; + goto unlock; + } + + timeout--; + } +unlock: + mutex_unlock(&it6505->aux_lock); + + return state; +} + +static int it6505_drm_dp_link_configure(struct it6505 *it6505) +{ + u8 values[2]; + int err; + struct drm_dp_aux *aux = &it6505->aux; + + values[0] = it6505->link_rate_bw_code; + values[1] = it6505->lane_count; + + if (it6505->enable_enhanced_frame) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); + if (err < 0) + return err; + + return 0; +} + +static bool it6505_check_voltage_swing_max(u8 lane_voltage_swing_pre_emphasis) +{ + return ((lane_voltage_swing_pre_emphasis & 0x03) == MAX_CR_LEVEL); +} + +static bool it6505_check_pre_emphasis_max(u8 lane_voltage_swing_pre_emphasis) +{ + return ((lane_voltage_swing_pre_emphasis & 0x03) == MAX_EQ_LEVEL); +} + +static bool it6505_check_max_voltage_swing_reached(u8 *lane_voltage_swing, + u8 lane_count) +{ + u8 i; + + for (i = 0; i < lane_count; i++) { + if (lane_voltage_swing[i] & DP_TRAIN_MAX_SWING_REACHED) + return true; + } + + return false; +} + +static bool +step_train_lane_voltage_para_set(struct it6505 *it6505, + struct it6505_step_train_para + *lane_voltage_pre_emphasis, + u8 *lane_voltage_pre_emphasis_set) +{ + u8 *voltage_swing = lane_voltage_pre_emphasis->voltage_swing; + u8 *pre_emphasis = lane_voltage_pre_emphasis->pre_emphasis; + u8 i; + + for (i = 0; i < it6505->lane_count; i++) { + voltage_swing[i] &= 0x03; + lane_voltage_pre_emphasis_set[i] = voltage_swing[i]; + if (it6505_check_voltage_swing_max(voltage_swing[i])) + lane_voltage_pre_emphasis_set[i] |= + DP_TRAIN_MAX_SWING_REACHED; + + pre_emphasis[i] &= 0x03; + lane_voltage_pre_emphasis_set[i] |= pre_emphasis[i] + << DP_TRAIN_PRE_EMPHASIS_SHIFT; + if (it6505_check_pre_emphasis_max(pre_emphasis[i])) + lane_voltage_pre_emphasis_set[i] |= + DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + it6505_dpcd_write(it6505, DP_TRAINING_LANE0_SET + i, + lane_voltage_pre_emphasis_set[i]); + + if (lane_voltage_pre_emphasis_set[i] != + it6505_dpcd_read(it6505, DP_TRAINING_LANE0_SET + i)) + return false; + } + + return true; +} + +static bool +it6505_step_cr_train(struct it6505 *it6505, + struct it6505_step_train_para *lane_voltage_pre_emphasis) +{ + u8 loop_count = 0, i = 0, j; 
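	/*
	 * Clock-recovery phase of manual ("step") training, in outline:
	 * select TRAINING_PATTERN_1, program the per-lane voltage swing and
	 * pre-emphasis through DP_TRAINING_LANE0_SET.., wait the clock
	 * recovery delay, then read the link status and adopt the sink's
	 * requested adjustments.  The loop below gives up after 10 passes,
	 * after 5 passes with an unchanged adjustment request, or once the
	 * maximum voltage swing has been reached.
	 */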
+ u8 link_status[DP_LINK_STATUS_SIZE] = { 0 }; + u8 lane_level_config[MAX_LANE_COUNT] = { 0 }; + int pre_emphasis_adjust = -1, voltage_swing_adjust = -1; + const struct drm_dp_aux *aux = &it6505->aux; + + it6505_dpcd_write(it6505, DP_DOWNSPREAD_CTRL, + it6505->enable_ssc ? DP_SPREAD_AMP_0_5 : 0x00); + it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_1); + + while (loop_count < 5 && i < 10) { + i++; + if (!step_train_lane_voltage_para_set(it6505, + lane_voltage_pre_emphasis, + lane_level_config)) + continue; + drm_dp_link_train_clock_recovery_delay(aux, it6505->dpcd); + drm_dp_dpcd_read_link_status(&it6505->aux, link_status); + + if (drm_dp_clock_recovery_ok(link_status, it6505->lane_count)) { + it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_CR_DONE, + FORCE_CR_DONE); + return true; + } + DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "cr not done"); + + if (it6505_check_max_voltage_swing_reached(lane_level_config, + it6505->lane_count)) + goto cr_train_fail; + + for (j = 0; j < it6505->lane_count; j++) { + lane_voltage_pre_emphasis->voltage_swing[j] = + drm_dp_get_adjust_request_voltage(link_status, + j) >> + DP_TRAIN_VOLTAGE_SWING_SHIFT; + lane_voltage_pre_emphasis->pre_emphasis[j] = + drm_dp_get_adjust_request_pre_emphasis(link_status, + j) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + if (voltage_swing_adjust == + lane_voltage_pre_emphasis->voltage_swing[j] && + pre_emphasis_adjust == + lane_voltage_pre_emphasis->pre_emphasis[j]) { + loop_count++; + continue; + } + + voltage_swing_adjust = + lane_voltage_pre_emphasis->voltage_swing[j]; + pre_emphasis_adjust = + lane_voltage_pre_emphasis->pre_emphasis[j]; + loop_count = 0; + + if (voltage_swing_adjust + pre_emphasis_adjust > + MAX_EQ_LEVEL) + lane_voltage_pre_emphasis->voltage_swing[j] = + MAX_EQ_LEVEL - + lane_voltage_pre_emphasis + ->pre_emphasis[j]; + } + } + +cr_train_fail: + it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); + + return false; +} + +static bool +it6505_step_eq_train(struct it6505 *it6505, + struct it6505_step_train_para *lane_voltage_pre_emphasis) +{ + u8 loop_count = 0, i, link_status[DP_LINK_STATUS_SIZE] = { 0 }; + u8 lane_level_config[MAX_LANE_COUNT] = { 0 }; + const struct drm_dp_aux *aux = &it6505->aux; + + it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_2); + + while (loop_count < 6) { + loop_count++; + + if (!step_train_lane_voltage_para_set(it6505, + lane_voltage_pre_emphasis, + lane_level_config)) + continue; + + drm_dp_link_train_channel_eq_delay(aux, it6505->dpcd); + drm_dp_dpcd_read_link_status(&it6505->aux, link_status); + + if (!drm_dp_clock_recovery_ok(link_status, it6505->lane_count)) + goto eq_train_fail; + + if (drm_dp_channel_eq_ok(link_status, it6505->lane_count)) { + it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); + it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_EQ_DONE, + FORCE_EQ_DONE); + return true; + } + DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "eq not done"); + + for (i = 0; i < it6505->lane_count; i++) { + lane_voltage_pre_emphasis->voltage_swing[i] = + drm_dp_get_adjust_request_voltage(link_status, + i) >> + DP_TRAIN_VOLTAGE_SWING_SHIFT; + lane_voltage_pre_emphasis->pre_emphasis[i] = + drm_dp_get_adjust_request_pre_emphasis(link_status, + i) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + + if (lane_voltage_pre_emphasis->voltage_swing[i] + + lane_voltage_pre_emphasis->pre_emphasis[i] > + MAX_EQ_LEVEL) + lane_voltage_pre_emphasis->voltage_swing[i] = + 0x03 - lane_voltage_pre_emphasis + 
->pre_emphasis[i]; + } + } + +eq_train_fail: + it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); + return false; +} + +static bool it6505_link_start_step_train(struct it6505 *it6505) +{ + int err; + struct it6505_step_train_para lane_voltage_pre_emphasis = { + .voltage_swing = { 0 }, + .pre_emphasis = { 0 }, + }; + + DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "start"); + err = it6505_drm_dp_link_configure(it6505); + + if (err < 0) + return false; + if (!it6505_step_cr_train(it6505, &lane_voltage_pre_emphasis)) + return false; + if (!it6505_step_eq_train(it6505, &lane_voltage_pre_emphasis)) + return false; + return true; +} + +static bool it6505_get_video_status(struct it6505 *it6505) +{ + int reg_0d; + + reg_0d = it6505_read(it6505, REG_SYSTEM_STS); + + if (reg_0d < 0) + return false; + + return reg_0d & VIDEO_STB; +} + +static void it6505_reset_hdcp(struct it6505 *it6505) +{ + it6505->hdcp_status = HDCP_AUTH_IDLE; + /* Disable CP_Desired */ + it6505_set_bits(it6505, REG_HDCP_CTRL1, HDCP_CP_ENABLE, 0x00); + it6505_set_bits(it6505, REG_RESET_CTRL, HDCP_RESET, HDCP_RESET); +} + +static void it6505_start_hdcp(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "start"); + it6505_reset_hdcp(it6505); + queue_delayed_work(system_wq, &it6505->hdcp_work, + msecs_to_jiffies(2400)); +} + +static void it6505_stop_hdcp(struct it6505 *it6505) +{ + it6505_reset_hdcp(it6505); + cancel_delayed_work(&it6505->hdcp_work); +} + +static bool it6505_hdcp_is_ksv_valid(u8 *ksv) +{ + int i, ones = 0; + + /* KSV has 20 1's and 20 0's */ + for (i = 0; i < DRM_HDCP_KSV_LEN; i++) + ones += hweight8(ksv[i]); + if (ones != 20) + return false; + return true; +} + +static void it6505_hdcp_part1_auth(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + u8 hdcp_bcaps; + + it6505_set_bits(it6505, REG_RESET_CTRL, HDCP_RESET, 0x00); + /* Disable CP_Desired */ + it6505_set_bits(it6505, REG_HDCP_CTRL1, HDCP_CP_ENABLE, 0x00); + + usleep_range(1000, 1500); + hdcp_bcaps = it6505_dpcd_read(it6505, DP_AUX_HDCP_BCAPS); + DRM_DEV_DEBUG_DRIVER(dev, "DPCD[0x68028]: 0x%02x", + hdcp_bcaps); + + if (!hdcp_bcaps) + return; + + /* clear the repeater List Chk Done and fail bit */ + it6505_set_bits(it6505, REG_HDCP_TRIGGER, + HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL, + 0x00); + + /* Enable An Generator */ + it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_AN_GEN, HDCP_AN_GEN); + /* delay1ms(10);*/ + usleep_range(10000, 15000); + /* Stop An Generator */ + it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_AN_GEN, 0x00); + + it6505_set_bits(it6505, REG_HDCP_CTRL1, HDCP_CP_ENABLE, HDCP_CP_ENABLE); + + it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_START, + HDCP_TRIGGER_START); + + it6505->hdcp_status = HDCP_AUTH_GOING; +} + +static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input, + unsigned int size, u8 *output_av) +{ + struct shash_desc *desc; + struct crypto_shash *tfm; + int err; + struct device *dev = &it6505->client->dev; + + tfm = crypto_alloc_shash("sha1", 0, 0); + if (IS_ERR(tfm)) { + dev_err(dev, "crypto_alloc_shash sha1 failed"); + return PTR_ERR(tfm); + } + desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); + if (!desc) { + crypto_free_shash(tfm); + return -ENOMEM; + } + + desc->tfm = tfm; + err = crypto_shash_digest(desc, sha1_input, size, output_av); + if (err) + dev_err(dev, "crypto_shash_digest sha1 failed"); + + crypto_free_shash(tfm); + kfree(desc); + return err; +} + +static int 
it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input) +{ + struct device *dev = &it6505->client->dev; + u8 binfo[2]; + int down_stream_count, i, err, msg_count = 0; + + err = it6505_get_dpcd(it6505, DP_AUX_HDCP_BINFO, binfo, + ARRAY_SIZE(binfo)); + + if (err < 0) { + dev_err(dev, "Read binfo value Fail"); + return err; + } + + down_stream_count = binfo[0] & 0x7F; + DRM_DEV_DEBUG_DRIVER(dev, "binfo:0x%*ph", (int)ARRAY_SIZE(binfo), + binfo); + + if ((binfo[0] & BIT(7)) || (binfo[1] & BIT(3))) { + dev_err(dev, "HDCP max cascade device exceed"); + return 0; + } + + if (!down_stream_count || + down_stream_count > MAX_HDCP_DOWN_STREAM_COUNT) { + dev_err(dev, "HDCP down stream count Error %d", + down_stream_count); + return 0; + } + + for (i = 0; i < down_stream_count; i++) { + err = it6505_get_dpcd(it6505, DP_AUX_HDCP_KSV_FIFO + + (i % 3) * DRM_HDCP_KSV_LEN, + sha1_input + msg_count, + DRM_HDCP_KSV_LEN); + + if (err < 0) + return err; + + msg_count += 5; + } + + it6505->hdcp_down_stream_count = down_stream_count; + sha1_input[msg_count++] = binfo[0]; + sha1_input[msg_count++] = binfo[1]; + + it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_EN_M0_READ, + HDCP_EN_M0_READ); + + err = regmap_bulk_read(it6505->regmap, REG_M0_0_7, + sha1_input + msg_count, 8); + + it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_EN_M0_READ, 0x00); + + if (err < 0) { + dev_err(dev, " Warning, Read M value Fail"); + return err; + } + + msg_count += 8; + + return msg_count; +} + +static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + u8 av[5][4], bv[5][4]; + int i, err; + + i = it6505_setup_sha1_input(it6505, it6505->sha1_input); + if (i <= 0) { + dev_err(dev, "SHA-1 Input length error %d", i); + return false; + } + + it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av); + + err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv, + sizeof(bv)); + + if (err < 0) { + dev_err(dev, "Read V' value Fail"); + return false; + } + + for (i = 0; i < 5; i++) + if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] || + bv[i][1] != av[i][2] || bv[i][0] != av[i][3]) + return false; + + DRM_DEV_DEBUG_DRIVER(dev, "V' all match!!"); + return true; +} + +static void it6505_hdcp_wait_ksv_list(struct work_struct *work) +{ + struct it6505 *it6505 = container_of(work, struct it6505, + hdcp_wait_ksv_list); + struct device *dev = &it6505->client->dev; + unsigned int timeout = 5000; + u8 bstatus = 0; + bool ksv_list_check; + + timeout /= 20; + while (timeout > 0) { + if (!it6505_get_sink_hpd_status(it6505)) + return; + + bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS); + + if (bstatus & DP_BSTATUS_READY) + break; + + msleep(20); + timeout--; + } + + if (timeout == 0) { + DRM_DEV_DEBUG_DRIVER(dev, "timeout and ksv list wait failed"); + goto timeout; + } + + ksv_list_check = it6505_hdcp_part2_ksvlist_check(it6505); + DRM_DEV_DEBUG_DRIVER(dev, "ksv list ready, ksv list check %s", + ksv_list_check ? 
"pass" : "fail"); + if (ksv_list_check) { + it6505_set_bits(it6505, REG_HDCP_TRIGGER, + HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE); + return; + } +timeout: + it6505_set_bits(it6505, REG_HDCP_TRIGGER, + HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL, + HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL); +} + +static void it6505_hdcp_work(struct work_struct *work) +{ + struct it6505 *it6505 = container_of(work, struct it6505, + hdcp_work.work); + struct device *dev = &it6505->client->dev; + int ret; + u8 link_status[DP_LINK_STATUS_SIZE] = { 0 }; + + DRM_DEV_DEBUG_DRIVER(dev, "start"); + + if (!it6505_get_sink_hpd_status(it6505)) + return; + + ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status); + DRM_DEV_DEBUG_DRIVER(dev, "ret: %d link_status: %*ph", ret, + (int)sizeof(link_status), link_status); + + if (ret < 0 || !drm_dp_channel_eq_ok(link_status, it6505->lane_count) || + !it6505_get_video_status(it6505)) { + DRM_DEV_DEBUG_DRIVER(dev, "link train not done or no video"); + return; + } + + ret = it6505_get_dpcd(it6505, DP_AUX_HDCP_BKSV, it6505->bksvs, + ARRAY_SIZE(it6505->bksvs)); + if (ret < 0) { + dev_err(dev, "fail to get bksv ret: %d", ret); + it6505_set_bits(it6505, REG_HDCP_TRIGGER, + HDCP_TRIGGER_KSV_FAIL, HDCP_TRIGGER_KSV_FAIL); + } + + DRM_DEV_DEBUG_DRIVER(dev, "bksv = 0x%*ph", + (int)ARRAY_SIZE(it6505->bksvs), it6505->bksvs); + + if (!it6505_hdcp_is_ksv_valid(it6505->bksvs)) { + dev_err(dev, "Display Port bksv not valid"); + it6505_set_bits(it6505, REG_HDCP_TRIGGER, + HDCP_TRIGGER_KSV_FAIL, HDCP_TRIGGER_KSV_FAIL); + } + + it6505_hdcp_part1_auth(it6505); +} + +static void it6505_show_hdcp_info(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + int i; + u8 *sha1 = it6505->sha1_input; + + DRM_DEV_DEBUG_DRIVER(dev, "hdcp_status: %d is_repeater: %d", + it6505->hdcp_status, it6505->is_repeater); + DRM_DEV_DEBUG_DRIVER(dev, "bksv = 0x%*ph", + (int)ARRAY_SIZE(it6505->bksvs), it6505->bksvs); + + if (it6505->is_repeater) { + DRM_DEV_DEBUG_DRIVER(dev, "hdcp_down_stream_count: %d", + it6505->hdcp_down_stream_count); + DRM_DEV_DEBUG_DRIVER(dev, "sha1_input: 0x%*ph", + (int)ARRAY_SIZE(it6505->sha1_input), + it6505->sha1_input); + for (i = 0; i < it6505->hdcp_down_stream_count; i++) { + DRM_DEV_DEBUG_DRIVER(dev, "KSV_%d = 0x%*ph", i, + DRM_HDCP_KSV_LEN, sha1); + sha1 += DRM_HDCP_KSV_LEN; + } + DRM_DEV_DEBUG_DRIVER(dev, "binfo: 0x%2ph M0: 0x%8ph", + sha1, sha1 + 2); + } +} + +static void it6505_stop_link_train(struct it6505 *it6505) +{ + it6505->link_state = LINK_IDLE; + cancel_work_sync(&it6505->link_works); + it6505_write(it6505, REG_TRAIN_CTRL1, FORCE_RETRAIN); +} + +static void it6505_link_train_ok(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + it6505->link_state = LINK_OK; + /* disalbe mute enable avi info frame */ + it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, 0x00); + it6505_set_bits(it6505, REG_INFOFRAME_CTRL, + EN_VID_CTRL_PKT, EN_VID_CTRL_PKT); + + if (it6505_audio_input(it6505)) { + DRM_DEV_DEBUG_DRIVER(dev, "Enable audio!"); + it6505_enable_audio(it6505); + } + + if (it6505->hdcp_desired) + it6505_start_hdcp(it6505); +} + +static void it6505_link_step_train_process(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + int ret, i, step_retry = 3; + + DRM_DEV_DEBUG_DRIVER(dev, "Start step train"); + + if (it6505->sink_count == 0) { + DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d, force eq", + it6505->sink_count); + it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_EQ_DONE, + FORCE_EQ_DONE); + return; 
+ } + + if (!it6505->step_train) { + DRM_DEV_DEBUG_DRIVER(dev, "not support step train"); + return; + } + + /* step training start here */ + for (i = 0; i < step_retry; i++) { + it6505_link_reset_step_train(it6505); + ret = it6505_link_start_step_train(it6505); + DRM_DEV_DEBUG_DRIVER(dev, "step train %s, retry:%d times", + ret ? "pass" : "failed", i + 1); + if (ret) { + it6505_link_train_ok(it6505); + return; + } + } + + DRM_DEV_DEBUG_DRIVER(dev, "training fail"); + it6505->link_state = LINK_IDLE; + it6505_video_reset(it6505); +} + +static void it6505_link_training_work(struct work_struct *work) +{ + struct it6505 *it6505 = container_of(work, struct it6505, link_works); + struct device *dev = &it6505->client->dev; + int ret; + + DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d", + it6505->sink_count); + + if (!it6505_get_sink_hpd_status(it6505)) + return; + + it6505_link_training_setup(it6505); + it6505_reset_hdcp(it6505); + it6505_aux_reset(it6505); + + if (it6505->auto_train_retry < 1) { + it6505_link_step_train_process(it6505); + return; + } + + ret = it6505_link_start_auto_train(it6505); + DRM_DEV_DEBUG_DRIVER(dev, "auto train %s, auto_train_retry: %d", + ret ? "pass" : "failed", it6505->auto_train_retry); + it6505->auto_train_retry--; + + if (ret) { + it6505_link_train_ok(it6505); + return; + } + + it6505_dump(it6505); +} + +static void it6505_plugged_status_to_codec(struct it6505 *it6505) +{ + enum drm_connector_status status = it6505->connector_status; + + if (it6505->plugged_cb && it6505->codec_dev) + it6505->plugged_cb(it6505->codec_dev, + status == connector_status_connected); +} + +static int it6505_process_hpd_irq(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + int ret, dpcd_sink_count, dp_irq_vector, bstatus; + u8 link_status[DP_LINK_STATUS_SIZE]; + + if (!it6505_get_sink_hpd_status(it6505)) { + DRM_DEV_DEBUG_DRIVER(dev, "HPD_IRQ HPD low"); + it6505->sink_count = 0; + return 0; + } + + ret = it6505_dpcd_read(it6505, DP_SINK_COUNT); + if (ret < 0) + return ret; + + dpcd_sink_count = DP_GET_SINK_COUNT(ret); + DRM_DEV_DEBUG_DRIVER(dev, "dpcd_sink_count: %d it6505->sink_count:%d", + dpcd_sink_count, it6505->sink_count); + + if (it6505->branch_device && dpcd_sink_count != it6505->sink_count) { + memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); + it6505->sink_count = dpcd_sink_count; + it6505_reset_logic(it6505); + it6505_int_mask_enable(it6505); + it6505_init(it6505); + return 0; + } + + dp_irq_vector = it6505_dpcd_read(it6505, DP_DEVICE_SERVICE_IRQ_VECTOR); + if (dp_irq_vector < 0) + return dp_irq_vector; + + DRM_DEV_DEBUG_DRIVER(dev, "dp_irq_vector = 0x%02x", dp_irq_vector); + + if (dp_irq_vector & DP_CP_IRQ) { + it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ, + HDCP_TRIGGER_CPIRQ); + + bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS); + if (bstatus < 0) + return bstatus; + + DRM_DEV_DEBUG_DRIVER(dev, "Bstatus = 0x%02x", bstatus); + } + + ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status); + if (ret < 0) { + dev_err(dev, "Fail to read link status ret: %d", ret); + return ret; + } + + DRM_DEV_DEBUG_DRIVER(dev, "link status = 0x%*ph", + (int)ARRAY_SIZE(link_status), link_status); + + if (!drm_dp_channel_eq_ok(link_status, it6505->lane_count)) { + it6505->auto_train_retry = AUTO_TRAIN_RETRY; + it6505_video_reset(it6505); + } + + return 0; +} + +static void it6505_irq_hpd(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + it6505->hpd_state = it6505_get_sink_hpd_status(it6505); + DRM_DEV_DEBUG_DRIVER(dev, 
"hpd change interrupt, change to %s", + it6505->hpd_state ? "high" : "low"); + + if (it6505->bridge.dev) + drm_helper_hpd_irq_event(it6505->bridge.dev); + DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d", + it6505->sink_count); + + if (it6505->hpd_state) { + wait_for_completion_timeout(&it6505->wait_edid_complete, + msecs_to_jiffies(6000)); + it6505_lane_termination_on(it6505); + it6505_lane_power_on(it6505); + + /* + * for some dongle which issue HPD_irq + * when sink count change from 0->1 + * it6505 not able to receive HPD_IRQ + * if HW never go into trainig done + */ + + if (it6505->branch_device && it6505->sink_count == 0) + schedule_work(&it6505->link_works); + + if (!it6505_get_video_status(it6505)) + it6505_video_reset(it6505); + + it6505_calc_video_info(it6505); + } else { + memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); + + if (it6505->hdcp_desired) + it6505_stop_hdcp(it6505); + + it6505_video_disable(it6505); + it6505_disable_audio(it6505); + it6505_stop_link_train(it6505); + it6505_lane_off(it6505); + it6505_link_reset_step_train(it6505); + } +} + +static void it6505_irq_hpd_irq(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "hpd_irq interrupt"); + + if (it6505_process_hpd_irq(it6505) < 0) + DRM_DEV_DEBUG_DRIVER(dev, "process hpd_irq fail!"); +} + +static void it6505_irq_scdt(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + bool data; + + data = it6505_get_video_status(it6505); + DRM_DEV_DEBUG_DRIVER(dev, "video stable change interrupt, %s", + data ? "stable" : "unstable"); + it6505_calc_video_info(it6505); + it6505_link_reset_step_train(it6505); + + if (data) + schedule_work(&it6505->link_works); +} + +static void it6505_irq_hdcp_done(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "hdcp done interrupt"); + it6505->hdcp_status = HDCP_AUTH_DONE; + it6505_show_hdcp_info(it6505); +} + +static void it6505_irq_hdcp_fail(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "hdcp fail interrupt"); + it6505->hdcp_status = HDCP_AUTH_IDLE; + it6505_show_hdcp_info(it6505); + it6505_start_hdcp(it6505); +} + +static void it6505_irq_aux_cmd_fail(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "AUX PC Request Fail Interrupt"); +} + +static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "HDCP event Interrupt"); + schedule_work(&it6505->hdcp_wait_ksv_list); +} + +static void it6505_irq_audio_fifo_error(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "audio fifo error Interrupt"); + + if (it6505_audio_input(it6505)) + it6505_enable_audio(it6505); +} + +static void it6505_irq_link_train_fail(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "link training fail interrupt"); + schedule_work(&it6505->link_works); +} + +static void it6505_irq_video_fifo_error(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "video fifo overflow interrupt"); + it6505->auto_train_retry = AUTO_TRAIN_RETRY; + flush_work(&it6505->link_works); + it6505_stop_hdcp(it6505); + it6505_video_reset(it6505); +} + +static void it6505_irq_io_latch_fifo_overflow(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "IO 
latch fifo overflow interrupt"); + it6505->auto_train_retry = AUTO_TRAIN_RETRY; + flush_work(&it6505->link_works); + it6505_stop_hdcp(it6505); + it6505_video_reset(it6505); +} + +static bool it6505_test_bit(unsigned int bit, const unsigned int *addr) +{ + return 1 & (addr[bit / BITS_PER_BYTE] >> (bit % BITS_PER_BYTE)); +} + +static irqreturn_t it6505_int_threaded_handler(int unused, void *data) +{ + struct it6505 *it6505 = data; + struct device *dev = &it6505->client->dev; + static const struct { + int bit; + void (*handler)(struct it6505 *it6505); + } irq_vec[] = { + { BIT_INT_HPD, it6505_irq_hpd }, + { BIT_INT_HPD_IRQ, it6505_irq_hpd_irq }, + { BIT_INT_SCDT, it6505_irq_scdt }, + { BIT_INT_HDCP_FAIL, it6505_irq_hdcp_fail }, + { BIT_INT_HDCP_DONE, it6505_irq_hdcp_done }, + { BIT_INT_AUX_CMD_FAIL, it6505_irq_aux_cmd_fail }, + { BIT_INT_HDCP_KSV_CHECK, it6505_irq_hdcp_ksv_check }, + { BIT_INT_AUDIO_FIFO_ERROR, it6505_irq_audio_fifo_error }, + { BIT_INT_LINK_TRAIN_FAIL, it6505_irq_link_train_fail }, + { BIT_INT_VID_FIFO_ERROR, it6505_irq_video_fifo_error }, + { BIT_INT_IO_FIFO_OVERFLOW, it6505_irq_io_latch_fifo_overflow }, + }; + int int_status[3], i; + + msleep(100); + mutex_lock(&it6505->extcon_lock); + + if (it6505->enable_drv_hold || !it6505->powered) + goto unlock; + + int_status[0] = it6505_read(it6505, INT_STATUS_01); + int_status[1] = it6505_read(it6505, INT_STATUS_02); + int_status[2] = it6505_read(it6505, INT_STATUS_03); + + it6505_write(it6505, INT_STATUS_01, int_status[0]); + it6505_write(it6505, INT_STATUS_02, int_status[1]); + it6505_write(it6505, INT_STATUS_03, int_status[2]); + + DRM_DEV_DEBUG_DRIVER(dev, "reg06 = 0x%02x", int_status[0]); + DRM_DEV_DEBUG_DRIVER(dev, "reg07 = 0x%02x", int_status[1]); + DRM_DEV_DEBUG_DRIVER(dev, "reg08 = 0x%02x", int_status[2]); + it6505_debug_print(it6505, REG_SYSTEM_STS, ""); + + if (it6505_test_bit(irq_vec[0].bit, (unsigned int *)int_status)) + irq_vec[0].handler(it6505); + + if (!it6505->hpd_state) + goto unlock; + + for (i = 1; i < ARRAY_SIZE(irq_vec); i++) { + if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status)) + irq_vec[i].handler(it6505); + } + +unlock: + mutex_unlock(&it6505->extcon_lock); + + return IRQ_HANDLED; +} + +static int it6505_poweron(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + struct it6505_platform_data *pdata = &it6505->pdata; + int err; + + DRM_DEV_DEBUG_DRIVER(dev, "it6505 start powered on"); + + if (it6505->powered) { + DRM_DEV_DEBUG_DRIVER(dev, "it6505 already powered on"); + return 0; + } + + if (pdata->pwr18) { + err = regulator_enable(pdata->pwr18); + if (err) { + DRM_DEV_DEBUG_DRIVER(dev, "Failed to enable VDD18: %d", + err); + return err; + } + } + + if (pdata->ovdd) { + /* time interval between IVDD and OVDD at least be 1ms */ + usleep_range(1000, 2000); + err = regulator_enable(pdata->ovdd); + if (err) { + regulator_disable(pdata->pwr18); + return err; + } + } + /* time interval between OVDD and SYSRSTN at least be 10ms */ + if (pdata->gpiod_reset) { + usleep_range(10000, 20000); + gpiod_set_value_cansleep(pdata->gpiod_reset, 0); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(pdata->gpiod_reset, 1); + usleep_range(10000, 20000); + } + + it6505_reset_logic(it6505); + it6505_int_mask_enable(it6505); + it6505_init(it6505); + it6505_lane_off(it6505); + + it6505->powered = true; + + return 0; +} + +static int it6505_poweroff(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + struct it6505_platform_data *pdata = &it6505->pdata; + int err; + + 
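	/*
	 * Power-down below asserts the reset GPIO first and then releases
	 * the pwr18 and ovdd supplies; power-up above observes at least 1 ms
	 * between the two supplies and at least 10 ms before toggling
	 * SYSRSTN.  These resources are typically acquired in probe with
	 * devm_regulator_get() and devm_gpiod_get_optional() (not shown in
	 * this hunk).
	 */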
DRM_DEV_DEBUG_DRIVER(dev, "it6505 start power off"); + + if (!it6505->powered) { + DRM_DEV_DEBUG_DRIVER(dev, "power had been already off"); + return 0; + } + + if (pdata->gpiod_reset) + gpiod_set_value_cansleep(pdata->gpiod_reset, 0); + + if (pdata->pwr18) { + err = regulator_disable(pdata->pwr18); + if (err) + return err; + } + + if (pdata->ovdd) { + err = regulator_disable(pdata->ovdd); + if (err) + return err; + } + + it6505->powered = false; + it6505->sink_count = 0; + + return 0; +} + +static enum drm_connector_status it6505_detect(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + enum drm_connector_status status = connector_status_disconnected; + int dp_sink_count; + + DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d powered:%d", + it6505->sink_count, it6505->powered); + + mutex_lock(&it6505->mode_lock); + + if (!it6505->powered) + goto unlock; + + if (it6505->enable_drv_hold) { + status = it6505_get_sink_hpd_status(it6505) ? + connector_status_connected : + connector_status_disconnected; + goto unlock; + } + + if (it6505_get_sink_hpd_status(it6505)) { + it6505_aux_on(it6505); + it6505_drm_dp_link_probe(&it6505->aux, &it6505->link); + it6505_drm_dp_link_power_up(&it6505->aux, &it6505->link); + it6505->auto_train_retry = AUTO_TRAIN_RETRY; + + if (it6505->dpcd[0] == 0) { + it6505_get_dpcd(it6505, DP_DPCD_REV, it6505->dpcd, + ARRAY_SIZE(it6505->dpcd)); + it6505_variable_config(it6505); + it6505_parse_link_capabilities(it6505); + } + + dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT); + it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count); + DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d", + it6505->sink_count, it6505->branch_device); + + if (it6505->branch_device) { + status = (it6505->sink_count != 0) ? 
+ connector_status_connected : + connector_status_disconnected; + } else { + status = connector_status_connected; + } + } else { + it6505->sink_count = 0; + memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); + } + +unlock: + if (it6505->connector_status != status) { + it6505->connector_status = status; + it6505_plugged_status_to_codec(it6505); + } + + mutex_unlock(&it6505->mode_lock); + + return status; +} + +static int it6505_extcon_notifier(struct notifier_block *self, + unsigned long event, void *ptr) +{ + struct it6505 *it6505 = container_of(self, struct it6505, event_nb); + + schedule_work(&it6505->extcon_wq); + return NOTIFY_DONE; +} + +static void it6505_extcon_work(struct work_struct *work) +{ + struct it6505 *it6505 = container_of(work, struct it6505, extcon_wq); + struct device *dev = &it6505->client->dev; + int state = extcon_get_state(it6505->extcon, EXTCON_DISP_DP); + unsigned int pwroffretry = 0; + + if (it6505->enable_drv_hold) + return; + + mutex_lock(&it6505->extcon_lock); + + DRM_DEV_DEBUG_DRIVER(dev, "EXTCON_DISP_DP = 0x%02x", state); + if (state > 0) { + DRM_DEV_DEBUG_DRIVER(dev, "start to power on"); + msleep(100); + it6505_poweron(it6505); + } else { + DRM_DEV_DEBUG_DRIVER(dev, "start to power off"); + while (it6505_poweroff(it6505) && pwroffretry++ < 5) { + DRM_DEV_DEBUG_DRIVER(dev, "power off fail %d times", + pwroffretry); + } + + drm_helper_hpd_irq_event(it6505->bridge.dev); + memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); + DRM_DEV_DEBUG_DRIVER(dev, "power off it6505 success!"); + } + + mutex_unlock(&it6505->extcon_lock); +} + +static int it6505_use_notifier_module(struct it6505 *it6505) +{ + int ret; + struct device *dev = &it6505->client->dev; + + it6505->event_nb.notifier_call = it6505_extcon_notifier; + INIT_WORK(&it6505->extcon_wq, it6505_extcon_work); + ret = devm_extcon_register_notifier(&it6505->client->dev, + it6505->extcon, EXTCON_DISP_DP, + &it6505->event_nb); + if (ret) { + dev_err(dev, "failed to register notifier for DP"); + return ret; + } + + schedule_work(&it6505->extcon_wq); + + return 0; +} + +static void it6505_remove_notifier_module(struct it6505 *it6505) +{ + if (it6505->extcon) { + devm_extcon_unregister_notifier(&it6505->client->dev, + it6505->extcon, EXTCON_DISP_DP, + &it6505->event_nb); + + flush_work(&it6505->extcon_wq); + } +} + +static void __maybe_unused it6505_delayed_audio(struct work_struct *work) +{ + struct it6505 *it6505 = container_of(work, struct it6505, + delayed_audio.work); + + DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "start"); + + if (!it6505->powered) + return; + + if (!it6505->enable_drv_hold) + it6505_enable_audio(it6505); +} + +static int __maybe_unused it6505_audio_setup_hw_params(struct it6505 *it6505, + struct hdmi_codec_params + *params) +{ + struct device *dev = &it6505->client->dev; + int i = 0; + + DRM_DEV_DEBUG_DRIVER(dev, "%s %d Hz, %d bit, %d channels\n", __func__, + params->sample_rate, params->sample_width, + params->cea.channels); + + if (!it6505->bridge.encoder) + return -ENODEV; + + if (params->cea.channels <= 1 || params->cea.channels > 8) { + DRM_DEV_DEBUG_DRIVER(dev, "channel number: %d not support", + it6505->audio.channel_count); + return -EINVAL; + } + + it6505->audio.channel_count = params->cea.channels; + + while (i < ARRAY_SIZE(audio_sample_rate_map) && + params->sample_rate != + audio_sample_rate_map[i].sample_rate_value) { + i++; + } + if (i == ARRAY_SIZE(audio_sample_rate_map)) { + DRM_DEV_DEBUG_DRIVER(dev, "sample rate: %d Hz not support", + params->sample_rate); + return -EINVAL; + } + 
it6505->audio.sample_rate = audio_sample_rate_map[i].rate; + + switch (params->sample_width) { + case 16: + it6505->audio.word_length = WORD_LENGTH_16BIT; + break; + case 18: + it6505->audio.word_length = WORD_LENGTH_18BIT; + break; + case 20: + it6505->audio.word_length = WORD_LENGTH_20BIT; + break; + case 24: + case 32: + it6505->audio.word_length = WORD_LENGTH_24BIT; + break; + default: + DRM_DEV_DEBUG_DRIVER(dev, "wordlength: %d bit not support", + params->sample_width); + return -EINVAL; + } + + return 0; +} + +static void __maybe_unused it6505_audio_shutdown(struct device *dev, void *data) +{ + struct it6505 *it6505 = dev_get_drvdata(dev); + + if (it6505->powered) + it6505_disable_audio(it6505); +} + +static int __maybe_unused it6505_audio_hook_plugged_cb(struct device *dev, + void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct it6505 *it6505 = data; + + it6505->plugged_cb = fn; + it6505->codec_dev = codec_dev; + it6505_plugged_status_to_codec(it6505); + + return 0; +} + +static inline struct it6505 *bridge_to_it6505(struct drm_bridge *bridge) +{ + return container_of(bridge, struct it6505, bridge); +} + +static int it6505_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + struct device *dev = &it6505->client->dev; + int ret; + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { + DRM_ERROR("DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied"); + return -EINVAL; + } + + if (!bridge->encoder) { + dev_err(dev, "Parent encoder object not found"); + return -ENODEV; + } + + /* Register aux channel */ + it6505->aux.name = "DP-AUX"; + it6505->aux.dev = dev; + it6505->aux.drm_dev = bridge->dev; + it6505->aux.transfer = it6505_aux_transfer; + + ret = drm_dp_aux_register(&it6505->aux); + + if (ret < 0) { + dev_err(dev, "Failed to register aux: %d", ret); + return ret; + } + + if (it6505->extcon) { + ret = it6505_use_notifier_module(it6505); + if (ret < 0) { + dev_err(dev, "use notifier module failed"); + return ret; + } + } + + return 0; +} + +static void it6505_bridge_detach(struct drm_bridge *bridge) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + + flush_work(&it6505->link_works); + it6505_remove_notifier_module(it6505); +} + +static enum drm_mode_status +it6505_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + + if (mode->clock > DPI_PIXEL_CLK_MAX) + return MODE_CLOCK_HIGH; + + it6505->video_info.clock = mode->clock; + + return MODE_OK; +} + +static void it6505_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_state) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + struct device *dev = &it6505->client->dev; + struct drm_atomic_state *state = old_state->base.state; + struct hdmi_avi_infoframe frame; + struct drm_crtc_state *crtc_state; + struct drm_connector_state *conn_state; + struct drm_display_mode *mode; + struct drm_connector *connector; + int ret; + + DRM_DEV_DEBUG_DRIVER(dev, "start"); + + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + + if (WARN_ON(!connector)) + return; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + + if (WARN_ON(!conn_state)) + return; + + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + + if (WARN_ON(!crtc_state)) + return; + + 
mode = &crtc_state->adjusted_mode; + + if (WARN_ON(!mode)) + return; + + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, + connector, + mode); + if (ret) + dev_err(dev, "Failed to setup AVI infoframe: %d", ret); + + it6505_update_video_parameter(it6505, mode); + + ret = it6505_send_video_infoframe(it6505, &frame); + + if (ret) + dev_err(dev, "Failed to send AVI infoframe: %d", ret); + + it6505_int_mask_enable(it6505); + it6505_video_reset(it6505); +} + +static void it6505_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_state) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "start"); + + if (it6505->powered) + it6505_video_disable(it6505); +} + +static enum drm_connector_status +it6505_bridge_detect(struct drm_bridge *bridge) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + + return it6505_detect(it6505); +} + +static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + struct device *dev = &it6505->client->dev; + struct edid *edid; + + edid = drm_do_get_edid(connector, it6505_get_edid_block, it6505); + + if (!edid) { + DRM_DEV_DEBUG_DRIVER(dev, "failed to get edid!"); + return NULL; + } + + return edid; +} + +static const struct drm_bridge_funcs it6505_bridge_funcs = { + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .attach = it6505_bridge_attach, + .detach = it6505_bridge_detach, + .mode_valid = it6505_bridge_mode_valid, + .atomic_enable = it6505_bridge_atomic_enable, + .atomic_disable = it6505_bridge_atomic_disable, + .detect = it6505_bridge_detect, + .get_edid = it6505_bridge_get_edid, +}; + +static __maybe_unused int it6505_bridge_resume(struct device *dev) +{ + struct it6505 *it6505 = dev_get_drvdata(dev); + + return it6505_poweron(it6505); +} + +static __maybe_unused int it6505_bridge_suspend(struct device *dev) +{ + struct it6505 *it6505 = dev_get_drvdata(dev); + + return it6505_poweroff(it6505); +} + +static SIMPLE_DEV_PM_OPS(it6505_bridge_pm_ops, it6505_bridge_suspend, + it6505_bridge_resume); + +static int it6505_init_pdata(struct it6505 *it6505) +{ + struct it6505_platform_data *pdata = &it6505->pdata; + struct device *dev = &it6505->client->dev; + + /* 1.0V digital core power regulator */ + pdata->pwr18 = devm_regulator_get(dev, "pwr18"); + if (IS_ERR(pdata->pwr18)) { + dev_err(dev, "pwr18 regulator not found"); + return PTR_ERR(pdata->pwr18); + } + + pdata->ovdd = devm_regulator_get(dev, "ovdd"); + if (IS_ERR(pdata->ovdd)) { + dev_err(dev, "ovdd regulator not found"); + return PTR_ERR(pdata->ovdd); + } + + pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(pdata->gpiod_reset)) { + dev_err(dev, "gpiod_reset gpio not found"); + return PTR_ERR(pdata->gpiod_reset); + } + + return 0; +} + +static void it6505_parse_dt(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + u32 *afe_setting = &it6505->afe_setting; + + it6505->lane_swap_disabled = + device_property_read_bool(dev, "no-laneswap"); + + if (it6505->lane_swap_disabled) + it6505->lane_swap = false; + + if (device_property_read_u32(dev, "afe-setting", afe_setting) == 0) { + if (*afe_setting >= ARRAY_SIZE(afe_setting_table)) { + dev_err(dev, "afe setting error, use default"); + *afe_setting = 0; + } + } else { + 
*afe_setting = 0; + } + DRM_DEV_DEBUG_DRIVER(dev, "using afe_setting: %d", *afe_setting); +} + +static ssize_t receive_timing_debugfs_show(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + struct it6505 *it6505 = file->private_data; + struct drm_display_mode *vid = &it6505->video_info; + u8 read_buf[READ_BUFFER_SIZE]; + u8 *str = read_buf, *end = read_buf + PAGE_SIZE; + ssize_t ret, count; + + if (!it6505) + return -ENODEV; + + it6505_calc_video_info(it6505); + str += scnprintf(str, end - str, "---video timing---\n"); + str += scnprintf(str, end - str, "PCLK:%d.%03dMHz\n", + vid->clock / 1000, vid->clock % 1000); + str += scnprintf(str, end - str, "HTotal:%d\n", vid->htotal); + str += scnprintf(str, end - str, "HActive:%d\n", vid->hdisplay); + str += scnprintf(str, end - str, "HFrontPorch:%d\n", + vid->hsync_start - vid->hdisplay); + str += scnprintf(str, end - str, "HSyncWidth:%d\n", + vid->hsync_end - vid->hsync_start); + str += scnprintf(str, end - str, "HBackPorch:%d\n", + vid->htotal - vid->hsync_end); + str += scnprintf(str, end - str, "VTotal:%d\n", vid->vtotal); + str += scnprintf(str, end - str, "VActive:%d\n", vid->vdisplay); + str += scnprintf(str, end - str, "VFrontPorch:%d\n", + vid->vsync_start - vid->vdisplay); + str += scnprintf(str, end - str, "VSyncWidth:%d\n", + vid->vsync_end - vid->vsync_start); + str += scnprintf(str, end - str, "VBackPorch:%d\n", + vid->vtotal - vid->vsync_end); + + count = str - read_buf; + ret = simple_read_from_buffer(buf, len, ppos, read_buf, count); + + return ret; +} + +static int force_power_on_off_debugfs_write(void *data, u64 value) +{ + struct it6505 *it6505 = data; + + if (!it6505) + return -ENODEV; + + if (value) + it6505_poweron(it6505); + else + it6505_poweroff(it6505); + + return 0; +} + +static int enable_drv_hold_debugfs_show(void *data, u64 *buf) +{ + struct it6505 *it6505 = data; + + if (!it6505) + return -ENODEV; + + *buf = it6505->enable_drv_hold; + + return 0; +} + +static int enable_drv_hold_debugfs_write(void *data, u64 drv_hold) +{ + struct it6505 *it6505 = data; + + if (!it6505) + return -ENODEV; + + it6505->enable_drv_hold = drv_hold; + + if (it6505->enable_drv_hold) { + it6505_int_mask_disable(it6505); + } else { + it6505_clear_int(it6505); + it6505_int_mask_enable(it6505); + + if (it6505->powered) { + it6505->connector_status = + it6505_get_sink_hpd_status(it6505) ? 
+ connector_status_connected : + connector_status_disconnected; + } else { + it6505->connector_status = + connector_status_disconnected; + } + } + + return 0; +} + +static const struct file_operations receive_timing_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = receive_timing_debugfs_show, + .llseek = default_llseek, +}; + +DEFINE_DEBUGFS_ATTRIBUTE(fops_force_power, NULL, + force_power_on_off_debugfs_write, "%llu\n"); + +DEFINE_DEBUGFS_ATTRIBUTE(fops_enable_drv_hold, enable_drv_hold_debugfs_show, + enable_drv_hold_debugfs_write, "%llu\n"); + +static const struct debugfs_entries debugfs_entry[] = { + { "receive_timing", &receive_timing_fops }, + { "force_power_on_off", &fops_force_power }, + { "enable_drv_hold", &fops_enable_drv_hold }, + { NULL, NULL }, +}; + +static void debugfs_create_files(struct it6505 *it6505) +{ + int i = 0; + + while (debugfs_entry[i].name && debugfs_entry[i].fops) { + debugfs_create_file(debugfs_entry[i].name, 0644, + it6505->debugfs, it6505, + debugfs_entry[i].fops); + i++; + } +} + +static void debugfs_init(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + it6505->debugfs = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); + + if (IS_ERR(it6505->debugfs)) { + dev_err(dev, "failed to create debugfs root"); + return; + } + + debugfs_create_files(it6505); +} + +static void it6505_debugfs_remove(struct it6505 *it6505) +{ + debugfs_remove_recursive(it6505->debugfs); +} + +static void it6505_shutdown(struct i2c_client *client) +{ + struct it6505 *it6505 = dev_get_drvdata(&client->dev); + + if (it6505->powered) + it6505_lane_off(it6505); +} + +static int it6505_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct it6505 *it6505; + struct device *dev = &client->dev; + struct extcon_dev *extcon; + int err, intp_irq; + + it6505 = devm_kzalloc(&client->dev, sizeof(*it6505), GFP_KERNEL); + if (!it6505) + return -ENOMEM; + + mutex_init(&it6505->extcon_lock); + mutex_init(&it6505->mode_lock); + mutex_init(&it6505->aux_lock); + + it6505->bridge.of_node = client->dev.of_node; + it6505->connector_status = connector_status_disconnected; + it6505->client = client; + i2c_set_clientdata(client, it6505); + + /* get extcon device from DTS */ + extcon = extcon_get_edev_by_phandle(dev, 0); + if (PTR_ERR(extcon) == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (IS_ERR(extcon)) { + dev_err(dev, "can not get extcon device!"); + return PTR_ERR(extcon); + } + + it6505->extcon = extcon; + + it6505->regmap = devm_regmap_init_i2c(client, &it6505_regmap_config); + if (IS_ERR(it6505->regmap)) { + dev_err(dev, "regmap i2c init failed"); + err = PTR_ERR(it6505->regmap); + return err; + } + + err = it6505_init_pdata(it6505); + if (err) { + dev_err(dev, "Failed to initialize pdata: %d", err); + return err; + } + + it6505_parse_dt(it6505); + + intp_irq = client->irq; + + if (!intp_irq) { + dev_err(dev, "Failed to get INTP IRQ"); + err = -ENODEV; + return err; + } + + err = devm_request_threaded_irq(&client->dev, intp_irq, NULL, + it6505_int_threaded_handler, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + "it6505-intp", it6505); + if (err) { + dev_err(dev, "Failed to request INTP threaded IRQ: %d", err); + return err; + } + + INIT_WORK(&it6505->link_works, it6505_link_training_work); + INIT_WORK(&it6505->hdcp_wait_ksv_list, it6505_hdcp_wait_ksv_list); + INIT_DELAYED_WORK(&it6505->hdcp_work, it6505_hdcp_work); + init_completion(&it6505->wait_edid_complete); + memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); + it6505->powered = false; + 
it6505->enable_drv_hold = DEFAULT_DRV_HOLD; + + if (DEFAULT_PWR_ON) + it6505_poweron(it6505); + + DRM_DEV_DEBUG_DRIVER(dev, "it6505 device name: %s", dev_name(dev)); + debugfs_init(it6505); + + it6505->bridge.funcs = &it6505_bridge_funcs; + it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; + it6505->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | + DRM_BRIDGE_OP_HPD; + drm_bridge_add(&it6505->bridge); + + return 0; +} + +static int it6505_i2c_remove(struct i2c_client *client) +{ + struct it6505 *it6505 = i2c_get_clientdata(client); + + drm_bridge_remove(&it6505->bridge); + drm_dp_aux_unregister(&it6505->aux); + it6505_debugfs_remove(it6505); + it6505_poweroff(it6505); + + return 0; +} + +static const struct i2c_device_id it6505_id[] = { + { "it6505", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, it6505_id); + +static const struct of_device_id it6505_of_match[] = { + { .compatible = "ite,it6505" }, + { } +}; + +static struct i2c_driver it6505_i2c_driver = { + .driver = { + .name = "it6505", + .of_match_table = it6505_of_match, + .pm = &it6505_bridge_pm_ops, + }, + .probe = it6505_i2c_probe, + .remove = it6505_i2c_remove, + .shutdown = it6505_shutdown, + .id_table = it6505_id, +}; + +module_i2c_driver(it6505_i2c_driver); + +MODULE_AUTHOR("Allen Chen <allen.chen@ite.com.tw>"); +MODULE_DESCRIPTION("IT6505 DisplayPort Transmitter driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index feb128a4557d..63df2e8a8abc 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -1164,7 +1164,11 @@ static int lt9611_probe(struct i2c_client *client, lt9611_enable_hpd_interrupts(lt9611); - return lt9611_audio_init(dev, lt9611); + ret = lt9611_audio_init(dev, lt9611); + if (ret) + goto err_remove_bridge; + + return 0; err_remove_bridge: drm_bridge_remove(&lt9611->bridge); diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index 5abb5ec3de46..963a6794735f 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -860,18 +860,19 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge, memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode)); drm_mode_debug_printmodeline(adjusted_mode); - pm_runtime_get_sync(dev); + if (pm_runtime_resume_and_get(dev) < 0) + return; if (clk_prepare_enable(dsi->lcdif_clk) < 0) - return; + goto runtime_put; if (clk_prepare_enable(dsi->core_clk) < 0) - return; + goto runtime_put; /* Step 1 from DSI reset-out instructions */ ret = reset_control_deassert(dsi->rst_pclk); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret); - return; + goto runtime_put; } /* Step 2 from DSI reset-out instructions */ @@ -881,13 +882,18 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge, ret = reset_control_deassert(dsi->rst_esc); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret); - return; + goto runtime_put; } ret = reset_control_deassert(dsi->rst_byte); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret); - return; + goto runtime_put; } + + return; + +runtime_put: + pm_runtime_put_sync(dev); } static void diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index b32295abd9e7..5be057575183 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -138,6 +138,17 @@ static int panel_bridge_get_modes(struct drm_bridge *bridge, return drm_panel_get_modes(panel_bridge->panel, connector); } +static void 
panel_bridge_debugfs_init(struct drm_bridge *bridge, + struct dentry *root) +{ + struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); + struct drm_panel *panel = panel_bridge->panel; + + root = debugfs_create_dir("panel", root); + if (panel->funcs->debugfs_init) + panel->funcs->debugfs_init(panel, root); +} + static const struct drm_bridge_funcs panel_bridge_bridge_funcs = { .attach = panel_bridge_attach, .detach = panel_bridge_detach, @@ -150,6 +161,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt, + .debugfs_init = panel_bridge_debugfs_init, }; /** diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index b0d8110dd412..4befc104d220 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2551,8 +2551,9 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, if (!output_fmts) return NULL; - /* If dw-hdmi is the only bridge, avoid negociating with ourselves */ - if (list_is_singular(&bridge->encoder->bridge_chain)) { + /* If dw-hdmi is the first or only bridge, avoid negociating with ourselves */ + if (list_is_singular(&bridge->encoder->bridge_chain) || + list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) { *num_output_fmts = 1; output_fmts[0] = MEDIA_BUS_FMT_FIXED; diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index ba136a188be7..38616aab12ac 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -26,6 +26,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> #include <drm/dp/drm_dp_aux_bus.h> #include <drm/dp/drm_dp_helper.h> #include <drm/drm_mipi_dsi.h> @@ -174,7 +175,7 @@ struct ti_sn65dsi86 { struct regmap *regmap; struct drm_dp_aux aux; struct drm_bridge bridge; - struct drm_connector connector; + struct drm_connector *connector; struct device_node *host_node; struct mipi_dsi_device *dsi; struct clk *refclk; @@ -646,54 +647,6 @@ static struct auxiliary_driver ti_sn_aux_driver = { .id_table = ti_sn_aux_id_table, }; -/* ----------------------------------------------------------------------------- - * DRM Connector Operations - */ - -static struct ti_sn65dsi86 * -connector_to_ti_sn65dsi86(struct drm_connector *connector) -{ - return container_of(connector, struct ti_sn65dsi86, connector); -} - -static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector) -{ - struct ti_sn65dsi86 *pdata = connector_to_ti_sn65dsi86(connector); - - return drm_bridge_get_modes(pdata->next_bridge, connector); -} - -static struct drm_connector_helper_funcs ti_sn_bridge_connector_helper_funcs = { - .get_modes = ti_sn_bridge_connector_get_modes, -}; - -static const struct drm_connector_funcs ti_sn_bridge_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int ti_sn_bridge_connector_init(struct ti_sn65dsi86 *pdata) -{ - int ret; - - ret = drm_connector_init(pdata->bridge.dev, 
&pdata->connector, - &ti_sn_bridge_connector_funcs, - DRM_MODE_CONNECTOR_eDP); - if (ret) { - DRM_ERROR("Failed to initialize connector with drm\n"); - return ret; - } - - drm_connector_helper_add(&pdata->connector, - &ti_sn_bridge_connector_helper_funcs); - drm_connector_attach_encoder(&pdata->connector, pdata->bridge.encoder); - - return 0; -} - /*------------------------------------------------------------------------------ * DRM Bridge */ @@ -757,10 +710,6 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge, return ret; } - ret = ti_sn_bridge_connector_init(pdata); - if (ret < 0) - goto err_conn_init; - /* We never want the next bridge to *also* create a connector: */ flags |= DRM_BRIDGE_ATTACH_NO_CONNECTOR; @@ -768,13 +717,20 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge, ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge, &pdata->bridge, flags); if (ret < 0) - goto err_dsi_host; + goto err_initted_aux; + + pdata->connector = drm_bridge_connector_init(pdata->bridge.dev, + pdata->bridge.encoder); + if (IS_ERR(pdata->connector)) { + ret = PTR_ERR(pdata->connector); + goto err_initted_aux; + } + + drm_connector_attach_encoder(pdata->connector, pdata->bridge.encoder); return 0; -err_dsi_host: - drm_connector_cleanup(&pdata->connector); -err_conn_init: +err_initted_aux: drm_dp_aux_unregister(&pdata->aux); return ret; } @@ -824,7 +780,7 @@ static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata) static unsigned int ti_sn_bridge_get_bpp(struct ti_sn65dsi86 *pdata) { - if (pdata->connector.display_info.bpc <= 6) + if (pdata->connector->display_info.bpc <= 6) return 18; else return 24; diff --git a/drivers/gpu/drm/dp/drm_dp.c b/drivers/gpu/drm/dp/drm_dp.c index a20b0f8f24b8..e159b81800d4 100644 --- a/drivers/gpu/drm/dp/drm_dp.c +++ b/drivers/gpu/drm/dp/drm_dp.c @@ -28,6 +28,7 @@ #include <linux/module.h> #include <linux/sched.h> #include <linux/seq_file.h> +#include <linux/string_helpers.h> #include <drm/dp/drm_dp_helper.h> #include <drm/drm_print.h> @@ -1322,7 +1323,7 @@ void drm_dp_downstream_debug(struct seq_file *m, bool branch_device = drm_dp_is_branch(dpcd); seq_printf(m, "\tDP branch device present: %s\n", - branch_device ? 
"yes" : "no"); + str_yes_no(branch_device)); if (!branch_device) return; diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c index 791379816837..60923cdfe8e1 100644 --- a/drivers/gpu/drm/drm_bridge_connector.c +++ b/drivers/gpu/drm/drm_bridge_connector.c @@ -216,6 +216,20 @@ static void drm_bridge_connector_destroy(struct drm_connector *connector) kfree(bridge_connector); } +static void drm_bridge_connector_debugfs_init(struct drm_connector *connector, + struct dentry *root) +{ + struct drm_bridge_connector *bridge_connector = + to_drm_bridge_connector(connector); + struct drm_encoder *encoder = bridge_connector->encoder; + struct drm_bridge *bridge; + + list_for_each_entry(bridge, &encoder->bridge_chain, chain_node) { + if (bridge->funcs->debugfs_init) + bridge->funcs->debugfs_init(bridge, root); + } +} + static const struct drm_connector_funcs drm_bridge_connector_funcs = { .reset = drm_atomic_helper_connector_reset, .detect = drm_bridge_connector_detect, @@ -223,6 +237,7 @@ static const struct drm_connector_funcs drm_bridge_connector_funcs = { .destroy = drm_bridge_connector_destroy, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .debugfs_init = drm_bridge_connector_debugfs_init, }; /* ----------------------------------------------------------------------------- diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index d60878bc9c20..72f52f293249 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -211,7 +211,7 @@ static int split_block(struct drm_buddy *mm, } static struct drm_buddy_block * -get_buddy(struct drm_buddy_block *block) +__get_buddy(struct drm_buddy_block *block) { struct drm_buddy_block *parent; @@ -225,6 +225,23 @@ get_buddy(struct drm_buddy_block *block) return parent->left; } +/** + * drm_get_buddy - get buddy address + * + * @block: DRM buddy block + * + * Returns the corresponding buddy block for @block, or NULL + * if this is a root block and can't be merged further. + * Requires some kind of locking to protect against + * any concurrent allocate and free operations. 
+ */ +struct drm_buddy_block * +drm_get_buddy(struct drm_buddy_block *block) +{ + return __get_buddy(block); +} +EXPORT_SYMBOL(drm_get_buddy); + static void __drm_buddy_free(struct drm_buddy *mm, struct drm_buddy_block *block) { @@ -233,7 +250,7 @@ static void __drm_buddy_free(struct drm_buddy *mm, while ((parent = block->parent)) { struct drm_buddy_block *buddy; - buddy = get_buddy(block); + buddy = __get_buddy(block); if (!drm_buddy_block_is_free(buddy)) break; @@ -282,34 +299,134 @@ void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects) } EXPORT_SYMBOL(drm_buddy_free_list); -/** - * drm_buddy_alloc_blocks - allocate power-of-two blocks - * - * @mm: DRM buddy manager to allocate from - * @order: size of the allocation - * - * The order value here translates to: - * - * 0 = 2^0 * mm->chunk_size - * 1 = 2^1 * mm->chunk_size - * 2 = 2^2 * mm->chunk_size - * - * Returns: - * allocated ptr to the &drm_buddy_block on success - */ -struct drm_buddy_block * -drm_buddy_alloc_blocks(struct drm_buddy *mm, unsigned int order) +static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2) +{ + return s1 <= e2 && e1 >= s2; +} + +static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2) +{ + return s1 <= s2 && e1 >= e2; +} + +static struct drm_buddy_block * +alloc_range_bias(struct drm_buddy *mm, + u64 start, u64 end, + unsigned int order) +{ + struct drm_buddy_block *block; + struct drm_buddy_block *buddy; + LIST_HEAD(dfs); + int err; + int i; + + end = end - 1; + + for (i = 0; i < mm->n_roots; ++i) + list_add_tail(&mm->roots[i]->tmp_link, &dfs); + + do { + u64 block_start; + u64 block_end; + + block = list_first_entry_or_null(&dfs, + struct drm_buddy_block, + tmp_link); + if (!block) + break; + + list_del(&block->tmp_link); + + if (drm_buddy_block_order(block) < order) + continue; + + block_start = drm_buddy_block_offset(block); + block_end = block_start + drm_buddy_block_size(mm, block) - 1; + + if (!overlaps(start, end, block_start, block_end)) + continue; + + if (drm_buddy_block_is_allocated(block)) + continue; + + if (contains(start, end, block_start, block_end) && + order == drm_buddy_block_order(block)) { + /* + * Find the free block within the range. + */ + if (drm_buddy_block_is_free(block)) + return block; + + continue; + } + + if (!drm_buddy_block_is_split(block)) { + err = split_block(mm, block); + if (unlikely(err)) + goto err_undo; + } + + list_add(&block->right->tmp_link, &dfs); + list_add(&block->left->tmp_link, &dfs); + } while (1); + + return ERR_PTR(-ENOSPC); + +err_undo: + /* + * We really don't want to leave around a bunch of split blocks, since + * bigger is better, so make sure we merge everything back before we + * free the allocated blocks. 
+ */ + buddy = __get_buddy(block); + if (buddy && + (drm_buddy_block_is_free(block) && + drm_buddy_block_is_free(buddy))) + __drm_buddy_free(mm, block); + return ERR_PTR(err); +} + +static struct drm_buddy_block * +get_maxblock(struct list_head *head) +{ + struct drm_buddy_block *max_block = NULL, *node; + + max_block = list_first_entry_or_null(head, + struct drm_buddy_block, + link); + if (!max_block) + return NULL; + + list_for_each_entry(node, head, link) { + if (drm_buddy_block_offset(node) > + drm_buddy_block_offset(max_block)) + max_block = node; + } + + return max_block; +} + +static struct drm_buddy_block * +alloc_from_freelist(struct drm_buddy *mm, + unsigned int order, + unsigned long flags) { struct drm_buddy_block *block = NULL; unsigned int i; int err; for (i = order; i <= mm->max_order; ++i) { - block = list_first_entry_or_null(&mm->free_list[i], - struct drm_buddy_block, - link); - if (block) - break; + if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) { + block = get_maxblock(&mm->free_list[i]); + if (block) + break; + } else { + block = list_first_entry_or_null(&mm->free_list[i], + struct drm_buddy_block, + link); + if (block) + break; + } } if (!block) @@ -320,78 +437,29 @@ drm_buddy_alloc_blocks(struct drm_buddy *mm, unsigned int order) while (i != order) { err = split_block(mm, block); if (unlikely(err)) - goto out_free; + goto err_undo; - /* Go low */ - block = block->left; + block = block->right; i--; } - - mark_allocated(block); - mm->avail -= drm_buddy_block_size(mm, block); - kmemleak_update_trace(block); return block; -out_free: +err_undo: if (i != order) __drm_buddy_free(mm, block); return ERR_PTR(err); } -EXPORT_SYMBOL(drm_buddy_alloc_blocks); - -static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2) -{ - return s1 <= e2 && e1 >= s2; -} -static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2) -{ - return s1 <= s2 && e1 >= e2; -} - -/** - * drm_buddy_alloc_range - allocate range - * - * @mm: DRM buddy manager to allocate from - * @blocks: output list head to add allocated blocks - * @start: start of the allowed range for this block - * @size: size of the allocation - * - * Intended for pre-allocating portions of the address space, for example to - * reserve a block for the initial framebuffer or similar, hence the expectation - * here is that drm_buddy_alloc_blocks() is still the main vehicle for - * allocations, so if that's not the case then the drm_mm range allocator is - * probably a much better fit, and so you should probably go use that instead. - * - * Note that it's safe to chain together multiple alloc_ranges - * with the same blocks list - * - * Returns: - * 0 on success, error code on failure. 
- */ -int drm_buddy_alloc_range(struct drm_buddy *mm, - struct list_head *blocks, - u64 start, u64 size) +static int __alloc_range(struct drm_buddy *mm, + struct list_head *dfs, + u64 start, u64 size, + struct list_head *blocks) { struct drm_buddy_block *block; struct drm_buddy_block *buddy; LIST_HEAD(allocated); - LIST_HEAD(dfs); u64 end; int err; - int i; - - if (size < mm->chunk_size) - return -EINVAL; - - if (!IS_ALIGNED(size | start, mm->chunk_size)) - return -EINVAL; - - if (range_overflows(start, size, mm->size)) - return -EINVAL; - - for (i = 0; i < mm->n_roots; ++i) - list_add_tail(&mm->roots[i]->tmp_link, &dfs); end = start + size - 1; @@ -399,7 +467,7 @@ int drm_buddy_alloc_range(struct drm_buddy *mm, u64 block_start; u64 block_end; - block = list_first_entry_or_null(&dfs, + block = list_first_entry_or_null(dfs, struct drm_buddy_block, tmp_link); if (!block) @@ -436,8 +504,8 @@ int drm_buddy_alloc_range(struct drm_buddy *mm, goto err_undo; } - list_add(&block->right->tmp_link, &dfs); - list_add(&block->left->tmp_link, &dfs); + list_add(&block->right->tmp_link, dfs); + list_add(&block->left->tmp_link, dfs); } while (1); list_splice_tail(&allocated, blocks); @@ -449,7 +517,7 @@ err_undo: * bigger is better, so make sure we merge everything back before we * free the allocated blocks. */ - buddy = get_buddy(block); + buddy = __get_buddy(block); if (buddy && (drm_buddy_block_is_free(block) && drm_buddy_block_is_free(buddy))) @@ -459,7 +527,189 @@ err_free: drm_buddy_free_list(mm, &allocated); return err; } -EXPORT_SYMBOL(drm_buddy_alloc_range); + +static int __drm_buddy_alloc_range(struct drm_buddy *mm, + u64 start, + u64 size, + struct list_head *blocks) +{ + LIST_HEAD(dfs); + int i; + + for (i = 0; i < mm->n_roots; ++i) + list_add_tail(&mm->roots[i]->tmp_link, &dfs); + + return __alloc_range(mm, &dfs, start, size, blocks); +} + +/** + * drm_buddy_block_trim - free unused pages + * + * @mm: DRM buddy manager + * @new_size: original size requested + * @blocks: Input and output list of allocated blocks. + * MUST contain single block as input to be trimmed. + * On success will contain the newly allocated blocks + * making up the @new_size. Blocks always appear in + * ascending order + * + * For contiguous allocation, we round up the size to the nearest + * power of two value, drivers consume *actual* size, so remaining + * portions are unused and can be optionally freed with this function + * + * Returns: + * 0 on success, error code on failure. 
+ */ +int drm_buddy_block_trim(struct drm_buddy *mm, + u64 new_size, + struct list_head *blocks) +{ + struct drm_buddy_block *parent; + struct drm_buddy_block *block; + LIST_HEAD(dfs); + u64 new_start; + int err; + + if (!list_is_singular(blocks)) + return -EINVAL; + + block = list_first_entry(blocks, + struct drm_buddy_block, + link); + + if (WARN_ON(!drm_buddy_block_is_allocated(block))) + return -EINVAL; + + if (new_size > drm_buddy_block_size(mm, block)) + return -EINVAL; + + if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size)) + return -EINVAL; + + if (new_size == drm_buddy_block_size(mm, block)) + return 0; + + list_del(&block->link); + mark_free(mm, block); + mm->avail += drm_buddy_block_size(mm, block); + + /* Prevent recursively freeing this node */ + parent = block->parent; + block->parent = NULL; + + new_start = drm_buddy_block_offset(block); + list_add(&block->tmp_link, &dfs); + err = __alloc_range(mm, &dfs, new_start, new_size, blocks); + if (err) { + mark_allocated(block); + mm->avail -= drm_buddy_block_size(mm, block); + list_add(&block->link, blocks); + } + + block->parent = parent; + return err; +} +EXPORT_SYMBOL(drm_buddy_block_trim); + +/** + * drm_buddy_alloc_blocks - allocate power-of-two blocks + * + * @mm: DRM buddy manager to allocate from + * @start: start of the allowed range for this block + * @end: end of the allowed range for this block + * @size: size of the allocation + * @min_page_size: alignment of the allocation + * @blocks: output list head to add allocated blocks + * @flags: DRM_BUDDY_*_ALLOCATION flags + * + * alloc_range_bias() called on range limitations, which traverses + * the tree and returns the desired block. + * + * alloc_from_freelist() called when *no* range restrictions + * are enforced, which picks the block from the freelist. + * + * Returns: + * 0 on success, error code on failure. 
+ */ +int drm_buddy_alloc_blocks(struct drm_buddy *mm, + u64 start, u64 end, u64 size, + u64 min_page_size, + struct list_head *blocks, + unsigned long flags) +{ + struct drm_buddy_block *block = NULL; + unsigned int min_order, order; + unsigned long pages; + LIST_HEAD(allocated); + int err; + + if (size < mm->chunk_size) + return -EINVAL; + + if (min_page_size < mm->chunk_size) + return -EINVAL; + + if (!is_power_of_2(min_page_size)) + return -EINVAL; + + if (!IS_ALIGNED(start | end | size, mm->chunk_size)) + return -EINVAL; + + if (end > mm->size) + return -EINVAL; + + if (range_overflows(start, size, mm->size)) + return -EINVAL; + + /* Actual range allocation */ + if (start + size == end) + return __drm_buddy_alloc_range(mm, start, size, blocks); + + pages = size >> ilog2(mm->chunk_size); + order = fls(pages) - 1; + min_order = ilog2(min_page_size) - ilog2(mm->chunk_size); + + do { + order = min(order, (unsigned int)fls(pages) - 1); + BUG_ON(order > mm->max_order); + BUG_ON(order < min_order); + + do { + if (flags & DRM_BUDDY_RANGE_ALLOCATION) + /* Allocate traversing within the range */ + block = alloc_range_bias(mm, start, end, order); + else + /* Allocate from freelist */ + block = alloc_from_freelist(mm, order, flags); + + if (!IS_ERR(block)) + break; + + if (order-- == min_order) { + err = -ENOSPC; + goto err_free; + } + } while (1); + + mark_allocated(block); + mm->avail -= drm_buddy_block_size(mm, block); + kmemleak_update_trace(block); + list_add_tail(&block->link, &allocated); + + pages -= BIT(order); + + if (!pages) + break; + } while (1); + + list_splice_tail(&allocated, blocks); + return 0; + +err_free: + drm_buddy_free_list(mm, &allocated); + return err; +} +EXPORT_SYMBOL(drm_buddy_alloc_blocks); /** * drm_buddy_block_print - print block information diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 4b0da6baff78..c3e6e615bf09 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -112,8 +112,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages) kunmap_atomic(page_virtual); } #else - pr_err("Architecture has no drm_cache.c support\n"); - WARN_ON_ONCE(1); + WARN_ONCE(1, "Architecture has no drm_cache.c support\n"); #endif } EXPORT_SYMBOL(drm_clflush_pages); @@ -143,8 +142,7 @@ drm_clflush_sg(struct sg_table *st) if (wbinvd_on_all_cpus()) pr_err("Timed out waiting for cache flush\n"); #else - pr_err("Architecture has no drm_cache.c support\n"); - WARN_ON_ONCE(1); + WARN_ONCE(1, "Architecture has no drm_cache.c support\n"); #endif } EXPORT_SYMBOL(drm_clflush_sg); @@ -177,8 +175,7 @@ drm_clflush_virt_range(void *addr, unsigned long length) if (wbinvd_on_all_cpus()) pr_err("Timed out waiting for cache flush\n"); #else - pr_err("Architecture has no drm_cache.c support\n"); - WARN_ON_ONCE(1); + WARN_ONCE(1, "Architecture has no drm_cache.c support\n"); #endif } EXPORT_SYMBOL(drm_clflush_virt_range); diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index ced09c7c06f9..e6346a67cd98 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> +#include <linux/string_helpers.h> #include <drm/drm_atomic.h> #include <drm/drm_client.h> @@ -241,7 +242,7 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors, connector = connectors[i]; enabled[i] = drm_connector_enabled(connector, true); DRM_DEBUG_KMS("connector %d enabled? 
%s\n", connector->base.id, - connector->display_info.non_desktop ? "non desktop" : enabled[i] ? "yes" : "no"); + connector->display_info.non_desktop ? "non desktop" : str_yes_no(enabled[i])); any_enabled |= enabled[i]; } diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index b0a826489488..7f1b82dbaebb 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -436,6 +436,9 @@ void drm_debugfs_connector_add(struct drm_connector *connector) /* vrr range */ debugfs_create_file("vrr_range", S_IRUGO, root, connector, &vrr_range_fops); + + if (connector->funcs->debugfs_init) + connector->funcs->debugfs_init(connector, root); } void drm_debugfs_connector_remove(struct drm_connector *connector) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index a504542238ed..a7663f9a11d2 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -5340,6 +5340,9 @@ drm_reset_display_info(struct drm_connector *connector) info->rgb_quant_range_selectable = false; memset(&info->hdmi, 0, sizeof(info->hdmi)); + info->edid_hdmi_rgb444_dc_modes = 0; + info->edid_hdmi_ycbcr444_dc_modes = 0; + info->non_desktop = 0; memset(&info->monitor_range, 0, sizeof(info->monitor_range)); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 6f72627369f8..d265a73313c9 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -680,6 +680,31 @@ static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y, schedule_work(&helper->damage_work); } +/* Convert memory region into area of scanlines and pixels per scanline */ +static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len, + struct drm_rect *clip) +{ + off_t end = off + len; + u32 x1 = 0; + u32 y1 = off / info->fix.line_length; + u32 x2 = info->var.xres; + u32 y2 = DIV_ROUND_UP(end, info->fix.line_length); + + if ((y2 - y1) == 1) { + /* + * We've only written to a single scanline. Try to reduce + * the number of horizontal pixels that need an update. 
+ */ + off_t bit_off = (off % info->fix.line_length) * 8; + off_t bit_end = (end % info->fix.line_length) * 8; + + x1 = bit_off / info->var.bits_per_pixel; + x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel); + } + + drm_rect_init(clip, x1, y1, x2 - x1, y2 - y1); +} + /** * drm_fb_helper_deferred_io() - fbdev deferred_io callback function * @info: fb_info struct pointer @@ -693,23 +718,23 @@ void drm_fb_helper_deferred_io(struct fb_info *info, { unsigned long start, end, min, max; struct page *page; - u32 y1, y2; + struct drm_rect damage_area; min = ULONG_MAX; max = 0; list_for_each_entry(page, pagelist, lru) { start = page->index << PAGE_SHIFT; - end = start + PAGE_SIZE - 1; + end = start + PAGE_SIZE; min = min(min, start); max = max(max, end); } + if (min >= max) + return; - if (min < max) { - y1 = min / info->fix.line_length; - y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length), - info->var.yres); - drm_fb_helper_damage(info, 0, y1, info->var.xres, y2 - y1); - } + drm_fb_helper_memory_range_to_clip(info, min, max - min, &damage_area); + drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, + drm_rect_width(&damage_area), + drm_rect_height(&damage_area)); } EXPORT_SYMBOL(drm_fb_helper_deferred_io); @@ -741,11 +766,18 @@ EXPORT_SYMBOL(drm_fb_helper_sys_read); ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, size_t count, loff_t *ppos) { + loff_t pos = *ppos; ssize_t ret; + struct drm_rect damage_area; ret = fb_sys_write(info, buf, count, ppos); - if (ret > 0) - drm_fb_helper_damage(info, 0, 0, info->var.xres, info->var.yres); + if (ret <= 0) + return ret; + + drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); + drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, + drm_rect_width(&damage_area), + drm_rect_height(&damage_area)); return ret; } @@ -2224,6 +2256,7 @@ static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf, loff_t pos = *ppos; size_t total_size; ssize_t ret; + struct drm_rect damage_area; int err = 0; if (info->screen_size) @@ -2252,13 +2285,19 @@ static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf, else ret = fb_write_screen_buffer(info, buf, count, pos); - if (ret > 0) - *ppos += ret; + if (ret < 0) + return ret; /* return last error, if any */ + else if (!ret) + return err; /* return previous error, if any */ - if (ret > 0) - drm_fb_helper_damage(info, 0, 0, info->var.xres_virtual, info->var.yres_virtual); + *ppos += ret; - return ret ? 
ret : err; + drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); + drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, + drm_rect_width(&damage_area), + drm_rect_height(&damage_area)); + + return ret; } static void drm_fbdev_fb_fillrect(struct fb_info *info, @@ -2346,6 +2385,7 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->fbops = &drm_fbdev_fb_ops; fbi->screen_size = sizes->surface_height * fb->pitches[0]; fbi->fix.smem_len = fbi->screen_size; + fbi->flags = FBINFO_DEFAULT; drm_fb_helper_fill_info(fbi, fb_helper, sizes); @@ -2353,19 +2393,21 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->screen_buffer = vzalloc(fbi->screen_size); if (!fbi->screen_buffer) return -ENOMEM; + fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; fbi->fbdefio = &drm_fbdev_defio; - fb_deferred_io_init(fbi); } else { /* buffer is mapped for HW framebuffer */ ret = drm_client_buffer_vmap(fb_helper->buffer, &map); if (ret) return ret; - if (map.is_iomem) + if (map.is_iomem) { fbi->screen_base = map.vaddr_iomem; - else + } else { fbi->screen_buffer = map.vaddr; + fbi->flags |= FBINFO_VIRTFB; + } /* * Shamelessly leak the physical address to user-space. As diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index 0f28dd2bdd72..bc0f49773868 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -12,9 +12,11 @@ #include <linux/slab.h> #include <linux/io.h> +#include <drm/drm_device.h> #include <drm/drm_format_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_fourcc.h> +#include <drm/drm_print.h> #include <drm/drm_rect.h> static unsigned int clip_offset(const struct drm_rect *clip, unsigned int pitch, unsigned int cpp) @@ -464,6 +466,21 @@ void drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst, } EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010_toio); +static void drm_fb_xrgb8888_to_gray8_line(u8 *dst, const u32 *src, unsigned int pixels) +{ + unsigned int x; + + for (x = 0; x < pixels; x++) { + u8 r = (*src & 0x00ff0000) >> 16; + u8 g = (*src & 0x0000ff00) >> 8; + u8 b = *src & 0x000000ff; + + /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */ + *dst++ = (3 * r + 6 * g + b) / 10; + src++; + } +} + /** * drm_fb_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale * @dst: 8-bit grayscale destination buffer @@ -484,8 +501,9 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010_toio); void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vaddr, const struct drm_framebuffer *fb, const struct drm_rect *clip) { - unsigned int len = (clip->x2 - clip->x1) * sizeof(u32); - unsigned int x, y; + unsigned int linepixels = clip->x2 - clip->x1; + unsigned int len = linepixels * sizeof(u32); + unsigned int y; void *buf; u8 *dst8; u32 *src32; @@ -508,16 +526,7 @@ void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vad for (y = clip->y1; y < clip->y2; y++) { dst8 = dst; src32 = memcpy(buf, vaddr, len); - for (x = clip->x1; x < clip->x2; x++) { - u8 r = (*src32 & 0x00ff0000) >> 16; - u8 g = (*src32 & 0x0000ff00) >> 8; - u8 b = *src32 & 0x000000ff; - - /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */ - *dst8++ = (3 * r + 6 * g + b) / 10; - src32++; - } - + drm_fb_xrgb8888_to_gray8_line(dst8, src32, linepixels); vaddr += fb->pitches[0]; dst += dst_pitch; } @@ -584,3 +593,111 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for return -EINVAL; } EXPORT_SYMBOL(drm_fb_blit_toio); + +static void 
drm_fb_gray8_to_mono_reversed_line(u8 *dst, const u8 *src, unsigned int pixels, + unsigned int start_offset, unsigned int end_len) +{ + unsigned int xb, i; + + for (xb = 0; xb < pixels; xb++) { + unsigned int start = 0, end = 8; + u8 byte = 0x00; + + if (xb == 0 && start_offset) + start = start_offset; + + if (xb == pixels - 1 && end_len) + end = end_len; + + for (i = start; i < end; i++) { + unsigned int x = xb * 8 + i; + + byte >>= 1; + if (src[x] >> 7) + byte |= BIT(7); + } + *dst++ = byte; + } +} + +/** + * drm_fb_xrgb8888_to_mono_reversed - Convert XRGB8888 to reversed monochrome + * @dst: reversed monochrome destination buffer + * @dst_pitch: Number of bytes between two consecutive scanlines within dst + * @src: XRGB8888 source buffer + * @fb: DRM framebuffer + * @clip: Clip rectangle area to copy + * + * DRM doesn't have native monochrome support. + * Drivers for monochrome displays can announce the commonly supported XR24 format to userspace + * and use this function to convert to the native format. + * + * This function uses drm_fb_xrgb8888_to_gray8() to convert to grayscale and + * then the result is converted from grayscale to reversed monochrome. + */ +void drm_fb_xrgb8888_to_mono_reversed(void *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip) +{ + unsigned int linepixels = drm_rect_width(clip); + unsigned int lines = clip->y2 - clip->y1; + unsigned int cpp = fb->format->cpp[0]; + unsigned int len_src32 = linepixels * cpp; + struct drm_device *dev = fb->dev; + unsigned int start_offset, end_len; + unsigned int y; + u8 *mono = dst, *gray8; + u32 *src32; + + if (drm_WARN_ON(dev, fb->format->format != DRM_FORMAT_XRGB8888)) + return; + + /* + * The reversed mono destination buffer contains 1 bit per pixel + * and destination scanlines have to be a multiple of 8 pixels. + */ + if (!dst_pitch) + dst_pitch = DIV_ROUND_UP(linepixels, 8); + + drm_WARN_ONCE(dev, dst_pitch % 8 != 0, "dst_pitch is not a multiple of 8\n"); + + /* + * The cma memory is write-combined so reads are uncached. + * Speed up by fetching one line at a time. + * + * Also, format conversion from XR24 to reversed monochrome + * is done line-by-line, using 8-bit grayscale + * as an intermediate step. + * + * Allocate a buffer to be used for both copying from the cma + * memory and to store the intermediate grayscale line pixels. + */ + src32 = kmalloc(len_src32 + linepixels, GFP_KERNEL); + if (!src32) + return; + + gray8 = (u8 *)src32 + len_src32; + + /* + * For damage handling, it is possible that only parts of the source + * buffer are copied and this could lead to start and end pixels that + * are not aligned to a multiple of 8. + * + * Calculate if the start and end pixels are not aligned and set the + * offsets for the reversed mono line conversion function to adjust. 
+ */ + start_offset = clip->x1 % 8; + end_len = clip->x2 % 8; + + vaddr += clip_offset(clip, fb->pitches[0], cpp); + for (y = 0; y < lines; y++) { + src32 = memcpy(src32, vaddr, len_src32); + drm_fb_xrgb8888_to_gray8_line(gray8, src32, linepixels); + drm_fb_gray8_to_mono_reversed_line(mono, gray8, dst_pitch, + start_offset, end_len); + vaddr += fb->pitches[0]; + mono += dst_pitch; + } + + kfree(src32); +} +EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono_reversed); diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 07f5abc875e9..4562a8b86579 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -309,7 +309,7 @@ drm_internal_framebuffer_create(struct drm_device *dev, } if (r->flags & DRM_MODE_FB_MODIFIERS && - !dev->mode_config.allow_fb_modifiers) { + dev->mode_config.fb_modifiers_not_supported) { DRM_DEBUG_KMS("driver does not support fb modifiers\n"); return ERR_PTR(-EINVAL); } @@ -594,7 +594,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev, r->pixel_format = fb->format->format; r->flags = 0; - if (dev->mode_config.allow_fb_modifiers) + if (!dev->mode_config.fb_modifiers_not_supported) r->flags |= DRM_MODE_FB_MODIFIERS; for (i = 0; i < ARRAY_SIZE(r->handles); i++) { @@ -607,7 +607,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev, for (i = 0; i < fb->format->num_planes; i++) { r->pitches[i] = fb->pitches[i]; r->offsets[i] = fb->offsets[i]; - if (dev->mode_config.allow_fb_modifiers) + if (!dev->mode_config.fb_modifiers_not_supported) r->modifier[i] = fb->modifier; } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 8c7b24f4b0e4..56fb87885146 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -25,20 +25,21 @@ * */ -#include <linux/types.h> -#include <linux/slab.h> -#include <linux/mm.h> -#include <linux/uaccess.h> -#include <linux/fs.h> -#include <linux/file.h> -#include <linux/module.h> -#include <linux/mman.h> -#include <linux/pagemap.h> -#include <linux/shmem_fs.h> #include <linux/dma-buf.h> +#include <linux/file.h> +#include <linux/fs.h> #include <linux/iosys-map.h> #include <linux/mem_encrypt.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/module.h> +#include <linux/pagemap.h> #include <linux/pagevec.h> +#include <linux/shmem_fs.h> +#include <linux/slab.h> +#include <linux/string_helpers.h> +#include <linux/types.h> +#include <linux/uaccess.h> #include <drm/drm.h> #include <drm/drm_device.h> @@ -1145,7 +1146,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent, drm_vma_node_start(&obj->vma_node)); drm_printf_indent(p, indent, "size=%zu\n", obj->size); drm_printf_indent(p, indent, "imported=%s\n", - obj->import_attach ? 
"yes" : "no"); + str_yes_no(obj->import_attach)); if (obj->funcs->print_info) obj->funcs->print_info(p, indent, obj); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 3e738aea2664..8ad0e02991ca 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -46,6 +46,7 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { .vmap = drm_gem_shmem_object_vmap, .vunmap = drm_gem_shmem_object_vunmap, .mmap = drm_gem_shmem_object_mmap, + .vm_ops = &drm_gem_shmem_vm_ops, }; static struct drm_gem_shmem_object * @@ -588,11 +589,12 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) drm_gem_vm_close(vma); } -static const struct vm_operations_struct drm_gem_shmem_vm_ops = { +const struct vm_operations_struct drm_gem_shmem_vm_ops = { .fault = drm_gem_shmem_fault, .open = drm_gem_shmem_vm_open, .close = drm_gem_shmem_vm_close, }; +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops); /** * drm_gem_shmem_mmap - Memory-map a shmem GEM object @@ -624,11 +626,10 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct return ret; } - vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND; + vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); if (shmem->map_wc) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); - vma->vm_ops = &drm_gem_shmem_vm_ops; return 0; } diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 8b8744dcf691..51fcf1298023 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -297,7 +297,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_ req->value = 64; break; case DRM_CAP_ADDFB2_MODIFIERS: - req->value = dev->mode_config.allow_fb_modifiers; + req->value = !dev->mode_config.fb_modifiers_not_supported; break; case DRM_CAP_CRTC_IN_VBLANK_EVENT: req->value = 1; diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 1c72208d8133..96b13e36293c 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -880,7 +880,7 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo); * @dst: mode to overwrite * @src: mode to copy * - * Copy an existing mode into another mode, preserving the object id and + * Copy an existing mode into another mode, preserving the * list head of the destination mode. 
*/ void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src) diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index deeec60a3315..bf0daa8d9bbd 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -237,6 +237,9 @@ static int __drm_universal_plane_init(struct drm_device *dev, const char *name, va_list ap) { struct drm_mode_config *config = &dev->mode_config; + static const uint64_t default_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + }; unsigned int format_modifier_count = 0; int ret; @@ -277,16 +280,16 @@ static int __drm_universal_plane_init(struct drm_device *dev, while (*temp_modifiers++ != DRM_FORMAT_MOD_INVALID) format_modifier_count++; + } else { + if (!dev->mode_config.fb_modifiers_not_supported) { + format_modifiers = default_modifiers; + format_modifier_count = ARRAY_SIZE(default_modifiers); + } } /* autoset the cap and check for consistency across all planes */ - if (format_modifier_count) { - drm_WARN_ON(dev, !config->allow_fb_modifiers && - !list_empty(&config->plane_list)); - config->allow_fb_modifiers = true; - } else { - drm_WARN_ON(dev, config->allow_fb_modifiers); - } + drm_WARN_ON(dev, config->fb_modifiers_not_supported && + format_modifier_count); plane->modifier_count = format_modifier_count; plane->modifiers = kmalloc_array(format_modifier_count, @@ -341,7 +344,7 @@ static int __drm_universal_plane_init(struct drm_device *dev, drm_object_attach_property(&plane->base, config->prop_src_h, 0); } - if (config->allow_fb_modifiers) + if (format_modifier_count) create_in_format_blob(dev, plane); return 0; @@ -368,8 +371,8 @@ static int __drm_universal_plane_init(struct drm_device *dev, * drm_universal_plane_init() to let the DRM managed resource infrastructure * take care of cleanup and deallocation. * - * Drivers supporting modifiers must set @format_modifiers on all their planes, - * even those that only support DRM_FORMAT_MOD_LINEAR. + * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier support may set + * @format_modifiers to NULL. The plane will advertise the linear modifier. * * Returns: * Zero on success, error code on failure. diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c index 2b082e5e9732..6cc39e30781f 100644 --- a/drivers/gpu/drm/drm_privacy_screen.c +++ b/drivers/gpu/drm/drm_privacy_screen.c @@ -379,6 +379,7 @@ static void drm_privacy_screen_device_release(struct device *dev) * drm_privacy_screen_register - register a privacy-screen * @parent: parent-device for the privacy-screen * @ops: &struct drm_privacy_screen_ops pointer with ops for the privacy-screen + * @data: Private data owned by the privacy screen provider * * Create and register a privacy-screen. * diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index c313a5b4549c..7e48dcd1bee4 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -853,12 +853,57 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, &args->handle); } + +/* + * Try to flatten a dma_fence_chain into a dma_fence_array so that it can be + * added as timeline fence to a chain again. 
+ */ +static int drm_syncobj_flatten_chain(struct dma_fence **f) +{ + struct dma_fence_chain *chain = to_dma_fence_chain(*f); + struct dma_fence *tmp, **fences; + struct dma_fence_array *array; + unsigned int count; + + if (!chain) + return 0; + + count = 0; + dma_fence_chain_for_each(tmp, &chain->base) + ++count; + + fences = kmalloc_array(count, sizeof(*fences), GFP_KERNEL); + if (!fences) + return -ENOMEM; + + count = 0; + dma_fence_chain_for_each(tmp, &chain->base) + fences[count++] = dma_fence_get(tmp); + + array = dma_fence_array_create(count, fences, + dma_fence_context_alloc(1), + 1, false); + if (!array) + goto free_fences; + + dma_fence_put(*f); + *f = &array->base; + return 0; + +free_fences: + while (count--) + dma_fence_put(fences[count]); + + kfree(fences); + return -ENOMEM; +} + static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private, struct drm_syncobj_transfer *args) { struct drm_syncobj *timeline_syncobj = NULL; - struct dma_fence *fence; struct dma_fence_chain *chain; + struct dma_fence *fence; int ret; timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle); @@ -869,16 +914,22 @@ static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private, args->src_point, args->flags, &fence); if (ret) - goto err; + goto err_put_timeline; + + ret = drm_syncobj_flatten_chain(&fence); + if (ret) + goto err_free_fence; + chain = dma_fence_chain_alloc(); if (!chain) { ret = -ENOMEM; - goto err1; + goto err_free_fence; } + drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point); -err1: +err_free_fence: dma_fence_put(fence); -err: +err_put_timeline: drm_syncobj_put(timeline_syncobj); return ret; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 58f593b278c1..35e5ef7dbdcc 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -195,7 +195,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu) ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, etnaviv_hw_jobs_limit, etnaviv_job_hang_limit, msecs_to_jiffies(500), NULL, NULL, - dev_name(gpu->dev)); + dev_name(gpu->dev), gpu->dev); if (ret) return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 9743b6b17447..c68498497c0b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -455,6 +455,9 @@ static int exynos_drm_init(void) { int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + ret = exynos_drm_register_devices(); if (ret) return ret; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 660fe573db96..7a503bf08d0f 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -24,6 +24,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_modeset_helper.h> +#include <drm/drm_module.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -368,7 +369,7 @@ static struct platform_driver fsl_dcu_drm_platform_driver = { }, }; -module_platform_driver(fsl_dcu_drm_platform_driver); +drm_module_platform_driver(fsl_dcu_drm_platform_driver); MODULE_DESCRIPTION("Freescale DCU DRM Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 65cf1c79dd7c..eeb681be9c95 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -538,6 +538,9 @@ static struct pci_driver 
psb_pci_driver = { static int __init psb_init(void) { + if (drm_firmware_drivers_only()) + return -ENODEV; + return pci_register_driver(&psb_pci_driver); } diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig index 43943e980203..073adfe438dd 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig +++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_HISI_HIBMC tristate "DRM Support for Hisilicon Hibmc" - depends on DRM && PCI && ARM64 + depends on DRM && PCI && (ARM64 || COMPILE_TEST) select DRM_KMS_HELPER select DRM_VRAM_HELPER select DRM_TTM diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 98ae9a48f3fe..3cf057269f2a 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -23,6 +23,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_module.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -307,7 +308,7 @@ static struct platform_driver kirin_drm_platform_driver = { }, }; -module_platform_driver(kirin_drm_platform_driver); +drm_module_platform_driver(kirin_drm_platform_driver); MODULE_AUTHOR("Xinliang Liu <xinliang.liu@linaro.org>"); MODULE_AUTHOR("Xinliang Liu <z.liuxinliang@hisilicon.com>"); diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index 00e53de4812b..4a8941fa0815 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -305,6 +305,9 @@ static int __init hyperv_init(void) { int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + ret = pci_register_driver(&hyperv_pci_driver); if (ret != 0) return ret; diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c index 247714bab044..76d5211c25eb 100644 --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c @@ -36,13 +36,14 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); struct i915_ttm_buddy_resource *bman_res; struct drm_buddy *mm = &bman->mm; - unsigned long n_pages; - unsigned int min_order; + unsigned long n_pages, lpfn; u64 min_page_size; u64 size; int err; - GEM_BUG_ON(place->fpfn || place->lpfn); + lpfn = place->lpfn; + if (!lpfn) + lpfn = man->size; bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL); if (!bman_res) @@ -52,6 +53,12 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, INIT_LIST_HEAD(&bman_res->blocks); bman_res->mm = mm; + if (place->flags & TTM_PL_FLAG_TOPDOWN) + bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; + + if (place->fpfn || lpfn != man->size) + bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION; + GEM_BUG_ON(!bman_res->base.num_pages); size = bman_res->base.num_pages << PAGE_SHIFT; @@ -60,10 +67,16 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, min_page_size = bo->page_alignment << PAGE_SHIFT; GEM_BUG_ON(min_page_size < mm->chunk_size); - min_order = ilog2(min_page_size) - ilog2(mm->chunk_size); + if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { + unsigned long pages; + size = roundup_pow_of_two(size); - min_order = ilog2(size) - ilog2(mm->chunk_size); + min_page_size = size; + + pages = size >> ilog2(mm->chunk_size); + if (pages > lpfn) + lpfn 
= pages; } if (size > mm->size) { @@ -73,34 +86,26 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, n_pages = size >> ilog2(mm->chunk_size); - do { - struct drm_buddy_block *block; - unsigned int order; - - order = fls(n_pages) - 1; - GEM_BUG_ON(order > mm->max_order); - GEM_BUG_ON(order < min_order); - - do { - mutex_lock(&bman->lock); - block = drm_buddy_alloc_blocks(mm, order); - mutex_unlock(&bman->lock); - if (!IS_ERR(block)) - break; - - if (order-- == min_order) { - err = -ENOSPC; - goto err_free_blocks; - } - } while (1); - - n_pages -= BIT(order); + mutex_lock(&bman->lock); + err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, + (u64)lpfn << PAGE_SHIFT, + (u64)n_pages << PAGE_SHIFT, + min_page_size, + &bman_res->blocks, + bman_res->flags); + mutex_unlock(&bman->lock); + if (unlikely(err)) + goto err_free_blocks; - list_add_tail(&block->link, &bman_res->blocks); + if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { + u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT; - if (!n_pages) - break; - } while (1); + mutex_lock(&bman->lock); + drm_buddy_block_trim(mm, + original_size, + &bman_res->blocks); + mutex_unlock(&bman->lock); + } *res = &bman_res->base; return 0; @@ -268,10 +273,17 @@ int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man, { struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); struct drm_buddy *mm = &bman->mm; + unsigned long flags = 0; int ret; + flags |= DRM_BUDDY_RANGE_ALLOCATION; + mutex_lock(&bman->lock); - ret = drm_buddy_alloc_range(mm, &bman->reserved, start, size); + ret = drm_buddy_alloc_blocks(mm, start, + start + size, + size, mm->chunk_size, + &bman->reserved, + flags); mutex_unlock(&bman->lock); return ret; diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h index 312077941411..72c90b432e87 100644 --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h @@ -20,6 +20,7 @@ struct drm_buddy; * * @base: struct ttm_resource base class we extend * @blocks: the list of struct i915_buddy_block for this resource/allocation + * @flags: DRM_BUDDY_*_ALLOCATION flags * @mm: the struct i915_buddy_mm for this resource * * Extends the struct ttm_resource to manage an address space allocation with @@ -28,6 +29,7 @@ struct drm_buddy; struct i915_ttm_buddy_resource { struct ttm_resource base; struct list_head blocks; + unsigned long flags; struct drm_buddy *mm; }; diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index cb685fe2039b..a57812ec36b1 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -341,6 +341,9 @@ static struct platform_driver * const drivers[] = { static int __init imx_drm_init(void) { + if (drm_firmware_drivers_only()) + return -ENODEV; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(imx_drm_init); diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 542c4af70661..dcf44cb00821 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -65,8 +65,10 @@ struct ingenic_dma_hwdescs { struct jz_soc_info { bool needs_dev_clk; bool has_osd; + bool has_alpha; bool map_noncoherent; bool use_extended_hwdesc; + bool plane_f0_not_working; unsigned int max_width, max_height; const u32 *formats_f0, *formats_f1; unsigned int num_formats_f0, num_formats_f1; @@ -453,7 +455,7 @@ static int 
ingenic_drm_plane_atomic_check(struct drm_plane *plane, if (!crtc) return 0; - if (plane == &priv->f0) + if (priv->soc_info->plane_f0_not_working && plane == &priv->f0) return -EINVAL; crtc_state = drm_atomic_get_existing_crtc_state(state, @@ -1055,6 +1057,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) long parent_rate; unsigned int i, clone_mask = 0; int ret, irq; + u32 osdc = 0; soc_info = of_device_get_match_data(dev); if (!soc_info) { @@ -1312,7 +1315,10 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) /* Enable OSD if available */ if (soc_info->has_osd) - regmap_write(priv->map, JZ_REG_LCD_OSDC, JZ_LCD_OSDC_OSDEN); + osdc |= JZ_LCD_OSDC_OSDEN; + if (soc_info->has_alpha) + osdc |= JZ_LCD_OSDC_ALPHAEN; + regmap_write(priv->map, JZ_REG_LCD_OSDC, osdc); mutex_init(&priv->clk_mutex); priv->clock_nb.notifier_call = ingenic_drm_update_pixclk; @@ -1511,7 +1517,9 @@ static const struct jz_soc_info jz4770_soc_info = { static const struct jz_soc_info jz4780_soc_info = { .needs_dev_clk = true, .has_osd = true, + .has_alpha = true, .use_extended_hwdesc = true, + .plane_f0_not_working = true, /* REVISIT */ .max_width = 4096, .max_height = 2048, .formats_f1 = jz4770_formats_f1, @@ -1543,6 +1551,9 @@ static int ingenic_drm_init(void) { int err; + if (drm_firmware_drivers_only()) + return -ENODEV; + if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU)) { err = platform_driver_register(ingenic_ipu_driver_ptr); if (err) diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c index ed2424350773..76fef0880504 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.c +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -18,6 +18,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_module.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -628,7 +629,7 @@ static struct platform_driver kmb_platform_driver = { }, }; -module_platform_driver(kmb_platform_driver); +drm_module_platform_driver(kmb_platform_driver); MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("Keembay Display driver"); diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/drm/lib/drm_random.c index eeb155826d27..31b5a3e21911 100644 --- a/drivers/gpu/drm/lib/drm_random.c +++ b/drivers/gpu/drm/lib/drm_random.c @@ -7,10 +7,11 @@ #include "drm_random.h" -static inline u32 drm_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state) +u32 drm_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state) { return upper_32_bits((u64)prandom_u32_state(state) * ep_ro); } +EXPORT_SYMBOL(drm_prandom_u32_max_state); void drm_random_reorder(unsigned int *order, unsigned int count, struct rnd_state *state) diff --git a/drivers/gpu/drm/lib/drm_random.h b/drivers/gpu/drm/lib/drm_random.h index 4a3e94dfa0c0..5543bf0474bc 100644 --- a/drivers/gpu/drm/lib/drm_random.h +++ b/drivers/gpu/drm/lib/drm_random.h @@ -22,5 +22,7 @@ unsigned int *drm_random_order(unsigned int count, void drm_random_reorder(unsigned int *order, unsigned int count, struct rnd_state *state); +u32 drm_prandom_u32_max_state(u32 ep_ro, + struct rnd_state *state); #endif /* !__DRM_RANDOM_H__ */ diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index d0c2b1422b3b..55bb1ec3c4f7 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -214,6 +214,7 @@ static const struct drm_gem_object_funcs lima_gem_funcs = { .vmap = lima_gem_vmap, .vunmap = drm_gem_shmem_object_vunmap, .mmap = lima_gem_mmap, + .vm_ops = 
&drm_gem_shmem_vm_ops, }; struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size) diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 390c969f74ad..e82931712d8a 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -409,7 +409,8 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job drm_sched_increase_karma(&task->base); - lima_sched_build_error_task_list(task); + if (lima_max_error_tasks) + lima_sched_build_error_task_list(task); pipe->task_error(pipe); @@ -490,7 +491,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name) return drm_sched_init(&pipe->base, &lima_sched_ops, 1, lima_job_hang_limit, msecs_to_jiffies(timeout), NULL, - NULL, name); + NULL, name, pipe->ldev->dev); } void lima_sched_pipe_fini(struct lima_sched_pipe *pipe) diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index 5b5afc6aaf8e..0b2910e69b42 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -491,6 +491,9 @@ static int __init mcde_drm_register(void) { int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + ret = platform_register_drivers(component_drivers, ARRAY_SIZE(component_drivers)); if (ret) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 56ff8c57ef8f..dd029307be7d 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -236,6 +236,9 @@ static int mtk_drm_kms_init(struct drm_device *drm) struct device *dma_dev; int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + if (!iommu_present(&platform_bus_type)) return -EPROBE_DEFER; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 26aeaf0ab86e..93a7a033a3e8 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -22,6 +22,7 @@ #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_module.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -542,7 +543,7 @@ static struct platform_driver meson_drm_platform_driver = { }, }; -module_platform_driver(meson_drm_platform_driver); +drm_module_platform_driver(meson_drm_platform_driver); MODULE_AUTHOR("Jasper St. 
Pierre <jstpierre@mecheye.net>"); MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 740108a006ba..217844d71ab5 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -14,6 +14,7 @@ #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> +#include <drm/drm_module.h> #include <drm/drm_pciids.h> #include "mgag200_drv.h" @@ -375,24 +376,7 @@ static struct pci_driver mgag200_pci_driver = { .remove = mgag200_pci_remove, }; -static int __init mgag200_init(void) -{ - if (drm_firmware_drivers_only() && mgag200_modeset == -1) - return -EINVAL; - - if (mgag200_modeset == 0) - return -EINVAL; - - return pci_register_driver(&mgag200_pci_driver); -} - -static void __exit mgag200_exit(void) -{ - pci_unregister_driver(&mgag200_pci_driver); -} - -module_init(mgag200_init); -module_exit(mgag200_exit); +drm_module_pci_driver_if_modeset(mgag200_pci_driver, mgag200_modeset); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 555666e3f960..ff19c5a94d6f 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -512,6 +512,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) struct msm_kms *kms; int ret, i; + if (drm_firmware_drivers_only()) + return -ENODEV; + ddev = drm_dev_alloc(drv, dev); if (IS_ERR(ddev)) { DRM_DEV_ERROR(dev, "failed to allocate drm_device\n"); diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 3bbf574c3bdc..367a6aaa3a20 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -89,7 +89,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, ret = drm_sched_init(&ring->sched, &msm_sched_ops, num_hw_submissions, 0, sched_timeout, - NULL, NULL, to_msm_bo(ring->bo)->name); + NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev); if (ret) { goto fail; } diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 375f26d4a417..9d71c55a31c0 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -25,6 +25,7 @@ #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_mode_config.h> +#include <drm/drm_module.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -258,8 +259,7 @@ static int mxsfb_load(struct drm_device *drm, ret = mxsfb_attach_bridge(mxsfb); if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(drm->dev, "Cannot connect bridge: %d\n", ret); + dev_err_probe(drm->dev, ret, "Cannot connect bridge\n"); goto err_vblank; } @@ -419,7 +419,7 @@ static struct platform_driver mxsfb_platform_driver = { }, }; -module_platform_driver(mxsfb_platform_driver); +drm_module_platform_driver(mxsfb_platform_driver); MODULE_AUTHOR("Marek Vasut <marex@denx.de>"); MODULE_DESCRIPTION("Freescale MXS DRM/KMS driver"); diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index ae2f2abc8f5a..daf9f87477ba 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -101,7 +101,6 @@ nv40_backlight_init(struct nouveau_encoder *encoder, if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) return -ENODEV; - props->type = BACKLIGHT_RAW; props->max_brightness = 
31; *ops = &nv40_bl_ops; return 0; @@ -294,7 +293,8 @@ nv50_backlight_init(struct nouveau_backlight *bl, struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); struct nvif_object *device = &drm->client.device.object; - if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1))) + if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)) || + nv_conn->base.status != connector_status_connected) return -ENODEV; if (nv_conn->type == DCB_CONNECTOR_eDP) { @@ -342,7 +342,6 @@ nv50_backlight_init(struct nouveau_backlight *bl, else *ops = &nva3_bl_ops; - props->type = BACKLIGHT_RAW; props->max_brightness = 100; return 0; @@ -410,6 +409,7 @@ nouveau_backlight_init(struct drm_connector *connector) goto fail_alloc; } + props.type = BACKLIGHT_RAW; bl->dev = backlight_device_register(backlight_name, connector->kdev, nv_encoder, ops, &props); if (IS_ERR(bl->dev)) { diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 2b460835a438..2cd0932b3d68 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -708,10 +708,12 @@ nouveau_display_create(struct drm_device *dev) &disp->disp); if (ret == 0) { nouveau_display_create_properties(dev); - if (disp->disp.object.oclass < NV50_DISP) + if (disp->disp.object.oclass < NV50_DISP) { + dev->mode_config.fb_modifiers_not_supported = true; ret = nv04_display_create(dev); - else + } else { ret = nv50_display_create(dev); + } } } else { ret = 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 266809e511e2..46a5a1016e37 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -925,8 +925,8 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm, mutex_lock(&svmm->mutex); - ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) + - npages * sizeof(args->p.phys[0]), NULL); + ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, + struct_size(args, p.phys, npages), NULL); mutex_unlock(&svmm->mutex); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c index a11637b0f6cc..d063d0dc13c5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c @@ -21,6 +21,9 @@ * * Authors: Ben Skeggs */ + +#include <linux/string_helpers.h> + #include "aux.h" #include "pad.h" @@ -94,7 +97,7 @@ void nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *aux, bool monitor) { struct nvkm_i2c_pad *pad = aux->pad; - AUX_TRACE(aux, "monitor: %s", monitor ? "yes" : "no"); + AUX_TRACE(aux, "monitor: %s", str_yes_no(monitor)); if (monitor) nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_AUX); else diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 2720a58ccd90..eaf67b9e5f12 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -727,6 +727,9 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) DBG("%s", dev_name(dev)); + if (drm_firmware_drivers_only()) + return -ENODEV; + /* Allocate and initialize the DRM device. 
*/ ddev = drm_dev_alloc(&omap_drm_driver, dev); if (IS_ERR(ddev)) diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 434c2861bb40..bb2e47229c68 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -106,6 +106,7 @@ config DRM_PANEL_EDP depends on PM select VIDEOMODE_HELPERS select DRM_DP_AUX_BUS + select DRM_DP_HELPER help DRM panel driver for dumb eDP panels that need at most a regulator and a GPIO to be powered up. Optionally a backlight can be attached so @@ -292,6 +293,18 @@ config DRM_PANEL_NOVATEK_NT35510 around the Novatek NT35510 display controller, such as some Hydis panels. +config DRM_PANEL_NOVATEK_NT35560 + tristate "Novatek NT35560 DSI command mode panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + select VIDEOMODE_HELPERS + help + Say Y here if you want to enable the Novatek NT35560 display + controller. This panel supports DSI in both command and video + mode. This supports several panels such as Sony ACX424AKM and + ACX424AKP. + config DRM_PANEL_NOVATEK_NT35950 tristate "Novatek NT35950 DSI panel" depends on OF @@ -592,17 +605,6 @@ config DRM_PANEL_SITRONIX_ST7789V Say Y here if you want to enable support for the Sitronix ST7789V controller for 240x320 LCD panels -config DRM_PANEL_SONY_ACX424AKP - tristate "Sony ACX424AKP DSI command mode panel" - depends on OF - depends on DRM_MIPI_DSI - depends on BACKLIGHT_CLASS_DEVICE - select VIDEOMODE_HELPERS - help - Say Y here if you want to enable the Sony ACX424 display - panel. This panel supports DSI in both command and video - mode. - config DRM_PANEL_SONY_ACX565AKM tristate "Sony ACX565AKM panel" depends on GPIOLIB && OF && SPI diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index d99fbbce49d1..5740911f637c 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o +obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35560) += panel-novatek-nt35560.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35950) += panel-novatek-nt35950.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o @@ -60,7 +61,6 @@ obj-$(CONFIG_DRM_PANEL_SHARP_LS060T1SX01) += panel-sharp-ls060t1sx01.o obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o -obj-$(CONFIG_DRM_PANEL_SONY_ACX424AKP) += panel-sony-acx424akp.o obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o obj-$(CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521) += panel-sony-tulip-truly-nt35521.o obj-$(CONFIG_DRM_PANEL_TDO_TL070WSH30) += panel-tdo-tl070wsh30.o diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index a394a15dc3fb..f7bfcf63d48e 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -21,6 +21,7 @@ * DEALINGS IN THE SOFTWARE. 
*/ +#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/iopoll.h> @@ -222,6 +223,8 @@ struct panel_edp { struct gpio_desc *enable_gpio; struct gpio_desc *hpd_gpio; + const struct edp_panel_entry *detected_panel; + struct edid *edid; struct drm_display_mode override_mode; @@ -606,6 +609,28 @@ static int panel_edp_get_timings(struct drm_panel *panel, return p->desc->num_timings; } +static int detected_panel_show(struct seq_file *s, void *data) +{ + struct drm_panel *panel = s->private; + struct panel_edp *p = to_panel_edp(panel); + + if (IS_ERR(p->detected_panel)) + seq_puts(s, "UNKNOWN\n"); + else if (!p->detected_panel) + seq_puts(s, "HARDCODED\n"); + else + seq_printf(s, "%s\n", p->detected_panel->name); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(detected_panel); + +static void panel_edp_debugfs_init(struct drm_panel *panel, struct dentry *root) +{ + debugfs_create_file("detected_panel", 0600, root, panel, &detected_panel_fops); +} + static const struct drm_panel_funcs panel_edp_funcs = { .disable = panel_edp_disable, .unprepare = panel_edp_unprepare, @@ -613,6 +638,7 @@ static const struct drm_panel_funcs panel_edp_funcs = { .enable = panel_edp_enable, .get_modes = panel_edp_get_modes, .get_timings = panel_edp_get_timings, + .debugfs_init = panel_edp_debugfs_init, }; #define PANEL_EDP_BOUNDS_CHECK(to_check, bounds, field) \ @@ -666,7 +692,6 @@ static const struct edp_panel_entry *find_edp_panel(u32 panel_id); static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) { - const struct edp_panel_entry *edp_panel; struct panel_desc *desc; u32 panel_id; char vend[4]; @@ -705,14 +730,14 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) } drm_edid_decode_panel_id(panel_id, vend, &product_id); - edp_panel = find_edp_panel(panel_id); + panel->detected_panel = find_edp_panel(panel_id); /* * We're using non-optimized timings and want it really obvious that * someone needs to add an entry to the table, so we'll do a WARN_ON * splat. 
*/ - if (WARN_ON(!edp_panel)) { + if (WARN_ON(!panel->detected_panel)) { dev_warn(dev, "Unknown panel %s %#06x, using conservative timings\n", vend, product_id); @@ -734,12 +759,14 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) */ desc->delay.unprepare = 2000; desc->delay.enable = 200; + + panel->detected_panel = ERR_PTR(-EINVAL); } else { dev_info(dev, "Detected %s %s (%#06x)\n", - vend, edp_panel->name, product_id); + vend, panel->detected_panel->name, product_id); /* Update the delay; everything else comes from EDID */ - desc->delay = *edp_panel->delay; + desc->delay = *panel->detected_panel->delay; } ret = 0; @@ -1605,6 +1632,47 @@ static const struct panel_desc sharp_lq123p1jx31 = { }, }; +static const struct drm_display_mode sharp_lq140m1jw46_mode[] = { + { + .clock = 346500, + .hdisplay = 1920, + .hsync_start = 1920 + 48, + .hsync_end = 1920 + 48 + 32, + .htotal = 1920 + 48 + 32 + 80, + .vdisplay = 1080, + .vsync_start = 1080 + 3, + .vsync_end = 1080 + 3 + 5, + .vtotal = 1080 + 3 + 5 + 69, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, + }, { + .clock = 144370, + .hdisplay = 1920, + .hsync_start = 1920 + 48, + .hsync_end = 1920 + 48 + 32, + .htotal = 1920 + 48 + 32 + 80, + .vdisplay = 1080, + .vsync_start = 1080 + 3, + .vsync_end = 1080 + 3 + 5, + .vtotal = 1080 + 3 + 5 + 69, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, + }, +}; + +static const struct panel_desc sharp_lq140m1jw46 = { + .modes = sharp_lq140m1jw46_mode, + .num_modes = ARRAY_SIZE(sharp_lq140m1jw46_mode), + .bpc = 8, + .size = { + .width = 309, + .height = 174, + }, + .delay = { + .hpd_absent = 80, + .enable = 50, + .unprepare = 500, + }, +}; + static const struct drm_display_mode starry_kr122ea0sra_mode = { .clock = 147000, .hdisplay = 1920, @@ -1719,6 +1787,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "sharp,lq123p1jx31", .data = &sharp_lq123p1jx31, }, { + .compatible = "sharp,lq140m1jw46", + .data = &sharp_lq140m1jw46, + }, { .compatible = "starry,kr122ea0sra", .data = &starry_kr122ea0sra, }, { diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c new file mode 100644 index 000000000000..1b6042321ea1 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c @@ -0,0 +1,561 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * MIPI-DSI Novatek NT35560-based panel controller. + * + * Supported panels include: + * Sony ACX424AKM - a 480x854 AMOLED DSI panel + * Sony ACX424AKP - a 480x864 AMOLED DSI panel + * + * Copyright (C) Linaro Ltd. 
2019-2021 + * Author: Linus Walleij + * Based on code and know-how from Marcus Lorentzon + * Copyright (C) ST-Ericsson SA 2010 + * Based on code and know-how from Johan Olson and Joakim Wesslen + * Copyright (C) Sony Ericsson Mobile Communications 2010 + */ +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +#define NT35560_DCS_READ_ID1 0xDA +#define NT35560_DCS_READ_ID2 0xDB +#define NT35560_DCS_READ_ID3 0xDC +#define NT35560_DCS_SET_MDDI 0xAE + +/* + * Sony seems to use vendor ID 0x81 + */ +#define DISPLAY_SONY_ACX424AKP_ID1 0x8103 +#define DISPLAY_SONY_ACX424AKP_ID2 0x811a +#define DISPLAY_SONY_ACX424AKP_ID3 0x811b +/* + * The fourth ID looks like a bug, vendor IDs begin at 0x80 + * and panel 00 ... seems like default values. + */ +#define DISPLAY_SONY_ACX424AKP_ID4 0x8000 + +struct nt35560_config { + const struct drm_display_mode *vid_mode; + const struct drm_display_mode *cmd_mode; +}; + +struct nt35560 { + const struct nt35560_config *conf; + struct drm_panel panel; + struct device *dev; + struct regulator *supply; + struct gpio_desc *reset_gpio; + bool video_mode; +}; + +static const struct drm_display_mode sony_acx424akp_vid_mode = { + .clock = 27234, + .hdisplay = 480, + .hsync_start = 480 + 15, + .hsync_end = 480 + 15 + 0, + .htotal = 480 + 15 + 0 + 15, + .vdisplay = 864, + .vsync_start = 864 + 14, + .vsync_end = 864 + 14 + 1, + .vtotal = 864 + 14 + 1 + 11, + .width_mm = 48, + .height_mm = 84, + .flags = DRM_MODE_FLAG_PVSYNC, +}; + +/* + * The timings are not very helpful as the display is used in + * command mode using the maximum HS frequency. + */ +static const struct drm_display_mode sony_acx424akp_cmd_mode = { + .clock = 35478, + .hdisplay = 480, + .hsync_start = 480 + 154, + .hsync_end = 480 + 154 + 16, + .htotal = 480 + 154 + 16 + 32, + .vdisplay = 864, + .vsync_start = 864 + 1, + .vsync_end = 864 + 1 + 1, + .vtotal = 864 + 1 + 1 + 1, + /* + * Some desired refresh rate, experiments at the maximum "pixel" + * clock speed (HS clock 420 MHz) yields around 117Hz. + */ + .width_mm = 48, + .height_mm = 84, +}; + +static const struct nt35560_config sony_acx424akp_data = { + .vid_mode = &sony_acx424akp_vid_mode, + .cmd_mode = &sony_acx424akp_cmd_mode, +}; + +static const struct drm_display_mode sony_acx424akm_vid_mode = { + .clock = 27234, + .hdisplay = 480, + .hsync_start = 480 + 15, + .hsync_end = 480 + 15 + 0, + .htotal = 480 + 15 + 0 + 15, + .vdisplay = 854, + .vsync_start = 854 + 14, + .vsync_end = 854 + 14 + 1, + .vtotal = 854 + 14 + 1 + 11, + .width_mm = 46, + .height_mm = 82, + .flags = DRM_MODE_FLAG_PVSYNC, +}; + +/* + * The timings are not very helpful as the display is used in + * command mode using the maximum HS frequency. 
+ */ +static const struct drm_display_mode sony_acx424akm_cmd_mode = { + .clock = 35478, + .hdisplay = 480, + .hsync_start = 480 + 154, + .hsync_end = 480 + 154 + 16, + .htotal = 480 + 154 + 16 + 32, + .vdisplay = 854, + .vsync_start = 854 + 1, + .vsync_end = 854 + 1 + 1, + .vtotal = 854 + 1 + 1 + 1, + .width_mm = 46, + .height_mm = 82, +}; + +static const struct nt35560_config sony_acx424akm_data = { + .vid_mode = &sony_acx424akm_vid_mode, + .cmd_mode = &sony_acx424akm_cmd_mode, +}; + +static inline struct nt35560 *panel_to_nt35560(struct drm_panel *panel) +{ + return container_of(panel, struct nt35560, panel); +} + +#define FOSC 20 /* 20Mhz */ +#define SCALE_FACTOR_NS_DIV_MHZ 1000 + +static int nt35560_set_brightness(struct backlight_device *bl) +{ + struct nt35560 *nt = bl_get_data(bl); + struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); + int period_ns = 1023; + int duty_ns = bl->props.brightness; + u8 pwm_ratio; + u8 pwm_div; + u8 par; + int ret; + + if (backlight_is_blank(bl)) { + /* Disable backlight */ + par = 0x00; + ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, + &par, 1); + if (ret) { + dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret); + return ret; + } + return 0; + } + + /* Calculate the PWM duty cycle in n/256's */ + pwm_ratio = max(((duty_ns * 256) / period_ns) - 1, 1); + pwm_div = max(1, + ((FOSC * period_ns) / 256) / + SCALE_FACTOR_NS_DIV_MHZ); + + /* Set up PWM dutycycle ONE byte (differs from the standard) */ + dev_dbg(nt->dev, "calculated duty cycle %02x\n", pwm_ratio); + ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, + &pwm_ratio, 1); + if (ret < 0) { + dev_err(nt->dev, "failed to set display PWM ratio (%d)\n", ret); + return ret; + } + + /* + * Sequence to write PWMDIV: + * address data + * 0xF3 0xAA CMD2 Unlock + * 0x00 0x01 Enter CMD2 page 0 + * 0X7D 0x01 No reload MTP of CMD2 P1 + * 0x22 PWMDIV + * 0x7F 0xAA CMD2 page 1 lock + */ + par = 0xaa; + ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1); + if (ret < 0) { + dev_err(nt->dev, "failed to unlock CMD 2 (%d)\n", ret); + return ret; + } + par = 0x01; + ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1); + if (ret < 0) { + dev_err(nt->dev, "failed to enter page 1 (%d)\n", ret); + return ret; + } + par = 0x01; + ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1); + if (ret < 0) { + dev_err(nt->dev, "failed to disable MTP reload (%d)\n", ret); + return ret; + } + ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1); + if (ret < 0) { + dev_err(nt->dev, "failed to set PWM divisor (%d)\n", ret); + return ret; + } + par = 0xaa; + ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1); + if (ret < 0) { + dev_err(nt->dev, "failed to lock CMD 2 (%d)\n", ret); + return ret; + } + + /* Enable backlight */ + par = 0x24; + ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, + &par, 1); + if (ret < 0) { + dev_err(nt->dev, "failed to enable display backlight (%d)\n", ret); + return ret; + } + + return 0; +} + +static const struct backlight_ops nt35560_bl_ops = { + .update_status = nt35560_set_brightness, +}; + +static const struct backlight_properties nt35560_bl_props = { + .type = BACKLIGHT_RAW, + .brightness = 512, + .max_brightness = 1023, +}; + +static int nt35560_read_id(struct nt35560 *nt) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); + u8 vendor, version, panel; + u16 val; + int ret; + + ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID1, &vendor, 1); + if (ret < 0) { + dev_err(nt->dev, "could not vendor ID byte\n"); + return ret; + } + ret = mipi_dsi_dcs_read(dsi, 
NT35560_DCS_READ_ID2, &version, 1); + if (ret < 0) { + dev_err(nt->dev, "could not read device version byte\n"); + return ret; + } + ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID3, &panel, 1); + if (ret < 0) { + dev_err(nt->dev, "could not read panel ID byte\n"); + return ret; + } + + if (vendor == 0x00) { + dev_err(nt->dev, "device vendor ID is zero\n"); + return -ENODEV; + } + + val = (vendor << 8) | panel; + switch (val) { + case DISPLAY_SONY_ACX424AKP_ID1: + case DISPLAY_SONY_ACX424AKP_ID2: + case DISPLAY_SONY_ACX424AKP_ID3: + case DISPLAY_SONY_ACX424AKP_ID4: + dev_info(nt->dev, "MTP vendor: %02x, version: %02x, panel: %02x\n", + vendor, version, panel); + break; + default: + dev_info(nt->dev, "unknown vendor: %02x, version: %02x, panel: %02x\n", + vendor, version, panel); + break; + } + + return 0; +} + +static int nt35560_power_on(struct nt35560 *nt) +{ + int ret; + + ret = regulator_enable(nt->supply); + if (ret) { + dev_err(nt->dev, "failed to enable supply (%d)\n", ret); + return ret; + } + + /* Assert RESET */ + gpiod_set_value_cansleep(nt->reset_gpio, 1); + udelay(20); + /* De-assert RESET */ + gpiod_set_value_cansleep(nt->reset_gpio, 0); + usleep_range(11000, 20000); + + return 0; +} + +static void nt35560_power_off(struct nt35560 *nt) +{ + /* Assert RESET */ + gpiod_set_value_cansleep(nt->reset_gpio, 1); + usleep_range(11000, 20000); + + regulator_disable(nt->supply); +} + +static int nt35560_prepare(struct drm_panel *panel) +{ + struct nt35560 *nt = panel_to_nt35560(panel); + struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); + const u8 mddi = 3; + int ret; + + ret = nt35560_power_on(nt); + if (ret) + return ret; + + ret = nt35560_read_id(nt); + if (ret) { + dev_err(nt->dev, "failed to read panel ID (%d)\n", ret); + goto err_power_off; + } + + /* Enabe tearing mode: send TE (tearing effect) at VBLANK */ + ret = mipi_dsi_dcs_set_tear_on(dsi, + MIPI_DSI_DCS_TEAR_MODE_VBLANK); + if (ret) { + dev_err(nt->dev, "failed to enable vblank TE (%d)\n", ret); + goto err_power_off; + } + + /* + * Set MDDI + * + * This presumably deactivates the Qualcomm MDDI interface and + * selects DSI, similar code is found in other drivers such as the + * Sharp LS043T1LE01 which makes us suspect that this panel may be + * using a Novatek NT35565 or similar display driver chip that shares + * this command. Due to the lack of documentation we cannot know for + * sure. 
+ */ + ret = mipi_dsi_dcs_write(dsi, NT35560_DCS_SET_MDDI, + &mddi, sizeof(mddi)); + if (ret < 0) { + dev_err(nt->dev, "failed to set MDDI (%d)\n", ret); + goto err_power_off; + } + + /* Exit sleep mode */ + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret) { + dev_err(nt->dev, "failed to exit sleep mode (%d)\n", ret); + goto err_power_off; + } + msleep(140); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret) { + dev_err(nt->dev, "failed to turn display on (%d)\n", ret); + goto err_power_off; + } + if (nt->video_mode) { + /* In video mode turn peripheral on */ + ret = mipi_dsi_turn_on_peripheral(dsi); + if (ret) { + dev_err(nt->dev, "failed to turn on peripheral\n"); + goto err_power_off; + } + } + + return 0; + +err_power_off: + nt35560_power_off(nt); + return ret; +} + +static int nt35560_unprepare(struct drm_panel *panel) +{ + struct nt35560 *nt = panel_to_nt35560(panel); + struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); + int ret; + + ret = mipi_dsi_dcs_set_display_off(dsi); + if (ret) { + dev_err(nt->dev, "failed to turn display off (%d)\n", ret); + return ret; + } + + /* Enter sleep mode */ + ret = mipi_dsi_dcs_enter_sleep_mode(dsi); + if (ret) { + dev_err(nt->dev, "failed to enter sleep mode (%d)\n", ret); + return ret; + } + msleep(85); + + nt35560_power_off(nt); + + return 0; +} + + +static int nt35560_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct nt35560 *nt = panel_to_nt35560(panel); + const struct nt35560_config *conf = nt->conf; + struct drm_display_mode *mode; + + if (nt->video_mode) + mode = drm_mode_duplicate(connector->dev, + conf->vid_mode); + else + mode = drm_mode_duplicate(connector->dev, + conf->cmd_mode); + if (!mode) { + dev_err(panel->dev, "bad mode or failed to add mode\n"); + return -EINVAL; + } + drm_mode_set_name(mode); + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + + drm_mode_probed_add(connector, mode); + + return 1; /* Number of modes */ +} + +static const struct drm_panel_funcs nt35560_drm_funcs = { + .unprepare = nt35560_unprepare, + .prepare = nt35560_prepare, + .get_modes = nt35560_get_modes, +}; + +static int nt35560_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct nt35560 *nt; + int ret; + + nt = devm_kzalloc(dev, sizeof(struct nt35560), GFP_KERNEL); + if (!nt) + return -ENOMEM; + nt->video_mode = of_property_read_bool(dev->of_node, + "enforce-video-mode"); + + mipi_dsi_set_drvdata(dsi, nt); + nt->dev = dev; + + nt->conf = of_device_get_match_data(dev); + if (!nt->conf) { + dev_err(dev, "missing device configuration\n"); + return -ENODEV; + } + + dsi->lanes = 2; + dsi->format = MIPI_DSI_FMT_RGB888; + /* + * FIXME: these come from the ST-Ericsson vendor driver for the + * HREF520 and seems to reflect limitations in the PLLs on that + * platform, if you have the datasheet, please cross-check the + * actual max rates. 
+ */ + dsi->lp_rate = 19200000; + dsi->hs_rate = 420160000; + + if (nt->video_mode) + /* Burst mode using event for sync */ + dsi->mode_flags = + MIPI_DSI_MODE_VIDEO | + MIPI_DSI_MODE_VIDEO_BURST; + else + dsi->mode_flags = + MIPI_DSI_CLOCK_NON_CONTINUOUS; + + nt->supply = devm_regulator_get(dev, "vddi"); + if (IS_ERR(nt->supply)) + return PTR_ERR(nt->supply); + + /* This asserts RESET by default */ + nt->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(nt->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(nt->reset_gpio), + "failed to request GPIO\n"); + + drm_panel_init(&nt->panel, dev, &nt35560_drm_funcs, + DRM_MODE_CONNECTOR_DSI); + + nt->panel.backlight = devm_backlight_device_register(dev, "nt35560", dev, nt, + &nt35560_bl_ops, &nt35560_bl_props); + if (IS_ERR(nt->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(nt->panel.backlight), + "failed to register backlight device\n"); + + drm_panel_add(&nt->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + drm_panel_remove(&nt->panel); + return ret; + } + + return 0; +} + +static int nt35560_remove(struct mipi_dsi_device *dsi) +{ + struct nt35560 *nt = mipi_dsi_get_drvdata(dsi); + + mipi_dsi_detach(dsi); + drm_panel_remove(&nt->panel); + + return 0; +} + +static const struct of_device_id nt35560_of_match[] = { + { + .compatible = "sony,acx424akp", + .data = &sony_acx424akp_data, + }, + { + .compatible = "sony,acx424akm", + .data = &sony_acx424akm_data, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, nt35560_of_match); + +static struct mipi_dsi_driver nt35560_driver = { + .probe = nt35560_probe, + .remove = nt35560_remove, + .driver = { + .name = "panel-novatek-nt35560", + .of_match_table = nt35560_of_match, + }, +}; +module_mipi_dsi_driver(nt35560_driver); + +MODULE_AUTHOR("Linus Wallei <linus.walleij@linaro.org>"); +MODULE_DESCRIPTION("MIPI-DSI Novatek NT35560 Panel Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c deleted file mode 100644 index 9536d56a94a5..000000000000 --- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c +++ /dev/null @@ -1,490 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * MIPI-DSI Sony ACX424AKP panel driver. This is a 480x864 - * AMOLED panel with a command-only DSI interface. - * - * Copyright (C) Linaro Ltd. 2019 - * Author: Linus Walleij - * Based on code and know-how from Marcus Lorentzon - * Copyright (C) ST-Ericsson SA 2010 - */ -#include <linux/backlight.h> -#include <linux/delay.h> -#include <linux/gpio/consumer.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/regulator/consumer.h> - -#include <video/mipi_display.h> - -#include <drm/drm_mipi_dsi.h> -#include <drm/drm_modes.h> -#include <drm/drm_panel.h> - -#define ACX424_DCS_READ_ID1 0xDA -#define ACX424_DCS_READ_ID2 0xDB -#define ACX424_DCS_READ_ID3 0xDC -#define ACX424_DCS_SET_MDDI 0xAE - -/* - * Sony seems to use vendor ID 0x81 - */ -#define DISPLAY_SONY_ACX424AKP_ID1 0x811b -#define DISPLAY_SONY_ACX424AKP_ID2 0x811a -/* - * The third ID looks like a bug, vendor IDs begin at 0x80 - * and panel 00 ... seems like default values. 
- */ -#define DISPLAY_SONY_ACX424AKP_ID3 0x8000 - -struct acx424akp { - struct drm_panel panel; - struct device *dev; - struct regulator *supply; - struct gpio_desc *reset_gpio; - bool video_mode; -}; - -static const struct drm_display_mode sony_acx424akp_vid_mode = { - .clock = 27234, - .hdisplay = 480, - .hsync_start = 480 + 15, - .hsync_end = 480 + 15 + 0, - .htotal = 480 + 15 + 0 + 15, - .vdisplay = 864, - .vsync_start = 864 + 14, - .vsync_end = 864 + 14 + 1, - .vtotal = 864 + 14 + 1 + 11, - .width_mm = 48, - .height_mm = 84, - .flags = DRM_MODE_FLAG_PVSYNC, -}; - -/* - * The timings are not very helpful as the display is used in - * command mode using the maximum HS frequency. - */ -static const struct drm_display_mode sony_acx424akp_cmd_mode = { - .clock = 35478, - .hdisplay = 480, - .hsync_start = 480 + 154, - .hsync_end = 480 + 154 + 16, - .htotal = 480 + 154 + 16 + 32, - .vdisplay = 864, - .vsync_start = 864 + 1, - .vsync_end = 864 + 1 + 1, - .vtotal = 864 + 1 + 1 + 1, - /* - * Some desired refresh rate, experiments at the maximum "pixel" - * clock speed (HS clock 420 MHz) yields around 117Hz. - */ - .width_mm = 48, - .height_mm = 84, -}; - -static inline struct acx424akp *panel_to_acx424akp(struct drm_panel *panel) -{ - return container_of(panel, struct acx424akp, panel); -} - -#define FOSC 20 /* 20Mhz */ -#define SCALE_FACTOR_NS_DIV_MHZ 1000 - -static int acx424akp_set_brightness(struct backlight_device *bl) -{ - struct acx424akp *acx = bl_get_data(bl); - struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev); - int period_ns = 1023; - int duty_ns = bl->props.brightness; - u8 pwm_ratio; - u8 pwm_div; - u8 par; - int ret; - - if (backlight_is_blank(bl)) { - /* Disable backlight */ - par = 0x00; - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, - &par, 1); - if (ret) { - dev_err(acx->dev, "failed to disable display backlight (%d)\n", ret); - return ret; - } - return 0; - } - - /* Calculate the PWM duty cycle in n/256's */ - pwm_ratio = max(((duty_ns * 256) / period_ns) - 1, 1); - pwm_div = max(1, - ((FOSC * period_ns) / 256) / - SCALE_FACTOR_NS_DIV_MHZ); - - /* Set up PWM dutycycle ONE byte (differs from the standard) */ - dev_dbg(acx->dev, "calculated duty cycle %02x\n", pwm_ratio); - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, - &pwm_ratio, 1); - if (ret < 0) { - dev_err(acx->dev, "failed to set display PWM ratio (%d)\n", ret); - return ret; - } - - /* - * Sequence to write PWMDIV: - * address data - * 0xF3 0xAA CMD2 Unlock - * 0x00 0x01 Enter CMD2 page 0 - * 0X7D 0x01 No reload MTP of CMD2 P1 - * 0x22 PWMDIV - * 0x7F 0xAA CMD2 page 1 lock - */ - par = 0xaa; - ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1); - if (ret < 0) { - dev_err(acx->dev, "failed to unlock CMD 2 (%d)\n", ret); - return ret; - } - par = 0x01; - ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1); - if (ret < 0) { - dev_err(acx->dev, "failed to enter page 1 (%d)\n", ret); - return ret; - } - par = 0x01; - ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1); - if (ret < 0) { - dev_err(acx->dev, "failed to disable MTP reload (%d)\n", ret); - return ret; - } - ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1); - if (ret < 0) { - dev_err(acx->dev, "failed to set PWM divisor (%d)\n", ret); - return ret; - } - par = 0xaa; - ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1); - if (ret < 0) { - dev_err(acx->dev, "failed to lock CMD 2 (%d)\n", ret); - return ret; - } - - /* Enable backlight */ - par = 0x24; - ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, - &par, 1); - if (ret < 0) { - 
dev_err(acx->dev, "failed to enable display backlight (%d)\n", ret); - return ret; - } - - return 0; -} - -static const struct backlight_ops acx424akp_bl_ops = { - .update_status = acx424akp_set_brightness, -}; - -static const struct backlight_properties acx424akp_bl_props = { - .type = BACKLIGHT_RAW, - .brightness = 512, - .max_brightness = 1023, -}; - -static int acx424akp_read_id(struct acx424akp *acx) -{ - struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev); - u8 vendor, version, panel; - u16 val; - int ret; - - ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID1, &vendor, 1); - if (ret < 0) { - dev_err(acx->dev, "could not vendor ID byte\n"); - return ret; - } - ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID2, &version, 1); - if (ret < 0) { - dev_err(acx->dev, "could not read device version byte\n"); - return ret; - } - ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID3, &panel, 1); - if (ret < 0) { - dev_err(acx->dev, "could not read panel ID byte\n"); - return ret; - } - - if (vendor == 0x00) { - dev_err(acx->dev, "device vendor ID is zero\n"); - return -ENODEV; - } - - val = (vendor << 8) | panel; - switch (val) { - case DISPLAY_SONY_ACX424AKP_ID1: - case DISPLAY_SONY_ACX424AKP_ID2: - case DISPLAY_SONY_ACX424AKP_ID3: - dev_info(acx->dev, "MTP vendor: %02x, version: %02x, panel: %02x\n", - vendor, version, panel); - break; - default: - dev_info(acx->dev, "unknown vendor: %02x, version: %02x, panel: %02x\n", - vendor, version, panel); - break; - } - - return 0; -} - -static int acx424akp_power_on(struct acx424akp *acx) -{ - int ret; - - ret = regulator_enable(acx->supply); - if (ret) { - dev_err(acx->dev, "failed to enable supply (%d)\n", ret); - return ret; - } - - /* Assert RESET */ - gpiod_set_value_cansleep(acx->reset_gpio, 1); - udelay(20); - /* De-assert RESET */ - gpiod_set_value_cansleep(acx->reset_gpio, 0); - usleep_range(11000, 20000); - - return 0; -} - -static void acx424akp_power_off(struct acx424akp *acx) -{ - /* Assert RESET */ - gpiod_set_value_cansleep(acx->reset_gpio, 1); - usleep_range(11000, 20000); - - regulator_disable(acx->supply); -} - -static int acx424akp_prepare(struct drm_panel *panel) -{ - struct acx424akp *acx = panel_to_acx424akp(panel); - struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev); - const u8 mddi = 3; - int ret; - - ret = acx424akp_power_on(acx); - if (ret) - return ret; - - ret = acx424akp_read_id(acx); - if (ret) { - dev_err(acx->dev, "failed to read panel ID (%d)\n", ret); - goto err_power_off; - } - - /* Enabe tearing mode: send TE (tearing effect) at VBLANK */ - ret = mipi_dsi_dcs_set_tear_on(dsi, - MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (ret) { - dev_err(acx->dev, "failed to enable vblank TE (%d)\n", ret); - goto err_power_off; - } - - /* - * Set MDDI - * - * This presumably deactivates the Qualcomm MDDI interface and - * selects DSI, similar code is found in other drivers such as the - * Sharp LS043T1LE01 which makes us suspect that this panel may be - * using a Novatek NT35565 or similar display driver chip that shares - * this command. Due to the lack of documentation we cannot know for - * sure. 
- */ - ret = mipi_dsi_dcs_write(dsi, ACX424_DCS_SET_MDDI, - &mddi, sizeof(mddi)); - if (ret < 0) { - dev_err(acx->dev, "failed to set MDDI (%d)\n", ret); - goto err_power_off; - } - - /* Exit sleep mode */ - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret) { - dev_err(acx->dev, "failed to exit sleep mode (%d)\n", ret); - goto err_power_off; - } - msleep(140); - - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret) { - dev_err(acx->dev, "failed to turn display on (%d)\n", ret); - goto err_power_off; - } - if (acx->video_mode) { - /* In video mode turn peripheral on */ - ret = mipi_dsi_turn_on_peripheral(dsi); - if (ret) { - dev_err(acx->dev, "failed to turn on peripheral\n"); - goto err_power_off; - } - } - - return 0; - -err_power_off: - acx424akp_power_off(acx); - return ret; -} - -static int acx424akp_unprepare(struct drm_panel *panel) -{ - struct acx424akp *acx = panel_to_acx424akp(panel); - struct mipi_dsi_device *dsi = to_mipi_dsi_device(acx->dev); - int ret; - - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret) { - dev_err(acx->dev, "failed to turn display off (%d)\n", ret); - return ret; - } - - /* Enter sleep mode */ - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret) { - dev_err(acx->dev, "failed to enter sleep mode (%d)\n", ret); - return ret; - } - msleep(85); - - acx424akp_power_off(acx); - - return 0; -} - - -static int acx424akp_get_modes(struct drm_panel *panel, - struct drm_connector *connector) -{ - struct acx424akp *acx = panel_to_acx424akp(panel); - struct drm_display_mode *mode; - - if (acx->video_mode) - mode = drm_mode_duplicate(connector->dev, - &sony_acx424akp_vid_mode); - else - mode = drm_mode_duplicate(connector->dev, - &sony_acx424akp_cmd_mode); - if (!mode) { - dev_err(panel->dev, "bad mode or failed to add mode\n"); - return -EINVAL; - } - drm_mode_set_name(mode); - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - - connector->display_info.width_mm = mode->width_mm; - connector->display_info.height_mm = mode->height_mm; - - drm_mode_probed_add(connector, mode); - - return 1; /* Number of modes */ -} - -static const struct drm_panel_funcs acx424akp_drm_funcs = { - .unprepare = acx424akp_unprepare, - .prepare = acx424akp_prepare, - .get_modes = acx424akp_get_modes, -}; - -static int acx424akp_probe(struct mipi_dsi_device *dsi) -{ - struct device *dev = &dsi->dev; - struct acx424akp *acx; - int ret; - - acx = devm_kzalloc(dev, sizeof(struct acx424akp), GFP_KERNEL); - if (!acx) - return -ENOMEM; - acx->video_mode = of_property_read_bool(dev->of_node, - "enforce-video-mode"); - - mipi_dsi_set_drvdata(dsi, acx); - acx->dev = dev; - - dsi->lanes = 2; - dsi->format = MIPI_DSI_FMT_RGB888; - /* - * FIXME: these come from the ST-Ericsson vendor driver for the - * HREF520 and seems to reflect limitations in the PLLs on that - * platform, if you have the datasheet, please cross-check the - * actual max rates. 
- */ - dsi->lp_rate = 19200000; - dsi->hs_rate = 420160000; - - if (acx->video_mode) - /* Burst mode using event for sync */ - dsi->mode_flags = - MIPI_DSI_MODE_VIDEO | - MIPI_DSI_MODE_VIDEO_BURST; - else - dsi->mode_flags = - MIPI_DSI_CLOCK_NON_CONTINUOUS; - - acx->supply = devm_regulator_get(dev, "vddi"); - if (IS_ERR(acx->supply)) - return PTR_ERR(acx->supply); - - /* This asserts RESET by default */ - acx->reset_gpio = devm_gpiod_get_optional(dev, "reset", - GPIOD_OUT_HIGH); - if (IS_ERR(acx->reset_gpio)) - return dev_err_probe(dev, PTR_ERR(acx->reset_gpio), - "failed to request GPIO\n"); - - drm_panel_init(&acx->panel, dev, &acx424akp_drm_funcs, - DRM_MODE_CONNECTOR_DSI); - - acx->panel.backlight = devm_backlight_device_register(dev, "acx424akp", dev, acx, - &acx424akp_bl_ops, &acx424akp_bl_props); - if (IS_ERR(acx->panel.backlight)) - return dev_err_probe(dev, PTR_ERR(acx->panel.backlight), - "failed to register backlight device\n"); - - drm_panel_add(&acx->panel); - - ret = mipi_dsi_attach(dsi); - if (ret < 0) { - drm_panel_remove(&acx->panel); - return ret; - } - - return 0; -} - -static int acx424akp_remove(struct mipi_dsi_device *dsi) -{ - struct acx424akp *acx = mipi_dsi_get_drvdata(dsi); - - mipi_dsi_detach(dsi); - drm_panel_remove(&acx->panel); - - return 0; -} - -static const struct of_device_id acx424akp_of_match[] = { - { .compatible = "sony,acx424akp" }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, acx424akp_of_match); - -static struct mipi_dsi_driver acx424akp_driver = { - .probe = acx424akp_probe, - .remove = acx424akp_remove, - .driver = { - .name = "panel-sony-acx424akp", - .of_match_table = acx424akp_of_match, - }, -}; -module_mipi_dsi_driver(acx424akp_driver); - -MODULE_AUTHOR("Linus Wallei <linus.walleij@linaro.org>"); -MODULE_DESCRIPTION("MIPI-DSI Sony acx424akp Panel Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panfrost/panfrost_features.h b/drivers/gpu/drm/panfrost/panfrost_features.h index 34f2bae1ec8c..36fadcf9634e 100644 --- a/drivers/gpu/drm/panfrost/panfrost_features.h +++ b/drivers/gpu/drm/panfrost/panfrost_features.h @@ -20,6 +20,7 @@ enum panfrost_hw_feature { HW_FEATURE_AARCH64_MMU, HW_FEATURE_TLS_HASHING, HW_FEATURE_THREAD_GROUP_SPLIT, + HW_FEATURE_IDVS_GROUP_SIZE, HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG, }; @@ -74,6 +75,7 @@ enum panfrost_hw_feature { BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \ + BIT_ULL(HW_FEATURE_IDVS_GROUP_SIZE) | \ BIT_ULL(HW_FEATURE_COHERENCY_REG)) #define hw_features_g76 (\ @@ -87,6 +89,7 @@ enum panfrost_hw_feature { BIT_ULL(HW_FEATURE_COHERENCY_REG) | \ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \ BIT_ULL(HW_FEATURE_TLS_HASHING) | \ + BIT_ULL(HW_FEATURE_IDVS_GROUP_SIZE) | \ BIT_ULL(HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG)) #define hw_features_g31 (\ diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index ead65f5fa2bc..293e799e2fe8 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -206,6 +206,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = { .vmap = drm_gem_shmem_object_vmap, .vunmap = drm_gem_shmem_object_vunmap, .mmap = drm_gem_shmem_object_mmap, + .vm_ops = &drm_gem_shmem_vm_ops, }; /** diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 15cec831a99a..aa89926742fd 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -145,6 
+145,9 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev) quirks |= (COHERENCY_ACE_LITE | COHERENCY_ACE) << JM_FORCE_COHERENCY_FEATURES_SHIFT; + if (panfrost_has_hw_feature(pfdev, HW_FEATURE_IDVS_GROUP_SIZE)) + quirks |= JM_DEFAULT_IDVS_GROUP_SIZE << JM_IDVS_GROUP_SIZE_SHIFT; + if (quirks) gpu_write(pfdev, GPU_JM_CONFIG, quirks); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 908d79520853..a6925dbb6224 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -812,7 +812,7 @@ int panfrost_job_init(struct panfrost_device *pfdev) nentries, 0, msecs_to_jiffies(JOB_TIMEOUT_MS), pfdev->reset.wq, - NULL, "pan_js"); + NULL, "pan_js", pfdev->dev); if (ret) { dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret); goto err_sched; diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h index 6c5a11ef1ee8..16e776cc82ea 100644 --- a/drivers/gpu/drm/panfrost/panfrost_regs.h +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h @@ -208,6 +208,7 @@ #define JM_MAX_JOB_THROTTLE_LIMIT 0x3F #define JM_FORCE_COHERENCY_FEATURES_SHIFT 2 #define JM_IDVS_GROUP_SIZE_SHIFT 16 +#define JM_DEFAULT_IDVS_GROUP_SIZE 0xF #define JM_MAX_IDVS_GROUP_SIZE 0x3F diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index e4b16421500b..1cb6f0c224bb 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -39,6 +39,7 @@ #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_gem_ttm_helper.h> +#include <drm/drm_module.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_prime.h> #include <drm/drm_probe_helper.h> @@ -269,6 +270,16 @@ static struct pci_driver qxl_pci_driver = { .driver.pm = &qxl_pm_ops, }; +static const struct drm_ioctl_desc qxl_ioctls[] = { + DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl, DRM_AUTH), +}; + static struct drm_driver qxl_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, @@ -282,6 +293,7 @@ static struct drm_driver qxl_driver = { .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, .fops = &qxl_fops, .ioctls = qxl_ioctls, + .num_ioctls = ARRAY_SIZE(qxl_ioctls), .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -292,24 +304,7 @@ static struct drm_driver qxl_driver = { .release = qxl_drm_release, }; -static int __init qxl_init(void) -{ - if (drm_firmware_drivers_only() && qxl_modeset == -1) - return -EINVAL; - - if (qxl_modeset == 0) - return -EINVAL; - qxl_driver.num_ioctls = qxl_max_ioctls; - return pci_register_driver(&qxl_pci_driver); -} - -static void __exit qxl_exit(void) -{ - pci_unregister_driver(&qxl_pci_driver); -} - -module_init(qxl_init); -module_exit(qxl_exit); +drm_module_pci_driver_if_modeset(qxl_pci_driver, qxl_modeset); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 9796099ff18f..47c169673088 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -65,7 +65,6 @@ struct iosys_map; #define QXL_DEBUGFS_MAX_COMPONENTS 
32 extern int qxl_num_crtc; -extern int qxl_max_ioctls; #define QXL_INTERRUPT_MASK (\ QXL_INTERRUPT_DISPLAY |\ @@ -261,9 +260,6 @@ struct qxl_device { int qxl_debugfs_fence_init(struct qxl_device *rdev); -extern const struct drm_ioctl_desc qxl_ioctls[]; -extern int qxl_max_ioctl; - int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev); void qxl_device_fini(struct qxl_device *qdev); @@ -457,4 +453,13 @@ struct qxl_drv_surface * qxl_surface_lookup(struct drm_device *dev, int surface_id); void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing); +/* qxl_ioctl.c */ +int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file); + #endif diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 38aabcbe2238..30f58b21372a 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -33,8 +33,7 @@ * TODO: allocating a new gem(in qxl_bo) for each request. * This is wasteful since bo's are page aligned. */ -static int qxl_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_alloc *qxl_alloc = data; @@ -61,8 +60,7 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data, return 0; } -static int qxl_map_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_map *qxl_map = data; @@ -272,8 +270,7 @@ out_free_reloc: return ret; } -static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_execbuffer *execbuffer = data; @@ -297,8 +294,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, return 0; } -static int qxl_update_area_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) +int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_update_area *update_area = data; @@ -347,8 +343,7 @@ out: return ret; } -static int qxl_getparam_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_getparam *param = data; @@ -366,8 +361,7 @@ static int qxl_getparam_ioctl(struct drm_device *dev, void *data, return 0; } -static int qxl_clientcap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = to_qxl(dev); struct pci_dev *pdev = to_pci_dev(dev->dev); @@ -388,8 +382,7 @@ static 
int qxl_clientcap_ioctl(struct drm_device *dev, void *data, return -ENOSYS; } -static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) +int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_alloc_surf *param = data; @@ -422,23 +415,3 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, param->handle = handle; return ret; } - -const struct drm_ioctl_desc qxl_ioctls[] = { - DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH), - - DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH), - - DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl, - DRM_AUTH), - DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl, - DRM_AUTH), - DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl, - DRM_AUTH), - DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl, - DRM_AUTH), - - DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl, - DRM_AUTH), -}; - -int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls); diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index f15b20da5315..c1bbfbe28bda 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -25,6 +25,7 @@ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/string_helpers.h> #include <asm/unaligned.h> @@ -722,7 +723,7 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) break; } if (arg != ATOM_COND_ALWAYS) - SDEBUG(" taken: %s\n", execute ? "yes" : "no"); + SDEBUG(" taken: %s\n", str_yes_no(execute)); SDEBUG(" target: 0x%04X\n", target); if (execute) { if (ctx->last_jump == (ctx->start + target)) { diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 895776c421d4..08f83bf2c330 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -2462,8 +2462,6 @@ struct radeon_device { struct radeon_vm_manager vm_manager; struct mutex gpu_clock_mutex; /* memory stats */ - atomic64_t vram_usage; - atomic64_t gtt_usage; atomic64_t num_bytes_moved; atomic_t gpu_reset_counter; /* ACPI interface */ diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 573154268d43..b9a07677a71e 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1596,6 +1596,8 @@ int radeon_modeset_init(struct radeon_device *rdev) rdev->ddev->mode_config.preferred_depth = 24; rdev->ddev->mode_config.prefer_shadow = 1; + rdev->ddev->mode_config.fb_modifiers_not_supported = true; + rdev->ddev->mode_config.fb_base = rdev->mc.aper_base; ret = radeon_modeset_create_props(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 11ad210919c8..965161b8565b 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -241,6 +241,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct drm_radeon_info *info = data; struct radeon_mode_info *minfo = &rdev->mode_info; uint32_t *value, value_tmp, *value_ptr, value_size; + struct ttm_resource_manager *man; uint64_t value64; struct drm_crtc *crtc; int i, found; @@ -550,12 +551,14 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) case RADEON_INFO_VRAM_USAGE: value = (uint32_t*)&value64; value_size = sizeof(uint64_t); - value64 = atomic64_read(&rdev->vram_usage); + man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM); + value64 = 
ttm_resource_manager_usage(man); break; case RADEON_INFO_GTT_USAGE: value = (uint32_t*)&value64; value_size = sizeof(uint64_t); - value64 = atomic64_read(&rdev->gtt_usage); + man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT); + value64 = ttm_resource_manager_usage(man); break; case RADEON_INFO_ACTIVE_CU_COUNT: if (rdev->family >= CHIP_BONAIRE) diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 87536d205593..91a72cd14304 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -49,27 +49,6 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo); * function are calling it. */ -static void radeon_update_memory_usage(struct ttm_buffer_object *bo, - unsigned int mem_type, int sign) -{ - struct radeon_device *rdev = radeon_get_rdev(bo->bdev); - - switch (mem_type) { - case TTM_PL_TT: - if (sign > 0) - atomic64_add(bo->base.size, &rdev->gtt_usage); - else - atomic64_sub(bo->base.size, &rdev->gtt_usage); - break; - case TTM_PL_VRAM: - if (sign > 0) - atomic64_add(bo->base.size, &rdev->vram_usage); - else - atomic64_sub(bo->base.size, &rdev->vram_usage); - break; - } -} - static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) { struct radeon_bo *bo; @@ -434,7 +413,9 @@ void radeon_bo_fini(struct radeon_device *rdev) static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev) { u64 real_vram_size = rdev->mc.real_vram_size; - u64 vram_usage = atomic64_read(&rdev->vram_usage); + struct ttm_resource_manager *man = + ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM); + u64 vram_usage = ttm_resource_manager_usage(man); /* This function is based on the current VRAM usage. * @@ -723,16 +704,10 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, return radeon_bo_get_surface_reg(bo); } -void radeon_bo_move_notify(struct ttm_buffer_object *bo, - unsigned int old_type, - struct ttm_resource *new_mem) +void radeon_bo_move_notify(struct ttm_buffer_object *bo) { struct radeon_bo *rbo; - radeon_update_memory_usage(bo, old_type, -1); - if (new_mem) - radeon_update_memory_usage(bo, new_mem->mem_type, 1); - if (!radeon_ttm_bo_is_radeon_bo(bo)) return; diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 1afc7992ef91..0a6ef49e990a 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -160,9 +160,7 @@ extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo, u32 *tiling_flags, u32 *pitch); extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, bool force_drop); -extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, - unsigned int old_type, - struct ttm_resource *new_mem); +extern void radeon_bo_move_notify(struct ttm_buffer_object *bo); extern vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 0d1283cdc8fb..44594d16611f 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -199,7 +199,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *old_mem = bo->resource; struct radeon_device *rdev; struct radeon_bo *rbo; - int r, old_type; + int r; if (new_mem->mem_type == TTM_PL_TT) { r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem); @@ -216,9 
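/*
 * A minimal sketch of the pattern the radeon hunks above switch to:
 * instead of keeping driver-private atomic64_t vram_usage/gtt_usage
 * counters, the driver reads the accounting that the TTM resource
 * manager itself keeps.  Only helpers visible in this diff are used;
 * the wrapper name radeon_query_mem_usage() is illustrative, not part
 * of the patch.
 */
static u64 radeon_query_mem_usage(struct radeon_device *rdev,
				  unsigned int mem_type)
{
	struct ttm_resource_manager *man;

	/* mem_type is TTM_PL_VRAM or TTM_PL_TT, as in radeon_info_ioctl() */
	man = ttm_manager_type(&rdev->mman.bdev, mem_type);
	if (!man)
		return 0;

	/* Aggregate size of everything currently allocated from @man */
	return ttm_resource_manager_usage(man);
}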
+216,6 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, if (WARN_ON_ONCE(rbo->tbo.pin_count > 0)) return -EINVAL; - /* Save old type for statistics update */ - old_type = old_mem->mem_type; - rdev = radeon_get_rdev(bo->bdev); if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { ttm_bo_move_null(bo, new_mem); @@ -264,7 +261,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, out: /* update statistics */ atomic64_add(bo->base.size, &rdev->num_bytes_moved); - radeon_bo_move_notify(bo, old_type, new_mem); + radeon_bo_move_notify(bo); return 0; } @@ -679,16 +676,6 @@ bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY); } -static void -radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo) -{ - unsigned int old_type = TTM_PL_SYSTEM; - - if (bo->resource) - old_type = bo->resource->mem_type; - radeon_bo_move_notify(bo, old_type, NULL); -} - static struct ttm_device_funcs radeon_bo_driver = { .ttm_tt_create = &radeon_ttm_tt_create, .ttm_tt_populate = &radeon_ttm_tt_populate, @@ -697,7 +684,6 @@ static struct ttm_device_funcs radeon_bo_driver = { .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = &radeon_evict_flags, .move = &radeon_bo_move, - .delete_mem_notify = &radeon_bo_delete_mem_notify, .io_mem_reserve = &radeon_ttm_io_mem_reserve, }; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 5a8131ef81d5..982e450233ed 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -701,6 +701,9 @@ static struct platform_driver rcar_du_platform_driver = { static int __init rcar_du_init(void) { + if (drm_firmware_drivers_only()) + return -ENODEV; + rcar_du_of_init(rcar_du_of_table); return platform_driver_register(&rcar_du_platform_driver); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index bec207de4544..ac190e2b1f7a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -457,6 +457,9 @@ static int __init rockchip_drm_init(void) { int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + num_rockchip_sub_drivers = 0; ADD_ROCKCHIP_SUB_DRIVER(vop_platform_driver, CONFIG_DRM_ROCKCHIP); ADD_ROCKCHIP_SUB_DRIVER(rockchip_lvds_driver, diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index f91fb31ab7a7..b81fceb0b8a2 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -491,7 +491,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) if (r == -ENOENT) drm_sched_job_done(s_job); else if (r) - DRM_ERROR("fence add callback failed (%d)\n", + DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r); } else drm_sched_job_done(s_job); @@ -957,7 +957,7 @@ static int drm_sched_main(void *param) if (r == -ENOENT) drm_sched_job_done(sched_job); else if (r) - DRM_ERROR("fence add callback failed (%d)\n", + DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r); dma_fence_put(fence); } else { @@ -991,7 +991,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, unsigned hw_submission, unsigned hang_limit, long timeout, struct workqueue_struct *timeout_wq, - atomic_t *score, const char *name) + atomic_t *score, const char *name, struct device *dev) { int i, ret; sched->ops = ops; @@ -1001,6 +1001,7 @@ int drm_sched_init(struct 
drm_gpu_scheduler *sched, sched->timeout_wq = timeout_wq ? : system_wq; sched->hang_limit = hang_limit; sched->score = score ? score : &sched->_score; + sched->dev = dev; for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++) drm_sched_rq_init(sched, &sched->sched_rq[i]); @@ -1018,7 +1019,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, if (IS_ERR(sched->thread)) { ret = PTR_ERR(sched->thread); sched->thread = NULL; - DRM_ERROR("Failed to create scheduler for %s.\n", name); + DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name); return ret; } diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile index 0856e4b12f70..5ba5f9138c95 100644 --- a/drivers/gpu/drm/selftests/Makefile +++ b/drivers/gpu/drm/selftests/Makefile @@ -4,4 +4,5 @@ test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \ test-drm_damage_helper.o test-drm_dp_mst_helper.o \ test-drm_rect.o -obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o test-drm_cmdline_parser.o +obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o test-drm_cmdline_parser.o \ + test-drm_buddy.o diff --git a/drivers/gpu/drm/selftests/drm_buddy_selftests.h b/drivers/gpu/drm/selftests/drm_buddy_selftests.h new file mode 100644 index 000000000000..455b756c4ae5 --- /dev/null +++ b/drivers/gpu/drm/selftests/drm_buddy_selftests.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* List each unit test as selftest(name, function) + * + * The name is used as both an enum and expanded as igt__name to create + * a module parameter. It must be unique and legal for a C identifier. + * + * Tests are executed in order by igt/drm_buddy + */ +selftest(sanitycheck, igt_sanitycheck) /* keep first (selfcheck for igt) */ +selftest(buddy_alloc_limit, igt_buddy_alloc_limit) +selftest(buddy_alloc_range, igt_buddy_alloc_range) +selftest(buddy_alloc_optimistic, igt_buddy_alloc_optimistic) +selftest(buddy_alloc_pessimistic, igt_buddy_alloc_pessimistic) +selftest(buddy_alloc_smoke, igt_buddy_alloc_smoke) +selftest(buddy_alloc_pathological, igt_buddy_alloc_pathological) diff --git a/drivers/gpu/drm/selftests/test-drm_buddy.c b/drivers/gpu/drm/selftests/test-drm_buddy.c new file mode 100644 index 000000000000..fa997f89522b --- /dev/null +++ b/drivers/gpu/drm/selftests/test-drm_buddy.c @@ -0,0 +1,992 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#define pr_fmt(fmt) "drm_buddy: " fmt + +#include <linux/module.h> +#include <linux/prime_numbers.h> +#include <linux/sched/signal.h> + +#include <drm/drm_buddy.h> + +#include "../lib/drm_random.h" + +#define TESTS "drm_buddy_selftests.h" +#include "drm_selftest.h" + +#define IGT_TIMEOUT(name__) \ + unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT + +static unsigned int random_seed; + +static inline u64 get_size(int order, u64 chunk_size) +{ + return (1 << order) * chunk_size; +} + +__printf(2, 3) +static bool __igt_timeout(unsigned long timeout, const char *fmt, ...) +{ + va_list va; + + if (!signal_pending(current)) { + cond_resched(); + if (time_before(jiffies, timeout)) + return false; + } + + if (fmt) { + va_start(va, fmt); + vprintk(fmt, va); + va_end(va); + } + + return true; +} + +static inline const char *yesno(bool v) +{ + return v ? 
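/*
 * A simplified illustration of the selftest() X-macro pattern that
 * drm_buddy_selftests.h above relies on: the same list is included once
 * to build an enum of test ids and once to build the table that the
 * test runner walks, and each name is also expanded to an igt__<name>
 * module parameter (not shown here).  The identifiers below are made up
 * for illustration; the real drm_selftest.h/drm_selftest.c differ in
 * detail.
 */
#define selftest(name, func) __idx_##name,
enum buddy_selftest_id {
#include "drm_buddy_selftests.h"
	__buddy_selftest_count
};
#undef selftest

#define selftest(name, func) { .name = #name, .func = func },
static const struct {
	const char *name;
	int (*func)(void *arg);
} buddy_selftests[] = {
#include "drm_buddy_selftests.h"
};
#undef selftest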
"yes" : "no"; +} + +static void __igt_dump_block(struct drm_buddy *mm, + struct drm_buddy_block *block, + bool buddy) +{ + pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n", + block->header, + drm_buddy_block_state(block), + drm_buddy_block_order(block), + drm_buddy_block_offset(block), + drm_buddy_block_size(mm, block), + yesno(!block->parent), + yesno(buddy)); +} + +static void igt_dump_block(struct drm_buddy *mm, + struct drm_buddy_block *block) +{ + struct drm_buddy_block *buddy; + + __igt_dump_block(mm, block, false); + + buddy = drm_get_buddy(block); + if (buddy) + __igt_dump_block(mm, buddy, true); +} + +static int igt_check_block(struct drm_buddy *mm, + struct drm_buddy_block *block) +{ + struct drm_buddy_block *buddy; + unsigned int block_state; + u64 block_size; + u64 offset; + int err = 0; + + block_state = drm_buddy_block_state(block); + + if (block_state != DRM_BUDDY_ALLOCATED && + block_state != DRM_BUDDY_FREE && + block_state != DRM_BUDDY_SPLIT) { + pr_err("block state mismatch\n"); + err = -EINVAL; + } + + block_size = drm_buddy_block_size(mm, block); + offset = drm_buddy_block_offset(block); + + if (block_size < mm->chunk_size) { + pr_err("block size smaller than min size\n"); + err = -EINVAL; + } + + if (!is_power_of_2(block_size)) { + pr_err("block size not power of two\n"); + err = -EINVAL; + } + + if (!IS_ALIGNED(block_size, mm->chunk_size)) { + pr_err("block size not aligned to min size\n"); + err = -EINVAL; + } + + if (!IS_ALIGNED(offset, mm->chunk_size)) { + pr_err("block offset not aligned to min size\n"); + err = -EINVAL; + } + + if (!IS_ALIGNED(offset, block_size)) { + pr_err("block offset not aligned to block size\n"); + err = -EINVAL; + } + + buddy = drm_get_buddy(block); + + if (!buddy && block->parent) { + pr_err("buddy has gone fishing\n"); + err = -EINVAL; + } + + if (buddy) { + if (drm_buddy_block_offset(buddy) != (offset ^ block_size)) { + pr_err("buddy has wrong offset\n"); + err = -EINVAL; + } + + if (drm_buddy_block_size(mm, buddy) != block_size) { + pr_err("buddy size mismatch\n"); + err = -EINVAL; + } + + if (drm_buddy_block_state(buddy) == block_state && + block_state == DRM_BUDDY_FREE) { + pr_err("block and its buddy are free\n"); + err = -EINVAL; + } + } + + return err; +} + +static int igt_check_blocks(struct drm_buddy *mm, + struct list_head *blocks, + u64 expected_size, + bool is_contiguous) +{ + struct drm_buddy_block *block; + struct drm_buddy_block *prev; + u64 total; + int err = 0; + + block = NULL; + prev = NULL; + total = 0; + + list_for_each_entry(block, blocks, link) { + err = igt_check_block(mm, block); + + if (!drm_buddy_block_is_allocated(block)) { + pr_err("block not allocated\n"), + err = -EINVAL; + } + + if (is_contiguous && prev) { + u64 prev_block_size; + u64 prev_offset; + u64 offset; + + prev_offset = drm_buddy_block_offset(prev); + prev_block_size = drm_buddy_block_size(mm, prev); + offset = drm_buddy_block_offset(block); + + if (offset != (prev_offset + prev_block_size)) { + pr_err("block offset mismatch\n"); + err = -EINVAL; + } + } + + if (err) + break; + + total += drm_buddy_block_size(mm, block); + prev = block; + } + + if (!err) { + if (total != expected_size) { + pr_err("size mismatch, expected=%llx, found=%llx\n", + expected_size, total); + err = -EINVAL; + } + return err; + } + + if (prev) { + pr_err("prev block, dump:\n"); + igt_dump_block(mm, prev); + } + + pr_err("bad block, dump:\n"); + igt_dump_block(mm, block); + + return err; +} + +static int igt_check_mm(struct 
drm_buddy *mm) +{ + struct drm_buddy_block *root; + struct drm_buddy_block *prev; + unsigned int i; + u64 total; + int err = 0; + + if (!mm->n_roots) { + pr_err("n_roots is zero\n"); + return -EINVAL; + } + + if (mm->n_roots != hweight64(mm->size)) { + pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n", + mm->n_roots, hweight64(mm->size)); + return -EINVAL; + } + + root = NULL; + prev = NULL; + total = 0; + + for (i = 0; i < mm->n_roots; ++i) { + struct drm_buddy_block *block; + unsigned int order; + + root = mm->roots[i]; + if (!root) { + pr_err("root(%u) is NULL\n", i); + err = -EINVAL; + break; + } + + err = igt_check_block(mm, root); + + if (!drm_buddy_block_is_free(root)) { + pr_err("root not free\n"); + err = -EINVAL; + } + + order = drm_buddy_block_order(root); + + if (!i) { + if (order != mm->max_order) { + pr_err("max order root missing\n"); + err = -EINVAL; + } + } + + if (prev) { + u64 prev_block_size; + u64 prev_offset; + u64 offset; + + prev_offset = drm_buddy_block_offset(prev); + prev_block_size = drm_buddy_block_size(mm, prev); + offset = drm_buddy_block_offset(root); + + if (offset != (prev_offset + prev_block_size)) { + pr_err("root offset mismatch\n"); + err = -EINVAL; + } + } + + block = list_first_entry_or_null(&mm->free_list[order], + struct drm_buddy_block, + link); + if (block != root) { + pr_err("root mismatch at order=%u\n", order); + err = -EINVAL; + } + + if (err) + break; + + prev = root; + total += drm_buddy_block_size(mm, root); + } + + if (!err) { + if (total != mm->size) { + pr_err("expected mm size=%llx, found=%llx\n", mm->size, + total); + err = -EINVAL; + } + return err; + } + + if (prev) { + pr_err("prev root(%u), dump:\n", i - 1); + igt_dump_block(mm, prev); + } + + if (root) { + pr_err("bad root(%u), dump:\n", i); + igt_dump_block(mm, root); + } + + return err; +} + +static void igt_mm_config(u64 *size, u64 *chunk_size) +{ + DRM_RND_STATE(prng, random_seed); + u32 s, ms; + + /* Nothing fancy, just try to get an interesting bit pattern */ + + prandom_seed_state(&prng, random_seed); + + /* Let size be a random number of pages up to 8 GB (2M pages) */ + s = 1 + drm_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng); + /* Let the chunk size be a random power of 2 less than size */ + ms = BIT(drm_prandom_u32_max_state(ilog2(s), &prng)); + /* Round size down to the chunk size */ + s &= -ms; + + /* Convert from pages to bytes */ + *chunk_size = (u64)ms << 12; + *size = (u64)s << 12; +} + +static int igt_buddy_alloc_pathological(void *arg) +{ + u64 mm_size, size, min_page_size, start = 0; + struct drm_buddy_block *block; + const int max_order = 3; + unsigned long flags = 0; + int order, top, err; + struct drm_buddy mm; + LIST_HEAD(blocks); + LIST_HEAD(holes); + LIST_HEAD(tmp); + + /* + * Create a pot-sized mm, then allocate one of each possible + * order within. This should leave the mm with exactly one + * page left. Free the largest block, then whittle down again. + * Eventually we will have a fully 50% fragmented mm. 
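 *
 * Worked numbers for the above, with the max_order = 3 used by this
 * test: the mm spans PAGE_SIZE << 3 = 8 pages.  One block of each
 * lower order costs
 *
 *   get_size(2, PAGE_SIZE) + get_size(1, PAGE_SIZE) + get_size(0, PAGE_SIZE)
 *     = (4 + 2 + 1) * PAGE_SIZE,
 *
 * i.e. all orders below the top together are one chunk short of the
 * whole (2^n - 1 of 2^n pages), which is why exactly one page remains
 * for the final single-page "hole" in each round.  Once the holes are
 * freed, each free page should sit next to an allocated buddy, so no
 * merging is possible; that is what the final loop checks by expecting
 * every allocation of order >= 1 to fail.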
+ */ + + mm_size = PAGE_SIZE << max_order; + err = drm_buddy_init(&mm, mm_size, PAGE_SIZE); + if (err) { + pr_err("buddy_init failed(%d)\n", err); + return err; + } + BUG_ON(mm.max_order != max_order); + + for (top = max_order; top; top--) { + /* Make room by freeing the largest allocated block */ + block = list_first_entry_or_null(&blocks, typeof(*block), link); + if (block) { + list_del(&block->link); + drm_buddy_free_block(&mm, block); + } + + for (order = top; order--; ) { + size = min_page_size = get_size(order, PAGE_SIZE); + err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, + min_page_size, &tmp, flags); + if (err) { + pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n", + order, top); + goto err; + } + + block = list_first_entry_or_null(&tmp, + struct drm_buddy_block, + link); + if (!block) { + pr_err("alloc_blocks has no blocks\n"); + err = -EINVAL; + goto err; + } + + list_move_tail(&block->link, &blocks); + } + + /* There should be one final page for this sub-allocation */ + size = min_page_size = get_size(0, PAGE_SIZE); + err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags); + if (err) { + pr_info("buddy_alloc hit -ENOMEM for hole\n"); + goto err; + } + + block = list_first_entry_or_null(&tmp, + struct drm_buddy_block, + link); + if (!block) { + pr_err("alloc_blocks has no blocks\n"); + err = -EINVAL; + goto err; + } + + list_move_tail(&block->link, &holes); + + size = min_page_size = get_size(top, PAGE_SIZE); + err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags); + if (!err) { + pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!", + top, max_order); + block = list_first_entry_or_null(&tmp, + struct drm_buddy_block, + link); + if (!block) { + pr_err("alloc_blocks has no blocks\n"); + err = -EINVAL; + goto err; + } + + list_move_tail(&block->link, &blocks); + err = -EINVAL; + goto err; + } + } + + drm_buddy_free_list(&mm, &holes); + + /* Nothing larger than blocks of chunk_size now available */ + for (order = 1; order <= max_order; order++) { + size = min_page_size = get_size(order, PAGE_SIZE); + err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags); + if (!err) { + pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!", + order); + block = list_first_entry_or_null(&tmp, + struct drm_buddy_block, + link); + if (!block) { + pr_err("alloc_blocks has no blocks\n"); + err = -EINVAL; + goto err; + } + + list_move_tail(&block->link, &blocks); + err = -EINVAL; + goto err; + } + } + + if (err) + err = 0; + +err: + list_splice_tail(&holes, &blocks); + drm_buddy_free_list(&mm, &blocks); + drm_buddy_fini(&mm); + return err; +} + +static int igt_buddy_alloc_smoke(void *arg) +{ + u64 mm_size, min_page_size, chunk_size, start = 0; + unsigned long flags = 0; + struct drm_buddy mm; + int *order; + int err, i; + + DRM_RND_STATE(prng, random_seed); + IGT_TIMEOUT(end_time); + + igt_mm_config(&mm_size, &chunk_size); + + err = drm_buddy_init(&mm, mm_size, chunk_size); + if (err) { + pr_err("buddy_init failed(%d)\n", err); + return err; + } + + order = drm_random_order(mm.max_order + 1, &prng); + if (!order) + goto out_fini; + + for (i = 0; i <= mm.max_order; ++i) { + struct drm_buddy_block *block; + int max_order = order[i]; + bool timeout = false; + LIST_HEAD(blocks); + u64 total, size; + LIST_HEAD(tmp); + int order; + + err = igt_check_mm(&mm); + if (err) { + pr_err("pre-mm check failed, abort\n"); + break; + } + + order = max_order; + total 
= 0; + + do { +retry: + size = min_page_size = get_size(order, chunk_size); + err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, + min_page_size, &tmp, flags); + if (err) { + if (err == -ENOMEM) { + pr_info("buddy_alloc hit -ENOMEM with order=%d\n", + order); + } else { + if (order--) { + err = 0; + goto retry; + } + + pr_err("buddy_alloc with order=%d failed(%d)\n", + order, err); + } + + break; + } + + block = list_first_entry_or_null(&tmp, + struct drm_buddy_block, + link); + if (!block) { + pr_err("alloc_blocks has no blocks\n"); + err = -EINVAL; + break; + } + + list_move_tail(&block->link, &blocks); + + if (drm_buddy_block_order(block) != order) { + pr_err("buddy_alloc order mismatch\n"); + err = -EINVAL; + break; + } + + total += drm_buddy_block_size(&mm, block); + + if (__igt_timeout(end_time, NULL)) { + timeout = true; + break; + } + } while (total < mm.size); + + if (!err) + err = igt_check_blocks(&mm, &blocks, total, false); + + drm_buddy_free_list(&mm, &blocks); + + if (!err) { + err = igt_check_mm(&mm); + if (err) + pr_err("post-mm check failed\n"); + } + + if (err || timeout) + break; + + cond_resched(); + } + + if (err == -ENOMEM) + err = 0; + + kfree(order); +out_fini: + drm_buddy_fini(&mm); + + return err; +} + +static int igt_buddy_alloc_pessimistic(void *arg) +{ + u64 mm_size, size, min_page_size, start = 0; + struct drm_buddy_block *block, *bn; + const unsigned int max_order = 16; + unsigned long flags = 0; + struct drm_buddy mm; + unsigned int order; + LIST_HEAD(blocks); + LIST_HEAD(tmp); + int err; + + /* + * Create a pot-sized mm, then allocate one of each possible + * order within. This should leave the mm with exactly one + * page left. + */ + + mm_size = PAGE_SIZE << max_order; + err = drm_buddy_init(&mm, mm_size, PAGE_SIZE); + if (err) { + pr_err("buddy_init failed(%d)\n", err); + return err; + } + BUG_ON(mm.max_order != max_order); + + for (order = 0; order < max_order; order++) { + size = min_page_size = get_size(order, PAGE_SIZE); + err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags); + if (err) { + pr_info("buddy_alloc hit -ENOMEM with order=%d\n", + order); + goto err; + } + + block = list_first_entry_or_null(&tmp, + struct drm_buddy_block, + link); + if (!block) { + pr_err("alloc_blocks has no blocks\n"); + err = -EINVAL; + goto err; + } + + list_move_tail(&block->link, &blocks); + } + + /* And now the last remaining block available */ + size = min_page_size = get_size(0, PAGE_SIZE); + err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags); + if (err) { + pr_info("buddy_alloc hit -ENOMEM on final alloc\n"); + goto err; + } + + block = list_first_entry_or_null(&tmp, + struct drm_buddy_block, + link); + if (!block) { + pr_err("alloc_blocks has no blocks\n"); + err = -EINVAL; + goto err; + } + + list_move_tail(&block->link, &blocks); + + /* Should be completely full! 
*/ + for (order = max_order; order--; ) { + size = min_page_size = get_size(order, PAGE_SIZE); + err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags); + if (!err) { + pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!", + order); + block = list_first_entry_or_null(&tmp, + struct drm_buddy_block, + link); + if (!block) { + pr_err("alloc_blocks has no blocks\n"); + err = -EINVAL; + goto err; + } + + list_move_tail(&block->link, &blocks); + err = -EINVAL; + goto err; + } + } + + block = list_last_entry(&blocks, typeof(*block), link); + list_del(&block->link); + drm_buddy_free_block(&mm, block); + + /* As we free in increasing size, we make available larger blocks */ + order = 1; + list_for_each_entry_safe(block, bn, &blocks, link) { + list_del(&block->link); + drm_buddy_free_block(&mm, block); + + size = min_page_size = get_size(order, PAGE_SIZE); + err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags); + if (err) { + pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n", + order); + goto err; + } + + block = list_first_entry_or_null(&tmp, + struct drm_buddy_block, + link); + if (!block) { + pr_err("alloc_blocks has no blocks\n"); + err = -EINVAL; + goto err; + } + + list_del(&block->link); + drm_buddy_free_block(&mm, block); + order++; + } + + /* To confirm, now the whole mm should be available */ + size = min_page_size = get_size(max_order, PAGE_SIZE); + err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags); + if (err) { + pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n", + max_order); + goto err; + } + + block = list_first_entry_or_null(&tmp, + struct drm_buddy_block, + link); + if (!block) { + pr_err("alloc_blocks has no blocks\n"); + err = -EINVAL; + goto err; + } + + list_del(&block->link); + drm_buddy_free_block(&mm, block); + +err: + drm_buddy_free_list(&mm, &blocks); + drm_buddy_fini(&mm); + return err; +} + +static int igt_buddy_alloc_optimistic(void *arg) +{ + u64 mm_size, size, min_page_size, start = 0; + struct drm_buddy_block *block; + unsigned long flags = 0; + const int max_order = 16; + struct drm_buddy mm; + LIST_HEAD(blocks); + LIST_HEAD(tmp); + int order, err; + + /* + * Create a mm with one block of each order available, and + * try to allocate them all. + */ + + mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1); + err = drm_buddy_init(&mm, + mm_size, + PAGE_SIZE); + if (err) { + pr_err("buddy_init failed(%d)\n", err); + return err; + } + + BUG_ON(mm.max_order != max_order); + + for (order = 0; order <= max_order; order++) { + size = min_page_size = get_size(order, PAGE_SIZE); + err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags); + if (err) { + pr_info("buddy_alloc hit -ENOMEM with order=%d\n", + order); + goto err; + } + + block = list_first_entry_or_null(&tmp, + struct drm_buddy_block, + link); + if (!block) { + pr_err("alloc_blocks has no blocks\n"); + err = -EINVAL; + goto err; + } + + list_move_tail(&block->link, &blocks); + } + + /* Should be completely full! 
*/ + size = min_page_size = get_size(0, PAGE_SIZE); + err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags); + if (!err) { + pr_info("buddy_alloc unexpectedly succeeded, it should be full!"); + block = list_first_entry_or_null(&tmp, + struct drm_buddy_block, + link); + if (!block) { + pr_err("alloc_blocks has no blocks\n"); + err = -EINVAL; + goto err; + } + + list_move_tail(&block->link, &blocks); + err = -EINVAL; + goto err; + } else { + err = 0; + } + +err: + drm_buddy_free_list(&mm, &blocks); + drm_buddy_fini(&mm); + return err; +} + +static int igt_buddy_alloc_range(void *arg) +{ + unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION; + u64 offset, size, rem, chunk_size, end; + unsigned long page_num; + struct drm_buddy mm; + LIST_HEAD(blocks); + int err; + + igt_mm_config(&size, &chunk_size); + + err = drm_buddy_init(&mm, size, chunk_size); + if (err) { + pr_err("buddy_init failed(%d)\n", err); + return err; + } + + err = igt_check_mm(&mm); + if (err) { + pr_err("pre-mm check failed, abort, abort, abort!\n"); + goto err_fini; + } + + rem = mm.size; + offset = 0; + + for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) { + struct drm_buddy_block *block; + LIST_HEAD(tmp); + + size = min(page_num * mm.chunk_size, rem); + end = offset + size; + + err = drm_buddy_alloc_blocks(&mm, offset, end, size, mm.chunk_size, &tmp, flags); + if (err) { + if (err == -ENOMEM) { + pr_info("alloc_range hit -ENOMEM with size=%llx\n", + size); + } else { + pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n", + offset, size, err); + } + + break; + } + + block = list_first_entry_or_null(&tmp, + struct drm_buddy_block, + link); + if (!block) { + pr_err("alloc_range has no blocks\n"); + err = -EINVAL; + break; + } + + if (drm_buddy_block_offset(block) != offset) { + pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n", + drm_buddy_block_offset(block), offset); + err = -EINVAL; + } + + if (!err) + err = igt_check_blocks(&mm, &tmp, size, true); + + list_splice_tail(&tmp, &blocks); + + if (err) + break; + + offset += size; + + rem -= size; + if (!rem) + break; + + cond_resched(); + } + + if (err == -ENOMEM) + err = 0; + + drm_buddy_free_list(&mm, &blocks); + + if (!err) { + err = igt_check_mm(&mm); + if (err) + pr_err("post-mm check failed\n"); + } + +err_fini: + drm_buddy_fini(&mm); + + return err; +} + +static int igt_buddy_alloc_limit(void *arg) +{ + u64 end, size = U64_MAX, start = 0; + struct drm_buddy_block *block; + unsigned long flags = 0; + LIST_HEAD(allocated); + struct drm_buddy mm; + int err; + + size = end = round_down(size, 4096); + err = drm_buddy_init(&mm, size, PAGE_SIZE); + if (err) + return err; + + if (mm.max_order != DRM_BUDDY_MAX_ORDER) { + pr_err("mm.max_order(%d) != %d\n", + mm.max_order, DRM_BUDDY_MAX_ORDER); + err = -EINVAL; + goto out_fini; + } + + err = drm_buddy_alloc_blocks(&mm, start, end, size, + PAGE_SIZE, &allocated, flags); + + if (unlikely(err)) + goto out_free; + + block = list_first_entry_or_null(&allocated, + struct drm_buddy_block, + link); + + if (!block) { + err = -EINVAL; + goto out_fini; + } + + if (drm_buddy_block_order(block) != mm.max_order) { + pr_err("block order(%d) != %d\n", + drm_buddy_block_order(block), mm.max_order); + err = -EINVAL; + goto out_free; + } + + if (drm_buddy_block_size(&mm, block) != + BIT_ULL(mm.max_order) * PAGE_SIZE) { + pr_err("block size(%llu) != %llu\n", + drm_buddy_block_size(&mm, block), + BIT_ULL(mm.max_order) * PAGE_SIZE); + err = -EINVAL; + goto out_free; + } + +out_free: + 
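/*
 * For orientation while reading these selftests, a minimal sketch of
 * the driver-facing drm_buddy flow they exercise.  Only calls that
 * appear in the tests above are used; the function name
 * example_buddy_user() and its parameters are illustrative.
 */
static int example_buddy_user(u64 pool_size, u64 chunk_size)
{
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	int err;

	err = drm_buddy_init(&mm, pool_size, chunk_size);
	if (err)
		return err;

	/*
	 * Ask for one chunk anywhere in [0, pool_size).  Passing
	 * DRM_BUDDY_RANGE_ALLOCATION in the flags instead constrains the
	 * allocation to the given start/end window, as
	 * igt_buddy_alloc_range() does above.
	 */
	err = drm_buddy_alloc_blocks(&mm, 0, pool_size, chunk_size,
				     chunk_size, &blocks, 0);
	if (err)
		goto out_fini;

	/* ... use the drm_buddy_block entries linked on &blocks ... */

	drm_buddy_free_list(&mm, &blocks);
out_fini:
	drm_buddy_fini(&mm);
	return err;
}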
drm_buddy_free_list(&mm, &allocated); +out_fini: + drm_buddy_fini(&mm); + return err; +} + +static int igt_sanitycheck(void *ignored) +{ + pr_info("%s - ok!\n", __func__); + return 0; +} + +#include "drm_selftest.c" + +static int __init test_drm_buddy_init(void) +{ + int err; + + while (!random_seed) + random_seed = get_random_int(); + + pr_info("Testing DRM buddy manager (struct drm_buddy), with random_seed=0x%x\n", + random_seed); + err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL); + + return err > 0 ? 0 : err; +} + +static void __exit test_drm_buddy_exit(void) +{ +} + +module_init(test_drm_buddy_init); +module_exit(test_drm_buddy_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c index 61b44d3a6a61..f6d66285c5fc 100644 --- a/drivers/gpu/drm/selftests/test-drm_framebuffer.c +++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c @@ -323,7 +323,6 @@ static struct drm_device mock_drm_device = { .max_width = MAX_WIDTH, .min_height = MIN_HEIGHT, .max_height = MAX_HEIGHT, - .allow_fb_modifiers = true, .funcs = &mock_config_funcs, }, }; diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 80078a9fd7f6..731cbad7520f 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -18,6 +18,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_module.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -295,7 +296,7 @@ static struct platform_driver shmob_drm_platform_driver = { }, }; -module_platform_driver(shmob_drm_platform_driver); +drm_module_platform_driver(shmob_drm_platform_driver); MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); MODULE_DESCRIPTION("Renesas SH Mobile DRM Driver"); diff --git a/drivers/gpu/drm/solomon/Kconfig b/drivers/gpu/drm/solomon/Kconfig new file mode 100644 index 000000000000..5861c3ab7c45 --- /dev/null +++ b/drivers/gpu/drm/solomon/Kconfig @@ -0,0 +1,21 @@ +config DRM_SSD130X + tristate "DRM support for Solomon SSD130x OLED displays" + depends on DRM + select BACKLIGHT_CLASS_DEVICE + select DRM_GEM_SHMEM_HELPER + select DRM_KMS_HELPER + help + DRM driver for the SSD1305, SSD1306, SSD1307 and SSD1309 Solomon + OLED controllers. This is only for the core driver, a driver for + the appropriate bus transport in your chip also must be selected. + + If M is selected the module will be called ssd130x. + +config DRM_SSD130X_I2C + tristate "DRM support for Solomon SSD130x OLED displays (I2C bus)" + depends on DRM_SSD130X && I2C + select REGMAP_I2C + help + Say Y here if the SSD130x OLED display is connected via I2C bus. + + If M is selected the module will be called ssd130x-i2c. diff --git a/drivers/gpu/drm/solomon/Makefile b/drivers/gpu/drm/solomon/Makefile new file mode 100644 index 000000000000..4bfc5acb0447 --- /dev/null +++ b/drivers/gpu/drm/solomon/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_DRM_SSD130X) += ssd130x.o +obj-$(CONFIG_DRM_SSD130X_I2C) += ssd130x-i2c.o diff --git a/drivers/gpu/drm/solomon/ssd130x-i2c.c b/drivers/gpu/drm/solomon/ssd130x-i2c.c new file mode 100644 index 000000000000..3126aeda4ced --- /dev/null +++ b/drivers/gpu/drm/solomon/ssd130x-i2c.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * DRM driver for Solomon SSD130x OLED displays (I2C bus) + * + * Copyright 2022 Red Hat Inc. 
+ * Author: Javier Martinez Canillas <javierm@redhat.com> + * + * Based on drivers/video/fbdev/ssd1307fb.c + * Copyright 2012 Free Electrons + */ +#include <linux/i2c.h> +#include <linux/module.h> + +#include "ssd130x.h" + +#define DRIVER_NAME "ssd130x-i2c" +#define DRIVER_DESC "DRM driver for Solomon SSD130x OLED displays (I2C)" + +static const struct regmap_config ssd130x_i2c_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +static int ssd130x_i2c_probe(struct i2c_client *client) +{ + struct ssd130x_device *ssd130x; + struct regmap *regmap; + + regmap = devm_regmap_init_i2c(client, &ssd130x_i2c_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ssd130x = ssd130x_probe(&client->dev, regmap); + if (IS_ERR(ssd130x)) + return PTR_ERR(ssd130x); + + i2c_set_clientdata(client, ssd130x); + + return 0; +} + +static int ssd130x_i2c_remove(struct i2c_client *client) +{ + struct ssd130x_device *ssd130x = i2c_get_clientdata(client); + + return ssd130x_remove(ssd130x); +} + +static void ssd130x_i2c_shutdown(struct i2c_client *client) +{ + struct ssd130x_device *ssd130x = i2c_get_clientdata(client); + + ssd130x_shutdown(ssd130x); +} + +static struct ssd130x_deviceinfo ssd130x_ssd1305_deviceinfo = { + .default_vcomh = 0x34, + .default_dclk_div = 1, + .default_dclk_frq = 7, +}; + +static struct ssd130x_deviceinfo ssd130x_ssd1306_deviceinfo = { + .default_vcomh = 0x20, + .default_dclk_div = 1, + .default_dclk_frq = 8, + .need_chargepump = 1, +}; + +static struct ssd130x_deviceinfo ssd130x_ssd1307_deviceinfo = { + .default_vcomh = 0x20, + .default_dclk_div = 2, + .default_dclk_frq = 12, + .need_pwm = 1, +}; + +static struct ssd130x_deviceinfo ssd130x_ssd1309_deviceinfo = { + .default_vcomh = 0x34, + .default_dclk_div = 1, + .default_dclk_frq = 10, +}; + +static const struct of_device_id ssd130x_of_match[] = { + { + .compatible = "solomon,ssd1305fb-i2c", + .data = &ssd130x_ssd1305_deviceinfo, + }, + { + .compatible = "solomon,ssd1306fb-i2c", + .data = &ssd130x_ssd1306_deviceinfo, + }, + { + .compatible = "solomon,ssd1307fb-i2c", + .data = &ssd130x_ssd1307_deviceinfo, + }, + { + .compatible = "solomon,ssd1309fb-i2c", + .data = &ssd130x_ssd1309_deviceinfo, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ssd130x_of_match); + +static struct i2c_driver ssd130x_i2c_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = ssd130x_of_match, + }, + .probe_new = ssd130x_i2c_probe, + .remove = ssd130x_i2c_remove, + .shutdown = ssd130x_i2c_shutdown, +}; +module_i2c_driver(ssd130x_i2c_driver); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("Javier Martinez Canillas <javierm@redhat.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c new file mode 100644 index 000000000000..92c1902f53e4 --- /dev/null +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -0,0 +1,843 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * DRM driver for Solomon SSD130x OLED displays + * + * Copyright 2022 Red Hat Inc. 
+ * Author: Javier Martinez Canillas <javierm@redhat.com> + * + * Based on drivers/video/fbdev/ssd1307fb.c + * Copyright 2012 Free Electrons + */ + +#include <linux/backlight.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/property.h> +#include <linux/pwm.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_format_helper.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_managed.h> +#include <drm/drm_modes.h> +#include <drm/drm_rect.h> +#include <drm/drm_probe_helper.h> + +#include "ssd130x.h" + +#define DRIVER_NAME "ssd130x" +#define DRIVER_DESC "DRM driver for Solomon SSD130x OLED displays" +#define DRIVER_DATE "20220131" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 + +#define SSD130X_DATA 0x40 +#define SSD130X_COMMAND 0x80 + +#define SSD130X_SET_ADDRESS_MODE 0x20 +#define SSD130X_SET_COL_RANGE 0x21 +#define SSD130X_SET_PAGE_RANGE 0x22 +#define SSD130X_CONTRAST 0x81 +#define SSD130X_SET_LOOKUP_TABLE 0x91 +#define SSD130X_CHARGE_PUMP 0x8d +#define SSD130X_SEG_REMAP_ON 0xa1 +#define SSD130X_DISPLAY_OFF 0xae +#define SSD130X_SET_MULTIPLEX_RATIO 0xa8 +#define SSD130X_DISPLAY_ON 0xaf +#define SSD130X_START_PAGE_ADDRESS 0xb0 +#define SSD130X_SET_COM_SCAN_DIR 0xc0 +#define SSD130X_SET_DISPLAY_OFFSET 0xd3 +#define SSD130X_SET_CLOCK_FREQ 0xd5 +#define SSD130X_SET_AREA_COLOR_MODE 0xd8 +#define SSD130X_SET_PRECHARGE_PERIOD 0xd9 +#define SSD130X_SET_COM_PINS_CONFIG 0xda +#define SSD130X_SET_VCOMH 0xdb + +#define SSD130X_SET_COM_SCAN_DIR_MASK GENMASK(3, 2) +#define SSD130X_SET_COM_SCAN_DIR_SET(val) FIELD_PREP(SSD130X_SET_COM_SCAN_DIR_MASK, (val)) +#define SSD130X_SET_CLOCK_DIV_MASK GENMASK(3, 0) +#define SSD130X_SET_CLOCK_DIV_SET(val) FIELD_PREP(SSD130X_SET_CLOCK_DIV_MASK, (val)) +#define SSD130X_SET_CLOCK_FREQ_MASK GENMASK(7, 4) +#define SSD130X_SET_CLOCK_FREQ_SET(val) FIELD_PREP(SSD130X_SET_CLOCK_FREQ_MASK, (val)) +#define SSD130X_SET_PRECHARGE_PERIOD1_MASK GENMASK(3, 0) +#define SSD130X_SET_PRECHARGE_PERIOD1_SET(val) FIELD_PREP(SSD130X_SET_PRECHARGE_PERIOD1_MASK, (val)) +#define SSD130X_SET_PRECHARGE_PERIOD2_MASK GENMASK(7, 4) +#define SSD130X_SET_PRECHARGE_PERIOD2_SET(val) FIELD_PREP(SSD130X_SET_PRECHARGE_PERIOD2_MASK, (val)) +#define SSD130X_SET_COM_PINS_CONFIG1_MASK GENMASK(4, 4) +#define SSD130X_SET_COM_PINS_CONFIG1_SET(val) FIELD_PREP(SSD130X_SET_COM_PINS_CONFIG1_MASK, !(val)) +#define SSD130X_SET_COM_PINS_CONFIG2_MASK GENMASK(5, 5) +#define SSD130X_SET_COM_PINS_CONFIG2_SET(val) FIELD_PREP(SSD130X_SET_COM_PINS_CONFIG2_MASK, (val)) + +#define SSD130X_SET_ADDRESS_MODE_HORIZONTAL 0x00 +#define SSD130X_SET_ADDRESS_MODE_VERTICAL 0x01 +#define SSD130X_SET_ADDRESS_MODE_PAGE 0x02 + +#define SSD130X_SET_AREA_COLOR_MODE_ENABLE 0x1e +#define SSD130X_SET_AREA_COLOR_MODE_LOW_POWER 0x05 + +#define MAX_CONTRAST 255 + +static inline struct ssd130x_device *drm_to_ssd130x(struct drm_device *drm) +{ + return container_of(drm, struct ssd130x_device, drm); +} + +/* + * Helper to write data (SSD130X_DATA) to the device. + */ +static int ssd130x_write_data(struct ssd130x_device *ssd130x, u8 *values, int count) +{ + return regmap_bulk_write(ssd130x->regmap, SSD130X_DATA, values, count); +} + +/* + * Helper to write command (SSD130X_COMMAND). 
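 *
 * Concretely, with the 8-bit register / 8-bit value regmap set up by
 * the I2C transport above, a call such as
 *
 *   ssd130x_write_cmd(ssd130x, 2, SSD130X_CONTRAST, 0x7f);
 *
 * turns into two register writes, (SSD130X_COMMAND, SSD130X_CONTRAST)
 * followed by (SSD130X_COMMAND, 0x7f): every command byte goes out
 * prefixed by the 0x80 command marker, while pixel data written through
 * ssd130x_write_data() is prefixed by SSD130X_DATA (0x40) instead.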
The fist variadic argument + * is the command to write and the following are the command options. + * + * Note that the ssd130x protocol requires each command and option to be + * written as a SSD130X_COMMAND device register value. That is why a call + * to regmap_write(..., SSD130X_COMMAND, ...) is done for each argument. + */ +static int ssd130x_write_cmd(struct ssd130x_device *ssd130x, int count, + /* u8 cmd, u8 option, ... */...) +{ + va_list ap; + u8 value; + int ret; + + va_start(ap, count); + + do { + value = va_arg(ap, int); + ret = regmap_write(ssd130x->regmap, SSD130X_COMMAND, value); + if (ret) + goto out_end; + } while (--count); + +out_end: + va_end(ap); + + return ret; +} + +static int ssd130x_set_col_range(struct ssd130x_device *ssd130x, + u8 col_start, u8 cols) +{ + u8 col_end = col_start + cols - 1; + int ret; + + if (col_start == ssd130x->col_start && col_end == ssd130x->col_end) + return 0; + + ret = ssd130x_write_cmd(ssd130x, 3, SSD130X_SET_COL_RANGE, col_start, col_end); + if (ret < 0) + return ret; + + ssd130x->col_start = col_start; + ssd130x->col_end = col_end; + return 0; +} + +static int ssd130x_set_page_range(struct ssd130x_device *ssd130x, + u8 page_start, u8 pages) +{ + u8 page_end = page_start + pages - 1; + int ret; + + if (page_start == ssd130x->page_start && page_end == ssd130x->page_end) + return 0; + + ret = ssd130x_write_cmd(ssd130x, 3, SSD130X_SET_PAGE_RANGE, page_start, page_end); + if (ret < 0) + return ret; + + ssd130x->page_start = page_start; + ssd130x->page_end = page_end; + return 0; +} + +static int ssd130x_pwm_enable(struct ssd130x_device *ssd130x) +{ + struct device *dev = ssd130x->dev; + struct pwm_state pwmstate; + + ssd130x->pwm = pwm_get(dev, NULL); + if (IS_ERR(ssd130x->pwm)) { + dev_err(dev, "Could not get PWM from firmware description!\n"); + return PTR_ERR(ssd130x->pwm); + } + + pwm_init_state(ssd130x->pwm, &pwmstate); + pwm_set_relative_duty_cycle(&pwmstate, 50, 100); + pwm_apply_state(ssd130x->pwm, &pwmstate); + + /* Enable the PWM */ + pwm_enable(ssd130x->pwm); + + dev_dbg(dev, "Using PWM%d with a %lluns period.\n", + ssd130x->pwm->pwm, pwm_get_period(ssd130x->pwm)); + + return 0; +} + +static void ssd130x_reset(struct ssd130x_device *ssd130x) +{ + if (!ssd130x->reset) + return; + + /* Reset the screen */ + gpiod_set_value_cansleep(ssd130x->reset, 1); + udelay(4); + gpiod_set_value_cansleep(ssd130x->reset, 0); + udelay(4); +} + +static int ssd130x_power_on(struct ssd130x_device *ssd130x) +{ + struct device *dev = ssd130x->dev; + int ret; + + ssd130x_reset(ssd130x); + + ret = regulator_enable(ssd130x->vcc_reg); + if (ret) { + dev_err(dev, "Failed to enable VCC: %d\n", ret); + return ret; + } + + if (ssd130x->device_info->need_pwm) { + ret = ssd130x_pwm_enable(ssd130x); + if (ret) { + dev_err(dev, "Failed to enable PWM: %d\n", ret); + regulator_disable(ssd130x->vcc_reg); + return ret; + } + } + + return 0; +} + +static void ssd130x_power_off(struct ssd130x_device *ssd130x) +{ + pwm_disable(ssd130x->pwm); + pwm_put(ssd130x->pwm); + + regulator_disable(ssd130x->vcc_reg); +} + +static int ssd130x_init(struct ssd130x_device *ssd130x) +{ + u32 precharge, dclk, com_invdir, compins, chargepump; + int ret; + + /* Set initial contrast */ + ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_CONTRAST, ssd130x->contrast); + if (ret < 0) + return ret; + + /* Set segment re-map */ + if (ssd130x->seg_remap) { + ret = ssd130x_write_cmd(ssd130x, 1, SSD130X_SEG_REMAP_ON); + if (ret < 0) + return ret; + } + + /* Set COM direction */ + com_invdir = 
(SSD130X_SET_COM_SCAN_DIR | + SSD130X_SET_COM_SCAN_DIR_SET(ssd130x->com_invdir)); + ret = ssd130x_write_cmd(ssd130x, 1, com_invdir); + if (ret < 0) + return ret; + + /* Set multiplex ratio value */ + ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_MULTIPLEX_RATIO, ssd130x->height - 1); + if (ret < 0) + return ret; + + /* set display offset value */ + ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_DISPLAY_OFFSET, ssd130x->com_offset); + if (ret < 0) + return ret; + + /* Set clock frequency */ + dclk = (SSD130X_SET_CLOCK_DIV_SET(ssd130x->dclk_div - 1) | + SSD130X_SET_CLOCK_FREQ_SET(ssd130x->dclk_frq)); + ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_CLOCK_FREQ, dclk); + if (ret < 0) + return ret; + + /* Set Area Color Mode ON/OFF & Low Power Display Mode */ + if (ssd130x->area_color_enable || ssd130x->low_power) { + u32 mode = 0; + + if (ssd130x->area_color_enable) + mode |= SSD130X_SET_AREA_COLOR_MODE_ENABLE; + + if (ssd130x->low_power) + mode |= SSD130X_SET_AREA_COLOR_MODE_LOW_POWER; + + ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_AREA_COLOR_MODE, mode); + if (ret < 0) + return ret; + } + + /* Set precharge period in number of ticks from the internal clock */ + precharge = (SSD130X_SET_PRECHARGE_PERIOD1_SET(ssd130x->prechargep1) | + SSD130X_SET_PRECHARGE_PERIOD1_SET(ssd130x->prechargep2)); + ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_PRECHARGE_PERIOD, precharge); + if (ret < 0) + return ret; + + /* Set COM pins configuration */ + compins = BIT(1); + compins |= (SSD130X_SET_COM_PINS_CONFIG1_SET(ssd130x->com_seq) | + SSD130X_SET_COM_PINS_CONFIG2_SET(ssd130x->com_lrremap)); + ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_COM_PINS_CONFIG, compins); + if (ret < 0) + return ret; + + /* Set VCOMH */ + ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_VCOMH, ssd130x->vcomh); + if (ret < 0) + return ret; + + /* Turn on the DC-DC Charge Pump */ + chargepump = BIT(4); + + if (ssd130x->device_info->need_chargepump) + chargepump |= BIT(2); + + ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_CHARGE_PUMP, chargepump); + if (ret < 0) + return ret; + + /* Set lookup table */ + if (ssd130x->lookup_table_set) { + int i; + + ret = ssd130x_write_cmd(ssd130x, 1, SSD130X_SET_LOOKUP_TABLE); + if (ret < 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(ssd130x->lookup_table); i++) { + u8 val = ssd130x->lookup_table[i]; + + if (val < 31 || val > 63) + dev_warn(ssd130x->dev, + "lookup table index %d value out of range 31 <= %d <= 63\n", + i, val); + ret = ssd130x_write_cmd(ssd130x, 1, val); + if (ret < 0) + return ret; + } + } + + /* Switch to horizontal addressing mode */ + return ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_ADDRESS_MODE, + SSD130X_SET_ADDRESS_MODE_HORIZONTAL); +} + +static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf, + struct drm_rect *rect) +{ + unsigned int x = rect->x1; + unsigned int y = rect->y1; + unsigned int width = drm_rect_width(rect); + unsigned int height = drm_rect_height(rect); + unsigned int line_length = DIV_ROUND_UP(width, 8); + unsigned int pages = DIV_ROUND_UP(y % 8 + height, 8); + u32 array_idx = 0; + int ret, i, j, k; + u8 *data_array = NULL; + + data_array = kcalloc(width, pages, GFP_KERNEL); + if (!data_array) + return -ENOMEM; + + /* + * The screen is divided in pages, each having a height of 8 + * pixels, and the width of the screen. When sending a byte of + * data to the controller, it gives the 8 bits for the current + * column. I.e, the first byte are the 8 bits of the first + * column, then the 8 bits for the second column, etc. 
+ * + * + * Representation of the screen, assuming it is 5 bits + * wide. Each letter-number combination is a bit that controls + * one pixel. + * + * A0 A1 A2 A3 A4 + * B0 B1 B2 B3 B4 + * C0 C1 C2 C3 C4 + * D0 D1 D2 D3 D4 + * E0 E1 E2 E3 E4 + * F0 F1 F2 F3 F4 + * G0 G1 G2 G3 G4 + * H0 H1 H2 H3 H4 + * + * If you want to update this screen, you need to send 5 bytes: + * (1) A0 B0 C0 D0 E0 F0 G0 H0 + * (2) A1 B1 C1 D1 E1 F1 G1 H1 + * (3) A2 B2 C2 D2 E2 F2 G2 H2 + * (4) A3 B3 C3 D3 E3 F3 G3 H3 + * (5) A4 B4 C4 D4 E4 F4 G4 H4 + */ + + ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width); + if (ret < 0) + goto out_free; + + ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset + y / 8, pages); + if (ret < 0) + goto out_free; + + for (i = y / 8; i < y / 8 + pages; i++) { + int m = 8; + + /* Last page may be partial */ + if (8 * (i + 1) > ssd130x->height) + m = ssd130x->height % 8; + for (j = x; j < x + width; j++) { + u8 data = 0; + + for (k = 0; k < m; k++) { + u8 byte = buf[(8 * i + k) * line_length + j / 8]; + u8 bit = (byte >> (j % 8)) & 1; + + data |= bit << k; + } + data_array[array_idx++] = data; + } + } + + ret = ssd130x_write_data(ssd130x, data_array, width * pages); + +out_free: + kfree(data_array); + return ret; +} + +static void ssd130x_clear_screen(struct ssd130x_device *ssd130x) +{ + u8 *buf = NULL; + struct drm_rect fullscreen = { + .x1 = 0, + .x2 = ssd130x->width, + .y1 = 0, + .y2 = ssd130x->height, + }; + + buf = kcalloc(ssd130x->width, ssd130x->height, GFP_KERNEL); + if (!buf) + return; + + ssd130x_update_rect(ssd130x, buf, &fullscreen); + + kfree(buf); +} + +static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_map *map, + struct drm_rect *rect) +{ + struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev); + void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */ + int ret = 0; + u8 *buf = NULL; + + buf = kcalloc(fb->width, fb->height, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + drm_fb_xrgb8888_to_mono_reversed(buf, 0, vmap, fb, rect); + + ssd130x_update_rect(ssd130x, buf, rect); + + kfree(buf); + + return ret; +} + +static int ssd130x_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe, + const struct drm_display_mode *mode) +{ + struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev); + + if (mode->hdisplay != ssd130x->mode.hdisplay && + mode->vdisplay != ssd130x->mode.vdisplay) + return MODE_ONE_SIZE; + + if (mode->hdisplay != ssd130x->mode.hdisplay) + return MODE_ONE_WIDTH; + + if (mode->vdisplay != ssd130x->mode.vdisplay) + return MODE_ONE_HEIGHT; + + return MODE_OK; +} + +static void ssd130x_display_pipe_enable(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plane_state) +{ + struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev); + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + struct drm_device *drm = &ssd130x->drm; + int idx, ret; + + ret = ssd130x_power_on(ssd130x); + if (ret) + return; + + ret = ssd130x_init(ssd130x); + if (ret) + goto out_power_off; + + if (!drm_dev_enter(drm, &idx)) + goto out_power_off; + + ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &plane_state->dst); + + ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON); + + backlight_enable(ssd130x->bl_dev); + + drm_dev_exit(idx); + + return; +out_power_off: + ssd130x_power_off(ssd130x); +} + +static void ssd130x_display_pipe_disable(struct drm_simple_display_pipe *pipe) +{ + struct ssd130x_device 
*ssd130x = drm_to_ssd130x(pipe->crtc.dev); + struct drm_device *drm = &ssd130x->drm; + int idx; + + if (!drm_dev_enter(drm, &idx)) + return; + + ssd130x_clear_screen(ssd130x); + + backlight_disable(ssd130x->bl_dev); + + ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_OFF); + + ssd130x_power_off(ssd130x); + + drm_dev_exit(idx); +} + +static void ssd130x_display_pipe_update(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_plane_state) +{ + struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev); + struct drm_plane_state *plane_state = pipe->plane.state; + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + struct drm_framebuffer *fb = plane_state->fb; + struct drm_device *drm = &ssd130x->drm; + struct drm_rect src_clip, dst_clip; + int idx; + + if (!fb) + return; + + if (!pipe->crtc.state->active) + return; + + if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip)) + return; + + dst_clip = plane_state->dst; + if (!drm_rect_intersect(&dst_clip, &src_clip)) + return; + + if (!drm_dev_enter(drm, &idx)) + return; + + ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &dst_clip); + + drm_dev_exit(idx); +} + +static const struct drm_simple_display_pipe_funcs ssd130x_pipe_funcs = { + .mode_valid = ssd130x_display_pipe_mode_valid, + .enable = ssd130x_display_pipe_enable, + .disable = ssd130x_display_pipe_disable, + .update = ssd130x_display_pipe_update, + DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS, +}; + +static int ssd130x_connector_get_modes(struct drm_connector *connector) +{ + struct ssd130x_device *ssd130x = drm_to_ssd130x(connector->dev); + struct drm_display_mode *mode = &ssd130x->mode; + struct device *dev = ssd130x->dev; + + mode = drm_mode_duplicate(connector->dev, &ssd130x->mode); + if (!mode) { + dev_err(dev, "Failed to duplicated mode\n"); + return 0; + } + + drm_mode_probed_add(connector, mode); + drm_set_preferred_mode(connector, mode->hdisplay, mode->vdisplay); + + /* There is only a single mode */ + return 1; +} + +static const struct drm_connector_helper_funcs ssd130x_connector_helper_funcs = { + .get_modes = ssd130x_connector_get_modes, +}; + +static const struct drm_connector_funcs ssd130x_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_mode_config_funcs ssd130x_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static const uint32_t ssd130x_formats[] = { + DRM_FORMAT_XRGB8888, +}; + +DEFINE_DRM_GEM_FOPS(ssd130x_fops); + +static const struct drm_driver ssd130x_drm_driver = { + DRM_GEM_SHMEM_DRIVER_OPS, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, + .fops = &ssd130x_fops, +}; + +static int ssd130x_update_bl(struct backlight_device *bdev) +{ + struct ssd130x_device *ssd130x = bl_get_data(bdev); + int brightness = backlight_get_brightness(bdev); + int ret; + + ssd130x->contrast = brightness; + + ret = ssd130x_write_cmd(ssd130x, 1, SSD130X_CONTRAST); + if (ret < 0) + return ret; + + ret = ssd130x_write_cmd(ssd130x, 1, ssd130x->contrast); + if (ret < 
0) + return ret; + + return 0; +} + +static const struct backlight_ops ssd130xfb_bl_ops = { + .update_status = ssd130x_update_bl, +}; + +static void ssd130x_parse_properties(struct ssd130x_device *ssd130x) +{ + struct device *dev = ssd130x->dev; + + if (device_property_read_u32(dev, "solomon,width", &ssd130x->width)) + ssd130x->width = 96; + + if (device_property_read_u32(dev, "solomon,height", &ssd130x->height)) + ssd130x->height = 16; + + if (device_property_read_u32(dev, "solomon,page-offset", &ssd130x->page_offset)) + ssd130x->page_offset = 1; + + if (device_property_read_u32(dev, "solomon,col-offset", &ssd130x->col_offset)) + ssd130x->col_offset = 0; + + if (device_property_read_u32(dev, "solomon,com-offset", &ssd130x->com_offset)) + ssd130x->com_offset = 0; + + if (device_property_read_u32(dev, "solomon,prechargep1", &ssd130x->prechargep1)) + ssd130x->prechargep1 = 2; + + if (device_property_read_u32(dev, "solomon,prechargep2", &ssd130x->prechargep2)) + ssd130x->prechargep2 = 2; + + if (!device_property_read_u8_array(dev, "solomon,lookup-table", + ssd130x->lookup_table, + ARRAY_SIZE(ssd130x->lookup_table))) + ssd130x->lookup_table_set = 1; + + ssd130x->seg_remap = !device_property_read_bool(dev, "solomon,segment-no-remap"); + ssd130x->com_seq = device_property_read_bool(dev, "solomon,com-seq"); + ssd130x->com_lrremap = device_property_read_bool(dev, "solomon,com-lrremap"); + ssd130x->com_invdir = device_property_read_bool(dev, "solomon,com-invdir"); + ssd130x->area_color_enable = + device_property_read_bool(dev, "solomon,area-color-enable"); + ssd130x->low_power = device_property_read_bool(dev, "solomon,low-power"); + + ssd130x->contrast = 127; + ssd130x->vcomh = ssd130x->device_info->default_vcomh; + + /* Setup display timing */ + if (device_property_read_u32(dev, "solomon,dclk-div", &ssd130x->dclk_div)) + ssd130x->dclk_div = ssd130x->device_info->default_dclk_div; + if (device_property_read_u32(dev, "solomon,dclk-frq", &ssd130x->dclk_frq)) + ssd130x->dclk_frq = ssd130x->device_info->default_dclk_frq; +} + +static int ssd130x_init_modeset(struct ssd130x_device *ssd130x) +{ + struct drm_display_mode *mode = &ssd130x->mode; + struct device *dev = ssd130x->dev; + struct drm_device *drm = &ssd130x->drm; + unsigned long max_width, max_height; + int ret; + + ret = drmm_mode_config_init(drm); + if (ret) { + dev_err(dev, "DRM mode config init failed: %d\n", ret); + return ret; + } + + mode->type = DRM_MODE_TYPE_DRIVER; + mode->clock = 1; + mode->hdisplay = mode->htotal = ssd130x->width; + mode->hsync_start = mode->hsync_end = ssd130x->width; + mode->vdisplay = mode->vtotal = ssd130x->height; + mode->vsync_start = mode->vsync_end = ssd130x->height; + mode->width_mm = 27; + mode->height_mm = 27; + + max_width = max_t(unsigned long, mode->hdisplay, DRM_SHADOW_PLANE_MAX_WIDTH); + max_height = max_t(unsigned long, mode->vdisplay, DRM_SHADOW_PLANE_MAX_HEIGHT); + + drm->mode_config.min_width = mode->hdisplay; + drm->mode_config.max_width = max_width; + drm->mode_config.min_height = mode->vdisplay; + drm->mode_config.max_height = max_height; + drm->mode_config.preferred_depth = 32; + drm->mode_config.funcs = &ssd130x_mode_config_funcs; + + ret = drm_connector_init(drm, &ssd130x->connector, &ssd130x_connector_funcs, + DRM_MODE_CONNECTOR_Unknown); + if (ret) { + dev_err(dev, "DRM connector init failed: %d\n", ret); + return ret; + } + + drm_connector_helper_add(&ssd130x->connector, &ssd130x_connector_helper_funcs); + + ret = drm_simple_display_pipe_init(drm, &ssd130x->pipe, &ssd130x_pipe_funcs, + 
ssd130x_formats, ARRAY_SIZE(ssd130x_formats), + NULL, &ssd130x->connector); + if (ret) { + dev_err(dev, "DRM simple display pipeline init failed: %d\n", ret); + return ret; + } + + drm_plane_enable_fb_damage_clips(&ssd130x->pipe.plane); + + drm_mode_config_reset(drm); + + return 0; +} + +static int ssd130x_get_resources(struct ssd130x_device *ssd130x) +{ + struct device *dev = ssd130x->dev; + + ssd130x->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ssd130x->reset)) + return dev_err_probe(dev, PTR_ERR(ssd130x->reset), + "Failed to get reset gpio\n"); + + ssd130x->vcc_reg = devm_regulator_get(dev, "vcc"); + if (IS_ERR(ssd130x->vcc_reg)) + return dev_err_probe(dev, PTR_ERR(ssd130x->vcc_reg), + "Failed to get VCC regulator\n"); + + return 0; +} + +struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap) +{ + struct ssd130x_device *ssd130x; + struct backlight_device *bl; + struct drm_device *drm; + int ret; + + ssd130x = devm_drm_dev_alloc(dev, &ssd130x_drm_driver, + struct ssd130x_device, drm); + if (IS_ERR(ssd130x)) + return ERR_PTR(dev_err_probe(dev, PTR_ERR(ssd130x), + "Failed to allocate DRM device\n")); + + drm = &ssd130x->drm; + + ssd130x->dev = dev; + ssd130x->regmap = regmap; + ssd130x->device_info = device_get_match_data(dev); + + ssd130x_parse_properties(ssd130x); + + ret = ssd130x_get_resources(ssd130x); + if (ret) + return ERR_PTR(ret); + + bl = devm_backlight_device_register(dev, dev_name(dev), dev, ssd130x, + &ssd130xfb_bl_ops, NULL); + if (IS_ERR(bl)) + return ERR_PTR(dev_err_probe(dev, PTR_ERR(bl), + "Unable to register backlight device\n")); + + bl->props.brightness = ssd130x->contrast; + bl->props.max_brightness = MAX_CONTRAST; + ssd130x->bl_dev = bl; + + ret = ssd130x_init_modeset(ssd130x); + if (ret) + return ERR_PTR(ret); + + ret = drm_dev_register(drm, 0); + if (ret) + return ERR_PTR(dev_err_probe(dev, ret, "DRM device register failed\n")); + + drm_fbdev_generic_setup(drm, 0); + + return ssd130x; +} +EXPORT_SYMBOL_GPL(ssd130x_probe); + +int ssd130x_remove(struct ssd130x_device *ssd130x) +{ + drm_dev_unplug(&ssd130x->drm); + + return 0; +} +EXPORT_SYMBOL_GPL(ssd130x_remove); + +void ssd130x_shutdown(struct ssd130x_device *ssd130x) +{ + drm_atomic_helper_shutdown(&ssd130x->drm); +} +EXPORT_SYMBOL_GPL(ssd130x_shutdown); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("Javier Martinez Canillas <javierm@redhat.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h new file mode 100644 index 000000000000..cd21cdccb566 --- /dev/null +++ b/drivers/gpu/drm/solomon/ssd130x.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Header file for: + * DRM driver for Solomon SSD130x OLED displays + * + * Copyright 2022 Red Hat Inc. 
+ * Author: Javier Martinez Canillas <javierm@redhat.com> + * + * Based on drivers/video/fbdev/ssd1307fb.c + * Copyright 2012 Free Electrons + */ + +#ifndef __SSD1307X_H__ +#define __SSD1307X_H__ + +#include <drm/drm_drv.h> +#include <drm/drm_simple_kms_helper.h> + +#include <linux/regmap.h> + +struct ssd130x_deviceinfo { + u32 default_vcomh; + u32 default_dclk_div; + u32 default_dclk_frq; + int need_pwm; + int need_chargepump; +}; + +struct ssd130x_device { + struct drm_device drm; + struct device *dev; + struct drm_simple_display_pipe pipe; + struct drm_display_mode mode; + struct drm_connector connector; + struct i2c_client *client; + + struct regmap *regmap; + + const struct ssd130x_deviceinfo *device_info; + + unsigned area_color_enable : 1; + unsigned com_invdir : 1; + unsigned com_lrremap : 1; + unsigned com_seq : 1; + unsigned lookup_table_set : 1; + unsigned low_power : 1; + unsigned seg_remap : 1; + u32 com_offset; + u32 contrast; + u32 dclk_div; + u32 dclk_frq; + u32 height; + u8 lookup_table[4]; + u32 page_offset; + u32 col_offset; + u32 prechargep1; + u32 prechargep2; + + struct backlight_device *bl_dev; + struct pwm_device *pwm; + struct gpio_desc *reset; + struct regulator *vcc_reg; + u32 vcomh; + u32 width; + /* Cached address ranges */ + u8 col_start; + u8 col_end; + u8 page_start; + u8 page_end; +}; + +struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap); +int ssd130x_remove(struct ssd130x_device *ssd130x); +void ssd130x_shutdown(struct ssd130x_device *ssd130x); + +#endif /* __SSD1307X_H__ */ diff --git a/drivers/gpu/drm/sprd/Kconfig b/drivers/gpu/drm/sprd/Kconfig index 3edeaeca0e65..9a9c7ebfc716 100644 --- a/drivers/gpu/drm/sprd/Kconfig +++ b/drivers/gpu/drm/sprd/Kconfig @@ -3,7 +3,6 @@ config DRM_SPRD depends on ARCH_SPRD || COMPILE_TEST depends on DRM && OF select DRM_GEM_CMA_HELPER - select DRM_KMS_CMA_HELPER select DRM_KMS_HELPER select DRM_MIPI_DSI select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c index 06a3414ee43a..1637203ea103 100644 --- a/drivers/gpu/drm/sprd/sprd_dpu.c +++ b/drivers/gpu/drm/sprd/sprd_dpu.c @@ -790,6 +790,11 @@ static int sprd_dpu_context_init(struct sprd_dpu *dpu, int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "failed to get I/O resource\n"); + return -EINVAL; + } + ctx->base = devm_ioremap(dev, res->start, resource_size(res)); if (!ctx->base) { dev_err(dev, "failed to map dpu registers\n"); diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c index a077e2d4d721..a60ecdd67d98 100644 --- a/drivers/gpu/drm/sprd/sprd_drm.c +++ b/drivers/gpu/drm/sprd/sprd_drm.c @@ -43,7 +43,6 @@ static void sprd_drm_mode_config_init(struct drm_device *drm) drm->mode_config.min_height = 0; drm->mode_config.max_width = 8192; drm->mode_config.max_height = 8192; - drm->mode_config.allow_fb_modifiers = true; drm->mode_config.funcs = &sprd_drm_mode_config_funcs; drm->mode_config.helper_private = &sprd_drm_mode_config_helper; @@ -155,7 +154,7 @@ static void sprd_drm_shutdown(struct platform_device *pdev) struct drm_device *drm = platform_get_drvdata(pdev); if (!drm) { - drm_warn(drm, "drm device is not available, no shutdown\n"); + dev_warn(&pdev->dev, "drm device is not available, no shutdown\n"); return; } @@ -186,6 +185,9 @@ static struct platform_driver *sprd_drm_drivers[] = { static int __init sprd_drm_init(void) { + if (drm_firmware_drivers_only()) + return -ENODEV; + return 
platform_register_drivers(sprd_drm_drivers, ARRAY_SIZE(sprd_drm_drivers)); } diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c index 911b3cddc264..12b67a5d5923 100644 --- a/drivers/gpu/drm/sprd/sprd_dsi.c +++ b/drivers/gpu/drm/sprd/sprd_dsi.c @@ -907,6 +907,11 @@ static int sprd_dsi_context_init(struct sprd_dsi *dsi, struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "failed to get I/O resource\n"); + return -EINVAL; + } + ctx->base = devm_ioremap(dev, res->start, resource_size(res)); if (!ctx->base) { drm_err(dsi->drm, "failed to map dsi host registers\n"); diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index c7efb43b83ee..860b2230aa08 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -287,6 +287,9 @@ static struct platform_driver * const drivers[] = { static int sti_drm_init(void) { + if (drm_firmware_drivers_only()) + return -ENODEV; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(sti_drm_init); diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index 9f441aadf2d5..0da7cce2a1a2 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -22,6 +22,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_module.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -241,7 +242,7 @@ static struct platform_driver stm_drm_platform_driver = { }, }; -module_platform_driver(stm_drm_platform_driver); +drm_module_platform_driver(stm_drm_platform_driver); MODULE_AUTHOR("Philippe Cornu <philippe.cornu@st.com>"); MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>"); diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index b630614b3d72..a3fd441dd9ad 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -19,6 +19,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_module.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -441,7 +442,7 @@ static struct platform_driver sun4i_drv_platform_driver = { .pm = &sun4i_drv_drm_pm_ops, }, }; -module_platform_driver(sun4i_drv_platform_driver); +drm_module_platform_driver(sun4i_drv_platform_driver); MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>"); MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>"); diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index e9de91a4e7e8..9464f522e257 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1410,6 +1410,9 @@ static int __init host1x_drm_init(void) { int err; + if (drm_firmware_drivers_only()) + return -ENODEV; + err = host1x_driver_register(&host1x_drm_driver); if (err < 0) return err; diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index 7c784e90e40e..04cfff89ee51 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -17,6 +17,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_managed.h> +#include <drm/drm_module.h> #include <drm/drm_probe_helper.h> #include "tidss_dispc.h" @@ -251,7 +252,7 @@ static struct platform_driver tidss_platform_driver = { }, }; -module_platform_driver(tidss_platform_driver); +drm_module_platform_driver(tidss_platform_driver); 
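The module_platform_driver() to drm_module_platform_driver() conversions in this merge, together with the drm_firmware_drivers_only() checks added to the init functions that keep open-coded registration, apply one policy: skip registering the native DRM driver when the user asked for firmware framebuffer drivers only (nomodeset). A minimal sketch of both variants for a hypothetical foo_platform_driver (the driver name and functions are illustrative, not part of this merge, and the open-coded branch is only roughly what the <drm/drm_module.h> helper does internally):

#include <linux/module.h>
#include <linux/platform_device.h>

#include <drm/drm_drv.h>	/* drm_firmware_drivers_only() */
#include <drm/drm_module.h>	/* drm_module_platform_driver() */

static struct platform_driver foo_platform_driver = {
	.driver = {
		.name = "foo-drm",	/* hypothetical platform device */
	},
};

#ifdef FOO_OPEN_CODED
/* Open-coded variant, as used by sti, sprd, tegra, tilcdc and xen. */
static int __init foo_drm_init(void)
{
	if (drm_firmware_drivers_only())
		return -ENODEV;

	return platform_driver_register(&foo_platform_driver);
}
module_init(foo_drm_init);

static void __exit foo_drm_exit(void)
{
	platform_driver_unregister(&foo_platform_driver);
}
module_exit(foo_drm_exit);
#else
/* One-line replacement, as used by stm, sun4i, tidss, arcpgu, tve200
 * and zynqmp; it folds in the same firmware-drivers-only check before
 * registering the driver.
 */
drm_module_platform_driver(foo_platform_driver);
#endif

MODULE_LICENSE("GPL v2");

The PCI-based drivers further down use the sibling helpers from the same header: vboxvideo switches to drm_module_pci_driver_if_modeset() so it keeps honouring its vbox_modeset parameter, and vmwgfx to drm_module_pci_driver().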
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); MODULE_DESCRIPTION("TI Keystone DSS Driver"); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index cc567c87057d..eee3c447fbac 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -604,6 +604,9 @@ static struct platform_driver tilcdc_platform_driver = { static int __init tilcdc_drm_init(void) { + if (drm_firmware_drivers_only()) + return -ENODEV; + DBG("init"); tilcdc_panel_init(); return platform_driver_register(&tilcdc_platform_driver); diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c index f8531c50a072..f0fa3b15c341 100644 --- a/drivers/gpu/drm/tiny/arcpgu.c +++ b/drivers/gpu/drm/tiny/arcpgu.c @@ -15,6 +15,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_module.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> @@ -427,7 +428,7 @@ static struct platform_driver arcpgu_platform_driver = { }, }; -module_platform_driver(arcpgu_platform_driver); +drm_module_platform_driver(arcpgu_platform_driver); MODULE_AUTHOR("Carlos Palminha <palminha@synopsys.com>"); MODULE_DESCRIPTION("ARC PGU DRM driver"); diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 29ee2eda7b7d..9a477f0fddee 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -41,6 +41,8 @@ void ttm_resource_init(struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_resource *res) { + struct ttm_resource_manager *man; + res->start = 0; res->num_pages = PFN_UP(bo->base.size); res->mem_type = place->mem_type; @@ -50,6 +52,11 @@ void ttm_resource_init(struct ttm_buffer_object *bo, res->bus.is_iomem = false; res->bus.caching = ttm_cached; res->bo = bo; + + man = ttm_manager_type(bo->bdev, place->mem_type); + spin_lock(&bo->bdev->lru_lock); + man->usage += bo->base.size; + spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_resource_init); @@ -65,6 +72,9 @@ EXPORT_SYMBOL(ttm_resource_init); void ttm_resource_fini(struct ttm_resource_manager *man, struct ttm_resource *res) { + spin_lock(&man->bdev->lru_lock); + man->usage -= res->bo->base.size; + spin_unlock(&man->bdev->lru_lock); } EXPORT_SYMBOL(ttm_resource_fini); @@ -153,19 +163,20 @@ void ttm_resource_set_bo(struct ttm_resource *res, * * @man: memory manager object to init * @bdev: ttm device this manager belongs to - * @p_size: size managed area in pages. + * @size: size of managed resources in arbitrary units * * Initialise core parts of a manager object. */ void ttm_resource_manager_init(struct ttm_resource_manager *man, struct ttm_device *bdev, - unsigned long p_size) + uint64_t size) { unsigned i; spin_lock_init(&man->move_lock); man->bdev = bdev; - man->size = p_size; + man->size = size; + man->usage = 0; for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) INIT_LIST_HEAD(&man->lru[i]); @@ -227,6 +238,24 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev, EXPORT_SYMBOL(ttm_resource_manager_evict_all); /** + * ttm_resource_manager_usage + * + * @man: A memory manager object. + * + * Return how many resources are currently used. 
+ */ +uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man) +{ + uint64_t usage; + + spin_lock(&man->bdev->lru_lock); + usage = man->usage; + spin_unlock(&man->bdev->lru_lock); + return usage; +} +EXPORT_SYMBOL(ttm_resource_manager_usage); + +/** * ttm_resource_manager_debug * * @man: manager type to dump. @@ -238,6 +267,7 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man, drm_printf(p, " use_type: %d\n", man->use_type); drm_printf(p, " use_tt: %d\n", man->use_tt); drm_printf(p, " size: %llu\n", man->size); + drm_printf(p, " usage: %llu\n", ttm_resource_manager_usage(man)); if (man->func->debug) man->func->debug(man, p); } diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c index 7fa71c8bb828..6d9d2921abf4 100644 --- a/drivers/gpu/drm/tve200/tve200_drv.c +++ b/drivers/gpu/drm/tve200/tve200_drv.c @@ -43,6 +43,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_module.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> @@ -266,7 +267,7 @@ static struct platform_driver tve200_driver = { .probe = tve200_probe, .remove = tve200_remove, }; -module_platform_driver(tve200_driver); +drm_module_platform_driver(tve200_driver); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index 6e3113f419f4..8b3229a37c6d 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -59,6 +59,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = { .vmap = drm_gem_shmem_object_vmap, .vunmap = drm_gem_shmem_object_vunmap, .mmap = drm_gem_shmem_object_mmap, + .vm_ops = &drm_gem_shmem_vm_ops, }; /* gem_create_object function for allocating a BO struct and doing diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c index e76b24bb8828..29fd13109e43 100644 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -6,6 +6,7 @@ #include <linux/debugfs.h> #include <linux/pm_runtime.h> #include <linux/seq_file.h> +#include <linux/string_helpers.h> #include <drm/drm_debugfs.h> @@ -148,15 +149,15 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused) V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPREV), V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPIDX)); seq_printf(m, "MMU: %s\n", - (ident2 & V3D_HUB_IDENT2_WITH_MMU) ? "yes" : "no"); + str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU)); seq_printf(m, "TFU: %s\n", - (ident1 & V3D_HUB_IDENT1_WITH_TFU) ? "yes" : "no"); + str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU)); seq_printf(m, "TSY: %s\n", - (ident1 & V3D_HUB_IDENT1_WITH_TSY) ? "yes" : "no"); + str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY)); seq_printf(m, "MSO: %s\n", - (ident1 & V3D_HUB_IDENT1_WITH_MSO) ? "yes" : "no"); + str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_MSO)); seq_printf(m, "L3C: %s (%dkb)\n", - (ident1 & V3D_HUB_IDENT1_WITH_L3C) ? 
"yes" : "no", + str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_L3C), V3D_GET_FIELD(ident2, V3D_HUB_IDENT2_L3C_NKB)); for (core = 0; core < cores; core++) { diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index c7ed2e1cbab6..92bc0faee84f 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -798,7 +798,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, if (!render->base.perfmon) { ret = -ENOENT; - goto fail; + goto fail_perfmon; } } @@ -847,6 +847,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, fail_unreserve: mutex_unlock(&v3d->sched_lock); +fail_perfmon: drm_gem_unlock_reservations(last_job->bo, last_job->bo_count, &acquire_ctx); fail: @@ -1027,7 +1028,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, args->perfmon_id); if (!job->base.perfmon) { ret = -ENOENT; - goto fail; + goto fail_perfmon; } } @@ -1056,6 +1057,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, fail_unreserve: mutex_unlock(&v3d->sched_lock); +fail_perfmon: drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count, &acquire_ctx); fail: diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index e0cb7d0697a7..39459ae96f30 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -391,7 +391,7 @@ v3d_sched_init(struct v3d_dev *v3d) &v3d_bin_sched_ops, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), NULL, - NULL, "v3d_bin"); + NULL, "v3d_bin", v3d->drm.dev); if (ret) { dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret); return ret; @@ -401,7 +401,7 @@ v3d_sched_init(struct v3d_dev *v3d) &v3d_render_sched_ops, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), NULL, - NULL, "v3d_render"); + NULL, "v3d_render", v3d->drm.dev); if (ret) { dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.", ret); @@ -413,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d) &v3d_tfu_sched_ops, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), NULL, - NULL, "v3d_tfu"); + NULL, "v3d_tfu", v3d->drm.dev); if (ret) { dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.", ret); @@ -426,7 +426,7 @@ v3d_sched_init(struct v3d_dev *v3d) &v3d_csd_sched_ops, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), NULL, - NULL, "v3d_csd"); + NULL, "v3d_csd", v3d->drm.dev); if (ret) { dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.", ret); @@ -438,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d) &v3d_cache_clean_sched_ops, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), NULL, - NULL, "v3d_cache_clean"); + NULL, "v3d_cache_clean", v3d->drm.dev); if (ret) { dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.", ret); diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index f35d9e44c6b7..f4f2bd79a7cb 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -18,6 +18,7 @@ #include <drm/drm_file.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> +#include <drm/drm_module.h> #include "vbox_drv.h" @@ -190,24 +191,7 @@ static const struct drm_driver driver = { DRM_GEM_VRAM_DRIVER, }; -static int __init vbox_init(void) -{ - if (drm_firmware_drivers_only() && vbox_modeset == -1) - return -EINVAL; - - if (vbox_modeset == 0) - return -EINVAL; - - return pci_register_driver(&vbox_pci_driver); -} - -static void __exit vbox_exit(void) -{ - pci_unregister_driver(&vbox_pci_driver); -} - -module_init(vbox_init); 
-module_exit(vbox_exit); +drm_module_pci_driver_if_modeset(vbox_pci_driver, vbox_modeset); MODULE_AUTHOR("Oracle Corporation"); MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index 9300d3354c51..752f921735c6 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -1493,15 +1493,10 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) struct drm_device *drm = dev_get_drvdata(master); struct vc4_dsi *dsi = dev_get_drvdata(dev); struct vc4_dsi_encoder *vc4_dsi_encoder; - const struct of_device_id *match; dma_cap_mask_t dma_mask; int ret; - match = of_match_device(vc4_dsi_dt_match, dev); - if (!match) - return -ENODEV; - - dsi->variant = match->data; + dsi->variant = of_device_get_match_data(dev); vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder), GFP_KERNEL); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 92b1530aa17b..987cacc1eb15 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1012,30 +1012,15 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi) "VC4_HDMI_FIFO_CTL_RECENTER_DONE"); } -static struct drm_connector_state * -vc4_hdmi_encoder_get_connector_state(struct drm_encoder *encoder, - struct drm_atomic_state *state) -{ - struct drm_connector_state *conn_state; - struct drm_connector *connector; - unsigned int i; - - for_each_new_connector_in_state(state, connector, conn_state, i) { - if (conn_state->best_encoder == encoder) - return conn_state; - } - - return NULL; -} - static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, struct drm_atomic_state *state) { + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + struct drm_connector *connector = &vc4_hdmi->connector; struct drm_connector_state *conn_state = - vc4_hdmi_encoder_get_connector_state(encoder, state); + drm_atomic_get_new_connector_state(state, connector); struct vc4_hdmi_connector_state *vc4_conn_state = conn_state_to_vc4_hdmi_conn_state(conn_state); - struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; unsigned long pixel_rate = vc4_conn_state->pixel_rate; unsigned long bvb_rate, hsm_rate; @@ -1249,9 +1234,8 @@ static void vc4_hdmi_encoder_atomic_mode_set(struct drm_encoder *encoder, struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); mutex_lock(&vc4_hdmi->mutex); - memcpy(&vc4_hdmi->saved_adjusted_mode, - &crtc_state->adjusted_mode, - sizeof(vc4_hdmi->saved_adjusted_mode)); + drm_mode_copy(&vc4_hdmi->saved_adjusted_mode, + &crtc_state->adjusted_mode); mutex_unlock(&vc4_hdmi->mutex); } diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c index b6954e2f75e6..853dd9aa397e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c @@ -23,6 +23,8 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +#include <linux/string_helpers.h> + #include <drm/drm_debugfs.h> #include <drm/drm_file.h> @@ -31,7 +33,7 @@ static void virtio_gpu_add_bool(struct seq_file *m, const char *name, bool value) { - seq_printf(m, "%-16s : %s\n", name, value ? 
"yes" : "no"); + seq_printf(m, "%-16s : %s\n", name, str_yes_no(value)); } static void virtio_gpu_add_int(struct seq_file *m, const char *name, int value) diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index baef2c5f2aaf..f293e6ad52da 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -124,6 +124,7 @@ static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = { .vmap = drm_gem_shmem_object_vmap, .vunmap = drm_gem_shmem_object_vunmap, .mmap = drm_gem_shmem_object_mmap, + .vm_ops = &drm_gem_shmem_vm_ops, }; bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index fe36efdb7ff5..26eb5478394a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -32,9 +32,10 @@ #include <drm/drm_aperture.h> #include <drm/drm_drv.h> +#include <drm/drm_gem_ttm_helper.h> #include <drm/drm_ioctl.h> +#include <drm/drm_module.h> #include <drm/drm_sysfs.h> -#include <drm/drm_gem_ttm_helper.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_range_manager.h> #include <drm/ttm/ttm_placement.h> @@ -1643,26 +1644,7 @@ out_error: return ret; } -static int __init vmwgfx_init(void) -{ - int ret; - - if (drm_firmware_drivers_only()) - return -EINVAL; - - ret = pci_register_driver(&vmw_pci_driver); - if (ret) - DRM_ERROR("Failed initializing DRM.\n"); - return ret; -} - -static void __exit vmwgfx_exit(void) -{ - pci_unregister_driver(&vmw_pci_driver); -} - -module_init(vmwgfx_init); -module_exit(vmwgfx_exit); +drm_module_pci_driver(vmw_pci_driver); MODULE_AUTHOR("VMware Inc. and others"); MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index dd2ff441068e..d49de4905efa 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -4501,7 +4501,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, goto mksstats_out; } - ret = vmw_wait_dma_fence(dev_priv->fman, in_fence); + ret = dma_fence_wait(in_fence, true); if (ret) goto out; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 5001b87aebe8..59d6a2dd4c2e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -621,52 +621,6 @@ out_no_object: return ret; } - -/** - * vmw_wait_dma_fence - Wait for a dma fence - * - * @fman: pointer to a fence manager - * @fence: DMA fence to wait on - * - * This function handles the case when the fence is actually a fence - * array. If that's the case, it'll wait on each of the child fence - */ -int vmw_wait_dma_fence(struct vmw_fence_manager *fman, - struct dma_fence *fence) -{ - struct dma_fence_array *fence_array; - int ret = 0; - int i; - - - if (dma_fence_is_signaled(fence)) - return 0; - - if (!dma_fence_is_array(fence)) - return dma_fence_wait(fence, true); - - /* From i915: Note that if the fence-array was created in - * signal-on-any mode, we should *not* decompose it into its individual - * fences. However, we don't currently store which mode the fence-array - * is operating in. Fortunately, the only user of signal-on-any is - * private to amdgpu and we should not see any incoming fence-array - * from sync-file being in signal-on-any mode. 
- */ - - fence_array = to_dma_fence_array(fence); - for (i = 0; i < fence_array->num_fences; i++) { - struct dma_fence *child = fence_array->fences[i]; - - ret = dma_fence_wait(child, true); - - if (ret < 0) - return ret; - } - - return 0; -} - - /* * vmw_fence_fifo_down - signal all unsignaled fence objects. */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h index 079ab4f3ba51..a7eee579c76a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h @@ -104,9 +104,6 @@ extern int vmw_user_fence_create(struct drm_file *file_priv, struct vmw_fence_obj **p_fence, uint32_t *p_handle); -extern int vmw_wait_dma_fence(struct vmw_fence_manager *fman, - struct dma_fence *fence); - extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman); extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman); diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index e63088c2121d..0d8e6bd1ccbf 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -495,6 +495,9 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info) struct drm_device *drm_dev; int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + DRM_INFO("Creating %s\n", xen_drm_driver.desc); drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL); diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index ac37053412a1..824b510e337b 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -25,6 +25,7 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mode_config.h> +#include <drm/drm_module.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -286,7 +287,7 @@ static struct platform_driver zynqmp_dpsub_driver = { }, }; -module_platform_driver(zynqmp_dpsub_driver); +drm_module_platform_driver(zynqmp_dpsub_driver); MODULE_AUTHOR("Xilinx, Inc."); MODULE_DESCRIPTION("ZynqMP DP Subsystem Driver"); diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index f2684d2d6851..4a35347b3020 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -654,6 +654,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, fbops->fb_blank = fbtft_fb_blank; fbdefio->delay = HZ / fps; + fbdefio->sort_pagelist = true; fbdefio->deferred_io = fbtft_deferred_io; fb_deferred_io_init(info); diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c index fd66f4d4a621..b9054f658838 100644 --- a/drivers/video/fbdev/broadsheetfb.c +++ b/drivers/video/fbdev/broadsheetfb.c @@ -1059,6 +1059,7 @@ static const struct fb_ops broadsheetfb_ops = { static struct fb_deferred_io broadsheetfb_defio = { .delay = HZ/4, + .sort_pagelist = true, .deferred_io = broadsheetfb_dpy_deferred_io, }; diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c index a591d291b231..98b0f23bf5e2 100644 --- a/drivers/video/fbdev/core/fb_defio.c +++ b/drivers/video/fbdev/core/fb_defio.c @@ -59,6 +59,7 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf) printk(KERN_ERR "no mapping available\n"); BUG_ON(!page->mapping); + INIT_LIST_HEAD(&page->lru); page->index = vmf->pgoff; vmf->page = page; @@ -95,7 +96,7 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf) struct page *page = vmf->page; struct fb_info *info = vmf->vma->vm_private_data; struct fb_deferred_io 
*fbdefio = info->fbdefio; - struct page *cur; + struct list_head *pos = &fbdefio->pagelist; /* this is a callback we get when userspace first tries to write to the page. we schedule a workqueue. that workqueue @@ -122,21 +123,38 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf) */ lock_page(page); - /* we loop through the pagelist before adding in order - to keep the pagelist sorted */ - list_for_each_entry(cur, &fbdefio->pagelist, lru) { - /* this check is to catch the case where a new - process could start writing to the same page - through a new pte. this new access can cause the - mkwrite even when the original ps's pte is marked - writable */ - if (unlikely(cur == page)) - goto page_already_added; - else if (cur->index > page->index) - break; + /* + * This check is to catch the case where a new process could start + * writing to the same page through a new PTE. This new access + * can cause a call to .page_mkwrite even if the original process' + * PTE is marked writable. + * + * TODO: The lru field is owned by the page cache; hence the name. + * We dequeue in fb_deferred_io_work() after flushing the + * page's content into video memory. Instead of lru, fbdefio + * should have it's own field. + */ + if (!list_empty(&page->lru)) + goto page_already_added; + + if (unlikely(fbdefio->sort_pagelist)) { + /* + * We loop through the pagelist before adding in order to + * keep the pagelist sorted. This has significant overhead + * of O(n^2) with n being the number of written pages. If + * possible, drivers should try to work with unsorted page + * lists instead. + */ + struct page *cur; + + list_for_each_entry(cur, &fbdefio->pagelist, lru) { + if (cur->index > page->index) + break; + } + pos = &cur->lru; } - list_add_tail(&page->lru, &cur->lru); + list_add_tail(&page->lru, pos); page_already_added: mutex_unlock(&fbdefio->lock); @@ -194,7 +212,7 @@ static void fb_deferred_io_work(struct work_struct *work) /* clear the list */ list_for_each_safe(node, next, &fbdefio->pagelist) { - list_del(node); + list_del_init(node); } mutex_unlock(&fbdefio->lock); } diff --git a/drivers/video/fbdev/metronomefb.c b/drivers/video/fbdev/metronomefb.c index 952826557a0c..af858dd23ea6 100644 --- a/drivers/video/fbdev/metronomefb.c +++ b/drivers/video/fbdev/metronomefb.c @@ -568,6 +568,7 @@ static const struct fb_ops metronomefb_ops = { static struct fb_deferred_io metronomefb_defio = { .delay = HZ, + .sort_pagelist = true, .deferred_io = metronomefb_dpy_deferred_io, }; diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index b9cdd02c1000..184bb8433b78 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -980,6 +980,7 @@ static int dlfb_ops_open(struct fb_info *info, int user) if (fbdefio) { fbdefio->delay = DL_DEFIO_WRITE_DELAY; + fbdefio->sort_pagelist = true; fbdefio->deferred_io = dlfb_dpy_deferred_io; } |
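The fbdev deferred-I/O changes at the end of the diff stop sorting the defio page list by default: the insertion sort in fb_deferred_io_mkwrite() costs O(n^2) in the number of written pages, so only drivers that actually need pages in increasing index order (broadsheetfb, metronomefb, udlfb and fbtft above) now ask for it through the new sort_pagelist flag. A minimal sketch of that opt-in for a hypothetical foofb driver (all names are illustrative, not part of this series):

#include <linux/fb.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/mm.h>

/* Flush the written pages to the (hypothetical) device. With
 * .sort_pagelist = true the pages arrive ordered by page->index,
 * which suits controllers that want strictly increasing offsets.
 */
static void foofb_deferred_io(struct fb_info *info,
			      struct list_head *pagelist)
{
	struct page *page;

	list_for_each_entry(page, pagelist, lru) {
		/* write back the framebuffer region backing this page */
	}
}

static struct fb_deferred_io foofb_defio = {
	.delay		= HZ / 20,
	.sort_pagelist	= true,	/* drop this line if any order is fine */
	.deferred_io	= foofb_deferred_io,
};

static void foofb_enable_defio(struct fb_info *info)
{
	info->fbdefio = &foofb_defio;
	fb_deferred_io_init(info);
}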