137 files changed, 2006 insertions, 5379 deletions
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml index 2ed010f91e2d..20ce88ab4b3a 100644 --- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml +++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml @@ -22,7 +22,7 @@ properties: items: - enum: # ili9341 240*320 Color on stm32f429-disco board - - st,sf-tc240t-9370-t + - st,sf-tc240t-9370-t - const: ilitek,ili9341 reg: true diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml new file mode 100644 index 000000000000..26e3c820a2f7 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml @@ -0,0 +1,98 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/samsung,s6d27a1.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Samsung S6D27A1 display panel + +description: The S6D27A1 is a 480x800 DPI display panel from Samsung Mobile + Displays (SMD). The panel must obey the rules for a SPI slave device + as specified in spi/spi-controller.yaml + +maintainers: + - Markuss Broks <markuss.broks@gmail.com> + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + const: samsung,s6d27a1 + + reg: true + + interrupts: + description: provides an optional ESD (electrostatic discharge) + interrupt that signals abnormalities in the display hardware. + This can also be raised for other reasons like erroneous + configuration. + maxItems: 1 + + reset-gpios: true + + vci-supply: + description: regulator that supplies the VCI analog voltage + usually around 3.0 V + + vccio-supply: + description: regulator that supplies the VCCIO voltage usually + around 1.8 V + + backlight: true + + spi-cpha: true + + spi-cpol: true + + spi-max-frequency: + maximum: 1200000 + + port: true + +required: + - compatible + - reg + - vci-supply + - vccio-supply + - spi-cpha + - spi-cpol + - port + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + #include <dt-bindings/interrupt-controller/irq.h> + + spi { + compatible = "spi-gpio"; + sck-gpios = <&gpio 0 GPIO_ACTIVE_HIGH>; + miso-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>; + mosi-gpios = <&gpio 2 GPIO_ACTIVE_HIGH>; + cs-gpios = <&gpio 3 GPIO_ACTIVE_HIGH>; + num-chipselects = <1>; + #address-cells = <1>; + #size-cells = <0>; + panel@0 { + compatible = "samsung,s6d27a1"; + spi-max-frequency = <1200000>; + spi-cpha; + spi-cpol; + reg = <0>; + vci-supply = <&lcd_3v0_reg>; + vccio-supply = <&lcd_1v8_reg>; + reset-gpios = <&gpio 4 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio>; + interrupts = <5 IRQ_TYPE_EDGE_RISING>; + + port { + panel_in: endpoint { + remote-endpoint = <&display_out>; + }; + }; + }; + }; + +... diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst index f5ac4c90b237..2cd7db82d9fe 100644 --- a/Documentation/driver-api/dma-buf.rst +++ b/Documentation/driver-api/dma-buf.rst @@ -176,12 +176,6 @@ DMA Fences Functions Reference .. kernel-doc:: include/linux/dma-fence.h :internal: -Seqno Hardware Fences -~~~~~~~~~~~~~~~~~~~~~ - -.. 
kernel-doc:: include/linux/seqno-fence.h - :internal: - DMA Fence Array ~~~~~~~~~~~~~~~ diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index 8126beadc7df..e0538083a2c0 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -28,56 +28,53 @@ UMA devices. The Translation Table Manager (TTM) =================================== -TTM design background and information belongs here. +.. kernel-doc:: drivers/gpu/drm/ttm/ttm_module.c + :doc: TTM -TTM initialization ------------------- +.. kernel-doc:: include/drm/ttm/ttm_caching.h + :internal: - **Warning** - This section is outdated. +TTM device object reference +--------------------------- -Drivers wishing to support TTM must pass a filled :c:type:`ttm_bo_driver -<ttm_bo_driver>` structure to ttm_device_init, together with an -initialized global reference to the memory manager. The ttm_bo_driver -structure contains several fields with function pointers for -initializing the TTM, allocating and freeing memory, waiting for command -completion and fence synchronization, and memory migration. +.. kernel-doc:: include/drm/ttm/ttm_device.h + :internal: -The :c:type:`struct drm_global_reference <drm_global_reference>` is made -up of several fields: +.. kernel-doc:: drivers/gpu/drm/ttm/ttm_device.c + :export: -.. code-block:: c +TTM resource placement reference +-------------------------------- - struct drm_global_reference { - enum ttm_global_types global_type; - size_t size; - void *object; - int (*init) (struct drm_global_reference *); - void (*release) (struct drm_global_reference *); - }; - - -There should be one global reference structure for your memory manager -as a whole, and there will be others for each object created by the -memory manager at runtime. Your global TTM should have a type of -TTM_GLOBAL_TTM_MEM. The size field for the global object should be -sizeof(struct ttm_mem_global), and the init and release hooks should -point at your driver-specific init and release routines, which probably -eventually call ttm_mem_global_init and ttm_mem_global_release, -respectively. +.. kernel-doc:: include/drm/ttm/ttm_placement.h + :internal: + +TTM resource object reference +----------------------------- + +.. kernel-doc:: include/drm/ttm/ttm_resource.h + :internal: -Once your global TTM accounting structure is set up and initialized by -calling ttm_global_item_ref() on it, you need to create a buffer -object TTM to provide a pool for buffer object allocation by clients and -the kernel itself. The type of this object should be -TTM_GLOBAL_TTM_BO, and its size should be sizeof(struct -ttm_bo_global). Again, driver-specific init and release functions may -be provided, likely eventually calling ttm_bo_global_ref_init() and -ttm_bo_global_ref_release(), respectively. Also, like the previous -object, ttm_global_item_ref() is used to create an initial reference -count for the TTM, which will call your initialization function. +.. kernel-doc:: drivers/gpu/drm/ttm/ttm_resource.c + :export: + +TTM TT object reference +----------------------- + +.. kernel-doc:: include/drm/ttm/ttm_tt.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/ttm/ttm_tt.c + :export: -See the radeon_ttm.c file for an example of usage. +TTM page pool reference +----------------------- + +.. kernel-doc:: include/drm/ttm/ttm_pool.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/ttm/ttm_pool.c + :export: The Graphics Execution Manager (GEM) ==================================== @@ -504,3 +501,6 @@ Scheduler Function References .. 
kernel-doc:: drivers/gpu/drm/scheduler/sched_main.c :export: + +.. kernel-doc:: drivers/gpu/drm/scheduler/sched_entity.c + :export: diff --git a/MAINTAINERS b/MAINTAINERS index eeb4c70b3d5b..ccbabe0781b8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6018,6 +6018,12 @@ T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml F: drivers/gpu/drm/panel/panel-samsung-db7430.c +DRM DRIVER FOR SAMSUNG S6D27A1 PANELS +M: Markuss Broks <markuss.broks@gmail.com> +S: Maintained +F: Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml +F: drivers/gpu/drm/panel/panel-samsung-s6d27a1.c + DRM DRIVER FOR SITRONIX ST7703 PANELS M: Guido Günther <agx@sigxcpu.org> R: Purism Kernel Team <kernel@puri.sm> diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 40d81f23cacf..1ef021273a06 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \ - dma-resv.o seqno-fence.o + dma-resv.o obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o obj-$(CONFIG_DMABUF_HEAPS) += heaps/ obj-$(CONFIG_SYNC_FILE) += sync_file.o diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 63d32261b63f..474de2d988ca 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -82,6 +82,7 @@ static void dma_buf_release(struct dentry *dentry) if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) dma_resv_fini(dmabuf->resv); + WARN_ON(!list_empty(&dmabuf->attachments)); module_put(dmabuf->owner); kfree(dmabuf->name); kfree(dmabuf); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index ce0f5eff575d..1e82ecd443fa 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -616,20 +616,17 @@ EXPORT_SYMBOL(dma_fence_enable_sw_signaling); * @cb: the callback to register * @func: the function to call * + * Add a software callback to the fence. The caller should keep a reference to + * the fence. + * * @cb will be initialized by dma_fence_add_callback(), no initialization * by the caller is required. Any number of callbacks can be registered * to a fence, but a callback can only be registered to one fence at a time. * - * Note that the callback can be called from an atomic context. If - * fence is already signaled, this function will return -ENOENT (and + * If fence is already signaled, this function will return -ENOENT (and * *not* call the callback). * - * Add a software callback to the fence. Same restrictions apply to - * refcount as it does to dma_fence_wait(), however the caller doesn't need to - * keep a refcount to fence afterward dma_fence_add_callback() has returned: - * when software access is enabled, the creator of the fence is required to keep - * the fence alive until after it signals with dma_fence_signal(). The callback - * itself can be called from irq context. + * Note that the callback can be called from an atomic context or irq context. * * Returns 0 in case of success, -ENOENT if the fence is already signaled * and -EINVAL in case of error. diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index e744fd87c63c..84fbe60629e3 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -48,6 +48,8 @@ * write operations) or N shared fences (read operations). The RCU * mechanism is used to protect read access to fences from locked * write-side updates. + * + * See struct dma_resv for more details. 
*/ DEFINE_WD_CLASS(reservation_ww_class); @@ -137,7 +139,11 @@ EXPORT_SYMBOL(dma_resv_fini); * @num_fences: number of fences we want to add * * Should be called before dma_resv_add_shared_fence(). Must - * be called with obj->lock held. + * be called with @obj locked through dma_resv_lock(). + * + * Note that the preallocated slots need to be re-reserved if @obj is unlocked + * at any time before calling dma_resv_add_shared_fence(). This is validated + * when CONFIG_DEBUG_MUTEXES is enabled. * * RETURNS * Zero for success, or -errno @@ -234,8 +240,10 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max); * @obj: the reservation object * @fence: the shared fence to add * - * Add a fence to a shared slot, obj->lock must be held, and + * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and * dma_resv_reserve_shared() has been called. + * + * See also &dma_resv.fence for a discussion of the semantics. */ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) { @@ -278,9 +286,11 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence); /** * dma_resv_add_excl_fence - Add an exclusive fence. * @obj: the reservation object - * @fence: the shared fence to add + * @fence: the exclusive fence to add * - * Add a fence to the exclusive slot. The obj->lock must be held. + * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock(). + * Note that this function replaces all fences attached to @obj, see also + * &dma_resv.fence_excl for a discussion of the semantics. */ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) { @@ -609,9 +619,11 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) * fence * * Callers are not required to hold specific locks, but maybe hold - * dma_resv_lock() already + * dma_resv_lock() already. + * * RETURNS - * true if all fences signaled, else false + * + * True if all fences signaled, else false. 
*/ bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all) { diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c deleted file mode 100644 index bfe14e94c488..000000000000 --- a/drivers/dma-buf/seqno-fence.c +++ /dev/null @@ -1,71 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * seqno-fence, using a dma-buf to synchronize fencing - * - * Copyright (C) 2012 Texas Instruments - * Copyright (C) 2012-2014 Canonical Ltd - * Authors: - * Rob Clark <robdclark@gmail.com> - * Maarten Lankhorst <maarten.lankhorst@canonical.com> - */ - -#include <linux/slab.h> -#include <linux/export.h> -#include <linux/seqno-fence.h> - -static const char *seqno_fence_get_driver_name(struct dma_fence *fence) -{ - struct seqno_fence *seqno_fence = to_seqno_fence(fence); - - return seqno_fence->ops->get_driver_name(fence); -} - -static const char *seqno_fence_get_timeline_name(struct dma_fence *fence) -{ - struct seqno_fence *seqno_fence = to_seqno_fence(fence); - - return seqno_fence->ops->get_timeline_name(fence); -} - -static bool seqno_enable_signaling(struct dma_fence *fence) -{ - struct seqno_fence *seqno_fence = to_seqno_fence(fence); - - return seqno_fence->ops->enable_signaling(fence); -} - -static bool seqno_signaled(struct dma_fence *fence) -{ - struct seqno_fence *seqno_fence = to_seqno_fence(fence); - - return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence); -} - -static void seqno_release(struct dma_fence *fence) -{ - struct seqno_fence *f = to_seqno_fence(fence); - - dma_buf_put(f->sync_buf); - if (f->ops->release) - f->ops->release(fence); - else - dma_fence_free(&f->base); -} - -static signed long seqno_wait(struct dma_fence *fence, bool intr, - signed long timeout) -{ - struct seqno_fence *f = to_seqno_fence(fence); - - return f->ops->wait(fence, intr, timeout); -} - -const struct dma_fence_ops seqno_fence_ops = { - .get_driver_name = seqno_fence_get_driver_name, - .get_timeline_name = seqno_fence_get_timeline_name, - .enable_signaling = seqno_enable_signaling, - .signaled = seqno_signaled, - .wait = seqno_wait, - .release = seqno_release, -}; -EXPORT_SYMBOL(seqno_fence_ops); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index cea777ae7fb9..b17e231ca6f7 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -211,7 +211,7 @@ config DRM_KMS_CMA_HELPER config DRM_GEM_SHMEM_HELPER bool - depends on DRM + depends on DRM && MMU help Choose this if you need the GEM shmem helper functions @@ -271,7 +271,8 @@ source "drivers/gpu/drm/kmb/Kconfig" config DRM_VGEM tristate "Virtual GEM provider" - depends on DRM + depends on DRM && MMU + select DRM_GEM_SHMEM_HELPER help Choose this option to get a virtual graphics memory manager, as used by Mesa's software renderer for enhanced performance. 
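Note: the dma-resv kernel-doc updates above describe a fixed calling sequence: reserve a slot and add the fence while the object is locked via dma_resv_lock(). A minimal sketch of that sequence, assuming <linux/dma-resv.h>; sketch_attach_shared_fence, resv and fence are placeholder names, not part of the changes above:

#include <linux/dma-resv.h>

/* Sketch only: attach a shared (read) fence following the documented
 * rules. "resv" and "fence" are assumed to be provided by the caller;
 * error handling is minimal.
 */
static int sketch_attach_shared_fence(struct dma_resv *resv,
                                      struct dma_fence *fence)
{
        int ret;

        ret = dma_resv_lock(resv, NULL);
        if (ret)
                return ret;

        /* Slots must be reserved under the lock, before adding. */
        ret = dma_resv_reserve_shared(resv, 1);
        if (!ret)
                dma_resv_add_shared_fence(resv, fence);

        dma_resv_unlock(resv);
        return ret;
}

If the object is unlocked between the reserve and the add, the slots have to be re-reserved, as the updated comment above spells out.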
@@ -279,7 +280,7 @@ config DRM_VGEM config DRM_VKMS tristate "Virtual KMS (EXPERIMENTAL)" - depends on DRM + depends on DRM && MMU select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER select CRC32 @@ -351,8 +352,6 @@ source "drivers/gpu/drm/hisilicon/Kconfig" source "drivers/gpu/drm/mediatek/Kconfig" -source "drivers/gpu/drm/zte/Kconfig" - source "drivers/gpu/drm/mxsfb/Kconfig" source "drivers/gpu/drm/meson/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index ad1112154898..0dff40bb863c 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -113,7 +113,6 @@ obj-y += bridge/ obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/ obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/ obj-y += hisilicon/ -obj-$(CONFIG_DRM_ZTE) += zte/ obj-$(CONFIG_DRM_MXSFB) += mxsfb/ obj-y += tiny/ obj-$(CONFIG_DRM_PL111) += pl111/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 913f9eaa9cd6..0311d799a010 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1222,6 +1222,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, if (r) goto error_unlock; + drm_sched_job_arm(&job->base); + /* No memory allocation is allowed while holding the notifier lock. * The lock is held until amdgpu_cs_submit is finished and fence is * added to BOs. @@ -1259,7 +1261,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, trace_amdgpu_cs_ioctl(job); amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket); - drm_sched_entity_push_job(&job->base, entity); + drm_sched_entity_push_job(&job->base); amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 8d682befe0d6..a25e192e8a3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -266,7 +266,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) struct amdgpu_fence_driver *drv = &ring->fence_drv; struct amdgpu_device *adev = ring->adev; uint32_t seq, last_seq; - int r; do { last_seq = atomic_read(&ring->fence_drv.last_seq); @@ -298,12 +297,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) if (!fence) continue; - r = dma_fence_signal(fence); - if (!r) - DMA_FENCE_TRACE(fence, "signaled from irq context\n"); - else - BUG(); - + dma_fence_signal(fence); dma_fence_put(fence); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -684,8 +678,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f) if (!timer_pending(&ring->fence_drv.fallback_timer)) amdgpu_fence_schedule_fallback(ring); - DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx); - return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index de29518673dd..4e30a09bc887 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -182,9 +182,11 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, if (r) return r; + drm_sched_job_arm(&job->base); + *f = dma_fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); - drm_sched_entity_push_job(&job->base, entity); + drm_sched_entity_push_job(&job->base); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 38dade421d46..1129e17e9f09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1066,8 +1066,6 @@ static 
void amdgpu_ttm_backend_destroy(struct ttm_device *bdev, { struct amdgpu_ttm_tt *gtt = (void *)ttm; - amdgpu_ttm_backend_unbind(bdev, ttm); - ttm_tt_destroy_common(bdev, ttm); if (gtt->usertask) put_task_struct(gtt->usertask); @@ -1148,6 +1146,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev, struct amdgpu_ttm_tt *gtt = (void *)ttm; struct amdgpu_device *adev; + amdgpu_ttm_backend_unbind(bdev, ttm); + if (gtt->userptr) { amdgpu_ttm_tt_set_user_pages(ttm, NULL); kfree(ttm->sg); diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 8c2ab3d653b7..0562bdaac00c 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -165,7 +165,7 @@ bool malidp_format_mod_supported(struct drm_device *drm, return !malidp_hw_format_is_afbc_only(format); } - if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) { + if (!fourcc_mod_is_vendor(modifier, ARM)) { DRM_ERROR("Unknown modifier (not Arm)\n"); return false; } diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c index a20a45c0b353..28d9becc939c 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c @@ -1,21 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * adv7511_cec.c - Analog Devices ADV7511/33 cec driver * * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. - * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * */ #include <linux/device.h> diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 14d73fb1dd15..1a871f6b6822 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -720,7 +720,7 @@ static int edid_read(struct anx7625_data *ctx, ret = sp_tx_aux_rd(ctx, 0xf1); if (ret) { - sp_tx_rst_aux(ctx); + ret = sp_tx_rst_aux(ctx); DRM_DEV_DEBUG_DRIVER(dev, "edid read fail, reset!\n"); } else { ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client, @@ -735,7 +735,7 @@ static int edid_read(struct anx7625_data *ctx, if (cnt > EDID_TRY_CNT) return -EIO; - return 0; + return ret; } static int segments_edid_read(struct anx7625_data *ctx, @@ -785,7 +785,7 @@ static int segments_edid_read(struct anx7625_data *ctx, if (cnt > EDID_TRY_CNT) return -EIO; - return 0; + return ret; } static int sp_tx_edid_read(struct anx7625_data *ctx, @@ -845,8 +845,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx, if (g_edid_break == 1) break; - segments_edid_read(ctx, count / 2, - pblock_buf, offset); + ret = segments_edid_read(ctx, count / 2, + pblock_buf, offset); + if (ret < 0) + return ret; + memcpy(&pedid_blocks_buf[edid_pos], pblock_buf, MAX_DPCD_BUFFER_SIZE); @@ -863,8 +866,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx, if (g_edid_break == 1) break; - segments_edid_read(ctx, count / 2, - pblock_buf, offset); + ret = segments_edid_read(ctx, count / 2, + pblock_buf, offset); + if (ret < 0) + return ret; + memcpy(&pedid_blocks_buf[edid_pos], pblock_buf, MAX_DPCD_BUFFER_SIZE); @@ -887,7 +893,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx, } /* Reset aux channel */ - sp_tx_rst_aux(ctx); + ret = sp_tx_rst_aux(ctx); + if (ret < 0) { + DRM_DEV_ERROR(dev, "Failed to reset aux channel!\n"); + return ret; + } return (blocks_num + 1); } @@ -1325,7 +1335,6 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx) dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | - MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE; if (mipi_dsi_attach(dsi) < 0) { diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c index e6e331071a00..d8a15c459b42 100644 --- a/drivers/gpu/drm/bridge/cdns-dsi.c +++ b/drivers/gpu/drm/bridge/cdns-dsi.c @@ -1171,7 +1171,6 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev) { struct cdns_dsi *dsi; struct cdns_dsi_input *input; - struct resource *res; int ret, irq; u32 val; @@ -1183,8 +1182,7 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev) input = &dsi->input; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dsi->regs = devm_ioremap_resource(&pdev->dev, res); + dsi->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dsi->regs)) return PTR_ERR(dsi->regs); diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c index 2f2a09adb4bc..9dc41a7b9136 100644 --- a/drivers/gpu/drm/bridge/ite-it66121.c +++ b/drivers/gpu/drm/bridge/ite-it66121.c @@ -889,7 +889,7 @@ unlock: static int it66121_probe(struct i2c_client *client, const struct i2c_device_id *id) { - u32 vendor_ids[2], device_ids[2], revision_id; + u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 }; struct device_node *ep; int ret; struct it66121_ctx *ctx; @@ -924,6 +924,9 @@ static int it66121_probe(struct i2c_client *client, ctx->next_bridge = of_drm_find_bridge(ep); of_node_put(ep); + if (!ctx->next_bridge) + return -EPROBE_DEFER; + i2c_set_clientdata(client, 
ctx); mutex_init(&ctx->lock); diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c index 7bd0affa057a..685e9c38b2db 100644 --- a/drivers/gpu/drm/bridge/parade-ps8640.c +++ b/drivers/gpu/drm/bridge/parade-ps8640.c @@ -18,16 +18,18 @@ #include <drm/drm_print.h> #define PAGE2_GPIO_H 0xa7 -#define PS_GPIO9 BIT(1) +#define PS_GPIO9 BIT(1) #define PAGE2_I2C_BYPASS 0xea -#define I2C_BYPASS_EN 0xd0 +#define I2C_BYPASS_EN 0xd0 #define PAGE2_MCS_EN 0xf3 -#define MCS_EN BIT(0) +#define MCS_EN BIT(0) + #define PAGE3_SET_ADD 0xfe -#define VDO_CTL_ADD 0x13 -#define VDO_DIS 0x18 -#define VDO_EN 0x1c -#define DP_NUM_LANES 4 +#define VDO_CTL_ADD 0x13 +#define VDO_DIS 0x18 +#define VDO_EN 0x1c + +#define NUM_MIPI_LANES 4 /* * PS8640 uses multiple addresses: @@ -254,7 +256,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge, dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE; dsi->format = MIPI_DSI_FMT_RGB888; - dsi->lanes = DP_NUM_LANES; + dsi->lanes = NUM_MIPI_LANES; ret = mipi_dsi_attach(dsi); if (ret) goto err_dsi_attach; diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 2ba257b1ae20..e0a30e0ee86a 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -65,6 +65,14 @@ * support can instead use e.g. drm_helper_hpd_irq_event(). */ +/* + * Global connector list for drm_connector_find_by_fwnode(). + * Note drm_connector_[un]register() first take connector->lock and then + * take the connector_list_lock. + */ +static DEFINE_MUTEX(connector_list_lock); +static LIST_HEAD(connector_list); + struct drm_conn_prop_enum_list { int type; const char *name; @@ -267,6 +275,7 @@ int drm_connector_init(struct drm_device *dev, goto out_put_type_id; } + INIT_LIST_HEAD(&connector->global_connector_list_entry); INIT_LIST_HEAD(&connector->probed_modes); INIT_LIST_HEAD(&connector->modes); mutex_init(&connector->mutex); @@ -474,6 +483,8 @@ void drm_connector_cleanup(struct drm_connector *connector) drm_mode_object_unregister(dev, &connector->base); kfree(connector->name); connector->name = NULL; + fwnode_handle_put(connector->fwnode); + connector->fwnode = NULL; spin_lock_irq(&dev->mode_config.connector_list_lock); list_del(&connector->head); dev->mode_config.num_connector--; @@ -532,6 +543,9 @@ int drm_connector_register(struct drm_connector *connector) /* Let userspace know we have a new connector */ drm_sysfs_hotplug_event(connector->dev); + mutex_lock(&connector_list_lock); + list_add_tail(&connector->global_connector_list_entry, &connector_list); + mutex_unlock(&connector_list_lock); goto unlock; err_debugfs: @@ -560,6 +574,10 @@ void drm_connector_unregister(struct drm_connector *connector) return; } + mutex_lock(&connector_list_lock); + list_del_init(&connector->global_connector_list_entry); + mutex_unlock(&connector_list_lock); + if (connector->funcs->early_unregister) connector->funcs->early_unregister(connector); @@ -2543,6 +2561,67 @@ out: return ret; } +/** + * drm_connector_find_by_fwnode - Find a connector based on the associated fwnode + * @fwnode: fwnode for which to find the matching drm_connector + * + * This function looks up a drm_connector based on its associated fwnode. When + * a connector is found a reference to the connector is returned. The caller must + * call drm_connector_put() to release this reference when it is done with the + * connector. + * + * Returns: A reference to the found connector or an ERR_PTR(). 
+ */ +struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode) +{ + struct drm_connector *connector, *found = ERR_PTR(-ENODEV); + + if (!fwnode) + return ERR_PTR(-ENODEV); + + mutex_lock(&connector_list_lock); + + list_for_each_entry(connector, &connector_list, global_connector_list_entry) { + if (connector->fwnode == fwnode || + (connector->fwnode && connector->fwnode->secondary == fwnode)) { + drm_connector_get(connector); + found = connector; + break; + } + } + + mutex_unlock(&connector_list_lock); + + return found; +} + +/** + * drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector + * @connector: connector to report the event on + * + * On some hardware a hotplug event notification may come from outside the display + * driver / device. An example of this is some USB Type-C setups where the hardware + * muxes the DisplayPort data and aux-lines but does not pass the altmode HPD + * status bit to the GPU's DP HPD pin. + * + * This function can be used to report these out-of-band events after obtaining + * a drm_connector reference through calling drm_connector_find_by_fwnode(). + */ +void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode) +{ + struct drm_connector *connector; + + connector = drm_connector_find_by_fwnode(connector_fwnode); + if (IS_ERR(connector)) + return; + + if (connector->funcs->oob_hotplug_event) + connector->funcs->oob_hotplug_event(connector); + + drm_connector_put(connector); +} +EXPORT_SYMBOL(drm_connector_oob_hotplug_event); + /** * DOC: Tile group diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index edb772947cb4..63279e984342 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -58,6 +58,7 @@ struct drm_property; struct edid; struct kref; struct work_struct; +struct fwnode_handle; /* drm_crtc.c */ int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj, @@ -186,6 +187,7 @@ int drm_connector_set_obj_prop(struct drm_mode_object *obj, int drm_connector_create_standard_properties(struct drm_device *dev); const char *drm_get_connector_force_name(enum drm_connector_force force); void drm_connector_free_work_fn(struct work_struct *work); +struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode); /* IOCTL */ int drm_connector_property_set_ioctl(struct drm_device *dev, diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index a61946374c82..0e0986dfbe0c 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -10,6 +10,10 @@ #include <linux/slab.h> #include <linux/vmalloc.h> +#ifdef CONFIG_X86 +#include <asm/set_memory.h> +#endif + #include <drm/drm.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> @@ -162,6 +166,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) return PTR_ERR(pages); } + /* + * TODO: Allocating WC pages which are correctly flushed is only + * supported on x86. Ideal solution would be a GFP_WC flag, which also + * ttm_pool.c could use. 
+ */ +#ifdef CONFIG_X86 + if (shmem->map_wc) + set_pages_array_wc(pages, obj->size >> PAGE_SHIFT); +#endif + shmem->pages = pages; return 0; @@ -203,6 +217,11 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) if (--shmem->pages_use_count > 0) return; +#ifdef CONFIG_X86 + if (shmem->map_wc) + set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT); +#endif + drm_gem_put_pages(obj, shmem->pages, shmem->pages_mark_dirty_on_put, shmem->pages_mark_accessed_on_put); @@ -542,7 +561,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf) } else { page = shmem->pages[page_offset]; - ret = vmf_insert_page(vma, vmf->address, page); + ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page)); } mutex_unlock(&shmem->pages_lock); @@ -612,7 +631,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) return ret; } - vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; + vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); if (shmem->map_wc) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 43cf7e887d1a..bfa386b98134 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -846,7 +846,6 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = { static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt) { - ttm_tt_destroy_common(bdev, tt); ttm_tt_fini(tt); kfree(tt); } diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index be4a52dc4d6f..8b8744dcf691 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -522,19 +522,7 @@ int drm_version(struct drm_device *dev, void *data, return err; } -/** - * drm_ioctl_permit - Check ioctl permissions against caller - * - * @flags: ioctl permission flags. - * @file_priv: Pointer to struct drm_file identifying the caller. - * - * Checks whether the caller is allowed to run an ioctl with the - * indicated permissions. - * - * Returns: - * Zero if allowed, -EACCES otherwise. 
- */ -int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) +static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) { /* ROOT_ONLY is only for CAP_SYS_ADMIN */ if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))) @@ -557,7 +545,6 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) return 0; } -EXPORT_SYMBOL(drm_ioctl_permit); #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ [DRM_IOCTL_NR(ioctl)] = { \ @@ -725,7 +712,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER), }; -#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) +#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE(drm_ioctls) /** * DOC: driver specific ioctls @@ -834,8 +821,8 @@ long drm_ioctl(struct file *filp, if (drm_dev_is_unplugged(dev)) return -ENODEV; - if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE) - return -ENOTTY; + if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE) + return -ENOTTY; is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END; diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c index f933da1656eb..47e92400548d 100644 --- a/drivers/gpu/drm/drm_kms_helper_common.c +++ b/drivers/gpu/drm/drm_kms_helper_common.c @@ -64,17 +64,6 @@ MODULE_PARM_DESC(edid_firmware, static int __init drm_kms_helper_init(void) { - /* - * The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT) - * but the module doesn't depend on any fb console symbols. At least - * attempt to load fbcon to avoid leaving the system without a usable - * console. - */ - if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) && - IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) && - !IS_ENABLED(CONFIG_EXPERT)) - request_module_nowait("fbcon"); - return drm_dp_aux_dev_init(); } diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index f6bdec7fa925..62e8ccc7ab9c 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -109,6 +109,12 @@ static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = { + .width = 1280, + .height = 1920, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct dmi_system_id orientation_data[] = { { /* Acer One 10 (S1003) */ .matches = { @@ -134,6 +140,20 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Chuwi HiBook (CWI514) */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"), + DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), + /* Above matches are too generic, add bios-date match */ + DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Chuwi Hi10 Pro (CWI529) */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* GPD MicroPC (generic strings, also match on bios date) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), @@ -193,6 +213,13 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"), }, .driver_data = (void *)&itworks_tw891, + }, { /* KD Kurio Smart C15200 2-in-1 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "KD 
Interactive"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Kurio Smart"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "KDM960BCP"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* * Lenovo Ideapad Miix 310 laptop, only some production batches * have a portrait screen, the resolution checks makes the quirk @@ -211,10 +238,15 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), }, .driver_data = (void *)&lcd800x1280_rightside_up, - }, { /* Lenovo Ideapad D330 */ + }, { /* Lenovo Ideapad D330-10IGM (HD) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Lenovo Ideapad D330-10IGM (FHD) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"), DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, @@ -225,6 +257,19 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"), }, .driver_data = (void *)&onegx1_pro, + }, { /* Samsung GalaxyBook 10.6 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"), + }, + .driver_data = (void *)&lcd1280x1920_rightside_up, + }, { /* Valve Steam Deck */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* VIOS LTH17 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"), diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 968a9560b4aa..76ff6ec3421b 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -10,6 +10,7 @@ * Copyright (c) 2003-2004 IBM Corp. 
*/ +#include <linux/acpi.h> #include <linux/device.h> #include <linux/err.h> #include <linux/export.h> @@ -50,8 +51,45 @@ static struct device_type drm_sysfs_device_minor = { .name = "drm_minor" }; +static struct device_type drm_sysfs_device_connector = { + .name = "drm_connector", +}; + struct class *drm_class; +#ifdef CONFIG_ACPI +static bool drm_connector_acpi_bus_match(struct device *dev) +{ + return dev->type == &drm_sysfs_device_connector; +} + +static struct acpi_device *drm_connector_acpi_find_companion(struct device *dev) +{ + struct drm_connector *connector = to_drm_connector(dev); + + return to_acpi_device_node(connector->fwnode); +} + +static struct acpi_bus_type drm_connector_acpi_bus = { + .name = "drm_connector", + .match = drm_connector_acpi_bus_match, + .find_companion = drm_connector_acpi_find_companion, +}; + +static void drm_sysfs_acpi_register(void) +{ + register_acpi_bus_type(&drm_connector_acpi_bus); +} + +static void drm_sysfs_acpi_unregister(void) +{ + unregister_acpi_bus_type(&drm_connector_acpi_bus); +} +#else +static void drm_sysfs_acpi_register(void) { } +static void drm_sysfs_acpi_unregister(void) { } +#endif + static char *drm_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev)); @@ -85,6 +123,8 @@ int drm_sysfs_init(void) } drm_class->devnode = drm_devnode; + + drm_sysfs_acpi_register(); return 0; } @@ -97,11 +137,17 @@ void drm_sysfs_destroy(void) { if (IS_ERR_OR_NULL(drm_class)) return; + drm_sysfs_acpi_unregister(); class_remove_file(drm_class, &class_attr_version.attr); class_destroy(drm_class); drm_class = NULL; } +static void drm_sysfs_release(struct device *dev) +{ + kfree(dev); +} + /* * Connector properties */ @@ -273,27 +319,47 @@ static const struct attribute_group *connector_dev_groups[] = { int drm_sysfs_connector_add(struct drm_connector *connector) { struct drm_device *dev = connector->dev; + struct device *kdev; + int r; if (connector->kdev) return 0; - connector->kdev = - device_create_with_groups(drm_class, dev->primary->kdev, 0, - connector, connector_dev_groups, - "card%d-%s", dev->primary->index, - connector->name); + kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); + if (!kdev) + return -ENOMEM; + + device_initialize(kdev); + kdev->class = drm_class; + kdev->type = &drm_sysfs_device_connector; + kdev->parent = dev->primary->kdev; + kdev->groups = connector_dev_groups; + kdev->release = drm_sysfs_release; + dev_set_drvdata(kdev, connector); + + r = dev_set_name(kdev, "card%d-%s", dev->primary->index, connector->name); + if (r) + goto err_free; + DRM_DEBUG("adding \"%s\" to sysfs\n", connector->name); - if (IS_ERR(connector->kdev)) { - DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev)); - return PTR_ERR(connector->kdev); + r = device_add(kdev); + if (r) { + drm_err(dev, "failed to register connector device: %d\n", r); + goto err_free; } + connector->kdev = kdev; + if (connector->ddc) return sysfs_create_link(&connector->kdev->kobj, &connector->ddc->dev.kobj, "ddc"); return 0; + +err_free: + put_device(kdev); + return r; } void drm_sysfs_connector_remove(struct drm_connector *connector) @@ -374,11 +440,6 @@ void drm_sysfs_connector_status_event(struct drm_connector *connector, } EXPORT_SYMBOL(drm_sysfs_connector_status_event); -static void drm_sysfs_release(struct device *dev) -{ - kfree(dev); -} - struct device *drm_sysfs_minor_alloc(struct drm_minor *minor) { const char *minor_str; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c 
b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index feb6da1b6ceb..180bb633d5c5 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -163,6 +163,8 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity, if (ret) goto out_unlock; + drm_sched_job_arm(&submit->sched_job); + submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished); submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr, submit->out_fence, 0, @@ -176,7 +178,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity, /* the scheduler holds on to the job now */ kref_get(&submit->refcount); - drm_sched_entity_push_job(&submit->sched_job, sched_entity); + drm_sched_entity_push_job(&submit->sched_job); out_unlock: mutex_unlock(&submit->gpu->fence_lock); diff --git a/drivers/gpu/drm/gud/Kconfig b/drivers/gpu/drm/gud/Kconfig index 1c8601bf4d91..9c1e61f9eec3 100644 --- a/drivers/gpu/drm/gud/Kconfig +++ b/drivers/gpu/drm/gud/Kconfig @@ -2,7 +2,7 @@ config DRM_GUD tristate "GUD USB Display" - depends on DRM && USB + depends on DRM && USB && MMU select LZ4_COMPRESS select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 35eedc14f522..f0a61a9474fc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -214,7 +214,6 @@ static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); - ttm_tt_destroy_common(bdev, ttm); ttm_tt_fini(ttm); kfree(i915_tt); } diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index de62966243cd..640acc060467 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -267,7 +267,9 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo, if (explicit) return 0; - return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write); + return drm_sched_job_add_implicit_dependencies(&task->base, + &bo->base.base, + write); } static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit) @@ -285,7 +287,7 @@ static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit) if (err) return err; - err = drm_gem_fence_array_add(&submit->task->deps, fence); + err = drm_sched_job_add_dependency(&submit->task->base, fence); if (err) { dma_fence_put(fence); return err; @@ -359,8 +361,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) goto err_out2; } - fence = lima_sched_context_queue_task( - submit->ctx->context + submit->pipe, submit->task); + fence = lima_sched_context_queue_task(submit->task); for (i = 0; i < submit->nr_bos; i++) { if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE) diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index dba8329937a3..99d5f6f1a882 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -129,27 +129,20 @@ int lima_sched_task_init(struct lima_sched_task *task, return err; } + drm_sched_job_arm(&task->base); + task->num_bos = num_bos; task->vm = lima_vm_get(vm); - xa_init_flags(&task->deps, XA_FLAGS_ALLOC); - return 0; } void lima_sched_task_fini(struct lima_sched_task *task) { - struct dma_fence *fence; - unsigned long index; int i; drm_sched_job_cleanup(&task->base); - xa_for_each(&task->deps, index, fence) { - dma_fence_put(fence); - } - xa_destroy(&task->deps); - if 
(task->bos) { for (i = 0; i < task->num_bos; i++) drm_gem_object_put(&task->bos[i]->base.base); @@ -175,27 +168,15 @@ void lima_sched_context_fini(struct lima_sched_pipe *pipe, drm_sched_entity_fini(&context->base); } -struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context, - struct lima_sched_task *task) +struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task) { struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished); trace_lima_task_submit(task); - drm_sched_entity_push_job(&task->base, &context->base); + drm_sched_entity_push_job(&task->base); return fence; } -static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job, - struct drm_sched_entity *entity) -{ - struct lima_sched_task *task = to_lima_task(job); - - if (!xa_empty(&task->deps)) - return xa_erase(&task->deps, task->last_dep++); - - return NULL; -} - static int lima_pm_busy(struct lima_device *ldev) { int ret; @@ -471,7 +452,6 @@ static void lima_sched_free_job(struct drm_sched_job *job) } static const struct drm_sched_backend_ops lima_sched_ops = { - .dependency = lima_sched_dependency, .run_job = lima_sched_run_job, .timedout_job = lima_sched_timedout_job, .free_job = lima_sched_free_job, diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h index 90f03c48ef4a..6a11764d87b3 100644 --- a/drivers/gpu/drm/lima/lima_sched.h +++ b/drivers/gpu/drm/lima/lima_sched.h @@ -23,9 +23,6 @@ struct lima_sched_task { struct lima_vm *vm; void *frame; - struct xarray deps; - unsigned long last_dep; - struct lima_bo **bos; int num_bos; @@ -98,8 +95,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe, atomic_t *guilty); void lima_sched_context_fini(struct lima_sched_pipe *pipe, struct lima_sched_context *context); -struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context, - struct lima_sched_task *task); +struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task); int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name); void lima_sched_pipe_fini(struct lima_sched_pipe *pipe); diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index e60566a5739c..5b5afc6aaf8e 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -276,7 +276,6 @@ static int mcde_probe(struct platform_device *pdev) struct drm_device *drm; struct mcde *mcde; struct component_match *match = NULL; - struct resource *res; u32 pid; int irq; int ret; @@ -344,8 +343,7 @@ static int mcde_probe(struct platform_device *pdev) goto clk_disable; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mcde->regs = devm_ioremap_resource(dev, res); + mcde->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mcde->regs)) { dev_err(dev, "no MCDE regs\n"); ret = -EINVAL; diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c index 180ebbccbeda..5651734ce977 100644 --- a/drivers/gpu/drm/mcde/mcde_dsi.c +++ b/drivers/gpu/drm/mcde/mcde_dsi.c @@ -1169,7 +1169,6 @@ static int mcde_dsi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct mcde_dsi *d; struct mipi_dsi_host *host; - struct resource *res; u32 dsi_id; int ret; @@ -1187,8 +1186,7 @@ static int mcde_dsi_probe(struct platform_device *pdev) return PTR_ERR(d->prcmu); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - d->regs = devm_ioremap_resource(dev, res); + d->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(d->regs)) return 
PTR_ERR(d->regs); diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index bc0d60df04ae..7f41a33592c8 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -206,8 +206,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) priv->compat = match->compat; priv->afbcd.ops = match->afbcd_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu"); - regs = devm_ioremap_resource(dev, res); + regs = devm_platform_ioremap_resource_byname(pdev, "vpu"); if (IS_ERR(regs)) { ret = PTR_ERR(regs); goto free_drm; diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 2ed87cfdd735..0afbd1e70bfc 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -978,7 +978,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, struct dw_hdmi_plat_data *dw_plat_data; struct drm_bridge *next_bridge; struct drm_encoder *encoder; - struct resource *res; int irq; int ret; @@ -1042,8 +1041,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, return PTR_ERR(meson_dw_hdmi->hdmitx_phy); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - meson_dw_hdmi->hdmitx = devm_ioremap_resource(dev, res); + meson_dw_hdmi->hdmitx = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(meson_dw_hdmi->hdmitx)) return PTR_ERR(meson_dw_hdmi->hdmitx); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index e39a8e7ad843..54ca0817d807 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -309,11 +309,6 @@ struct msm_gem_submit { struct ww_acquire_ctx ticket; uint32_t seqno; /* Sequence number of the submit on the ring */ - /* Array of struct dma_fence * to block on before submitting this job. 
- */ - struct xarray deps; - unsigned long last_dep; - /* Hw fence, which is created when the scheduler executes the job, and * is signaled when the hw finishes (via seqno write from cmdstream) */ diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index fdc5367aecaa..924b01b9c105 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -52,8 +52,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, return ERR_PTR(ret); } - xa_init_flags(&submit->deps, XA_FLAGS_ALLOC); - kref_init(&submit->ref); submit->dev = dev; submit->aspace = queue->ctx->aspace; @@ -72,8 +70,6 @@ void __msm_gem_submit_destroy(struct kref *kref) { struct msm_gem_submit *submit = container_of(kref, struct msm_gem_submit, ref); - unsigned long index; - struct dma_fence *fence; unsigned i; if (submit->fence_id) { @@ -82,12 +78,6 @@ void __msm_gem_submit_destroy(struct kref *kref) mutex_unlock(&submit->queue->lock); } - xa_for_each (&submit->deps, index, fence) { - dma_fence_put(fence); - } - - xa_destroy(&submit->deps); - dma_fence_put(submit->user_fence); dma_fence_put(submit->hw_fence); @@ -340,11 +330,13 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) return ret; } - if (no_implicit) + /* exclusive fences must be ordered */ + if (no_implicit && !write) continue; - ret = drm_gem_fence_array_add_implicit(&submit->deps, obj, - write); + ret = drm_sched_job_add_implicit_dependencies(&submit->base, + obj, + write); if (ret) break; } @@ -588,7 +580,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit, if (ret) break; - ret = drm_gem_fence_array_add(&submit->deps, fence); + ret = drm_sched_job_add_dependency(&submit->base, fence); if (ret) break; @@ -798,7 +790,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, goto out_unlock; } - ret = drm_gem_fence_array_add(&submit->deps, in_fence); + ret = drm_sched_job_add_dependency(&submit->base, in_fence); if (ret) goto out_unlock; } @@ -878,6 +870,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, submit->nr_cmds = i; + drm_sched_job_arm(&submit->base); + submit->user_fence = dma_fence_get(&submit->base.s_fence->finished); /* @@ -889,17 +883,16 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (submit->fence_id < 0) { ret = submit->fence_id; submit->fence_id = 0; - goto out; } - if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) { + if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) { struct sync_file *sync_file = sync_file_create(submit->user_fence); if (!sync_file) { ret = -ENOMEM; - goto out; + } else { + fd_install(out_fence_fd, sync_file->file); + args->fence_fd = out_fence_fd; } - fd_install(out_fence_fd, sync_file->file); - args->fence_fd = out_fence_fd; } submit_attach_object_fences(submit); @@ -907,7 +900,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, /* The scheduler owns a ref now: */ msm_gem_submit_get(submit); - drm_sched_entity_push_job(&submit->base, &queue->entity); + drm_sched_entity_push_job(&submit->base); args->fence = submit->fence_id; diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index bd54c1412649..652b1dedd7c1 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -11,17 +11,6 @@ static uint num_hw_submissions = 8; MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)"); module_param(num_hw_submissions, uint, 0600); -static 
struct dma_fence *msm_job_dependency(struct drm_sched_job *job, - struct drm_sched_entity *s_entity) -{ - struct msm_gem_submit *submit = to_msm_submit(job); - - if (!xa_empty(&submit->deps)) - return xa_erase(&submit->deps, submit->last_dep++); - - return NULL; -} - static struct dma_fence *msm_job_run(struct drm_sched_job *job) { struct msm_gem_submit *submit = to_msm_submit(job); @@ -52,7 +41,6 @@ static void msm_job_free(struct drm_sched_job *job) } const struct drm_sched_backend_ops msm_sched_ops = { - .dependency = msm_job_dependency, .run_job = msm_job_run, .free_job = msm_job_free }; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 6d07e653f82d..33dca2565cca 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1277,6 +1277,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev, if (slave) return; + nouveau_ttm_tt_unbind(bdev, ttm); + drm = nouveau_bdev(bdev); dev = drm->dev->dev; @@ -1290,8 +1292,6 @@ nouveau_ttm_tt_destroy(struct ttm_device *bdev, #if IS_ENABLED(CONFIG_AGP) struct nouveau_drm *drm = nouveau_bdev(bdev); if (drm->agp.bridge) { - ttm_agp_unbind(ttm); - ttm_tt_destroy_common(bdev, ttm); ttm_agp_destroy(ttm); return; } diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 256ec5b35473..85c03c83259b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -21,8 +21,6 @@ nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; if (ttm) { - nouveau_sgdma_unbind(bdev, ttm); - ttm_tt_destroy_common(bdev, ttm); ttm_tt_fini(&nvbe->ttm); kfree(nvbe); } diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index e7281da5bc6a..d6e4df291d6f 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig @@ -3,7 +3,6 @@ config DRM_OMAP tristate "OMAP DRM" depends on DRM depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM - select OMAP2_DSS select DRM_KMS_HELPER select VIDEOMODE_HELPERS select HDMI diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index beb581b96ecd..0b3784941312 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -392,6 +392,17 @@ config DRM_PANEL_SAMSUNG_S6D16D0 depends on DRM_MIPI_DSI select VIDEOMODE_HELPERS +config DRM_PANEL_SAMSUNG_S6D27A1 + tristate "Samsung S6D27A1 DPI panel driver" + depends on OF && SPI && GPIOLIB + select DRM_MIPI_DBI + help + Say Y here if you want to enable support for the Samsung + S6D27A1 DPI 480x800 panel. + + This panel can be found in Samsung Galaxy Ace 2 + GT-I8160 mobile phone. 
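Note: the scheduler conversions in the amdgpu, etnaviv, lima and msm hunks above all land on the same submit flow: dependencies now live in the drm_sched_job itself (drm_sched_job_add_dependency() and drm_sched_job_add_implicit_dependencies() replace the per-driver xarrays and the .dependency callback), drm_sched_job_arm() is called once the job can no longer fail, and drm_sched_entity_push_job() no longer takes the entity. A condensed sketch under those assumptions; struct my_job, sketch_submit, bo and entity are hypothetical names, only the drm_sched_* and dma_fence_* calls come from the changes above:

#include <linux/err.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>

struct my_job {
        struct drm_sched_job base;
        /* driver-specific state would live here */
};

static struct dma_fence *sketch_submit(struct my_job *job,
                                       struct drm_sched_entity *entity,
                                       struct drm_gem_object *bo)
{
        struct dma_fence *fence;
        int ret;

        ret = drm_sched_job_init(&job->base, entity, NULL);
        if (ret)
                return ERR_PTR(ret);

        /* Dependencies are stored in the job, not in driver xarrays. */
        ret = drm_sched_job_add_implicit_dependencies(&job->base, bo, true);
        if (ret) {
                drm_sched_job_cleanup(&job->base);
                return ERR_PTR(ret);
        }

        /* Point of no return: this initializes the job's fences. */
        drm_sched_job_arm(&job->base);

        /* Grab the finished fence before pushing; the scheduler owns
         * the job after drm_sched_entity_push_job(). */
        fence = dma_fence_get(&job->base.s_fence->finished);
        drm_sched_entity_push_job(&job->base);

        return fence;
}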
+ config DRM_PANEL_SAMSUNG_S6E3HA2 tristate "Samsung S6E3HA2 DSI video mode panel" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index c8132050bcec..60c0149fc54a 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c index f80b44a8a700..dfb43b1374e7 100644 --- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c +++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c @@ -60,6 +60,9 @@ #define MCS_CMD2_ENA1 0xFF00 /* Enable Access Command2 "CMD2" */ #define MCS_CMD2_ENA2 0xFF80 /* Enable Access Orise Command2 */ +#define OTM8009A_HDISPLAY 480 +#define OTM8009A_VDISPLAY 800 + struct otm8009a { struct device *dev; struct drm_panel panel; @@ -70,19 +73,35 @@ struct otm8009a { bool enabled; }; -static const struct drm_display_mode default_mode = { - .clock = 29700, - .hdisplay = 480, - .hsync_start = 480 + 98, - .hsync_end = 480 + 98 + 32, - .htotal = 480 + 98 + 32 + 98, - .vdisplay = 800, - .vsync_start = 800 + 15, - .vsync_end = 800 + 15 + 10, - .vtotal = 800 + 15 + 10 + 14, - .flags = 0, - .width_mm = 52, - .height_mm = 86, +static const struct drm_display_mode modes[] = { + { /* 50 Hz, preferred */ + .clock = 29700, + .hdisplay = 480, + .hsync_start = 480 + 98, + .hsync_end = 480 + 98 + 32, + .htotal = 480 + 98 + 32 + 98, + .vdisplay = 800, + .vsync_start = 800 + 15, + .vsync_end = 800 + 15 + 10, + .vtotal = 800 + 15 + 10 + 14, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + .width_mm = 52, + .height_mm = 86, + }, + { /* 60 Hz */ + .clock = 33000, + .hdisplay = 480, + .hsync_start = 480 + 70, + .hsync_end = 480 + 70 + 32, + .htotal = 480 + 70 + 32 + 72, + .vdisplay = 800, + .vsync_start = 800 + 15, + .vsync_end = 800 + 15 + 10, + .vtotal = 800 + 15 + 10 + 16, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + .width_mm = 52, + .height_mm = 86, + }, }; static inline struct otm8009a *panel_to_otm8009a(struct drm_panel *panel) @@ -208,12 +227,11 @@ static int otm8009a_init_sequence(struct otm8009a *ctx) /* Default portrait 480x800 rgb24 */ dcs_write_seq(ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00); - ret = mipi_dsi_dcs_set_column_address(dsi, 0, - default_mode.hdisplay - 1); + ret = mipi_dsi_dcs_set_column_address(dsi, 0, OTM8009A_HDISPLAY - 1); if (ret) return ret; - ret = mipi_dsi_dcs_set_page_address(dsi, 0, default_mode.vdisplay - 1); + ret = mipi_dsi_dcs_set_page_address(dsi, 0, OTM8009A_VDISPLAY - 1); if (ret) return ret; @@ -337,24 +355,33 @@ static int otm8009a_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct drm_display_mode *mode; - - mode = drm_mode_duplicate(connector->dev, &default_mode); - if (!mode) { - dev_err(panel->dev, "failed to add mode %ux%u@%u\n", - default_mode.hdisplay, default_mode.vdisplay, - drm_mode_vrefresh(&default_mode)); - return -ENOMEM; + unsigned int num_modes = ARRAY_SIZE(modes); + unsigned 
int i; + + for (i = 0; i < num_modes; i++) { + mode = drm_mode_duplicate(connector->dev, &modes[i]); + if (!mode) { + dev_err(panel->dev, "failed to add mode %ux%u@%u\n", + modes[i].hdisplay, + modes[i].vdisplay, + drm_mode_vrefresh(&modes[i])); + return -ENOMEM; + } + + mode->type = DRM_MODE_TYPE_DRIVER; + + /* Setting first mode as preferred */ + if (!i) + mode->type |= DRM_MODE_TYPE_PREFERRED; + + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); } - drm_mode_set_name(mode); - - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_probed_add(connector, mode); - connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; - return 1; + return num_modes; } static const struct drm_panel_funcs otm8009a_drm_funcs = { diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c new file mode 100644 index 000000000000..1696ceb36aa0 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Panel driver for the Samsung S6D27A1 480x800 DPI RGB panel. + * Found in the Samsung Galaxy Ace 2 GT-I8160 mobile phone. + */ + +#include <drm/drm_mipi_dbi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> + +#include <video/mipi_display.h> + +#define S6D27A1_PASSWD_L2 0xF0 /* Password Command for Level 2 Control */ +#define S6D27A1_RESCTL 0xB3 /* Resolution Select Control */ +#define S6D27A1_PANELCTL2 0xB4 /* ASG Signal Control */ +#define S6D27A1_READID1 0xDA /* Read panel ID 1 */ +#define S6D27A1_READID2 0xDB /* Read panel ID 2 */ +#define S6D27A1_READID3 0xDC /* Read panel ID 3 */ +#define S6D27A1_DISPCTL 0xF2 /* Display Control */ +#define S6D27A1_MANPWR 0xF3 /* Manual Control */ +#define S6D27A1_PWRCTL1 0xF4 /* Power Control */ +#define S6D27A1_SRCCTL 0xF6 /* Source Control */ +#define S6D27A1_PANELCTL 0xF7 /* Panel Control*/ + +static const u8 s6d27a1_dbi_read_commands[] = { + S6D27A1_READID1, + S6D27A1_READID2, + S6D27A1_READID3, + 0, /* sentinel */ +}; + +struct s6d27a1 { + struct device *dev; + struct mipi_dbi dbi; + struct drm_panel panel; + struct gpio_desc *reset; + struct regulator_bulk_data regulators[2]; +}; + +static const struct drm_display_mode s6d27a1_480_800_mode = { + /* + * The vendor driver states that the S6D27A1 panel + * has a pixel clock frequency of 49920000 Hz / 2 = 24960000 Hz. 
+ */ + .clock = 24960, + .hdisplay = 480, + .hsync_start = 480 + 63, + .hsync_end = 480 + 63 + 2, + .htotal = 480 + 63 + 2 + 63, + .vdisplay = 800, + .vsync_start = 800 + 11, + .vsync_end = 800 + 11 + 2, + .vtotal = 800 + 11 + 2 + 10, + .width_mm = 50, + .height_mm = 84, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static inline struct s6d27a1 *to_s6d27a1(struct drm_panel *panel) +{ + return container_of(panel, struct s6d27a1, panel); +} + +static void s6d27a1_read_mtp_id(struct s6d27a1 *ctx) +{ + struct mipi_dbi *dbi = &ctx->dbi; + u8 id1, id2, id3; + int ret; + + ret = mipi_dbi_command_read(dbi, S6D27A1_READID1, &id1); + if (ret) { + dev_err(ctx->dev, "unable to read MTP ID 1\n"); + return; + } + ret = mipi_dbi_command_read(dbi, S6D27A1_READID2, &id2); + if (ret) { + dev_err(ctx->dev, "unable to read MTP ID 2\n"); + return; + } + ret = mipi_dbi_command_read(dbi, S6D27A1_READID3, &id3); + if (ret) { + dev_err(ctx->dev, "unable to read MTP ID 3\n"); + return; + } + dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3); +} + +static int s6d27a1_power_on(struct s6d27a1 *ctx) +{ + struct mipi_dbi *dbi = &ctx->dbi; + int ret; + + /* Power up */ + ret = regulator_bulk_enable(ARRAY_SIZE(ctx->regulators), + ctx->regulators); + if (ret) { + dev_err(ctx->dev, "failed to enable regulators: %d\n", ret); + return ret; + } + + msleep(20); + + /* Assert reset >=1 ms */ + gpiod_set_value_cansleep(ctx->reset, 1); + usleep_range(1000, 5000); + /* De-assert reset */ + gpiod_set_value_cansleep(ctx->reset, 0); + /* Wait >= 10 ms */ + msleep(20); + + /* + * Exit sleep mode and initialize display - some hammering is + * necessary. + */ + mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); + mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); + msleep(120); + + /* Magic to unlock level 2 control of the display */ + mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0x5A, 0x5A); + + /* Configure resolution to 480RGBx800 */ + mipi_dbi_command(dbi, S6D27A1_RESCTL, 0x22); + + mipi_dbi_command(dbi, S6D27A1_PANELCTL2, 0x00, 0x02, 0x03, 0x04, 0x05, 0x08, 0x00, 0x0c); + + mipi_dbi_command(dbi, S6D27A1_MANPWR, 0x01, 0x00, 0x00, 0x08, 0x08, 0x02, 0x00); + + mipi_dbi_command(dbi, S6D27A1_DISPCTL, 0x19, 0x00, 0x08, 0x0D, 0x03, 0x41, 0x3F); + + mipi_dbi_command(dbi, S6D27A1_PWRCTL1, 0x00, 0x00, 0x00, 0x00, 0x55, + 0x44, 0x05, 0x88, 0x4B, 0x50); + + mipi_dbi_command(dbi, S6D27A1_SRCCTL, 0x03, 0x09, 0x8A, 0x00, 0x01, 0x16); + + mipi_dbi_command(dbi, S6D27A1_PANELCTL, 0x00, 0x05, 0x06, 0x07, 0x08, + 0x01, 0x09, 0x0D, 0x0A, 0x0E, + 0x0B, 0x0F, 0x0C, 0x10, 0x01, + 0x11, 0x12, 0x13, 0x14, 0x05, + 0x06, 0x07, 0x08, 0x01, 0x09, + 0x0D, 0x0A, 0x0E, 0x0B, 0x0F, + 0x0C, 0x10, 0x01, 0x11, 0x12, + 0x13, 0x14); + + /* lock the level 2 control */ + mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0xA5, 0xA5); + + s6d27a1_read_mtp_id(ctx); + + return 0; +} + +static int s6d27a1_power_off(struct s6d27a1 *ctx) +{ + /* Go into RESET and disable regulators */ + gpiod_set_value_cansleep(ctx->reset, 1); + return regulator_bulk_disable(ARRAY_SIZE(ctx->regulators), + ctx->regulators); +} + +static int s6d27a1_unprepare(struct drm_panel *panel) +{ + struct s6d27a1 *ctx = to_s6d27a1(panel); + struct mipi_dbi *dbi = &ctx->dbi; + + mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE); + msleep(120); + return s6d27a1_power_off(to_s6d27a1(panel)); +} + +static int s6d27a1_disable(struct drm_panel *panel) +{ + struct s6d27a1 *ctx = to_s6d27a1(panel); + struct mipi_dbi *dbi = &ctx->dbi; + + mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF); + msleep(25); + + 
return 0; +} + +static int s6d27a1_prepare(struct drm_panel *panel) +{ + return s6d27a1_power_on(to_s6d27a1(panel)); +} + +static int s6d27a1_enable(struct drm_panel *panel) +{ + struct s6d27a1 *ctx = to_s6d27a1(panel); + struct mipi_dbi *dbi = &ctx->dbi; + + mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); + + return 0; +} + +static int s6d27a1_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct s6d27a1 *ctx = to_s6d27a1(panel); + struct drm_display_mode *mode; + static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + + mode = drm_mode_duplicate(connector->dev, &s6d27a1_480_800_mode); + if (!mode) { + dev_err(ctx->dev, "failed to add mode\n"); + return -ENOMEM; + } + + connector->display_info.bpc = 8; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + connector->display_info.bus_flags = + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; + drm_display_info_set_bus_formats(&connector->display_info, + &bus_format, 1); + + drm_mode_set_name(mode); + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs s6d27a1_drm_funcs = { + .disable = s6d27a1_disable, + .unprepare = s6d27a1_unprepare, + .prepare = s6d27a1_prepare, + .enable = s6d27a1_enable, + .get_modes = s6d27a1_get_modes, +}; + +static int s6d27a1_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct s6d27a1 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->dev = dev; + + /* + * VCI is the analog voltage supply + * VCCIO is the digital I/O voltage supply + */ + ctx->regulators[0].supply = "vci"; + ctx->regulators[1].supply = "vccio"; + ret = devm_regulator_bulk_get(dev, + ARRAY_SIZE(ctx->regulators), + ctx->regulators); + if (ret) + return dev_err_probe(dev, ret, "failed to get regulators\n"); + + ctx->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset)) { + ret = PTR_ERR(ctx->reset); + return dev_err_probe(dev, ret, "no RESET GPIO\n"); + } + + ret = mipi_dbi_spi_init(spi, &ctx->dbi, NULL); + if (ret) + return dev_err_probe(dev, ret, "MIPI DBI init failed\n"); + + ctx->dbi.read_commands = s6d27a1_dbi_read_commands; + + drm_panel_init(&ctx->panel, dev, &s6d27a1_drm_funcs, + DRM_MODE_CONNECTOR_DPI); + + ret = drm_panel_of_backlight(&ctx->panel); + if (ret) + return dev_err_probe(dev, ret, "failed to add backlight\n"); + + spi_set_drvdata(spi, ctx); + + drm_panel_add(&ctx->panel); + + return 0; +} + +static int s6d27a1_remove(struct spi_device *spi) +{ + struct s6d27a1 *ctx = spi_get_drvdata(spi); + + drm_panel_remove(&ctx->panel); + return 0; +} + +static const struct of_device_id s6d27a1_match[] = { + { .compatible = "samsung,s6d27a1", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, s6d27a1_match); + +static struct spi_driver s6d27a1_driver = { + .probe = s6d27a1_probe, + .remove = s6d27a1_remove, + .driver = { + .name = "s6d27a1-panel", + .of_match_table = s6d27a1_match, + }, +}; +module_spi_driver(s6d27a1_driver); + +MODULE_AUTHOR("Markuss Broks <markuss.broks@gmail.com>"); +MODULE_DESCRIPTION("Samsung S6D27A1 panel driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 9b6c4e6c38a1..925079209086 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -3158,19 +3158,6 @@ static const struct panel_desc 
logictechno_lttd800480070_l6wh_rt = { .connector_type = DRM_MODE_CONNECTOR_DPI, }; -static const struct drm_display_mode mitsubishi_aa070mc01_mode = { - .clock = 30400, - .hdisplay = 800, - .hsync_start = 800 + 0, - .hsync_end = 800 + 1, - .htotal = 800 + 0 + 1 + 160, - .vdisplay = 480, - .vsync_start = 480 + 0, - .vsync_end = 480 + 48 + 1, - .vtotal = 480 + 48 + 1 + 0, - .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, -}; - static const struct drm_display_mode logicpd_type_28_mode = { .clock = 9107, .hdisplay = 480, @@ -3205,6 +3192,19 @@ static const struct panel_desc logicpd_type_28 = { .connector_type = DRM_MODE_CONNECTOR_DPI, }; +static const struct drm_display_mode mitsubishi_aa070mc01_mode = { + .clock = 30400, + .hdisplay = 800, + .hsync_start = 800 + 0, + .hsync_end = 800 + 1, + .htotal = 800 + 0 + 1 + 160, + .vdisplay = 480, + .vsync_start = 480 + 0, + .vsync_end = 480 + 48 + 1, + .vtotal = 480 + 48 + 1 + 0, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, +}; + static const struct panel_desc mitsubishi_aa070mc01 = { .modes = &mitsubishi_aa070mc01_mode, .num_modes = 1, diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index bd9b7be63b0f..1c692428b0d4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -198,7 +198,6 @@ err: int panfrost_device_init(struct panfrost_device *pfdev) { int err; - struct resource *res; mutex_init(&pfdev->sched_lock); INIT_LIST_HEAD(&pfdev->scheduled_jobs); @@ -236,8 +235,7 @@ int panfrost_device_init(struct panfrost_device *pfdev) if (err) goto out_reset; - res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0); - pfdev->iomem = devm_ioremap_resource(pfdev->dev, res); + pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0); if (IS_ERR(pfdev->iomem)) { err = PTR_ERR(pfdev->iomem); goto out_pm_domain; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 1ffaef5ec5ff..077cbbfa506b 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -218,7 +218,7 @@ panfrost_copy_in_sync(struct drm_device *dev, if (ret) goto fail; - ret = drm_gem_fence_array_add(&job->deps, fence); + ret = drm_sched_job_add_dependency(&job->base, fence); if (ret) goto fail; @@ -236,7 +236,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, struct drm_panfrost_submit *args = data; struct drm_syncobj *sync_out = NULL; struct panfrost_job *job; - int ret = 0; + int ret = 0, slot; if (!args->jc) return -EINVAL; @@ -253,38 +253,47 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, job = kzalloc(sizeof(*job), GFP_KERNEL); if (!job) { ret = -ENOMEM; - goto fail_out_sync; + goto out_put_syncout; } kref_init(&job->refcount); - xa_init_flags(&job->deps, XA_FLAGS_ALLOC); - job->pfdev = pfdev; job->jc = args->jc; job->requirements = args->requirements; job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev); job->file_priv = file->driver_priv; + slot = panfrost_job_get_slot(job); + + ret = drm_sched_job_init(&job->base, + &job->file_priv->sched_entity[slot], + NULL); + if (ret) + goto out_put_job; + ret = panfrost_copy_in_sync(dev, file, args, job); if (ret) - goto fail_job; + goto out_cleanup_job; ret = panfrost_lookup_bos(dev, file, args, job); if (ret) - goto fail_job; + goto out_cleanup_job; ret = panfrost_job_push(job); if (ret) - goto fail_job; + goto out_cleanup_job; /* Update the return sync object for the job */ if 
(sync_out) drm_syncobj_replace_fence(sync_out, job->render_done_fence); -fail_job: +out_cleanup_job: + if (ret) + drm_sched_job_cleanup(&job->base); +out_put_job: panfrost_job_put(job); -fail_out_sync: +out_put_syncout: if (sync_out) drm_syncobj_put(sync_out); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 71a72fb50e6b..908d79520853 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -102,7 +102,7 @@ static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, in return &fence->base; } -static int panfrost_job_get_slot(struct panfrost_job *job) +int panfrost_job_get_slot(struct panfrost_job *job) { /* JS0: fragment jobs. * JS1: vertex/tiler jobs @@ -137,8 +137,8 @@ static void panfrost_job_write_affinity(struct panfrost_device *pfdev, */ affinity = pfdev->features.shader_present; - job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF); - job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32); + job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity)); + job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity)); } static u32 @@ -203,8 +203,8 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu); - job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF); - job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32); + job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head)); + job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head)); panfrost_job_write_affinity(pfdev, job->requirements, js); @@ -242,13 +242,14 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) static int panfrost_acquire_object_fences(struct drm_gem_object **bos, int bo_count, - struct xarray *deps) + struct drm_sched_job *job) { int i, ret; for (i = 0; i < bo_count; i++) { /* panfrost always uses write mode in its current uapi */ - ret = drm_gem_fence_array_add_implicit(deps, bos[i], true); + ret = drm_sched_job_add_implicit_dependencies(job, bos[i], + true); if (ret) return ret; } @@ -269,29 +270,21 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos, int panfrost_job_push(struct panfrost_job *job) { struct panfrost_device *pfdev = job->pfdev; - int slot = panfrost_job_get_slot(job); - struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot]; struct ww_acquire_ctx acquire_ctx; int ret = 0; - ret = drm_gem_lock_reservations(job->bos, job->bo_count, &acquire_ctx); if (ret) return ret; mutex_lock(&pfdev->sched_lock); - - ret = drm_sched_job_init(&job->base, entity, NULL); - if (ret) { - mutex_unlock(&pfdev->sched_lock); - goto unlock; - } + drm_sched_job_arm(&job->base); job->render_done_fence = dma_fence_get(&job->base.s_fence->finished); ret = panfrost_acquire_object_fences(job->bos, job->bo_count, - &job->deps); + &job->base); if (ret) { mutex_unlock(&pfdev->sched_lock); goto unlock; @@ -299,7 +292,7 @@ int panfrost_job_push(struct panfrost_job *job) kref_get(&job->refcount); /* put by scheduler job completion */ - drm_sched_entity_push_job(&job->base, entity); + drm_sched_entity_push_job(&job->base); mutex_unlock(&pfdev->sched_lock); @@ -316,15 +309,8 @@ static void panfrost_job_cleanup(struct kref *ref) { struct panfrost_job *job = container_of(ref, struct panfrost_job, refcount); - struct dma_fence *fence; - unsigned long index; unsigned int i; - xa_for_each(&job->deps, index, fence) { - dma_fence_put(fence); - } - 
xa_destroy(&job->deps); - dma_fence_put(job->done_fence); dma_fence_put(job->render_done_fence); @@ -363,17 +349,6 @@ static void panfrost_job_free(struct drm_sched_job *sched_job) panfrost_job_put(job); } -static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job, - struct drm_sched_entity *s_entity) -{ - struct panfrost_job *job = to_panfrost_job(sched_job); - - if (!xa_empty(&job->deps)) - return xa_erase(&job->deps, job->last_dep++); - - return NULL; -} - static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job) { struct panfrost_job *job = to_panfrost_job(sched_job); @@ -763,7 +738,6 @@ static void panfrost_reset_work(struct work_struct *work) } static const struct drm_sched_backend_ops panfrost_sched_ops = { - .dependency = panfrost_job_dependency, .run_job = panfrost_job_run, .timedout_job = panfrost_job_timedout, .free_job = panfrost_job_free diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 82306a03b57e..77e6d0e6f612 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -19,10 +19,6 @@ struct panfrost_job { struct panfrost_device *pfdev; struct panfrost_file_priv *file_priv; - /* Contains both explicit and implicit fences */ - struct xarray deps; - unsigned long last_dep; - /* Fence to be signaled by IRQ handler when the job is complete. */ struct dma_fence *done_fence; @@ -42,6 +38,7 @@ int panfrost_job_init(struct panfrost_device *pfdev); void panfrost_job_fini(struct panfrost_device *pfdev); int panfrost_job_open(struct panfrost_file_priv *panfrost_priv); void panfrost_job_close(struct panfrost_file_priv *panfrost_priv); +int panfrost_job_get_slot(struct panfrost_job *job); int panfrost_job_push(struct panfrost_job *job); void panfrost_job_put(struct panfrost_job *job); void panfrost_job_enable_interrupts(struct panfrost_device *pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index dfe5f1d29763..f7f83cf63f42 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -71,8 +71,8 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr, region |= region_width; /* Lock the region that needs to be updated */ - mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL); - mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL); + mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region)); + mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region)); write_cmd(pfdev, as_nr, AS_COMMAND_LOCK); } @@ -114,14 +114,14 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM); - mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL); - mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32); + mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab)); + mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab)); /* Need to revisit mem attrs. * NC is the default, Mali driver is inner WT. 
*/ - mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL); - mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32); + mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr)); + mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr)); write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE); } diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 5ab03d605f57..e116a4d9b8e5 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -51,8 +51,8 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev) reinit_completion(&pfdev->perfcnt->dump_comp); gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT; - gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva); - gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32); + gpu_write(pfdev, GPU_PERFCNT_BASE_LO, lower_32_bits(gpuva)); + gpu_write(pfdev, GPU_PERFCNT_BASE_HI, upper_32_bits(gpuva)); gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_CLEAN_CACHES_COMPLETED | GPU_IRQ_PERFCNT_SAMPLE_COMPLETED); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 37a1b6a6ad6d..b2e33d5ba5d0 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -101,7 +101,6 @@ int qxl_ttm_io_mem_reserve(struct ttm_device *bdev, */ static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { - ttm_tt_destroy_common(bdev, ttm); ttm_tt_fini(ttm); kfree(ttm); } diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c index 0ecccf25a3c7..26001c2de9e9 100644 --- a/drivers/gpu/drm/r128/ati_pcigart.c +++ b/drivers/gpu/drm/r128/ati_pcigart.c @@ -99,7 +99,8 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info for (i = 0; i < pages; i++) { if (!entry->busaddr[i]) break; - pci_unmap_page(pdev, entry->busaddr[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + dma_unmap_page(&pdev->dev, entry->busaddr[i], + PAGE_SIZE, DMA_BIDIRECTIONAL); } if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) @@ -134,7 +135,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); - if (pci_set_dma_mask(pdev, gart_info->table_mask)) { + if (dma_set_mask(&pdev->dev, gart_info->table_mask)) { DRM_ERROR("fail to set dma mask to 0x%Lx\n", (unsigned long long)gart_info->table_mask); ret = -EFAULT; @@ -173,9 +174,9 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga gart_idx = 0; for (i = 0; i < pages; i++) { /* we need to support large memory configurations */ - entry->busaddr[i] = pci_map_page(pdev, entry->pagelist[i], - 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(pdev, entry->busaddr[i])) { + entry->busaddr[i] = dma_map_page(&pdev->dev, entry->pagelist[i], + 0, PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&pdev->dev, entry->busaddr[i])) { DRM_ERROR("unable to map PCIGART pages!\n"); drm_ati_pcigart_cleanup(dev, gart_info); address = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index e9c47ec28ade..73e3117420bf 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -176,18 +176,11 @@ static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, */ seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq); if (seq >= fence->seq) { - int ret = 
dma_fence_signal_locked(&fence->base); - - if (!ret) - DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n"); - else - DMA_FENCE_TRACE(&fence->base, "was already signaled\n"); - + dma_fence_signal_locked(&fence->base); radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring); __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake); dma_fence_put(&fence->base); - } else - DMA_FENCE_TRACE(&fence->base, "pending\n"); + } return 0; } @@ -422,8 +415,6 @@ static bool radeon_fence_enable_signaling(struct dma_fence *f) fence->fence_wake.func = radeon_fence_check_signaled; __add_wait_queue(&rdev->fence_queue, &fence->fence_wake); dma_fence_get(f); - - DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring); return true; } @@ -441,11 +432,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence) return true; if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { - int ret; - - ret = dma_fence_signal(&fence->base); - if (!ret) - DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n"); + dma_fence_signal(&fence->base); return true; } return false; @@ -550,7 +537,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo { uint64_t seq[RADEON_NUM_RINGS] = {}; long r; - int r_sig; /* * This function should not be called on !radeon fences. @@ -567,9 +553,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo return r; } - r_sig = dma_fence_signal(&fence->base); - if (!r_sig) - DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); + dma_fence_signal(&fence->base); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index a06d4cc2fb1c..7793249bc549 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -488,9 +488,6 @@ static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *t { struct radeon_ttm_tt *gtt = (void *)ttm; - radeon_ttm_backend_unbind(bdev, ttm); - ttm_tt_destroy_common(bdev, ttm); - ttm_tt_fini(>t->ttm); kfree(gtt); } @@ -574,6 +571,8 @@ static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm); bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); + radeon_ttm_tt_unbind(bdev, ttm); + if (gtt && gtt->userptr) { kfree(ttm->sg); ttm->page_flags &= ~TTM_PAGE_FLAG_SG; @@ -651,8 +650,6 @@ static void radeon_ttm_tt_destroy(struct ttm_device *bdev, struct radeon_device *rdev = radeon_get_rdev(bdev); if (rdev->flags & RADEON_IS_AGP) { - ttm_agp_unbind(ttm); - ttm_tt_destroy_common(bdev, ttm); ttm_agp_destroy(ttm); return; } diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 558f1b58bd69..9f1ecefc3933 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -9,7 +9,6 @@ config DRM_ROCKCHIP select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP select DRM_DW_HDMI if ROCKCHIP_DW_HDMI select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI - select DRM_RGB if ROCKCHIP_RGB select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 79554aa4dbb1..27e1573af96e 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -45,8 +45,14 @@ * @guilty: atomic_t set to 1 when a job on this queue * is found to be guilty causing 
a timeout * - * Note: the sched_list should have at least one element to schedule - * the entity + * Note that the &sched_list must have at least one element to schedule the entity. + * + * For changing @priority later on at runtime see + * drm_sched_entity_set_priority(). For changing the set of schedulers + * @sched_list at runtime see drm_sched_entity_modify_sched(). + * + * An entity is cleaned up by calling drm_sched_entity_fini(). See also + * drm_sched_entity_destroy(). * * Returns 0 on success or a negative error code on failure. */ @@ -92,6 +98,11 @@ EXPORT_SYMBOL(drm_sched_entity_init); * @sched_list: the list of new drm scheds which will replace * existing entity->sched_list * @num_sched_list: number of drm sched in sched_list + * + * Note that this must be called under the same common lock for @entity as + * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to + * guarantee through some other means that this is never called while new jobs + * can be pushed to @entity. */ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, struct drm_gpu_scheduler **sched_list, @@ -104,13 +115,6 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, } EXPORT_SYMBOL(drm_sched_entity_modify_sched); -/** - * drm_sched_entity_is_idle - Check if entity is idle - * - * @entity: scheduler entity - * - * Returns true if the entity does not have any unscheduled jobs. - */ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) { rmb(); /* for list_empty to work without lock */ @@ -123,13 +127,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) return false; } -/** - * drm_sched_entity_is_ready - Check if entity is ready - * - * @entity: scheduler entity - * - * Return true if entity could provide a job. - */ +/* Return true if entity could provide a job. */ bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) { if (spsc_queue_peek(&entity->job_queue) == NULL) @@ -192,14 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) } EXPORT_SYMBOL(drm_sched_entity_flush); -/** - * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs - * - * @f: signaled fence - * @cb: our callback structure - * - * Signal the scheduler finished fence when the entity in question is killed. - */ +/* Signal the scheduler finished fence when the entity in question is killed. */ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, struct dma_fence_cb *cb) { @@ -211,14 +202,19 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, job->sched->ops->free_job(job); } -/** - * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed - * - * @entity: entity which is cleaned up - * - * Makes sure that all remaining jobs in an entity are killed before it is - * destroyed. - */
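The reworked drm_sched_entity_init() kernel-doc above now spells out the entity lifecycle. As a minimal sketch, assuming a single scheduler instance "sched" that was already set up with drm_sched_init(), with error handling trimmed:

struct drm_gpu_scheduler *sched_list[] = { &sched };
struct drm_sched_entity entity;
int ret;

ret = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
			    sched_list, ARRAY_SIZE(sched_list), NULL);
if (ret)
	return ret;
/* ... push jobs to the entity ... */
drm_sched_entity_destroy(&entity); /* drm_sched_entity_flush() + drm_sched_entity_fini() */
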
+static struct dma_fence * +drm_sched_job_dependency(struct drm_sched_job *job, + struct drm_sched_entity *entity) +{ + if (!xa_empty(&job->dependencies)) + return xa_erase(&job->dependencies, job->last_dependency++); + + if (job->sched->ops->dependency) + return job->sched->ops->dependency(job, entity); + + return NULL; +} + static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity) { struct drm_sched_job *job; @@ -229,7 +225,7 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity) struct drm_sched_fence *s_fence = job->s_fence; /* Wait for all dependencies to avoid data corruptions */ - while ((f = job->sched->ops->dependency(job, entity))) + while ((f = drm_sched_job_dependency(job, entity))) dma_fence_wait(f, false); drm_sched_fence_scheduled(s_fence); @@ -260,9 +256,11 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity) * * @entity: scheduler entity * - * This should be called after @drm_sched_entity_do_release. It goes over the - * entity and signals all jobs with an error code if the process was killed. + * Cleans up @entity which has been initialized by drm_sched_entity_init(). * + * If there are potentially jobs still in flight or getting newly queued, + * drm_sched_entity_flush() must be called first. This function then goes over + * the entity and signals all jobs with an error code if the process was killed. */ void drm_sched_entity_fini(struct drm_sched_entity *entity) { @@ -302,10 +300,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini); /** * drm_sched_entity_destroy - Destroy a context entity - * * @entity: scheduler entity * - * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup() + * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a + * convenience wrapper. */ void drm_sched_entity_destroy(struct drm_sched_entity *entity) { @@ -314,9 +312,7 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity) } EXPORT_SYMBOL(drm_sched_entity_destroy); -/* - * drm_sched_entity_clear_dep - callback to clear the entities dependency - */ +/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */ static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) { @@ -358,11 +354,7 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity, } EXPORT_SYMBOL(drm_sched_entity_set_priority); -/** - * drm_sched_entity_add_dependency_cb - add callback for the entities dependency - * - * @entity: entity with dependency - * +/* * Add a callback to the current dependency of the entity to wake up the * scheduler when the entity becomes available. */ @@ -410,16 +402,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) return false; } -/** - * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity - * - * @entity: entity to get the job from - * - * Process all dependencies and try to get one job from the entities queue. - */
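Together with drm_sched_job_arm() and the dependency helpers added in sched_main.c further down, the submit flow that the msm and panfrost conversions converge on looks roughly like this (hedged sketch for a hypothetical driver; job, entity, in_fence, obj and submit_lock are placeholders):

ret = drm_sched_job_init(&job->base, entity, owner);
if (ret)
	return ret;

/* explicit in-fence; note the helper consumes the fence reference */
ret = drm_sched_job_add_dependency(&job->base, in_fence);
if (ret)
	goto err_cleanup;

/* implicit fences from the BO's reservation object, write access */
ret = drm_sched_job_add_implicit_dependencies(&job->base, obj, true);
if (ret)
	goto err_cleanup;

/* point of no return: arm and push under the same common lock */
mutex_lock(&submit_lock);
drm_sched_job_arm(&job->base);
done_fence = dma_fence_get(&job->base.s_fence->finished);
drm_sched_entity_push_job(&job->base);
mutex_unlock(&submit_lock);
return 0;

err_cleanup:
	drm_sched_job_cleanup(&job->base);
	return ret;
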
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) { - struct drm_gpu_scheduler *sched = entity->rq->sched; struct drm_sched_job *sched_job; sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); @@ -427,7 +411,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) return NULL; while ((entity->dependency = - sched->ops->dependency(sched_job, entity))) { + drm_sched_job_dependency(sched_job, entity))) { trace_drm_sched_job_wait_dep(sched_job, entity->dependency); if (drm_sched_entity_add_dependency_cb(entity)) @@ -439,30 +423,45 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); dma_fence_put(entity->last_scheduled); + entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished); + /* + * If the queue is empty we allow drm_sched_entity_select_rq() to + * locklessly access ->last_scheduled. This only works if we set the + * pointer before we dequeue and if we add a write barrier here. + */ + smp_wmb(); + spsc_queue_pop(&entity->job_queue); return sched_job; } -/** - * drm_sched_entity_select_rq - select a new rq for the entity - * - * @entity: scheduler entity - * - * Check all prerequisites and select a new rq for the entity for load - * balancing. - */ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) { struct dma_fence *fence; struct drm_gpu_scheduler *sched; struct drm_sched_rq *rq; - if (spsc_queue_count(&entity->job_queue) || !entity->sched_list) + /* single possible engine and already selected */ + if (!entity->sched_list) return; - fence = READ_ONCE(entity->last_scheduled); + /* queue non-empty, stay on the same engine */ + if (spsc_queue_count(&entity->job_queue)) + return; + + /* + * Only when the queue is empty are we guaranteed that the scheduler + * thread cannot change ->last_scheduled. To enforce ordering we need + * a read barrier here. See drm_sched_entity_pop_job() for the other + * side. + */ + smp_rmb(); + + fence = entity->last_scheduled; + + /* stay on the same engine if the previous job hasn't finished */ if (fence && !dma_fence_is_signaled(fence)) return; @@ -481,19 +480,18 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) /** * drm_sched_entity_push_job - Submit a job to the entity's job queue - * * @sched_job: job to submit - * @entity: scheduler entity * - * Note: To guarantee that the order of insertion to queue matches - * the job's fence sequence number this function should be - * called with drm_sched_job_init under common lock. + * Note: To guarantee that the order of insertion into the queue matches the job's + * fence sequence number this function should be called with drm_sched_job_arm() + * under common lock for the struct drm_sched_entity that was set up for + * @sched_job in drm_sched_job_init(). * * Returns 0 for success, negative error code otherwise.
*/ -void drm_sched_entity_push_job(struct drm_sched_job *sched_job, - struct drm_sched_entity *entity) +void drm_sched_entity_push_job(struct drm_sched_job *sched_job) { + struct drm_sched_entity *entity = sched_job->entity; bool first; trace_drm_sched_job(sched_job, entity); diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c index 69de2c76731f..7fd869520ef2 100644 --- a/drivers/gpu/drm/scheduler/sched_fence.c +++ b/drivers/gpu/drm/scheduler/sched_fence.c @@ -50,26 +50,12 @@ static void __exit drm_sched_fence_slab_fini(void) void drm_sched_fence_scheduled(struct drm_sched_fence *fence) { - int ret = dma_fence_signal(&fence->scheduled); - - if (!ret) - DMA_FENCE_TRACE(&fence->scheduled, - "signaled from irq context\n"); - else - DMA_FENCE_TRACE(&fence->scheduled, - "was already signaled\n"); + dma_fence_signal(&fence->scheduled); } void drm_sched_fence_finished(struct drm_sched_fence *fence) { - int ret = dma_fence_signal(&fence->finished); - - if (!ret) - DMA_FENCE_TRACE(&fence->finished, - "signaled from irq context\n"); - else - DMA_FENCE_TRACE(&fence->finished, - "was already signaled\n"); + dma_fence_signal(&fence->finished); } static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence) @@ -83,19 +69,28 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f) return (const char *)fence->sched->name; } +static void drm_sched_fence_free_rcu(struct rcu_head *rcu) +{ + struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); + struct drm_sched_fence *fence = to_drm_sched_fence(f); + + if (!WARN_ON_ONCE(!fence)) + kmem_cache_free(sched_fence_slab, fence); +} + /** - * drm_sched_fence_free - free up the fence memory + * drm_sched_fence_free - free up an uninitialized fence * - * @rcu: RCU callback head + * @fence: fence to free * - * Free up the fence memory after the RCU grace period. + * Free up the fence memory. Should only be used if drm_sched_fence_init() + * has not been called yet. */ -static void drm_sched_fence_free(struct rcu_head *rcu) +void drm_sched_fence_free(struct drm_sched_fence *fence) { - struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); - struct drm_sched_fence *fence = to_drm_sched_fence(f); - - kmem_cache_free(sched_fence_slab, fence); + /* This function should not be called if the fence has been initialized. 
*/ + if (!WARN_ON_ONCE(fence->sched)) + kmem_cache_free(sched_fence_slab, fence); } /** @@ -111,7 +106,7 @@ static void drm_sched_fence_release_scheduled(struct dma_fence *f) struct drm_sched_fence *fence = to_drm_sched_fence(f); dma_fence_put(fence->parent); - call_rcu(&fence->finished.rcu, drm_sched_fence_free); + call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu); } /** @@ -152,27 +147,32 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f) } EXPORT_SYMBOL(to_drm_sched_fence); -struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity, - void *owner) +struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity, + void *owner) { struct drm_sched_fence *fence = NULL; - unsigned seq; fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); if (fence == NULL) return NULL; fence->owner = owner; - fence->sched = entity->rq->sched; spin_lock_init(&fence->lock); + return fence; +} + +void drm_sched_fence_init(struct drm_sched_fence *fence, + struct drm_sched_entity *entity) +{ + unsigned seq; + + fence->sched = entity->rq->sched; seq = atomic_inc_return(&entity->fence_seq); dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled, &fence->lock, entity->fence_context, seq); dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished, &fence->lock, entity->fence_context + 1, seq); - - return fence; } module_init(drm_sched_fence_slab_init); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 67382621b429..6987d412a946 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -48,9 +48,11 @@ #include <linux/wait.h> #include <linux/sched.h> #include <linux/completion.h> +#include <linux/dma-resv.h> #include <uapi/linux/sched/types.h> #include <drm/drm_print.h> +#include <drm/drm_gem.h> #include <drm/gpu_scheduler.h> #include <drm/spsc_queue.h> @@ -564,7 +566,6 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext); /** * drm_sched_job_init - init a scheduler job - * @job: scheduler job to init * @entity: scheduler entity to use * @owner: job owner for debugging * * Refer to drm_sched_entity_push_job() documentation * for locking considerations. * + * Drivers must make sure to call drm_sched_job_cleanup() if this function returns + * successfully, even when @job is aborted before drm_sched_job_arm() is called. + * + * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware + * has died, which can mean that there's no valid runqueue for @entity. + * This function returns -ENOENT in this case (which probably should be -EIO as + * a more meaningful return value). + * + * Returns 0 for success, negative error code otherwise.
*/ int drm_sched_job_init(struct drm_sched_job *job, struct drm_sched_entity *entity, void *owner) { - struct drm_gpu_scheduler *sched; - drm_sched_entity_select_rq(entity); if (!entity->rq) return -ENOENT; - sched = entity->rq->sched; - - job->sched = sched; job->entity = entity; - job->s_priority = entity->rq - sched->sched_rq; - job->s_fence = drm_sched_fence_create(entity, owner); + job->s_fence = drm_sched_fence_alloc(entity, owner); if (!job->s_fence) return -ENOMEM; - job->id = atomic64_inc_return(&sched->job_id_count); INIT_LIST_HEAD(&job->list); + xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC); + return 0; } EXPORT_SYMBOL(drm_sched_job_init); /** - * drm_sched_job_cleanup - clean up scheduler job resources + * drm_sched_job_arm - arm a scheduler job for execution + * @job: scheduler job to arm + * + * This arms a scheduler job for execution. Specifically it initializes the + * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv + * or other places that need to track the completion of this job. + * + * Refer to drm_sched_entity_push_job() documentation for locking + * considerations. + * + * This can only be called if drm_sched_job_init() succeeded. + */ +void drm_sched_job_arm(struct drm_sched_job *job) +{ + struct drm_gpu_scheduler *sched; + struct drm_sched_entity *entity = job->entity; + + BUG_ON(!entity); + + sched = entity->rq->sched; + + job->sched = sched; + job->s_priority = entity->rq - sched->sched_rq; + job->id = atomic64_inc_return(&sched->job_id_count); + + drm_sched_fence_init(job->s_fence, job->entity); +} +EXPORT_SYMBOL(drm_sched_job_arm); + +/** + * drm_sched_job_add_dependency - adds the fence as a job dependency + * @job: scheduler job to add the dependencies to + * @fence: the dma_fence to add to the list of dependencies. * + * Note that @fence is consumed in both the success and error cases. + * + * Returns: + * 0 on success, or an error on failing to expand the array. + */ +int drm_sched_job_add_dependency(struct drm_sched_job *job, + struct dma_fence *fence) +{ + struct dma_fence *entry; + unsigned long index; + u32 id = 0; + int ret; + + if (!fence) + return 0; + + /* Deduplicate if we already depend on a fence from the same context. + * This lets the size of the array of deps scale with the number of + * engines involved, rather than the number of BOs. + */ + xa_for_each(&job->dependencies, index, entry) { + if (entry->context != fence->context) + continue; + + if (dma_fence_is_later(fence, entry)) { + dma_fence_put(entry); + xa_store(&job->dependencies, index, fence, GFP_KERNEL); + } else { + dma_fence_put(fence); + } + return 0; + } + + ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL); + if (ret != 0) + dma_fence_put(fence); + + return ret; +} +EXPORT_SYMBOL(drm_sched_job_add_dependency); + +/** + * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job + * dependencies + * @job: scheduler job to add the dependencies to + * @obj: the gem object to add new dependencies from. + * @write: whether the job might write the object (so we need to depend on + * shared fences in the reservation object). + * + * This should be called after drm_gem_lock_reservations() on your array of + * GEM objects used in the job but before updating the reservations with your + * own fences. + * + * Returns: + * 0 on success, or an error on failing to expand the array. 
+ */ +int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, + struct drm_gem_object *obj, + bool write) +{ + int ret; + struct dma_fence **fences; + unsigned int i, fence_count; + + if (!write) { + struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv); + + return drm_sched_job_add_dependency(job, fence); + } + + ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences); + if (ret || !fence_count) + return ret; + + for (i = 0; i < fence_count; i++) { + ret = drm_sched_job_add_dependency(job, fences[i]); + if (ret) + break; + } + + for (; i < fence_count; i++) + dma_fence_put(fences[i]); + kfree(fences); + return ret; +} +EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies); + + +/** + * drm_sched_job_cleanup - clean up scheduler job resources * @job: scheduler job to clean up + * + * Cleans up the resources allocated with drm_sched_job_init(). + * + * Drivers should call this from their error unwind code if @job is aborted + * before drm_sched_job_arm() is called. + * + * After that point of no return @job is committed to be executed by the + * scheduler, and this function should be called from the + * &drm_sched_backend_ops.free_job callback. */ void drm_sched_job_cleanup(struct drm_sched_job *job) { - dma_fence_put(&job->s_fence->finished); + struct dma_fence *fence; + unsigned long index; + + if (kref_read(&job->s_fence->finished.refcount)) { + /* drm_sched_job_arm() has been called */ + dma_fence_put(&job->s_fence->finished); + } else { + /* aborted job before committing to run it */ + drm_sched_fence_free(job->s_fence); + } + job->s_fence = NULL; + + xa_for_each(&job->dependencies, index, fence) { + dma_fence_put(fence); + } + xa_destroy(&job->dependencies); + } EXPORT_SYMBOL(drm_sched_job_cleanup); diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 195de30eb90c..dbdee954692a 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -845,7 +845,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane, LXCFBLR_CFBLL | LXCFBLR_CFBP, val); /* Specifies the constant alpha value */ - val = CONSTA_MAX; + val = newstate->alpha >> 8; reg_update_bits(ldev->regs, LTDC_L1CACR + lofs, LXCACR_CONSTA, val); /* Specifies the blending factors */ @@ -997,6 +997,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev, drm_plane_helper_add(plane, <dc_plane_helper_funcs); + drm_plane_create_alpha_property(plane); + DRM_DEBUG_DRIVER("plane:%d created\n", plane->base.id); return plane; @@ -1024,6 +1026,8 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc) return -EINVAL; } + drm_plane_create_zpos_immutable_property(primary, 0); + ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL, <dc_crtc_funcs, NULL); if (ret) { @@ -1046,6 +1050,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc) DRM_ERROR("Can not create overlay plane %d\n", i); goto cleanup; } + drm_plane_create_zpos_immutable_property(overlay, i); } return 0; diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index bf8cfefa0365..f52ff4e6c662 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -782,7 +782,6 @@ static int sun4i_backend_bind(struct device *dev, struct device *master, struct sun4i_drv *drv = drm->dev_private; struct sun4i_backend *backend; const struct sun4i_backend_quirks *quirks; - struct resource *res; void __iomem *regs; int i, ret; @@ -815,8 +814,7 @@ static int sun4i_backend_bind(struct 
device *dev, struct device *master, if (IS_ERR(backend->frontend)) dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n"); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(dev, res); + regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) return PTR_ERR(regs); diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c index edb60ae0a9b7..56ae38389db0 100644 --- a/drivers/gpu/drm/sun4i/sun4i_frontend.c +++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c @@ -561,7 +561,6 @@ static int sun4i_frontend_bind(struct device *dev, struct device *master, struct sun4i_frontend *frontend; struct drm_device *drm = data; struct sun4i_drv *drv = drm->dev_private; - struct resource *res; void __iomem *regs; frontend = devm_kzalloc(dev, sizeof(*frontend), GFP_KERNEL); @@ -576,8 +575,7 @@ static int sun4i_frontend_bind(struct device *dev, struct device *master, if (!frontend->data) return -ENODEV; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(dev, res); + regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) return PTR_ERR(regs); diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 2f2c9f0a1071..3799a745b7dd 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -489,7 +489,6 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, struct cec_connector_info conn_info; struct sun4i_drv *drv = drm->dev_private; struct sun4i_hdmi *hdmi; - struct resource *res; u32 reg; int ret; @@ -504,8 +503,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, if (!hdmi->variant) return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hdmi->base = devm_ioremap_resource(dev, res); + hdmi->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hdmi->base)) { dev_err(dev, "Couldn't map the HDMI encoder registers\n"); return PTR_ERR(hdmi->base); diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 9f06dec0fc61..88db2d2a9336 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -841,11 +841,9 @@ static int sun4i_tcon_init_regmap(struct device *dev, struct sun4i_tcon *tcon) { struct platform_device *pdev = to_platform_device(dev); - struct resource *res; void __iomem *regs; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(dev, res); + regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) return PTR_ERR(regs); diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c index cb91bc11a0c7..94883abe0dfd 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tv.c +++ b/drivers/gpu/drm/sun4i/sun4i_tv.c @@ -538,7 +538,6 @@ static int sun4i_tv_bind(struct device *dev, struct device *master, struct drm_device *drm = data; struct sun4i_drv *drv = drm->dev_private; struct sun4i_tv *tv; - struct resource *res; void __iomem *regs; int ret; @@ -548,8 +547,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master, tv->drv = drv; dev_set_drvdata(dev, tv); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(dev, res); + regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) { dev_err(dev, "Couldn't map the TV encoder registers\n"); return PTR_ERR(regs); diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 
4f5efcace68e..4371684697bd 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -1104,7 +1104,6 @@ static int sun6i_dsi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; const char *bus_clk_name = NULL; struct sun6i_dsi *dsi; - struct resource *res; void __iomem *base; int ret; @@ -1120,8 +1119,7 @@ static int sun6i_dsi_probe(struct platform_device *pdev) "allwinner,sun6i-a31-mipi-dsi")) bus_clk_name = "bus"; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) { dev_err(dev, "Couldn't map the DSI encoder registers\n"); return PTR_ERR(base); diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h index a55a38ad849c..022cafa6c06c 100644 --- a/drivers/gpu/drm/sun4i/sun8i_csc.h +++ b/drivers/gpu/drm/sun4i/sun8i_csc.h @@ -16,8 +16,8 @@ struct sun8i_mixer; #define CCSC10_OFFSET 0xA0000 #define CCSC11_OFFSET 0xF0000 -#define SUN8I_CSC_CTRL(base) (base + 0x0) -#define SUN8I_CSC_COEFF(base, i) (base + 0x10 + 4 * i) +#define SUN8I_CSC_CTRL(base) ((base) + 0x0) +#define SUN8I_CSC_COEFF(base, i) ((base) + 0x10 + 4 * (i)) #define SUN8I_CSC_CTRL_EN BIT(0) diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index 5b42cf25cc86..f5e8aeaa3cdf 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -337,7 +337,6 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master, struct drm_device *drm = data; struct sun4i_drv *drv = drm->dev_private; struct sun8i_mixer *mixer; - struct resource *res; void __iomem *regs; unsigned int base; int plane_cnt; @@ -390,8 +389,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master, if (!mixer->cfg) return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(dev, res); + regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) return PTR_ERR(regs); diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c index 75d8e60c149d..1b9b8b48f4a7 100644 --- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c +++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c @@ -128,7 +128,6 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master, struct clk_hw_onecell_data *clk_data; struct sun8i_tcon_top *tcon_top; const struct sun8i_tcon_top_quirks *quirks; - struct resource *res; void __iomem *regs; int ret, i; @@ -158,8 +157,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master, return PTR_ERR(tcon_top->bus); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(dev, res); + regs = devm_platform_ioremap_resource(pdev, 0); tcon_top->regs = regs; if (IS_ERR(regs)) return PTR_ERR(regs); diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index cae8b8cbe9dd..c04dda8353fd 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -44,7 +44,7 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer, { uint64_t modifier = framebuffer->modifier; - if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) { + if (fourcc_mod_is_vendor(modifier, NVIDIA)) { if ((modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) == 0) tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_TEGRA; else diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index e00ec3f40ec8..16a1cdc28657 100644 --- 
a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c @@ -113,7 +113,7 @@ static bool tegra_plane_format_mod_supported(struct drm_plane *plane, return true; /* check for the sector layout bit */ - if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) { + if (fourcc_mod_is_vendor(modifier, NVIDIA)) { if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) { if (!tegra_plane_supports_sector_layout(plane)) return false; diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index d31be274a2bd..1ceb93fbdc50 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -44,7 +44,7 @@ config DRM_CIRRUS_QEMU config DRM_GM12U320 tristate "GM12U320 driver for USB projectors" - depends on DRM && USB + depends on DRM && USB && MMU select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER help @@ -53,7 +53,7 @@ config DRM_GM12U320 config DRM_SIMPLEDRM tristate "Simple framebuffer driver" - depends on DRM + depends on DRM && MMU select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER help diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index bb9e02c31946..3b22c0013dbf 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -69,7 +69,17 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, } } -static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) +static inline void ttm_bo_move_to_pinned(struct ttm_buffer_object *bo) +{ + struct ttm_device *bdev = bo->bdev; + + list_move_tail(&bo->lru, &bdev->pinned); + + if (bdev->funcs->del_from_lru_notify) + bdev->funcs->del_from_lru_notify(bo); +} + +static inline void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) { struct ttm_device *bdev = bo->bdev; @@ -98,7 +108,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, dma_resv_assert_held(bo->base.resv); if (bo->pin_count) { - ttm_bo_del_from_lru(bo); + ttm_bo_move_to_pinned(bo); return; } @@ -342,7 +352,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, return ret; } - ttm_bo_del_from_lru(bo); + ttm_bo_move_to_pinned(bo); list_del_init(&bo->ddestroy); spin_unlock(&bo->bdev->lru_lock); ttm_bo_cleanup_memtype_use(bo); @@ -914,57 +924,11 @@ out: return ret; } -static bool ttm_bo_places_compat(const struct ttm_place *places, - unsigned num_placement, - struct ttm_resource *mem, - uint32_t *new_flags) -{ - unsigned i; - - if (mem->placement & TTM_PL_FLAG_TEMPORARY) - return false; - - for (i = 0; i < num_placement; i++) { - const struct ttm_place *heap = &places[i]; - - if ((mem->start < heap->fpfn || - (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn))) - continue; - - *new_flags = heap->flags; - if ((mem->mem_type == heap->mem_type) && - (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) || - (mem->placement & TTM_PL_FLAG_CONTIGUOUS))) - return true; - } - return false; -} - -bool ttm_bo_mem_compat(struct ttm_placement *placement, - struct ttm_resource *mem, - uint32_t *new_flags) -{ - if (ttm_bo_places_compat(placement->placement, placement->num_placement, - mem, new_flags)) - return true; - - if ((placement->busy_placement != placement->placement || - placement->num_busy_placement > placement->num_placement) && - ttm_bo_places_compat(placement->busy_placement, - placement->num_busy_placement, - mem, new_flags)) - return true; - - return false; -} -EXPORT_SYMBOL(ttm_bo_mem_compat); - int ttm_bo_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_operation_ctx *ctx) { int ret; - uint32_t new_flags; dma_resv_assert_held(bo->base.resv); @@ -977,7 +941,7 @@ 
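Both Tegra call sites (fb.c above and plane.c here) replace the open-coded (modifier >> 56) test with fourcc_mod_is_vendor(). The uapi helpers behind it are roughly the following — the vendor id lives in the top byte of the 64-bit modifier:

/* From include/uapi/drm/drm_fourcc.h (abridged). */
#define fourcc_mod_get_vendor(modifier) \
        (((modifier) >> 56) & 0xff)

#define fourcc_mod_is_vendor(modifier, vendor) \
        (fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)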
int ttm_bo_validate(struct ttm_buffer_object *bo, /* * Check whether we need to move buffer. */ - if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) { + if (!ttm_resource_compat(bo->resource, placement)) { ret = ttm_bo_move_buffer(bo, placement, ctx); if (ret) return ret; @@ -1165,7 +1129,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, return ret == -EBUSY ? -ENOSPC : ret; } - ttm_bo_del_from_lru(bo); + ttm_bo_move_to_pinned(bo); /* TODO: Cleanup the locking */ spin_unlock(&bo->bdev->lru_lock); @@ -1224,6 +1188,7 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo) if (bo->ttm == NULL) return; + ttm_tt_unpopulate(bo->bdev, bo->ttm); ttm_tt_destroy(bo->bdev, bo->ttm); bo->ttm = NULL; } diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index 2df59b3c2ea1..be24bb6cefd0 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -220,6 +220,7 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs, INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue); spin_lock_init(&bdev->lru_lock); INIT_LIST_HEAD(&bdev->ddestroy); + INIT_LIST_HEAD(&bdev->pinned); bdev->dev_mapping = mapping; mutex_lock(&ttm_global_mutex); list_add_tail(&bdev->device_list, &glob->device_list); @@ -257,3 +258,50 @@ void ttm_device_fini(struct ttm_device *bdev) ttm_global_release(); } EXPORT_SYMBOL(ttm_device_fini); + +void ttm_device_clear_dma_mappings(struct ttm_device *bdev) +{ + struct ttm_resource_manager *man; + struct ttm_buffer_object *bo; + unsigned int i, j; + + spin_lock(&bdev->lru_lock); + while (!list_empty(&bdev->pinned)) { + bo = list_first_entry(&bdev->pinned, struct ttm_buffer_object, lru); + /* Take ref against racing releases once lru_lock is unlocked */ + if (ttm_bo_get_unless_zero(bo)) { + list_del_init(&bo->lru); + spin_unlock(&bdev->lru_lock); + + if (bo->ttm) + ttm_tt_unpopulate(bo->bdev, bo->ttm); + + ttm_bo_put(bo); + spin_lock(&bdev->lru_lock); + } + } + + for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) { + man = ttm_manager_type(bdev, i); + if (!man || !man->use_tt) + continue; + + for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) { + while (!list_empty(&man->lru[j])) { + bo = list_first_entry(&man->lru[j], struct ttm_buffer_object, lru); + if (ttm_bo_get_unless_zero(bo)) { + list_del_init(&bo->lru); + spin_unlock(&bdev->lru_lock); + + if (bo->ttm) + ttm_tt_unpopulate(bo->bdev, bo->ttm); + + ttm_bo_put(bo); + spin_lock(&bdev->lru_lock); + } + } + } + } + spin_unlock(&bdev->lru_lock); +} +EXPORT_SYMBOL(ttm_device_clear_dma_mappings); diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c index 7fcdef278c74..0037eefe3239 100644 --- a/drivers/gpu/drm/ttm/ttm_module.c +++ b/drivers/gpu/drm/ttm/ttm_module.c @@ -40,6 +40,18 @@ #include "ttm_module.h" /** + * DOC: TTM + * + * TTM is a memory manager for accelerator devices with dedicated memory. + * + * The basic idea is that resources are grouped together in buffer objects of + * certain size and TTM handles lifetime, movement and CPU mappings of those + * objects. + * + * TODO: Add more design background and information here. 
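The new bdev->pinned list is what makes ttm_device_clear_dma_mappings() possible: pinned BOs used to be deleted from the LRU outright, so nothing could find them again at device teardown. Parked on a per-device list instead, they can be unpopulated before the underlying struct device goes away. A hedged usage sketch (foo_device and the remove path are hypothetical; a PCI hot-unplug handler is the intended kind of caller):

static void foo_pci_remove(struct pci_dev *pdev)
{
        struct foo_device *fdev = pci_get_drvdata(pdev);

        drm_dev_unplug(&fdev->drm);
        /* Drop the DMA mappings of all BOs, including pinned ones,
         * so none of them outlive the device. */
        ttm_device_clear_dma_mappings(&fdev->bdev);
        ttm_device_fini(&fdev->bdev);
}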
+ */ + +/** * ttm_prot_from_caching - Modify the page protection according to the * ttm cacing mode * @caching: The ttm caching mode diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index cb38b1a17b09..af1b41369626 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -70,7 +70,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER]; static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER]; static struct ttm_pool_type global_dma32_uncached[MAX_ORDER]; -static struct mutex shrinker_lock; +static spinlock_t shrinker_lock; static struct list_head shrinker_list; static struct shrinker mm_shrinker; @@ -263,9 +263,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool, spin_lock_init(&pt->lock); INIT_LIST_HEAD(&pt->pages); - mutex_lock(&shrinker_lock); + spin_lock(&shrinker_lock); list_add_tail(&pt->shrinker_list, &shrinker_list); - mutex_unlock(&shrinker_lock); + spin_unlock(&shrinker_lock); } /* Remove a pool_type from the global shrinker list and free all pages */ @@ -273,9 +273,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt) { struct page *p; - mutex_lock(&shrinker_lock); + spin_lock(&shrinker_lock); list_del(&pt->shrinker_list); - mutex_unlock(&shrinker_lock); + spin_unlock(&shrinker_lock); while ((p = ttm_pool_type_take(pt))) ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); @@ -313,24 +313,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool, static unsigned int ttm_pool_shrink(void) { struct ttm_pool_type *pt; - unsigned int num_freed; + unsigned int num_pages; struct page *p; - mutex_lock(&shrinker_lock); + spin_lock(&shrinker_lock); pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list); + list_move_tail(&pt->shrinker_list, &shrinker_list); + spin_unlock(&shrinker_lock); p = ttm_pool_type_take(pt); if (p) { ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); - num_freed = 1 << pt->order; + num_pages = 1 << pt->order; } else { - num_freed = 0; + num_pages = 0; } - list_move_tail(&pt->shrinker_list, &shrinker_list); - mutex_unlock(&shrinker_lock); - - return num_freed; + return num_pages; } /* Return the allocation order based for a page */ @@ -530,6 +529,11 @@ void ttm_pool_fini(struct ttm_pool *pool) for (j = 0; j < MAX_ORDER; ++j) ttm_pool_type_fini(&pool->caching[i].orders[j]); } + + /* We removed the pool types from the LRU, but we need to also make sure + * that no shrinker is concurrently freeing pages from the pool. 
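The mutex-to-spinlock switch works because ttm_pool_shrink() now rotates the shrinker list and drops the lock before freeing anything; the only work left inside the critical section is list manipulation, which never sleeps. Condensed, the new body is:

        spin_lock(&shrinker_lock);
        pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
        list_move_tail(&pt->shrinker_list, &shrinker_list);     /* round robin */
        spin_unlock(&shrinker_lock);

        /* The potentially expensive free happens outside the lock; pt
         * stays valid because ttm_pool_type_fini() unlinks a pool type
         * under the same lock and then waits for running shrinkers. */
        p = ttm_pool_type_take(pt);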
+ */ + synchronize_shrinkers(); } /* As long as pages are available make sure to release at least one */ @@ -604,7 +608,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data) { ttm_pool_debugfs_header(m); - mutex_lock(&shrinker_lock); + spin_lock(&shrinker_lock); seq_puts(m, "wc\t:"); ttm_pool_debugfs_orders(global_write_combined, m); seq_puts(m, "uc\t:"); @@ -613,7 +617,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data) ttm_pool_debugfs_orders(global_dma32_write_combined, m); seq_puts(m, "uc 32\t:"); ttm_pool_debugfs_orders(global_dma32_uncached, m); - mutex_unlock(&shrinker_lock); + spin_unlock(&shrinker_lock); ttm_pool_debugfs_footer(m); @@ -640,7 +644,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m) ttm_pool_debugfs_header(m); - mutex_lock(&shrinker_lock); + spin_lock(&shrinker_lock); for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { seq_puts(m, "DMA "); switch (i) { @@ -656,7 +660,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m) } ttm_pool_debugfs_orders(pool->caching[i].orders, m); } - mutex_unlock(&shrinker_lock); + spin_unlock(&shrinker_lock); ttm_pool_debugfs_footer(m); return 0; @@ -693,7 +697,7 @@ int ttm_pool_mgr_init(unsigned long num_pages) if (!page_pool_size) page_pool_size = num_pages; - mutex_init(&shrinker_lock); + spin_lock_init(&shrinker_lock); INIT_LIST_HEAD(&shrinker_list); for (i = 0; i < MAX_ORDER; ++i) { diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index f4b08a8705b3..67d68a4a8640 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -138,7 +138,7 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = { * Initialise a generic range manager for the selected memory type. * The range manager is installed for this device in the type slot. */ -int ttm_range_man_init(struct ttm_device *bdev, +int ttm_range_man_init_nocheck(struct ttm_device *bdev, unsigned type, bool use_tt, unsigned long p_size) { @@ -163,7 +163,7 @@ int ttm_range_man_init(struct ttm_device *bdev, ttm_resource_manager_set_used(man, true); return 0; } -EXPORT_SYMBOL(ttm_range_man_init); +EXPORT_SYMBOL(ttm_range_man_init_nocheck); /** * ttm_range_man_fini @@ -173,7 +173,7 @@ EXPORT_SYMBOL(ttm_range_man_init); * * Remove the generic range manager from a slot and tear it down. 
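The _nocheck suffix looks odd in isolation: the symbols are renamed so the old names can become compile-time-checked inline wrappers on the header side, which this diff does not show. Assuming the usual BUILD_BUG_ON pattern, the ttm_range_manager.h counterpart would look roughly like:

static __always_inline int
ttm_range_man_init(struct ttm_device *bdev, unsigned int type,
                   bool use_tt, unsigned long p_size)
{
        /* Reject constant out-of-range type indices at build time. */
        BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
        return ttm_range_man_init_nocheck(bdev, type, use_tt, p_size);
}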
*/ -int ttm_range_man_fini(struct ttm_device *bdev, +int ttm_range_man_fini_nocheck(struct ttm_device *bdev, unsigned type) { struct ttm_resource_manager *man = ttm_manager_type(bdev, type); @@ -200,4 +200,4 @@ int ttm_range_man_fini(struct ttm_device *bdev, kfree(rman); return 0; } -EXPORT_SYMBOL(ttm_range_man_fini); +EXPORT_SYMBOL(ttm_range_man_fini_nocheck); diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 2431717376e7..035d71332d18 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -67,6 +67,55 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res) } EXPORT_SYMBOL(ttm_resource_free); +static bool ttm_resource_places_compat(struct ttm_resource *res, + const struct ttm_place *places, + unsigned num_placement) +{ + unsigned i; + + if (res->placement & TTM_PL_FLAG_TEMPORARY) + return false; + + for (i = 0; i < num_placement; i++) { + const struct ttm_place *heap = &places[i]; + + if (res->start < heap->fpfn || (heap->lpfn && + (res->start + res->num_pages) > heap->lpfn)) + continue; + + if ((res->mem_type == heap->mem_type) && + (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) || + (res->placement & TTM_PL_FLAG_CONTIGUOUS))) + return true; + } + return false; +} + +/** + * ttm_resource_compat - check if resource is compatible with placement + * + * @res: the resource to check + * @placement: the placement to check against + * + * Returns true if the placement is compatible. + */ +bool ttm_resource_compat(struct ttm_resource *res, + struct ttm_placement *placement) +{ + if (ttm_resource_places_compat(res, placement->placement, + placement->num_placement)) + return true; + + if ((placement->busy_placement != placement->placement || + placement->num_busy_placement > placement->num_placement) && + ttm_resource_places_compat(res, placement->busy_placement, + placement->num_busy_placement)) + return true; + + return false; +} +EXPORT_SYMBOL(ttm_resource_compat); + /** * ttm_resource_manager_init * diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index d5cd8b5dc0bf..dae52433beeb 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -122,17 +122,6 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm) return 0; } -void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm) -{ - ttm_tt_unpopulate(bdev, ttm); - - if (ttm->swap_storage) - fput(ttm->swap_storage); - - ttm->swap_storage = NULL; -} -EXPORT_SYMBOL(ttm_tt_destroy_common); - void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { bdev->funcs->ttm_tt_destroy(bdev, ttm); @@ -167,6 +156,12 @@ EXPORT_SYMBOL(ttm_tt_init); void ttm_tt_fini(struct ttm_tt *ttm) { + WARN_ON(ttm->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED); + + if (ttm->swap_storage) + fput(ttm->swap_storage); + ttm->swap_storage = NULL; + if (ttm->pages) kvfree(ttm->pages); else diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig index 1f497d8f1ae5..c744175c6992 100644 --- a/drivers/gpu/drm/udl/Kconfig +++ b/drivers/gpu/drm/udl/Kconfig @@ -4,6 +4,7 @@ config DRM_UDL depends on DRM depends on USB depends on USB_ARCH_HAS_HCD + depends on MMU select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER help diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig index 9a5c44606337..e973ec487484 100644 --- a/drivers/gpu/drm/v3d/Kconfig +++ b/drivers/gpu/drm/v3d/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_V3D tristate "Broadcom V3D 3.x and newer" - 
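With ttm_tt_destroy_common() gone, the core unpopulates in ttm_bo_tt_destroy() (see the ttm_bo.c hunk above) and ttm_tt_fini() now releases the swap backing file itself, warning if a still-populated TT reaches it. A driver's destroy hook shrinks accordingly; a hypothetical foo driver, mirroring the vmwgfx hunk further down:

static void foo_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        /* No ttm_tt_destroy_common() call anymore: the core already
         * unpopulated the TT, and fini() drops swap_storage. */
        ttm_tt_fini(ttm);
        kfree(ttm);
}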
depends on ARCH_BCM || ARCH_BCMSTB || COMPILE_TEST + depends on ARCH_BCM || ARCH_BRCMSTB || COMPILE_TEST depends on DRM depends on COMMON_CLK depends on MMU diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 270134779073..b900a050d5e2 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -234,11 +234,6 @@ struct v3d_job { struct drm_gem_object **bo; u32 bo_count; - /* Array of struct dma_fence * to block on before submitting this job. - */ - struct xarray deps; - unsigned long last_dep; - /* v3d fence to be signaled by IRQ handler when the job is complete. */ struct dma_fence *irq_fence; @@ -379,6 +374,7 @@ int v3d_submit_csd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +void v3d_job_cleanup(struct v3d_job *job); void v3d_job_put(struct v3d_job *job); void v3d_reset(struct v3d_dev *v3d); void v3d_invalidate_caches(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 5689da118197..a3529809d547 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -259,8 +259,8 @@ v3d_lock_bo_reservations(struct v3d_job *job, return ret; for (i = 0; i < job->bo_count; i++) { - ret = drm_gem_fence_array_add_implicit(&job->deps, - job->bo[i], true); + ret = drm_sched_job_add_implicit_dependencies(&job->base, + job->bo[i], true); if (ret) { drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); @@ -356,8 +356,6 @@ static void v3d_job_free(struct kref *ref) { struct v3d_job *job = container_of(ref, struct v3d_job, refcount); - unsigned long index; - struct dma_fence *fence; int i; for (i = 0; i < job->bo_count; i++) { @@ -366,11 +364,6 @@ v3d_job_free(struct kref *ref) } kvfree(job->bo); - xa_for_each(&job->deps, index, fence) { - dma_fence_put(fence); - } - xa_destroy(&job->deps); - dma_fence_put(job->irq_fence); dma_fence_put(job->done_fence); @@ -397,6 +390,12 @@ v3d_render_job_free(struct kref *ref) v3d_job_free(ref); } +void v3d_job_cleanup(struct v3d_job *job) +{ + drm_sched_job_cleanup(&job->base); + v3d_job_put(job); +} + void v3d_job_put(struct v3d_job *job) { kref_put(&job->refcount, job->free); @@ -438,9 +437,10 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data, static int v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, struct v3d_job *job, void (*free)(struct kref *ref), - u32 in_sync) + u32 in_sync, enum v3d_queue queue) { struct dma_fence *in_fence = NULL; + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; int ret; job->v3d = v3d; @@ -450,44 +450,40 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, if (ret < 0) return ret; - xa_init_flags(&job->deps, XA_FLAGS_ALLOC); + ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], + v3d_priv); + if (ret) + goto fail; ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence); if (ret == -EINVAL) - goto fail; + goto fail_job; - ret = drm_gem_fence_array_add(&job->deps, in_fence); + ret = drm_sched_job_add_dependency(&job->base, in_fence); if (ret) - goto fail; + goto fail_job; kref_init(&job->refcount); return 0; +fail_job: + drm_sched_job_cleanup(&job->base); fail: - xa_destroy(&job->deps); pm_runtime_put_autosuspend(v3d->drm.dev); return ret; } -static int -v3d_push_job(struct v3d_file_priv *v3d_priv, - struct v3d_job *job, enum v3d_queue queue) +static void +v3d_push_job(struct v3d_job *job) { - int ret; - - ret = 
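The per-driver deps xarray is replaced by the scheduler core's own dependency tracking. Two entry points matter here: drm_sched_job_add_dependency(), which takes ownership of the fence reference it is handed (hence the dma_fence_get() at the call sites below), and drm_sched_job_add_implicit_dependencies(), which collects the implicit fences from a GEM object's reservation. A sketch of both on a hypothetical helper:

static int foo_add_deps(struct v3d_job *job, struct drm_gem_object *bo,
                        struct dma_fence *in_fence)
{
        int ret;

        /* Implicit sync: all fences attached to the BO (write = true). */
        ret = drm_sched_job_add_implicit_dependencies(&job->base, bo, true);
        if (ret)
                return ret;

        /* Explicit sync: the scheduler consumes this reference. */
        return drm_sched_job_add_dependency(&job->base,
                                            dma_fence_get(in_fence));
}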
drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], - v3d_priv); - if (ret) - return ret; + drm_sched_job_arm(&job->base); job->done_fence = dma_fence_get(&job->base.s_fence->finished); /* put by scheduler job completion */ kref_get(&job->refcount); - drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]); - - return 0; + drm_sched_entity_push_job(&job->base); } static void @@ -562,7 +558,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, INIT_LIST_HEAD(&render->unref_list); ret = v3d_job_init(v3d, file_priv, &render->base, - v3d_render_job_free, args->in_sync_rcl); + v3d_render_job_free, args->in_sync_rcl, V3D_RENDER); if (ret) { kfree(render); return ret; @@ -576,7 +572,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, } ret = v3d_job_init(v3d, file_priv, &bin->base, - v3d_job_free, args->in_sync_bcl); + v3d_job_free, args->in_sync_bcl, V3D_BIN); if (ret) { v3d_job_put(&render->base); kfree(bin); @@ -598,7 +594,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, goto fail; } - ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0); + ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN); if (ret) { kfree(clean_job); clean_job = NULL; @@ -633,31 +629,26 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, if (bin) { bin->base.perfmon = render->base.perfmon; v3d_perfmon_get(bin->base.perfmon); - ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN); - if (ret) - goto fail_unreserve; + v3d_push_job(&bin->base); - ret = drm_gem_fence_array_add(&render->base.deps, - dma_fence_get(bin->base.done_fence)); + ret = drm_sched_job_add_dependency(&render->base.base, + dma_fence_get(bin->base.done_fence)); if (ret) goto fail_unreserve; } - ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER); - if (ret) - goto fail_unreserve; + v3d_push_job(&render->base); if (clean_job) { struct dma_fence *render_fence = dma_fence_get(render->base.done_fence); - ret = drm_gem_fence_array_add(&clean_job->deps, render_fence); + ret = drm_sched_job_add_dependency(&clean_job->base, + render_fence); if (ret) goto fail_unreserve; clean_job->perfmon = render->base.perfmon; v3d_perfmon_get(clean_job->perfmon); - ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN); - if (ret) - goto fail_unreserve; + v3d_push_job(clean_job); } mutex_unlock(&v3d->sched_lock); @@ -682,10 +673,10 @@ fail_unreserve: last_job->bo_count, &acquire_ctx); fail: if (bin) - v3d_job_put(&bin->base); - v3d_job_put(&render->base); + v3d_job_cleanup(&bin->base); + v3d_job_cleanup(&render->base); if (clean_job) - v3d_job_put(clean_job); + v3d_job_cleanup(clean_job); return ret; } @@ -704,7 +695,6 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct v3d_dev *v3d = to_v3d_dev(dev); - struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct drm_v3d_submit_tfu *args = data; struct v3d_tfu_job *job; struct ww_acquire_ctx acquire_ctx; @@ -717,7 +707,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, return -ENOMEM; ret = v3d_job_init(v3d, file_priv, &job->base, - v3d_job_free, args->in_sync); + v3d_job_free, args->in_sync, V3D_TFU); if (ret) { kfree(job); return ret; @@ -761,9 +751,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, goto fail; mutex_lock(&v3d->sched_lock); - ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU); - if (ret) - goto fail_unreserve; + v3d_push_job(&job->base); mutex_unlock(&v3d->sched_lock); v3d_attach_fences_and_unlock_reservation(file_priv, @@ -775,12 +763,8 @@ 
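v3d_push_job() can no longer fail because the fallible part, drm_sched_job_init(), moved into v3d_job_init(); drm_sched_job_arm() is the point of no return that publishes the job's finished fence, and any failure between init and arm must unwind through drm_sched_job_cleanup(), which is exactly what the new v3d_job_cleanup() wraps. The resulting submit skeleton:

        ret = drm_sched_job_init(&job->base, entity, owner);
        if (ret)
                return ret;
        /* ... reservations and dependencies: everything fallible ... */

        drm_sched_job_arm(&job->base);          /* no error paths past here */
        job->done_fence = dma_fence_get(&job->base.s_fence->finished);
        drm_sched_entity_push_job(&job->base);  /* entity was bound at init */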
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, return 0; -fail_unreserve: - mutex_unlock(&v3d->sched_lock); - drm_gem_unlock_reservations(job->base.bo, job->base.bo_count, - &acquire_ctx); fail: - v3d_job_put(&job->base); + v3d_job_cleanup(&job->base); return ret; } @@ -818,7 +802,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, return -ENOMEM; ret = v3d_job_init(v3d, file_priv, &job->base, - v3d_job_free, args->in_sync); + v3d_job_free, args->in_sync, V3D_CSD); if (ret) { kfree(job); return ret; @@ -831,7 +815,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, return -ENOMEM; } - ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0); + ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN); if (ret) { v3d_job_put(&job->base); kfree(clean_job); @@ -859,18 +843,14 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, } mutex_lock(&v3d->sched_lock); - ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD); - if (ret) - goto fail_unreserve; + v3d_push_job(&job->base); - ret = drm_gem_fence_array_add(&clean_job->deps, - dma_fence_get(job->base.done_fence)); + ret = drm_sched_job_add_dependency(&clean_job->base, + dma_fence_get(job->base.done_fence)); if (ret) goto fail_unreserve; - ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN); - if (ret) - goto fail_unreserve; + v3d_push_job(clean_job); mutex_unlock(&v3d->sched_lock); v3d_attach_fences_and_unlock_reservation(file_priv, @@ -889,8 +869,8 @@ fail_unreserve: drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count, &acquire_ctx); fail: - v3d_job_put(&job->base); - v3d_job_put(clean_job); + v3d_job_cleanup(&job->base); + v3d_job_cleanup(clean_job); return ret; } diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index dd7fcc36d726..e0cb7d0697a7 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -13,7 +13,7 @@ * jobs when bulk background jobs are queued up, we submit a new job * to the HW only when it has completed the last one, instead of * filling up the CT[01]Q FIFOs with jobs. Similarly, we use - * v3d_job_dependency() to manage the dependency between bin and + * drm_sched_job_add_dependency() to manage the dependency between bin and * render, instead of having the clients submit jobs using the HW's * semaphores to interlock between them. */ @@ -55,12 +55,11 @@ to_csd_job(struct drm_sched_job *sched_job) } static void -v3d_job_free(struct drm_sched_job *sched_job) +v3d_sched_job_free(struct drm_sched_job *sched_job) { struct v3d_job *job = to_v3d_job(sched_job); - drm_sched_job_cleanup(sched_job); - v3d_job_put(job); + v3d_job_cleanup(job); } static void @@ -73,28 +72,6 @@ v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) v3d_perfmon_start(v3d, job->perfmon); } -/* - * Returns the fences that the job depends on, one by one. - * - * If placed in the scheduler's .dependency method, the corresponding - * .run_job won't be called until all of them have been signaled. - */ -static struct dma_fence * -v3d_job_dependency(struct drm_sched_job *sched_job, - struct drm_sched_entity *s_entity) -{ - struct v3d_job *job = to_v3d_job(sched_job); - - /* XXX: Wait on a fence for switching the GMP if necessary, - * and then do so. 
- */ - - if (!xa_empty(&job->deps)) - return xa_erase(&job->deps, job->last_dep++); - - return NULL; -} - static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job) { struct v3d_bin_job *job = to_bin_job(sched_job); @@ -373,38 +350,33 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job) } static const struct drm_sched_backend_ops v3d_bin_sched_ops = { - .dependency = v3d_job_dependency, .run_job = v3d_bin_job_run, .timedout_job = v3d_bin_job_timedout, - .free_job = v3d_job_free, + .free_job = v3d_sched_job_free, }; static const struct drm_sched_backend_ops v3d_render_sched_ops = { - .dependency = v3d_job_dependency, .run_job = v3d_render_job_run, .timedout_job = v3d_render_job_timedout, - .free_job = v3d_job_free, + .free_job = v3d_sched_job_free, }; static const struct drm_sched_backend_ops v3d_tfu_sched_ops = { - .dependency = v3d_job_dependency, .run_job = v3d_tfu_job_run, .timedout_job = v3d_generic_job_timedout, - .free_job = v3d_job_free, + .free_job = v3d_sched_job_free, }; static const struct drm_sched_backend_ops v3d_csd_sched_ops = { - .dependency = v3d_job_dependency, .run_job = v3d_csd_job_run, .timedout_job = v3d_csd_job_timedout, - .free_job = v3d_job_free + .free_job = v3d_sched_job_free }; static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = { - .dependency = v3d_job_dependency, .run_job = v3d_cache_clean_job_run, .timedout_job = v3d_generic_job_timedout, - .free_job = v3d_job_free + .free_job = v3d_sched_job_free }; int diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index f6c16c5aee68..16abc3a3d601 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -50,13 +50,11 @@ #define DRIVER_PATCHLEVEL 0 /* Helper function for mapping the regs on a platform device. 
*/ -void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index) +void __iomem *vc4_ioremap_regs(struct platform_device *pdev, int index) { - struct resource *res; void __iomem *map; - res = platform_get_resource(dev, IORESOURCE_MEM, index); - map = devm_ioremap_resource(&dev->dev, res); + map = devm_platform_ioremap_resource(pdev, index); if (IS_ERR(map)) return map; diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index bf38a7e319d1..a87eafa89e9f 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -38,6 +38,7 @@ #include <drm/drm_drv.h> #include <drm/drm_file.h> +#include <drm/drm_gem_shmem_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> #include <drm/drm_prime.h> @@ -50,87 +51,11 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 -static const struct drm_gem_object_funcs vgem_gem_object_funcs; - static struct vgem_device { struct drm_device drm; struct platform_device *platform; } *vgem_device; -static void vgem_gem_free_object(struct drm_gem_object *obj) -{ - struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj); - - kvfree(vgem_obj->pages); - mutex_destroy(&vgem_obj->pages_lock); - - if (obj->import_attach) - drm_prime_gem_destroy(obj, vgem_obj->table); - - drm_gem_object_release(obj); - kfree(vgem_obj); -} - -static vm_fault_t vgem_gem_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_vgem_gem_object *obj = vma->vm_private_data; - /* We don't use vmf->pgoff since that has the fake offset */ - unsigned long vaddr = vmf->address; - vm_fault_t ret = VM_FAULT_SIGBUS; - loff_t num_pages; - pgoff_t page_offset; - page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT; - - num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE); - - if (page_offset >= num_pages) - return VM_FAULT_SIGBUS; - - mutex_lock(&obj->pages_lock); - if (obj->pages) { - get_page(obj->pages[page_offset]); - vmf->page = obj->pages[page_offset]; - ret = 0; - } - mutex_unlock(&obj->pages_lock); - if (ret) { - struct page *page; - - page = shmem_read_mapping_page( - file_inode(obj->base.filp)->i_mapping, - page_offset); - if (!IS_ERR(page)) { - vmf->page = page; - ret = 0; - } else switch (PTR_ERR(page)) { - case -ENOSPC: - case -ENOMEM: - ret = VM_FAULT_OOM; - break; - case -EBUSY: - ret = VM_FAULT_RETRY; - break; - case -EFAULT: - case -EINVAL: - ret = VM_FAULT_SIGBUS; - break; - default: - WARN_ON(PTR_ERR(page)); - ret = VM_FAULT_SIGBUS; - break; - } - - } - return ret; -} - -static const struct vm_operations_struct vgem_gem_vm_ops = { - .fault = vgem_gem_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static int vgem_open(struct drm_device *dev, struct drm_file *file) { struct vgem_file *vfile; @@ -159,266 +84,30 @@ static void vgem_postclose(struct drm_device *dev, struct drm_file *file) kfree(vfile); } -static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev, - unsigned long size) -{ - struct drm_vgem_gem_object *obj; - int ret; - - obj = kzalloc(sizeof(*obj), GFP_KERNEL); - if (!obj) - return ERR_PTR(-ENOMEM); - - obj->base.funcs = &vgem_gem_object_funcs; - - ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE)); - if (ret) { - kfree(obj); - return ERR_PTR(ret); - } - - mutex_init(&obj->pages_lock); - - return obj; -} - -static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj) -{ - drm_gem_object_release(&obj->base); - kfree(obj); -} - -static struct drm_gem_object *vgem_gem_create(struct drm_device *dev, - struct drm_file *file, - 
unsigned int *handle, - unsigned long size) -{ - struct drm_vgem_gem_object *obj; - int ret; - - obj = __vgem_gem_create(dev, size); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - ret = drm_gem_handle_create(file, &obj->base, handle); - if (ret) { - drm_gem_object_put(&obj->base); - return ERR_PTR(ret); - } - - return &obj->base; -} - -static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, - struct drm_mode_create_dumb *args) -{ - struct drm_gem_object *gem_object; - u64 pitch, size; - - pitch = args->width * DIV_ROUND_UP(args->bpp, 8); - size = args->height * pitch; - if (size == 0) - return -EINVAL; - - gem_object = vgem_gem_create(dev, file, &args->handle, size); - if (IS_ERR(gem_object)) - return PTR_ERR(gem_object); - - args->size = gem_object->size; - args->pitch = pitch; - - drm_gem_object_put(gem_object); - - DRM_DEBUG("Created object of size %llu\n", args->size); - - return 0; -} - static struct drm_ioctl_desc vgem_ioctls[] = { DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW), }; -static int vgem_mmap(struct file *filp, struct vm_area_struct *vma) -{ - unsigned long flags = vma->vm_flags; - int ret; - - ret = drm_gem_mmap(filp, vma); - if (ret) - return ret; - - /* Keep the WC mmaping set by drm_gem_mmap() but our pages - * are ordinary and not special. - */ - vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP; - return 0; -} +DEFINE_DRM_GEM_FOPS(vgem_driver_fops); -static const struct file_operations vgem_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .mmap = vgem_mmap, - .poll = drm_poll, - .read = drm_read, - .unlocked_ioctl = drm_ioctl, - .compat_ioctl = drm_compat_ioctl, - .release = drm_release, -}; - -static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo) -{ - mutex_lock(&bo->pages_lock); - if (bo->pages_pin_count++ == 0) { - struct page **pages; - - pages = drm_gem_get_pages(&bo->base); - if (IS_ERR(pages)) { - bo->pages_pin_count--; - mutex_unlock(&bo->pages_lock); - return pages; - } - - bo->pages = pages; - } - mutex_unlock(&bo->pages_lock); - - return bo->pages; -} - -static void vgem_unpin_pages(struct drm_vgem_gem_object *bo) -{ - mutex_lock(&bo->pages_lock); - if (--bo->pages_pin_count == 0) { - drm_gem_put_pages(&bo->base, bo->pages, true, true); - bo->pages = NULL; - } - mutex_unlock(&bo->pages_lock); -} - -static int vgem_prime_pin(struct drm_gem_object *obj) +static struct drm_gem_object *vgem_gem_create_object(struct drm_device *dev, size_t size) { - struct drm_vgem_gem_object *bo = to_vgem_bo(obj); - long n_pages = obj->size >> PAGE_SHIFT; - struct page **pages; + struct drm_gem_shmem_object *obj; - pages = vgem_pin_pages(bo); - if (IS_ERR(pages)) - return PTR_ERR(pages); + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return NULL; - /* Flush the object from the CPU cache so that importers can rely - * on coherent indirect access via the exported dma-address. + /* + * vgem doesn't have any begin/end cpu access ioctls, therefore must use + * coherent memory or dma-buf sharing just wont work. 
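The hand-rolled vgem_driver_fops and its custom vgem_mmap() are gone; the shmem helper's mmap and fault handling now cover the write-combined mapping and VM flag setup that vgem used to do by hand. DEFINE_DRM_GEM_FOPS(vgem_driver_fops) expands to roughly:

static const struct file_operations vgem_driver_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .compat_ioctl   = drm_compat_ioctl,
        .poll           = drm_poll,
        .read           = drm_read,
        .llseek         = noop_llseek,
        .mmap           = drm_gem_mmap,
};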
*/ - drm_clflush_pages(pages, n_pages); - - return 0; -} - -static void vgem_prime_unpin(struct drm_gem_object *obj) -{ - struct drm_vgem_gem_object *bo = to_vgem_bo(obj); - - vgem_unpin_pages(bo); -} - -static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj) -{ - struct drm_vgem_gem_object *bo = to_vgem_bo(obj); - - return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT); -} - -static struct drm_gem_object* vgem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) -{ - struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm); - - return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev); -} - -static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, struct sg_table *sg) -{ - struct drm_vgem_gem_object *obj; - int npages; - - obj = __vgem_gem_create(dev, attach->dmabuf->size); - if (IS_ERR(obj)) - return ERR_CAST(obj); + obj->map_wc = true; - npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE; - - obj->table = sg; - obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); - if (!obj->pages) { - __vgem_gem_destroy(obj); - return ERR_PTR(-ENOMEM); - } - - obj->pages_pin_count++; /* perma-pinned */ - drm_prime_sg_to_page_array(obj->table, obj->pages, npages); return &obj->base; } -static int vgem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) -{ - struct drm_vgem_gem_object *bo = to_vgem_bo(obj); - long n_pages = obj->size >> PAGE_SHIFT; - struct page **pages; - void *vaddr; - - pages = vgem_pin_pages(bo); - if (IS_ERR(pages)) - return PTR_ERR(pages); - - vaddr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL)); - if (!vaddr) - return -ENOMEM; - dma_buf_map_set_vaddr(map, vaddr); - - return 0; -} - -static void vgem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) -{ - struct drm_vgem_gem_object *bo = to_vgem_bo(obj); - - vunmap(map->vaddr); - vgem_unpin_pages(bo); -} - -static int vgem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma) -{ - int ret; - - if (obj->size < vma->vm_end - vma->vm_start) - return -EINVAL; - - if (!obj->filp) - return -ENODEV; - - ret = call_mmap(obj->filp, vma); - if (ret) - return ret; - - vma_set_file(vma, obj->filp); - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; - vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); - - return 0; -} - -static const struct drm_gem_object_funcs vgem_gem_object_funcs = { - .free = vgem_gem_free_object, - .pin = vgem_prime_pin, - .unpin = vgem_prime_unpin, - .get_sg_table = vgem_prime_get_sg_table, - .vmap = vgem_prime_vmap, - .vunmap = vgem_prime_vunmap, - .vm_ops = &vgem_gem_vm_ops, -}; - static const struct drm_driver vgem_driver = { .driver_features = DRIVER_GEM | DRIVER_RENDER, .open = vgem_open, @@ -427,13 +116,8 @@ static const struct drm_driver vgem_driver = { .num_ioctls = ARRAY_SIZE(vgem_ioctls), .fops = &vgem_driver_fops, - .dumb_create = vgem_gem_dumb_create, - - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_import = vgem_prime_import, - .gem_prime_import_sg_table = vgem_prime_import_sg_table, - .gem_prime_mmap = vgem_prime_mmap, + DRM_GEM_SHMEM_DRIVER_OPS, + .gem_create_object = vgem_gem_create_object, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index d4e610a44e12..0c4810982530 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ 
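After the conversion, vgem keeps exactly one GEM hook, gem_create_object(), solely to set map_wc: with no begin/end CPU-access ioctls, the driver must hand out coherent (write-combined) mappings or dma-buf sharing breaks. Everything else comes from DRM_GEM_SHMEM_DRIVER_OPS which, per drm_gem_shmem_helper.h (quoted from memory, so treat as approximate), expands to:

#define DRM_GEM_SHMEM_DRIVER_OPS \
        .prime_handle_to_fd        = drm_gem_prime_handle_to_fd, \
        .prime_fd_to_handle        = drm_gem_prime_fd_to_handle, \
        .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
        .gem_prime_mmap            = drm_gem_prime_mmap, \
        .dumb_create               = drm_gem_shmem_dumb_create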
b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -26,6 +26,7 @@ #ifndef VIRTIO_DRV_H #define VIRTIO_DRV_H +#include <linux/dma-direction.h> #include <linux/virtio.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> @@ -459,4 +460,11 @@ bool virtio_gpu_is_vram(struct virtio_gpu_object *bo); int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev, struct virtio_gpu_object_params *params, struct virtio_gpu_object **bo_ptr); +struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo, + struct device *dev, + enum dma_data_direction dir); +void virtio_gpu_vram_unmap_dma_buf(struct device *dev, + struct sg_table *sgt, + enum dma_data_direction dir); + #endif diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index e45dbf14b307..55d80b77d9b0 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -43,13 +43,41 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf, return 0; } +static struct sg_table * +virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction dir) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + + if (virtio_gpu_is_vram(bo)) + return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir); + + return drm_gem_map_dma_buf(attach, dir); +} + +static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction dir) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + + if (virtio_gpu_is_vram(bo)) { + virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir); + return; + } + + drm_gem_unmap_dma_buf(attach, sgt, dir); +} + static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = { .ops = { .cache_sgt_mapping = true, .attach = virtio_dma_buf_attach, .detach = drm_gem_map_detach, - .map_dma_buf = drm_gem_map_dma_buf, - .unmap_dma_buf = drm_gem_unmap_dma_buf, + .map_dma_buf = virtgpu_gem_map_dma_buf, + .unmap_dma_buf = virtgpu_gem_unmap_dma_buf, .release = drm_gem_dmabuf_release, .mmap = drm_gem_dmabuf_mmap, .vmap = drm_gem_dmabuf_vmap, diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c index 5cc34e7330fa..6b45b0429fef 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vram.c +++ b/drivers/gpu/drm/virtio/virtgpu_vram.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 #include "virtgpu_drv.h" +#include <linux/dma-mapping.h> + static void virtio_gpu_vram_free(struct drm_gem_object *obj) { struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); @@ -64,6 +66,65 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj, return ret; } +struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo, + struct device *dev, + enum dma_data_direction dir) +{ + struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private; + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); + struct sg_table *sgt; + dma_addr_t addr; + int ret; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); + + if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) { + // Virtio devices can access the dma-buf via its UUID. Return a stub + // sg_table so the dma-buf API still works. 
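The VRAM path that continues below cannot use the regular GEM helpers because a VRAM carve-out has no struct page backing: virtio_gpu_vram_map_dma_buf() maps the raw region with dma_map_resource() and builds a single-entry sg_table carrying DMA information only (note the sg_set_page() with a NULL page in the remainder of the hunk). An importer must therefore stick to the DMA side of the table; a sketch:

        struct scatterlist *sg;
        int i;

        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t addr = sg_dma_address(sg);
                unsigned int len = sg_dma_len(sg);

                /* Program the device with addr/len; sg_page(sg) is NULL
                 * for a dma_map_resource()-built entry, never touch it. */
        }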
+ if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) { + ret = -EIO; + goto out; + } + return sgt; + } + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (ret) + goto out; + + addr = dma_map_resource(dev, vram->vram_node.start, + vram->vram_node.size, dir, + DMA_ATTR_SKIP_CPU_SYNC); + ret = dma_mapping_error(dev, addr); + if (ret) + goto out; + + sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0); + sg_dma_address(sgt->sgl) = addr; + sg_dma_len(sgt->sgl) = vram->vram_node.size; + + return sgt; +out: + sg_free_table(sgt); + kfree(sgt); + return ERR_PTR(ret); +} + +void virtio_gpu_vram_unmap_dma_buf(struct device *dev, + struct sg_table *sgt, + enum dma_data_direction dir) +{ + if (sgt->nents) { + dma_unmap_resource(dev, sg_dma_address(sgt->sgl), + sg_dma_len(sgt->sgl), dir, + DMA_ATTR_SKIP_CPU_SYNC); + } + sg_free_table(sgt); + kfree(sgt); +} + static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = { .open = virtio_gpu_gem_object_open, .close = virtio_gpu_gem_object_close, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 9e3e1429db94..fd007f1c1776 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -94,7 +94,6 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv, struct ttm_operation_ctx ctx = {interruptible, false }; struct ttm_buffer_object *bo = &buf->base; int ret; - uint32_t new_flags; vmw_execbuf_release_pinned_bo(dev_priv); @@ -103,8 +102,8 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv, goto err; if (buf->base.pin_count > 0) - ret = ttm_bo_mem_compat(placement, bo->resource, - &new_flags) == true ? 0 : -EINVAL; + ret = ttm_resource_compat(bo->resource, placement) + ? 0 : -EINVAL; else ret = ttm_bo_validate(bo, placement, &ctx); @@ -136,7 +135,6 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv, struct ttm_operation_ctx ctx = {interruptible, false }; struct ttm_buffer_object *bo = &buf->base; int ret; - uint32_t new_flags; vmw_execbuf_release_pinned_bo(dev_priv); @@ -145,8 +143,8 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv, goto err; if (buf->base.pin_count > 0) { - ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource, - &new_flags) == true ? 0 : -EINVAL; + ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement) + ? 0 : -EINVAL; goto out_unreserve; } @@ -208,7 +206,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv, struct ttm_placement placement; struct ttm_place place; int ret = 0; - uint32_t new_flags; place = vmw_vram_placement.placement[0]; place.lpfn = bo->resource->num_pages; @@ -236,8 +233,8 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv, } if (buf->base.pin_count > 0) - ret = ttm_bo_mem_compat(&placement, bo->resource, - &new_flags) == true ? 0 : -EINVAL; + ret = ttm_resource_compat(bo->resource, &placement) + ? 
0 : -EINVAL; else ret = ttm_bo_validate(bo, &placement, &ctx); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 8b8991e3ed2d..e899a936a42a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -522,14 +522,8 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, dma_ttm); - vmw_ttm_unbind(bdev, ttm); - ttm_tt_destroy_common(bdev, ttm); vmw_ttm_unmap_dma(vmw_be); - if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) - ttm_tt_fini(&vmw_be->dma_ttm); - else - ttm_tt_fini(ttm); - + ttm_tt_fini(ttm); if (vmw_be->mob) vmw_mob_destroy(vmw_be->mob); @@ -574,6 +568,8 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev, dma_ttm); unsigned int i; + vmw_ttm_unbind(bdev, ttm); + if (vmw_tt->mob) { vmw_mob_destroy(vmw_tt->mob); vmw_tt->mob = NULL; diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig deleted file mode 100644 index aa8594190b50..000000000000 --- a/drivers/gpu/drm/zte/Kconfig +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -config DRM_ZTE - tristate "DRM Support for ZTE SoCs" - depends on DRM && ARCH_ZX - select DRM_KMS_CMA_HELPER - select DRM_KMS_HELPER - select SND_SOC_HDMI_CODEC if SND_SOC - select VIDEOMODE_HELPERS - help - Choose this option to enable DRM on ZTE ZX SoCs. diff --git a/drivers/gpu/drm/zte/Makefile b/drivers/gpu/drm/zte/Makefile deleted file mode 100644 index b6d966d849dd..000000000000 --- a/drivers/gpu/drm/zte/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -zxdrm-y := \ - zx_drm_drv.o \ - zx_hdmi.o \ - zx_plane.o \ - zx_tvenc.o \ - zx_vga.o \ - zx_vou.o - -obj-$(CONFIG_DRM_ZTE) += zxdrm.o diff --git a/drivers/gpu/drm/zte/zx_common_regs.h b/drivers/gpu/drm/zte/zx_common_regs.h deleted file mode 100644 index b7b996db129d..000000000000 --- a/drivers/gpu/drm/zte/zx_common_regs.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2017 Sanechips Technology Co., Ltd. - * Copyright 2017 Linaro Ltd. - */ - -#ifndef __ZX_COMMON_REGS_H__ -#define __ZX_COMMON_REGS_H__ - -/* CSC registers */ -#define CSC_CTRL0 0x30 -#define CSC_COV_MODE_SHIFT 16 -#define CSC_COV_MODE_MASK (0xffff << CSC_COV_MODE_SHIFT) -#define CSC_BT601_IMAGE_RGB2YCBCR 0 -#define CSC_BT601_IMAGE_YCBCR2RGB 1 -#define CSC_BT601_VIDEO_RGB2YCBCR 2 -#define CSC_BT601_VIDEO_YCBCR2RGB 3 -#define CSC_BT709_IMAGE_RGB2YCBCR 4 -#define CSC_BT709_IMAGE_YCBCR2RGB 5 -#define CSC_BT709_VIDEO_RGB2YCBCR 6 -#define CSC_BT709_VIDEO_YCBCR2RGB 7 -#define CSC_BT2020_IMAGE_RGB2YCBCR 8 -#define CSC_BT2020_IMAGE_YCBCR2RGB 9 -#define CSC_BT2020_VIDEO_RGB2YCBCR 10 -#define CSC_BT2020_VIDEO_YCBCR2RGB 11 -#define CSC_WORK_ENABLE BIT(0) - -#endif /* __ZX_COMMON_REGS_H__ */ diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c deleted file mode 100644 index 064056503ebb..000000000000 --- a/drivers/gpu/drm/zte/zx_drm_drv.c +++ /dev/null @@ -1,184 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright 2016 Linaro Ltd. - * Copyright 2016 ZTE Corporation. 
- */ - -#include <linux/clk.h> -#include <linux/component.h> -#include <linux/list.h> -#include <linux/module.h> -#include <linux/of_graph.h> -#include <linux/of_platform.h> -#include <linux/spinlock.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_crtc.h> -#include <drm/drm_drv.h> -#include <drm/drm_fb_cma_helper.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_gem_cma_helper.h> -#include <drm/drm_gem_framebuffer_helper.h> -#include <drm/drm_of.h> -#include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> - -#include "zx_drm_drv.h" -#include "zx_vou.h" - -static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = { - .fb_create = drm_gem_fb_create, - .atomic_check = drm_atomic_helper_check, - .atomic_commit = drm_atomic_helper_commit, -}; - -DEFINE_DRM_GEM_CMA_FOPS(zx_drm_fops); - -static const struct drm_driver zx_drm_driver = { - .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, - DRM_GEM_CMA_DRIVER_OPS, - .fops = &zx_drm_fops, - .name = "zx-vou", - .desc = "ZTE VOU Controller DRM", - .date = "20160811", - .major = 1, - .minor = 0, -}; - -static int zx_drm_bind(struct device *dev) -{ - struct drm_device *drm; - int ret; - - drm = drm_dev_alloc(&zx_drm_driver, dev); - if (IS_ERR(drm)) - return PTR_ERR(drm); - - dev_set_drvdata(dev, drm); - - drm_mode_config_init(drm); - drm->mode_config.min_width = 16; - drm->mode_config.min_height = 16; - drm->mode_config.max_width = 4096; - drm->mode_config.max_height = 4096; - drm->mode_config.funcs = &zx_drm_mode_config_funcs; - - ret = component_bind_all(dev, drm); - if (ret) { - DRM_DEV_ERROR(dev, "failed to bind all components: %d\n", ret); - goto out_unregister; - } - - ret = drm_vblank_init(drm, drm->mode_config.num_crtc); - if (ret < 0) { - DRM_DEV_ERROR(dev, "failed to init vblank: %d\n", ret); - goto out_unbind; - } - - drm_mode_config_reset(drm); - drm_kms_helper_poll_init(drm); - - ret = drm_dev_register(drm, 0); - if (ret) - goto out_poll_fini; - - drm_fbdev_generic_setup(drm, 32); - - return 0; - -out_poll_fini: - drm_kms_helper_poll_fini(drm); - drm_mode_config_cleanup(drm); -out_unbind: - component_unbind_all(dev, drm); -out_unregister: - dev_set_drvdata(dev, NULL); - drm_dev_put(drm); - return ret; -} - -static void zx_drm_unbind(struct device *dev) -{ - struct drm_device *drm = dev_get_drvdata(dev); - - drm_dev_unregister(drm); - drm_kms_helper_poll_fini(drm); - drm_atomic_helper_shutdown(drm); - drm_mode_config_cleanup(drm); - component_unbind_all(dev, drm); - dev_set_drvdata(dev, NULL); - drm_dev_put(drm); -} - -static const struct component_master_ops zx_drm_master_ops = { - .bind = zx_drm_bind, - .unbind = zx_drm_unbind, -}; - -static int compare_of(struct device *dev, void *data) -{ - return dev->of_node == data; -} - -static int zx_drm_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct device_node *parent = dev->of_node; - struct device_node *child; - struct component_match *match = NULL; - int ret; - - ret = devm_of_platform_populate(dev); - if (ret) - return ret; - - for_each_available_child_of_node(parent, child) - component_match_add(dev, &match, compare_of, child); - - return component_master_add_with_match(dev, &zx_drm_master_ops, match); -} - -static int zx_drm_remove(struct platform_device *pdev) -{ - component_master_del(&pdev->dev, &zx_drm_master_ops); - return 0; -} - -static const struct of_device_id zx_drm_of_match[] = { - { .compatible = "zte,zx296718-vou", }, - { /* end */ }, -}; -MODULE_DEVICE_TABLE(of, zx_drm_of_match); - -static struct 
platform_driver zx_drm_platform_driver = { - .probe = zx_drm_probe, - .remove = zx_drm_remove, - .driver = { - .name = "zx-drm", - .of_match_table = zx_drm_of_match, - }, -}; - -static struct platform_driver *drivers[] = { - &zx_crtc_driver, - &zx_hdmi_driver, - &zx_tvenc_driver, - &zx_vga_driver, - &zx_drm_platform_driver, -}; - -static int zx_drm_init(void) -{ - return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); -} -module_init(zx_drm_init); - -static void zx_drm_exit(void) -{ - platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); -} -module_exit(zx_drm_exit); - -MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); -MODULE_DESCRIPTION("ZTE ZX VOU DRM driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/zte/zx_drm_drv.h b/drivers/gpu/drm/zte/zx_drm_drv.h deleted file mode 100644 index 80cdaf479c74..000000000000 --- a/drivers/gpu/drm/zte/zx_drm_drv.h +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2016 Linaro Ltd. - * Copyright 2016 ZTE Corporation. - */ - -#ifndef __ZX_DRM_DRV_H__ -#define __ZX_DRM_DRV_H__ - -extern struct platform_driver zx_crtc_driver; -extern struct platform_driver zx_hdmi_driver; -extern struct platform_driver zx_tvenc_driver; -extern struct platform_driver zx_vga_driver; - -static inline u32 zx_readl(void __iomem *reg) -{ - return readl_relaxed(reg); -} - -static inline void zx_writel(void __iomem *reg, u32 val) -{ - writel_relaxed(val, reg); -} - -static inline void zx_writel_mask(void __iomem *reg, u32 mask, u32 val) -{ - u32 tmp; - - tmp = zx_readl(reg); - tmp = (tmp & ~mask) | (val & mask); - zx_writel(reg, tmp); -} - -#endif /* __ZX_DRM_DRV_H__ */ diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c deleted file mode 100644 index cd79ca0a92a9..000000000000 --- a/drivers/gpu/drm/zte/zx_hdmi.c +++ /dev/null @@ -1,760 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright 2016 Linaro Ltd. - * Copyright 2016 ZTE Corporation. 
- */ - -#include <linux/clk.h> -#include <linux/component.h> -#include <linux/delay.h> -#include <linux/err.h> -#include <linux/hdmi.h> -#include <linux/irq.h> -#include <linux/mfd/syscon.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/of_device.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_edid.h> -#include <drm/drm_of.h> -#include <drm/drm_probe_helper.h> -#include <drm/drm_print.h> -#include <drm/drm_simple_kms_helper.h> - -#include <sound/hdmi-codec.h> - -#include "zx_hdmi_regs.h" -#include "zx_vou.h" - -#define ZX_HDMI_INFOFRAME_SIZE 31 -#define DDC_SEGMENT_ADDR 0x30 - -struct zx_hdmi_i2c { - struct i2c_adapter adap; - struct mutex lock; -}; - -struct zx_hdmi { - struct drm_connector connector; - struct drm_encoder encoder; - struct zx_hdmi_i2c *ddc; - struct device *dev; - struct drm_device *drm; - void __iomem *mmio; - struct clk *cec_clk; - struct clk *osc_clk; - struct clk *xclk; - bool sink_is_hdmi; - bool sink_has_audio; - struct platform_device *audio_pdev; -}; - -#define to_zx_hdmi(x) container_of(x, struct zx_hdmi, x) - -static inline u8 hdmi_readb(struct zx_hdmi *hdmi, u16 offset) -{ - return readl_relaxed(hdmi->mmio + offset * 4); -} - -static inline void hdmi_writeb(struct zx_hdmi *hdmi, u16 offset, u8 val) -{ - writel_relaxed(val, hdmi->mmio + offset * 4); -} - -static inline void hdmi_writeb_mask(struct zx_hdmi *hdmi, u16 offset, - u8 mask, u8 val) -{ - u8 tmp; - - tmp = hdmi_readb(hdmi, offset); - tmp = (tmp & ~mask) | (val & mask); - hdmi_writeb(hdmi, offset, tmp); -} - -static int zx_hdmi_infoframe_trans(struct zx_hdmi *hdmi, - union hdmi_infoframe *frame, u8 fsel) -{ - u8 buffer[ZX_HDMI_INFOFRAME_SIZE]; - int num; - int i; - - hdmi_writeb(hdmi, TPI_INFO_FSEL, fsel); - - num = hdmi_infoframe_pack(frame, buffer, ZX_HDMI_INFOFRAME_SIZE); - if (num < 0) { - DRM_DEV_ERROR(hdmi->dev, "failed to pack infoframe: %d\n", num); - return num; - } - - for (i = 0; i < num; i++) - hdmi_writeb(hdmi, TPI_INFO_B0 + i, buffer[i]); - - hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_RPT, - TPI_INFO_TRANS_RPT); - hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_EN, - TPI_INFO_TRANS_EN); - - return num; -} - -static int zx_hdmi_config_video_vsi(struct zx_hdmi *hdmi, - struct drm_display_mode *mode) -{ - union hdmi_infoframe frame; - int ret; - - ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi, - &hdmi->connector, - mode); - if (ret) { - DRM_DEV_ERROR(hdmi->dev, "failed to get vendor infoframe: %d\n", - ret); - return ret; - } - - return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_VSIF); -} - -static int zx_hdmi_config_video_avi(struct zx_hdmi *hdmi, - struct drm_display_mode *mode) -{ - union hdmi_infoframe frame; - int ret; - - ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, - &hdmi->connector, - mode); - if (ret) { - DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n", - ret); - return ret; - } - - /* We always use YUV444 for HDMI output. 
*/ - frame.avi.colorspace = HDMI_COLORSPACE_YUV444; - - return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AVI); -} - -static void zx_hdmi_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adj_mode) -{ - struct zx_hdmi *hdmi = to_zx_hdmi(encoder); - - if (hdmi->sink_is_hdmi) { - zx_hdmi_config_video_avi(hdmi, mode); - zx_hdmi_config_video_vsi(hdmi, mode); - } -} - -static void zx_hdmi_phy_start(struct zx_hdmi *hdmi) -{ - /* Copy from ZTE BSP code */ - hdmi_writeb(hdmi, 0x222, 0x0); - hdmi_writeb(hdmi, 0x224, 0x4); - hdmi_writeb(hdmi, 0x909, 0x0); - hdmi_writeb(hdmi, 0x7b0, 0x90); - hdmi_writeb(hdmi, 0x7b1, 0x00); - hdmi_writeb(hdmi, 0x7b2, 0xa7); - hdmi_writeb(hdmi, 0x7b8, 0xaa); - hdmi_writeb(hdmi, 0x7b2, 0xa7); - hdmi_writeb(hdmi, 0x7b3, 0x0f); - hdmi_writeb(hdmi, 0x7b4, 0x0f); - hdmi_writeb(hdmi, 0x7b5, 0x55); - hdmi_writeb(hdmi, 0x7b7, 0x03); - hdmi_writeb(hdmi, 0x7b9, 0x12); - hdmi_writeb(hdmi, 0x7ba, 0x32); - hdmi_writeb(hdmi, 0x7bc, 0x68); - hdmi_writeb(hdmi, 0x7be, 0x40); - hdmi_writeb(hdmi, 0x7bf, 0x84); - hdmi_writeb(hdmi, 0x7c1, 0x0f); - hdmi_writeb(hdmi, 0x7c8, 0x02); - hdmi_writeb(hdmi, 0x7c9, 0x03); - hdmi_writeb(hdmi, 0x7ca, 0x40); - hdmi_writeb(hdmi, 0x7dc, 0x31); - hdmi_writeb(hdmi, 0x7e2, 0x04); - hdmi_writeb(hdmi, 0x7e0, 0x06); - hdmi_writeb(hdmi, 0x7cb, 0x68); - hdmi_writeb(hdmi, 0x7f9, 0x02); - hdmi_writeb(hdmi, 0x7b6, 0x02); - hdmi_writeb(hdmi, 0x7f3, 0x0); -} - -static void zx_hdmi_hw_enable(struct zx_hdmi *hdmi) -{ - /* Enable pclk */ - hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, CLKPWD_PDIDCK); - - /* Enable HDMI for TX */ - hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, FUNC_HDMI_EN); - - /* Enable deep color packet */ - hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN); - - /* Enable HDMI/MHL mode for output */ - hdmi_writeb_mask(hdmi, TEST_TXCTRL, TEST_TXCTRL_HDMI_MODE, - TEST_TXCTRL_HDMI_MODE); - - /* Configure reg_qc_sel */ - hdmi_writeb(hdmi, HDMICTL4, 0x3); - - /* Enable interrupt */ - hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT, - INTR1_MONITOR_DETECT); - - /* Start up phy */ - zx_hdmi_phy_start(hdmi); -} - -static void zx_hdmi_hw_disable(struct zx_hdmi *hdmi) -{ - /* Disable interrupt */ - hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT, 0); - - /* Disable deep color packet */ - hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN); - - /* Disable HDMI for TX */ - hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, 0); - - /* Disable pclk */ - hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, 0); -} - -static void zx_hdmi_encoder_enable(struct drm_encoder *encoder) -{ - struct zx_hdmi *hdmi = to_zx_hdmi(encoder); - - clk_prepare_enable(hdmi->cec_clk); - clk_prepare_enable(hdmi->osc_clk); - clk_prepare_enable(hdmi->xclk); - - zx_hdmi_hw_enable(hdmi); - - vou_inf_enable(VOU_HDMI, encoder->crtc); -} - -static void zx_hdmi_encoder_disable(struct drm_encoder *encoder) -{ - struct zx_hdmi *hdmi = to_zx_hdmi(encoder); - - vou_inf_disable(VOU_HDMI, encoder->crtc); - - zx_hdmi_hw_disable(hdmi); - - clk_disable_unprepare(hdmi->xclk); - clk_disable_unprepare(hdmi->osc_clk); - clk_disable_unprepare(hdmi->cec_clk); -} - -static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = { - .enable = zx_hdmi_encoder_enable, - .disable = zx_hdmi_encoder_disable, - .mode_set = zx_hdmi_encoder_mode_set, -}; - -static int zx_hdmi_connector_get_modes(struct drm_connector *connector) -{ - struct zx_hdmi *hdmi = to_zx_hdmi(connector); - struct edid *edid; - int ret; - - edid = 
drm_get_edid(connector, &hdmi->ddc->adap); - if (!edid) - return 0; - - hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid); - hdmi->sink_has_audio = drm_detect_monitor_audio(edid); - drm_connector_update_edid_property(connector, edid); - ret = drm_add_edid_modes(connector, edid); - kfree(edid); - - return ret; -} - -static enum drm_mode_status -zx_hdmi_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - return MODE_OK; -} - -static struct drm_connector_helper_funcs zx_hdmi_connector_helper_funcs = { - .get_modes = zx_hdmi_connector_get_modes, - .mode_valid = zx_hdmi_connector_mode_valid, -}; - -static enum drm_connector_status -zx_hdmi_connector_detect(struct drm_connector *connector, bool force) -{ - struct zx_hdmi *hdmi = to_zx_hdmi(connector); - - return (hdmi_readb(hdmi, TPI_HPD_RSEN) & TPI_HPD_CONNECTION) ? - connector_status_connected : connector_status_disconnected; -} - -static const struct drm_connector_funcs zx_hdmi_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .detect = zx_hdmi_connector_detect, - .destroy = drm_connector_cleanup, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi) -{ - struct drm_encoder *encoder = &hdmi->encoder; - - encoder->possible_crtcs = VOU_CRTC_MASK; - - drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs); - - hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD; - - drm_connector_init_with_ddc(drm, &hdmi->connector, - &zx_hdmi_connector_funcs, - DRM_MODE_CONNECTOR_HDMIA, - &hdmi->ddc->adap); - drm_connector_helper_add(&hdmi->connector, - &zx_hdmi_connector_helper_funcs); - - drm_connector_attach_encoder(&hdmi->connector, encoder); - - return 0; -} - -static irqreturn_t zx_hdmi_irq_thread(int irq, void *dev_id) -{ - struct zx_hdmi *hdmi = dev_id; - - drm_helper_hpd_irq_event(hdmi->connector.dev); - - return IRQ_HANDLED; -} - -static irqreturn_t zx_hdmi_irq_handler(int irq, void *dev_id) -{ - struct zx_hdmi *hdmi = dev_id; - u8 lstat; - - lstat = hdmi_readb(hdmi, L1_INTR_STAT); - - /* Monitor detect/HPD interrupt */ - if (lstat & L1_INTR_STAT_INTR1) { - u8 stat; - - stat = hdmi_readb(hdmi, INTR1_STAT); - hdmi_writeb(hdmi, INTR1_STAT, stat); - - if (stat & INTR1_MONITOR_DETECT) - return IRQ_WAKE_THREAD; - } - - return IRQ_NONE; -} - -static int zx_hdmi_audio_startup(struct device *dev, void *data) -{ - struct zx_hdmi *hdmi = dev_get_drvdata(dev); - struct drm_encoder *encoder = &hdmi->encoder; - - vou_inf_hdmi_audio_sel(encoder->crtc, VOU_HDMI_AUD_SPDIF); - - return 0; -} - -static void zx_hdmi_audio_shutdown(struct device *dev, void *data) -{ - struct zx_hdmi *hdmi = dev_get_drvdata(dev); - - /* Disable audio input */ - hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, 0); -} - -static inline int zx_hdmi_audio_get_n(unsigned int fs) -{ - unsigned int n; - - if (fs && (fs % 44100) == 0) - n = 6272 * (fs / 44100); - else - n = fs * 128 / 1000; - - return n; -} - -static int zx_hdmi_audio_hw_params(struct device *dev, - void *data, - struct hdmi_codec_daifmt *daifmt, - struct hdmi_codec_params *params) -{ - struct zx_hdmi *hdmi = dev_get_drvdata(dev); - struct hdmi_audio_infoframe *cea = ¶ms->cea; - union hdmi_infoframe frame; - int n; - - /* We only support spdif for now */ - if (daifmt->fmt != HDMI_SPDIF) { - 
DRM_DEV_ERROR(hdmi->dev, "invalid daifmt %d\n", daifmt->fmt); - return -EINVAL; - } - - switch (params->sample_width) { - case 16: - hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK, - SPDIF_SAMPLE_SIZE_16BIT); - break; - case 20: - hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK, - SPDIF_SAMPLE_SIZE_20BIT); - break; - case 24: - hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK, - SPDIF_SAMPLE_SIZE_24BIT); - break; - default: - DRM_DEV_ERROR(hdmi->dev, "invalid sample width %d\n", - params->sample_width); - return -EINVAL; - } - - /* CTS is calculated by hardware, and we only need to take care of N */ - n = zx_hdmi_audio_get_n(params->sample_rate); - hdmi_writeb(hdmi, N_SVAL1, n & 0xff); - hdmi_writeb(hdmi, N_SVAL2, (n >> 8) & 0xff); - hdmi_writeb(hdmi, N_SVAL3, (n >> 16) & 0xf); - - /* Enable spdif mode */ - hdmi_writeb_mask(hdmi, AUD_MODE, SPDIF_EN, SPDIF_EN); - - /* Enable audio input */ - hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, AUD_IN_EN); - - memcpy(&frame.audio, cea, sizeof(*cea)); - - return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AUDIO); -} - -static int zx_hdmi_audio_mute(struct device *dev, void *data, - bool enable, int direction) -{ - struct zx_hdmi *hdmi = dev_get_drvdata(dev); - - if (enable) - hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE, - TPI_AUD_MUTE); - else - hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE, 0); - - return 0; -} - -static int zx_hdmi_audio_get_eld(struct device *dev, void *data, - uint8_t *buf, size_t len) -{ - struct zx_hdmi *hdmi = dev_get_drvdata(dev); - struct drm_connector *connector = &hdmi->connector; - - memcpy(buf, connector->eld, min(sizeof(connector->eld), len)); - - return 0; -} - -static const struct hdmi_codec_ops zx_hdmi_codec_ops = { - .audio_startup = zx_hdmi_audio_startup, - .hw_params = zx_hdmi_audio_hw_params, - .audio_shutdown = zx_hdmi_audio_shutdown, - .mute_stream = zx_hdmi_audio_mute, - .get_eld = zx_hdmi_audio_get_eld, - .no_capture_mute = 1, -}; - -static struct hdmi_codec_pdata zx_hdmi_codec_pdata = { - .ops = &zx_hdmi_codec_ops, - .spdif = 1, -}; - -static int zx_hdmi_audio_register(struct zx_hdmi *hdmi) -{ - struct platform_device *pdev; - - pdev = platform_device_register_data(hdmi->dev, HDMI_CODEC_DRV_NAME, - PLATFORM_DEVID_AUTO, - &zx_hdmi_codec_pdata, - sizeof(zx_hdmi_codec_pdata)); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); - - hdmi->audio_pdev = pdev; - - return 0; -} - -static int zx_hdmi_i2c_read(struct zx_hdmi *hdmi, struct i2c_msg *msg) -{ - int len = msg->len; - u8 *buf = msg->buf; - int retry = 0; - int ret = 0; - - /* Bits [9:8] of bytes */ - hdmi_writeb(hdmi, ZX_DDC_DIN_CNT2, (len >> 8) & 0xff); - /* Bits [7:0] of bytes */ - hdmi_writeb(hdmi, ZX_DDC_DIN_CNT1, len & 0xff); - - /* Clear FIFO */ - hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK, DDC_CMD_CLEAR_FIFO); - - /* Kick off the read */ - hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK, - DDC_CMD_SEQUENTIAL_READ); - - while (len > 0) { - int cnt, i; - - /* FIFO needs some time to get ready */ - usleep_range(500, 1000); - - cnt = hdmi_readb(hdmi, ZX_DDC_DOUT_CNT) & DDC_DOUT_CNT_MASK; - if (cnt == 0) { - if (++retry > 5) { - DRM_DEV_ERROR(hdmi->dev, - "DDC FIFO read timed out!"); - return -ETIMEDOUT; - } - continue; - } - - for (i = 0; i < cnt; i++) - *buf++ = hdmi_readb(hdmi, ZX_DDC_DATA); - len -= cnt; - } - - return ret; -} - -static int zx_hdmi_i2c_write(struct zx_hdmi *hdmi, struct i2c_msg *msg) -{ - /* - * The DDC I2C adapter is only for reading EDID data, so we assume - * that the write to this 
adapter must be the EDID data offset. - */ - if ((msg->len != 1) || - ((msg->addr != DDC_ADDR) && (msg->addr != DDC_SEGMENT_ADDR))) - return -EINVAL; - - if (msg->addr == DDC_SEGMENT_ADDR) - hdmi_writeb(hdmi, ZX_DDC_SEGM, msg->addr << 1); - else if (msg->addr == DDC_ADDR) - hdmi_writeb(hdmi, ZX_DDC_ADDR, msg->addr << 1); - - hdmi_writeb(hdmi, ZX_DDC_OFFSET, msg->buf[0]); - - return 0; -} - -static int zx_hdmi_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, - int num) -{ - struct zx_hdmi *hdmi = i2c_get_adapdata(adap); - struct zx_hdmi_i2c *ddc = hdmi->ddc; - int i, ret = 0; - - mutex_lock(&ddc->lock); - - /* Enable DDC master access */ - hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, HW_DDC_MASTER); - - for (i = 0; i < num; i++) { - DRM_DEV_DEBUG(hdmi->dev, - "xfer: num: %d/%d, len: %d, flags: %#x\n", - i + 1, num, msgs[i].len, msgs[i].flags); - - if (msgs[i].flags & I2C_M_RD) - ret = zx_hdmi_i2c_read(hdmi, &msgs[i]); - else - ret = zx_hdmi_i2c_write(hdmi, &msgs[i]); - - if (ret < 0) - break; - } - - if (!ret) - ret = num; - - /* Disable DDC master access */ - hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, 0); - - mutex_unlock(&ddc->lock); - - return ret; -} - -static u32 zx_hdmi_i2c_func(struct i2c_adapter *adapter) -{ - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; -} - -static const struct i2c_algorithm zx_hdmi_algorithm = { - .master_xfer = zx_hdmi_i2c_xfer, - .functionality = zx_hdmi_i2c_func, -}; - -static int zx_hdmi_ddc_register(struct zx_hdmi *hdmi) -{ - struct i2c_adapter *adap; - struct zx_hdmi_i2c *ddc; - int ret; - - ddc = devm_kzalloc(hdmi->dev, sizeof(*ddc), GFP_KERNEL); - if (!ddc) - return -ENOMEM; - - hdmi->ddc = ddc; - mutex_init(&ddc->lock); - - adap = &ddc->adap; - adap->owner = THIS_MODULE; - adap->class = I2C_CLASS_DDC; - adap->dev.parent = hdmi->dev; - adap->algo = &zx_hdmi_algorithm; - snprintf(adap->name, sizeof(adap->name), "zx hdmi i2c"); - - ret = i2c_add_adapter(adap); - if (ret) { - DRM_DEV_ERROR(hdmi->dev, "failed to add I2C adapter: %d\n", - ret); - return ret; - } - - i2c_set_adapdata(adap, hdmi); - - return 0; -} - -static int zx_hdmi_bind(struct device *dev, struct device *master, void *data) -{ - struct platform_device *pdev = to_platform_device(dev); - struct drm_device *drm = data; - struct resource *res; - struct zx_hdmi *hdmi; - int irq; - int ret; - - hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); - if (!hdmi) - return -ENOMEM; - - hdmi->dev = dev; - hdmi->drm = drm; - - dev_set_drvdata(dev, hdmi); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hdmi->mmio = devm_ioremap_resource(dev, res); - if (IS_ERR(hdmi->mmio)) { - ret = PTR_ERR(hdmi->mmio); - DRM_DEV_ERROR(dev, "failed to remap hdmi region: %d\n", ret); - return ret; - } - - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - hdmi->cec_clk = devm_clk_get(hdmi->dev, "osc_cec"); - if (IS_ERR(hdmi->cec_clk)) { - ret = PTR_ERR(hdmi->cec_clk); - DRM_DEV_ERROR(dev, "failed to get cec_clk: %d\n", ret); - return ret; - } - - hdmi->osc_clk = devm_clk_get(hdmi->dev, "osc_clk"); - if (IS_ERR(hdmi->osc_clk)) { - ret = PTR_ERR(hdmi->osc_clk); - DRM_DEV_ERROR(dev, "failed to get osc_clk: %d\n", ret); - return ret; - } - - hdmi->xclk = devm_clk_get(hdmi->dev, "xclk"); - if (IS_ERR(hdmi->xclk)) { - ret = PTR_ERR(hdmi->xclk); - DRM_DEV_ERROR(dev, "failed to get xclk: %d\n", ret); - return ret; - } - - ret = zx_hdmi_ddc_register(hdmi); - if (ret) { - DRM_DEV_ERROR(dev, "failed to register ddc: %d\n", ret); - return ret; - } - - ret = 
zx_hdmi_audio_register(hdmi); - if (ret) { - DRM_DEV_ERROR(dev, "failed to register audio: %d\n", ret); - return ret; - } - - ret = zx_hdmi_register(drm, hdmi); - if (ret) { - DRM_DEV_ERROR(dev, "failed to register hdmi: %d\n", ret); - return ret; - } - - ret = devm_request_threaded_irq(dev, irq, zx_hdmi_irq_handler, - zx_hdmi_irq_thread, IRQF_SHARED, - dev_name(dev), hdmi); - if (ret) { - DRM_DEV_ERROR(dev, "failed to request threaded irq: %d\n", ret); - return ret; - } - - return 0; -} - -static void zx_hdmi_unbind(struct device *dev, struct device *master, - void *data) -{ - struct zx_hdmi *hdmi = dev_get_drvdata(dev); - - hdmi->connector.funcs->destroy(&hdmi->connector); - hdmi->encoder.funcs->destroy(&hdmi->encoder); - - if (hdmi->audio_pdev) - platform_device_unregister(hdmi->audio_pdev); -} - -static const struct component_ops zx_hdmi_component_ops = { - .bind = zx_hdmi_bind, - .unbind = zx_hdmi_unbind, -}; - -static int zx_hdmi_probe(struct platform_device *pdev) -{ - return component_add(&pdev->dev, &zx_hdmi_component_ops); -} - -static int zx_hdmi_remove(struct platform_device *pdev) -{ - component_del(&pdev->dev, &zx_hdmi_component_ops); - return 0; -} - -static const struct of_device_id zx_hdmi_of_match[] = { - { .compatible = "zte,zx296718-hdmi", }, - { /* end */ }, -}; -MODULE_DEVICE_TABLE(of, zx_hdmi_of_match); - -struct platform_driver zx_hdmi_driver = { - .probe = zx_hdmi_probe, - .remove = zx_hdmi_remove, - .driver = { - .name = "zx-hdmi", - .of_match_table = zx_hdmi_of_match, - }, -}; diff --git a/drivers/gpu/drm/zte/zx_hdmi_regs.h b/drivers/gpu/drm/zte/zx_hdmi_regs.h deleted file mode 100644 index 397949e64eff..000000000000 --- a/drivers/gpu/drm/zte/zx_hdmi_regs.h +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2016 Linaro Ltd. - * Copyright 2016 ZTE Corporation. 
- */ - -#ifndef __ZX_HDMI_REGS_H__ -#define __ZX_HDMI_REGS_H__ - -#define FUNC_SEL 0x000b -#define FUNC_HDMI_EN BIT(0) -#define CLKPWD 0x000d -#define CLKPWD_PDIDCK BIT(2) -#define P2T_CTRL 0x0066 -#define P2T_DC_PKT_EN BIT(7) -#define L1_INTR_STAT 0x007e -#define L1_INTR_STAT_INTR1 BIT(0) -#define INTR1_STAT 0x008f -#define INTR1_MASK 0x0095 -#define INTR1_MONITOR_DETECT (BIT(5) | BIT(6)) -#define ZX_DDC_ADDR 0x00ed -#define ZX_DDC_SEGM 0x00ee -#define ZX_DDC_OFFSET 0x00ef -#define ZX_DDC_DIN_CNT1 0x00f0 -#define ZX_DDC_DIN_CNT2 0x00f1 -#define ZX_DDC_CMD 0x00f3 -#define DDC_CMD_MASK 0xf -#define DDC_CMD_CLEAR_FIFO 0x9 -#define DDC_CMD_SEQUENTIAL_READ 0x2 -#define ZX_DDC_DATA 0x00f4 -#define ZX_DDC_DOUT_CNT 0x00f5 -#define DDC_DOUT_CNT_MASK 0x1f -#define TEST_TXCTRL 0x00f7 -#define TEST_TXCTRL_HDMI_MODE BIT(1) -#define HDMICTL4 0x0235 -#define TPI_HPD_RSEN 0x063b -#define TPI_HPD_CONNECTION (BIT(1) | BIT(2)) -#define TPI_INFO_FSEL 0x06bf -#define FSEL_AVI 0 -#define FSEL_GBD 1 -#define FSEL_AUDIO 2 -#define FSEL_SPD 3 -#define FSEL_MPEG 4 -#define FSEL_VSIF 5 -#define TPI_INFO_B0 0x06c0 -#define TPI_INFO_EN 0x06df -#define TPI_INFO_TRANS_EN BIT(7) -#define TPI_INFO_TRANS_RPT BIT(6) -#define TPI_DDC_MASTER_EN 0x06f8 -#define HW_DDC_MASTER BIT(7) -#define N_SVAL1 0xa03 -#define N_SVAL2 0xa04 -#define N_SVAL3 0xa05 -#define AUD_EN 0xa13 -#define AUD_IN_EN BIT(0) -#define AUD_MODE 0xa14 -#define SPDIF_EN BIT(1) -#define TPI_AUD_CONFIG 0xa62 -#define SPDIF_SAMPLE_SIZE_SHIFT 6 -#define SPDIF_SAMPLE_SIZE_MASK (0x3 << SPDIF_SAMPLE_SIZE_SHIFT) -#define SPDIF_SAMPLE_SIZE_16BIT (0x1 << SPDIF_SAMPLE_SIZE_SHIFT) -#define SPDIF_SAMPLE_SIZE_20BIT (0x2 << SPDIF_SAMPLE_SIZE_SHIFT) -#define SPDIF_SAMPLE_SIZE_24BIT (0x3 << SPDIF_SAMPLE_SIZE_SHIFT) -#define TPI_AUD_MUTE BIT(4) - -#endif /* __ZX_HDMI_REGS_H__ */ diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c deleted file mode 100644 index 93bcca428e35..000000000000 --- a/drivers/gpu/drm/zte/zx_plane.c +++ /dev/null @@ -1,537 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright 2016 Linaro Ltd. - * Copyright 2016 ZTE Corporation. 
- */ - -#include <drm/drm_atomic.h> -#include <drm/drm_atomic_helper.h> -#include <drm/drm_fb_cma_helper.h> -#include <drm/drm_fourcc.h> -#include <drm/drm_gem_cma_helper.h> -#include <drm/drm_modeset_helper_vtables.h> -#include <drm/drm_plane_helper.h> - -#include "zx_common_regs.h" -#include "zx_drm_drv.h" -#include "zx_plane.h" -#include "zx_plane_regs.h" -#include "zx_vou.h" - -static const uint32_t gl_formats[] = { - DRM_FORMAT_ARGB8888, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_RGB888, - DRM_FORMAT_RGB565, - DRM_FORMAT_ARGB1555, - DRM_FORMAT_ARGB4444, -}; - -static const uint32_t vl_formats[] = { - DRM_FORMAT_NV12, /* Semi-planar YUV420 */ - DRM_FORMAT_YUV420, /* Planar YUV420 */ - DRM_FORMAT_YUYV, /* Packed YUV422 */ - DRM_FORMAT_YVYU, - DRM_FORMAT_UYVY, - DRM_FORMAT_VYUY, - DRM_FORMAT_YUV444, /* YUV444 8bit */ - /* - * TODO: add formats below that HW supports: - * - YUV420 P010 - * - YUV420 Hantro - * - YUV444 10bit - */ -}; - -#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) - -static int zx_vl_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, - plane); - struct drm_framebuffer *fb = plane_state->fb; - struct drm_crtc *crtc = plane_state->crtc; - struct drm_crtc_state *crtc_state; - int min_scale = FRAC_16_16(1, 8); - int max_scale = FRAC_16_16(8, 1); - - if (!crtc || WARN_ON(!fb)) - return 0; - - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); - if (WARN_ON(!crtc_state)) - return -EINVAL; - - /* nothing to check when disabling or disabled */ - if (!crtc_state->enable) - return 0; - - /* plane must be enabled */ - if (!plane_state->crtc) - return -EINVAL; - - return drm_atomic_helper_check_plane_state(plane_state, crtc_state, - min_scale, max_scale, - true, true); -} - -static int zx_vl_get_fmt(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_NV12: - return VL_FMT_YUV420; - case DRM_FORMAT_YUV420: - return VL_YUV420_PLANAR | VL_FMT_YUV420; - case DRM_FORMAT_YUYV: - return VL_YUV422_YUYV | VL_FMT_YUV422; - case DRM_FORMAT_YVYU: - return VL_YUV422_YVYU | VL_FMT_YUV422; - case DRM_FORMAT_UYVY: - return VL_YUV422_UYVY | VL_FMT_YUV422; - case DRM_FORMAT_VYUY: - return VL_YUV422_VYUY | VL_FMT_YUV422; - case DRM_FORMAT_YUV444: - return VL_FMT_YUV444_8BIT; - default: - WARN_ONCE(1, "invalid pixel format %d\n", format); - return -EINVAL; - } -} - -static inline void zx_vl_set_update(struct zx_plane *zplane) -{ - void __iomem *layer = zplane->layer; - - zx_writel_mask(layer + VL_CTRL0, VL_UPDATE, VL_UPDATE); -} - -static inline void zx_vl_rsz_set_update(struct zx_plane *zplane) -{ - zx_writel(zplane->rsz + RSZ_VL_ENABLE_CFG, 1); -} - -static int zx_vl_rsz_get_fmt(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_NV12: - case DRM_FORMAT_YUV420: - return RSZ_VL_FMT_YCBCR420; - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - return RSZ_VL_FMT_YCBCR422; - case DRM_FORMAT_YUV444: - return RSZ_VL_FMT_YCBCR444; - default: - WARN_ONCE(1, "invalid pixel format %d\n", format); - return -EINVAL; - } -} - -static inline u32 rsz_step_value(u32 src, u32 dst) -{ - u32 val = 0; - - if (src == dst) - val = 0; - else if (src < dst) - val = RSZ_PARA_STEP((src << 16) / dst); - else if (src > dst) - val = RSZ_DATA_STEP(src / dst) | - RSZ_PARA_STEP(((src << 16) / dst) & 0xffff); - - return val; -} - -static void zx_vl_rsz_setup(struct zx_plane *zplane, uint32_t format, - u32 src_w, u32 src_h, u32 dst_w, u32 dst_h) -{ - void 
__iomem *rsz = zplane->rsz; - u32 src_chroma_w = src_w; - u32 src_chroma_h = src_h; - int fmt; - - /* Set up source and destination resolution */ - zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1)); - zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1)); - - /* Configure data format for VL RSZ */ - fmt = zx_vl_rsz_get_fmt(format); - if (fmt >= 0) - zx_writel_mask(rsz + RSZ_VL_CTRL_CFG, RSZ_VL_FMT_MASK, fmt); - - /* Calculate Chroma height and width */ - if (fmt == RSZ_VL_FMT_YCBCR420) { - src_chroma_w = src_w >> 1; - src_chroma_h = src_h >> 1; - } else if (fmt == RSZ_VL_FMT_YCBCR422) { - src_chroma_w = src_w >> 1; - } - - /* Set up Luma and Chroma step registers */ - zx_writel(rsz + RSZ_VL_LUMA_HOR, rsz_step_value(src_w, dst_w)); - zx_writel(rsz + RSZ_VL_LUMA_VER, rsz_step_value(src_h, dst_h)); - zx_writel(rsz + RSZ_VL_CHROMA_HOR, rsz_step_value(src_chroma_w, dst_w)); - zx_writel(rsz + RSZ_VL_CHROMA_VER, rsz_step_value(src_chroma_h, dst_h)); - - zx_vl_rsz_set_update(zplane); -} - -static void zx_vl_plane_atomic_update(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct zx_plane *zplane = to_zx_plane(plane); - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); - struct drm_framebuffer *fb = new_state->fb; - struct drm_rect *src = &new_state->src; - struct drm_rect *dst = &new_state->dst; - struct drm_gem_cma_object *cma_obj; - void __iomem *layer = zplane->layer; - void __iomem *hbsc = zplane->hbsc; - void __iomem *paddr_reg; - dma_addr_t paddr; - u32 src_x, src_y, src_w, src_h; - u32 dst_x, dst_y, dst_w, dst_h; - uint32_t format; - int fmt; - int i; - - if (!fb) - return; - - format = fb->format->format; - - src_x = src->x1 >> 16; - src_y = src->y1 >> 16; - src_w = drm_rect_width(src) >> 16; - src_h = drm_rect_height(src) >> 16; - - dst_x = dst->x1; - dst_y = dst->y1; - dst_w = drm_rect_width(dst); - dst_h = drm_rect_height(dst); - - /* Set up data address registers for Y, Cb and Cr planes */ - paddr_reg = layer + VL_Y; - for (i = 0; i < fb->format->num_planes; i++) { - cma_obj = drm_fb_cma_get_gem_obj(fb, i); - paddr = cma_obj->paddr + fb->offsets[i]; - paddr += src_y * fb->pitches[i]; - paddr += src_x * fb->format->cpp[i]; - zx_writel(paddr_reg, paddr); - paddr_reg += 4; - } - - /* Set up source height/width register */ - zx_writel(layer + VL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h)); - - /* Set up start position register */ - zx_writel(layer + VL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y)); - - /* Set up end position register */ - zx_writel(layer + VL_POS_END, - GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h)); - - /* Strides of Cb and Cr planes should be identical */ - zx_writel(layer + VL_STRIDE, LUMA_STRIDE(fb->pitches[0]) | - CHROMA_STRIDE(fb->pitches[1])); - - /* Set up video layer data format */ - fmt = zx_vl_get_fmt(format); - if (fmt >= 0) - zx_writel(layer + VL_CTRL1, fmt); - - /* Always use scaler since it exists (set for not bypass) */ - zx_writel_mask(layer + VL_CTRL2, VL_SCALER_BYPASS_MODE, - VL_SCALER_BYPASS_MODE); - - zx_vl_rsz_setup(zplane, format, src_w, src_h, dst_w, dst_h); - - /* Enable HBSC block */ - zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN); - - zx_vou_layer_enable(plane); - - zx_vl_set_update(zplane); -} - -static void zx_plane_atomic_disable(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, - plane); - struct zx_plane *zplane = to_zx_plane(plane); - void 
__iomem *hbsc = zplane->hbsc; - - zx_vou_layer_disable(plane, old_state); - - /* Disable HBSC block */ - zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, 0); -} - -static const struct drm_plane_helper_funcs zx_vl_plane_helper_funcs = { - .atomic_check = zx_vl_plane_atomic_check, - .atomic_update = zx_vl_plane_atomic_update, - .atomic_disable = zx_plane_atomic_disable, -}; - -static int zx_gl_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, - plane); - struct drm_framebuffer *fb = plane_state->fb; - struct drm_crtc *crtc = plane_state->crtc; - struct drm_crtc_state *crtc_state; - - if (!crtc || WARN_ON(!fb)) - return 0; - - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); - if (WARN_ON(!crtc_state)) - return -EINVAL; - - /* nothing to check when disabling or disabled */ - if (!crtc_state->enable) - return 0; - - /* plane must be enabled */ - if (!plane_state->crtc) - return -EINVAL; - - return drm_atomic_helper_check_plane_state(plane_state, crtc_state, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - false, true); -} - -static int zx_gl_get_fmt(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_XRGB8888: - return GL_FMT_ARGB8888; - case DRM_FORMAT_RGB888: - return GL_FMT_RGB888; - case DRM_FORMAT_RGB565: - return GL_FMT_RGB565; - case DRM_FORMAT_ARGB1555: - return GL_FMT_ARGB1555; - case DRM_FORMAT_ARGB4444: - return GL_FMT_ARGB4444; - default: - WARN_ONCE(1, "invalid pixel format %d\n", format); - return -EINVAL; - } -} - -static inline void zx_gl_set_update(struct zx_plane *zplane) -{ - void __iomem *layer = zplane->layer; - - zx_writel_mask(layer + GL_CTRL0, GL_UPDATE, GL_UPDATE); -} - -static inline void zx_gl_rsz_set_update(struct zx_plane *zplane) -{ - zx_writel(zplane->rsz + RSZ_ENABLE_CFG, 1); -} - -static void zx_gl_rsz_setup(struct zx_plane *zplane, u32 src_w, u32 src_h, - u32 dst_w, u32 dst_h) -{ - void __iomem *rsz = zplane->rsz; - - zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1)); - zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1)); - - zx_gl_rsz_set_update(zplane); -} - -static void zx_gl_plane_atomic_update(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); - struct zx_plane *zplane = to_zx_plane(plane); - struct drm_framebuffer *fb = new_state->fb; - struct drm_gem_cma_object *cma_obj; - void __iomem *layer = zplane->layer; - void __iomem *csc = zplane->csc; - void __iomem *hbsc = zplane->hbsc; - u32 src_x, src_y, src_w, src_h; - u32 dst_x, dst_y, dst_w, dst_h; - unsigned int bpp; - uint32_t format; - dma_addr_t paddr; - u32 stride; - int fmt; - - if (!fb) - return; - - format = fb->format->format; - stride = fb->pitches[0]; - - src_x = new_state->src_x >> 16; - src_y = new_state->src_y >> 16; - src_w = new_state->src_w >> 16; - src_h = new_state->src_h >> 16; - - dst_x = new_state->crtc_x; - dst_y = new_state->crtc_y; - dst_w = new_state->crtc_w; - dst_h = new_state->crtc_h; - - bpp = fb->format->cpp[0]; - - cma_obj = drm_fb_cma_get_gem_obj(fb, 0); - paddr = cma_obj->paddr + fb->offsets[0]; - paddr += src_y * stride + src_x * bpp / 8; - zx_writel(layer + GL_ADDR, paddr); - - /* Set up source height/width register */ - zx_writel(layer + GL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h)); - - /* Set up start position register */ - zx_writel(layer + GL_POS_START, 
GL_POS_X(dst_x) | GL_POS_Y(dst_y));
-
-	/* Set up end position register */
-	zx_writel(layer + GL_POS_END,
-		  GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h));
-
-	/* Set up stride register */
-	zx_writel(layer + GL_STRIDE, stride & 0xffff);
-
-	/* Set up graphic layer data format */
-	fmt = zx_gl_get_fmt(format);
-	if (fmt >= 0)
-		zx_writel_mask(layer + GL_CTRL1, GL_DATA_FMT_MASK,
-			       fmt << GL_DATA_FMT_SHIFT);
-
-	/* Initialize global alpha with a sane value */
-	zx_writel_mask(layer + GL_CTRL2, GL_GLOBAL_ALPHA_MASK,
-		       0xff << GL_GLOBAL_ALPHA_SHIFT);
-
-	/* Setup CSC for the GL */
-	if (dst_h > 720)
-		zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
-			       CSC_BT709_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
-	else
-		zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
-			       CSC_BT601_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
-	zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, CSC_WORK_ENABLE);
-
-	/* Always use scaler since it exists (set for not bypass) */
-	zx_writel_mask(layer + GL_CTRL3, GL_SCALER_BYPASS_MODE,
-		       GL_SCALER_BYPASS_MODE);
-
-	zx_gl_rsz_setup(zplane, src_w, src_h, dst_w, dst_h);
-
-	/* Enable HBSC block */
-	zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
-
-	zx_vou_layer_enable(plane);
-
-	zx_gl_set_update(zplane);
-}
-
-static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
-	.atomic_check = zx_gl_plane_atomic_check,
-	.atomic_update = zx_gl_plane_atomic_update,
-	.atomic_disable = zx_plane_atomic_disable,
-};
-
-static const struct drm_plane_funcs zx_plane_funcs = {
-	.update_plane = drm_atomic_helper_update_plane,
-	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = drm_plane_cleanup,
-	.reset = drm_atomic_helper_plane_reset,
-	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-void zx_plane_set_update(struct drm_plane *plane)
-{
-	struct zx_plane *zplane = to_zx_plane(plane);
-
-	/* Do nothing if the plane is not enabled */
-	if (!plane->state->crtc)
-		return;
-
-	switch (plane->type) {
-	case DRM_PLANE_TYPE_PRIMARY:
-		zx_gl_rsz_set_update(zplane);
-		zx_gl_set_update(zplane);
-		break;
-	case DRM_PLANE_TYPE_OVERLAY:
-		zx_vl_rsz_set_update(zplane);
-		zx_vl_set_update(zplane);
-		break;
-	default:
-		WARN_ONCE(1, "unsupported plane type %d\n", plane->type);
-	}
-}
-
-static void zx_plane_hbsc_init(struct zx_plane *zplane)
-{
-	void __iomem *hbsc = zplane->hbsc;
-
-	/*
-	 * Initialize HBSC block with a sane configuration per recommendation
-	 * from ZTE BSP code.
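-	 * The values written below are presumably neutral settings: 0x200
-	 * reads like the unity (1.0) gain point for saturation and
-	 * contrast, with the hue and brightness offsets left at zero.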
- */ - zx_writel(hbsc + HBSC_SATURATION, 0x200); - zx_writel(hbsc + HBSC_HUE, 0x0); - zx_writel(hbsc + HBSC_BRIGHT, 0x0); - zx_writel(hbsc + HBSC_CONTRAST, 0x200); - - zx_writel(hbsc + HBSC_THRESHOLD_COL1, (0x3ac << 16) | 0x40); - zx_writel(hbsc + HBSC_THRESHOLD_COL2, (0x3c0 << 16) | 0x40); - zx_writel(hbsc + HBSC_THRESHOLD_COL3, (0x3c0 << 16) | 0x40); -} - -int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane, - enum drm_plane_type type) -{ - const struct drm_plane_helper_funcs *helper; - struct drm_plane *plane = &zplane->plane; - struct device *dev = zplane->dev; - const uint32_t *formats; - unsigned int format_count; - int ret; - - zx_plane_hbsc_init(zplane); - - switch (type) { - case DRM_PLANE_TYPE_PRIMARY: - helper = &zx_gl_plane_helper_funcs; - formats = gl_formats; - format_count = ARRAY_SIZE(gl_formats); - break; - case DRM_PLANE_TYPE_OVERLAY: - helper = &zx_vl_plane_helper_funcs; - formats = vl_formats; - format_count = ARRAY_SIZE(vl_formats); - break; - default: - return -ENODEV; - } - - ret = drm_universal_plane_init(drm, plane, VOU_CRTC_MASK, - &zx_plane_funcs, formats, format_count, - NULL, type, NULL); - if (ret) { - DRM_DEV_ERROR(dev, "failed to init universal plane: %d\n", ret); - return ret; - } - - drm_plane_helper_add(plane, helper); - - return 0; -} diff --git a/drivers/gpu/drm/zte/zx_plane.h b/drivers/gpu/drm/zte/zx_plane.h deleted file mode 100644 index 5a7cc8b3b985..000000000000 --- a/drivers/gpu/drm/zte/zx_plane.h +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2016 Linaro Ltd. - * Copyright 2016 ZTE Corporation. - */ - -#ifndef __ZX_PLANE_H__ -#define __ZX_PLANE_H__ - -struct zx_plane { - struct drm_plane plane; - struct device *dev; - void __iomem *layer; - void __iomem *csc; - void __iomem *hbsc; - void __iomem *rsz; - const struct vou_layer_bits *bits; -}; - -#define to_zx_plane(plane) container_of(plane, struct zx_plane, plane) - -int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane, - enum drm_plane_type type); -void zx_plane_set_update(struct drm_plane *plane); - -#endif /* __ZX_PLANE_H__ */ diff --git a/drivers/gpu/drm/zte/zx_plane_regs.h b/drivers/gpu/drm/zte/zx_plane_regs.h deleted file mode 100644 index ce830637a92d..000000000000 --- a/drivers/gpu/drm/zte/zx_plane_regs.h +++ /dev/null @@ -1,120 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2016 Linaro Ltd. - * Copyright 2016 ZTE Corporation. 
- */ - -#ifndef __ZX_PLANE_REGS_H__ -#define __ZX_PLANE_REGS_H__ - -/* GL registers */ -#define GL_CTRL0 0x00 -#define GL_UPDATE BIT(5) -#define GL_CTRL1 0x04 -#define GL_DATA_FMT_SHIFT 0 -#define GL_DATA_FMT_MASK (0xf << GL_DATA_FMT_SHIFT) -#define GL_FMT_ARGB8888 0 -#define GL_FMT_RGB888 1 -#define GL_FMT_RGB565 2 -#define GL_FMT_ARGB1555 3 -#define GL_FMT_ARGB4444 4 -#define GL_CTRL2 0x08 -#define GL_GLOBAL_ALPHA_SHIFT 8 -#define GL_GLOBAL_ALPHA_MASK (0xff << GL_GLOBAL_ALPHA_SHIFT) -#define GL_CTRL3 0x0c -#define GL_SCALER_BYPASS_MODE BIT(0) -#define GL_STRIDE 0x18 -#define GL_ADDR 0x1c -#define GL_SRC_SIZE 0x38 -#define GL_SRC_W_SHIFT 16 -#define GL_SRC_W_MASK (0x3fff << GL_SRC_W_SHIFT) -#define GL_SRC_H_SHIFT 0 -#define GL_SRC_H_MASK (0x3fff << GL_SRC_H_SHIFT) -#define GL_POS_START 0x9c -#define GL_POS_END 0xa0 -#define GL_POS_X_SHIFT 16 -#define GL_POS_X_MASK (0x1fff << GL_POS_X_SHIFT) -#define GL_POS_Y_SHIFT 0 -#define GL_POS_Y_MASK (0x1fff << GL_POS_Y_SHIFT) - -#define GL_SRC_W(x) (((x) << GL_SRC_W_SHIFT) & GL_SRC_W_MASK) -#define GL_SRC_H(x) (((x) << GL_SRC_H_SHIFT) & GL_SRC_H_MASK) -#define GL_POS_X(x) (((x) << GL_POS_X_SHIFT) & GL_POS_X_MASK) -#define GL_POS_Y(x) (((x) << GL_POS_Y_SHIFT) & GL_POS_Y_MASK) - -/* VL registers */ -#define VL_CTRL0 0x00 -#define VL_UPDATE BIT(3) -#define VL_CTRL1 0x04 -#define VL_YUV420_PLANAR BIT(5) -#define VL_YUV422_SHIFT 3 -#define VL_YUV422_YUYV (0 << VL_YUV422_SHIFT) -#define VL_YUV422_YVYU (1 << VL_YUV422_SHIFT) -#define VL_YUV422_UYVY (2 << VL_YUV422_SHIFT) -#define VL_YUV422_VYUY (3 << VL_YUV422_SHIFT) -#define VL_FMT_YUV420 0 -#define VL_FMT_YUV422 1 -#define VL_FMT_YUV420_P010 2 -#define VL_FMT_YUV420_HANTRO 3 -#define VL_FMT_YUV444_8BIT 4 -#define VL_FMT_YUV444_10BIT 5 -#define VL_CTRL2 0x08 -#define VL_SCALER_BYPASS_MODE BIT(0) -#define VL_STRIDE 0x0c -#define LUMA_STRIDE_SHIFT 16 -#define LUMA_STRIDE_MASK (0xffff << LUMA_STRIDE_SHIFT) -#define CHROMA_STRIDE_SHIFT 0 -#define CHROMA_STRIDE_MASK (0xffff << CHROMA_STRIDE_SHIFT) -#define VL_SRC_SIZE 0x10 -#define VL_Y 0x14 -#define VL_POS_START 0x30 -#define VL_POS_END 0x34 - -#define LUMA_STRIDE(x) (((x) << LUMA_STRIDE_SHIFT) & LUMA_STRIDE_MASK) -#define CHROMA_STRIDE(x) (((x) << CHROMA_STRIDE_SHIFT) & CHROMA_STRIDE_MASK) - -/* RSZ registers */ -#define RSZ_SRC_CFG 0x00 -#define RSZ_DEST_CFG 0x04 -#define RSZ_ENABLE_CFG 0x14 - -#define RSZ_VL_LUMA_HOR 0x08 -#define RSZ_VL_LUMA_VER 0x0c -#define RSZ_VL_CHROMA_HOR 0x10 -#define RSZ_VL_CHROMA_VER 0x14 -#define RSZ_VL_CTRL_CFG 0x18 -#define RSZ_VL_FMT_SHIFT 3 -#define RSZ_VL_FMT_MASK (0x3 << RSZ_VL_FMT_SHIFT) -#define RSZ_VL_FMT_YCBCR420 (0x0 << RSZ_VL_FMT_SHIFT) -#define RSZ_VL_FMT_YCBCR422 (0x1 << RSZ_VL_FMT_SHIFT) -#define RSZ_VL_FMT_YCBCR444 (0x2 << RSZ_VL_FMT_SHIFT) -#define RSZ_VL_ENABLE_CFG 0x1c - -#define RSZ_VER_SHIFT 16 -#define RSZ_VER_MASK (0xffff << RSZ_VER_SHIFT) -#define RSZ_HOR_SHIFT 0 -#define RSZ_HOR_MASK (0xffff << RSZ_HOR_SHIFT) - -#define RSZ_VER(x) (((x) << RSZ_VER_SHIFT) & RSZ_VER_MASK) -#define RSZ_HOR(x) (((x) << RSZ_HOR_SHIFT) & RSZ_HOR_MASK) - -#define RSZ_DATA_STEP_SHIFT 16 -#define RSZ_DATA_STEP_MASK (0xffff << RSZ_DATA_STEP_SHIFT) -#define RSZ_PARA_STEP_SHIFT 0 -#define RSZ_PARA_STEP_MASK (0xffff << RSZ_PARA_STEP_SHIFT) - -#define RSZ_DATA_STEP(x) (((x) << RSZ_DATA_STEP_SHIFT) & RSZ_DATA_STEP_MASK) -#define RSZ_PARA_STEP(x) (((x) << RSZ_PARA_STEP_SHIFT) & RSZ_PARA_STEP_MASK) - -/* HBSC registers */ -#define HBSC_SATURATION 0x00 -#define HBSC_HUE 0x04 -#define HBSC_BRIGHT 0x08 -#define HBSC_CONTRAST 0x0c -#define 
HBSC_THRESHOLD_COL1 0x10 -#define HBSC_THRESHOLD_COL2 0x14 -#define HBSC_THRESHOLD_COL3 0x18 -#define HBSC_CTRL0 0x28 -#define HBSC_CTRL_EN BIT(2) - -#endif /* __ZX_PLANE_REGS_H__ */ diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c deleted file mode 100644 index d8a89ba383bc..000000000000 --- a/drivers/gpu/drm/zte/zx_tvenc.c +++ /dev/null @@ -1,400 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright 2017 Linaro Ltd. - * Copyright 2017 ZTE Corporation. - */ - -#include <linux/clk.h> -#include <linux/component.h> -#include <linux/mfd/syscon.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/regmap.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_print.h> -#include <drm/drm_probe_helper.h> -#include <drm/drm_simple_kms_helper.h> - -#include "zx_drm_drv.h" -#include "zx_tvenc_regs.h" -#include "zx_vou.h" - -struct zx_tvenc_pwrctrl { - struct regmap *regmap; - u32 reg; - u32 mask; -}; - -struct zx_tvenc { - struct drm_connector connector; - struct drm_encoder encoder; - struct device *dev; - void __iomem *mmio; - const struct vou_inf *inf; - struct zx_tvenc_pwrctrl pwrctrl; -}; - -#define to_zx_tvenc(x) container_of(x, struct zx_tvenc, x) - -struct zx_tvenc_mode { - struct drm_display_mode mode; - u32 video_info; - u32 video_res; - u32 field1_param; - u32 field2_param; - u32 burst_line_odd1; - u32 burst_line_even1; - u32 burst_line_odd2; - u32 burst_line_even2; - u32 line_timing_param; - u32 weight_value; - u32 blank_black_level; - u32 burst_level; - u32 control_param; - u32 sub_carrier_phase1; - u32 phase_line_incr_cvbs; -}; - -/* - * The CRM cannot directly provide a suitable frequency, and we have to - * ask a multiplied rate from CRM and use the divider in VOU to get the - * desired one. 
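- * For the PAL/NTSC modes below, for example, the nominal pixel clock
- * is 13.5 MHz: the mode table asks CRM for 13500 * 4 = 54000 kHz, and
- * zx_tvenc_encoder_mode_set() programs VOU_DIV_INF with VOU_DIV_4 to
- * bring it back down.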
- */ -#define TVENC_CLOCK_MULTIPLIER 4 - -static const struct zx_tvenc_mode tvenc_mode_pal = { - .mode = { - .clock = 13500 * TVENC_CLOCK_MULTIPLIER, - .hdisplay = 720, - .hsync_start = 720 + 12, - .hsync_end = 720 + 12 + 2, - .htotal = 720 + 12 + 2 + 130, - .vdisplay = 576, - .vsync_start = 576 + 2, - .vsync_end = 576 + 2 + 2, - .vtotal = 576 + 2 + 2 + 20, - .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | - DRM_MODE_FLAG_INTERLACE, - }, - .video_info = 0x00040040, - .video_res = 0x05a9c760, - .field1_param = 0x0004d416, - .field2_param = 0x0009b94f, - .burst_line_odd1 = 0x0004d406, - .burst_line_even1 = 0x0009b53e, - .burst_line_odd2 = 0x0004d805, - .burst_line_even2 = 0x0009b93f, - .line_timing_param = 0x06a96fdf, - .weight_value = 0x00c188a0, - .blank_black_level = 0x0000fcfc, - .burst_level = 0x00001595, - .control_param = 0x00000001, - .sub_carrier_phase1 = 0x1504c566, - .phase_line_incr_cvbs = 0xc068db8c, -}; - -static const struct zx_tvenc_mode tvenc_mode_ntsc = { - .mode = { - .clock = 13500 * TVENC_CLOCK_MULTIPLIER, - .hdisplay = 720, - .hsync_start = 720 + 16, - .hsync_end = 720 + 16 + 2, - .htotal = 720 + 16 + 2 + 120, - .vdisplay = 480, - .vsync_start = 480 + 3, - .vsync_end = 480 + 3 + 2, - .vtotal = 480 + 3 + 2 + 17, - .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | - DRM_MODE_FLAG_INTERLACE, - }, - .video_info = 0x00040080, - .video_res = 0x05a8375a, - .field1_param = 0x00041817, - .field2_param = 0x0008351e, - .burst_line_odd1 = 0x00041006, - .burst_line_even1 = 0x0008290d, - .burst_line_odd2 = 0x00000000, - .burst_line_even2 = 0x00000000, - .line_timing_param = 0x06a8ef9e, - .weight_value = 0x00b68197, - .blank_black_level = 0x0000f0f0, - .burst_level = 0x0000009c, - .control_param = 0x00000001, - .sub_carrier_phase1 = 0x10f83e10, - .phase_line_incr_cvbs = 0x80000000, -}; - -static const struct zx_tvenc_mode *tvenc_modes[] = { - &tvenc_mode_pal, - &tvenc_mode_ntsc, -}; - -static const struct zx_tvenc_mode * -zx_tvenc_find_zmode(struct drm_display_mode *mode) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) { - const struct zx_tvenc_mode *zmode = tvenc_modes[i]; - - if (drm_mode_equal(mode, &zmode->mode)) - return zmode; - } - - return NULL; -} - -static void zx_tvenc_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adj_mode) -{ - struct zx_tvenc *tvenc = to_zx_tvenc(encoder); - const struct zx_tvenc_mode *zmode; - struct vou_div_config configs[] = { - { VOU_DIV_INF, VOU_DIV_4 }, - { VOU_DIV_TVENC, VOU_DIV_1 }, - { VOU_DIV_LAYER, VOU_DIV_2 }, - }; - - zx_vou_config_dividers(encoder->crtc, configs, ARRAY_SIZE(configs)); - - zmode = zx_tvenc_find_zmode(mode); - if (!zmode) { - DRM_DEV_ERROR(tvenc->dev, "failed to find zmode\n"); - return; - } - - zx_writel(tvenc->mmio + VENC_VIDEO_INFO, zmode->video_info); - zx_writel(tvenc->mmio + VENC_VIDEO_RES, zmode->video_res); - zx_writel(tvenc->mmio + VENC_FIELD1_PARAM, zmode->field1_param); - zx_writel(tvenc->mmio + VENC_FIELD2_PARAM, zmode->field2_param); - zx_writel(tvenc->mmio + VENC_LINE_O_1, zmode->burst_line_odd1); - zx_writel(tvenc->mmio + VENC_LINE_E_1, zmode->burst_line_even1); - zx_writel(tvenc->mmio + VENC_LINE_O_2, zmode->burst_line_odd2); - zx_writel(tvenc->mmio + VENC_LINE_E_2, zmode->burst_line_even2); - zx_writel(tvenc->mmio + VENC_LINE_TIMING_PARAM, - zmode->line_timing_param); - zx_writel(tvenc->mmio + VENC_WEIGHT_VALUE, zmode->weight_value); - zx_writel(tvenc->mmio + VENC_BLANK_BLACK_LEVEL, - zmode->blank_black_level); - 
zx_writel(tvenc->mmio + VENC_BURST_LEVEL, zmode->burst_level); - zx_writel(tvenc->mmio + VENC_CONTROL_PARAM, zmode->control_param); - zx_writel(tvenc->mmio + VENC_SUB_CARRIER_PHASE1, - zmode->sub_carrier_phase1); - zx_writel(tvenc->mmio + VENC_PHASE_LINE_INCR_CVBS, - zmode->phase_line_incr_cvbs); -} - -static void zx_tvenc_encoder_enable(struct drm_encoder *encoder) -{ - struct zx_tvenc *tvenc = to_zx_tvenc(encoder); - struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl; - - /* Set bit to power up TVENC DAC */ - regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, - pwrctrl->mask); - - vou_inf_enable(VOU_TV_ENC, encoder->crtc); - - zx_writel(tvenc->mmio + VENC_ENABLE, 1); -} - -static void zx_tvenc_encoder_disable(struct drm_encoder *encoder) -{ - struct zx_tvenc *tvenc = to_zx_tvenc(encoder); - struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl; - - zx_writel(tvenc->mmio + VENC_ENABLE, 0); - - vou_inf_disable(VOU_TV_ENC, encoder->crtc); - - /* Clear bit to power down TVENC DAC */ - regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, 0); -} - -static const struct drm_encoder_helper_funcs zx_tvenc_encoder_helper_funcs = { - .enable = zx_tvenc_encoder_enable, - .disable = zx_tvenc_encoder_disable, - .mode_set = zx_tvenc_encoder_mode_set, -}; - -static int zx_tvenc_connector_get_modes(struct drm_connector *connector) -{ - struct zx_tvenc *tvenc = to_zx_tvenc(connector); - struct device *dev = tvenc->dev; - int i; - - for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) { - const struct zx_tvenc_mode *zmode = tvenc_modes[i]; - struct drm_display_mode *mode; - - mode = drm_mode_duplicate(connector->dev, &zmode->mode); - if (!mode) { - DRM_DEV_ERROR(dev, "failed to duplicate drm mode\n"); - continue; - } - - drm_mode_set_name(mode); - drm_mode_probed_add(connector, mode); - } - - return i; -} - -static enum drm_mode_status -zx_tvenc_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct zx_tvenc *tvenc = to_zx_tvenc(connector); - const struct zx_tvenc_mode *zmode; - - zmode = zx_tvenc_find_zmode(mode); - if (!zmode) { - DRM_DEV_ERROR(tvenc->dev, "unsupported mode: %s\n", mode->name); - return MODE_NOMODE; - } - - return MODE_OK; -} - -static struct drm_connector_helper_funcs zx_tvenc_connector_helper_funcs = { - .get_modes = zx_tvenc_connector_get_modes, - .mode_valid = zx_tvenc_connector_mode_valid, -}; - -static const struct drm_connector_funcs zx_tvenc_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc) -{ - struct drm_encoder *encoder = &tvenc->encoder; - struct drm_connector *connector = &tvenc->connector; - - /* - * The tvenc is designed to use aux channel, as there is a deflicker - * block for the channel. 
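-	 * That is also why possible_crtcs is hard-coded to BIT(1) below:
-	 * the encoder can only be driven by the aux CRTC.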
- */ - encoder->possible_crtcs = BIT(1); - - drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TVDAC); - drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs); - - connector->interlace_allowed = true; - - drm_connector_init(drm, connector, &zx_tvenc_connector_funcs, - DRM_MODE_CONNECTOR_Composite); - drm_connector_helper_add(connector, &zx_tvenc_connector_helper_funcs); - - drm_connector_attach_encoder(connector, encoder); - - return 0; -} - -static int zx_tvenc_pwrctrl_init(struct zx_tvenc *tvenc) -{ - struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl; - struct device *dev = tvenc->dev; - struct of_phandle_args out_args; - struct regmap *regmap; - int ret; - - ret = of_parse_phandle_with_fixed_args(dev->of_node, - "zte,tvenc-power-control", 2, 0, &out_args); - if (ret) - return ret; - - regmap = syscon_node_to_regmap(out_args.np); - if (IS_ERR(regmap)) { - ret = PTR_ERR(regmap); - goto out; - } - - pwrctrl->regmap = regmap; - pwrctrl->reg = out_args.args[0]; - pwrctrl->mask = out_args.args[1]; - -out: - of_node_put(out_args.np); - return ret; -} - -static int zx_tvenc_bind(struct device *dev, struct device *master, void *data) -{ - struct platform_device *pdev = to_platform_device(dev); - struct drm_device *drm = data; - struct resource *res; - struct zx_tvenc *tvenc; - int ret; - - tvenc = devm_kzalloc(dev, sizeof(*tvenc), GFP_KERNEL); - if (!tvenc) - return -ENOMEM; - - tvenc->dev = dev; - dev_set_drvdata(dev, tvenc); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - tvenc->mmio = devm_ioremap_resource(dev, res); - if (IS_ERR(tvenc->mmio)) { - ret = PTR_ERR(tvenc->mmio); - DRM_DEV_ERROR(dev, "failed to remap tvenc region: %d\n", ret); - return ret; - } - - ret = zx_tvenc_pwrctrl_init(tvenc); - if (ret) { - DRM_DEV_ERROR(dev, "failed to init power control: %d\n", ret); - return ret; - } - - ret = zx_tvenc_register(drm, tvenc); - if (ret) { - DRM_DEV_ERROR(dev, "failed to register tvenc: %d\n", ret); - return ret; - } - - return 0; -} - -static void zx_tvenc_unbind(struct device *dev, struct device *master, - void *data) -{ - /* Nothing to do */ -} - -static const struct component_ops zx_tvenc_component_ops = { - .bind = zx_tvenc_bind, - .unbind = zx_tvenc_unbind, -}; - -static int zx_tvenc_probe(struct platform_device *pdev) -{ - return component_add(&pdev->dev, &zx_tvenc_component_ops); -} - -static int zx_tvenc_remove(struct platform_device *pdev) -{ - component_del(&pdev->dev, &zx_tvenc_component_ops); - return 0; -} - -static const struct of_device_id zx_tvenc_of_match[] = { - { .compatible = "zte,zx296718-tvenc", }, - { /* end */ }, -}; -MODULE_DEVICE_TABLE(of, zx_tvenc_of_match); - -struct platform_driver zx_tvenc_driver = { - .probe = zx_tvenc_probe, - .remove = zx_tvenc_remove, - .driver = { - .name = "zx-tvenc", - .of_match_table = zx_tvenc_of_match, - }, -}; diff --git a/drivers/gpu/drm/zte/zx_tvenc_regs.h b/drivers/gpu/drm/zte/zx_tvenc_regs.h deleted file mode 100644 index 40f033109374..000000000000 --- a/drivers/gpu/drm/zte/zx_tvenc_regs.h +++ /dev/null @@ -1,27 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2017 Linaro Ltd. - * Copyright 2017 ZTE Corporation. 
- */ - -#ifndef __ZX_TVENC_REGS_H__ -#define __ZX_TVENC_REGS_H__ - -#define VENC_VIDEO_INFO 0x04 -#define VENC_VIDEO_RES 0x08 -#define VENC_FIELD1_PARAM 0x10 -#define VENC_FIELD2_PARAM 0x14 -#define VENC_LINE_O_1 0x18 -#define VENC_LINE_E_1 0x1c -#define VENC_LINE_O_2 0x20 -#define VENC_LINE_E_2 0x24 -#define VENC_LINE_TIMING_PARAM 0x28 -#define VENC_WEIGHT_VALUE 0x2c -#define VENC_BLANK_BLACK_LEVEL 0x30 -#define VENC_BURST_LEVEL 0x34 -#define VENC_CONTROL_PARAM 0x3c -#define VENC_SUB_CARRIER_PHASE1 0x40 -#define VENC_PHASE_LINE_INCR_CVBS 0x48 -#define VENC_ENABLE 0xa8 - -#endif /* __ZX_TVENC_REGS_H__ */ diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c deleted file mode 100644 index 0f9bbb7e3b8d..000000000000 --- a/drivers/gpu/drm/zte/zx_vga.c +++ /dev/null @@ -1,527 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2017 Sanechips Technology Co., Ltd. - * Copyright 2017 Linaro Ltd. - */ - -#include <linux/clk.h> -#include <linux/component.h> -#include <linux/mfd/syscon.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/regmap.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_print.h> -#include <drm/drm_probe_helper.h> -#include <drm/drm_simple_kms_helper.h> - -#include "zx_drm_drv.h" -#include "zx_vga_regs.h" -#include "zx_vou.h" - -struct zx_vga_pwrctrl { - struct regmap *regmap; - u32 reg; - u32 mask; -}; - -struct zx_vga_i2c { - struct i2c_adapter adap; - struct mutex lock; -}; - -struct zx_vga { - struct drm_connector connector; - struct drm_encoder encoder; - struct zx_vga_i2c *ddc; - struct device *dev; - void __iomem *mmio; - struct clk *i2c_wclk; - struct zx_vga_pwrctrl pwrctrl; - struct completion complete; - bool connected; -}; - -#define to_zx_vga(x) container_of(x, struct zx_vga, x) - -static void zx_vga_encoder_enable(struct drm_encoder *encoder) -{ - struct zx_vga *vga = to_zx_vga(encoder); - struct zx_vga_pwrctrl *pwrctrl = &vga->pwrctrl; - - /* Set bit to power up VGA DACs */ - regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, - pwrctrl->mask); - - vou_inf_enable(VOU_VGA, encoder->crtc); -} - -static void zx_vga_encoder_disable(struct drm_encoder *encoder) -{ - struct zx_vga *vga = to_zx_vga(encoder); - struct zx_vga_pwrctrl *pwrctrl = &vga->pwrctrl; - - vou_inf_disable(VOU_VGA, encoder->crtc); - - /* Clear bit to power down VGA DACs */ - regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, 0); -} - -static const struct drm_encoder_helper_funcs zx_vga_encoder_helper_funcs = { - .enable = zx_vga_encoder_enable, - .disable = zx_vga_encoder_disable, -}; - -static int zx_vga_connector_get_modes(struct drm_connector *connector) -{ - struct zx_vga *vga = to_zx_vga(connector); - struct edid *edid; - int ret; - - /* - * Clear both detection bits to switch I2C bus from device - * detecting to EDID reading. - */ - zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, 0); - - edid = drm_get_edid(connector, &vga->ddc->adap); - if (!edid) { - /* - * If EDID reading fails, we set the device state into - * disconnected. Locking is not required here, since the - * VGA_AUTO_DETECT_SEL register write in irq handler cannot - * be triggered when both detection bits are cleared as above. - */ - zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, - VGA_DETECT_SEL_NO_DEVICE); - vga->connected = false; - return 0; - } - - /* - * As edid reading succeeds, device must be connected, so we set - * up detection bit for unplug interrupt here. 
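-	 * The interrupt handler deliberately defers this re-arming to us;
-	 * see the comment in zx_vga_irq_handler().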
- */ - zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, VGA_DETECT_SEL_HAS_DEVICE); - - drm_connector_update_edid_property(connector, edid); - ret = drm_add_edid_modes(connector, edid); - kfree(edid); - - return ret; -} - -static enum drm_mode_status -zx_vga_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - return MODE_OK; -} - -static struct drm_connector_helper_funcs zx_vga_connector_helper_funcs = { - .get_modes = zx_vga_connector_get_modes, - .mode_valid = zx_vga_connector_mode_valid, -}; - -static enum drm_connector_status -zx_vga_connector_detect(struct drm_connector *connector, bool force) -{ - struct zx_vga *vga = to_zx_vga(connector); - - return vga->connected ? connector_status_connected : - connector_status_disconnected; -} - -static const struct drm_connector_funcs zx_vga_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .detect = zx_vga_connector_detect, - .destroy = drm_connector_cleanup, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga) -{ - struct drm_encoder *encoder = &vga->encoder; - struct drm_connector *connector = &vga->connector; - struct device *dev = vga->dev; - int ret; - - encoder->possible_crtcs = VOU_CRTC_MASK; - - ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DAC); - if (ret) { - DRM_DEV_ERROR(dev, "failed to init encoder: %d\n", ret); - return ret; - } - - drm_encoder_helper_add(encoder, &zx_vga_encoder_helper_funcs); - - vga->connector.polled = DRM_CONNECTOR_POLL_HPD; - - ret = drm_connector_init_with_ddc(drm, connector, - &zx_vga_connector_funcs, - DRM_MODE_CONNECTOR_VGA, - &vga->ddc->adap); - if (ret) { - DRM_DEV_ERROR(dev, "failed to init connector: %d\n", ret); - goto clean_encoder; - } - - drm_connector_helper_add(connector, &zx_vga_connector_helper_funcs); - - ret = drm_connector_attach_encoder(connector, encoder); - if (ret) { - DRM_DEV_ERROR(dev, "failed to attach encoder: %d\n", ret); - goto clean_connector; - } - - return 0; - -clean_connector: - drm_connector_cleanup(connector); -clean_encoder: - drm_encoder_cleanup(encoder); - return ret; -} - -static int zx_vga_pwrctrl_init(struct zx_vga *vga) -{ - struct zx_vga_pwrctrl *pwrctrl = &vga->pwrctrl; - struct device *dev = vga->dev; - struct of_phandle_args out_args; - struct regmap *regmap; - int ret; - - ret = of_parse_phandle_with_fixed_args(dev->of_node, - "zte,vga-power-control", 2, 0, &out_args); - if (ret) - return ret; - - regmap = syscon_node_to_regmap(out_args.np); - if (IS_ERR(regmap)) { - ret = PTR_ERR(regmap); - goto out; - } - - pwrctrl->regmap = regmap; - pwrctrl->reg = out_args.args[0]; - pwrctrl->mask = out_args.args[1]; - -out: - of_node_put(out_args.np); - return ret; -} - -static int zx_vga_i2c_read(struct zx_vga *vga, struct i2c_msg *msg) -{ - int len = msg->len; - u8 *buf = msg->buf; - u32 offset = 0; - int i; - - reinit_completion(&vga->complete); - - /* Select combo write */ - zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_COMBO, VGA_CMD_COMBO); - zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_RW, 0); - - while (len > 0) { - u32 cnt; - - /* Clear RX FIFO */ - zx_writel_mask(vga->mmio + VGA_RXF_CTRL, VGA_RX_FIFO_CLEAR, - VGA_RX_FIFO_CLEAR); - - /* Data offset to read from */ - zx_writel(vga->mmio + VGA_SUB_ADDR, offset); - - /* Kick off the transfer */ - zx_writel_mask(vga->mmio + 
VGA_CMD_CFG, VGA_CMD_TRANS, - VGA_CMD_TRANS); - - if (!wait_for_completion_timeout(&vga->complete, - msecs_to_jiffies(1000))) { - DRM_DEV_ERROR(vga->dev, "transfer timeout\n"); - return -ETIMEDOUT; - } - - cnt = zx_readl(vga->mmio + VGA_RXF_STATUS); - cnt = (cnt & VGA_RXF_COUNT_MASK) >> VGA_RXF_COUNT_SHIFT; - /* FIFO status may report more data than we need to read */ - cnt = min_t(u32, len, cnt); - - for (i = 0; i < cnt; i++) - *buf++ = zx_readl(vga->mmio + VGA_DATA); - - len -= cnt; - offset += cnt; - } - - return 0; -} - -static int zx_vga_i2c_write(struct zx_vga *vga, struct i2c_msg *msg) -{ - /* - * The DDC I2C adapter is only for reading EDID data, so we assume - * that the write to this adapter must be the EDID data offset. - */ - if ((msg->len != 1) || ((msg->addr != DDC_ADDR))) - return -EINVAL; - - /* Hardware will take care of the slave address shifting */ - zx_writel(vga->mmio + VGA_DEVICE_ADDR, msg->addr); - - return 0; -} - -static int zx_vga_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, - int num) -{ - struct zx_vga *vga = i2c_get_adapdata(adap); - struct zx_vga_i2c *ddc = vga->ddc; - int ret = 0; - int i; - - mutex_lock(&ddc->lock); - - for (i = 0; i < num; i++) { - if (msgs[i].flags & I2C_M_RD) - ret = zx_vga_i2c_read(vga, &msgs[i]); - else - ret = zx_vga_i2c_write(vga, &msgs[i]); - - if (ret < 0) - break; - } - - if (!ret) - ret = num; - - mutex_unlock(&ddc->lock); - - return ret; -} - -static u32 zx_vga_i2c_func(struct i2c_adapter *adapter) -{ - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; -} - -static const struct i2c_algorithm zx_vga_algorithm = { - .master_xfer = zx_vga_i2c_xfer, - .functionality = zx_vga_i2c_func, -}; - -static int zx_vga_ddc_register(struct zx_vga *vga) -{ - struct device *dev = vga->dev; - struct i2c_adapter *adap; - struct zx_vga_i2c *ddc; - int ret; - - ddc = devm_kzalloc(dev, sizeof(*ddc), GFP_KERNEL); - if (!ddc) - return -ENOMEM; - - vga->ddc = ddc; - mutex_init(&ddc->lock); - - adap = &ddc->adap; - adap->owner = THIS_MODULE; - adap->class = I2C_CLASS_DDC; - adap->dev.parent = dev; - adap->algo = &zx_vga_algorithm; - snprintf(adap->name, sizeof(adap->name), "zx vga i2c"); - - ret = i2c_add_adapter(adap); - if (ret) { - DRM_DEV_ERROR(dev, "failed to add I2C adapter: %d\n", ret); - return ret; - } - - i2c_set_adapdata(adap, vga); - - return 0; -} - -static irqreturn_t zx_vga_irq_thread(int irq, void *dev_id) -{ - struct zx_vga *vga = dev_id; - - drm_helper_hpd_irq_event(vga->connector.dev); - - return IRQ_HANDLED; -} - -static irqreturn_t zx_vga_irq_handler(int irq, void *dev_id) -{ - struct zx_vga *vga = dev_id; - u32 status; - - status = zx_readl(vga->mmio + VGA_I2C_STATUS); - - /* Clear interrupt status */ - zx_writel_mask(vga->mmio + VGA_I2C_STATUS, VGA_CLEAR_IRQ, - VGA_CLEAR_IRQ); - - if (status & VGA_DEVICE_CONNECTED) { - /* - * Since VGA_DETECT_SEL bits need to be reset for switching DDC - * bus from device detection to EDID read, rather than setting - * up HAS_DEVICE bit here, we need to do that in .get_modes - * hook for unplug detecting after EDID read succeeds. 
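-	 * Only the software state is flipped here; zx_vga_connector_detect()
-	 * reports vga->connected, and the threaded handler propagates the
-	 * change with drm_helper_hpd_irq_event().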
- */ - vga->connected = true; - return IRQ_WAKE_THREAD; - } - - if (status & VGA_DEVICE_DISCONNECTED) { - zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, - VGA_DETECT_SEL_NO_DEVICE); - vga->connected = false; - return IRQ_WAKE_THREAD; - } - - if (status & VGA_TRANS_DONE) { - complete(&vga->complete); - return IRQ_HANDLED; - } - - return IRQ_NONE; -} - -static void zx_vga_hw_init(struct zx_vga *vga) -{ - unsigned long ref = clk_get_rate(vga->i2c_wclk); - int div; - - /* - * Set up I2C fast speed divider per formula below to get 400kHz. - * scl = ref / ((div + 1) * 4) - */ - div = DIV_ROUND_UP(ref / 1000, 400 * 4) - 1; - zx_writel(vga->mmio + VGA_CLK_DIV_FS, div); - - /* Set up device detection */ - zx_writel(vga->mmio + VGA_AUTO_DETECT_PARA, 0x80); - zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, VGA_DETECT_SEL_NO_DEVICE); - - /* - * We need to poke monitor via DDC bus to get connection irq - * start working. - */ - zx_writel(vga->mmio + VGA_DEVICE_ADDR, DDC_ADDR); - zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_TRANS, VGA_CMD_TRANS); -} - -static int zx_vga_bind(struct device *dev, struct device *master, void *data) -{ - struct platform_device *pdev = to_platform_device(dev); - struct drm_device *drm = data; - struct resource *res; - struct zx_vga *vga; - int irq; - int ret; - - vga = devm_kzalloc(dev, sizeof(*vga), GFP_KERNEL); - if (!vga) - return -ENOMEM; - - vga->dev = dev; - dev_set_drvdata(dev, vga); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - vga->mmio = devm_ioremap_resource(dev, res); - if (IS_ERR(vga->mmio)) - return PTR_ERR(vga->mmio); - - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - vga->i2c_wclk = devm_clk_get(dev, "i2c_wclk"); - if (IS_ERR(vga->i2c_wclk)) { - ret = PTR_ERR(vga->i2c_wclk); - DRM_DEV_ERROR(dev, "failed to get i2c_wclk: %d\n", ret); - return ret; - } - - ret = zx_vga_pwrctrl_init(vga); - if (ret) { - DRM_DEV_ERROR(dev, "failed to init power control: %d\n", ret); - return ret; - } - - ret = zx_vga_ddc_register(vga); - if (ret) { - DRM_DEV_ERROR(dev, "failed to register ddc: %d\n", ret); - return ret; - } - - ret = zx_vga_register(drm, vga); - if (ret) { - DRM_DEV_ERROR(dev, "failed to register vga: %d\n", ret); - return ret; - } - - init_completion(&vga->complete); - - ret = devm_request_threaded_irq(dev, irq, zx_vga_irq_handler, - zx_vga_irq_thread, IRQF_SHARED, - dev_name(dev), vga); - if (ret) { - DRM_DEV_ERROR(dev, "failed to request threaded irq: %d\n", ret); - return ret; - } - - ret = clk_prepare_enable(vga->i2c_wclk); - if (ret) - return ret; - - zx_vga_hw_init(vga); - - return 0; -} - -static void zx_vga_unbind(struct device *dev, struct device *master, - void *data) -{ - struct zx_vga *vga = dev_get_drvdata(dev); - - clk_disable_unprepare(vga->i2c_wclk); -} - -static const struct component_ops zx_vga_component_ops = { - .bind = zx_vga_bind, - .unbind = zx_vga_unbind, -}; - -static int zx_vga_probe(struct platform_device *pdev) -{ - return component_add(&pdev->dev, &zx_vga_component_ops); -} - -static int zx_vga_remove(struct platform_device *pdev) -{ - component_del(&pdev->dev, &zx_vga_component_ops); - return 0; -} - -static const struct of_device_id zx_vga_of_match[] = { - { .compatible = "zte,zx296718-vga", }, - { /* end */ }, -}; -MODULE_DEVICE_TABLE(of, zx_vga_of_match); - -struct platform_driver zx_vga_driver = { - .probe = zx_vga_probe, - .remove = zx_vga_remove, - .driver = { - .name = "zx-vga", - .of_match_table = zx_vga_of_match, - }, -}; diff --git a/drivers/gpu/drm/zte/zx_vga_regs.h 
b/drivers/gpu/drm/zte/zx_vga_regs.h deleted file mode 100644 index 1e8825ae70a5..000000000000 --- a/drivers/gpu/drm/zte/zx_vga_regs.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2017 Sanechips Technology Co., Ltd. - * Copyright 2017 Linaro Ltd. - */ - -#ifndef __ZX_VGA_REGS_H__ -#define __ZX_VGA_REGS_H__ - -#define VGA_CMD_CFG 0x04 -#define VGA_CMD_TRANS BIT(6) -#define VGA_CMD_COMBO BIT(5) -#define VGA_CMD_RW BIT(4) -#define VGA_SUB_ADDR 0x0c -#define VGA_DEVICE_ADDR 0x10 -#define VGA_CLK_DIV_FS 0x14 -#define VGA_RXF_CTRL 0x20 -#define VGA_RX_FIFO_CLEAR BIT(7) -#define VGA_DATA 0x24 -#define VGA_I2C_STATUS 0x28 -#define VGA_DEVICE_DISCONNECTED BIT(7) -#define VGA_DEVICE_CONNECTED BIT(6) -#define VGA_CLEAR_IRQ BIT(4) -#define VGA_TRANS_DONE BIT(0) -#define VGA_RXF_STATUS 0x30 -#define VGA_RXF_COUNT_SHIFT 2 -#define VGA_RXF_COUNT_MASK GENMASK(7, 2) -#define VGA_AUTO_DETECT_PARA 0x34 -#define VGA_AUTO_DETECT_SEL 0x38 -#define VGA_DETECT_SEL_HAS_DEVICE BIT(1) -#define VGA_DETECT_SEL_NO_DEVICE BIT(0) - -#endif /* __ZX_VGA_REGS_H__ */ diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c deleted file mode 100644 index 904f62f3bfc1..000000000000 --- a/drivers/gpu/drm/zte/zx_vou.c +++ /dev/null @@ -1,921 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright 2016 Linaro Ltd. - * Copyright 2016 ZTE Corporation. - */ - -#include <linux/clk.h> -#include <linux/component.h> -#include <linux/module.h> -#include <linux/of_address.h> -#include <linux/platform_device.h> - -#include <video/videomode.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_crtc.h> -#include <drm/drm_fb_cma_helper.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_gem_cma_helper.h> -#include <drm/drm_of.h> -#include <drm/drm_plane_helper.h> -#include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> - -#include "zx_common_regs.h" -#include "zx_drm_drv.h" -#include "zx_plane.h" -#include "zx_vou.h" -#include "zx_vou_regs.h" - -#define GL_NUM 2 -#define VL_NUM 3 - -enum vou_chn_type { - VOU_CHN_MAIN, - VOU_CHN_AUX, -}; - -struct zx_crtc_regs { - u32 fir_active; - u32 fir_htiming; - u32 fir_vtiming; - u32 sec_vtiming; - u32 timing_shift; - u32 timing_pi_shift; -}; - -static const struct zx_crtc_regs main_crtc_regs = { - .fir_active = FIR_MAIN_ACTIVE, - .fir_htiming = FIR_MAIN_H_TIMING, - .fir_vtiming = FIR_MAIN_V_TIMING, - .sec_vtiming = SEC_MAIN_V_TIMING, - .timing_shift = TIMING_MAIN_SHIFT, - .timing_pi_shift = TIMING_MAIN_PI_SHIFT, -}; - -static const struct zx_crtc_regs aux_crtc_regs = { - .fir_active = FIR_AUX_ACTIVE, - .fir_htiming = FIR_AUX_H_TIMING, - .fir_vtiming = FIR_AUX_V_TIMING, - .sec_vtiming = SEC_AUX_V_TIMING, - .timing_shift = TIMING_AUX_SHIFT, - .timing_pi_shift = TIMING_AUX_PI_SHIFT, -}; - -struct zx_crtc_bits { - u32 polarity_mask; - u32 polarity_shift; - u32 int_frame_mask; - u32 tc_enable; - u32 sec_vactive_shift; - u32 sec_vactive_mask; - u32 interlace_select; - u32 pi_enable; - u32 div_vga_shift; - u32 div_pic_shift; - u32 div_tvenc_shift; - u32 div_hdmi_pnx_shift; - u32 div_hdmi_shift; - u32 div_inf_shift; - u32 div_layer_shift; -}; - -static const struct zx_crtc_bits main_crtc_bits = { - .polarity_mask = MAIN_POL_MASK, - .polarity_shift = MAIN_POL_SHIFT, - .int_frame_mask = TIMING_INT_MAIN_FRAME, - .tc_enable = MAIN_TC_EN, - .sec_vactive_shift = SEC_VACT_MAIN_SHIFT, - .sec_vactive_mask = SEC_VACT_MAIN_MASK, - .interlace_select = MAIN_INTERLACE_SEL, - .pi_enable = MAIN_PI_EN, - .div_vga_shift = 
VGA_MAIN_DIV_SHIFT, - .div_pic_shift = PIC_MAIN_DIV_SHIFT, - .div_tvenc_shift = TVENC_MAIN_DIV_SHIFT, - .div_hdmi_pnx_shift = HDMI_MAIN_PNX_DIV_SHIFT, - .div_hdmi_shift = HDMI_MAIN_DIV_SHIFT, - .div_inf_shift = INF_MAIN_DIV_SHIFT, - .div_layer_shift = LAYER_MAIN_DIV_SHIFT, -}; - -static const struct zx_crtc_bits aux_crtc_bits = { - .polarity_mask = AUX_POL_MASK, - .polarity_shift = AUX_POL_SHIFT, - .int_frame_mask = TIMING_INT_AUX_FRAME, - .tc_enable = AUX_TC_EN, - .sec_vactive_shift = SEC_VACT_AUX_SHIFT, - .sec_vactive_mask = SEC_VACT_AUX_MASK, - .interlace_select = AUX_INTERLACE_SEL, - .pi_enable = AUX_PI_EN, - .div_vga_shift = VGA_AUX_DIV_SHIFT, - .div_pic_shift = PIC_AUX_DIV_SHIFT, - .div_tvenc_shift = TVENC_AUX_DIV_SHIFT, - .div_hdmi_pnx_shift = HDMI_AUX_PNX_DIV_SHIFT, - .div_hdmi_shift = HDMI_AUX_DIV_SHIFT, - .div_inf_shift = INF_AUX_DIV_SHIFT, - .div_layer_shift = LAYER_AUX_DIV_SHIFT, -}; - -struct zx_crtc { - struct drm_crtc crtc; - struct drm_plane *primary; - struct zx_vou_hw *vou; - void __iomem *chnreg; - void __iomem *chncsc; - void __iomem *dither; - const struct zx_crtc_regs *regs; - const struct zx_crtc_bits *bits; - enum vou_chn_type chn_type; - struct clk *pixclk; -}; - -#define to_zx_crtc(x) container_of(x, struct zx_crtc, crtc) - -struct vou_layer_bits { - u32 enable; - u32 chnsel; - u32 clksel; -}; - -static const struct vou_layer_bits zx_gl_bits[GL_NUM] = { - { - .enable = OSD_CTRL0_GL0_EN, - .chnsel = OSD_CTRL0_GL0_SEL, - .clksel = VOU_CLK_GL0_SEL, - }, { - .enable = OSD_CTRL0_GL1_EN, - .chnsel = OSD_CTRL0_GL1_SEL, - .clksel = VOU_CLK_GL1_SEL, - }, -}; - -static const struct vou_layer_bits zx_vl_bits[VL_NUM] = { - { - .enable = OSD_CTRL0_VL0_EN, - .chnsel = OSD_CTRL0_VL0_SEL, - .clksel = VOU_CLK_VL0_SEL, - }, { - .enable = OSD_CTRL0_VL1_EN, - .chnsel = OSD_CTRL0_VL1_SEL, - .clksel = VOU_CLK_VL1_SEL, - }, { - .enable = OSD_CTRL0_VL2_EN, - .chnsel = OSD_CTRL0_VL2_SEL, - .clksel = VOU_CLK_VL2_SEL, - }, -}; - -struct zx_vou_hw { - struct device *dev; - void __iomem *osd; - void __iomem *timing; - void __iomem *vouctl; - void __iomem *otfppu; - void __iomem *dtrc; - struct clk *axi_clk; - struct clk *ppu_clk; - struct clk *main_clk; - struct clk *aux_clk; - struct zx_crtc *main_crtc; - struct zx_crtc *aux_crtc; -}; - -enum vou_inf_data_sel { - VOU_YUV444 = 0, - VOU_RGB_101010 = 1, - VOU_RGB_888 = 2, - VOU_RGB_666 = 3, -}; - -struct vou_inf { - enum vou_inf_id id; - enum vou_inf_data_sel data_sel; - u32 clocks_en_bits; - u32 clocks_sel_bits; -}; - -static struct vou_inf vou_infs[] = { - [VOU_HDMI] = { - .data_sel = VOU_YUV444, - .clocks_en_bits = BIT(24) | BIT(18) | BIT(6), - .clocks_sel_bits = BIT(13) | BIT(2), - }, - [VOU_TV_ENC] = { - .data_sel = VOU_YUV444, - .clocks_en_bits = BIT(15), - .clocks_sel_bits = BIT(11) | BIT(0), - }, - [VOU_VGA] = { - .data_sel = VOU_RGB_888, - .clocks_en_bits = BIT(1), - .clocks_sel_bits = BIT(10), - }, -}; - -static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc) -{ - struct zx_crtc *zcrtc = to_zx_crtc(crtc); - - return zcrtc->vou; -} - -void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc, - enum vou_inf_hdmi_audio aud) -{ - struct zx_crtc *zcrtc = to_zx_crtc(crtc); - struct zx_vou_hw *vou = zcrtc->vou; - - zx_writel_mask(vou->vouctl + VOU_INF_HDMI_CTRL, VOU_HDMI_AUD_MASK, aud); -} - -void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc) -{ - struct zx_crtc *zcrtc = to_zx_crtc(crtc); - struct zx_vou_hw *vou = zcrtc->vou; - struct vou_inf *inf = &vou_infs[id]; - void __iomem *dither = zcrtc->dither; - void __iomem *csc = 
zcrtc->chncsc; - bool is_main = zcrtc->chn_type == VOU_CHN_MAIN; - u32 data_sel_shift = id << 1; - - if (inf->data_sel != VOU_YUV444) { - /* Enable channel CSC for RGB output */ - zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK, - CSC_BT709_IMAGE_YCBCR2RGB << CSC_COV_MODE_SHIFT); - zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, - CSC_WORK_ENABLE); - - /* Bypass Dither block for RGB output */ - zx_writel_mask(dither + OSD_DITHER_CTRL0, DITHER_BYSPASS, - DITHER_BYSPASS); - } else { - zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, 0); - zx_writel_mask(dither + OSD_DITHER_CTRL0, DITHER_BYSPASS, 0); - } - - /* Select data format */ - zx_writel_mask(vou->vouctl + VOU_INF_DATA_SEL, 0x3 << data_sel_shift, - inf->data_sel << data_sel_shift); - - /* Select channel */ - zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << id, - zcrtc->chn_type << id); - - /* Select interface clocks */ - zx_writel_mask(vou->vouctl + VOU_CLK_SEL, inf->clocks_sel_bits, - is_main ? 0 : inf->clocks_sel_bits); - - /* Enable interface clocks */ - zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits, - inf->clocks_en_bits); - - /* Enable the device */ - zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 1 << id); -} - -void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc) -{ - struct zx_vou_hw *vou = crtc_to_vou(crtc); - struct vou_inf *inf = &vou_infs[id]; - - /* Disable the device */ - zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 0); - - /* Disable interface clocks */ - zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits, 0); -} - -void zx_vou_config_dividers(struct drm_crtc *crtc, - struct vou_div_config *configs, int num) -{ - struct zx_crtc *zcrtc = to_zx_crtc(crtc); - struct zx_vou_hw *vou = zcrtc->vou; - const struct zx_crtc_bits *bits = zcrtc->bits; - int i; - - /* Clear update flag bit */ - zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE, 0); - - for (i = 0; i < num; i++) { - struct vou_div_config *cfg = configs + i; - u32 reg, shift; - - switch (cfg->id) { - case VOU_DIV_VGA: - reg = VOU_CLK_SEL; - shift = bits->div_vga_shift; - break; - case VOU_DIV_PIC: - reg = VOU_CLK_SEL; - shift = bits->div_pic_shift; - break; - case VOU_DIV_TVENC: - reg = VOU_DIV_PARA; - shift = bits->div_tvenc_shift; - break; - case VOU_DIV_HDMI_PNX: - reg = VOU_DIV_PARA; - shift = bits->div_hdmi_pnx_shift; - break; - case VOU_DIV_HDMI: - reg = VOU_DIV_PARA; - shift = bits->div_hdmi_shift; - break; - case VOU_DIV_INF: - reg = VOU_DIV_PARA; - shift = bits->div_inf_shift; - break; - case VOU_DIV_LAYER: - reg = VOU_DIV_PARA; - shift = bits->div_layer_shift; - break; - default: - continue; - } - - /* Each divider occupies 3 bits */ - zx_writel_mask(vou->vouctl + reg, 0x7 << shift, - cfg->val << shift); - } - - /* Set update flag bit to get dividers effected */ - zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE, - DIV_PARA_UPDATE); -} - -static inline void vou_chn_set_update(struct zx_crtc *zcrtc) -{ - zx_writel(zcrtc->chnreg + CHN_UPDATE, 1); -} - -static void zx_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ - struct drm_display_mode *mode = &crtc->state->adjusted_mode; - bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; - struct zx_crtc *zcrtc = to_zx_crtc(crtc); - struct zx_vou_hw *vou = zcrtc->vou; - const struct zx_crtc_regs *regs = zcrtc->regs; - const struct zx_crtc_bits *bits = zcrtc->bits; - struct videomode vm; - u32 scan_mask; - u32 pol = 0; - u32 val; - int ret; - - drm_display_mode_to_videomode(mode, &vm); - - /* Set up timing parameters 
*/ - val = V_ACTIVE((interlaced ? vm.vactive / 2 : vm.vactive) - 1); - val |= H_ACTIVE(vm.hactive - 1); - zx_writel(vou->timing + regs->fir_active, val); - - val = SYNC_WIDE(vm.hsync_len - 1); - val |= BACK_PORCH(vm.hback_porch - 1); - val |= FRONT_PORCH(vm.hfront_porch - 1); - zx_writel(vou->timing + regs->fir_htiming, val); - - val = SYNC_WIDE(vm.vsync_len - 1); - val |= BACK_PORCH(vm.vback_porch - 1); - val |= FRONT_PORCH(vm.vfront_porch - 1); - zx_writel(vou->timing + regs->fir_vtiming, val); - - if (interlaced) { - u32 shift = bits->sec_vactive_shift; - u32 mask = bits->sec_vactive_mask; - - val = zx_readl(vou->timing + SEC_V_ACTIVE); - val &= ~mask; - val |= ((vm.vactive / 2 - 1) << shift) & mask; - zx_writel(vou->timing + SEC_V_ACTIVE, val); - - val = SYNC_WIDE(vm.vsync_len - 1); - /* - * The vback_porch for the second field needs to shift one on - * the value for the first field. - */ - val |= BACK_PORCH(vm.vback_porch); - val |= FRONT_PORCH(vm.vfront_porch - 1); - zx_writel(vou->timing + regs->sec_vtiming, val); - } - - /* Set up polarities */ - if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW) - pol |= 1 << POL_VSYNC_SHIFT; - if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW) - pol |= 1 << POL_HSYNC_SHIFT; - - zx_writel_mask(vou->timing + TIMING_CTRL, bits->polarity_mask, - pol << bits->polarity_shift); - - /* Setup SHIFT register by following what ZTE BSP does */ - val = H_SHIFT_VAL; - if (interlaced) - val |= V_SHIFT_VAL << 16; - zx_writel(vou->timing + regs->timing_shift, val); - zx_writel(vou->timing + regs->timing_pi_shift, H_PI_SHIFT_VAL); - - /* Progressive or interlace scan select */ - scan_mask = bits->interlace_select | bits->pi_enable; - zx_writel_mask(vou->timing + SCAN_CTRL, scan_mask, - interlaced ? scan_mask : 0); - - /* Enable TIMING_CTRL */ - zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable, - bits->tc_enable); - - /* Configure channel screen size */ - zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_W_MASK, - vm.hactive << CHN_SCREEN_W_SHIFT); - zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_H_MASK, - vm.vactive << CHN_SCREEN_H_SHIFT); - - /* Configure channel interlace buffer control */ - zx_writel_mask(zcrtc->chnreg + CHN_INTERLACE_BUF_CTRL, CHN_INTERLACE_EN, - interlaced ? 
CHN_INTERLACE_EN : 0); - - /* Update channel */ - vou_chn_set_update(zcrtc); - - /* Enable channel */ - zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, CHN_ENABLE); - - drm_crtc_vblank_on(crtc); - - ret = clk_set_rate(zcrtc->pixclk, mode->clock * 1000); - if (ret) { - DRM_DEV_ERROR(vou->dev, "failed to set pixclk rate: %d\n", ret); - return; - } - - ret = clk_prepare_enable(zcrtc->pixclk); - if (ret) - DRM_DEV_ERROR(vou->dev, "failed to enable pixclk: %d\n", ret); -} - -static void zx_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ - struct zx_crtc *zcrtc = to_zx_crtc(crtc); - const struct zx_crtc_bits *bits = zcrtc->bits; - struct zx_vou_hw *vou = zcrtc->vou; - - clk_disable_unprepare(zcrtc->pixclk); - - drm_crtc_vblank_off(crtc); - - /* Disable channel */ - zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, 0); - - /* Disable TIMING_CTRL */ - zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable, 0); -} - -static void zx_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ - struct drm_pending_vblank_event *event = crtc->state->event; - - if (!event) - return; - - crtc->state->event = NULL; - - spin_lock_irq(&crtc->dev->event_lock); - if (drm_crtc_vblank_get(crtc) == 0) - drm_crtc_arm_vblank_event(crtc, event); - else - drm_crtc_send_vblank_event(crtc, event); - spin_unlock_irq(&crtc->dev->event_lock); -} - -static const struct drm_crtc_helper_funcs zx_crtc_helper_funcs = { - .atomic_flush = zx_crtc_atomic_flush, - .atomic_enable = zx_crtc_atomic_enable, - .atomic_disable = zx_crtc_atomic_disable, -}; - -static int zx_vou_enable_vblank(struct drm_crtc *crtc) -{ - struct zx_crtc *zcrtc = to_zx_crtc(crtc); - struct zx_vou_hw *vou = crtc_to_vou(crtc); - u32 int_frame_mask = zcrtc->bits->int_frame_mask; - - zx_writel_mask(vou->timing + TIMING_INT_CTRL, int_frame_mask, - int_frame_mask); - - return 0; -} - -static void zx_vou_disable_vblank(struct drm_crtc *crtc) -{ - struct zx_crtc *zcrtc = to_zx_crtc(crtc); - struct zx_vou_hw *vou = crtc_to_vou(crtc); - - zx_writel_mask(vou->timing + TIMING_INT_CTRL, - zcrtc->bits->int_frame_mask, 0); -} - -static const struct drm_crtc_funcs zx_crtc_funcs = { - .destroy = drm_crtc_cleanup, - .set_config = drm_atomic_helper_set_config, - .page_flip = drm_atomic_helper_page_flip, - .reset = drm_atomic_helper_crtc_reset, - .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, - .enable_vblank = zx_vou_enable_vblank, - .disable_vblank = zx_vou_disable_vblank, -}; - -static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou, - enum vou_chn_type chn_type) -{ - struct device *dev = vou->dev; - struct zx_plane *zplane; - struct zx_crtc *zcrtc; - int ret; - - zcrtc = devm_kzalloc(dev, sizeof(*zcrtc), GFP_KERNEL); - if (!zcrtc) - return -ENOMEM; - - zcrtc->vou = vou; - zcrtc->chn_type = chn_type; - - zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL); - if (!zplane) - return -ENOMEM; - - zplane->dev = dev; - - if (chn_type == VOU_CHN_MAIN) { - zplane->layer = vou->osd + MAIN_GL_OFFSET; - zplane->csc = vou->osd + MAIN_GL_CSC_OFFSET; - zplane->hbsc = vou->osd + MAIN_HBSC_OFFSET; - zplane->rsz = vou->otfppu + MAIN_RSZ_OFFSET; - zplane->bits = &zx_gl_bits[0]; - zcrtc->chnreg = vou->osd + OSD_MAIN_CHN; - zcrtc->chncsc = vou->osd + MAIN_CHN_CSC_OFFSET; - zcrtc->dither = vou->osd + MAIN_DITHER_OFFSET; - zcrtc->regs = &main_crtc_regs; - zcrtc->bits = &main_crtc_bits; - } else { - zplane->layer = vou->osd + 
AUX_GL_OFFSET; - zplane->csc = vou->osd + AUX_GL_CSC_OFFSET; - zplane->hbsc = vou->osd + AUX_HBSC_OFFSET; - zplane->rsz = vou->otfppu + AUX_RSZ_OFFSET; - zplane->bits = &zx_gl_bits[1]; - zcrtc->chnreg = vou->osd + OSD_AUX_CHN; - zcrtc->chncsc = vou->osd + AUX_CHN_CSC_OFFSET; - zcrtc->dither = vou->osd + AUX_DITHER_OFFSET; - zcrtc->regs = &aux_crtc_regs; - zcrtc->bits = &aux_crtc_bits; - } - - zcrtc->pixclk = devm_clk_get(dev, (chn_type == VOU_CHN_MAIN) ? - "main_wclk" : "aux_wclk"); - if (IS_ERR(zcrtc->pixclk)) { - ret = PTR_ERR(zcrtc->pixclk); - DRM_DEV_ERROR(dev, "failed to get pix clk: %d\n", ret); - return ret; - } - - ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_PRIMARY); - if (ret) { - DRM_DEV_ERROR(dev, "failed to init primary plane: %d\n", ret); - return ret; - } - - zcrtc->primary = &zplane->plane; - - ret = drm_crtc_init_with_planes(drm, &zcrtc->crtc, zcrtc->primary, NULL, - &zx_crtc_funcs, NULL); - if (ret) { - DRM_DEV_ERROR(dev, "failed to init drm crtc: %d\n", ret); - return ret; - } - - drm_crtc_helper_add(&zcrtc->crtc, &zx_crtc_helper_funcs); - - if (chn_type == VOU_CHN_MAIN) - vou->main_crtc = zcrtc; - else - vou->aux_crtc = zcrtc; - - return 0; -} - -void zx_vou_layer_enable(struct drm_plane *plane) -{ - struct zx_crtc *zcrtc = to_zx_crtc(plane->state->crtc); - struct zx_vou_hw *vou = zcrtc->vou; - struct zx_plane *zplane = to_zx_plane(plane); - const struct vou_layer_bits *bits = zplane->bits; - - if (zcrtc->chn_type == VOU_CHN_MAIN) { - zx_writel_mask(vou->osd + OSD_CTRL0, bits->chnsel, 0); - zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel, 0); - } else { - zx_writel_mask(vou->osd + OSD_CTRL0, bits->chnsel, - bits->chnsel); - zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel, - bits->clksel); - } - - zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, bits->enable); -} - -void zx_vou_layer_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct zx_crtc *zcrtc = to_zx_crtc(old_state->crtc); - struct zx_vou_hw *vou = zcrtc->vou; - struct zx_plane *zplane = to_zx_plane(plane); - const struct vou_layer_bits *bits = zplane->bits; - - zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, 0); -} - -static void zx_overlay_init(struct drm_device *drm, struct zx_vou_hw *vou) -{ - struct device *dev = vou->dev; - struct zx_plane *zplane; - int i; - int ret; - - /* - * VL0 has some quirks on scaling support which need special handling. - * Let's leave it out for now. 
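One note on the zx_writel_mask() accessor used throughout this driver: it is defined in zx_drm_drv.h, which is not part of this diff, and is presumably the usual read-modify-write helper along the lines of this sketch (not the verbatim definition):

#include <linux/io.h>

static inline void zx_writel_mask(void __iomem *reg, u32 mask, u32 val)
{
	u32 tmp;

	/* Read-modify-write: update only the bits covered by mask */
	tmp = readl_relaxed(reg);
	tmp = (tmp & ~mask) | (val & mask);
	writel_relaxed(tmp, reg);
}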
- */ - for (i = 1; i < VL_NUM; i++) { - zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL); - if (!zplane) { - DRM_DEV_ERROR(dev, "failed to allocate zplane %d\n", i); - return; - } - - zplane->layer = vou->osd + OSD_VL_OFFSET(i); - zplane->hbsc = vou->osd + HBSC_VL_OFFSET(i); - zplane->rsz = vou->otfppu + RSZ_VL_OFFSET(i); - zplane->bits = &zx_vl_bits[i]; - - ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_OVERLAY); - if (ret) { - DRM_DEV_ERROR(dev, "failed to init overlay %d\n", i); - continue; - } - } -} - -static inline void zx_osd_int_update(struct zx_crtc *zcrtc) -{ - struct drm_crtc *crtc = &zcrtc->crtc; - struct drm_plane *plane; - - vou_chn_set_update(zcrtc); - - drm_for_each_plane_mask(plane, crtc->dev, crtc->state->plane_mask) - zx_plane_set_update(plane); -} - -static irqreturn_t vou_irq_handler(int irq, void *dev_id) -{ - struct zx_vou_hw *vou = dev_id; - u32 state; - - /* Handle TIMING_CTRL frame interrupts */ - state = zx_readl(vou->timing + TIMING_INT_STATE); - zx_writel(vou->timing + TIMING_INT_STATE, state); - - if (state & TIMING_INT_MAIN_FRAME) - drm_crtc_handle_vblank(&vou->main_crtc->crtc); - - if (state & TIMING_INT_AUX_FRAME) - drm_crtc_handle_vblank(&vou->aux_crtc->crtc); - - /* Handle OSD interrupts */ - state = zx_readl(vou->osd + OSD_INT_STA); - zx_writel(vou->osd + OSD_INT_CLRSTA, state); - - if (state & OSD_INT_MAIN_UPT) - zx_osd_int_update(vou->main_crtc); - - if (state & OSD_INT_AUX_UPT) - zx_osd_int_update(vou->aux_crtc); - - if (state & OSD_INT_ERROR) - DRM_DEV_ERROR(vou->dev, "OSD ERROR: 0x%08x!\n", state); - - return IRQ_HANDLED; -} - -static void vou_dtrc_init(struct zx_vou_hw *vou) -{ - /* Clear bit for bypass by ID */ - zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL, - TILE2RASTESCAN_BYPASS_MODE, 0); - - /* Select ARIDR mode */ - zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL, DETILE_ARIDR_MODE_MASK, - DETILE_ARID_IN_ARIDR); - - /* Bypass decompression for both frames */ - zx_writel_mask(vou->dtrc + DTRC_F0_CTRL, DTRC_DECOMPRESS_BYPASS, - DTRC_DECOMPRESS_BYPASS); - zx_writel_mask(vou->dtrc + DTRC_F1_CTRL, DTRC_DECOMPRESS_BYPASS, - DTRC_DECOMPRESS_BYPASS); - - /* Set up ARID register */ - zx_writel(vou->dtrc + DTRC_ARID, DTRC_ARID3(0xf) | DTRC_ARID2(0xe) | - DTRC_ARID1(0xf) | DTRC_ARID0(0xe)); -} - -static void vou_hw_init(struct zx_vou_hw *vou) -{ - /* Release reset for all VOU modules */ - zx_writel(vou->vouctl + VOU_SOFT_RST, ~0); - - /* Enable all VOU module clocks */ - zx_writel(vou->vouctl + VOU_CLK_EN, ~0); - - /* Clear both OSD and TIMING_CTRL interrupt state */ - zx_writel(vou->osd + OSD_INT_CLRSTA, ~0); - zx_writel(vou->timing + TIMING_INT_STATE, ~0); - - /* Enable OSD and TIMING_CTRL interrrupts */ - zx_writel(vou->osd + OSD_INT_MSK, OSD_INT_ENABLE); - zx_writel(vou->timing + TIMING_INT_CTRL, TIMING_INT_ENABLE); - - /* Select GPC as input to gl/vl scaler as a sane default setting */ - zx_writel(vou->otfppu + OTFPPU_RSZ_DATA_SOURCE, 0x2a); - - /* - * Needs to reset channel and layer logic per frame when frame starts - * to get VOU work properly. 
- */ - zx_writel_mask(vou->osd + OSD_RST_CLR, RST_PER_FRAME, RST_PER_FRAME); - - vou_dtrc_init(vou); -} - -static int zx_crtc_bind(struct device *dev, struct device *master, void *data) -{ - struct platform_device *pdev = to_platform_device(dev); - struct drm_device *drm = data; - struct zx_vou_hw *vou; - struct resource *res; - int irq; - int ret; - - vou = devm_kzalloc(dev, sizeof(*vou), GFP_KERNEL); - if (!vou) - return -ENOMEM; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "osd"); - vou->osd = devm_ioremap_resource(dev, res); - if (IS_ERR(vou->osd)) { - ret = PTR_ERR(vou->osd); - DRM_DEV_ERROR(dev, "failed to remap osd region: %d\n", ret); - return ret; - } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "timing_ctrl"); - vou->timing = devm_ioremap_resource(dev, res); - if (IS_ERR(vou->timing)) { - ret = PTR_ERR(vou->timing); - DRM_DEV_ERROR(dev, "failed to remap timing_ctrl region: %d\n", - ret); - return ret; - } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dtrc"); - vou->dtrc = devm_ioremap_resource(dev, res); - if (IS_ERR(vou->dtrc)) { - ret = PTR_ERR(vou->dtrc); - DRM_DEV_ERROR(dev, "failed to remap dtrc region: %d\n", ret); - return ret; - } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vou_ctrl"); - vou->vouctl = devm_ioremap_resource(dev, res); - if (IS_ERR(vou->vouctl)) { - ret = PTR_ERR(vou->vouctl); - DRM_DEV_ERROR(dev, "failed to remap vou_ctrl region: %d\n", - ret); - return ret; - } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otfppu"); - vou->otfppu = devm_ioremap_resource(dev, res); - if (IS_ERR(vou->otfppu)) { - ret = PTR_ERR(vou->otfppu); - DRM_DEV_ERROR(dev, "failed to remap otfppu region: %d\n", ret); - return ret; - } - - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - vou->axi_clk = devm_clk_get(dev, "aclk"); - if (IS_ERR(vou->axi_clk)) { - ret = PTR_ERR(vou->axi_clk); - DRM_DEV_ERROR(dev, "failed to get axi_clk: %d\n", ret); - return ret; - } - - vou->ppu_clk = devm_clk_get(dev, "ppu_wclk"); - if (IS_ERR(vou->ppu_clk)) { - ret = PTR_ERR(vou->ppu_clk); - DRM_DEV_ERROR(dev, "failed to get ppu_clk: %d\n", ret); - return ret; - } - - ret = clk_prepare_enable(vou->axi_clk); - if (ret) { - DRM_DEV_ERROR(dev, "failed to enable axi_clk: %d\n", ret); - return ret; - } - - clk_prepare_enable(vou->ppu_clk); - if (ret) { - DRM_DEV_ERROR(dev, "failed to enable ppu_clk: %d\n", ret); - goto disable_axi_clk; - } - - vou->dev = dev; - dev_set_drvdata(dev, vou); - - vou_hw_init(vou); - - ret = devm_request_irq(dev, irq, vou_irq_handler, 0, "zx_vou", vou); - if (ret < 0) { - DRM_DEV_ERROR(dev, "failed to request vou irq: %d\n", ret); - goto disable_ppu_clk; - } - - ret = zx_crtc_init(drm, vou, VOU_CHN_MAIN); - if (ret) { - DRM_DEV_ERROR(dev, "failed to init main channel crtc: %d\n", - ret); - goto disable_ppu_clk; - } - - ret = zx_crtc_init(drm, vou, VOU_CHN_AUX); - if (ret) { - DRM_DEV_ERROR(dev, "failed to init aux channel crtc: %d\n", - ret); - goto disable_ppu_clk; - } - - zx_overlay_init(drm, vou); - - return 0; - -disable_ppu_clk: - clk_disable_unprepare(vou->ppu_clk); -disable_axi_clk: - clk_disable_unprepare(vou->axi_clk); - return ret; -} - -static void zx_crtc_unbind(struct device *dev, struct device *master, - void *data) -{ - struct zx_vou_hw *vou = dev_get_drvdata(dev); - - clk_disable_unprepare(vou->axi_clk); - clk_disable_unprepare(vou->ppu_clk); -} - -static const struct component_ops zx_crtc_component_ops = { - .bind = zx_crtc_bind, - .unbind = zx_crtc_unbind, -}; - 
-static int zx_crtc_probe(struct platform_device *pdev) -{ - return component_add(&pdev->dev, &zx_crtc_component_ops); -} - -static int zx_crtc_remove(struct platform_device *pdev) -{ - component_del(&pdev->dev, &zx_crtc_component_ops); - return 0; -} - -static const struct of_device_id zx_crtc_of_match[] = { - { .compatible = "zte,zx296718-dpc", }, - { /* end */ }, -}; -MODULE_DEVICE_TABLE(of, zx_crtc_of_match); - -struct platform_driver zx_crtc_driver = { - .probe = zx_crtc_probe, - .remove = zx_crtc_remove, - .driver = { - .name = "zx-crtc", - .of_match_table = zx_crtc_of_match, - }, -}; diff --git a/drivers/gpu/drm/zte/zx_vou.h b/drivers/gpu/drm/zte/zx_vou.h deleted file mode 100644 index b25f34f865ae..000000000000 --- a/drivers/gpu/drm/zte/zx_vou.h +++ /dev/null @@ -1,64 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2016 Linaro Ltd. - * Copyright 2016 ZTE Corporation. - */ - -#ifndef __ZX_VOU_H__ -#define __ZX_VOU_H__ - -#define VOU_CRTC_MASK 0x3 - -/* VOU output interfaces */ -enum vou_inf_id { - VOU_HDMI = 0, - VOU_RGB_LCD = 1, - VOU_TV_ENC = 2, - VOU_MIPI_DSI = 3, - VOU_LVDS = 4, - VOU_VGA = 5, -}; - -enum vou_inf_hdmi_audio { - VOU_HDMI_AUD_SPDIF = BIT(0), - VOU_HDMI_AUD_I2S = BIT(1), - VOU_HDMI_AUD_DSD = BIT(2), - VOU_HDMI_AUD_HBR = BIT(3), - VOU_HDMI_AUD_PARALLEL = BIT(4), -}; - -void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc, - enum vou_inf_hdmi_audio aud); -void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc); -void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc); - -enum vou_div_id { - VOU_DIV_VGA, - VOU_DIV_PIC, - VOU_DIV_TVENC, - VOU_DIV_HDMI_PNX, - VOU_DIV_HDMI, - VOU_DIV_INF, - VOU_DIV_LAYER, -}; - -enum vou_div_val { - VOU_DIV_1 = 0, - VOU_DIV_2 = 1, - VOU_DIV_4 = 3, - VOU_DIV_8 = 7, -}; - -struct vou_div_config { - enum vou_div_id id; - enum vou_div_val val; -}; - -void zx_vou_config_dividers(struct drm_crtc *crtc, - struct vou_div_config *configs, int num); - -void zx_vou_layer_enable(struct drm_plane *plane); -void zx_vou_layer_disable(struct drm_plane *plane, - struct drm_plane_state *old_state); - -#endif /* __ZX_VOU_H__ */ diff --git a/drivers/gpu/drm/zte/zx_vou_regs.h b/drivers/gpu/drm/zte/zx_vou_regs.h deleted file mode 100644 index 2ddb199cb912..000000000000 --- a/drivers/gpu/drm/zte/zx_vou_regs.h +++ /dev/null @@ -1,212 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2016 Linaro Ltd. - * Copyright 2016 ZTE Corporation. 
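The divider API declared above was driven from the encoder drivers; a typical caller looked roughly like the sketch below. The divider values are illustrative (loosely modelled on the HDMI path), not copied from a real call site:

	struct vou_div_config configs[] = {
		{ VOU_DIV_LAYER, VOU_DIV_2 },
		{ VOU_DIV_HDMI_PNX, VOU_DIV_1 },
		{ VOU_DIV_HDMI, VOU_DIV_1 },
	};

	/* Program the per-interface dividers; zx_vou_config_dividers()
	 * latches them by toggling the DIV_PARA_UPDATE flag. */
	zx_vou_config_dividers(crtc, configs, ARRAY_SIZE(configs));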
- */ - -#ifndef __ZX_VOU_REGS_H__ -#define __ZX_VOU_REGS_H__ - -/* Sub-module offset */ -#define MAIN_GL_OFFSET 0x130 -#define MAIN_GL_CSC_OFFSET 0x580 -#define MAIN_CHN_CSC_OFFSET 0x6c0 -#define MAIN_HBSC_OFFSET 0x820 -#define MAIN_DITHER_OFFSET 0x960 -#define MAIN_RSZ_OFFSET 0x600 /* OTFPPU sub-module */ - -#define AUX_GL_OFFSET 0x200 -#define AUX_GL_CSC_OFFSET 0x5d0 -#define AUX_CHN_CSC_OFFSET 0x710 -#define AUX_HBSC_OFFSET 0x860 -#define AUX_DITHER_OFFSET 0x970 -#define AUX_RSZ_OFFSET 0x800 - -#define OSD_VL0_OFFSET 0x040 -#define OSD_VL_OFFSET(i) (OSD_VL0_OFFSET + 0x050 * (i)) - -#define HBSC_VL0_OFFSET 0x760 -#define HBSC_VL_OFFSET(i) (HBSC_VL0_OFFSET + 0x040 * (i)) - -#define RSZ_VL1_U0 0xa00 -#define RSZ_VL_OFFSET(i) (RSZ_VL1_U0 + 0x200 * (i)) - -/* OSD (GPC_GLOBAL) registers */ -#define OSD_INT_STA 0x04 -#define OSD_INT_CLRSTA 0x08 -#define OSD_INT_MSK 0x0c -#define OSD_INT_AUX_UPT BIT(14) -#define OSD_INT_MAIN_UPT BIT(13) -#define OSD_INT_GL1_LBW BIT(10) -#define OSD_INT_GL0_LBW BIT(9) -#define OSD_INT_VL2_LBW BIT(8) -#define OSD_INT_VL1_LBW BIT(7) -#define OSD_INT_VL0_LBW BIT(6) -#define OSD_INT_BUS_ERR BIT(3) -#define OSD_INT_CFG_ERR BIT(2) -#define OSD_INT_ERROR (\ - OSD_INT_GL1_LBW | OSD_INT_GL0_LBW | \ - OSD_INT_VL2_LBW | OSD_INT_VL1_LBW | OSD_INT_VL0_LBW | \ - OSD_INT_BUS_ERR | OSD_INT_CFG_ERR \ -) -#define OSD_INT_ENABLE (OSD_INT_ERROR | OSD_INT_AUX_UPT | OSD_INT_MAIN_UPT) -#define OSD_CTRL0 0x10 -#define OSD_CTRL0_VL0_EN BIT(13) -#define OSD_CTRL0_VL0_SEL BIT(12) -#define OSD_CTRL0_VL1_EN BIT(11) -#define OSD_CTRL0_VL1_SEL BIT(10) -#define OSD_CTRL0_VL2_EN BIT(9) -#define OSD_CTRL0_VL2_SEL BIT(8) -#define OSD_CTRL0_GL0_EN BIT(7) -#define OSD_CTRL0_GL0_SEL BIT(6) -#define OSD_CTRL0_GL1_EN BIT(5) -#define OSD_CTRL0_GL1_SEL BIT(4) -#define OSD_RST_CLR 0x1c -#define RST_PER_FRAME BIT(19) - -/* Main/Aux channel registers */ -#define OSD_MAIN_CHN 0x470 -#define OSD_AUX_CHN 0x4d0 -#define CHN_CTRL0 0x00 -#define CHN_ENABLE BIT(0) -#define CHN_CTRL1 0x04 -#define CHN_SCREEN_W_SHIFT 18 -#define CHN_SCREEN_W_MASK (0x1fff << CHN_SCREEN_W_SHIFT) -#define CHN_SCREEN_H_SHIFT 5 -#define CHN_SCREEN_H_MASK (0x1fff << CHN_SCREEN_H_SHIFT) -#define CHN_UPDATE 0x08 -#define CHN_INTERLACE_BUF_CTRL 0x24 -#define CHN_INTERLACE_EN BIT(2) - -/* Dither registers */ -#define OSD_DITHER_CTRL0 0x00 -#define DITHER_BYSPASS BIT(31) - -/* TIMING_CTRL registers */ -#define TIMING_TC_ENABLE 0x04 -#define AUX_TC_EN BIT(1) -#define MAIN_TC_EN BIT(0) -#define FIR_MAIN_ACTIVE 0x08 -#define FIR_AUX_ACTIVE 0x0c -#define V_ACTIVE_SHIFT 16 -#define V_ACTIVE_MASK (0xffff << V_ACTIVE_SHIFT) -#define H_ACTIVE_SHIFT 0 -#define H_ACTIVE_MASK (0xffff << H_ACTIVE_SHIFT) -#define FIR_MAIN_H_TIMING 0x10 -#define FIR_MAIN_V_TIMING 0x14 -#define FIR_AUX_H_TIMING 0x18 -#define FIR_AUX_V_TIMING 0x1c -#define SYNC_WIDE_SHIFT 22 -#define SYNC_WIDE_MASK (0x3ff << SYNC_WIDE_SHIFT) -#define BACK_PORCH_SHIFT 11 -#define BACK_PORCH_MASK (0x7ff << BACK_PORCH_SHIFT) -#define FRONT_PORCH_SHIFT 0 -#define FRONT_PORCH_MASK (0x7ff << FRONT_PORCH_SHIFT) -#define TIMING_CTRL 0x20 -#define AUX_POL_SHIFT 3 -#define AUX_POL_MASK (0x7 << AUX_POL_SHIFT) -#define MAIN_POL_SHIFT 0 -#define MAIN_POL_MASK (0x7 << MAIN_POL_SHIFT) -#define POL_DE_SHIFT 2 -#define POL_VSYNC_SHIFT 1 -#define POL_HSYNC_SHIFT 0 -#define TIMING_INT_CTRL 0x24 -#define TIMING_INT_STATE 0x28 -#define TIMING_INT_AUX_FRAME BIT(3) -#define TIMING_INT_MAIN_FRAME BIT(1) -#define TIMING_INT_AUX_FRAME_SEL_VSW (0x2 << 10) -#define TIMING_INT_MAIN_FRAME_SEL_VSW (0x2 << 6) -#define 
TIMING_INT_ENABLE (\ - TIMING_INT_MAIN_FRAME_SEL_VSW | TIMING_INT_AUX_FRAME_SEL_VSW | \ - TIMING_INT_MAIN_FRAME | TIMING_INT_AUX_FRAME \ -) -#define TIMING_MAIN_SHIFT 0x2c -#define TIMING_AUX_SHIFT 0x30 -#define H_SHIFT_VAL 0x0048 -#define V_SHIFT_VAL 0x0001 -#define SCAN_CTRL 0x34 -#define AUX_PI_EN BIT(19) -#define MAIN_PI_EN BIT(18) -#define AUX_INTERLACE_SEL BIT(1) -#define MAIN_INTERLACE_SEL BIT(0) -#define SEC_V_ACTIVE 0x38 -#define SEC_VACT_MAIN_SHIFT 0 -#define SEC_VACT_MAIN_MASK (0xffff << SEC_VACT_MAIN_SHIFT) -#define SEC_VACT_AUX_SHIFT 16 -#define SEC_VACT_AUX_MASK (0xffff << SEC_VACT_AUX_SHIFT) -#define SEC_MAIN_V_TIMING 0x3c -#define SEC_AUX_V_TIMING 0x40 -#define TIMING_MAIN_PI_SHIFT 0x68 -#define TIMING_AUX_PI_SHIFT 0x6c -#define H_PI_SHIFT_VAL 0x000f - -#define V_ACTIVE(x) (((x) << V_ACTIVE_SHIFT) & V_ACTIVE_MASK) -#define H_ACTIVE(x) (((x) << H_ACTIVE_SHIFT) & H_ACTIVE_MASK) - -#define SYNC_WIDE(x) (((x) << SYNC_WIDE_SHIFT) & SYNC_WIDE_MASK) -#define BACK_PORCH(x) (((x) << BACK_PORCH_SHIFT) & BACK_PORCH_MASK) -#define FRONT_PORCH(x) (((x) << FRONT_PORCH_SHIFT) & FRONT_PORCH_MASK) - -/* DTRC registers */ -#define DTRC_F0_CTRL 0x2c -#define DTRC_F1_CTRL 0x5c -#define DTRC_DECOMPRESS_BYPASS BIT(17) -#define DTRC_DETILE_CTRL 0x68 -#define TILE2RASTESCAN_BYPASS_MODE BIT(30) -#define DETILE_ARIDR_MODE_MASK (0x3 << 0) -#define DETILE_ARID_ALL 0 -#define DETILE_ARID_IN_ARIDR 1 -#define DETILE_ARID_BYP_BUT_ARIDR 2 -#define DETILE_ARID_IN_ARIDR2 3 -#define DTRC_ARID 0x6c -#define DTRC_ARID3_SHIFT 24 -#define DTRC_ARID3_MASK (0xff << DTRC_ARID3_SHIFT) -#define DTRC_ARID2_SHIFT 16 -#define DTRC_ARID2_MASK (0xff << DTRC_ARID2_SHIFT) -#define DTRC_ARID1_SHIFT 8 -#define DTRC_ARID1_MASK (0xff << DTRC_ARID1_SHIFT) -#define DTRC_ARID0_SHIFT 0 -#define DTRC_ARID0_MASK (0xff << DTRC_ARID0_SHIFT) -#define DTRC_DEC2DDR_ARID 0x70 - -#define DTRC_ARID3(x) (((x) << DTRC_ARID3_SHIFT) & DTRC_ARID3_MASK) -#define DTRC_ARID2(x) (((x) << DTRC_ARID2_SHIFT) & DTRC_ARID2_MASK) -#define DTRC_ARID1(x) (((x) << DTRC_ARID1_SHIFT) & DTRC_ARID1_MASK) -#define DTRC_ARID0(x) (((x) << DTRC_ARID0_SHIFT) & DTRC_ARID0_MASK) - -/* VOU_CTRL registers */ -#define VOU_INF_EN 0x00 -#define VOU_INF_CH_SEL 0x04 -#define VOU_INF_DATA_SEL 0x08 -#define VOU_SOFT_RST 0x14 -#define VOU_CLK_SEL 0x18 -#define VGA_AUX_DIV_SHIFT 29 -#define VGA_MAIN_DIV_SHIFT 26 -#define PIC_MAIN_DIV_SHIFT 23 -#define PIC_AUX_DIV_SHIFT 20 -#define VOU_CLK_VL2_SEL BIT(8) -#define VOU_CLK_VL1_SEL BIT(7) -#define VOU_CLK_VL0_SEL BIT(6) -#define VOU_CLK_GL1_SEL BIT(5) -#define VOU_CLK_GL0_SEL BIT(4) -#define VOU_DIV_PARA 0x1c -#define DIV_PARA_UPDATE BIT(31) -#define TVENC_AUX_DIV_SHIFT 28 -#define HDMI_AUX_PNX_DIV_SHIFT 25 -#define HDMI_MAIN_PNX_DIV_SHIFT 22 -#define HDMI_AUX_DIV_SHIFT 19 -#define HDMI_MAIN_DIV_SHIFT 16 -#define TVENC_MAIN_DIV_SHIFT 13 -#define INF_AUX_DIV_SHIFT 9 -#define INF_MAIN_DIV_SHIFT 6 -#define LAYER_AUX_DIV_SHIFT 3 -#define LAYER_MAIN_DIV_SHIFT 0 -#define VOU_CLK_REQEN 0x20 -#define VOU_CLK_EN 0x24 -#define VOU_INF_HDMI_CTRL 0x30 -#define VOU_HDMI_AUD_MASK 0x1f - -/* OTFPPU_CTRL registers */ -#define OTFPPU_RSZ_DATA_SOURCE 0x04 - -#endif /* __ZX_VOU_REGS_H__ */ diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig index 60d375e9c3c7..1a6b5e872b0d 100644 --- a/drivers/usb/typec/altmodes/Kconfig +++ b/drivers/usb/typec/altmodes/Kconfig @@ -4,6 +4,7 @@ menu "USB Type-C Alternate Mode drivers" config TYPEC_DP_ALTMODE tristate "DisplayPort Alternate Mode driver" + depends on DRM help DisplayPort USB 
Type-C Alternate Mode allows DisplayPort displays and adapters to be attached to the USB Type-C diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index b7f094435b00..c1d8c23baa39 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -11,8 +11,10 @@ #include <linux/delay.h> #include <linux/mutex.h> #include <linux/module.h> +#include <linux/property.h> #include <linux/usb/pd_vdo.h> #include <linux/usb/typec_dp.h> +#include <drm/drm_connector.h> #include "displayport.h" #define DP_HEADER(_dp, ver, cmd) (VDO((_dp)->alt->svid, 1, ver, cmd) \ @@ -57,19 +59,28 @@ struct dp_altmode { struct typec_displayport_data data; enum dp_state state; + bool hpd; struct mutex lock; /* device lock */ struct work_struct work; struct typec_altmode *alt; const struct typec_altmode *port; + struct fwnode_handle *connector_fwnode; }; static int dp_altmode_notify(struct dp_altmode *dp) { - u8 state = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf)); + unsigned long conf; + u8 state; + + if (dp->data.conf) { + state = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf)); + conf = TYPEC_MODAL_STATE(state); + } else { + conf = TYPEC_STATE_USB; + } - return typec_altmode_notify(dp->alt, TYPEC_MODAL_STATE(state), - &dp->data); + return typec_altmode_notify(dp->alt, conf, &dp->data); } static int dp_altmode_configure(struct dp_altmode *dp, u8 con) @@ -118,6 +129,7 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con) static int dp_altmode_status_update(struct dp_altmode *dp) { bool configured = !!DP_CONF_GET_PIN_ASSIGN(dp->data.conf); + bool hpd = !!(dp->data.status & DP_STATUS_HPD_STATE); u8 con = DP_STATUS_CONNECTION(dp->data.status); int ret = 0; @@ -130,6 +142,11 @@ static int dp_altmode_status_update(struct dp_altmode *dp) ret = dp_altmode_configure(dp, con); if (!ret) dp->state = DP_STATE_CONFIGURE; + } else { + if (dp->hpd != hpd) { + drm_connector_oob_hotplug_event(dp->connector_fwnode); + dp->hpd = hpd; + } } return ret; @@ -137,21 +154,10 @@ static int dp_altmode_status_update(struct dp_altmode *dp) static int dp_altmode_configured(struct dp_altmode *dp) { - int ret; - sysfs_notify(&dp->alt->dev.kobj, "displayport", "configuration"); - - if (!dp->data.conf) - return typec_altmode_notify(dp->alt, TYPEC_STATE_USB, - &dp->data); - - ret = dp_altmode_notify(dp); - if (ret) - return ret; - sysfs_notify(&dp->alt->dev.kobj, "displayport", "pin_assignment"); - return 0; + return dp_altmode_notify(dp); } static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf) @@ -172,13 +178,8 @@ static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf) } ret = typec_altmode_vdm(dp->alt, header, &conf, 2); - if (ret) { - if (DP_CONF_GET_PIN_ASSIGN(dp->data.conf)) - dp_altmode_notify(dp); - else - typec_altmode_notify(dp->alt, TYPEC_STATE_USB, - &dp->data); - } + if (ret) + dp_altmode_notify(dp); return ret; } @@ -521,6 +522,7 @@ static const struct attribute_group dp_altmode_group = { int dp_altmode_probe(struct typec_altmode *alt) { const struct typec_altmode *port = typec_altmode_get_partner(alt); + struct fwnode_handle *fwnode; struct dp_altmode *dp; int ret; @@ -549,6 +551,11 @@ int dp_altmode_probe(struct typec_altmode *alt) alt->desc = "DisplayPort"; alt->ops = &dp_altmode_ops; + fwnode = dev_fwnode(alt->dev.parent->parent); /* typec_port fwnode */ + dp->connector_fwnode = fwnode_find_reference(fwnode, "displayport", 0); + if (IS_ERR(dp->connector_fwnode)) + dp->connector_fwnode = NULL; + 
typec_altmode_set_drvdata(alt, dp); dp->state = DP_STATE_ENTER; @@ -564,6 +571,13 @@ void dp_altmode_remove(struct typec_altmode *alt) sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group); cancel_work_sync(&dp->work); + + if (dp->connector_fwnode) { + if (dp->hpd) + drm_connector_oob_hotplug_event(dp->connector_fwnode); + + fwnode_handle_put(dp->connector_fwnode); + } } EXPORT_SYMBOL_GPL(dp_altmode_remove); diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 1647960c9e50..79fa34e5ccdb 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -1084,6 +1084,14 @@ struct drm_connector_funcs { */ void (*atomic_print_state)(struct drm_printer *p, const struct drm_connector_state *state); + + /** + * @oob_hotplug_event: + * + * This will get called when a hotplug-event for a drm-connector + * has been received from a source outside the display driver / device. + */ + void (*oob_hotplug_event)(struct drm_connector *connector); }; /** @@ -1228,6 +1236,14 @@ struct drm_connector { struct device *kdev; /** @attr: sysfs attributes */ struct device_attribute *attr; + /** + * @fwnode: associated fwnode supplied by platform firmware + * + * Drivers can set this to associate a fwnode with a connector, drivers + * are expected to get a reference on the fwnode when setting this. + * drm_connector_cleanup() will call fwnode_handle_put() on this. + */ + struct fwnode_handle *fwnode; /** * @head: @@ -1239,6 +1255,14 @@ struct drm_connector { */ struct list_head head; + /** + * @global_connector_list_entry: + * + * Connector entry in the global connector-list, used by + * drm_connector_find_by_fwnode(). + */ + struct list_head global_connector_list_entry; + /** @base: base KMS object */ struct drm_mode_object base; @@ -1650,6 +1674,7 @@ drm_connector_is_unregistered(struct drm_connector *connector) DRM_CONNECTOR_UNREGISTERED; } +void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode); const char *drm_get_connector_type_name(unsigned int connector_type); const char *drm_get_connector_status_name(enum drm_connector_status status); const char *drm_get_subpixel_order_name(enum subpixel_order order); diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h index afb27cb6a7bd..6ed61c371f6c 100644 --- a/include/drm/drm_ioctl.h +++ b/include/drm/drm_ioctl.h @@ -167,7 +167,6 @@ struct drm_ioctl_desc { .name = #ioctl \ } -int drm_ioctl_permit(u32 flags, struct drm_file *file_priv); long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); long drm_ioctl_kernel(struct file *, drm_ioctl_t, void *, u32); #ifdef CONFIG_COMPAT diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 1ddf7783fdf7..48b7de80daf5 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -103,14 +103,13 @@ struct drm_mode_config_funcs { * Callback used by helpers to inform the driver of output configuration * changes. * - * Drivers implementing fbdev emulation with the helpers can call - * drm_fb_helper_hotplug_changed from this hook to inform the fbdev - * helper of output changes. + * Drivers implementing fbdev emulation use drm_kms_helper_hotplug_event() + * to call this hook to inform the fbdev helper of output changes. * - * FIXME: - * - * Except that there's no vtable for device-level helper callbacks - * there's no reason this is a core function. 
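To sketch how the two halves of the out-of-band hotplug plumbing meet: platform firmware gives the Type-C port a "displayport" reference to the connector's fwnode, the display driver publishes that fwnode on its drm_connector and implements the new callback, and drm_connector_oob_hotplug_event() routes the Type-C notification to it. A hedged sketch; the foo_* names are hypothetical and real implementations live in the individual drivers:

#include <drm/drm_connector.h>
#include <drm/drm_probe_helper.h>

/* Display driver side: react to a hotplug event reported from outside
 * the display device, here the Type-C altmode driver. */
static void foo_oob_hotplug_event(struct drm_connector *connector)
{
	/* No physical HPD pin fired; just re-run output probing */
	drm_kms_helper_hotplug_event(connector->dev);
}

static const struct drm_connector_funcs foo_connector_funcs = {
	/* ... the usual atomic hooks ... */
	.oob_hotplug_event = foo_oob_hotplug_event,
};

/* At connector init time, take a reference on the firmware node so
 * drm_connector_find_by_fwnode() can later match the Type-C side's
 * "displayport" reference against it. */
connector->fwnode = fwnode_handle_get(dev_fwnode(dev));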
+ * This hook is deprecated; drivers should instead use + * drm_fbdev_generic_setup() which takes care of any necessary + * hotplug event forwarding already without further involvement by + * the driver. */ void (*output_poll_changed)(struct drm_device *dev); diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index fed97e35626f..0c1102dc4d88 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -43,7 +43,7 @@ enum drm_scaling_filter { /** * struct drm_plane_state - mutable plane state * - * Please not that the destination coordinates @crtc_x, @crtc_y, @crtc_h and + * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and * @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the * raw coordinates provided by userspace. Drivers should use * drm_atomic_helper_check_plane_state() and only use the derived rectangles in diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 88ae7f331bb1..f011e4c407f2 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -27,9 +27,12 @@ #include <drm/spsc_queue.h> #include <linux/dma-fence.h> #include <linux/completion.h> +#include <linux/xarray.h> #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000) +struct drm_gem_object; + struct drm_gpu_scheduler; struct drm_sched_rq; @@ -50,56 +53,147 @@ enum drm_sched_priority { * struct drm_sched_entity - A wrapper around a job queue (typically * attached to the DRM file_priv). * - * @list: used to append this struct to the list of entities in the - * runqueue. - * @rq: runqueue on which this entity is currently scheduled. - * @sched_list: A list of schedulers (drm_gpu_schedulers). - * Jobs from this entity can be scheduled on any scheduler - * on this list. - * @num_sched_list: number of drm_gpu_schedulers in the sched_list. - * @priority: priority of the entity - * @rq_lock: lock to modify the runqueue to which this entity belongs. - * @job_queue: the list of jobs of this entity. - * @fence_seq: a linearly increasing seqno incremented with each - * new &drm_sched_fence which is part of the entity. - * @fence_context: a unique context for all the fences which belong - * to this entity. - * The &drm_sched_fence.scheduled uses the - * fence_context but &drm_sched_fence.finished uses - * fence_context + 1. - * @dependency: the dependency fence of the job which is on the top - * of the job queue. - * @cb: callback for the dependency fence above. - * @guilty: points to ctx's guilty. - * @fini_status: contains the exit status in case the process was signalled. - * @last_scheduled: points to the finished fence of the last scheduled job. - * @last_user: last group leader pushing a job into the entity. - * @stopped: Marks the enity as removed from rq and destined for termination. - * @entity_idle: Signals when enityt is not in use - * * Entities will emit jobs in order to their corresponding hardware * ring, and the scheduler will alternate between entities based on * scheduling policy. */ struct drm_sched_entity { + /** + * @list: + * + * Used to append this struct to the list of entities in the runqueue + * @rq under &drm_sched_rq.entities. + * + * Protected by &drm_sched_rq.lock of @rq. + */ struct list_head list; + + /** + * @rq: + * + * Runqueue on which this entity is currently scheduled. + * + * FIXME: Locking is very unclear for this. Writers are protected by + * @rq_lock, but readers are generally lockless and seem to just race + * with not even a READ_ONCE.
+ */ struct drm_sched_rq *rq; + + /** + * @sched_list: + * + * A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can + * be scheduled on any scheduler on this list. + * + * This can be modified by calling drm_sched_entity_modify_sched(). + * Locking is entirely up to the driver, see the above function for more + * details. + * + * This will be set to NULL if &num_sched_list equals 1 and @rq has been + * set already. + * + * FIXME: This means priority changes through + * drm_sched_entity_set_priority() will be lost henceforth in this case. + */ struct drm_gpu_scheduler **sched_list; + + /** + * @num_sched_list: + * + * Number of drm_gpu_schedulers in the @sched_list. + */ unsigned int num_sched_list; + + /** + * @priority: + * + * Priority of the entity. This can be modified by calling + * drm_sched_entity_set_priority(). Protected by &rq_lock. + */ enum drm_sched_priority priority; + + /** + * @rq_lock: + * + * Lock to modify the runqueue to which this entity belongs. + */ spinlock_t rq_lock; + /** + * @job_queue: the list of jobs of this entity. + */ struct spsc_queue job_queue; + /** + * @fence_seq: + * + * A linearly increasing seqno incremented with each new + * &drm_sched_fence which is part of the entity. + * + * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking, + * this doesn't need to be atomic. + */ atomic_t fence_seq; + + /** + * @fence_context: + * + * A unique context for all the fences which belong to this entity. The + * &drm_sched_fence.scheduled uses the fence_context but + * &drm_sched_fence.finished uses fence_context + 1. + */ uint64_t fence_context; + /** + * @dependency: + * + * The dependency fence of the job which is on the top of the job queue. + */ struct dma_fence *dependency; + + /** + * @cb: + * + * Callback for the dependency fence above. + */ struct dma_fence_cb cb; + + /** + * @guilty: + * + * Points to the entity's guilty flag. + */ atomic_t *guilty; + + /** + * @last_scheduled: + * + * Points to the finished fence of the last scheduled job. Only written + * by the scheduler thread, can be accessed locklessly from + * drm_sched_job_arm() iff the queue is empty. + */ struct dma_fence *last_scheduled; + + /** + * @last_user: last group leader pushing a job into the entity. + */ struct task_struct *last_user; + + /** + * @stopped: + * + * Marks the entity as removed from rq and destined for + * termination. This is set by calling drm_sched_entity_flush() and by + * drm_sched_fini(). + */ bool stopped; + + /** + * @entity_idle: + * + * Signals when entity is not in use, used to sequence entity cleanup in + * drm_sched_entity_fini(). + */ struct completion entity_idle; }; @@ -198,6 +292,17 @@ struct drm_sched_job { enum drm_sched_priority s_priority; struct drm_sched_entity *entity; struct dma_fence_cb cb; + /** + * @dependencies: + * + * Contains the dependencies as struct dma_fence for this job, see + * drm_sched_job_add_dependency() and + * drm_sched_job_add_implicit_dependencies(). + */ + struct xarray dependencies; + + /** @last_dependency: tracks @dependencies as they signal */ + unsigned long last_dependency; }; static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, @@ -220,9 +325,15 @@ enum drm_gpu_sched_stat { */ struct drm_sched_backend_ops { /** - * @dependency: Called when the scheduler is considering scheduling - * this job next, to get another struct dma_fence for this job to - * block on. Once it returns NULL, run_job() may be called.
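Taken together with the function declarations further below, the new dependency tracking gives driver submit paths the following rough shape. This is a hedged sketch of the init/arm/push contract (error unwinding trimmed; the foo_* names are hypothetical):

#include <drm/gpu_scheduler.h>

int foo_submit(struct foo_job *fjob, struct drm_sched_entity *entity,
	       struct drm_gem_object *obj)
{
	struct drm_sched_job *job = &fjob->base;
	int ret;

	ret = drm_sched_job_init(job, entity, fjob);
	if (ret)
		return ret;

	/* Collect the implicit (dma_resv) fences of each GEM object the
	 * job touches; they are stored in job->dependencies. */
	ret = drm_sched_job_add_implicit_dependencies(job, obj, true);
	if (ret)
		return ret;

	/* Point of no return: the job now owns its scheduler fences ... */
	drm_sched_job_arm(job);

	/* ... and push only takes the job, the entity is already bound. */
	drm_sched_entity_push_job(job);

	return 0;
}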
+ * @dependency: + * + * Called when the scheduler is considering scheduling this job next, to + * get another struct dma_fence for this job to block on. Once it + * returns NULL, run_job() may be called. + * + * If a driver exclusively uses drm_sched_job_add_dependency() and + * drm_sched_job_add_implicit_dependencies(), this can be omitted and + * left as NULL. */ struct dma_fence *(*dependency)(struct drm_sched_job *sched_job, struct drm_sched_entity *s_entity); @@ -348,6 +459,14 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched); int drm_sched_job_init(struct drm_sched_job *job, struct drm_sched_entity *entity, void *owner); +void drm_sched_job_arm(struct drm_sched_job *job); +int drm_sched_job_add_dependency(struct drm_sched_job *job, + struct dma_fence *fence); +int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, + struct drm_gem_object *obj, + bool write); + + void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, struct drm_gpu_scheduler **sched_list, unsigned int num_sched_list); @@ -381,14 +500,17 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity); void drm_sched_entity_destroy(struct drm_sched_entity *entity); void drm_sched_entity_select_rq(struct drm_sched_entity *entity); struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity); -void drm_sched_entity_push_job(struct drm_sched_job *sched_job, - struct drm_sched_entity *entity); +void drm_sched_entity_push_job(struct drm_sched_job *sched_job); void drm_sched_entity_set_priority(struct drm_sched_entity *entity, enum drm_sched_priority priority); bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); -struct drm_sched_fence *drm_sched_fence_create( +struct drm_sched_fence *drm_sched_fence_alloc( struct drm_sched_entity *s_entity, void *owner); +void drm_sched_fence_init(struct drm_sched_fence *fence, + struct drm_sched_entity *entity); +void drm_sched_fence_free(struct drm_sched_fence *fence); + void drm_sched_fence_scheduled(struct drm_sched_fence *fence); void drm_sched_fence_finished(struct drm_sched_fence *fence); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index f681bbdbc698..76d7c33884da 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -265,18 +265,6 @@ static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_opera } /** - * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo - * - * @placement: Return immediately if buffer is busy. - * @mem: The struct ttm_resource indicating the region where the bo resides - * @new_flags: Describes compatible placement found - * - * Returns true if the placement is compatible - */ -bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_resource *mem, - uint32_t *new_flags); - -/** * ttm_bo_validate * * @bo: The buffer object. diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h index 3c9dd65f5aaf..235a743d90e1 100644 --- a/include/drm/ttm/ttm_caching.h +++ b/include/drm/ttm/ttm_caching.h @@ -27,9 +27,26 @@ #define TTM_NUM_CACHING_TYPES 3 +/** + * enum ttm_caching - CPU caching and BUS snooping behavior. + */ enum ttm_caching { + /** + * @ttm_uncached: Most defensive option for device mappings, + * don't even allow write combining. + */ ttm_uncached, + + /** + * @ttm_write_combined: Don't cache read accesses, but allow at least + * writes to be combined.
+ */ ttm_write_combined, + + /** + * @ttm_cached: Fully cached like normal system memory, requires that + * devices snoop the CPU cache on accesses. + */ ttm_cached }; diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h index cd592f8e941b..cbe03d45e883 100644 --- a/include/drm/ttm/ttm_device.h +++ b/include/drm/ttm/ttm_device.h @@ -39,31 +39,23 @@ struct ttm_operation_ctx; /** * struct ttm_global - Buffer object driver global data. - * - * @dummy_read_page: Pointer to a dummy page used for mapping requests - * of unpopulated pages. - * @shrink: A shrink callback object used for buffer object swap. - * @device_list_mutex: Mutex protecting the device list. - * This mutex is held while traversing the device list for pm options. - * @lru_lock: Spinlock protecting the bo subsystem lru lists. - * @device_list: List of buffer object devices. - * @swap_lru: Lru list of buffer objects used for swapping. */ extern struct ttm_global { /** - * Constant after init. + * @dummy_read_page: Pointer to a dummy page used for mapping requests + * of unpopulated pages. Constant after init. */ - struct page *dummy_read_page; /** - * Protected by ttm_global_mutex. + * @device_list: List of buffer object devices. Protected by + * ttm_global_mutex. */ struct list_head device_list; /** - * Internal protection. + * @bo_count: Number of buffer objects allocated by devices. */ atomic_t bo_count; } ttm_glob; @@ -230,49 +222,64 @@ struct ttm_device_funcs { /** * struct ttm_device - Buffer object driver device-specific data. - * - * @device_list: Our entry in the global device list. - * @funcs: Function table for the device. - * @sysman: Resource manager for the system domain. - * @man_drv: An array of resource_managers. - * @vma_manager: Address space manager. - * @pool: page pool for the device. - * @dev_mapping: A pointer to the struct address_space representing the - * device address space. - * @wq: Work queue structure for the delayed delete workqueue. */ struct ttm_device { - /* + /** + * @device_list: Our entry in the global device list. * Constant after bo device init */ struct list_head device_list; + + /** + * @funcs: Function table for the device. + * Constant after bo device init + */ struct ttm_device_funcs *funcs; - /* + /** + * @sysman: Resource manager for the system domain. * Access via ttm_manager_type. */ struct ttm_resource_manager sysman; + + /** + * @man_drv: An array of resource_managers, one per resource type. + */ struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES]; - /* - * Protected by internal locks. + /** + * @vma_manager: Address space manager for finding BOs to mmap. */ struct drm_vma_offset_manager *vma_manager; + + /** + * @pool: page pool for the device. + */ struct ttm_pool pool; - /* - * Protection for the per manager LRU and ddestroy lists. + /** + * @lru_lock: Protection for the per manager LRU and ddestroy lists. */ spinlock_t lru_lock; + + /** + * @ddestroy: Destroyed but not yet cleaned up buffer objects. + */ struct list_head ddestroy; - /* - * Protected by load / firstopen / lastclose /unload sync. + /** + * @pinned: Buffer objects which are pinned and so not on any LRU list. + */ + struct list_head pinned; + + /** + * @dev_mapping: A pointer to the struct address_space for invalidating + * CPU mappings on buffer move. Protected by load/unload sync. */ struct address_space *dev_mapping; - /* - * Internal protection. + /** + * @wq: Work queue structure for the delayed delete workqueue. 
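As a worked example of the ttm_caching semantics documented above, a driver picking the caching mode for new backing store might reason as follows. An illustrative sketch only; no such helper exists in TTM and foo_pick_caching is a hypothetical name:

static enum ttm_caching foo_pick_caching(bool bus_snoops, bool cpu_read_heavy)
{
	if (bus_snoops)
		return ttm_cached;		/* device snoops the CPU cache */
	if (!cpu_read_heavy)
		return ttm_write_combined;	/* CPU mostly streams writes out */
	return ttm_uncached;			/* most defensive option */
}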
*/ struct delayed_work wq; }; @@ -284,12 +291,15 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, static inline struct ttm_resource_manager * ttm_manager_type(struct ttm_device *bdev, int mem_type) { + BUILD_BUG_ON(__builtin_constant_p(mem_type) + && mem_type >= TTM_NUM_MEM_TYPES); return bdev->man_drv[mem_type]; } static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type, struct ttm_resource_manager *manager) { + BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES); bdev->man_drv[type] = manager; } @@ -298,5 +308,6 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs, struct drm_vma_offset_manager *vma_manager, bool use_dma_alloc, bool use_dma32); void ttm_device_fini(struct ttm_device *bdev); +void ttm_device_clear_dma_mappings(struct ttm_device *bdev); #endif diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h index 8995c9e4ec1b..76d1b9119a2b 100644 --- a/include/drm/ttm/ttm_placement.h +++ b/include/drm/ttm/ttm_placement.h @@ -58,6 +58,7 @@ * * @fpfn: first valid page frame number to put the object * @lpfn: last valid page frame number to put the object + * @mem_type: One of TTM_PL_* where the resource should be allocated from. * @flags: memory domain and caching flags for the object * * Structure indicating a possible place to put an object. diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h index 4321728bdd11..ef09b23d29e3 100644 --- a/include/drm/ttm/ttm_pool.h +++ b/include/drm/ttm/ttm_pool.h @@ -37,7 +37,7 @@ struct ttm_pool; struct ttm_operation_ctx; /** - * ttm_pool_type - Pool for a certain memory type + * struct ttm_pool_type - Pool for a certain memory type * * @pool: the pool we belong to, might be NULL for the global ones * @order: the allocation order our pages have @@ -58,8 +58,9 @@ struct ttm_pool_type { }; /** - * ttm_pool - Pool for all caching and orders + * struct ttm_pool - Pool for all caching and orders * + * @dev: the device we allocate pages for * @use_dma_alloc: if coherent DMA allocations should be used * @use_dma32: if GFP_DMA32 should be used * @caching: pools for each caching/order diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h index 22b6fa42ac20..7963b957e9ef 100644 --- a/include/drm/ttm/ttm_range_manager.h +++ b/include/drm/ttm/ttm_range_manager.h @@ -4,6 +4,7 @@ #define _TTM_RANGE_MANAGER_H_ #include <drm/ttm/ttm_resource.h> +#include <drm/ttm/ttm_device.h> #include <drm/drm_mm.h> /** @@ -33,10 +34,23 @@ to_ttm_range_mgr_node(struct ttm_resource *res) return container_of(res, struct ttm_range_mgr_node, base); } -int ttm_range_man_init(struct ttm_device *bdev, +int ttm_range_man_init_nocheck(struct ttm_device *bdev, unsigned type, bool use_tt, unsigned long p_size); -int ttm_range_man_fini(struct ttm_device *bdev, +int ttm_range_man_fini_nocheck(struct ttm_device *bdev, unsigned type); +static __always_inline int ttm_range_man_init(struct ttm_device *bdev, + unsigned int type, bool use_tt, + unsigned long p_size) +{ + BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES); + return ttm_range_man_init_nocheck(bdev, type, use_tt, p_size); +} +static __always_inline int ttm_range_man_fini(struct ttm_device *bdev, + unsigned int type) +{ + BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES); + return ttm_range_man_fini_nocheck(bdev, type); +} #endif diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 
index 140b6b9a8bbe..5952051091cd 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -40,6 +40,7 @@ struct ttm_resource_manager;
 struct ttm_resource;
 struct ttm_place;
 struct ttm_buffer_object;
+struct ttm_placement;
 struct dma_buf_map;
 struct io_mapping;
 struct sg_table;
@@ -102,10 +103,7 @@ struct ttm_resource_manager_func {
  * struct ttm_resource_manager
  *
  * @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
+ * @use_tt: If a TT object should be used for the backing store.
  * @size: Size of the managed region.
  * @func: structure pointer implementing the range manager. See above
  * @move_lock: lock for move fence
@@ -143,6 +141,7 @@ struct ttm_resource_manager {
  * @addr: mapped virtual address
  * @offset: physical addr
  * @is_iomem: is this io memory ?
+ * @caching: See enum ttm_caching
  *
  * Structure indicating the bus placement of an object.
  */
@@ -266,6 +265,8 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
 		       const struct ttm_place *place,
 		       struct ttm_resource **res);
 void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
+bool ttm_resource_compat(struct ttm_resource *res,
+			 struct ttm_placement *placement);
 
 void ttm_resource_manager_init(struct ttm_resource_manager *man,
 			       unsigned long p_size);
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index b20e89d321b0..89b15d673b22 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -55,7 +55,7 @@ struct ttm_operation_ctx;
  * @dma_address: The DMA (bus) addresses of the pages
  * @swap_storage: Pointer to shmem struct file for swap storage.
  * @pages_list: used by some page allocation backend
- * @caching: The current caching state of the pages.
+ * @caching: The current caching state of the pages, see enum ttm_caching.
  *
  * This is a structure holding the pages, caching- and aperture binding
  * status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -127,8 +127,9 @@ int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
 void ttm_tt_fini(struct ttm_tt *ttm);
 
 /**
- * ttm_ttm_destroy:
+ * ttm_tt_destroy:
  *
+ * @bdev: the ttm_device this object belongs to
  * @ttm: The struct ttm_tt.
  *
  * Unbind, unpopulate and destroy common struct ttm_tt.
@@ -136,13 +137,6 @@ void ttm_tt_fini(struct ttm_tt *ttm);
 void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
 
 /**
- * ttm_tt_destroy_common:
- *
- * Called from driver to destroy common path.
- */
-void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm);
-
-/**
  * ttm_tt_swapin:
  *
  * @ttm: The struct ttm_tt.
@@ -156,15 +150,19 @@ int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
 
 /**
  * ttm_tt_populate - allocate pages for a ttm
  *
+ * @bdev: the ttm_device this object belongs to
  * @ttm: Pointer to the ttm_tt structure
+ * @ctx: operation context for populating the tt object.
  *
  * Calls the driver method to allocate pages for a ttm
  */
-int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
+		    struct ttm_operation_ctx *ctx);
 
 /**
  * ttm_tt_unpopulate - free pages from a ttm
  *
+ * @bdev: the ttm_device this object belongs to
  * @ttm: Pointer to the ttm_tt structure
  *
  * Calls the driver method to free all pages from a ttm
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 8b32b4bdd590..66470c37e471 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -420,6 +420,13 @@ struct dma_buf {
 	 * - Dynamic importers should set fences for any access that they can't
 	 *   disable immediately from their &dma_buf_attach_ops.move_notify
 	 *   callback.
+	 *
+	 * IMPORTANT:
+	 *
+	 * All drivers must obey the struct dma_resv rules, specifically the
+	 * rules for updating fences, see &dma_resv.fence_excl and
+	 * &dma_resv.fence. If these dependency rules are broken access tracking
+	 * can be lost resulting in use after free issues.
 	 */
 	struct dma_resv *resv;
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 6ffb4b2c6371..a706b7bf51d7 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -214,19 +214,15 @@ struct dma_fence_ops {
 	 * Custom wait implementation, defaults to dma_fence_default_wait() if
 	 * not set.
 	 *
-	 * The dma_fence_default_wait implementation should work for any fence, as long
-	 * as @enable_signaling works correctly. This hook allows drivers to
-	 * have an optimized version for the case where a process context is
-	 * already available, e.g. if @enable_signaling for the general case
-	 * needs to set up a worker thread.
+	 * Deprecated and should not be used by new implementations. Only used
+	 * by existing implementations which need special handling for their
+	 * hardware reset procedure.
 	 *
 	 * Must return -ERESTARTSYS if the wait is intr = true and the wait was
 	 * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
 	 * timed out. Can also return other error values on custom implementations,
 	 * which should be treated as if the fence is signaled. For example a hardware
 	 * lockup could be reported like that.
-	 *
-	 * This callback is optional.
 	 */
 	signed long (*wait)(struct dma_fence *fence,
 			    bool intr, signed long timeout);
@@ -590,26 +586,4 @@ struct dma_fence *dma_fence_get_stub(void);
 struct dma_fence *dma_fence_allocate_private_stub(void);
 u64 dma_fence_context_alloc(unsigned num);
 
-#define DMA_FENCE_TRACE(f, fmt, args...) \
-	do {								\
-		struct dma_fence *__ff = (f);				\
-		if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE))			\
-			pr_info("f %llu#%llu: " fmt,			\
-				__ff->context, __ff->seqno, ##args);	\
-	} while (0)
-
-#define DMA_FENCE_WARN(f, fmt, args...) \
-	do {								\
-		struct dma_fence *__ff = (f);				\
-		pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
-			##args);					\
-	} while (0)
-
-#define DMA_FENCE_ERR(f, fmt, args...)					\
-	do {								\
-		struct dma_fence *__ff = (f);				\
-		pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno,	\
-		       ##args);						\
-	} while (0)
-
 #endif /* __LINUX_DMA_FENCE_H */
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index e1ca2080a1ff..9100dd3dc21f 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -62,16 +62,90 @@ struct dma_resv_list {
 
 /**
  * struct dma_resv - a reservation object manages fences for a buffer
- * @lock: update side lock
- * @seq: sequence count for managing RCU read-side synchronization
- * @fence_excl: the exclusive fence, if there is one currently
- * @fence: list of current shared fences
+ *
+ * There are multiple uses for this, with sometimes slightly different rules in
+ * how the fence slots are used.
+ *
+ * One use is to synchronize cross-driver access to a struct dma_buf, either for
+ * dynamic buffer management or just to handle implicit synchronization between
+ * different users of the buffer in userspace. See &dma_buf.resv for a more
+ * in-depth discussion.
+ *
+ * The other major use is to manage access and locking within a driver in a
+ * buffer based memory manager. struct ttm_buffer_object is the canonical
+ * example here, since this is where reservation objects originated from. But
+ * use in drivers is spreading and some drivers also manage struct
+ * drm_gem_object with the same scheme.
 */
 struct dma_resv {
+	/**
+	 * @lock:
+	 *
+	 * Update side lock. Don't use directly, instead use the wrapper
+	 * functions like dma_resv_lock() and dma_resv_unlock().
+	 *
+	 * Drivers which use the reservation object to manage memory dynamically
+	 * also use this lock to protect buffer object state like placement,
+	 * allocation policies or throughout command submission.
+	 */
 	struct ww_mutex lock;
+
+	/**
+	 * @seq:
+	 *
+	 * Sequence count for managing RCU read-side synchronization, allows
+	 * read-only access to @fence_excl and @fence while ensuring we take a
+	 * consistent snapshot.
+	 */
 	seqcount_ww_mutex_t seq;
 
+	/**
+	 * @fence_excl:
+	 *
+	 * The exclusive fence, if there is one currently.
+	 *
+	 * There are two ways to update this fence:
+	 *
+	 * - First by calling dma_resv_add_excl_fence(), which replaces all
+	 *   fences attached to the reservation object. To guarantee that no
+	 *   fences are lost, this new fence must signal only after all previous
+	 *   fences, both shared and exclusive, have signalled. In some cases it
+	 *   is convenient to achieve that by attaching a struct dma_fence_array
+	 *   with all the new and old fences.
+	 *
+	 * - Alternatively the fence can be set directly, which leaves the
+	 *   shared fences unchanged. To guarantee that no fences are lost, this
+	 *   new fence must signal only after the previous exclusive fence has
+	 *   signalled. Since the shared fences are staying intact, it is not
+	 *   necessary to maintain any ordering against those. If semantically
+	 *   only a new access is added without actually treating the previous
+	 *   one as a dependency the exclusive fences can be strung together
+	 *   using struct dma_fence_chain.
+	 *
+	 * Note that actual semantics of what an exclusive or shared fence mean
+	 * is defined by the user, for reservation objects shared across drivers
+	 * see &dma_buf.resv.
+	 */
 	struct dma_fence __rcu *fence_excl;
+
+	/**
+	 * @fence:
+	 *
+	 * List of current shared fences.
+	 *
+	 * There are no ordering constraints of shared fences against the
+	 * exclusive fence slot. If a waiter needs to wait for all access, it
+	 * has to wait for both sets of fences to signal.
+	 *
+	 * A new fence is added by calling dma_resv_add_shared_fence(). Since
+	 * this often needs to be done past the point of no return in command
+	 * submission it cannot fail, and therefore sufficient slots need to be
+	 * reserved by calling dma_resv_reserve_shared().
+	 *
+	 * Note that actual semantics of what an exclusive or shared fence mean
+	 * is defined by the user, for reservation objects shared across drivers
+	 * see &dma_buf.resv.
+	 */
 	struct dma_resv_list __rcu *fence;
 };
 
@@ -98,6 +172,13 @@ static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
  * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
  * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
  * object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
+ *
+ * See also dma_resv_lock_interruptible() for the interruptible variant.
  */
 static inline int dma_resv_lock(struct dma_resv *obj,
 				struct ww_acquire_ctx *ctx)
@@ -119,6 +200,12 @@ static inline int dma_resv_lock(struct dma_resv *obj,
  * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
  * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
  * object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
+ * @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
  */
 static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
 					      struct ww_acquire_ctx *ctx)
@@ -134,6 +221,8 @@ static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
  * Acquires the reservation object after a die case. This function
  * will sleep until the lock becomes available. See dma_resv_lock() as
  * well.
+ *
+ * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
  */
 static inline void dma_resv_lock_slow(struct dma_resv *obj,
 				      struct ww_acquire_ctx *ctx)
@@ -167,7 +256,7 @@ static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
  * if they overlap with a writer.
  *
  * Also note that since no context is provided, no deadlock protection is
- * possible.
+ * possible, which is also not needed for a trylock.
  *
  * Returns true if the lock was acquired, false otherwise.
  */
@@ -193,6 +282,11 @@ static inline bool dma_resv_is_locked(struct dma_resv *obj)
  *
  * Returns the context used to lock a reservation object or NULL if no context
  * was used or the object is not locked at all.
+ *
+ * WARNING: This interface is pretty horrible, but TTM needs it because it
+ * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
+ * Everyone else just uses it to check whether they're holding a reservation or
+ * not.
  */
 static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
 {
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
deleted file mode 100644
index 3cca2b8fac43..000000000000
--- a/include/linux/seqno-fence.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * seqno-fence, using a dma-buf to synchronize fencing
- *
- * Copyright (C) 2012 Texas Instruments
- * Copyright (C) 2012 Canonical Ltd
- * Authors:
- *   Rob Clark <robdclark@gmail.com>
- *   Maarten Lankhorst <maarten.lankhorst@canonical.com>
- */
-
-#ifndef __LINUX_SEQNO_FENCE_H
-#define __LINUX_SEQNO_FENCE_H
-
-#include <linux/dma-fence.h>
-#include <linux/dma-buf.h>
-
-enum seqno_fence_condition {
-	SEQNO_FENCE_WAIT_GEQUAL,
-	SEQNO_FENCE_WAIT_NONZERO
-};
-
-struct seqno_fence {
-	struct dma_fence base;
-
-	const struct dma_fence_ops *ops;
-	struct dma_buf *sync_buf;
-	uint32_t seqno_ofs;
-	enum seqno_fence_condition condition;
-};
-
-extern const struct dma_fence_ops seqno_fence_ops;
-
-/**
- * to_seqno_fence - cast a fence to a seqno_fence
- * @fence: fence to cast to a seqno_fence
- *
- * Returns NULL if the fence is not a seqno_fence,
- * or the seqno_fence otherwise.
- */
-static inline struct seqno_fence *
-to_seqno_fence(struct dma_fence *fence)
-{
-	if (fence->ops != &seqno_fence_ops)
-		return NULL;
-	return container_of(fence, struct seqno_fence, base);
-}
-
-/**
- * seqno_fence_init - initialize a seqno fence
- * @fence: seqno_fence to initialize
- * @lock: pointer to spinlock to use for fence
- * @sync_buf: buffer containing the memory location to signal on
- * @context: the execution context this fence is a part of
- * @seqno_ofs: the offset within @sync_buf
- * @seqno: the sequence # to signal on
- * @cond: fence wait condition
- * @ops: the fence_ops for operations on this seqno fence
- *
- * This function initializes a struct seqno_fence with passed parameters,
- * and takes a reference on sync_buf which is released on fence destruction.
- *
- * A seqno_fence is a dma_fence which can complete in software when
- * enable_signaling is called, but it also completes when
- * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true
- *
- * The seqno_fence will take a refcount on the sync_buf until it's
- * destroyed, but actual lifetime of sync_buf may be longer if one of the
- * callers take a reference to it.
- *
- * Certain hardware have instructions to insert this type of wait condition
- * in the command stream, so no intervention from software would be needed.
- * This type of fence can be destroyed before completed, however a reference
- * on the sync_buf dma-buf can be taken. It is encouraged to re-use the same
- * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
- * device's vm can be expensive.
- *
- * It is recommended for creators of seqno_fence to call dma_fence_signal()
- * before destruction. This will prevent possible issues from wraparound at
- * time of issue vs time of check, since users can check dma_fence_is_signaled()
- * before submitting instructions for the hardware to wait on the fence.
- * However, when ops.enable_signaling is not called, it doesn't have to be
- * done as soon as possible, just before there's any real danger of seqno
- * wraparound.
- */
-static inline void
-seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
-		 struct dma_buf *sync_buf, uint32_t context,
-		 uint32_t seqno_ofs, uint32_t seqno,
-		 enum seqno_fence_condition cond,
-		 const struct dma_fence_ops *ops)
-{
-	BUG_ON(!fence || !sync_buf || !ops);
-	BUG_ON(!ops->wait || !ops->enable_signaling ||
-	       !ops->get_driver_name || !ops->get_timeline_name);
-
-	/*
-	 * ops is used in dma_fence_init for get_driver_name, so needs to be
-	 * initialized first
-	 */
-	fence->ops = ops;
-	dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
-	get_dma_buf(sync_buf);
-	fence->sync_buf = sync_buf;
-	fence->seqno_ofs = seqno_ofs;
-	fence->condition = cond;
-}
-
-#endif /* __LINUX_SEQNO_FENCE_H */
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 9814fff58a69..76fbf92b04d9 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -93,4 +93,5 @@ extern void register_shrinker_prepared(struct shrinker *shrinker);
 extern int register_shrinker(struct shrinker *shrinker);
 extern void unregister_shrinker(struct shrinker *shrinker);
 extern void free_prealloced_shrinker(struct shrinker *shrinker);
+extern void synchronize_shrinkers(void);
 #endif
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 9f4bb4a6f358..45a914850be0 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -373,6 +373,12 @@ extern "C" {
 
 #define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
 
+#define fourcc_mod_get_vendor(modifier) \
+	(((modifier) >> 56) & 0xff)
+
+#define fourcc_mod_is_vendor(modifier, vendor) \
+	(fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)
+
 #define fourcc_mod_code(vendor, val) \
 	((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 90c55383f1ee..e4a2570a6058 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -1110,6 +1110,9 @@ struct drm_mode_destroy_blob {
  * struct drm_mode_create_lease - Create lease
  *
  * Lease mode resources, creating another drm_master.
+ *
+ * The @object_ids array must reference at least one CRTC, one connector and
+ * one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled.
  */
 struct drm_mode_create_lease {
 	/** @object_ids: Pointer to array of object ids (__u32) */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 74296c2d1fed..b1512cefcffa 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -687,6 +687,21 @@ void unregister_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
+/**
+ * synchronize_shrinkers - Wait for all running shrinkers to complete.
+ *
+ * This is equivalent to calling unregister_shrinker() and register_shrinker(),
+ * but atomically and with less overhead. This is useful to guarantee that all
+ * shrinker invocations have seen an update, before freeing memory, similar to
+ * RCU.
+ */
+void synchronize_shrinkers(void)
+{
+	down_write(&shrinker_rwsem);
+	up_write(&shrinker_rwsem);
+}
+EXPORT_SYMBOL(synchronize_shrinkers);
+
 #define SHRINK_BATCH 128
 
 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
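
The __builtin_constant_p() plus BUILD_BUG_ON() pattern added to ttm_manager_type(), ttm_set_driver_manager() and the new ttm_range_man_init()/ttm_range_man_fini() wrappers costs nothing at runtime: for a runtime index the condition folds to false, while a provably constant out-of-range index turns into a hard build failure. A minimal standalone sketch of the same idiom; my_table_lookup() and MY_TABLE_SIZE are hypothetical names, not part of this series:

#include <linux/build_bug.h>
#include <linux/compiler.h>

#define MY_TABLE_SIZE 8

static int my_table[MY_TABLE_SIZE];

/* Compile-time bounds check for constant indices, no runtime overhead. */
static __always_inline int my_table_lookup(int idx)
{
	/*
	 * __builtin_constant_p(idx) is false for values only known at
	 * runtime, so the whole condition folds away; for a constant
	 * out-of-range idx it becomes a build error instead of a silent
	 * out-of-bounds access.
	 */
	BUILD_BUG_ON(__builtin_constant_p(idx) && idx >= MY_TABLE_SIZE);
	return my_table[idx];
}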
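
The -EDEADLK back-off that the expanded dma_resv_lock() kernel-doc describes follows the usual ww_mutex pattern. A minimal sketch for two reservation objects; lock_pair()/unlock_pair() are hypothetical helpers, and all error handling other than the deadlock case is omitted:

#include <linux/dma-resv.h>
#include <linux/kernel.h>	/* swap() */

static void lock_pair(struct dma_resv *a, struct dma_resv *b,
		      struct ww_acquire_ctx *ctx)
{
	ww_acquire_init(ctx, &reservation_ww_class);

	/* The first lock cannot deadlock, @ctx holds nothing yet. */
	dma_resv_lock(a, ctx);

	while (dma_resv_lock(b, ctx) == -EDEADLK) {
		/* Back off completely, then sleep on the contended lock. */
		dma_resv_unlock(a);
		dma_resv_lock_slow(b, ctx);
		/* @b is now held; retry the other object on the next pass. */
		swap(a, b);
	}
	ww_acquire_done(ctx);
}

static void unlock_pair(struct dma_resv *a, struct dma_resv *b,
			struct ww_acquire_ctx *ctx)
{
	dma_resv_unlock(a);
	dma_resv_unlock(b);
	ww_acquire_fini(ctx);
}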
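
The "reserve slots first, add the fence later" rule from the new @fence documentation, sketched as a hypothetical driver helper which assumes the object is already locked; note the split in this API generation: dma_resv_reserve_shared() may fail, dma_resv_add_shared_fence() may not:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static int track_read_access(struct dma_resv *resv, struct dma_fence *fence)
{
	int ret;

	dma_resv_assert_held(resv);

	/* May allocate and fail: do it before the point of no return. */
	ret = dma_resv_reserve_shared(resv, 1);
	if (ret)
		return ret;

	/* Guaranteed not to fail, safe after command submission. */
	dma_resv_add_shared_fence(resv, fence);
	return 0;
}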
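
One plausible use of the new fourcc_mod_get_vendor()/fourcc_mod_is_vendor() helpers is filtering format modifiers by their vendor namespace; modifier_supported() and the choice of vendor here are illustrative only:

#include <drm/drm_fourcc.h>

static bool modifier_supported(u64 modifier)
{
	/* Accept linear buffers plus anything from one vendor's namespace. */
	return modifier == DRM_FORMAT_MOD_LINEAR ||
	       fourcc_mod_is_vendor(modifier, AMD);
}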
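
What the new synchronize_shrinkers() export enables, sketched for a hypothetical pool whose shrinker may still be running against objects that were just unpublished; my_pool, my_shrink_lock and the list layout are invented for illustration:

#include <linux/list.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_pool {
	struct list_head shrink_list;	/* entry on the list our shrinker walks */
};

static DEFINE_SPINLOCK(my_shrink_lock);

static void my_pool_teardown(struct my_pool *pool)
{
	/* Unpublish: new shrinker invocations can no longer find the pool. */
	spin_lock(&my_shrink_lock);
	list_del_init(&pool->shrink_list);
	spin_unlock(&my_shrink_lock);

	/*
	 * Wait for shrinker invocations which looked the pool up before the
	 * removal and may still be touching it, then free. Cheaper than a
	 * full unregister_shrinker()/register_shrinker() cycle.
	 */
	synchronize_shrinkers();
	kfree(pool);
}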