author     Dave Airlie <airlied@redhat.com>  2023-12-08 09:26:20 +0300
committer  Dave Airlie <airlied@redhat.com>  2023-12-08 09:27:00 +0300
commit     a60501d7c2d3e70b3545b9b96576628e369d8e85
tree       0552353a7fc0a86faecc0a67c0c5721e6e2382f2 /drivers/gpu
parent     2f8d8548c3e3f420e478b064a53bdaa4953749de
parent     90d50b8d85834e73536fdccd5aa913b30494fef0
download   linux-a60501d7c2d3e70b3545b9b96576628e369d8e85.tar.xz
Merge tag 'drm-misc-next-2023-12-07' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 6.8:
UAPI Changes:
- Remove Userspace Mode-Setting ioctls
- v3d: New uapi to handle jobs involving the CPU
Cross-subsystem Changes:
Core Changes:
- atomic: Add support for FB-less planes, which was reverted shortly
  afterwards for lack of IGT tests and userspace code; dump private
  objects' state in drm_state_dump.
- dma-buf: Add fence deadline support (a short usage sketch follows
  this list)
- encoder: Create per-encoder debugfs directory, move the bridge chain
file to that directory
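
For background, the fence-deadline work is built around
dma_fence_set_deadline(), which forwards a completion hint to whoever
signals the fence. Below is a minimal, hypothetical sketch of how a KMS
driver might use it ahead of a page flip; flip_hint_deadline() and the
fixed 16 ms frame estimate are invented for illustration, and only
dma_fence_set_deadline() itself is part of the dma-buf API:

#include <linux/dma-fence.h>
#include <linux/ktime.h>
#include <linux/timekeeping.h>

/*
 * Hint to the fence's signaller (e.g. a GPU scheduler) that the work
 * backing a page flip should finish within roughly one frame.  Fences
 * whose ops implement .set_deadline may react by boosting clocks or
 * priorities; for all others the call is a no-op.
 */
static void flip_hint_deadline(struct dma_fence *fence)
{
	/* Hypothetical ~60 Hz display: one frame is about 16 ms. */
	ktime_t deadline = ktime_add_ms(ktime_get(), 16);

	dma_fence_set_deadline(fence, deadline);
}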
Driver Changes:
- Include drm_auth.h in drivers that use it but don't include it; drop
  drm_plane_helper.h from drivers that include it but don't use it
- imagination: Plenty of small fixes
- panfrost: Improve interrupt handling at poweroff
- qaic: Convert to persistent DRM devices
- tidss: Support for the AM62A7, a few probe improvements, some cleanups
- v3d: Support for jobs involving the CPU
- bridge:
- Create transparent aux-bridge for DP/USB-C (see the usage sketch
  after this list)
- lt8912b: Add suspend/resume support and power regulator support
- panel:
- himax-hx8394: Drop prepare, unprepare and shutdown logic; support
  panel rotation
- New panels: BOE BP101WX1-100, Powkiddy X55, Ampire AM8001280G,
Evervision VGG644804, SDC ATNA45AF01
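
The aux-bridge work mentioned under "bridge:" above adds two small
helpers; the HPD flavor is consumed through drm_dp_hpd_bridge_register()
and drm_aux_hpd_bridge_notify(), whose kernel-doc appears later in this
diff (drivers/gpu/drm/bridge/aux-hpd-bridge.c). A rough usage sketch for
a hypothetical USB-C/DP controller driver follows; the example_* names
are invented, while the two drm_* calls match the signatures in the
patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <drm/drm_connector.h>
#include <drm/bridge/aux-bridge.h>

/* At probe time, create the bridge that terminates the bridge chain. */
static int example_probe(struct device *dev, struct device_node *dp_node)
{
	struct device *hpd_dev;

	hpd_dev = drm_dp_hpd_bridge_register(dev, dp_node);
	if (IS_ERR(hpd_dev))
		return PTR_ERR(hpd_dev);

	/* ... stash hpd_dev in driver data for later notifications ... */
	return 0;
}

/* Forward plug/unplug events; must be called from a sleepable context. */
static void example_hpd_event(struct device *hpd_dev, bool connected)
{
	drm_aux_hpd_bridge_notify(hpd_dev,
				  connected ? connector_status_connected :
					      connector_status_disconnected);
}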
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/yu5heqaufyeo4nlowzieu4s5unwqrqyx4jixbfjmzdon677rpk@t53vceua2dao
Diffstat (limited to 'drivers/gpu')
151 files changed, 4887 insertions, 8046 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 740c1c0bd068..31cfe2c2a2af 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -74,12 +74,13 @@ config DRM_KUNIT_TEST_HELPERS config DRM_KUNIT_TEST tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS - depends on DRM && KUNIT + depends on DRM && KUNIT && MMU select DRM_BUDDY select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER select DRM_EXEC select DRM_EXPORT_FOR_TESTS if m + select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER select DRM_KUNIT_TEST_HELPERS select DRM_LIB_RANDOM @@ -409,27 +410,6 @@ config DRM_HYPERV If M is selected the module will be called hyperv_drm. -# Keep legacy drivers last - -menuconfig DRM_LEGACY - bool "Enable legacy drivers (DANGEROUS)" - depends on DRM && MMU - help - Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous - APIs to user-space, which can be used to circumvent access - restrictions and other security measures. For backwards compatibility - those drivers are still available, but their use is highly - inadvisable and might harm your system. - - You are recommended to use the safe modeset-only drivers instead, and - perform 3D emulation in user-space. - - Unless you have strong reasons to go rogue, say "N". - -if DRM_LEGACY -# leave here to list legacy drivers -endif # DRM_LEGACY - config DRM_EXPORT_FOR_TESTS bool diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index b4cb0835620a..8ac6f4b9546e 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -47,18 +47,6 @@ drm-y := \ drm_vblank_work.o \ drm_vma_manager.o \ drm_writeback.o -drm-$(CONFIG_DRM_LEGACY) += \ - drm_agpsupport.o \ - drm_bufs.o \ - drm_context.o \ - drm_dma.o \ - drm_hashtab.o \ - drm_irq.o \ - drm_legacy_misc.o \ - drm_lock.o \ - drm_memory.o \ - drm_scatter.o \ - drm_vm.o drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_PANEL) += drm_panel.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index e2ae9ba147ba..5cb33ac99f70 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -73,10 +73,10 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio) return DRM_SCHED_PRIORITY_NORMAL; case AMDGPU_CTX_PRIORITY_VERY_LOW: - return DRM_SCHED_PRIORITY_MIN; + return DRM_SCHED_PRIORITY_LOW; case AMDGPU_CTX_PRIORITY_LOW: - return DRM_SCHED_PRIORITY_MIN; + return DRM_SCHED_PRIORITY_LOW; case AMDGPU_CTX_PRIORITY_NORMAL: return DRM_SCHED_PRIORITY_NORMAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 62bb7fc7448a..71a5cf37b472 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -325,7 +325,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) int i; /* Signal all jobs not yet scheduled */ - for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { struct drm_sched_rq *rq = sched->sched_rq[i]; spin_lock(&rq->lock); list_for_each_entry(s_entity, &rq->entities, list) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index bc7b8a325353..d6e325c848eb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -92,7 +92,6 @@ #include <drm/drm_vblank.h> #include <drm/drm_audio_component.h> #include 
<drm/drm_gem_atomic_helper.h> -#include <drm/drm_plane_helper.h> #include <acpi/video.h> diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 52d2c942d3d2..c78687c755a8 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -7,8 +7,9 @@ #include <linux/clk.h> #include <linux/component.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -1012,26 +1013,17 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data) int irq = platform_get_irq(pdev, 0); const struct armada_variant *variant; struct device_node *port = NULL; + struct device_node *np, *parent = dev->of_node; if (irq < 0) return irq; - if (!dev->of_node) { - const struct platform_device_id *id; - id = platform_get_device_id(pdev); - if (!id) - return -ENXIO; - - variant = (const struct armada_variant *)id->driver_data; - } else { - const struct of_device_id *match; - struct device_node *np, *parent = dev->of_node; - - match = of_match_device(dev->driver->of_match_table, dev); - if (!match) - return -ENXIO; + variant = device_get_match_data(dev); + if (!variant) + return -ENXIO; + if (parent) { np = of_get_child_by_name(parent, "ports"); if (np) parent = np; @@ -1041,8 +1033,6 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data) dev_err(dev, "no port node found in %pOF\n", parent); return -ENXIO; } - - variant = match->data; } return armada_drm_crtc_create(drm, dev, res, irq, variant, port); diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 78122b35a0cb..a7a6b70220eb 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -6,10 +6,10 @@ #include <linux/irq.h> #include <linux/mfd/syscon.h> #include <linux/module.h> -#include <linux/of.h> -#include <linux/of_device.h> +#include <linux/mod_devicetable.h> #include <linux/of_reserved_mem.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/reset.h> @@ -143,7 +143,6 @@ static int aspeed_gfx_load(struct drm_device *drm) struct aspeed_gfx *priv = to_aspeed_gfx(drm); struct device_node *np = pdev->dev.of_node; const struct aspeed_gfx_config *config; - const struct of_device_id *match; struct resource *res; int ret; @@ -152,10 +151,9 @@ static int aspeed_gfx_load(struct drm_device *drm) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - match = of_match_device(aspeed_gfx_match, &pdev->dev); - if (!match) + config = device_get_match_data(&pdev->dev); + if (!config) return -EINVAL; - config = match->data; priv->dac_reg = config->dac_reg; priv->int_clr_reg = config->int_clear_reg; diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index cf5b754f044c..90bcb1eb9cd9 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -89,11 +89,194 @@ static const struct pci_device_id ast_pciidlist[] = { MODULE_DEVICE_TABLE(pci, ast_pciidlist); +static bool ast_is_vga_enabled(void __iomem *ioregs) +{ + u8 vgaer = __ast_read8(ioregs, AST_IO_VGAER); + + return vgaer & AST_IO_VGAER_VGA_ENABLE; +} + +static void ast_enable_vga(void __iomem *ioregs) +{ + __ast_write8(ioregs, AST_IO_VGAER, AST_IO_VGAER_VGA_ENABLE); + __ast_write8(ioregs, AST_IO_VGAMR_W, AST_IO_VGAMR_IOSEL); +} + +/* + * Run this function as part of the HW device cleanup; not 
+ * when the DRM device gets released. + */ +static void ast_enable_mmio_release(void *data) +{ + void __iomem *ioregs = (void __force __iomem *)data; + + /* enable standard VGA decode */ + __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, AST_IO_VGACRA1_MMIO_ENABLED); +} + +static int ast_enable_mmio(struct device *dev, void __iomem *ioregs) +{ + void *data = (void __force *)ioregs; + + __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, + AST_IO_VGACRA1_MMIO_ENABLED | + AST_IO_VGACRA1_VGAIO_DISABLED); + + return devm_add_action_or_reset(dev, ast_enable_mmio_release, data); +} + +static void ast_open_key(void __iomem *ioregs) +{ + __ast_write8_i(ioregs, AST_IO_VGACRI, 0x80, AST_IO_VGACR80_PASSWORD); +} + +static int ast_detect_chip(struct pci_dev *pdev, + void __iomem *regs, void __iomem *ioregs, + enum ast_chip *chip_out, + enum ast_config_mode *config_mode_out) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + enum ast_config_mode config_mode = ast_use_defaults; + uint32_t scu_rev = 0xffffffff; + enum ast_chip chip; + u32 data; + u8 vgacrd0, vgacrd1; + + /* + * Find configuration mode and read SCU revision + */ + + /* Check if we have device-tree properties */ + if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) { + /* We do, disable P2A access */ + config_mode = ast_use_dt; + scu_rev = data; + } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge + /* + * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge + * is disabled. We force using P2A if VGA only mode bit + * is set D[7] + */ + vgacrd0 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd0); + vgacrd1 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd1); + if (!(vgacrd0 & 0x80) || !(vgacrd1 & 0x10)) { + + /* + * We have a P2A bridge and it is enabled. 
+ */ + + /* Patch AST2500/AST2510 */ + if ((pdev->revision & 0xf0) == 0x40) { + if (!(vgacrd0 & AST_VRAM_INIT_STATUS_MASK)) + ast_patch_ahb_2500(regs); + } + + /* Double check that it's actually working */ + data = __ast_read32(regs, 0xf004); + if ((data != 0xffffffff) && (data != 0x00)) { + config_mode = ast_use_p2a; + + /* Read SCU7c (silicon revision register) */ + __ast_write32(regs, 0xf004, 0x1e6e0000); + __ast_write32(regs, 0xf000, 0x1); + scu_rev = __ast_read32(regs, 0x1207c); + } + } + } + + switch (config_mode) { + case ast_use_defaults: + dev_info(dev, "Using default configuration\n"); + break; + case ast_use_dt: + dev_info(dev, "Using device-tree for configuration\n"); + break; + case ast_use_p2a: + dev_info(dev, "Using P2A bridge for configuration\n"); + break; + } + + /* + * Identify chipset + */ + + if (pdev->revision >= 0x50) { + chip = AST2600; + dev_info(dev, "AST 2600 detected\n"); + } else if (pdev->revision >= 0x40) { + switch (scu_rev & 0x300) { + case 0x0100: + chip = AST2510; + dev_info(dev, "AST 2510 detected\n"); + break; + default: + chip = AST2500; + dev_info(dev, "AST 2500 detected\n"); + break; + } + } else if (pdev->revision >= 0x30) { + switch (scu_rev & 0x300) { + case 0x0100: + chip = AST1400; + dev_info(dev, "AST 1400 detected\n"); + break; + default: + chip = AST2400; + dev_info(dev, "AST 2400 detected\n"); + break; + } + } else if (pdev->revision >= 0x20) { + switch (scu_rev & 0x300) { + case 0x0000: + chip = AST1300; + dev_info(dev, "AST 1300 detected\n"); + break; + default: + chip = AST2300; + dev_info(dev, "AST 2300 detected\n"); + break; + } + } else if (pdev->revision >= 0x10) { + switch (scu_rev & 0x0300) { + case 0x0200: + chip = AST1100; + dev_info(dev, "AST 1100 detected\n"); + break; + case 0x0100: + chip = AST2200; + dev_info(dev, "AST 2200 detected\n"); + break; + case 0x0000: + chip = AST2150; + dev_info(dev, "AST 2150 detected\n"); + break; + default: + chip = AST2100; + dev_info(dev, "AST 2100 detected\n"); + break; + } + } else { + chip = AST2000; + dev_info(dev, "AST 2000 detected\n"); + } + + *chip_out = chip; + *config_mode_out = config_mode; + + return 0; +} + static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct ast_device *ast; - struct drm_device *dev; + struct device *dev = &pdev->dev; int ret; + void __iomem *regs; + void __iomem *ioregs; + enum ast_config_mode config_mode; + enum ast_chip chip; + struct drm_device *drm; + bool need_post = false; ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver); if (ret) @@ -103,16 +286,80 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - ast = ast_device_create(&ast_driver, pdev, ent->driver_data); - if (IS_ERR(ast)) - return PTR_ERR(ast); - dev = &ast->base; + regs = pcim_iomap(pdev, 1, 0); + if (!regs) + return -EIO; + + if (pdev->revision >= 0x40) { + /* + * On AST2500 and later models, MMIO is enabled by + * default. Adopt it to be compatible with ARM. + */ + resource_size_t len = pci_resource_len(pdev, 1); + + if (len < AST_IO_MM_OFFSET) + return -EIO; + if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH) + return -EIO; + ioregs = regs + AST_IO_MM_OFFSET; + } else if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) { + /* + * Map I/O registers if we have a PCI BAR for I/O. 
+ */ + resource_size_t len = pci_resource_len(pdev, 2); + + if (len < AST_IO_MM_LENGTH) + return -EIO; + ioregs = pcim_iomap(pdev, 2, 0); + if (!ioregs) + return -EIO; + } else { + /* + * Anything else is best effort. + */ + resource_size_t len = pci_resource_len(pdev, 1); + + if (len < AST_IO_MM_OFFSET) + return -EIO; + if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH) + return -EIO; + ioregs = regs + AST_IO_MM_OFFSET; + + dev_info(dev, "Platform has no I/O space, using MMIO\n"); + } + + if (!ast_is_vga_enabled(ioregs)) { + dev_info(dev, "VGA not enabled on entry, requesting chip POST\n"); + need_post = true; + } + + /* + * If VGA isn't enabled, we need to enable now or subsequent + * access to the scratch registers will fail. + */ + if (need_post) + ast_enable_vga(ioregs); + /* Enable extended register access */ + ast_open_key(ioregs); + + ret = ast_enable_mmio(dev, ioregs); + if (ret) + return ret; + + ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode); + if (ret) + return ret; + + drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post); + if (IS_ERR(drm)) + return PTR_ERR(drm); + pci_set_drvdata(pdev, drm); - ret = drm_dev_register(dev, ent->driver_data); + ret = drm_dev_register(drm, ent->driver_data); if (ret) return ret; - drm_fbdev_generic_setup(dev, 32); + drm_fbdev_generic_setup(drm, 32); return 0; } diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 772f3b049c16..3be5ccf1f5f4 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -98,6 +98,12 @@ enum ast_tx_chip { #define AST_TX_DP501_BIT BIT(AST_TX_DP501) #define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP) +enum ast_config_mode { + ast_use_p2a, + ast_use_dt, + ast_use_defaults +}; + #define AST_DRAM_512Mx16 0 #define AST_DRAM_1Gx16 1 #define AST_DRAM_512Mx32 2 @@ -192,12 +198,13 @@ to_ast_bmc_connector(struct drm_connector *connector) struct ast_device { struct drm_device base; - struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */ void __iomem *regs; void __iomem *ioregs; void __iomem *dp501_fw_buf; + enum ast_config_mode config_mode; enum ast_chip chip; + uint32_t dram_bus_width; uint32_t dram_type; uint32_t mclk; @@ -207,6 +214,8 @@ struct ast_device { unsigned long vram_size; unsigned long vram_fb_available; + struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */ + struct ast_plane primary_plane; struct ast_plane cursor_plane; struct drm_crtc crtc; @@ -234,11 +243,6 @@ struct ast_device { } output; bool support_wide_screen; - enum { - ast_use_p2a, - ast_use_dt, - ast_use_defaults - } config_mode; unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */ u8 *dp501_fw_addr; @@ -250,9 +254,13 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev) return container_of(dev, struct ast_device, base); } -struct ast_device *ast_device_create(const struct drm_driver *drv, - struct pci_dev *pdev, - unsigned long flags); +struct drm_device *ast_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); static inline unsigned long __ast_gen(struct ast_device *ast) { @@ -272,55 +280,94 @@ static inline bool __ast_gen_is_eq(struct ast_device *ast, unsigned long gen) #define IS_AST_GEN6(__ast) __ast_gen_is_eq(__ast, 6) #define IS_AST_GEN7(__ast) __ast_gen_is_eq(__ast, 7) +static inline u8 __ast_read8(const void __iomem *addr, u32 reg) +{ + 
return ioread8(addr + reg); +} + +static inline u32 __ast_read32(const void __iomem *addr, u32 reg) +{ + return ioread32(addr + reg); +} + +static inline void __ast_write8(void __iomem *addr, u32 reg, u8 val) +{ + iowrite8(val, addr + reg); +} + +static inline void __ast_write32(void __iomem *addr, u32 reg, u32 val) +{ + iowrite32(val, addr + reg); +} + +static inline u8 __ast_read8_i(void __iomem *addr, u32 reg, u8 index) +{ + __ast_write8(addr, reg, index); + return __ast_read8(addr, reg + 1); +} + +static inline u8 __ast_read8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask) +{ + u8 val = __ast_read8_i(addr, reg, index); + + return val & read_mask; +} + +static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val) +{ + __ast_write8(addr, reg, index); + __ast_write8(addr, reg + 1, val); +} + +static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask, + u8 val) +{ + u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask); + + tmp |= val; + __ast_write8_i(addr, reg, index, tmp); +} + static inline u32 ast_read32(struct ast_device *ast, u32 reg) { - return ioread32(ast->regs + reg); + return __ast_read32(ast->regs, reg); } static inline void ast_write32(struct ast_device *ast, u32 reg, u32 val) { - iowrite32(val, ast->regs + reg); + __ast_write32(ast->regs, reg, val); } static inline u8 ast_io_read8(struct ast_device *ast, u32 reg) { - return ioread8(ast->ioregs + reg); + return __ast_read8(ast->ioregs, reg); } static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val) { - iowrite8(val, ast->ioregs + reg); + __ast_write8(ast->ioregs, reg, val); } static inline u8 ast_get_index_reg(struct ast_device *ast, u32 base, u8 index) { - ast_io_write8(ast, base, index); - ++base; - return ast_io_read8(ast, base); + return __ast_read8_i(ast->ioregs, base, index); } static inline u8 ast_get_index_reg_mask(struct ast_device *ast, u32 base, u8 index, u8 preserve_mask) { - u8 val = ast_get_index_reg(ast, base, index); - - return val & preserve_mask; + return __ast_read8_i_masked(ast->ioregs, base, index, preserve_mask); } static inline void ast_set_index_reg(struct ast_device *ast, u32 base, u8 index, u8 val) { - ast_io_write8(ast, base, index); - ++base; - ast_io_write8(ast, base, val); + __ast_write8_i(ast->ioregs, base, index, val); } static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 index, u8 preserve_mask, u8 val) { - u8 tmp = ast_get_index_reg_mask(ast, base, index, preserve_mask); - - tmp |= val; - ast_set_index_reg(ast, base, index, tmp); + __ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val); } #define AST_VIDMEM_SIZE_8M 0x00800000 @@ -442,7 +489,7 @@ int ast_mm_init(struct ast_device *ast); void ast_post_gpu(struct drm_device *dev); u32 ast_mindwm(struct ast_device *ast, u32 r); void ast_moutdwm(struct ast_device *ast, u32 r, u32 v); -void ast_patch_ahb_2500(struct ast_device *ast); +void ast_patch_ahb_2500(void __iomem *regs); /* ast dp501 */ void ast_set_dp501_video_output(struct drm_device *dev, u8 mode); bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index f4ab40e22cea..2f3ad5f949fc 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -35,180 +35,6 @@ #include "ast_drv.h" -static bool ast_is_vga_enabled(struct drm_device *dev) -{ - struct ast_device *ast = to_ast_device(dev); - u8 ch; - - ch = ast_io_read8(ast, AST_IO_VGAER); - - return 
!!(ch & 0x01); -} - -static void ast_enable_vga(struct drm_device *dev) -{ - struct ast_device *ast = to_ast_device(dev); - - ast_io_write8(ast, AST_IO_VGAER, 0x01); - ast_io_write8(ast, AST_IO_VGAMR_W, 0x01); -} - -/* - * Run this function as part of the HW device cleanup; not - * when the DRM device gets released. - */ -static void ast_enable_mmio_release(void *data) -{ - struct ast_device *ast = data; - - /* enable standard VGA decode */ - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x04); -} - -static int ast_enable_mmio(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x06); - - return devm_add_action_or_reset(dev->dev, ast_enable_mmio_release, ast); -} - -static void ast_open_key(struct ast_device *ast) -{ - ast_set_index_reg(ast, AST_IO_VGACRI, 0x80, 0xA8); -} - -static int ast_device_config_init(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - struct pci_dev *pdev = to_pci_dev(dev->dev); - struct device_node *np = dev->dev->of_node; - uint32_t scu_rev = 0xffffffff; - u32 data; - u8 jregd0, jregd1; - - /* - * Find configuration mode and read SCU revision - */ - - ast->config_mode = ast_use_defaults; - - /* Check if we have device-tree properties */ - if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) { - /* We do, disable P2A access */ - ast->config_mode = ast_use_dt; - scu_rev = data; - } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge - /* - * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge - * is disabled. We force using P2A if VGA only mode bit - * is set D[7] - */ - jregd0 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); - jregd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff); - if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { - - /* - * We have a P2A bridge and it is enabled. 
- */ - - /* Patch AST2500/AST2510 */ - if ((pdev->revision & 0xf0) == 0x40) { - if (!(jregd0 & AST_VRAM_INIT_STATUS_MASK)) - ast_patch_ahb_2500(ast); - } - - /* Double check that it's actually working */ - data = ast_read32(ast, 0xf004); - if ((data != 0xffffffff) && (data != 0x00)) { - ast->config_mode = ast_use_p2a; - - /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - scu_rev = ast_read32(ast, 0x1207c); - } - } - } - - switch (ast->config_mode) { - case ast_use_defaults: - drm_info(dev, "Using default configuration\n"); - break; - case ast_use_dt: - drm_info(dev, "Using device-tree for configuration\n"); - break; - case ast_use_p2a: - drm_info(dev, "Using P2A bridge for configuration\n"); - break; - } - - /* - * Identify chipset - */ - - if (pdev->revision >= 0x50) { - ast->chip = AST2600; - drm_info(dev, "AST 2600 detected\n"); - } else if (pdev->revision >= 0x40) { - switch (scu_rev & 0x300) { - case 0x0100: - ast->chip = AST2510; - drm_info(dev, "AST 2510 detected\n"); - break; - default: - ast->chip = AST2500; - drm_info(dev, "AST 2500 detected\n"); - } - } else if (pdev->revision >= 0x30) { - switch (scu_rev & 0x300) { - case 0x0100: - ast->chip = AST1400; - drm_info(dev, "AST 1400 detected\n"); - break; - default: - ast->chip = AST2400; - drm_info(dev, "AST 2400 detected\n"); - } - } else if (pdev->revision >= 0x20) { - switch (scu_rev & 0x300) { - case 0x0000: - ast->chip = AST1300; - drm_info(dev, "AST 1300 detected\n"); - break; - default: - ast->chip = AST2300; - drm_info(dev, "AST 2300 detected\n"); - break; - } - } else if (pdev->revision >= 0x10) { - switch (scu_rev & 0x0300) { - case 0x0200: - ast->chip = AST1100; - drm_info(dev, "AST 1100 detected\n"); - break; - case 0x0100: - ast->chip = AST2200; - drm_info(dev, "AST 2200 detected\n"); - break; - case 0x0000: - ast->chip = AST2150; - drm_info(dev, "AST 2150 detected\n"); - break; - default: - ast->chip = AST2100; - drm_info(dev, "AST 2100 detected\n"); - break; - } - } else { - ast->chip = AST2000; - drm_info(dev, "AST 2000 detected\n"); - } - - return 0; -} - static void ast_detect_widescreen(struct ast_device *ast) { u8 jreg; @@ -424,69 +250,27 @@ static int ast_get_dram_info(struct drm_device *dev) return 0; } -struct ast_device *ast_device_create(const struct drm_driver *drv, - struct pci_dev *pdev, - unsigned long flags) +struct drm_device *ast_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) { struct drm_device *dev; struct ast_device *ast; - bool need_post = false; - int ret = 0; + int ret; ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); if (IS_ERR(ast)) - return ast; + return ERR_CAST(ast); dev = &ast->base; - pci_set_drvdata(pdev, dev); - - ret = drmm_mutex_init(dev, &ast->ioregs_lock); - if (ret) - return ERR_PTR(ret); - - ast->regs = pcim_iomap(pdev, 1, 0); - if (!ast->regs) - return ERR_PTR(-EIO); - - /* - * After AST2500, MMIO is enabled by default, and it should be adopted - * to be compatible with Arm. 
- */ - if (pdev->revision >= 0x40) { - ast->ioregs = ast->regs + AST_IO_MM_OFFSET; - } else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) { - drm_info(dev, "platform has no IO space, trying MMIO\n"); - ast->ioregs = ast->regs + AST_IO_MM_OFFSET; - } - - /* "map" IO regs if the above hasn't done so already */ - if (!ast->ioregs) { - ast->ioregs = pcim_iomap(pdev, 2, 0); - if (!ast->ioregs) - return ERR_PTR(-EIO); - } - - if (!ast_is_vga_enabled(dev)) { - drm_info(dev, "VGA not enabled on entry, requesting chip POST\n"); - need_post = true; - } - - /* - * If VGA isn't enabled, we need to enable now or subsequent - * access to the scratch registers will fail. - */ - if (need_post) - ast_enable_vga(dev); - - /* Enable extended register access */ - ast_open_key(ast); - ret = ast_enable_mmio(ast); - if (ret) - return ERR_PTR(ret); - - ret = ast_device_config_init(ast); - if (ret) - return ERR_PTR(ret); + ast->chip = chip; + ast->config_mode = config_mode; + ast->regs = regs; + ast->ioregs = ioregs; ast_detect_widescreen(ast); ast_detect_tx_chip(ast, need_post); @@ -517,5 +301,5 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, if (ret) return ERR_PTR(ret); - return ast; + return dev; } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index c20534d0ef7c..a718646a66b8 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -1358,13 +1358,13 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) * Protect access to I/O registers from concurrent modesetting * by acquiring the I/O-register lock. */ - mutex_lock(&ast->ioregs_lock); + mutex_lock(&ast->modeset_lock); edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter); if (!edid) goto err_mutex_unlock; - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); count = drm_add_edid_modes(connector, edid); kfree(edid); @@ -1372,7 +1372,7 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) return count; err_mutex_unlock: - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); err_drm_connector_update_edid_property: drm_connector_update_edid_property(connector, NULL); return 0; @@ -1464,13 +1464,13 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector * Protect access to I/O registers from concurrent modesetting * by acquiring the I/O-register lock. */ - mutex_lock(&ast->ioregs_lock); + mutex_lock(&ast->modeset_lock); edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter); if (!edid) goto err_mutex_unlock; - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); count = drm_add_edid_modes(connector, edid); kfree(edid); @@ -1478,7 +1478,7 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector return count; err_mutex_unlock: - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); err_drm_connector_update_edid_property: drm_connector_update_edid_property(connector, NULL); return 0; @@ -1670,13 +1670,13 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector) * Protect access to I/O registers from concurrent modesetting * by acquiring the I/O-register lock. 
*/ - mutex_lock(&ast->ioregs_lock); + mutex_lock(&ast->modeset_lock); succ = ast_astdp_read_edid(connector->dev, edid); if (succ < 0) goto err_mutex_unlock; - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); drm_connector_update_edid_property(connector, edid); count = drm_add_edid_modes(connector, edid); @@ -1685,7 +1685,7 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector) return count; err_mutex_unlock: - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); kfree(edid); err_drm_connector_update_edid_property: drm_connector_update_edid_property(connector, NULL); @@ -1870,9 +1870,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s * display modes. Protect access to I/O registers by acquiring * the I/O-register lock. Released in atomic_flush(). */ - mutex_lock(&ast->ioregs_lock); + mutex_lock(&ast->modeset_lock); drm_atomic_helper_commit_tail_rpm(state); - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); } static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = { @@ -1910,6 +1910,10 @@ int ast_mode_config_init(struct ast_device *ast) struct drm_connector *physical_connector = NULL; int ret; + ret = drmm_mutex_init(dev, &ast->modeset_lock); + if (ret) + return ret; + ret = drmm_mode_config_init(dev); if (ret) return ret; diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 7a993a384314..22f548805dfb 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -77,28 +77,42 @@ ast_set_def_ext_reg(struct drm_device *dev) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg); } -u32 ast_mindwm(struct ast_device *ast, u32 r) +static u32 __ast_mindwm(void __iomem *regs, u32 r) { - uint32_t data; + u32 data; - ast_write32(ast, 0xf004, r & 0xffff0000); - ast_write32(ast, 0xf000, 0x1); + __ast_write32(regs, 0xf004, r & 0xffff0000); + __ast_write32(regs, 0xf000, 0x1); do { - data = ast_read32(ast, 0xf004) & 0xffff0000; + data = __ast_read32(regs, 0xf004) & 0xffff0000; } while (data != (r & 0xffff0000)); - return ast_read32(ast, 0x10000 + (r & 0x0000ffff)); + + return __ast_read32(regs, 0x10000 + (r & 0x0000ffff)); } -void ast_moutdwm(struct ast_device *ast, u32 r, u32 v) +static void __ast_moutdwm(void __iomem *regs, u32 r, u32 v) { - uint32_t data; - ast_write32(ast, 0xf004, r & 0xffff0000); - ast_write32(ast, 0xf000, 0x1); + u32 data; + + __ast_write32(regs, 0xf004, r & 0xffff0000); + __ast_write32(regs, 0xf000, 0x1); + do { - data = ast_read32(ast, 0xf004) & 0xffff0000; + data = __ast_read32(regs, 0xf004) & 0xffff0000; } while (data != (r & 0xffff0000)); - ast_write32(ast, 0x10000 + (r & 0x0000ffff), v); + + __ast_write32(regs, 0x10000 + (r & 0x0000ffff), v); +} + +u32 ast_mindwm(struct ast_device *ast, u32 r) +{ + return __ast_mindwm(ast->regs, r); +} + +void ast_moutdwm(struct ast_device *ast, u32 r, u32 v) +{ + __ast_moutdwm(ast->regs, r, v); } /* @@ -1987,17 +2001,18 @@ static bool ast_dram_init_2500(struct ast_device *ast) return true; } -void ast_patch_ahb_2500(struct ast_device *ast) +void ast_patch_ahb_2500(void __iomem *regs) { - u32 data; + u32 data; /* Clear bus lock condition */ - ast_moutdwm(ast, 0x1e600000, 0xAEED1A03); - ast_moutdwm(ast, 0x1e600084, 0x00010000); - ast_moutdwm(ast, 0x1e600088, 0x00000000); - ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); - data = ast_mindwm(ast, 0x1e6e2070); - if (data & 0x08000000) { /* check fast reset */ + __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03); 
+ __ast_moutdwm(regs, 0x1e600084, 0x00010000); + __ast_moutdwm(regs, 0x1e600088, 0x00000000); + __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); + + data = __ast_mindwm(regs, 0x1e6e2070); + if (data & 0x08000000) { /* check fast reset */ /* * If "Fast restet" is enabled for ARM-ICE debugger, * then WDT needs to enable, that @@ -2009,16 +2024,18 @@ void ast_patch_ahb_2500(struct ast_device *ast) * [1]:= 1:WDT will be cleeared and disabled after timeout occurs * [0]:= 1:WDT enable */ - ast_moutdwm(ast, 0x1E785004, 0x00000010); - ast_moutdwm(ast, 0x1E785008, 0x00004755); - ast_moutdwm(ast, 0x1E78500c, 0x00000033); + __ast_moutdwm(regs, 0x1E785004, 0x00000010); + __ast_moutdwm(regs, 0x1E785008, 0x00004755); + __ast_moutdwm(regs, 0x1E78500c, 0x00000033); udelay(1000); } + do { - ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); - data = ast_mindwm(ast, 0x1e6e2000); - } while (data != 1); - ast_moutdwm(ast, 0x1e6e207c, 0x08000000); /* clear fast reset */ + __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); + data = __ast_mindwm(regs, 0x1e6e2000); + } while (data != 1); + + __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */ } void ast_post_chip_2500(struct drm_device *dev) @@ -2030,7 +2047,7 @@ void ast_post_chip_2500(struct drm_device *dev) reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */ /* Clear bus lock condition */ - ast_patch_ahb_2500(ast); + ast_patch_ahb_2500(ast->regs); /* Disable watchdog */ ast_moutdwm(ast, 0x1E78502C, 0x00000000); diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index 555286ecf520..62dddbf3fe56 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -10,10 +10,17 @@ */ #define AST_IO_MM_OFFSET (0x380) +#define AST_IO_MM_LENGTH (128) #define AST_IO_VGAARI_W (0x40) + #define AST_IO_VGAMR_W (0x42) +#define AST_IO_VGAMR_R (0x4c) +#define AST_IO_VGAMR_IOSEL BIT(0) + #define AST_IO_VGAER (0x43) +#define AST_IO_VGAER_VGA_ENABLE BIT(0) + #define AST_IO_VGASRI (0x44) #define AST_IO_VGADRR (0x47) #define AST_IO_VGADWR (0x48) @@ -21,14 +28,15 @@ #define AST_IO_VGAGRI (0x4E) #define AST_IO_VGACRI (0x54) +#define AST_IO_VGACR80_PASSWORD (0xa8) +#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1) +#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2) #define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */ #define AST_IO_VGACRCB_HWC_ENABLED BIT(1) #define AST_IO_VGAIR1_R (0x5A) #define AST_IO_VGAIR1_VREFRESH BIT(3) -#define AST_IO_VGAMR_R (0x4C) - /* * Display Transmitter Type */ diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index ba82a1142adf..19d2dc05c397 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -12,6 +12,23 @@ config DRM_PANEL_BRIDGE help DRM bridge wrapper of DRM panels +config DRM_AUX_BRIDGE + tristate + depends on DRM_BRIDGE && OF + select AUXILIARY_BUS + select DRM_PANEL_BRIDGE + help + Simple transparent bridge that is used by several non-DRM drivers to + build bridges chain. + +config DRM_AUX_HPD_BRIDGE + tristate + depends on DRM_BRIDGE && OF + select AUXILIARY_BUS + help + Simple bridge that terminates the bridge chain and provides HPD + support. 
+ menu "Display Interface Bridges" depends on DRM && DRM_BRIDGE diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 2b892b7ed59e..017b5832733b 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -1,4 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_DRM_AUX_BRIDGE) += aux-bridge.o +obj-$(CONFIG_DRM_AUX_HPD_BRIDGE) += aux-hpd-bridge.o obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 8f740154707d..ef31033439bc 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -1298,10 +1298,32 @@ static void anx7625_config(struct anx7625_data *ctx) XTAL_FRQ_SEL, XTAL_FRQ_27M); } +static int anx7625_hpd_timer_config(struct anx7625_data *ctx) +{ + int ret; + + /* Set irq detect window to 2ms */ + ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, + HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF); + ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, + HPD_DET_TIMER_BIT8_15, + (HPD_TIME >> 8) & 0xFF); + ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, + HPD_DET_TIMER_BIT16_23, + (HPD_TIME >> 16) & 0xFF); + + return ret; +} + +static int anx7625_read_hpd_gpio_config_status(struct anx7625_data *ctx) +{ + return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, GPIO_CTRL_2); +} + static void anx7625_disable_pd_protocol(struct anx7625_data *ctx) { struct device *dev = ctx->dev; - int ret; + int ret, val; /* Reset main ocm */ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40); @@ -1315,6 +1337,19 @@ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx) DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n"); else DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n"); + + /* + * Make sure the HPD GPIO already be configured after OCM release before + * setting HPD detect window register. 
Here we poll the status register + * at maximum 40ms, then config HPD irq detect window register + */ + readx_poll_timeout(anx7625_read_hpd_gpio_config_status, + ctx, val, + ((val & HPD_SOURCE) || (val < 0)), + 2000, 2000 * 20); + + /* Set HPD irq detect window to 2ms */ + anx7625_hpd_timer_config(ctx); } static int anx7625_ocm_loading_check(struct anx7625_data *ctx) @@ -1437,20 +1472,6 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx) static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx) { - int ret; - - /* Set irq detect window to 2ms */ - ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, - HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF); - ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, - HPD_DET_TIMER_BIT8_15, - (HPD_TIME >> 8) & 0xFF); - ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, - HPD_DET_TIMER_BIT16_23, - (HPD_TIME >> 16) & 0xFF); - if (ret < 0) - return ret; - return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS); } @@ -1464,9 +1485,6 @@ static int _anx7625_hpd_polling(struct anx7625_data *ctx, if (ctx->pdata.intp_irq) return 0; - /* Delay 200ms for FW HPD de-bounce */ - msleep(200); - ret = readx_poll_timeout(anx7625_read_hpd_status_p0, ctx, val, ((val & HPD_STATUS) || (val < 0)), diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h index 5af819611ebc..66ebee7f3d83 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.h +++ b/drivers/gpu/drm/bridge/analogix/anx7625.h @@ -259,6 +259,10 @@ #define AP_MIPI_RX_EN BIT(5) /* 1: MIPI RX input in 0: no RX in */ #define AP_DISABLE_PD BIT(6) #define AP_DISABLE_DISPLAY BIT(7) + +#define GPIO_CTRL_2 0x49 +#define HPD_SOURCE BIT(6) + /***************************************************************/ /* Register definition of device address 0x84 */ #define MIPI_PHY_CONTROL_3 0x03 diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c new file mode 100644 index 000000000000..49d7c2ab1ecc --- /dev/null +++ b/drivers/gpu/drm/bridge/aux-bridge.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2023 Linaro Ltd. + * + * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org> + */ +#include <linux/auxiliary_bus.h> +#include <linux/module.h> + +#include <drm/drm_bridge.h> +#include <drm/bridge/aux-bridge.h> + +static DEFINE_IDA(drm_aux_bridge_ida); + +static void drm_aux_bridge_release(struct device *dev) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + + ida_free(&drm_aux_bridge_ida, adev->id); + + kfree(adev); +} + +static void drm_aux_bridge_unregister_adev(void *_adev) +{ + struct auxiliary_device *adev = _adev; + + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); +} + +/** + * drm_aux_bridge_register - Create a simple bridge device to link the chain + * @parent: device instance providing this bridge + * + * Creates a simple DRM bridge that doesn't implement any drm_bridge + * operations. Such bridges merely fill a place in the bridge chain linking + * surrounding DRM bridges. 
+ * + * Return: zero on success, negative error code on failure + */ +int drm_aux_bridge_register(struct device *parent) +{ + struct auxiliary_device *adev; + int ret; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return -ENOMEM; + + ret = ida_alloc(&drm_aux_bridge_ida, GFP_KERNEL); + if (ret < 0) { + kfree(adev); + return ret; + } + + adev->id = ret; + adev->name = "aux_bridge"; + adev->dev.parent = parent; + adev->dev.of_node = parent->of_node; + adev->dev.release = drm_aux_bridge_release; + + ret = auxiliary_device_init(adev); + if (ret) { + ida_free(&drm_aux_bridge_ida, adev->id); + kfree(adev); + return ret; + } + + ret = auxiliary_device_add(adev); + if (ret) { + auxiliary_device_uninit(adev); + return ret; + } + + return devm_add_action_or_reset(parent, drm_aux_bridge_unregister_adev, adev); +} +EXPORT_SYMBOL_GPL(drm_aux_bridge_register); + +struct drm_aux_bridge_data { + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct device *dev; +}; + +static int drm_aux_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct drm_aux_bridge_data *data; + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) + return -EINVAL; + + data = container_of(bridge, struct drm_aux_bridge_data, bridge); + + return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); +} + +static const struct drm_bridge_funcs drm_aux_bridge_funcs = { + .attach = drm_aux_bridge_attach, +}; + +static int drm_aux_bridge_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) +{ + struct drm_aux_bridge_data *data; + + data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = &auxdev->dev; + data->next_bridge = devm_drm_of_get_bridge(&auxdev->dev, auxdev->dev.of_node, 0, 0); + if (IS_ERR(data->next_bridge)) + return dev_err_probe(&auxdev->dev, PTR_ERR(data->next_bridge), + "failed to acquire drm_bridge\n"); + + data->bridge.funcs = &drm_aux_bridge_funcs; + data->bridge.of_node = data->dev->of_node; + + return devm_drm_bridge_add(data->dev, &data->bridge); +} + +static const struct auxiliary_device_id drm_aux_bridge_table[] = { + { .name = KBUILD_MODNAME ".aux_bridge" }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, drm_aux_bridge_table); + +static struct auxiliary_driver drm_aux_bridge_drv = { + .name = "aux_bridge", + .id_table = drm_aux_bridge_table, + .probe = drm_aux_bridge_probe, +}; +module_auxiliary_driver(drm_aux_bridge_drv); + +MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>"); +MODULE_DESCRIPTION("DRM transparent bridge"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c new file mode 100644 index 000000000000..5d2ab3a715f9 --- /dev/null +++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2023 Linaro Ltd. 
+ * + * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org> + */ +#include <linux/auxiliary_bus.h> +#include <linux/module.h> +#include <linux/of_device.h> + +#include <drm/drm_bridge.h> +#include <drm/bridge/aux-bridge.h> + +static DEFINE_IDA(drm_aux_hpd_bridge_ida); + +struct drm_aux_hpd_bridge_data { + struct drm_bridge bridge; + struct device *dev; +}; + +static void drm_aux_hpd_bridge_release(struct device *dev) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + + ida_free(&drm_aux_hpd_bridge_ida, adev->id); + + of_node_put(adev->dev.platform_data); + + kfree(adev); +} + +static void drm_aux_hpd_bridge_unregister_adev(void *_adev) +{ + struct auxiliary_device *adev = _adev; + + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); +} + +/** + * drm_dp_hpd_bridge_register - Create a simple HPD DisplayPort bridge + * @parent: device instance providing this bridge + * @np: device node pointer corresponding to this bridge instance + * + * Creates a simple DRM bridge with the type set to + * DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is + * able to send the HPD events. + * + * Return: device instance that will handle created bridge or an error code + * encoded into the pointer. + */ +struct device *drm_dp_hpd_bridge_register(struct device *parent, + struct device_node *np) +{ + struct auxiliary_device *adev; + int ret; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return ERR_PTR(-ENOMEM); + + ret = ida_alloc(&drm_aux_hpd_bridge_ida, GFP_KERNEL); + if (ret < 0) { + kfree(adev); + return ERR_PTR(ret); + } + + adev->id = ret; + adev->name = "dp_hpd_bridge"; + adev->dev.parent = parent; + adev->dev.of_node = parent->of_node; + adev->dev.release = drm_aux_hpd_bridge_release; + adev->dev.platform_data = np; + + ret = auxiliary_device_init(adev); + if (ret) { + ida_free(&drm_aux_hpd_bridge_ida, adev->id); + kfree(adev); + return ERR_PTR(ret); + } + + ret = auxiliary_device_add(adev); + if (ret) { + auxiliary_device_uninit(adev); + return ERR_PTR(ret); + } + + ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_unregister_adev, adev); + if (ret) + return ERR_PTR(ret); + + return &adev->dev; +} +EXPORT_SYMBOL_GPL(drm_dp_hpd_bridge_register); + +/** + * drm_aux_hpd_bridge_notify - notify hot plug detection events + * @dev: device created for the HPD bridge + * @status: output connection status + * + * A wrapper around drm_bridge_hpd_notify() that is used to report hot plug + * detection events for bridges created via drm_dp_hpd_bridge_register(). + * + * This function shall be called in a context that can sleep. + */ +void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + struct drm_aux_hpd_bridge_data *data = auxiliary_get_drvdata(adev); + + if (!data) + return; + + drm_bridge_hpd_notify(&data->bridge, status); +} +EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify); + +static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 
0 : -EINVAL; +} + +static const struct drm_bridge_funcs drm_aux_hpd_bridge_funcs = { + .attach = drm_aux_hpd_bridge_attach, +}; + +static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) +{ + struct drm_aux_hpd_bridge_data *data; + + data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = &auxdev->dev; + data->bridge.funcs = &drm_aux_hpd_bridge_funcs; + data->bridge.of_node = dev_get_platdata(data->dev); + data->bridge.ops = DRM_BRIDGE_OP_HPD; + data->bridge.type = id->driver_data; + + auxiliary_set_drvdata(auxdev, data); + + return devm_drm_bridge_add(data->dev, &data->bridge); +} + +static const struct auxiliary_device_id drm_aux_hpd_bridge_table[] = { + { .name = KBUILD_MODNAME ".dp_hpd_bridge", .driver_data = DRM_MODE_CONNECTOR_DisplayPort, }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, drm_aux_hpd_bridge_table); + +static struct auxiliary_driver drm_aux_hpd_bridge_drv = { + .name = "aux_hpd_bridge", + .id_table = drm_aux_hpd_bridge_table, + .probe = drm_aux_hpd_bridge_probe, +}; +module_auxiliary_driver(drm_aux_hpd_bridge_drv); + +MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>"); +MODULE_DESCRIPTION("DRM HPD bridge"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c index 946212a95598..5e3b8edcf794 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c @@ -403,7 +403,8 @@ static int _cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp) static int _cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type) { - int ret, tries = 3; + int ret = -EINVAL; + int tries = 3; u32 i; for (i = 0; i < tries; i++) { diff --git a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c index 3ff30ce80c5b..2347f8dd632f 100644 --- a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c @@ -226,8 +226,8 @@ dphy_pll_get_configure_from_opts(struct imx93_dsi *dsi, unsigned long fout; unsigned long best_fout = 0; unsigned int fvco_div; - unsigned int min_n, max_n, n, best_n; - unsigned long m, best_m; + unsigned int min_n, max_n, n, best_n = UINT_MAX; + unsigned long m, best_m = 0; unsigned long min_delta = ULONG_MAX; unsigned long delta; u64 tmp; diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 03532efb893b..273157428c82 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -43,6 +43,8 @@ struct lt8912 { struct videomode mode; + struct regulator_bulk_data supplies[7]; + u8 data_lanes; bool is_power_on; }; @@ -257,6 +259,12 @@ static int lt8912_free_i2c(struct lt8912 *lt) static int lt8912_hard_power_on(struct lt8912 *lt) { + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(lt->supplies), lt->supplies); + if (ret) + return ret; + gpiod_set_value_cansleep(lt->gp_reset, 0); msleep(20); @@ -267,6 +275,9 @@ static void lt8912_hard_power_off(struct lt8912 *lt) { gpiod_set_value_cansleep(lt->gp_reset, 1); msleep(20); + + regulator_bulk_disable(ARRAY_SIZE(lt->supplies), lt->supplies); + lt->is_power_on = false; } @@ -634,6 +645,48 @@ static const struct drm_bridge_funcs lt8912_bridge_funcs = { .get_edid = lt8912_bridge_get_edid, }; +static int lt8912_bridge_resume(struct device *dev) +{ + struct lt8912 *lt = dev_get_drvdata(dev); + int ret; + + ret 
= lt8912_hard_power_on(lt); + if (ret) + return ret; + + ret = lt8912_soft_power_on(lt); + if (ret) + return ret; + + return lt8912_video_on(lt); +} + +static int lt8912_bridge_suspend(struct device *dev) +{ + struct lt8912 *lt = dev_get_drvdata(dev); + + lt8912_hard_power_off(lt); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(lt8912_bridge_pm_ops, lt8912_bridge_suspend, lt8912_bridge_resume); + +static int lt8912_get_regulators(struct lt8912 *lt) +{ + unsigned int i; + const char * const supply_names[] = { + "vdd", "vccmipirx", "vccsysclk", "vcclvdstx", + "vcchdmitx", "vcclvdspll", "vcchdmipll" + }; + + for (i = 0; i < ARRAY_SIZE(lt->supplies); i++) + lt->supplies[i].supply = supply_names[i]; + + return devm_regulator_bulk_get(lt->dev, ARRAY_SIZE(lt->supplies), + lt->supplies); +} + static int lt8912_parse_dt(struct lt8912 *lt) { struct gpio_desc *gp_reset; @@ -685,6 +738,10 @@ static int lt8912_parse_dt(struct lt8912 *lt) goto err_free_host_node; } + ret = lt8912_get_regulators(lt); + if (ret) + goto err_free_host_node; + of_node_put(port_node); return 0; @@ -770,6 +827,7 @@ static struct i2c_driver lt8912_i2c_driver = { .driver = { .name = "lt8912", .of_match_table = lt8912_dt_match, + .pm = pm_sleep_ptr(<8912_bridge_pm_ops), }, .probe = lt8912_probe, .remove = lt8912_remove, diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index d81920227a8a..7c0076e49953 100644 --- a/drivers/gpu/drm/bridge/nxp-ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -54,13 +54,13 @@ static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr, int ret; ret = i2c_master_send(ptn_bridge->client, &addr, 1); - if (ret <= 0) { + if (ret < 0) { DRM_ERROR("Failed to send i2c command, ret=%d\n", ret); return ret; } ret = i2c_master_recv(ptn_bridge->client, buf, len); - if (ret <= 0) { + if (ret < 0) { DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret); return ret; } @@ -78,7 +78,7 @@ static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr, buf[1] = val; ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf)); - if (ret <= 0) { + if (ret < 0) { DRM_ERROR("Failed to send i2c command, ret=%d\n", ret); return ret; } diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index ef2e373606ba..615cc8f950d7 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -2273,7 +2273,7 @@ static int tc_probe(struct i2c_client *client) } else { if (tc->hpd_pin < 0 || tc->hpd_pin > 1) { dev_err(dev, "failed to parse HPD number\n"); - return ret; + return -EINVAL; } } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index c45c07840f64..5b8e1dfc458d 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -1413,11 +1413,9 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, int ret; if (!pdata->pwm_enabled) { - ret = pm_runtime_get_sync(pdata->dev); - if (ret < 0) { - pm_runtime_put_sync(pdata->dev); + ret = pm_runtime_resume_and_get(pdata->dev); + if (ret < 0) return ret; - } } if (state->enabled) { diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c deleted file mode 100644 index a4ad6fd13abc..000000000000 --- a/drivers/gpu/drm/drm_agpsupport.c +++ /dev/null @@ -1,451 +0,0 @@ -/* - * \file drm_agpsupport.c - * DRM support for AGP/GART backend - * - * \author Rickard E. 
(Rik) Faith <faith@valinux.com> - * \author Gareth Hughes <gareth@valinux.com> - */ - -/* - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/slab.h> - -#if IS_ENABLED(CONFIG_AGP) -#include <asm/agp.h> -#endif - -#include <drm/drm_device.h> -#include <drm/drm_drv.h> -#include <drm/drm_file.h> -#include <drm/drm_print.h> - -#include "drm_legacy.h" - -#if IS_ENABLED(CONFIG_AGP) - -/* - * Get AGP information. - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device has been initialized and acquired and fills in the - * drm_agp_info structure with the information in drm_agp_head::agp_info. - */ -int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info) -{ - struct agp_kern_info *kern; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - - kern = &dev->agp->agp_info; - info->agp_version_major = kern->version.major; - info->agp_version_minor = kern->version.minor; - info->mode = kern->mode; - info->aperture_base = kern->aper_base; - info->aperture_size = kern->aper_size * 1024 * 1024; - info->memory_allowed = kern->max_memory << PAGE_SHIFT; - info->memory_used = kern->current_memory << PAGE_SHIFT; - info->id_vendor = kern->device->vendor; - info->id_device = kern->device->device; - - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_info); - -int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_info *info = data; - int err; - - err = drm_legacy_agp_info(dev, info); - if (err) - return err; - - return 0; -} - -/* - * Acquire the AGP device. - * - * \param dev DRM device that is to acquire AGP. - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device hasn't been acquired before and calls - * \c agp_backend_acquire. - */ -int drm_legacy_agp_acquire(struct drm_device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev->dev); - - if (!dev->agp) - return -ENODEV; - if (dev->agp->acquired) - return -EBUSY; - dev->agp->bridge = agp_backend_acquire(pdev); - if (!dev->agp->bridge) - return -ENODEV; - dev->agp->acquired = 1; - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_acquire); - -/* - * Acquire the AGP device (ioctl). 
- * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device hasn't been acquired before and calls - * \c agp_backend_acquire. - */ -int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return drm_legacy_agp_acquire((struct drm_device *)file_priv->minor->dev); -} - -/* - * Release the AGP device. - * - * \param dev DRM device that is to release AGP. - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device has been acquired and calls \c agp_backend_release. - */ -int drm_legacy_agp_release(struct drm_device *dev) -{ - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - agp_backend_release(dev->agp->bridge); - dev->agp->acquired = 0; - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_release); - -int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return drm_legacy_agp_release(dev); -} - -/* - * Enable the AGP bus. - * - * \param dev DRM device that has previously acquired AGP. - * \param mode Requested AGP mode. - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device has been acquired but not enabled, and calls - * \c agp_enable. - */ -int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) -{ - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - - dev->agp->mode = mode.mode; - agp_enable(dev->agp->bridge, mode.mode); - dev->agp->enabled = 1; - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_enable); - -int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_mode *mode = data; - - return drm_legacy_agp_enable(dev, *mode); -} - -/* - * Allocate AGP memory. - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device is present and has been acquired, allocates the - * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it. - */ -int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) -{ - struct drm_agp_mem *entry; - struct agp_memory *memory; - unsigned long pages; - u32 type; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - pages = DIV_ROUND_UP(request->size, PAGE_SIZE); - type = (u32) request->type; - memory = agp_allocate_memory(dev->agp->bridge, pages, type); - if (!memory) { - kfree(entry); - return -ENOMEM; - } - - entry->handle = (unsigned long)memory->key + 1; - entry->memory = memory; - entry->bound = 0; - entry->pages = pages; - list_add(&entry->head, &dev->agp->memory); - - request->handle = entry->handle; - request->physical = memory->physical; - - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_alloc); - - -int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_buffer *request = data; - - return drm_legacy_agp_alloc(dev, request); -} - -/* - * Search for the AGP memory entry associated with a handle. - * - * \param dev DRM device structure. - * \param handle AGP memory handle. - * \return pointer to the drm_agp_mem structure associated with \p handle. - * - * Walks through drm_agp_head::memory until finding a matching handle. 
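
For context on what is being deleted: these AGP ioctls formed a fixed lifecycle that DRI1-era user space drove directly. A hedged sketch of that sequence against the uapi definitions in include/uapi/drm/drm.h (which survive this removal); the mode word and sizes are illustrative, and all error handling is elided:

#include <sys/ioctl.h>
#include <drm/drm.h>

/* DRI1-style AGP setup/teardown; fd is an open, master DRM fd. */
static void legacy_agp_lifecycle(int fd)
{
	struct drm_agp_mode mode = { .mode = 0x1f000201 };	/* illustrative mode word */
	struct drm_agp_buffer buf = { .size = 4 << 20, .type = 0 };
	struct drm_agp_binding bind;

	ioctl(fd, DRM_IOCTL_AGP_ACQUIRE);	/* drm_legacy_agp_acquire() */
	ioctl(fd, DRM_IOCTL_AGP_ENABLE, &mode);	/* drm_legacy_agp_enable()  */
	ioctl(fd, DRM_IOCTL_AGP_ALLOC, &buf);	/* fills buf.handle         */

	bind.handle = buf.handle;
	bind.offset = 0;			/* page-aligned GATT offset */
	ioctl(fd, DRM_IOCTL_AGP_BIND, &bind);	/* drm_legacy_agp_bind()    */

	/* ... command submission through the aperture happened here ... */

	ioctl(fd, DRM_IOCTL_AGP_UNBIND, &bind);
	ioctl(fd, DRM_IOCTL_AGP_FREE, &buf);
	ioctl(fd, DRM_IOCTL_AGP_RELEASE);	/* drm_legacy_agp_release() */
}
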
- */ -static struct drm_agp_mem *drm_legacy_agp_lookup_entry(struct drm_device *dev, - unsigned long handle) -{ - struct drm_agp_mem *entry; - - list_for_each_entry(entry, &dev->agp->memory, head) { - if (entry->handle == handle) - return entry; - } - return NULL; -} - -/* - * Unbind AGP memory from the GATT (ioctl). - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device is present and acquired, looks-up the AGP memory - * entry and passes it to the unbind_agp() function. - */ -int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) -{ - struct drm_agp_mem *entry; - int ret; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - entry = drm_legacy_agp_lookup_entry(dev, request->handle); - if (!entry || !entry->bound) - return -EINVAL; - ret = agp_unbind_memory(entry->memory); - if (ret == 0) - entry->bound = 0; - return ret; -} -EXPORT_SYMBOL(drm_legacy_agp_unbind); - - -int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_binding *request = data; - - return drm_legacy_agp_unbind(dev, request); -} - -/* - * Bind AGP memory into the GATT (ioctl) - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device is present and has been acquired and that no memory - * is currently bound into the GATT. Looks-up the AGP memory entry and passes - * it to bind_agp() function. - */ -int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) -{ - struct drm_agp_mem *entry; - int retcode; - int page; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - entry = drm_legacy_agp_lookup_entry(dev, request->handle); - if (!entry || entry->bound) - return -EINVAL; - page = DIV_ROUND_UP(request->offset, PAGE_SIZE); - retcode = agp_bind_memory(entry->memory, page); - if (retcode) - return retcode; - entry->bound = dev->agp->base + (page << PAGE_SHIFT); - DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n", - dev->agp->base, entry->bound); - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_bind); - - -int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_binding *request = data; - - return drm_legacy_agp_bind(dev, request); -} - -/* - * Free AGP memory (ioctl). - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device is present and has been acquired and looks up the - * AGP memory entry. If the memory is currently bound, unbind it via - * unbind_agp(). Frees it via free_agp() as well as the entry itself - * and unlinks from the doubly linked list it's inserted in. - */ -int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) -{ - struct drm_agp_mem *entry; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - entry = drm_legacy_agp_lookup_entry(dev, request->handle); - if (!entry) - return -EINVAL; - if (entry->bound) - agp_unbind_memory(entry->memory); - - list_del(&entry->head); - - agp_free_memory(entry->memory); - kfree(entry); - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_free); - - -int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_buffer *request = data; - - return drm_legacy_agp_free(dev, request); -} - -/* - * Initialize the AGP resources. - * - * \return pointer to a drm_agp_head structure. - * - * Gets the drm_agp_t structure which is made available by the agpgart module - * via the inter_module_* functions. 
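
One detail of drm_legacy_agp_bind() above that is easy to misread: the byte offset from user space is converted with DIV_ROUND_UP(), so an offset that is not page aligned binds at the *next* page boundary. A worked example with 4 KiB pages (all values illustrative):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long agp_base = 0xd0000000UL;	/* illustrative aperture base */
	unsigned long offset = 10000;		/* not page aligned */

	unsigned long page = DIV_ROUND_UP(offset, PAGE_SIZE);	/* 3, not 2 */
	unsigned long bound = agp_base + (page << PAGE_SHIFT);	/* 0xd0003000 */

	printf("page %lu -> bound %#lx\n", page, bound);
	return 0;
}
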
Creates and initializes a drm_agp_head - * structure. - * - * Note that final cleanup of the kmalloced structure is directly done in - * drm_pci_agp_destroy. - */ -struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev->dev); - struct drm_agp_head *head = NULL; - - head = kzalloc(sizeof(*head), GFP_KERNEL); - if (!head) - return NULL; - head->bridge = agp_find_bridge(pdev); - if (!head->bridge) { - head->bridge = agp_backend_acquire(pdev); - if (!head->bridge) { - kfree(head); - return NULL; - } - agp_copy_info(head->bridge, &head->agp_info); - agp_backend_release(head->bridge); - } else { - agp_copy_info(head->bridge, &head->agp_info); - } - if (head->agp_info.chipset == NOT_SUPPORTED) { - kfree(head); - return NULL; - } - INIT_LIST_HEAD(&head->memory); - head->cant_use_aperture = head->agp_info.cant_use_aperture; - head->page_mask = head->agp_info.page_mask; - head->base = head->agp_info.aper_base; - return head; -} -/* Only exported for i810.ko */ -EXPORT_SYMBOL(drm_legacy_agp_init); - -/** - * drm_legacy_agp_clear - Clear AGP resource list - * @dev: DRM device - * - * Iterate over all AGP resources and remove them. But keep the AGP head - * intact so it can still be used. It is safe to call this if AGP is disabled or - * was already removed. - * - * Cleanup is only done for drivers who have DRIVER_LEGACY set. - */ -void drm_legacy_agp_clear(struct drm_device *dev) -{ - struct drm_agp_mem *entry, *tempe; - - if (!dev->agp) - return; - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { - if (entry->bound) - agp_unbind_memory(entry->memory); - agp_free_memory(entry->memory); - kfree(entry); - } - INIT_LIST_HEAD(&dev->agp->memory); - - if (dev->agp->acquired) - drm_legacy_agp_release(dev); - - dev->agp->acquired = 0; - dev->agp->enabled = 0; -} - -#endif diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index f1a503aafe5a..c31fc0b48c31 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1773,6 +1773,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, struct drm_crtc *crtc; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; + struct drm_private_obj *obj; if (!drm_drv_uses_atomic_modeset(dev)) return; @@ -1801,6 +1802,14 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, if (take_locks) drm_modeset_unlock(&dev->mode_config.connection_mutex); drm_connector_list_iter_end(&conn_iter); + + list_for_each_entry(obj, &config->privobj_list, head) { + if (take_locks) + drm_modeset_lock(&obj->lock, NULL); + drm_atomic_private_obj_print_state(p, obj->state); + if (take_locks) + drm_modeset_unlock(&obj->lock); + } } /** diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 784e63d70a42..54975de44a0e 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -275,6 +275,20 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state, plane_state->normalized_zpos = val; } } + + if (plane->hotspot_x_property) { + if (!drm_object_property_get_default_value(&plane->base, + plane->hotspot_x_property, + &val)) + plane_state->hotspot_x = val; + } + + if (plane->hotspot_y_property) { + if (!drm_object_property_get_default_value(&plane->base, + plane->hotspot_y_property, + &val)) + plane_state->hotspot_y = val; + } } 
EXPORT_SYMBOL(__drm_atomic_helper_plane_state_reset);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 98d3b10c08ae..aee4a65d4959 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -593,6 +593,22 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
 	} else if (plane->funcs->atomic_set_property) {
 		return plane->funcs->atomic_set_property(plane, state,
 				property, val);
+	} else if (property == plane->hotspot_x_property) {
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			drm_dbg_atomic(plane->dev,
+				       "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n",
+				       plane->base.id, plane->name, val);
+			return -EINVAL;
+		}
+		state->hotspot_x = val;
+	} else if (property == plane->hotspot_y_property) {
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			drm_dbg_atomic(plane->dev,
+				       "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n",
+				       plane->base.id, plane->name, val);
+			return -EINVAL;
+		}
+		state->hotspot_y = val;
 	} else {
 		drm_dbg_atomic(plane->dev,
 			       "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
@@ -653,6 +669,10 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
 		*val = state->scaling_filter;
 	} else if (plane->funcs->atomic_get_property) {
 		return plane->funcs->atomic_get_property(plane, state, property, val);
+	} else if (property == plane->hotspot_x_property) {
+		*val = state->hotspot_x;
+	} else if (property == plane->hotspot_y_property) {
+		*val = state->hotspot_y;
 	} else {
 		drm_dbg_atomic(dev,
 			       "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
@@ -1006,13 +1026,28 @@ out:
 	return ret;
 }
 
+static int drm_atomic_check_prop_changes(int ret, uint64_t old_val, uint64_t prop_value,
+					 struct drm_property *prop)
+{
+	if (ret != 0 || old_val != prop_value) {
+		drm_dbg_atomic(prop->dev,
+			       "[PROP:%d:%s] No prop can be changed during async flip\n",
+			       prop->base.id, prop->name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int drm_atomic_set_property(struct drm_atomic_state *state,
 			    struct drm_file *file_priv,
 			    struct drm_mode_object *obj,
 			    struct drm_property *prop,
-			    uint64_t prop_value)
+			    u64 prop_value,
+			    bool async_flip)
 {
 	struct drm_mode_object *ref;
+	u64 old_val;
 	int ret;
 
 	if (!drm_property_change_valid_get(prop, prop_value, &ref))
@@ -1029,6 +1064,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
 			break;
 		}
 
+		if (async_flip) {
+			ret = drm_atomic_connector_get_property(connector, connector_state,
+								prop, &old_val);
+			ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+			break;
+		}
+
 		ret = drm_atomic_connector_set_property(connector,
 				connector_state, file_priv,
 				prop, prop_value);
@@ -1044,6 +1086,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
 			break;
 		}
 
+		if (async_flip) {
+			ret = drm_atomic_crtc_get_property(crtc, crtc_state,
+							   prop, &old_val);
+			ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+			break;
+		}
+
 		ret = drm_atomic_crtc_set_property(crtc, crtc_state,
 				prop, prop_value);
 		break;
@@ -1051,6 +1100,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
 	case DRM_MODE_OBJECT_PLANE: {
 		struct drm_plane *plane = obj_to_plane(obj);
 		struct drm_plane_state *plane_state;
+		struct drm_mode_config *config = &plane->dev->mode_config;
 
 		plane_state = drm_atomic_get_plane_state(state, plane);
 		if (IS_ERR(plane_state)) {
@@ -1058,6 +1108,21 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
 			break;
 		}
 
+		if (async_flip && prop != config->prop_fb_id) {
+			ret = drm_atomic_plane_get_property(plane, plane_state,
+							    prop, &old_val);
+			ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+			break;
+		}
+
+		if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) {
+			drm_dbg_atomic(prop->dev,
+				       "[OBJECT:%d] Only primary planes can be changed during async flip\n",
+				       obj->id);
+			ret = -EINVAL;
+			break;
+		}
+
 		ret = drm_atomic_plane_set_property(plane, plane_state,
 				file_priv, prop, prop_value);
@@ -1323,6 +1388,18 @@ static void complete_signaling(struct drm_device *dev,
 	kfree(fence_state);
 }
 
+static void
+set_async_flip(struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+		crtc_state->async_flip = true;
+	}
+}
+
 int drm_mode_atomic_ioctl(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv)
 {
@@ -1337,6 +1414,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	struct drm_out_fence_state *fence_state;
 	int ret = 0;
 	unsigned int i, j, num_fences;
+	bool async_flip = false;
 
 	/* disallow for drivers not supporting atomic: */
 	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1363,9 +1441,13 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	}
 
 	if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) {
-		drm_dbg_atomic(dev,
-			       "commit failed: invalid flag DRM_MODE_PAGE_FLIP_ASYNC\n");
-		return -EINVAL;
+		if (!dev->mode_config.async_page_flip) {
+			drm_dbg_atomic(dev,
+				       "commit failed: DRM_MODE_PAGE_FLIP_ASYNC not supported\n");
+			return -EINVAL;
+		}
+
+		async_flip = true;
 	}
 
 	/* can't test and expect an event at the same time. */
@@ -1450,8 +1532,8 @@ retry:
 				goto out;
 			}
 
-			ret = drm_atomic_set_property(state, file_priv,
-						      obj, prop, prop_value);
+			ret = drm_atomic_set_property(state, file_priv, obj,
						      prop, prop_value, async_flip);
 			if (ret) {
 				drm_mode_object_put(obj);
 				goto out;
@@ -1468,6 +1550,9 @@ retry:
 	if (ret)
 		goto out;
 
+	if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC)
+		set_async_flip(state);
+
 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
 		ret = drm_atomic_check_only(state);
 	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 2ed2585ded37..252c105d614f 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -37,13 +37,12 @@
 #include <drm/drm_print.h>
 
 #include "drm_internal.h"
-#include "drm_legacy.h"
 
 /**
  * DOC: master and authentication
  *
  * &struct drm_master is used to track groups of clients with open
- * primary/legacy device nodes. For every &struct drm_file which has had at
+ * primary device nodes. For every &struct drm_file which has had at
  * least once successfully became the device master (either through the
  * SET_MASTER IOCTL, or implicitly through opening the primary device node when
  * no one else is the current master that time) there exists one &drm_master.
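
Taken together, the drm_atomic_uapi.c changes above make DRM_MODE_PAGE_FLIP_ASYNC acceptable on the atomic ioctl, but only for commits that change FB_ID on primary planes and leave every other property untouched. A hedged libdrm sketch of such a commit; fd, plane_id, prop_fb_id and fb_id are assumed to have been discovered beforehand (e.g. via drmModeObjectGetProperties()):

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int async_primary_flip(int fd, uint32_t plane_id,
			      uint32_t prop_fb_id, uint32_t fb_id)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	/* Only FB_ID: any other property would now fail with -EINVAL. */
	drmModeAtomicAddProperty(req, plane_id, prop_fb_id, fb_id);
	ret = drmModeAtomicCommit(fd, req,
				  DRM_MODE_PAGE_FLIP_ASYNC |
				  DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);
	return ret;
}
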
@@ -139,7 +138,6 @@ struct drm_master *drm_master_create(struct drm_device *dev) return NULL; kref_init(&master->refcount); - drm_master_legacy_init(master); idr_init_base(&master->magic_map, 1); master->dev = dev; @@ -365,8 +363,6 @@ void drm_master_release(struct drm_file *file_priv) if (!drm_is_current_master_locked(file_priv)) goto out; - drm_legacy_lock_master_cleanup(dev, master); - if (dev->master == file_priv->master) drm_drop_master(dev, file_priv); out: @@ -429,8 +425,6 @@ static void drm_master_destroy(struct kref *kref) if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_lease_destroy(master); - drm_legacy_master_rmmaps(dev, master); - idr_destroy(&master->magic_map); idr_destroy(&master->leases); idr_destroy(&master->lessee_idr); diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 30d66bee0ec6..cee3188adf3d 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -1347,50 +1347,6 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np) EXPORT_SYMBOL(of_drm_find_bridge); #endif -#ifdef CONFIG_DEBUG_FS -static int drm_bridge_chains_info(struct seq_file *m, void *data) -{ - struct drm_debugfs_entry *entry = m->private; - struct drm_device *dev = entry->dev; - struct drm_printer p = drm_seq_file_printer(m); - struct drm_mode_config *config = &dev->mode_config; - struct drm_encoder *encoder; - unsigned int bridge_idx = 0; - - list_for_each_entry(encoder, &config->encoder_list, head) { - struct drm_bridge *bridge; - - drm_printf(&p, "encoder[%u]\n", encoder->base.id); - - drm_for_each_bridge_in_chain(encoder, bridge) { - drm_printf(&p, "\tbridge[%u] type: %u, ops: %#x", - bridge_idx, bridge->type, bridge->ops); - -#ifdef CONFIG_OF - if (bridge->of_node) - drm_printf(&p, ", OF: %pOFfc", bridge->of_node); -#endif - - drm_printf(&p, "\n"); - - bridge_idx++; - } - } - - return 0; -} - -static const struct drm_debugfs_info drm_bridge_debugfs_list[] = { - { "bridge_chains", drm_bridge_chains_info, 0 }, -}; - -void drm_bridge_debugfs_init(struct drm_device *dev) -{ - drm_debugfs_add_files(dev, drm_bridge_debugfs_list, - ARRAY_SIZE(drm_bridge_debugfs_list)); -} -#endif - MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>"); MODULE_DESCRIPTION("DRM bridge infrastructure"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c index 8239ad43aed5..3acd67021ec6 100644 --- a/drivers/gpu/drm/drm_bridge_connector.c +++ b/drivers/gpu/drm/drm_bridge_connector.c @@ -198,12 +198,6 @@ static void drm_bridge_connector_destroy(struct drm_connector *connector) struct drm_bridge_connector *bridge_connector = to_drm_bridge_connector(connector); - if (bridge_connector->bridge_hpd) { - struct drm_bridge *hpd = bridge_connector->bridge_hpd; - - drm_bridge_hpd_disable(hpd); - } - drm_connector_unregister(connector); drm_connector_cleanup(connector); diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c deleted file mode 100644 index 86700560fea2..000000000000 --- a/drivers/gpu/drm/drm_bufs.c +++ /dev/null @@ -1,1627 +0,0 @@ -/* - * Legacy: Generic DRM Buffer Management - * - * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Author: Rickard E. 
(Rik) Faith <faith@valinux.com> - * Author: Gareth Hughes <gareth@valinux.com> - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include <linux/export.h> -#include <linux/log2.h> -#include <linux/mm.h> -#include <linux/mman.h> -#include <linux/nospec.h> -#include <linux/pci.h> -#include <linux/slab.h> -#include <linux/uaccess.h> -#include <linux/vmalloc.h> - -#include <asm/shmparam.h> - -#include <drm/drm_device.h> -#include <drm/drm_drv.h> -#include <drm/drm_file.h> -#include <drm/drm_print.h> - -#include "drm_legacy.h" - - -static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, - struct drm_local_map *map) -{ - struct drm_map_list *entry; - - list_for_each_entry(entry, &dev->maplist, head) { - /* - * Because the kernel-userspace ABI is fixed at a 32-bit offset - * while PCI resources may live above that, we only compare the - * lower 32 bits of the map offset for maps of type - * _DRM_FRAMEBUFFER or _DRM_REGISTERS. - * It is assumed that if a driver have more than one resource - * of each type, the lower 32 bits are different. - */ - if (!entry->map || - map->type != entry->map->type || - entry->master != dev->master) - continue; - switch (map->type) { - case _DRM_SHM: - if (map->flags != _DRM_CONTAINS_LOCK) - break; - return entry; - case _DRM_REGISTERS: - case _DRM_FRAME_BUFFER: - if ((entry->map->offset & 0xffffffff) == - (map->offset & 0xffffffff)) - return entry; - break; - default: /* Make gcc happy */ - break; - } - if (entry->map->offset == map->offset) - return entry; - } - - return NULL; -} - -static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, - unsigned long user_token, int hashed_handle, int shm) -{ - int use_hashed_handle, shift; - unsigned long add; - -#if (BITS_PER_LONG == 64) - use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle); -#elif (BITS_PER_LONG == 32) - use_hashed_handle = hashed_handle; -#else -#error Unsupported long size. Neither 64 nor 32 bits. -#endif - - if (!use_hashed_handle) { - int ret; - - hash->key = user_token >> PAGE_SHIFT; - ret = drm_ht_insert_item(&dev->map_hash, hash); - if (ret != -EINVAL) - return ret; - } - - shift = 0; - add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT; - if (shm && (SHMLBA > PAGE_SIZE)) { - int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1; - - /* For shared memory, we have to preserve the SHMLBA - * bits of the eventual vma->vm_pgoff value during - * mmap(). 
Otherwise we run into cache aliasing problems - * on some platforms. On these platforms, the pgoff of - * a mmap() request is used to pick a suitable virtual - * address for the mmap() region such that it will not - * cause cache aliasing problems. - * - * Therefore, make sure the SHMLBA relevant bits of the - * hash value we use are equal to those in the original - * kernel virtual address. - */ - shift = bits; - add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL)); - } - - return drm_ht_just_insert_please(&dev->map_hash, hash, - user_token, 32 - PAGE_SHIFT - 3, - shift, add); -} - -/* - * Core function to create a range of memory available for mapping by a - * non-root process. - * - * Adjusts the memory offset to its absolute value according to the mapping - * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where - * applicable and if supported by the kernel. - */ -static int drm_addmap_core(struct drm_device *dev, resource_size_t offset, - unsigned int size, enum drm_map_type type, - enum drm_map_flags flags, - struct drm_map_list **maplist) -{ - struct drm_local_map *map; - struct drm_map_list *list; - unsigned long user_token; - int ret; - - map = kmalloc(sizeof(*map), GFP_KERNEL); - if (!map) - return -ENOMEM; - - map->offset = offset; - map->size = size; - map->flags = flags; - map->type = type; - - /* Only allow shared memory to be removable since we only keep enough - * book keeping information about shared memory to allow for removal - * when processes fork. - */ - if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { - kfree(map); - return -EINVAL; - } - DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n", - (unsigned long long)map->offset, map->size, map->type); - - /* page-align _DRM_SHM maps. They are allocated here so there is no security - * hole created by that and it works around various broken drivers that use - * a non-aligned quantity to map the SAREA. --BenH - */ - if (map->type == _DRM_SHM) - map->size = PAGE_ALIGN(map->size); - - if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) { - kfree(map); - return -EINVAL; - } - map->mtrr = -1; - map->handle = NULL; - - switch (map->type) { - case _DRM_REGISTERS: - case _DRM_FRAME_BUFFER: -#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__) - if (map->offset + (map->size-1) < map->offset || - map->offset < virt_to_phys(high_memory)) { - kfree(map); - return -EINVAL; - } -#endif - /* Some drivers preinitialize some maps, without the X Server - * needing to be aware of it. Therefore, we just return success - * when the server tries to create a duplicate map. 
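
The SHMLBA comment in drm_map_handle() further up is clearer with numbers. Assuming SHMLBA is 16 KiB and pages are 4 KiB, the code keeps the low bits of the kernel address's page number in the generated handle, so the later mmap() offset lands on a compatible cache colour. A small sketch of that bit math (the address and SHMLBA value are illustrative):

#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA (16UL << 10)	/* assumed platform value for the example */

int main(void)
{
	unsigned long user_token = 0xc0012345000UL;	/* illustrative kernel VA */
	/* ilog2(SHMLBA >> PAGE_SHIFT) + 1, as in drm_map_handle();
	 * ctz equals ilog2 for powers of two. */
	int bits = __builtin_ctzl(SHMLBA >> PAGE_SHIFT) + 1;
	unsigned long kept = (user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL);

	/* These bits are folded into the hash seed ("add"), so vm_pgoff at
	 * mmap() time shares them with the original kernel address. */
	printf("%d low page bits preserved: %#lx\n", bits, kept);
	return 0;
}
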
- */ - list = drm_find_matching_map(dev, map); - if (list != NULL) { - if (list->map->size != map->size) { - DRM_DEBUG("Matching maps of type %d with " - "mismatched sizes, (%ld vs %ld)\n", - map->type, map->size, - list->map->size); - list->map->size = map->size; - } - - kfree(map); - *maplist = list; - return 0; - } - - if (map->type == _DRM_FRAME_BUFFER || - (map->flags & _DRM_WRITE_COMBINING)) { - map->mtrr = - arch_phys_wc_add(map->offset, map->size); - } - if (map->type == _DRM_REGISTERS) { - if (map->flags & _DRM_WRITE_COMBINING) - map->handle = ioremap_wc(map->offset, - map->size); - else - map->handle = ioremap(map->offset, map->size); - if (!map->handle) { - kfree(map); - return -ENOMEM; - } - } - - break; - case _DRM_SHM: - list = drm_find_matching_map(dev, map); - if (list != NULL) { - if (list->map->size != map->size) { - DRM_DEBUG("Matching maps of type %d with " - "mismatched sizes, (%ld vs %ld)\n", - map->type, map->size, list->map->size); - list->map->size = map->size; - } - - kfree(map); - *maplist = list; - return 0; - } - map->handle = vmalloc_user(map->size); - DRM_DEBUG("%lu %d %p\n", - map->size, order_base_2(map->size), map->handle); - if (!map->handle) { - kfree(map); - return -ENOMEM; - } - map->offset = (unsigned long)map->handle; - if (map->flags & _DRM_CONTAINS_LOCK) { - /* Prevent a 2nd X Server from creating a 2nd lock */ - if (dev->master->lock.hw_lock != NULL) { - vfree(map->handle); - kfree(map); - return -EBUSY; - } - dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */ - } - break; - case _DRM_AGP: { - struct drm_agp_mem *entry; - int valid = 0; - - if (!dev->agp) { - kfree(map); - return -EINVAL; - } -#ifdef __alpha__ - map->offset += dev->hose->mem_space->start; -#endif - /* In some cases (i810 driver), user space may have already - * added the AGP base itself, because dev->agp->base previously - * only got set during AGP enable. So, only add the base - * address if the map's offset isn't already within the - * aperture. - */ - if (map->offset < dev->agp->base || - map->offset > dev->agp->base + - dev->agp->agp_info.aper_size * 1024 * 1024 - 1) { - map->offset += dev->agp->base; - } - map->mtrr = dev->agp->agp_mtrr; /* for getmap */ - - /* This assumes the DRM is in total control of AGP space. - * It's not always the case as AGP can be in the control - * of user space (i.e. i810 driver). So this loop will get - * skipped and we double check that dev->agp->memory is - * actually set as well as being invalid before EPERM'ing - */ - list_for_each_entry(entry, &dev->agp->memory, head) { - if ((map->offset >= entry->bound) && - (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { - valid = 1; - break; - } - } - if (!list_empty(&dev->agp->memory) && !valid) { - kfree(map); - return -EPERM; - } - DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n", - (unsigned long long)map->offset, map->size); - - break; - } - case _DRM_SCATTER_GATHER: - if (!dev->sg) { - kfree(map); - return -EINVAL; - } - map->offset += (unsigned long)dev->sg->virtual; - break; - case _DRM_CONSISTENT: - /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G, - * As we're limiting the address to 2^32-1 (or less), - * casting it down to 32 bits is no problem, but we - * need to point to a 64bit variable first. 
- */ - map->handle = dma_alloc_coherent(dev->dev, - map->size, - &map->offset, - GFP_KERNEL); - if (!map->handle) { - kfree(map); - return -ENOMEM; - } - break; - default: - kfree(map); - return -EINVAL; - } - - list = kzalloc(sizeof(*list), GFP_KERNEL); - if (!list) { - if (map->type == _DRM_REGISTERS) - iounmap(map->handle); - kfree(map); - return -EINVAL; - } - list->map = map; - - mutex_lock(&dev->struct_mutex); - list_add(&list->head, &dev->maplist); - - /* Assign a 32-bit handle */ - /* We do it here so that dev->struct_mutex protects the increment */ - user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle : - map->offset; - ret = drm_map_handle(dev, &list->hash, user_token, 0, - (map->type == _DRM_SHM)); - if (ret) { - if (map->type == _DRM_REGISTERS) - iounmap(map->handle); - kfree(map); - kfree(list); - mutex_unlock(&dev->struct_mutex); - return ret; - } - - list->user_token = list->hash.key << PAGE_SHIFT; - mutex_unlock(&dev->struct_mutex); - - if (!(map->flags & _DRM_DRIVER)) - list->master = dev->master; - *maplist = list; - return 0; -} - -int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset, - unsigned int size, enum drm_map_type type, - enum drm_map_flags flags, struct drm_local_map **map_ptr) -{ - struct drm_map_list *list; - int rc; - - rc = drm_addmap_core(dev, offset, size, type, flags, &list); - if (!rc) - *map_ptr = list->map; - return rc; -} -EXPORT_SYMBOL(drm_legacy_addmap); - -struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, - unsigned int token) -{ - struct drm_map_list *_entry; - - list_for_each_entry(_entry, &dev->maplist, head) - if (_entry->user_token == token) - return _entry->map; - return NULL; -} -EXPORT_SYMBOL(drm_legacy_findmap); - -/* - * Ioctl to specify a range of memory that is available for mapping by a - * non-root process. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_map structure. - * \return zero on success or a negative value on error. - * - */ -int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_map *map = data; - struct drm_map_list *maplist; - int err; - - if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM)) - return -EPERM; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - err = drm_addmap_core(dev, map->offset, map->size, map->type, - map->flags, &maplist); - - if (err) - return err; - - /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ - map->handle = (void *)(unsigned long)maplist->user_token; - - /* - * It appears that there are no users of this value whatsoever -- - * drmAddMap just discards it. Let's not encourage its use. - * (Keeping drm_addmap_core's returned mtrr value would be wrong -- - * it's not a real mtrr index anymore.) - */ - map->mtrr = -1; - - return 0; -} - -/* - * Get a mapping information. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument, pointing to a drm_map structure. - * - * \return zero on success or a negative number on failure. 
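
The _DRM_CONSISTENT branch above is an ordinary coherent-DMA allocation, and the dma_alloc_coherent()/dma_free_coherent() pair it uses is still the standard API for memory that device and CPU observe coherently. A minimal sketch, assuming 'dev' is a DMA-capable struct device:

#include <linux/dma-mapping.h>

/* 'handle' receives the bus address for the device; the returned pointer
 * is the CPU-side mapping of the same buffer. */
static void *demo_coherent_alloc(struct device *dev, size_t size,
				 dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

static void demo_coherent_free(struct device *dev, size_t size,
			       void *vaddr, dma_addr_t handle)
{
	dma_free_coherent(dev, size, vaddr, handle);
}
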
- * - * Searches for the mapping with the specified offset and copies its information - * into userspace - */ -int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_map *map = data; - struct drm_map_list *r_list = NULL; - struct list_head *list; - int idx; - int i; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - idx = map->offset; - if (idx < 0) - return -EINVAL; - - i = 0; - mutex_lock(&dev->struct_mutex); - list_for_each(list, &dev->maplist) { - if (i == idx) { - r_list = list_entry(list, struct drm_map_list, head); - break; - } - i++; - } - if (!r_list || !r_list->map) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - - map->offset = r_list->map->offset; - map->size = r_list->map->size; - map->type = r_list->map->type; - map->flags = r_list->map->flags; - map->handle = (void *)(unsigned long) r_list->user_token; - map->mtrr = arch_phys_wc_index(r_list->map->mtrr); - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -/* - * Remove a map private from list and deallocate resources if the mapping - * isn't in use. - * - * Searches the map on drm_device::maplist, removes it from the list, see if - * it's being used, and free any associated resource (such as MTRR's) if it's not - * being on use. - * - * \sa drm_legacy_addmap - */ -int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) -{ - struct drm_map_list *r_list = NULL, *list_t; - int found = 0; - struct drm_master *master; - - /* Find the list entry for the map and remove it */ - list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { - if (r_list->map == map) { - master = r_list->master; - list_del(&r_list->head); - drm_ht_remove_key(&dev->map_hash, - r_list->user_token >> PAGE_SHIFT); - kfree(r_list); - found = 1; - break; - } - } - - if (!found) - return -EINVAL; - - switch (map->type) { - case _DRM_REGISTERS: - iounmap(map->handle); - fallthrough; - case _DRM_FRAME_BUFFER: - arch_phys_wc_del(map->mtrr); - break; - case _DRM_SHM: - vfree(map->handle); - if (master) { - if (dev->sigdata.lock == master->lock.hw_lock) - dev->sigdata.lock = NULL; - master->lock.hw_lock = NULL; /* SHM removed */ - master->lock.file_priv = NULL; - wake_up_interruptible_all(&master->lock.lock_queue); - } - break; - case _DRM_AGP: - case _DRM_SCATTER_GATHER: - break; - case _DRM_CONSISTENT: - dma_free_coherent(dev->dev, - map->size, - map->handle, - map->offset); - break; - } - kfree(map); - - return 0; -} -EXPORT_SYMBOL(drm_legacy_rmmap_locked); - -void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->struct_mutex); - drm_legacy_rmmap_locked(dev, map); - mutex_unlock(&dev->struct_mutex); -} -EXPORT_SYMBOL(drm_legacy_rmmap); - -void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master) -{ - struct drm_map_list *r_list, *list_temp; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) { - if (r_list->master == master) { - drm_legacy_rmmap_locked(dev, r_list->map); - r_list = NULL; - } - } - mutex_unlock(&dev->struct_mutex); -} - -void drm_legacy_rmmaps(struct drm_device *dev) -{ - struct drm_map_list *r_list, *list_temp; - - list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) - drm_legacy_rmmap(dev, r_list->map); -} - -/* The rmmap ioctl appears to be unnecessary. 
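
drm_legacy_rmmap_locked() above tears a register or framebuffer map down with iounmap() plus arch_phys_wc_del(), mirroring the ioremap_wc()/arch_phys_wc_add() setup in drm_addmap_core(). That pairing is still how a driver maps a write-combined BAR; a sketch, assuming base and size come from pci_resource_start()/pci_resource_len():

#include <linux/io.h>

struct demo_wc_map {
	void __iomem *regs;
	int wc_handle;
};

static int demo_map_wc(struct demo_wc_map *m, resource_size_t base,
		       resource_size_t size)
{
	/* Handle for arch_phys_wc_del(); harmless where MTRRs don't exist. */
	m->wc_handle = arch_phys_wc_add(base, size);
	m->regs = ioremap_wc(base, size);
	if (!m->regs) {
		arch_phys_wc_del(m->wc_handle);
		return -ENOMEM;
	}
	return 0;
}

static void demo_unmap_wc(struct demo_wc_map *m)
{
	iounmap(m->regs);
	arch_phys_wc_del(m->wc_handle);
}
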
All mappings are torn down on - * the last close of the device, and this is necessary for cleanup when things - * exit uncleanly. Therefore, having userland manually remove mappings seems - * like a pointless exercise since they're going away anyway. - * - * One use case might be after addmap is allowed for normal users for SHM and - * gets used by drivers that the server doesn't need to care about. This seems - * unlikely. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a struct drm_map structure. - * \return zero on success or a negative value on error. - */ -int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_map *request = data; - struct drm_local_map *map = NULL; - struct drm_map_list *r_list; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(r_list, &dev->maplist, head) { - if (r_list->map && - r_list->user_token == (unsigned long)request->handle && - r_list->map->flags & _DRM_REMOVABLE) { - map = r_list->map; - break; - } - } - - /* List has wrapped around to the head pointer, or it's empty we didn't - * find anything. - */ - if (list_empty(&dev->maplist) || !map) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - - /* Register and framebuffer maps are permanent */ - if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { - mutex_unlock(&dev->struct_mutex); - return 0; - } - - ret = drm_legacy_rmmap_locked(dev, map); - - mutex_unlock(&dev->struct_mutex); - - return ret; -} - -/* - * Cleanup after an error on one of the addbufs() functions. - * - * \param dev DRM device. - * \param entry buffer entry where the error occurred. - * - * Frees any pages and buffers associated with the given entry. - */ -static void drm_cleanup_buf_error(struct drm_device *dev, - struct drm_buf_entry *entry) -{ - drm_dma_handle_t *dmah; - int i; - - if (entry->seg_count) { - for (i = 0; i < entry->seg_count; i++) { - if (entry->seglist[i]) { - dmah = entry->seglist[i]; - dma_free_coherent(dev->dev, - dmah->size, - dmah->vaddr, - dmah->busaddr); - kfree(dmah); - } - } - kfree(entry->seglist); - - entry->seg_count = 0; - } - - if (entry->buf_count) { - for (i = 0; i < entry->buf_count; i++) { - kfree(entry->buflist[i].dev_private); - } - kfree(entry->buflist); - - entry->buf_count = 0; - } -} - -#if IS_ENABLED(CONFIG_AGP) -/* - * Add AGP buffers for DMA transfers. - * - * \param dev struct drm_device to which the buffers are to be added. - * \param request pointer to a struct drm_buf_desc describing the request. - * \return zero on success or a negative number on failure. - * - * After some sanity checks creates a drm_buf structure for each buffer and - * reallocates the buffer list of the same size order to accommodate the new - * buffers. - */ -int drm_legacy_addbufs_agp(struct drm_device *dev, - struct drm_buf_desc *request) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf_entry *entry; - struct drm_agp_mem *agp_entry; - struct drm_buf *buf; - unsigned long offset; - unsigned long agp_offset; - int count; - int order; - int size; - int alignment; - int page_order; - int total; - int byte_count; - int i, valid; - struct drm_buf **temp_buflist; - - if (!dma) - return -EINVAL; - - count = request->count; - order = order_base_2(request->size); - size = 1 << order; - - alignment = (request->flags & _DRM_PAGE_ALIGN) - ? 
PAGE_ALIGN(size) : size; - page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; - total = PAGE_SIZE << page_order; - - byte_count = 0; - agp_offset = dev->agp->base + request->agp_start; - - DRM_DEBUG("count: %d\n", count); - DRM_DEBUG("order: %d\n", order); - DRM_DEBUG("size: %d\n", size); - DRM_DEBUG("agp_offset: %lx\n", agp_offset); - DRM_DEBUG("alignment: %d\n", alignment); - DRM_DEBUG("page_order: %d\n", page_order); - DRM_DEBUG("total: %d\n", total); - - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return -EINVAL; - - /* Make sure buffers are located in AGP memory that we own */ - valid = 0; - list_for_each_entry(agp_entry, &dev->agp->memory, head) { - if ((agp_offset >= agp_entry->bound) && - (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) { - valid = 1; - break; - } - } - if (!list_empty(&dev->agp->memory) && !valid) { - DRM_DEBUG("zone invalid\n"); - return -EINVAL; - } - spin_lock(&dev->buf_lock); - if (dev->buf_use) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->buf_lock); - - mutex_lock(&dev->struct_mutex); - entry = &dma->bufs[order]; - if (entry->buf_count) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; /* May only call once for each order */ - } - - if (count < 0 || count > 4096) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -EINVAL; - } - - entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL); - if (!entry->buflist) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - entry->buf_size = size; - entry->page_order = page_order; - - offset = 0; - - while (entry->buf_count < count) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - - buf->offset = (dma->byte_count + offset); - buf->bus_address = agp_offset + offset; - buf->address = (void *)(agp_offset + offset); - buf->next = NULL; - buf->waiting = 0; - buf->pending = 0; - buf->file_priv = NULL; - - buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL); - if (!buf->dev_private) { - /* Set count correctly so we free the proper amount. 
*/ - entry->buf_count = count; - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); - - offset += alignment; - entry->buf_count++; - byte_count += PAGE_SIZE << page_order; - } - - DRM_DEBUG("byte_count: %d\n", byte_count); - - temp_buflist = krealloc(dma->buflist, - (dma->buf_count + entry->buf_count) * - sizeof(*dma->buflist), GFP_KERNEL); - if (!temp_buflist) { - /* Free the entry because it isn't valid */ - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - dma->buflist = temp_buflist; - - for (i = 0; i < entry->buf_count; i++) { - dma->buflist[i + dma->buf_count] = &entry->buflist[i]; - } - - dma->buf_count += entry->buf_count; - dma->seg_count += entry->seg_count; - dma->page_count += byte_count >> PAGE_SHIFT; - dma->byte_count += byte_count; - - DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); - DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); - - mutex_unlock(&dev->struct_mutex); - - request->count = entry->buf_count; - request->size = size; - - dma->flags = _DRM_DMA_USE_AGP; - - atomic_dec(&dev->buf_alloc); - return 0; -} -EXPORT_SYMBOL(drm_legacy_addbufs_agp); -#endif /* CONFIG_AGP */ - -int drm_legacy_addbufs_pci(struct drm_device *dev, - struct drm_buf_desc *request) -{ - struct drm_device_dma *dma = dev->dma; - int count; - int order; - int size; - int total; - int page_order; - struct drm_buf_entry *entry; - drm_dma_handle_t *dmah; - struct drm_buf *buf; - int alignment; - unsigned long offset; - int i; - int byte_count; - int page_count; - unsigned long *temp_pagelist; - struct drm_buf **temp_buflist; - - if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - count = request->count; - order = order_base_2(request->size); - size = 1 << order; - - DRM_DEBUG("count=%d, size=%d (%d), order=%d\n", - request->count, request->size, size, order); - - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return -EINVAL; - - alignment = (request->flags & _DRM_PAGE_ALIGN) - ? PAGE_ALIGN(size) : size; - page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; - total = PAGE_SIZE << page_order; - - spin_lock(&dev->buf_lock); - if (dev->buf_use) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->buf_lock); - - mutex_lock(&dev->struct_mutex); - entry = &dma->bufs[order]; - if (entry->buf_count) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; /* May only call once for each order */ - } - - if (count < 0 || count > 4096) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -EINVAL; - } - - entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL); - if (!entry->buflist) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL); - if (!entry->seglist) { - kfree(entry->buflist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - /* Keep the original pagelist until we know all the allocations - * have succeeded - */ - temp_pagelist = kmalloc_array(dma->page_count + (count << page_order), - sizeof(*dma->pagelist), - GFP_KERNEL); - if (!temp_pagelist) { - kfree(entry->buflist); - kfree(entry->seglist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - memcpy(temp_pagelist, - dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); - DRM_DEBUG("pagelist: %d entries\n", - dma->page_count + (count << page_order)); - - entry->buf_size = size; - entry->page_order = page_order; - byte_count = 0; - page_count = 0; - - while (entry->buf_count < count) { - dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); - if (!dmah) { - /* Set count correctly so we free the proper amount. */ - entry->buf_count = count; - entry->seg_count = count; - drm_cleanup_buf_error(dev, entry); - kfree(temp_pagelist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - dmah->size = total; - dmah->vaddr = dma_alloc_coherent(dev->dev, - dmah->size, - &dmah->busaddr, - GFP_KERNEL); - if (!dmah->vaddr) { - kfree(dmah); - - /* Set count correctly so we free the proper amount. */ - entry->buf_count = count; - entry->seg_count = count; - drm_cleanup_buf_error(dev, entry); - kfree(temp_pagelist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - entry->seglist[entry->seg_count++] = dmah; - for (i = 0; i < (1 << page_order); i++) { - DRM_DEBUG("page %d @ 0x%08lx\n", - dma->page_count + page_count, - (unsigned long)dmah->vaddr + PAGE_SIZE * i); - temp_pagelist[dma->page_count + page_count++] - = (unsigned long)dmah->vaddr + PAGE_SIZE * i; - } - for (offset = 0; - offset + size <= total && entry->buf_count < count; - offset += alignment, ++entry->buf_count) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - buf->offset = (dma->byte_count + byte_count + offset); - buf->address = (void *)(dmah->vaddr + offset); - buf->bus_address = dmah->busaddr + offset; - buf->next = NULL; - buf->waiting = 0; - buf->pending = 0; - buf->file_priv = NULL; - - buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = kzalloc(buf->dev_priv_size, - GFP_KERNEL); - if (!buf->dev_private) { - /* Set count correctly so we free the proper amount. 
*/ - entry->buf_count = count; - entry->seg_count = count; - drm_cleanup_buf_error(dev, entry); - kfree(temp_pagelist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - DRM_DEBUG("buffer %d @ %p\n", - entry->buf_count, buf->address); - } - byte_count += PAGE_SIZE << page_order; - } - - temp_buflist = krealloc(dma->buflist, - (dma->buf_count + entry->buf_count) * - sizeof(*dma->buflist), GFP_KERNEL); - if (!temp_buflist) { - /* Free the entry because it isn't valid */ - drm_cleanup_buf_error(dev, entry); - kfree(temp_pagelist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - dma->buflist = temp_buflist; - - for (i = 0; i < entry->buf_count; i++) { - dma->buflist[i + dma->buf_count] = &entry->buflist[i]; - } - - /* No allocations failed, so now we can replace the original pagelist - * with the new one. - */ - if (dma->page_count) { - kfree(dma->pagelist); - } - dma->pagelist = temp_pagelist; - - dma->buf_count += entry->buf_count; - dma->seg_count += entry->seg_count; - dma->page_count += entry->seg_count << page_order; - dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); - - mutex_unlock(&dev->struct_mutex); - - request->count = entry->buf_count; - request->size = size; - - if (request->flags & _DRM_PCI_BUFFER_RO) - dma->flags = _DRM_DMA_USE_PCI_RO; - - atomic_dec(&dev->buf_alloc); - return 0; - -} -EXPORT_SYMBOL(drm_legacy_addbufs_pci); - -static int drm_legacy_addbufs_sg(struct drm_device *dev, - struct drm_buf_desc *request) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf_entry *entry; - struct drm_buf *buf; - unsigned long offset; - unsigned long agp_offset; - int count; - int order; - int size; - int alignment; - int page_order; - int total; - int byte_count; - int i; - struct drm_buf **temp_buflist; - - if (!drm_core_check_feature(dev, DRIVER_SG)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - count = request->count; - order = order_base_2(request->size); - size = 1 << order; - - alignment = (request->flags & _DRM_PAGE_ALIGN) - ? PAGE_ALIGN(size) : size; - page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; - total = PAGE_SIZE << page_order; - - byte_count = 0; - agp_offset = request->agp_start; - - DRM_DEBUG("count: %d\n", count); - DRM_DEBUG("order: %d\n", order); - DRM_DEBUG("size: %d\n", size); - DRM_DEBUG("agp_offset: %lu\n", agp_offset); - DRM_DEBUG("alignment: %d\n", alignment); - DRM_DEBUG("page_order: %d\n", page_order); - DRM_DEBUG("total: %d\n", total); - - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return -EINVAL; - - spin_lock(&dev->buf_lock); - if (dev->buf_use) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->buf_lock); - - mutex_lock(&dev->struct_mutex); - entry = &dma->bufs[order]; - if (entry->buf_count) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; /* May only call once for each order */ - } - - if (count < 0 || count > 4096) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -EINVAL; - } - - entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL); - if (!entry->buflist) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - entry->buf_size = size; - entry->page_order = page_order; - - offset = 0; - - while (entry->buf_count < count) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - - buf->offset = (dma->byte_count + offset); - buf->bus_address = agp_offset + offset; - buf->address = (void *)(agp_offset + offset - + (unsigned long)dev->sg->virtual); - buf->next = NULL; - buf->waiting = 0; - buf->pending = 0; - buf->file_priv = NULL; - - buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL); - if (!buf->dev_private) { - /* Set count correctly so we free the proper amount. */ - entry->buf_count = count; - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); - - offset += alignment; - entry->buf_count++; - byte_count += PAGE_SIZE << page_order; - } - - DRM_DEBUG("byte_count: %d\n", byte_count); - - temp_buflist = krealloc(dma->buflist, - (dma->buf_count + entry->buf_count) * - sizeof(*dma->buflist), GFP_KERNEL); - if (!temp_buflist) { - /* Free the entry because it isn't valid */ - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - dma->buflist = temp_buflist; - - for (i = 0; i < entry->buf_count; i++) { - dma->buflist[i + dma->buf_count] = &entry->buflist[i]; - } - - dma->buf_count += entry->buf_count; - dma->seg_count += entry->seg_count; - dma->page_count += byte_count >> PAGE_SHIFT; - dma->byte_count += byte_count; - - DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); - DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); - - mutex_unlock(&dev->struct_mutex); - - request->count = entry->buf_count; - request->size = size; - - dma->flags = _DRM_DMA_USE_SG; - - atomic_dec(&dev->buf_alloc); - return 0; -} - -/* - * Add buffers for DMA transfers (ioctl). - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a struct drm_buf_desc request. - * \return zero on success or a negative number on failure. 
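
All three addbufs paths above size buffers by power-of-two "orders", so a request is silently rounded up before pages are carved out. A worked example of the arithmetic (the order_base_2() here is an illustrative reimplementation; the kernel's lives in linux/log2.h):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

/* Illustrative stand-in for the kernel's order_base_2(). */
static int order_base_2(unsigned long n)
{
	int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long request_size = 196608;	/* 192 KiB requested */
	int order = order_base_2(request_size);	/* 18 */
	unsigned long size = 1UL << order;	/* 256 KiB actually used */
	int page_order = order > PAGE_SHIFT ? order - PAGE_SHIFT : 0;	/* 6 */
	unsigned long total = PAGE_SIZE << page_order;	/* 256 KiB per segment */

	printf("order %d: %lu bytes, %lu pages\n",
	       order, size, total >> PAGE_SHIFT);
	return 0;
}
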
- * - * According with the memory type specified in drm_buf_desc::flags and the - * build options, it dispatches the call either to addbufs_agp(), - * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent - * PCI memory respectively. - */ -int drm_legacy_addbufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_buf_desc *request = data; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - -#if IS_ENABLED(CONFIG_AGP) - if (request->flags & _DRM_AGP_BUFFER) - ret = drm_legacy_addbufs_agp(dev, request); - else -#endif - if (request->flags & _DRM_SG_BUFFER) - ret = drm_legacy_addbufs_sg(dev, request); - else if (request->flags & _DRM_FB_BUFFER) - ret = -EINVAL; - else - ret = drm_legacy_addbufs_pci(dev, request); - - return ret; -} - -/* - * Get information about the buffer mappings. - * - * This was originally mean for debugging purposes, or by a sophisticated - * client library to determine how best to use the available buffers (e.g., - * large buffers can be used for image transfer). - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_buf_info structure. - * \return zero on success or a negative number on failure. - * - * Increments drm_device::buf_use while holding the drm_device::buf_lock - * lock, preventing of allocating more buffers after this call. Information - * about each requested buffer is then copied into user space. - */ -int __drm_legacy_infobufs(struct drm_device *dev, - void *data, int *p, - int (*f)(void *, int, struct drm_buf_entry *)) -{ - struct drm_device_dma *dma = dev->dma; - int i; - int count; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - spin_lock(&dev->buf_lock); - if (atomic_read(&dev->buf_alloc)) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - ++dev->buf_use; /* Can't allocate more after this call */ - spin_unlock(&dev->buf_lock); - - for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { - if (dma->bufs[i].buf_count) - ++count; - } - - DRM_DEBUG("count = %d\n", count); - - if (*p >= count) { - for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { - struct drm_buf_entry *from = &dma->bufs[i]; - - if (from->buf_count) { - if (f(data, count, from) < 0) - return -EFAULT; - DRM_DEBUG("%d %d %d %d %d\n", - i, - dma->bufs[i].buf_count, - dma->bufs[i].buf_size, - dma->bufs[i].low_mark, - dma->bufs[i].high_mark); - ++count; - } - } - } - *p = count; - - return 0; -} - -static int copy_one_buf(void *data, int count, struct drm_buf_entry *from) -{ - struct drm_buf_info *request = data; - struct drm_buf_desc __user *to = &request->list[count]; - struct drm_buf_desc v = {.count = from->buf_count, - .size = from->buf_size, - .low_mark = from->low_mark, - .high_mark = from->high_mark}; - - if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags))) - return -EFAULT; - return 0; -} - -int drm_legacy_infobufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_buf_info *request = data; - - return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf); -} - -/* - * Specifies a low and high water mark for buffer allocation - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. 
- * \param arg a pointer to a drm_buf_desc structure. - * \return zero on success or a negative number on failure. - * - * Verifies that the size order is bounded between the admissible orders and - * updates the respective drm_device_dma::bufs entry low and high water mark. - * - * \note This ioctl is deprecated and mostly never used. - */ -int drm_legacy_markbufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf_desc *request = data; - int order; - struct drm_buf_entry *entry; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - DRM_DEBUG("%d, %d, %d\n", - request->size, request->low_mark, request->high_mark); - order = order_base_2(request->size); - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return -EINVAL; - entry = &dma->bufs[order]; - - if (request->low_mark < 0 || request->low_mark > entry->buf_count) - return -EINVAL; - if (request->high_mark < 0 || request->high_mark > entry->buf_count) - return -EINVAL; - - entry->low_mark = request->low_mark; - entry->high_mark = request->high_mark; - - return 0; -} - -/* - * Unreserve the buffers in list, previously reserved using drmDMA. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_buf_free structure. - * \return zero on success or a negative number on failure. - * - * Calls free_buffer() for each used buffer. - * This function is primarily used for debugging. - */ -int drm_legacy_freebufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf_free *request = data; - int i; - int idx; - struct drm_buf *buf; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - DRM_DEBUG("%d\n", request->count); - for (i = 0; i < request->count; i++) { - if (copy_from_user(&idx, &request->list[i], sizeof(idx))) - return -EFAULT; - if (idx < 0 || idx >= dma->buf_count) { - DRM_ERROR("Index %d (of %d max)\n", - idx, dma->buf_count - 1); - return -EINVAL; - } - idx = array_index_nospec(idx, dma->buf_count); - buf = dma->buflist[idx]; - if (buf->file_priv != file_priv) { - DRM_ERROR("Process %d freeing buffer not owned\n", - task_pid_nr(current)); - return -EINVAL; - } - drm_legacy_free_buffer(dev, buf); - } - - return 0; -} - -/* - * Maps all of the DMA buffers into client-virtual space (ioctl). - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_buf_map structure. - * \return zero on success or a negative number on failure. - * - * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information - * about each buffer into user space. For PCI buffers, it calls vm_mmap() with - * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls - * drm_mmap_dma(). 
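The mapbufs body that follows checks its vm_mmap() result with "if (virtual > -1024UL)". That relies on the usual Linux convention that mmap-style calls return either a valid address or a negative errno encoded in the top values of the address space. A small stand-alone demonstration of that decoding, with invented sample values:

#include <stdio.h>
#include <errno.h>

/* Treat the top 1024 values of the address space as negative errnos,
 * the same trick the deleted code applies to vm_mmap() returns. */
static int is_map_error(unsigned long addr)
{
	return addr > -1024UL;
}

int main(void)
{
	unsigned long ok = 0x7f0000000000UL;        /* plausible mapping */
	unsigned long bad = (unsigned long)-ENOMEM; /* encoded failure   */

	printf("%#lx -> %s\n", ok, is_map_error(ok) ? "error" : "mapping");
	if (is_map_error(bad))
		printf("%#lx -> errno %ld\n", bad, -(long)bad);
	return 0;
}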
- */ -int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p, - void __user **v, - int (*f)(void *, int, unsigned long, - struct drm_buf *), - struct drm_file *file_priv) -{ - struct drm_device_dma *dma = dev->dma; - int retcode = 0; - unsigned long virtual; - int i; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - spin_lock(&dev->buf_lock); - if (atomic_read(&dev->buf_alloc)) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - dev->buf_use++; /* Can't allocate more after this call */ - spin_unlock(&dev->buf_lock); - - if (*p >= dma->buf_count) { - if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) - || (drm_core_check_feature(dev, DRIVER_SG) - && (dma->flags & _DRM_DMA_USE_SG))) { - struct drm_local_map *map = dev->agp_buffer_map; - unsigned long token = dev->agp_buffer_token; - - if (!map) { - retcode = -EINVAL; - goto done; - } - virtual = vm_mmap(file_priv->filp, 0, map->size, - PROT_READ | PROT_WRITE, - MAP_SHARED, - token); - } else { - virtual = vm_mmap(file_priv->filp, 0, dma->byte_count, - PROT_READ | PROT_WRITE, - MAP_SHARED, 0); - } - if (virtual > -1024UL) { - /* Real error */ - retcode = (signed long)virtual; - goto done; - } - *v = (void __user *)virtual; - - for (i = 0; i < dma->buf_count; i++) { - if (f(data, i, virtual, dma->buflist[i]) < 0) { - retcode = -EFAULT; - goto done; - } - } - } - done: - *p = dma->buf_count; - DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode); - - return retcode; -} - -static int map_one_buf(void *data, int idx, unsigned long virtual, - struct drm_buf *buf) -{ - struct drm_buf_map *request = data; - unsigned long address = virtual + buf->offset; /* *** */ - - if (copy_to_user(&request->list[idx].idx, &buf->idx, - sizeof(request->list[0].idx))) - return -EFAULT; - if (copy_to_user(&request->list[idx].total, &buf->total, - sizeof(request->list[0].total))) - return -EFAULT; - if (clear_user(&request->list[idx].used, sizeof(int))) - return -EFAULT; - if (copy_to_user(&request->list[idx].address, &address, - sizeof(address))) - return -EFAULT; - return 0; -} - -int drm_legacy_mapbufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_buf_map *request = data; - - return __drm_legacy_mapbufs(dev, data, &request->count, - &request->virtual, map_one_buf, - file_priv); -} - -int drm_legacy_dma_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (dev->driver->dma_ioctl) - return dev->driver->dma_ioctl(dev, data, file_priv); - else - return -EINVAL; -} - -struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev) -{ - struct drm_map_list *entry; - - list_for_each_entry(entry, &dev->maplist, head) { - if (entry->map && entry->map->type == _DRM_SHM && - (entry->map->flags & _DRM_CONTAINS_LOCK)) { - return entry->map; - } - } - return NULL; -} -EXPORT_SYMBOL(drm_legacy_getsarea); diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c deleted file mode 100644 index a0fc779e5e1e..000000000000 --- a/drivers/gpu/drm/drm_context.c +++ /dev/null @@ -1,513 +0,0 @@ -/* - * Legacy: Generic DRM Contexts - * - * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Author: Rickard E. 
(Rik) Faith <faith@valinux.com> - * Author: Gareth Hughes <gareth@valinux.com> - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include <linux/slab.h> -#include <linux/uaccess.h> - -#include <drm/drm_drv.h> -#include <drm/drm_file.h> -#include <drm/drm_print.h> - -#include "drm_legacy.h" - -struct drm_ctx_list { - struct list_head head; - drm_context_t handle; - struct drm_file *tag; -}; - -/******************************************************************/ -/** \name Context bitmap support */ -/*@{*/ - -/* - * Free a handle from the context bitmap. - * - * \param dev DRM device. - * \param ctx_handle context handle. - * - * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry - * in drm_device::ctx_idr, while holding the drm_device::struct_mutex - * lock. - */ -void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->struct_mutex); - idr_remove(&dev->ctx_idr, ctx_handle); - mutex_unlock(&dev->struct_mutex); -} - -/* - * Context bitmap allocation. - * - * \param dev DRM device. - * \return (non-negative) context handle on success or a negative number on failure. - * - * Allocate a new idr from drm_device::ctx_idr while holding the - * drm_device::struct_mutex lock. - */ -static int drm_legacy_ctxbitmap_next(struct drm_device * dev) -{ - int ret; - - mutex_lock(&dev->struct_mutex); - ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0, - GFP_KERNEL); - mutex_unlock(&dev->struct_mutex); - return ret; -} - -/* - * Context bitmap initialization. - * - * \param dev DRM device. - * - * Initialise the drm_device::ctx_idr - */ -void drm_legacy_ctxbitmap_init(struct drm_device * dev) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - idr_init(&dev->ctx_idr); -} - -/* - * Context bitmap cleanup. - * - * \param dev DRM device. - * - * Free all idr members using drm_ctx_sarea_free helper function - * while holding the drm_device::struct_mutex lock. 
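The context-bitmap helpers above boil down to handing out the lowest free handle at or above DRM_RESERVED_CONTEXTS, keeping handle 0 (DRM_KERNEL_CONTEXT) for the kernel. A toy user-space analogue of that policy follows; MAX_CTX and the array-backed allocator are invented for illustration, whereas the real code uses an IDR.

#include <stdio.h>

#define MAX_CTX           64
#define RESERVED_CONTEXTS 1   /* handle 0 is the kernel context */

static unsigned char used[MAX_CTX];

/* Allocate the lowest free handle >= RESERVED_CONTEXTS, like
 * idr_alloc(..., DRM_RESERVED_CONTEXTS, 0, ...) above. */
static int ctx_alloc(void)
{
	for (int h = RESERVED_CONTEXTS; h < MAX_CTX; h++) {
		if (!used[h]) {
			used[h] = 1;
			return h;
		}
	}
	return -1; /* no free contexts */
}

static void ctx_free(int h)
{
	if (h >= 0 && h < MAX_CTX)
		used[h] = 0;
}

int main(void)
{
	int a = ctx_alloc(), b = ctx_alloc();

	printf("allocated %d and %d\n", a, b);           /* 1 and 2  */
	ctx_free(a);
	printf("after free, next is %d\n", ctx_alloc()); /* 1 again  */
	return 0;
}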
- */ -void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->struct_mutex); - idr_destroy(&dev->ctx_idr); - mutex_unlock(&dev->struct_mutex); -} - -/** - * drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file - * @dev: DRM device to operate on - * @file: Open file to flush contexts for - * - * This iterates over all contexts on @dev and drops them if they're owned by - * @file. Note that after this call returns, new contexts might be added if - * the file is still alive. - */ -void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) -{ - struct drm_ctx_list *pos, *tmp; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->ctxlist_mutex); - - list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) { - if (pos->tag == file && - pos->handle != DRM_KERNEL_CONTEXT) { - if (dev->driver->context_dtor) - dev->driver->context_dtor(dev, pos->handle); - - drm_legacy_ctxbitmap_free(dev, pos->handle); - list_del(&pos->head); - kfree(pos); - } - } - - mutex_unlock(&dev->ctxlist_mutex); -} - -/*@}*/ - -/******************************************************************/ -/** \name Per Context SAREA Support */ -/*@{*/ - -/* - * Get per-context SAREA. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx_priv_map structure. - * \return zero on success or a negative number on failure. - * - * Gets the map from drm_device::ctx_idr with the handle specified and - * returns its handle. - */ -int drm_legacy_getsareactx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx_priv_map *request = data; - struct drm_local_map *map; - struct drm_map_list *_entry; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - mutex_lock(&dev->struct_mutex); - - map = idr_find(&dev->ctx_idr, request->ctx_id); - if (!map) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - - request->handle = NULL; - list_for_each_entry(_entry, &dev->maplist, head) { - if (_entry->map == map) { - request->handle = - (void *)(unsigned long)_entry->user_token; - break; - } - } - - mutex_unlock(&dev->struct_mutex); - - if (request->handle == NULL) - return -EINVAL; - - return 0; -} - -/* - * Set per-context SAREA. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx_priv_map structure. - * \return zero on success or a negative number on failure. - * - * Searches the mapping specified in \p arg and update the entry in - * drm_device::ctx_idr with it. 
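The SAREA handlers in this region translate between kernel-side maps and the opaque user_token that userspace holds, by scanning the device's map list for a matching token. A minimal sketch of that lookup; the maplist contents and names here are invented for the demo.

#include <stdio.h>

/* Toy analogue of the maplist scan in the SAREA ioctls: resolve a
 * user-visible token back to the map it names. */
struct map { unsigned long user_token; const char *name; };

static const struct map maplist[] = {
	{ 0x1000, "sarea" },
	{ 0x2000, "framebuffer" },
	{ 0x3000, "mmio" },
};

static const struct map *find_map(unsigned long token)
{
	for (unsigned int i = 0; i < sizeof(maplist) / sizeof(maplist[0]); i++)
		if (maplist[i].user_token == token)
			return &maplist[i];
	return NULL; /* the deleted code returns -EINVAL here */
}

int main(void)
{
	const struct map *m = find_map(0x2000);

	printf("token 0x2000 -> %s\n", m ? m->name : "not found");
	return 0;
}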
- */ -int drm_legacy_setsareactx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx_priv_map *request = data; - struct drm_local_map *map = NULL; - struct drm_map_list *r_list = NULL; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(r_list, &dev->maplist, head) { - if (r_list->map - && r_list->user_token == (unsigned long) request->handle) - goto found; - } - bad: - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - - found: - map = r_list->map; - if (!map) - goto bad; - - if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id))) - goto bad; - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -/*@}*/ - -/******************************************************************/ -/** \name The actual DRM context handling routines */ -/*@{*/ - -/* - * Switch context. - * - * \param dev DRM device. - * \param old old context handle. - * \param new new context handle. - * \return zero on success or a negative number on failure. - * - * Attempt to set drm_device::context_flag. - */ -static int drm_context_switch(struct drm_device * dev, int old, int new) -{ - if (test_and_set_bit(0, &dev->context_flag)) { - DRM_ERROR("Reentering -- FIXME\n"); - return -EBUSY; - } - - DRM_DEBUG("Context switch from %d to %d\n", old, new); - - if (new == dev->last_context) { - clear_bit(0, &dev->context_flag); - return 0; - } - - return 0; -} - -/* - * Complete context switch. - * - * \param dev DRM device. - * \param new new context handle. - * \return zero on success or a negative number on failure. - * - * Updates drm_device::last_context and drm_device::last_switch. Verifies the - * hardware lock is held, clears the drm_device::context_flag and wakes up - * drm_device::context_wait. - */ -static int drm_context_switch_complete(struct drm_device *dev, - struct drm_file *file_priv, int new) -{ - dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ - - if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) { - DRM_ERROR("Lock isn't held after context switch\n"); - } - - /* If a context switch is ever initiated - when the kernel holds the lock, release - that lock here. - */ - clear_bit(0, &dev->context_flag); - - return 0; -} - -/* - * Reserve contexts. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx_res structure. - * \return zero on success or a negative number on failure. - */ -int drm_legacy_resctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx_res *res = data; - struct drm_ctx ctx; - int i; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (res->count >= DRM_RESERVED_CONTEXTS) { - memset(&ctx, 0, sizeof(ctx)); - for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { - ctx.handle = i; - if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx))) - return -EFAULT; - } - } - res->count = DRM_RESERVED_CONTEXTS; - - return 0; -} - -/* - * Add context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. - * - * Get a new handle for the context and copy to userspace. 
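The addctx body that follows also records each new context in a per-device list tagged with the creating file, which is what lets drm_legacy_ctxbitmap_flush() (earlier in this file) reap everything a closing client left behind. A toy user-space analogue of that tagged-list flush, with invented names:

#include <stdio.h>
#include <stdlib.h>

/* Toy version of struct drm_ctx_list: a handle plus an owner tag. */
struct ctx_node {
	struct ctx_node *next;
	int handle;
	const void *tag;
};

static struct ctx_node *ctxlist;

static void ctx_add(int handle, const void *tag)
{
	struct ctx_node *n = malloc(sizeof(*n));

	if (!n)
		return;
	n->handle = handle;
	n->tag = tag;
	n->next = ctxlist;
	ctxlist = n;
}

/* Flush in the style of drm_legacy_ctxbitmap_flush(): unlink and free
 * every node owned by @tag, leaving other owners' contexts alone. */
static void ctx_flush(const void *tag)
{
	struct ctx_node **pp = &ctxlist;

	while (*pp) {
		struct ctx_node *n = *pp;

		if (n->tag == tag) {
			*pp = n->next;
			printf("flushed context %d\n", n->handle);
			free(n);
		} else {
			pp = &n->next;
		}
	}
}

int main(void)
{
	int file_a, file_b; /* stand-ins for two struct drm_file owners */

	ctx_add(1, &file_a);
	ctx_add(2, &file_b);
	ctx_add(3, &file_a);
	ctx_flush(&file_a); /* drops 3 and 1, keeps 2 */
	return 0;
}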
- */ -int drm_legacy_addctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx_list *ctx_entry; - struct drm_ctx *ctx = data; - int tmp_handle; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - tmp_handle = drm_legacy_ctxbitmap_next(dev); - if (tmp_handle == DRM_KERNEL_CONTEXT) { - /* Skip kernel's context and get a new one. */ - tmp_handle = drm_legacy_ctxbitmap_next(dev); - } - DRM_DEBUG("%d\n", tmp_handle); - if (tmp_handle < 0) { - DRM_DEBUG("Not enough free contexts.\n"); - /* Should this return -EBUSY instead? */ - return tmp_handle; - } - - ctx->handle = tmp_handle; - - ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL); - if (!ctx_entry) { - DRM_DEBUG("out of memory\n"); - return -ENOMEM; - } - - INIT_LIST_HEAD(&ctx_entry->head); - ctx_entry->handle = ctx->handle; - ctx_entry->tag = file_priv; - - mutex_lock(&dev->ctxlist_mutex); - list_add(&ctx_entry->head, &dev->ctxlist); - mutex_unlock(&dev->ctxlist_mutex); - - return 0; -} - -/* - * Get context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. - */ -int drm_legacy_getctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx *ctx = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - /* This is 0, because we don't handle any context flags */ - ctx->flags = 0; - - return 0; -} - -/* - * Switch context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. - * - * Calls context_switch(). - */ -int drm_legacy_switchctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx *ctx = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - DRM_DEBUG("%d\n", ctx->handle); - return drm_context_switch(dev, dev->last_context, ctx->handle); -} - -/* - * New context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. - * - * Calls context_switch_complete(). - */ -int drm_legacy_newctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx *ctx = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - DRM_DEBUG("%d\n", ctx->handle); - drm_context_switch_complete(dev, file_priv, ctx->handle); - - return 0; -} - -/* - * Remove context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. - * - * If not the special kernel context, calls ctxbitmap_free() to free the specified context. 
- */ -int drm_legacy_rmctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx *ctx = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - DRM_DEBUG("%d\n", ctx->handle); - if (ctx->handle != DRM_KERNEL_CONTEXT) { - if (dev->driver->context_dtor) - dev->driver->context_dtor(dev, ctx->handle); - drm_legacy_ctxbitmap_free(dev, ctx->handle); - } - - mutex_lock(&dev->ctxlist_mutex); - if (!list_empty(&dev->ctxlist)) { - struct drm_ctx_list *pos, *n; - - list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { - if (pos->handle == ctx->handle) { - list_del(&pos->head); - kfree(pos); - } - } - } - mutex_unlock(&dev->ctxlist_mutex); - - return 0; -} - -/*@}*/ diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index a209659a996c..2dafc39a27cb 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -439,11 +439,8 @@ EXPORT_SYMBOL(drm_crtc_helper_set_mode); * @state: atomic state object * * Provides a default CRTC-state check handler for CRTCs that only have - * one primary plane attached to it. - * - * This is often the case for the CRTC of simple framebuffers. See also - * drm_plane_helper_atomic_check() for the respective plane-state check - * helper function. + * one primary plane attached to it. This is often the case for the CRTC + * of simple framebuffers. * * RETURNS: * Zero on success, or an errno code otherwise. diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 6b646e0783be..a514d5207e41 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -253,7 +253,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state, struct drm_file *file_priv, struct drm_mode_object *obj, struct drm_property *prop, - uint64_t prop_value); + u64 prop_value, bool async_flip); int drm_atomic_get_property(struct drm_mode_object *obj, struct drm_property *property, uint64_t *val); diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c index d8b2955e88fd..afb02aae707b 100644 --- a/drivers/gpu/drm/drm_damage_helper.c +++ b/drivers/gpu/drm/drm_damage_helper.c @@ -241,7 +241,8 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter, iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF); iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF); - if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) { + if (!iter->clips || state->ignore_damage_clips || + !drm_rect_equals(&state->src, &old_state->src)) { iter->clips = NULL; iter->num_clips = 0; iter->full_update = true; diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index f291fb4b359f..02e7481758c0 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -314,10 +314,8 @@ void drm_debugfs_dev_register(struct drm_device *dev) drm_framebuffer_debugfs_init(dev); drm_client_debugfs_init(dev); } - if (drm_drv_uses_atomic_modeset(dev)) { + if (drm_drv_uses_atomic_modeset(dev)) drm_atomic_debugfs_init(dev); - drm_bridge_debugfs_init(dev); - } } int drm_debugfs_register(struct drm_minor *minor, int minor_id, @@ -589,4 +587,65 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc) crtc->debugfs_entry = NULL; } +static int bridges_show(struct seq_file *m, void *data) +{ + struct drm_encoder *encoder = m->private; + struct drm_printer p = drm_seq_file_printer(m); + struct drm_bridge *bridge; + unsigned int idx = 0; + + 
drm_for_each_bridge_in_chain(encoder, bridge) { + drm_printf(&p, "bridge[%d]: %ps\n", idx++, bridge->funcs); + drm_printf(&p, "\ttype: [%d] %s\n", + bridge->type, + drm_get_connector_type_name(bridge->type)); +#ifdef CONFIG_OF + if (bridge->of_node) + drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node); +#endif + drm_printf(&p, "\tops: [0x%x]", bridge->ops); + if (bridge->ops & DRM_BRIDGE_OP_DETECT) + drm_puts(&p, " detect"); + if (bridge->ops & DRM_BRIDGE_OP_EDID) + drm_puts(&p, " edid"); + if (bridge->ops & DRM_BRIDGE_OP_HPD) + drm_puts(&p, " hpd"); + if (bridge->ops & DRM_BRIDGE_OP_MODES) + drm_puts(&p, " modes"); + drm_puts(&p, "\n"); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(bridges); + +void drm_debugfs_encoder_add(struct drm_encoder *encoder) +{ + struct drm_minor *minor = encoder->dev->primary; + struct dentry *root; + char *name; + + name = kasprintf(GFP_KERNEL, "encoder-%d", encoder->index); + if (!name) + return; + + root = debugfs_create_dir(name, minor->debugfs_root); + kfree(name); + + encoder->debugfs_entry = root; + + /* bridges list */ + debugfs_create_file("bridges", 0444, root, encoder, + &bridges_fops); + + if (encoder->funcs->debugfs_init) + encoder->funcs->debugfs_init(encoder, root); +} + +void drm_debugfs_encoder_remove(struct drm_encoder *encoder) +{ + debugfs_remove_recursive(encoder->debugfs_entry); + encoder->debugfs_entry = NULL; +} + #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c deleted file mode 100644 index eb6b741a6f99..000000000000 --- a/drivers/gpu/drm/drm_dma.c +++ /dev/null @@ -1,178 +0,0 @@ -/* - * \file drm_dma.c - * DMA IOCTL and function support - * - * \author Rickard E. (Rik) Faith <faith@valinux.com> - * \author Gareth Hughes <gareth@valinux.com> - */ - -/* - * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com - * - * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include <linux/export.h> -#include <linux/pci.h> - -#include <drm/drm_drv.h> -#include <drm/drm_print.h> - -#include "drm_legacy.h" - -/** - * drm_legacy_dma_setup() - Initialize the DMA data. - * - * @dev: DRM device. - * Return: zero on success or a negative value on failure. - * - * Allocate and initialize a drm_device_dma structure. 
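The setup and takedown pair below follows a common shape: a zeroing allocation on the way in, and a walk that frees every per-order segment before the containing structure on the way out. A user-space sketch of that shape, with calloc() standing in for kzalloc(), free() for dma_free_coherent(), and MAX_ORDER shrunk for illustration:

#include <stdio.h>
#include <stdlib.h>

#define MAX_ORDER 4

/* Toy analogue of struct drm_device_dma: per-order segment lists that
 * takedown must free in the reverse shape of setup. */
struct dma_state {
	void **seglist[MAX_ORDER + 1];
	int seg_count[MAX_ORDER + 1];
};

static struct dma_state *dma_setup(void)
{
	/* calloc() zeroes everything, like kzalloc() below, so no
	 * per-slot clearing is needed afterwards. */
	return calloc(1, sizeof(struct dma_state));
}

static void dma_takedown(struct dma_state *dma)
{
	if (!dma)
		return;

	for (int i = 0; i <= MAX_ORDER; i++) {
		for (int j = 0; j < dma->seg_count[i]; j++)
			free(dma->seglist[i][j]); /* dma_free_coherent() */
		free(dma->seglist[i]);
	}
	free(dma);
}

int main(void)
{
	struct dma_state *dma = dma_setup();

	if (!dma)
		return 1;
	dma->seglist[2] = calloc(2, sizeof(void *));
	dma->seglist[2][0] = malloc(16);
	dma->seglist[2][1] = malloc(16);
	dma->seg_count[2] = 2;

	dma_takedown(dma);
	puts("all segments freed");
	return 0;
}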
- */ -int drm_legacy_dma_setup(struct drm_device *dev) -{ - int i; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) || - !drm_core_check_feature(dev, DRIVER_LEGACY)) - return 0; - - dev->buf_use = 0; - atomic_set(&dev->buf_alloc, 0); - - dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL); - if (!dev->dma) - return -ENOMEM; - - for (i = 0; i <= DRM_MAX_ORDER; i++) - memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0])); - - return 0; -} - -/** - * drm_legacy_dma_takedown() - Cleanup the DMA resources. - * - * @dev: DRM device. - * - * Free all pages associated with DMA buffers, the buffers and pages lists, and - * finally the drm_device::dma structure itself. - */ -void drm_legacy_dma_takedown(struct drm_device *dev) -{ - struct drm_device_dma *dma = dev->dma; - drm_dma_handle_t *dmah; - int i, j; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) || - !drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - if (!dma) - return; - - /* Clear dma buffers */ - for (i = 0; i <= DRM_MAX_ORDER; i++) { - if (dma->bufs[i].seg_count) { - DRM_DEBUG("order %d: buf_count = %d," - " seg_count = %d\n", - i, - dma->bufs[i].buf_count, - dma->bufs[i].seg_count); - for (j = 0; j < dma->bufs[i].seg_count; j++) { - if (dma->bufs[i].seglist[j]) { - dmah = dma->bufs[i].seglist[j]; - dma_free_coherent(dev->dev, - dmah->size, - dmah->vaddr, - dmah->busaddr); - kfree(dmah); - } - } - kfree(dma->bufs[i].seglist); - } - if (dma->bufs[i].buf_count) { - for (j = 0; j < dma->bufs[i].buf_count; j++) { - kfree(dma->bufs[i].buflist[j].dev_private); - } - kfree(dma->bufs[i].buflist); - } - } - - kfree(dma->buflist); - kfree(dma->pagelist); - kfree(dev->dma); - dev->dma = NULL; -} - -/** - * drm_legacy_free_buffer() - Free a buffer. - * - * @dev: DRM device. - * @buf: buffer to free. - * - * Resets the fields of \p buf. - */ -void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf) -{ - if (!buf) - return; - - buf->waiting = 0; - buf->pending = 0; - buf->file_priv = NULL; - buf->used = 0; -} - -/** - * drm_legacy_reclaim_buffers() - Reclaim the buffers. - * - * @dev: DRM device. - * @file_priv: DRM file private. - * - * Frees each buffer associated with \p file_priv not already on the hardware. - */ -void drm_legacy_reclaim_buffers(struct drm_device *dev, - struct drm_file *file_priv) -{ - struct drm_device_dma *dma = dev->dma; - int i; - - if (!dma) - return; - for (i = 0; i < dma->buf_count; i++) { - if (dma->buflist[i]->file_priv == file_priv) { - switch (dma->buflist[i]->list) { - case DRM_LIST_NONE: - drm_legacy_free_buffer(dev, dma->buflist[i]); - break; - case DRM_LIST_WAIT: - dma->buflist[i]->list = DRM_LIST_RECLAIM; - break; - default: - /* Buffer already on hardware. 
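The reclaim switch just above encodes a small buffer lifecycle: idle buffers are freed immediately, queued ones are flagged for later reclaim, and anything already on the hardware is left alone. A compact user-space rendering of that state machine; the enum and field names are invented.

#include <stdio.h>

enum buf_list { LIST_NONE, LIST_WAIT, LIST_RECLAIM, LIST_HW };

struct buf { enum buf_list list; int owner; };

/* Mirror of the reclaim walk: act only on @owner's buffers, and pick
 * the action from the buffer's current list state. */
static void reclaim(struct buf *bufs, int n, int owner)
{
	for (int i = 0; i < n; i++) {
		if (bufs[i].owner != owner)
			continue;
		switch (bufs[i].list) {
		case LIST_NONE:
			printf("buf %d: freed now\n", i);
			break;
		case LIST_WAIT:
			bufs[i].list = LIST_RECLAIM;
			printf("buf %d: marked for reclaim\n", i);
			break;
		default:
			printf("buf %d: busy on hardware, skipped\n", i);
			break;
		}
	}
}

int main(void)
{
	struct buf bufs[3] = {
		{ LIST_NONE, 1 }, { LIST_WAIT, 1 }, { LIST_HW, 1 },
	};

	reclaim(bufs, 3, 1);
	return 0;
}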
*/ - break; - } - } - } -} diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 535f16e7882e..243cacb3575c 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -48,7 +48,6 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -#include "drm_legacy.h" MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"); MODULE_DESCRIPTION("DRM shared core routines"); @@ -585,8 +584,6 @@ static void drm_fs_inode_free(struct inode *inode) static void drm_dev_init_release(struct drm_device *dev, void *res) { - drm_legacy_ctxbitmap_cleanup(dev); - drm_legacy_remove_map_hash(dev); drm_fs_inode_free(dev->anon_inode); put_device(dev->dev); @@ -597,7 +594,6 @@ static void drm_dev_init_release(struct drm_device *dev, void *res) mutex_destroy(&dev->clientlist_mutex); mutex_destroy(&dev->filelist_mutex); mutex_destroy(&dev->struct_mutex); - drm_legacy_destroy_members(dev); } static int drm_dev_init(struct drm_device *dev, @@ -632,7 +628,6 @@ static int drm_dev_init(struct drm_device *dev, return -EINVAL; } - drm_legacy_init_members(dev); INIT_LIST_HEAD(&dev->filelist); INIT_LIST_HEAD(&dev->filelist_internal); INIT_LIST_HEAD(&dev->clientlist); @@ -673,12 +668,6 @@ static int drm_dev_init(struct drm_device *dev, goto err; } - ret = drm_legacy_create_map_hash(dev); - if (ret) - goto err; - - drm_legacy_ctxbitmap_init(dev); - if (drm_core_check_feature(dev, DRIVER_GEM)) { ret = drm_gem_init(dev); if (ret) { @@ -949,8 +938,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) goto err_minors; } - if (drm_core_check_feature(dev, DRIVER_MODESET)) - drm_modeset_register_all(dev); + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = drm_modeset_register_all(dev); + if (ret) + goto err_unload; + } DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", driver->name, driver->major, driver->minor, @@ -960,6 +952,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) goto out_unlock; +err_unload: + if (dev->driver->unload) + dev->driver->unload(dev); err_minors: remove_compat_control_link(dev); drm_minor_unregister(dev, DRM_MINOR_ACCEL); @@ -990,9 +985,6 @@ EXPORT_SYMBOL(drm_dev_register); */ void drm_dev_unregister(struct drm_device *dev) { - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - drm_lastclose(dev); - dev->registered = false; drm_client_dev_unregister(dev); @@ -1003,9 +995,6 @@ void drm_dev_unregister(struct drm_device *dev) if (dev->driver->unload) dev->driver->unload(dev); - drm_legacy_pci_agp_destroy(dev); - drm_legacy_rmmaps(dev); - remove_compat_control_link(dev); drm_minor_unregister(dev, DRM_MINOR_ACCEL); drm_minor_unregister(dev, DRM_MINOR_PRIMARY); diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c index 1143bc7f3252..8f2bc6a28482 100644 --- a/drivers/gpu/drm/drm_encoder.c +++ b/drivers/gpu/drm/drm_encoder.c @@ -30,6 +30,7 @@ #include <drm/drm_print.h> #include "drm_crtc_internal.h" +#include "drm_internal.h" /** * DOC: overview @@ -74,6 +75,8 @@ int drm_encoder_register_all(struct drm_device *dev) int ret = 0; drm_for_each_encoder(encoder, dev) { + drm_debugfs_encoder_add(encoder); + if (encoder->funcs && encoder->funcs->late_register) ret = encoder->funcs->late_register(encoder); if (ret) @@ -90,6 +93,7 @@ void drm_encoder_unregister_all(struct drm_device *dev) drm_for_each_encoder(encoder, dev) { if (encoder->funcs && encoder->funcs->early_unregister) encoder->funcs->early_unregister(encoder); + drm_debugfs_encoder_remove(encoder); } } diff --git 
a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 5ddaffd32586..8c87287c3e16 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -47,7 +47,6 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -#include "drm_legacy.h" /* from BKL pushdown */ DEFINE_MUTEX(drm_global_mutex); @@ -55,14 +54,6 @@ DEFINE_MUTEX(drm_global_mutex); bool drm_dev_needs_global_mutex(struct drm_device *dev) { /* - * Legacy drivers rely on all kinds of BKL locking semantics, don't - * bother. They also still need BKL locking for their ioctls, so better - * safe than sorry. - */ - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - return true; - - /* * The deprecated ->load callback must be called after the driver is * already registered. This means such drivers rely on the BKL to make * sure an open can't proceed until the driver is actually fully set up. @@ -107,9 +98,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev) * drm_send_event() as the main starting points. * * The memory mapping implementation will vary depending on how the driver - * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap() - * function, modern drivers should use one of the provided memory-manager - * specific implementations. For GEM-based drivers this is drm_gem_mmap(). + * manages memory. For GEM-based drivers this is drm_gem_mmap(). * * No other file operations are supported by the DRM userspace API. Overall the * following is an example &file_operations structure:: @@ -254,18 +243,6 @@ void drm_file_free(struct drm_file *file) (long)old_encode_dev(file->minor->kdev->devt), atomic_read(&dev->open_count)); -#ifdef CONFIG_DRM_LEGACY - if (drm_core_check_feature(dev, DRIVER_LEGACY) && - dev->driver->preclose) - dev->driver->preclose(dev, file); -#endif - - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - drm_legacy_lock_release(dev, file->filp); - - if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - drm_legacy_reclaim_buffers(dev, file); - drm_events_release(file); if (drm_core_check_feature(dev, DRIVER_MODESET)) { @@ -279,8 +256,6 @@ void drm_file_free(struct drm_file *file) if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_release(dev, file); - drm_legacy_ctxbitmap_flush(dev, file); - if (drm_is_primary_client(file)) drm_master_release(file); @@ -367,29 +342,6 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor) list_add(&priv->lhead, &dev->filelist); mutex_unlock(&dev->filelist_mutex); -#ifdef CONFIG_DRM_LEGACY -#ifdef __alpha__ - /* - * Default the hose - */ - if (!dev->hose) { - struct pci_dev *pci_dev; - - pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); - if (pci_dev) { - dev->hose = pci_dev->sysdata; - pci_dev_put(pci_dev); - } - if (!dev->hose) { - struct pci_bus *b = list_entry(pci_root_buses.next, - struct pci_bus, node); - if (b) - dev->hose = b->sysdata; - } - } -#endif -#endif - return 0; } @@ -411,7 +363,6 @@ int drm_open(struct inode *inode, struct file *filp) struct drm_device *dev; struct drm_minor *minor; int retcode; - int need_setup = 0; minor = drm_minor_acquire(iminor(inode)); if (IS_ERR(minor)) @@ -421,8 +372,7 @@ int drm_open(struct inode *inode, struct file *filp) if (drm_dev_needs_global_mutex(dev)) mutex_lock(&drm_global_mutex); - if (!atomic_fetch_inc(&dev->open_count)) - need_setup = 1; + atomic_fetch_inc(&dev->open_count); /* share address_space across all char-devs of a single device */ filp->f_mapping = dev->anon_inode->i_mapping; @@ -430,13 +380,6 @@ int drm_open(struct inode *inode, struct file *filp) retcode 
= drm_open_helper(filp, minor); if (retcode) goto err_undo; - if (need_setup) { - retcode = drm_legacy_setup(dev); - if (retcode) { - drm_close_helper(filp); - goto err_undo; - } - } if (drm_dev_needs_global_mutex(dev)) mutex_unlock(&drm_global_mutex); @@ -460,9 +403,6 @@ void drm_lastclose(struct drm_device * dev) dev->driver->lastclose(dev); drm_dbg_core(dev, "driver lastclose completed\n"); - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - drm_legacy_dev_reinit(dev); - drm_client_dev_restore(dev); } @@ -958,7 +898,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file) { struct drm_gem_object *obj; struct drm_memory_stats status = {}; - enum drm_gem_object_status supported_status; + enum drm_gem_object_status supported_status = 0; int id; spin_lock(&file->table_lock); diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 09e289fca5c3..3cc0ffc28e86 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -583,7 +583,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev, struct drm_mode_fb_cmd2 *r = data; struct drm_framebuffer *fb; unsigned int i; - int ret; + int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index 54f5e8851de5..9c0922c1a5a2 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -1086,6 +1086,37 @@ drm_gpuvm_put(struct drm_gpuvm *gpuvm) EXPORT_SYMBOL_GPL(drm_gpuvm_put); static int +exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj, + unsigned int num_fences) +{ + return num_fences ? drm_exec_prepare_obj(exec, obj, num_fences) : + drm_exec_lock_obj(exec, obj); +} + +/** + * drm_gpuvm_prepare_vm() - prepare the GPUVMs common dma-resv + * @gpuvm: the &drm_gpuvm + * @exec: the &drm_exec context + * @num_fences: the amount of &dma_fences to reserve + * + * Calls drm_exec_prepare_obj() for the GPUVMs dummy &drm_gem_object; if + * @num_fences is zero drm_exec_lock_obj() is called instead. + * + * Using this function directly, it is the drivers responsibility to call + * drm_exec_init() and drm_exec_fini() accordingly. + * + * Returns: 0 on success, negative error code on failure. + */ +int +drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences) +{ + return exec_prepare_obj(exec, gpuvm->r_obj, num_fences); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_vm); + +static int __drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm, struct drm_exec *exec, unsigned int num_fences) @@ -1095,7 +1126,7 @@ __drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm, int ret = 0; for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) { - ret = drm_exec_prepare_obj(exec, vm_bo->obj, num_fences); + ret = exec_prepare_obj(exec, vm_bo->obj, num_fences); if (ret) break; } @@ -1116,7 +1147,7 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm, drm_gpuvm_resv_assert_held(gpuvm); list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) { - ret = drm_exec_prepare_obj(exec, vm_bo->obj, num_fences); + ret = exec_prepare_obj(exec, vm_bo->obj, num_fences); if (ret) break; @@ -1134,7 +1165,8 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm, * @num_fences: the amount of &dma_fences to reserve * * Calls drm_exec_prepare_obj() for all &drm_gem_objects the given - * &drm_gpuvm contains mappings of. + * &drm_gpuvm contains mappings of; if @num_fences is zero drm_exec_lock_obj() + * is called instead. 
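For context on the new drm_gpuvm helpers: a driver is expected to wrap them in a drm_exec locking loop of its own. The following is a kernel-side sketch only, not standalone code; the two-argument drm_exec_init() form is an assumption (its signature has varied across kernel versions), and lock_vm_for_submit() is an invented driver function.

/* Hedged sketch of driver-side usage of the helpers added above. */
static int lock_vm_for_submit(struct drm_gpuvm *gpuvm, unsigned int nfences)
{
	struct drm_exec exec;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* nfences == 0 degrades to drm_exec_lock_obj(), per the
		 * kernel-doc above. */
		ret = drm_gpuvm_prepare_vm(gpuvm, &exec, nfences);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto err;

		ret = drm_gpuvm_prepare_objects(gpuvm, &exec, nfences);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto err;
	}
	/* ... install fences, then ... */
	drm_exec_fini(&exec);
	return 0;
err:
	drm_exec_fini(&exec);
	return ret;
}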
* * Using this function directly, it is the drivers responsibility to call * drm_exec_init() and drm_exec_fini() accordingly. @@ -1171,7 +1203,8 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_objects); * @num_fences: the amount of &dma_fences to reserve * * Calls drm_exec_prepare_obj() for all &drm_gem_objects mapped between @addr - * and @addr + @range. + * and @addr + @range; if @num_fences is zero drm_exec_lock_obj() is called + * instead. * * Returns: 0 on success, negative error code on failure. */ @@ -1186,7 +1219,7 @@ drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec, drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) { struct drm_gem_object *obj = va->gem.obj; - ret = drm_exec_prepare_obj(exec, obj, num_fences); + ret = exec_prepare_obj(exec, obj, num_fences); if (ret) return ret; } @@ -1502,14 +1535,18 @@ drm_gpuvm_bo_destroy(struct kref *kref) * hold the dma-resv or driver specific GEM gpuva lock. * * This function may only be called from non-atomic context. + * + * Returns: true if vm_bo was destroyed, false otherwise. */ -void +bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo) { might_sleep(); if (vm_bo) - kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy); + return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy); + + return false; } EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put); diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c deleted file mode 100644 index 60afa1865559..000000000000 --- a/drivers/gpu/drm/drm_hashtab.c +++ /dev/null @@ -1,203 +0,0 @@ -/************************************************************************** - * - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * - **************************************************************************/ -/* - * Simple open hash tab implementation. 
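The hash table being deleted here kept each bucket's chain sorted by key, so both inserts and lookups can bail out as soon as they pass the key's position. A self-contained miniature of that ordered-chain scheme; the mixing function, ORDER and the item layout are arbitrary choices for the demo.

#include <stdio.h>
#include <stdlib.h>

#define ORDER    3              /* 8 buckets */
#define NBUCKETS (1u << ORDER)

struct item { struct item *next; unsigned long key; };

static struct item *table[NBUCKETS];

/* Toy stand-in for hash_long(). */
static unsigned int hash(unsigned long key)
{
	return (unsigned int)(key * 2654435761u) >> (32 - ORDER);
}

/* Ordered chain insert, like drm_ht_insert_item(): keep the bucket
 * sorted by key and reject duplicates. */
static int insert(struct item *it)
{
	struct item **pp = &table[hash(it->key)];

	while (*pp && (*pp)->key < it->key)
		pp = &(*pp)->next;
	if (*pp && (*pp)->key == it->key)
		return -1; /* duplicate key */
	it->next = *pp;
	*pp = it;
	return 0;
}

static struct item *find(unsigned long key)
{
	struct item *it = table[hash(key)];

	while (it && it->key < key) /* early exit thanks to ordering */
		it = it->next;
	return (it && it->key == key) ? it : NULL;
}

int main(void)
{
	struct item a = { .key = 42 }, b = { .key = 7 };

	insert(&a);
	insert(&b);
	printf("find(42): %s\n", find(42) ? "hit" : "miss");
	printf("find(13): %s\n", find(13) ? "hit" : "miss");
	return 0;
}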
- * - * Authors: - * Thomas Hellström <thomas-at-tungstengraphics-dot-com> - */ - -#include <linux/hash.h> -#include <linux/mm.h> -#include <linux/rculist.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> - -#include <drm/drm_print.h> - -#include "drm_legacy.h" - -int drm_ht_create(struct drm_open_hash *ht, unsigned int order) -{ - unsigned int size = 1 << order; - - ht->order = order; - ht->table = NULL; - if (size <= PAGE_SIZE / sizeof(*ht->table)) - ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL); - else - ht->table = vzalloc(array_size(size, sizeof(*ht->table))); - if (!ht->table) { - DRM_ERROR("Out of memory for hash table\n"); - return -ENOMEM; - } - return 0; -} - -void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - unsigned int hashed_key; - int count = 0; - - hashed_key = hash_long(key, ht->order); - DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry(entry, h_list, head) - DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); -} - -static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, - unsigned long key) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - unsigned int hashed_key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry(entry, h_list, head) { - if (entry->key == key) - return &entry->head; - if (entry->key > key) - break; - } - return NULL; -} - -static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht, - unsigned long key) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - unsigned int hashed_key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry_rcu(entry, h_list, head) { - if (entry->key == key) - return &entry->head; - if (entry->key > key) - break; - } - return NULL; -} - -int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - struct hlist_node *parent; - unsigned int hashed_key; - unsigned long key = item->key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - parent = NULL; - hlist_for_each_entry(entry, h_list, head) { - if (entry->key == key) - return -EINVAL; - if (entry->key > key) - break; - parent = &entry->head; - } - if (parent) { - hlist_add_behind_rcu(&item->head, parent); - } else { - hlist_add_head_rcu(&item->head, h_list); - } - return 0; -} - -/* - * Just insert an item and return any "bits" bit key that hasn't been - * used before. 
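The helper below turns the hash table into a key allocator: seed a starting position, then walk the masked key space with wraparound until an insert succeeds or the walk comes full circle. A stripped-down user-space version of that probe loop; BITS and the array-backed "taken" set are illustrative stand-ins for the table inserts.

#include <stdio.h>

#define BITS 4
#define MASK ((1UL << BITS) - 1)

static unsigned char taken[1UL << BITS];

/* Probe in the style of the helper below: from a seeded position, walk
 * the masked key space once, wrapping, until a free slot turns up or
 * the starting point comes around again. */
static long grab_free_key(unsigned long seed)
{
	unsigned long first = seed & MASK, key = first;

	do {
		if (!taken[key]) {
			taken[key] = 1;
			return (long)key;
		}
		key = (key + 1) & MASK;
	} while (key != first);

	return -1; /* key space exhausted */
}

int main(void)
{
	taken[5] = taken[6] = 1;
	printf("got key %ld\n", grab_free_key(5)); /* probes 5, 6 -> 7 */
	return 0;
}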
- */ -int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, - unsigned long seed, int bits, int shift, - unsigned long add) -{ - int ret; - unsigned long mask = (1UL << bits) - 1; - unsigned long first, unshifted_key; - - unshifted_key = hash_long(seed, bits); - first = unshifted_key; - do { - item->key = (unshifted_key << shift) + add; - ret = drm_ht_insert_item(ht, item); - if (ret) - unshifted_key = (unshifted_key + 1) & mask; - } while(ret && (unshifted_key != first)); - - if (ret) { - DRM_ERROR("Available key bit space exhausted\n"); - return -EINVAL; - } - return 0; -} - -int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, - struct drm_hash_item **item) -{ - struct hlist_node *list; - - list = drm_ht_find_key_rcu(ht, key); - if (!list) - return -EINVAL; - - *item = hlist_entry(list, struct drm_hash_item, head); - return 0; -} - -int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) -{ - struct hlist_node *list; - - list = drm_ht_find_key(ht, key); - if (list) { - hlist_del_init_rcu(list); - return 0; - } - return -EINVAL; -} - -int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item) -{ - hlist_del_init_rcu(&item->head); - return 0; -} - -void drm_ht_remove(struct drm_open_hash *ht) -{ - if (ht->table) { - kvfree(ht->table); - ht->table = NULL; - } -} diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index b12c463bc460..8e4faf0a28e6 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -117,17 +117,10 @@ void drm_handle_vblank_works(struct drm_vblank_crtc *vblank); /* IOCTLS */ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); -int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); /* drm_irq.c */ /* IOCTLS */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_legacy_irq_control(struct drm_device *dev, void *data, - struct drm_file *file_priv); -#endif - int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); @@ -194,6 +187,8 @@ void drm_debugfs_connector_remove(struct drm_connector *connector); void drm_debugfs_crtc_add(struct drm_crtc *crtc); void drm_debugfs_crtc_remove(struct drm_crtc *crtc); void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc); +void drm_debugfs_encoder_add(struct drm_encoder *encoder); +void drm_debugfs_encoder_remove(struct drm_encoder *encoder); #else static inline void drm_debugfs_dev_fini(struct drm_device *dev) { @@ -231,6 +226,14 @@ static inline void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc) { } +static inline void drm_debugfs_encoder_add(struct drm_encoder *encoder) +{ +} + +static inline void drm_debugfs_encoder_remove(struct drm_encoder *encoder) +{ +} + #endif drm_ioctl_t drm_version; diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 025dc558c94e..129e2b91dbfe 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -31,12 +31,12 @@ #include <linux/ratelimit.h> #include <linux/export.h> +#include <drm/drm_device.h> #include <drm/drm_file.h> #include <drm/drm_print.h> #include "drm_crtc_internal.h" #include "drm_internal.h" -#include "drm_legacy.h" #define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t) #define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t) @@ -163,92 +163,6 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd, return -EINVAL; } -#if IS_ENABLED(CONFIG_DRM_LEGACY) -typedef struct drm_map32 { - u32 offset; /* 
Requested physical address (0 for SAREA) */ - u32 size; /* Requested physical size (bytes) */ - enum drm_map_type type; /* Type of memory to map */ - enum drm_map_flags flags; /* Flags */ - u32 handle; /* User-space: "Handle" to pass to mmap() */ - int mtrr; /* MTRR slot used */ -} drm_map32_t; - -static int compat_drm_getmap(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_map32_t __user *argp = (void __user *)arg; - drm_map32_t m32; - struct drm_map map; - int err; - - if (copy_from_user(&m32, argp, sizeof(m32))) - return -EFAULT; - - map.offset = m32.offset; - err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, 0); - if (err) - return err; - - m32.offset = map.offset; - m32.size = map.size; - m32.type = map.type; - m32.flags = map.flags; - m32.handle = ptr_to_compat((void __user *)map.handle); - m32.mtrr = map.mtrr; - if (copy_to_user(argp, &m32, sizeof(m32))) - return -EFAULT; - return 0; - -} - -static int compat_drm_addmap(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_map32_t __user *argp = (void __user *)arg; - drm_map32_t m32; - struct drm_map map; - int err; - - if (copy_from_user(&m32, argp, sizeof(m32))) - return -EFAULT; - - map.offset = m32.offset; - map.size = m32.size; - map.type = m32.type; - map.flags = m32.flags; - - err = drm_ioctl_kernel(file, drm_legacy_addmap_ioctl, &map, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - if (err) - return err; - - m32.offset = map.offset; - m32.mtrr = map.mtrr; - m32.handle = ptr_to_compat((void __user *)map.handle); - if (map.handle != compat_ptr(m32.handle)) - pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n", - map.handle, m32.type, m32.offset); - - if (copy_to_user(argp, &m32, sizeof(m32))) - return -EFAULT; - - return 0; -} - -static int compat_drm_rmmap(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_map32_t __user *argp = (void __user *)arg; - struct drm_map map; - u32 handle; - - if (get_user(handle, &argp->handle)) - return -EFAULT; - map.handle = compat_ptr(handle); - return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH); -} -#endif - typedef struct drm_client32 { int idx; /* Which client desired? */ int auth; /* Is client authenticated? 
*/ @@ -308,501 +222,6 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd, return 0; } -#if IS_ENABLED(CONFIG_DRM_LEGACY) -typedef struct drm_buf_desc32 { - int count; /* Number of buffers of this size */ - int size; /* Size in bytes */ - int low_mark; /* Low water mark */ - int high_mark; /* High water mark */ - int flags; - u32 agp_start; /* Start address in the AGP aperture */ -} drm_buf_desc32_t; - -static int compat_drm_addbufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_desc32_t __user *argp = (void __user *)arg; - drm_buf_desc32_t desc32; - struct drm_buf_desc desc; - int err; - - if (copy_from_user(&desc32, argp, sizeof(drm_buf_desc32_t))) - return -EFAULT; - - desc = (struct drm_buf_desc){ - desc32.count, desc32.size, desc32.low_mark, desc32.high_mark, - desc32.flags, desc32.agp_start - }; - - err = drm_ioctl_kernel(file, drm_legacy_addbufs, &desc, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - if (err) - return err; - - desc32 = (drm_buf_desc32_t){ - desc.count, desc.size, desc.low_mark, desc.high_mark, - desc.flags, desc.agp_start - }; - if (copy_to_user(argp, &desc32, sizeof(drm_buf_desc32_t))) - return -EFAULT; - - return 0; -} - -static int compat_drm_markbufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_desc32_t b32; - drm_buf_desc32_t __user *argp = (void __user *)arg; - struct drm_buf_desc buf; - - if (copy_from_user(&b32, argp, sizeof(b32))) - return -EFAULT; - - buf.size = b32.size; - buf.low_mark = b32.low_mark; - buf.high_mark = b32.high_mark; - - return drm_ioctl_kernel(file, drm_legacy_markbufs, &buf, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -typedef struct drm_buf_info32 { - int count; /**< Entries in list */ - u32 list; -} drm_buf_info32_t; - -static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from) -{ - drm_buf_info32_t *request = data; - drm_buf_desc32_t __user *to = compat_ptr(request->list); - drm_buf_desc32_t v = {.count = from->buf_count, - .size = from->buf_size, - .low_mark = from->low_mark, - .high_mark = from->high_mark}; - - if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags))) - return -EFAULT; - return 0; -} - -static int drm_legacy_infobufs32(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - drm_buf_info32_t *request = data; - - return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf32); -} - -static int compat_drm_infobufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_info32_t req32; - drm_buf_info32_t __user *argp = (void __user *)arg; - int err; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - if (req32.count < 0) - req32.count = 0; - - err = drm_ioctl_kernel(file, drm_legacy_infobufs32, &req32, DRM_AUTH); - if (err) - return err; - - if (put_user(req32.count, &argp->count)) - return -EFAULT; - - return 0; -} - -typedef struct drm_buf_pub32 { - int idx; /**< Index into the master buffer list */ - int total; /**< Buffer size */ - int used; /**< Amount of buffer in use (for DMA) */ - u32 address; /**< Address of buffer */ -} drm_buf_pub32_t; - -typedef struct drm_buf_map32 { - int count; /**< Length of the buffer list */ - u32 virtual; /**< Mmap'd area in user-virtual */ - u32 list; /**< Buffer information */ -} drm_buf_map32_t; - -static int map_one_buf32(void *data, int idx, unsigned long virtual, - struct drm_buf *buf) -{ - drm_buf_map32_t *request = data; - drm_buf_pub32_t __user *to = compat_ptr(request->list) + idx; - drm_buf_pub32_t v; - - v.idx = buf->idx; - 
v.total = buf->total; - v.used = 0; - v.address = virtual + buf->offset; - if (copy_to_user(to, &v, sizeof(v))) - return -EFAULT; - return 0; -} - -static int drm_legacy_mapbufs32(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - drm_buf_map32_t *request = data; - void __user *v; - int err = __drm_legacy_mapbufs(dev, data, &request->count, - &v, map_one_buf32, - file_priv); - request->virtual = ptr_to_compat(v); - return err; -} - -static int compat_drm_mapbufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_map32_t __user *argp = (void __user *)arg; - drm_buf_map32_t req32; - int err; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - if (req32.count < 0) - return -EINVAL; - - err = drm_ioctl_kernel(file, drm_legacy_mapbufs32, &req32, DRM_AUTH); - if (err) - return err; - - if (put_user(req32.count, &argp->count) - || put_user(req32.virtual, &argp->virtual)) - return -EFAULT; - - return 0; -} - -typedef struct drm_buf_free32 { - int count; - u32 list; -} drm_buf_free32_t; - -static int compat_drm_freebufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_free32_t req32; - struct drm_buf_free request; - drm_buf_free32_t __user *argp = (void __user *)arg; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - request.count = req32.count; - request.list = compat_ptr(req32.list); - return drm_ioctl_kernel(file, drm_legacy_freebufs, &request, DRM_AUTH); -} - -typedef struct drm_ctx_priv_map32 { - unsigned int ctx_id; /**< Context requesting private mapping */ - u32 handle; /**< Handle of map */ -} drm_ctx_priv_map32_t; - -static int compat_drm_setsareactx(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_ctx_priv_map32_t req32; - struct drm_ctx_priv_map request; - drm_ctx_priv_map32_t __user *argp = (void __user *)arg; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - request.ctx_id = req32.ctx_id; - request.handle = compat_ptr(req32.handle); - return drm_ioctl_kernel(file, drm_legacy_setsareactx, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -static int compat_drm_getsareactx(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct drm_ctx_priv_map req; - drm_ctx_priv_map32_t req32; - drm_ctx_priv_map32_t __user *argp = (void __user *)arg; - int err; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - req.ctx_id = req32.ctx_id; - err = drm_ioctl_kernel(file, drm_legacy_getsareactx, &req, DRM_AUTH); - if (err) - return err; - - req32.handle = ptr_to_compat((void __user *)req.handle); - if (copy_to_user(argp, &req32, sizeof(req32))) - return -EFAULT; - - return 0; -} - -typedef struct drm_ctx_res32 { - int count; - u32 contexts; -} drm_ctx_res32_t; - -static int compat_drm_resctx(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_ctx_res32_t __user *argp = (void __user *)arg; - drm_ctx_res32_t res32; - struct drm_ctx_res res; - int err; - - if (copy_from_user(&res32, argp, sizeof(res32))) - return -EFAULT; - - res.count = res32.count; - res.contexts = compat_ptr(res32.contexts); - err = drm_ioctl_kernel(file, drm_legacy_resctx, &res, DRM_AUTH); - if (err) - return err; - - res32.count = res.count; - if (copy_to_user(argp, &res32, sizeof(res32))) - return -EFAULT; - - return 0; -} - -typedef struct drm_dma32 { - int context; /**< Context handle */ - int send_count; /**< Number of buffers to send */ - u32 send_indices; /**< List of handles to buffers */ - u32 send_sizes; /**< Lengths of data 
to send */ - enum drm_dma_flags flags; /**< Flags */ - int request_count; /**< Number of buffers requested */ - int request_size; /**< Desired size for buffers */ - u32 request_indices; /**< Buffer information */ - u32 request_sizes; - int granted_count; /**< Number of buffers granted */ -} drm_dma32_t; - -static int compat_drm_dma(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_dma32_t d32; - drm_dma32_t __user *argp = (void __user *)arg; - struct drm_dma d; - int err; - - if (copy_from_user(&d32, argp, sizeof(d32))) - return -EFAULT; - - d.context = d32.context; - d.send_count = d32.send_count; - d.send_indices = compat_ptr(d32.send_indices); - d.send_sizes = compat_ptr(d32.send_sizes); - d.flags = d32.flags; - d.request_count = d32.request_count; - d.request_indices = compat_ptr(d32.request_indices); - d.request_sizes = compat_ptr(d32.request_sizes); - err = drm_ioctl_kernel(file, drm_legacy_dma_ioctl, &d, DRM_AUTH); - if (err) - return err; - - if (put_user(d.request_size, &argp->request_size) - || put_user(d.granted_count, &argp->granted_count)) - return -EFAULT; - - return 0; -} -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -#if IS_ENABLED(CONFIG_AGP) -typedef struct drm_agp_mode32 { - u32 mode; /**< AGP mode */ -} drm_agp_mode32_t; - -static int compat_drm_agp_enable(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_mode32_t __user *argp = (void __user *)arg; - struct drm_agp_mode mode; - - if (get_user(mode.mode, &argp->mode)) - return -EFAULT; - - return drm_ioctl_kernel(file, drm_legacy_agp_enable_ioctl, &mode, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -typedef struct drm_agp_info32 { - int agp_version_major; - int agp_version_minor; - u32 mode; - u32 aperture_base; /* physical address */ - u32 aperture_size; /* bytes */ - u32 memory_allowed; /* bytes */ - u32 memory_used; - - /* PCI information */ - unsigned short id_vendor; - unsigned short id_device; -} drm_agp_info32_t; - -static int compat_drm_agp_info(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_info32_t __user *argp = (void __user *)arg; - drm_agp_info32_t i32; - struct drm_agp_info info; - int err; - - err = drm_ioctl_kernel(file, drm_legacy_agp_info_ioctl, &info, DRM_AUTH); - if (err) - return err; - - i32.agp_version_major = info.agp_version_major; - i32.agp_version_minor = info.agp_version_minor; - i32.mode = info.mode; - i32.aperture_base = info.aperture_base; - i32.aperture_size = info.aperture_size; - i32.memory_allowed = info.memory_allowed; - i32.memory_used = info.memory_used; - i32.id_vendor = info.id_vendor; - i32.id_device = info.id_device; - if (copy_to_user(argp, &i32, sizeof(i32))) - return -EFAULT; - - return 0; -} - -typedef struct drm_agp_buffer32 { - u32 size; /**< In bytes -- will round to page boundary */ - u32 handle; /**< Used for binding / unbinding */ - u32 type; /**< Type of memory to allocate */ - u32 physical; /**< Physical used by i810 */ -} drm_agp_buffer32_t; - -static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_buffer32_t __user *argp = (void __user *)arg; - drm_agp_buffer32_t req32; - struct drm_agp_buffer request; - int err; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - request.size = req32.size; - request.type = req32.type; - err = drm_ioctl_kernel(file, drm_legacy_agp_alloc_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - if (err) - return err; - - req32.handle = request.handle; - req32.physical = request.physical; - if 
(copy_to_user(argp, &req32, sizeof(req32))) { - drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - return -EFAULT; - } - - return 0; -} - -static int compat_drm_agp_free(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_buffer32_t __user *argp = (void __user *)arg; - struct drm_agp_buffer request; - - if (get_user(request.handle, &argp->handle)) - return -EFAULT; - - return drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -typedef struct drm_agp_binding32 { - u32 handle; /**< From drm_agp_buffer */ - u32 offset; /**< In bytes -- will round to page boundary */ -} drm_agp_binding32_t; - -static int compat_drm_agp_bind(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_binding32_t __user *argp = (void __user *)arg; - drm_agp_binding32_t req32; - struct drm_agp_binding request; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - request.handle = req32.handle; - request.offset = req32.offset; - return drm_ioctl_kernel(file, drm_legacy_agp_bind_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_binding32_t __user *argp = (void __user *)arg; - struct drm_agp_binding request; - - if (get_user(request.handle, &argp->handle)) - return -EFAULT; - - return drm_ioctl_kernel(file, drm_legacy_agp_unbind_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} -#endif /* CONFIG_AGP */ - -typedef struct drm_scatter_gather32 { - u32 size; /**< In bytes -- will round to page boundary */ - u32 handle; /**< Used for mapping / unmapping */ -} drm_scatter_gather32_t; - -static int compat_drm_sg_alloc(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_scatter_gather32_t __user *argp = (void __user *)arg; - struct drm_scatter_gather request; - int err; - - if (get_user(request.size, &argp->size)) - return -EFAULT; - - err = drm_ioctl_kernel(file, drm_legacy_sg_alloc, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - if (err) - return err; - - /* XXX not sure about the handle conversion here... 
*/ - if (put_user(request.handle >> PAGE_SHIFT, &argp->handle)) - return -EFAULT; - - return 0; -} - -static int compat_drm_sg_free(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_scatter_gather32_t __user *argp = (void __user *)arg; - struct drm_scatter_gather request; - unsigned long x; - - if (get_user(x, &argp->handle)) - return -EFAULT; - request.handle = x << PAGE_SHIFT; - return drm_ioctl_kernel(file, drm_legacy_sg_free, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} -#endif #if defined(CONFIG_X86) typedef struct drm_update_draw32 { drm_drawable_t handle; @@ -854,7 +273,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, req.request.type = req32.request.type; req.request.sequence = req32.request.sequence; req.request.signal = req32.request.signal; - err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED); + err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, 0); req32.reply.type = req.reply.type; req32.reply.sequence = req.reply.sequence; @@ -914,37 +333,9 @@ static struct { #define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n} DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version), DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique), -#if IS_ENABLED(CONFIG_DRM_LEGACY) - DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap), -#endif DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient), DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats), DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique), -#if IS_ENABLED(CONFIG_DRM_LEGACY) - DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap), - DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs), - DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs), - DRM_IOCTL32_DEF(DRM_IOCTL_INFO_BUFS, compat_drm_infobufs), - DRM_IOCTL32_DEF(DRM_IOCTL_MAP_BUFS, compat_drm_mapbufs), - DRM_IOCTL32_DEF(DRM_IOCTL_FREE_BUFS, compat_drm_freebufs), - DRM_IOCTL32_DEF(DRM_IOCTL_RM_MAP, compat_drm_rmmap), - DRM_IOCTL32_DEF(DRM_IOCTL_SET_SAREA_CTX, compat_drm_setsareactx), - DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx), - DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx), - DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma), -#if IS_ENABLED(CONFIG_AGP) - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ALLOC, compat_drm_agp_alloc), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_FREE, compat_drm_agp_free), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind), -#endif -#endif -#if IS_ENABLED(CONFIG_DRM_LEGACY) - DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc), - DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free), -#endif #if defined(CONFIG_X86) DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw), #endif diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 44fda68c28ae..e368fc084c77 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -42,7 +42,6 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -#include "drm_legacy.h" /** * DOC: getunique and setversion story @@ -301,6 +300,10 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_ case DRM_CAP_CRTC_IN_VBLANK_EVENT: req->value = 1; break; + case DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP: + req->value = drm_core_check_feature(dev, DRIVER_ATOMIC) && + dev->mode_config.async_page_flip; + break; default: return -EINVAL; } 
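[Editorial sketch, not part of the patch: the hunk above adds the DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP capability, and the hunk that follows adds the DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT client cap. A minimal userspace probe for both might look like the following, assuming libdrm and uapi headers new enough to carry the two constants introduced by this series; the device path and error handling are illustrative only.]

/*
 * Hedged userspace sketch: probe the new driver capability and opt in
 * to the new client cap. Constants assume updated uapi headers.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>

int main(void)
{
	uint64_t cap = 0;
	int fd = open("/dev/dri/card0", O_RDWR); /* illustrative path */

	if (fd < 0)
		return 1;

	/* Per the hunk above, the kernel reports 1 only for atomic
	 * drivers with mode_config.async_page_flip set. */
	if (drmGetCap(fd, DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP, &cap) == 0)
		printf("atomic async page flip: %llu\n",
		       (unsigned long long)cap);

	/* The following drm_setclientcap() hunk rejects the hotspot cap
	 * with -EINVAL for non-atomic clients, so the client must enable
	 * DRM_CLIENT_CAP_ATOMIC first; drivers without
	 * DRIVER_CURSOR_HOTSPOT return -EOPNOTSUPP. */
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1) == 0 &&
	    drmSetClientCap(fd, DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, 1) == 0)
		printf("virtualized cursor plane supported\n");

	drmClose(fd);
	return 0;
}

[A failed drmSetClientCap() here is itself useful signal: as the hotspot documentation later in this commit notes, userspace can use the EOPNOTSUPP result to gauge whether the driver special-cases cursor planes.]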
@@ -361,6 +364,15 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv) return -EINVAL; file_priv->writeback_connectors = req->value; break; + case DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT: + if (!drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT)) + return -EOPNOTSUPP; + if (!file_priv->atomic) + return -EINVAL; + if (req->value > 1) + return -EINVAL; + file_priv->supports_virtualized_cursor_plane = req->value; + break; default: return -EINVAL; } @@ -559,21 +571,11 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) .name = #ioctl \ } -#if IS_ENABLED(CONFIG_DRM_LEGACY) -#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, _func, _flags) -#else -#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, drm_invalid_op, _flags) -#endif - /* Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid, - DRM_MASTER|DRM_ROOT_ONLY), - - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), @@ -586,63 +588,15 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_MASTER), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH), - - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH), - 
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - -#if IS_ENABLED(CONFIG_AGP) - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_legacy_agp_acquire_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_legacy_agp_release_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_legacy_agp_enable_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_legacy_agp_info_ioctl, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_legacy_agp_alloc_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_legacy_agp_free_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_legacy_agp_bind_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_legacy_agp_unbind_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), -#endif - - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - - DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED), - - DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -775,7 +729,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata, { struct drm_file *file_priv = file->private_data; struct drm_device *dev = file_priv->minor->dev; - int retcode; + int ret; /* Update drm_file owner if fd was passed along. */ drm_file_update_pid(file_priv); @@ -783,20 +737,11 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata, if (drm_dev_is_unplugged(dev)) return -ENODEV; - retcode = drm_ioctl_permit(flags, file_priv); - if (unlikely(retcode)) - return retcode; - - /* Enforce sane locking for modern driver ioctls. */ - if (likely(!drm_core_check_feature(dev, DRIVER_LEGACY)) || - (flags & DRM_UNLOCKED)) - retcode = func(dev, kdata, file_priv); - else { - mutex_lock(&drm_global_mutex); - retcode = func(dev, kdata, file_priv); - mutex_unlock(&drm_global_mutex); - } - return retcode; + ret = drm_ioctl_permit(flags, file_priv); + if (unlikely(ret)) + return ret; + + return func(dev, kdata, file_priv); } EXPORT_SYMBOL(drm_ioctl_kernel); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c deleted file mode 100644 index d327638e15ee..000000000000 --- a/drivers/gpu/drm/drm_irq.c +++ /dev/null @@ -1,204 +0,0 @@ -/* - * drm_irq.c IRQ and vblank support - * - * \author Rickard E. (Rik) Faith <faith@valinux.com> - * \author Gareth Hughes <gareth@valinux.com> - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com - * - * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - - -#include <linux/export.h> -#include <linux/interrupt.h> /* For task queue support */ -#include <linux/pci.h> -#include <linux/vgaarb.h> - -#include <drm/drm.h> -#include <drm/drm_device.h> -#include <drm/drm_drv.h> -#include <drm/drm_legacy.h> -#include <drm/drm_print.h> -#include <drm/drm_vblank.h> - -#include "drm_internal.h" - -static int drm_legacy_irq_install(struct drm_device *dev, int irq) -{ - int ret; - unsigned long sh_flags = 0; - - if (irq == 0) - return -EINVAL; - - if (dev->irq_enabled) - return -EBUSY; - dev->irq_enabled = true; - - DRM_DEBUG("irq=%d\n", irq); - - /* Before installing handler */ - if (dev->driver->irq_preinstall) - dev->driver->irq_preinstall(dev); - - /* PCI devices require shared interrupts. */ - if (dev_is_pci(dev->dev)) - sh_flags = IRQF_SHARED; - - ret = request_irq(irq, dev->driver->irq_handler, - sh_flags, dev->driver->name, dev); - - if (ret < 0) { - dev->irq_enabled = false; - return ret; - } - - /* After installing handler */ - if (dev->driver->irq_postinstall) - ret = dev->driver->irq_postinstall(dev); - - if (ret < 0) { - dev->irq_enabled = false; - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - vga_client_unregister(to_pci_dev(dev->dev)); - free_irq(irq, dev); - } else { - dev->irq = irq; - } - - return ret; -} - -int drm_legacy_irq_uninstall(struct drm_device *dev) -{ - unsigned long irqflags; - bool irq_enabled; - int i; - - irq_enabled = dev->irq_enabled; - dev->irq_enabled = false; - - /* - * Wake up any waiters so they don't hang. This is just to paper over - * issues for UMS drivers which aren't in full control of their - * vblank/irq handling. 
KMS drivers must ensure that vblanks are all - * disabled when uninstalling the irq handler. - */ - if (drm_dev_has_vblank(dev)) { - spin_lock_irqsave(&dev->vbl_lock, irqflags); - for (i = 0; i < dev->num_crtcs; i++) { - struct drm_vblank_crtc *vblank = &dev->vblank[i]; - - if (!vblank->enabled) - continue; - - WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET)); - - drm_vblank_disable_and_save(dev, i); - wake_up(&vblank->queue); - } - spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - } - - if (!irq_enabled) - return -EINVAL; - - DRM_DEBUG("irq=%d\n", dev->irq); - - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - vga_client_unregister(to_pci_dev(dev->dev)); - - if (dev->driver->irq_uninstall) - dev->driver->irq_uninstall(dev); - - free_irq(dev->irq, dev); - - return 0; -} -EXPORT_SYMBOL(drm_legacy_irq_uninstall); - -int drm_legacy_irq_control(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_control *ctl = data; - int ret = 0, irq; - struct pci_dev *pdev; - - /* if we haven't irq we fallback for compatibility reasons - - * this used to be a separate function in drm_dma.h - */ - - if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) - return 0; - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return 0; - /* UMS was only ever supported on pci devices. */ - if (WARN_ON(!dev_is_pci(dev->dev))) - return -EINVAL; - - switch (ctl->func) { - case DRM_INST_HANDLER: - pdev = to_pci_dev(dev->dev); - irq = pdev->irq; - - if (dev->if_version < DRM_IF_VERSION(1, 2) && - ctl->irq != irq) - return -EINVAL; - mutex_lock(&dev->struct_mutex); - ret = drm_legacy_irq_install(dev, irq); - mutex_unlock(&dev->struct_mutex); - - return ret; - case DRM_UNINST_HANDLER: - mutex_lock(&dev->struct_mutex); - ret = drm_legacy_irq_uninstall(dev); - mutex_unlock(&dev->struct_mutex); - - return ret; - default: - return -EINVAL; - } -} diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h deleted file mode 100644 index 70c9dba114a6..000000000000 --- a/drivers/gpu/drm/drm_legacy.h +++ /dev/null @@ -1,290 +0,0 @@ -#ifndef __DRM_LEGACY_H__ -#define __DRM_LEGACY_H__ - -/* - * Copyright (c) 2014 David Herrmann <dh.herrmann@gmail.com> - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * This file contains legacy interfaces that modern drm drivers - * should no longer be using. They cannot be removed as legacy - * drivers use them, and removing them are API breaks. 
- */ -#include <linux/list.h> - -#include <drm/drm.h> -#include <drm/drm_device.h> -#include <drm/drm_legacy.h> - -struct agp_memory; -struct drm_buf_desc; -struct drm_device; -struct drm_file; -struct drm_hash_item; -struct drm_open_hash; - -/* - * Hash-table Support - */ - -#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) - -/* drm_hashtab.c */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_ht_create(struct drm_open_hash *ht, unsigned int order); -int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); -int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, - unsigned long seed, int bits, int shift, - unsigned long add); -int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); - -void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); -int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); -int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); -void drm_ht_remove(struct drm_open_hash *ht); -#endif - -/* - * RCU-safe interface - * - * The user of this API needs to make sure that two or more instances of the - * hash table manipulation functions are never run simultaneously. - * The lookup function drm_ht_find_item_rcu may, however, run simultaneously - * with any of the manipulation functions as long as it's called from within - * an RCU read-locked section. - */ -#define drm_ht_insert_item_rcu drm_ht_insert_item -#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please -#define drm_ht_remove_key_rcu drm_ht_remove_key -#define drm_ht_remove_item_rcu drm_ht_remove_item -#define drm_ht_find_item_rcu drm_ht_find_item - -/* - * Generic DRM Contexts - */ - -#define DRM_KERNEL_CONTEXT 0 -#define DRM_RESERVED_CONTEXTS 1 - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_ctxbitmap_init(struct drm_device *dev); -void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev); -void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file); -#else -static inline void drm_legacy_ctxbitmap_init(struct drm_device *dev) {} -static inline void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev) {} -static inline void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) {} -#endif - -void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle); - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f); - -int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f); -#endif - -/* - * Generic Buffer Management - */ - -#define DRM_MAP_HASH_OFFSET 0x10000000 - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -static inline int drm_legacy_create_map_hash(struct drm_device *dev) -{ - return drm_ht_create(&dev->map_hash, 12); -} - -static inline void drm_legacy_remove_map_hash(struct drm_device *dev) -{ - drm_ht_remove(&dev->map_hash); -} -#else -static inline int drm_legacy_create_map_hash(struct drm_device *dev) -{ - return 0; -} - -static inline void drm_legacy_remove_map_hash(struct 
drm_device *dev) {} -#endif - - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f); - -int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f); -#endif - -int __drm_legacy_infobufs(struct drm_device *, void *, int *, - int (*)(void *, int, struct drm_buf_entry *)); -int __drm_legacy_mapbufs(struct drm_device *, void *, int *, - void __user **, - int (*)(void *, int, unsigned long, struct drm_buf *), - struct drm_file *); - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_master_rmmaps(struct drm_device *dev, - struct drm_master *master); -void drm_legacy_rmmaps(struct drm_device *dev); -#else -static inline void drm_legacy_master_rmmaps(struct drm_device *dev, - struct drm_master *master) {} -static inline void drm_legacy_rmmaps(struct drm_device *dev) {} -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_vma_flush(struct drm_device *d); -#else -static inline void drm_legacy_vma_flush(struct drm_device *d) -{ - /* do nothing */ -} -#endif - -/* - * AGP Support - */ - -struct drm_agp_mem { - unsigned long handle; - struct agp_memory *memory; - unsigned long bound; - int pages; - struct list_head head; -}; - -/* drm_agpsupport.c */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP) -void drm_legacy_agp_clear(struct drm_device *dev); - -int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -#else -static inline void drm_legacy_agp_clear(struct drm_device *dev) {} -#endif - -/* drm_lock.c */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f); -void drm_legacy_lock_release(struct drm_device *dev, struct file *filp); -#else -static inline void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) {} -#endif - -/* DMA support */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_legacy_dma_setup(struct drm_device *dev); -void drm_legacy_dma_takedown(struct drm_device *dev); -#else -static inline int drm_legacy_dma_setup(struct drm_device *dev) -{ - return 0; -} -#endif - -void drm_legacy_free_buffer(struct drm_device *dev, - struct drm_buf * buf); -#if IS_ENABLED(CONFIG_DRM_LEGACY) 
-void drm_legacy_reclaim_buffers(struct drm_device *dev, - struct drm_file *filp); -#else -static inline void drm_legacy_reclaim_buffers(struct drm_device *dev, - struct drm_file *filp) {} -#endif - -/* Scatter Gather Support */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_sg_cleanup(struct drm_device *dev); -int drm_legacy_sg_alloc(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_sg_free(struct drm_device *dev, void *data, - struct drm_file *file_priv); -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_init_members(struct drm_device *dev); -void drm_legacy_destroy_members(struct drm_device *dev); -void drm_legacy_dev_reinit(struct drm_device *dev); -int drm_legacy_setup(struct drm_device * dev); -#else -static inline void drm_legacy_init_members(struct drm_device *dev) {} -static inline void drm_legacy_destroy_members(struct drm_device *dev) {} -static inline void drm_legacy_dev_reinit(struct drm_device *dev) {} -static inline int drm_legacy_setup(struct drm_device * dev) { return 0; } -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master); -#else -static inline void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) {} -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_master_legacy_init(struct drm_master *master); -#else -static inline void drm_master_legacy_init(struct drm_master *master) {} -#endif - -/* drm_pci.c */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_PCI) -int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv); -void drm_legacy_pci_agp_destroy(struct drm_device *dev); -#else -static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return -EINVAL; -} - -static inline void drm_legacy_pci_agp_destroy(struct drm_device *dev) {} -#endif - -#endif /* __DRM_LEGACY_H__ */ diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c deleted file mode 100644 index d4c5434062d7..000000000000 --- a/drivers/gpu/drm/drm_legacy_misc.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * \file drm_legacy_misc.c - * Misc legacy support functions. - * - * \author Rickard E. (Rik) Faith <faith@valinux.com> - * \author Gareth Hughes <gareth@valinux.com> - */ - -/* - * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include <drm/drm_device.h> -#include <drm/drm_drv.h> -#include <drm/drm_print.h> - -#include "drm_internal.h" -#include "drm_legacy.h" - -void drm_legacy_init_members(struct drm_device *dev) -{ - INIT_LIST_HEAD(&dev->ctxlist); - INIT_LIST_HEAD(&dev->vmalist); - INIT_LIST_HEAD(&dev->maplist); - spin_lock_init(&dev->buf_lock); - mutex_init(&dev->ctxlist_mutex); -} - -void drm_legacy_destroy_members(struct drm_device *dev) -{ - mutex_destroy(&dev->ctxlist_mutex); -} - -int drm_legacy_setup(struct drm_device * dev) -{ - int ret; - - if (dev->driver->firstopen && - drm_core_check_feature(dev, DRIVER_LEGACY)) { - ret = dev->driver->firstopen(dev); - if (ret != 0) - return ret; - } - - ret = drm_legacy_dma_setup(dev); - if (ret < 0) - return ret; - - - DRM_DEBUG("\n"); - return 0; -} - -void drm_legacy_dev_reinit(struct drm_device *dev) -{ - if (dev->irq_enabled) - drm_legacy_irq_uninstall(dev); - - mutex_lock(&dev->struct_mutex); - - drm_legacy_agp_clear(dev); - - drm_legacy_sg_cleanup(dev); - drm_legacy_vma_flush(dev); - drm_legacy_dma_takedown(dev); - - mutex_unlock(&dev->struct_mutex); - - dev->sigdata.lock = NULL; - - dev->context_flag = 0; - dev->last_context = 0; - dev->if_version = 0; - - DRM_DEBUG("lastclose completed\n"); -} - -void drm_master_legacy_init(struct drm_master *master) -{ - spin_lock_init(&master->lock.spinlock); - init_waitqueue_head(&master->lock.lock_queue); -} diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c deleted file mode 100644 index 1efbd5389d89..000000000000 --- a/drivers/gpu/drm/drm_lock.c +++ /dev/null @@ -1,373 +0,0 @@ -/* - * \file drm_lock.c - * IOCTLs for locking - * - * \author Rickard E. (Rik) Faith <faith@valinux.com> - * \author Gareth Hughes <gareth@valinux.com> - */ - -/* - * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include <linux/export.h> -#include <linux/sched/signal.h> - -#include <drm/drm.h> -#include <drm/drm_drv.h> -#include <drm/drm_file.h> -#include <drm/drm_print.h> - -#include "drm_internal.h" -#include "drm_legacy.h" - -static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); - -/* - * Take the heavyweight lock. - * - * \param lock lock pointer. - * \param context locking context. - * \return one if the lock is held, or zero otherwise. - * - * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. - */ -static -int drm_lock_take(struct drm_lock_data *lock_data, - unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - spin_lock_bh(&lock_data->spinlock); - do { - old = *lock; - if (old & _DRM_LOCK_HELD) - new = old | _DRM_LOCK_CONT; - else { - new = context | _DRM_LOCK_HELD | - ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? - _DRM_LOCK_CONT : 0); - } - prev = cmpxchg(lock, old, new); - } while (prev != old); - spin_unlock_bh(&lock_data->spinlock); - - if (_DRM_LOCKING_CONTEXT(old) == context) { - if (old & _DRM_LOCK_HELD) { - if (context != DRM_KERNEL_CONTEXT) { - DRM_ERROR("%d holds heavyweight lock\n", - context); - } - return 0; - } - } - - if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) { - /* Have lock */ - return 1; - } - return 0; -} - -/* - * This takes a lock forcibly and hands it to context. Should ONLY be used - * inside *_unlock to give lock to kernel before calling *_dma_schedule. - * - * \param dev DRM device. - * \param lock lock pointer. - * \param context locking context. - * \return always one. - * - * Resets the lock file pointer. - * Marks the lock as held by the given context, via the \p cmpxchg instruction. - */ -static int drm_lock_transfer(struct drm_lock_data *lock_data, - unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - lock_data->file_priv = NULL; - do { - old = *lock; - new = context | _DRM_LOCK_HELD; - prev = cmpxchg(lock, old, new); - } while (prev != old); - return 1; -} - -static int drm_legacy_lock_free(struct drm_lock_data *lock_data, - unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - spin_lock_bh(&lock_data->spinlock); - if (lock_data->kernel_waiters != 0) { - drm_lock_transfer(lock_data, 0); - lock_data->idle_has_lock = 1; - spin_unlock_bh(&lock_data->spinlock); - return 1; - } - spin_unlock_bh(&lock_data->spinlock); - - do { - old = *lock; - new = _DRM_LOCKING_CONTEXT(old); - prev = cmpxchg(lock, old, new); - } while (prev != old); - - if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { - DRM_ERROR("%d freed heavyweight lock held by %d\n", - context, _DRM_LOCKING_CONTEXT(old)); - return 1; - } - wake_up_interruptible(&lock_data->lock_queue); - return 0; -} - -/* - * Lock ioctl. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument, pointing to a drm_lock structure. - * \return zero on success or negative number on failure. - * - * Add the current task to the lock wait queue, and attempt to take to lock. 
- */ -int drm_legacy_lock(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - DECLARE_WAITQUEUE(entry, current); - struct drm_lock *lock = data; - struct drm_master *master = file_priv->master; - int ret = 0; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - ++file_priv->lock_count; - - if (lock->context == DRM_KERNEL_CONTEXT) { - DRM_ERROR("Process %d using kernel context %d\n", - task_pid_nr(current), lock->context); - return -EINVAL; - } - - DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", - lock->context, task_pid_nr(current), - master->lock.hw_lock ? master->lock.hw_lock->lock : -1, - lock->flags); - - add_wait_queue(&master->lock.lock_queue, &entry); - spin_lock_bh(&master->lock.spinlock); - master->lock.user_waiters++; - spin_unlock_bh(&master->lock.spinlock); - - for (;;) { - __set_current_state(TASK_INTERRUPTIBLE); - if (!master->lock.hw_lock) { - /* Device has been unregistered */ - send_sig(SIGTERM, current, 0); - ret = -EINTR; - break; - } - if (drm_lock_take(&master->lock, lock->context)) { - master->lock.file_priv = file_priv; - master->lock.lock_time = jiffies; - break; /* Got lock */ - } - - /* Contention */ - mutex_unlock(&drm_global_mutex); - schedule(); - mutex_lock(&drm_global_mutex); - if (signal_pending(current)) { - ret = -EINTR; - break; - } - } - spin_lock_bh(&master->lock.spinlock); - master->lock.user_waiters--; - spin_unlock_bh(&master->lock.spinlock); - __set_current_state(TASK_RUNNING); - remove_wait_queue(&master->lock.lock_queue, &entry); - - DRM_DEBUG("%d %s\n", lock->context, - ret ? "interrupted" : "has lock"); - if (ret) return ret; - - /* don't set the block all signals on the master process for now - * really probably not the correct answer but lets us debug xkb - * xserver for now */ - if (!drm_is_current_master(file_priv)) { - dev->sigdata.context = lock->context; - dev->sigdata.lock = master->lock.hw_lock; - } - - if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) - { - if (dev->driver->dma_quiescent(dev)) { - DRM_DEBUG("%d waiting for DMA quiescent\n", - lock->context); - return -EBUSY; - } - } - - return 0; -} - -/* - * Unlock ioctl. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument, pointing to a drm_lock structure. - * \return zero on success or negative number on failure. - * - * Transfer and free the lock. - */ -int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - struct drm_lock *lock = data; - struct drm_master *master = file_priv->master; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (lock->context == DRM_KERNEL_CONTEXT) { - DRM_ERROR("Process %d using kernel context %d\n", - task_pid_nr(current), lock->context); - return -EINVAL; - } - - if (drm_legacy_lock_free(&master->lock, lock->context)) { - /* FIXME: Should really bail out here. */ - } - - return 0; -} - -/* - * This function returns immediately and takes the hw lock - * with the kernel context if it is free, otherwise it gets the highest priority when and if - * it is eventually released. - * - * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held - * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause - * a deadlock, which is why the "idlelock" was invented). - * - * This should be sufficient to wait for GPU idle without - * having to worry about starvation. 
- */ -void drm_legacy_idlelock_take(struct drm_lock_data *lock_data) -{ - int ret; - - spin_lock_bh(&lock_data->spinlock); - lock_data->kernel_waiters++; - if (!lock_data->idle_has_lock) { - - spin_unlock_bh(&lock_data->spinlock); - ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT); - spin_lock_bh(&lock_data->spinlock); - - if (ret == 1) - lock_data->idle_has_lock = 1; - } - spin_unlock_bh(&lock_data->spinlock); -} -EXPORT_SYMBOL(drm_legacy_idlelock_take); - -void drm_legacy_idlelock_release(struct drm_lock_data *lock_data) -{ - unsigned int old, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - spin_lock_bh(&lock_data->spinlock); - if (--lock_data->kernel_waiters == 0) { - if (lock_data->idle_has_lock) { - do { - old = *lock; - prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT); - } while (prev != old); - wake_up_interruptible(&lock_data->lock_queue); - lock_data->idle_has_lock = 0; - } - } - spin_unlock_bh(&lock_data->spinlock); -} -EXPORT_SYMBOL(drm_legacy_idlelock_release); - -static int drm_legacy_i_have_hw_lock(struct drm_device *dev, - struct drm_file *file_priv) -{ - struct drm_master *master = file_priv->master; - - return (file_priv->lock_count && master->lock.hw_lock && - _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && - master->lock.file_priv == file_priv); -} - -void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) -{ - struct drm_file *file_priv = filp->private_data; - - /* if the master has gone away we can't do anything with the lock */ - if (!dev->master) - return; - - if (drm_legacy_i_have_hw_lock(dev, file_priv)) { - DRM_DEBUG("File %p released, freeing lock for context %d\n", - filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); - drm_legacy_lock_free(&file_priv->master->lock, - _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); - } -} - -void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - /* - * Since the master is disappearing, so is the - * possibility to lock. - */ - mutex_lock(&dev->struct_mutex); - if (master->lock.hw_lock) { - if (dev->sigdata.lock == master->lock.hw_lock) - dev->sigdata.lock = NULL; - master->lock.hw_lock = NULL; - master->lock.file_priv = NULL; - wake_up_interruptible_all(&master->lock.lock_queue); - } - mutex_unlock(&dev->struct_mutex); -} diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c deleted file mode 100644 index d2e1dccd8113..000000000000 --- a/drivers/gpu/drm/drm_memory.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * \file drm_memory.c - * Memory management wrappers for DRM - * - * \author Rickard E. (Rik) Faith <faith@valinux.com> - * \author Gareth Hughes <gareth@valinux.com> - */ - -/* - * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include <linux/export.h> -#include <linux/highmem.h> -#include <linux/pci.h> -#include <linux/vmalloc.h> - -#include <drm/drm_cache.h> -#include <drm/drm_device.h> - -#include "drm_legacy.h" - -#if IS_ENABLED(CONFIG_AGP) - -#ifdef HAVE_PAGE_AGP -# include <asm/agp.h> -#else -# ifdef __powerpc__ -# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL) -# else -# define PAGE_AGP PAGE_KERNEL -# endif -#endif - -static void *agp_remap(unsigned long offset, unsigned long size, - struct drm_device *dev) -{ - unsigned long i, num_pages = - PAGE_ALIGN(size) / PAGE_SIZE; - struct drm_agp_mem *agpmem; - struct page **page_map; - struct page **phys_page_map; - void *addr; - - size = PAGE_ALIGN(size); - -#ifdef __alpha__ - offset -= dev->hose->mem_space->start; -#endif - - list_for_each_entry(agpmem, &dev->agp->memory, head) - if (agpmem->bound <= offset - && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= - (offset + size)) - break; - if (&agpmem->head == &dev->agp->memory) - return NULL; - - /* - * OK, we're mapping AGP space on a chipset/platform on which memory accesses by - * the CPU do not get remapped by the GART. We fix this by using the kernel's - * page-table instead (that's probably faster anyhow...). - */ - /* note: use vmalloc() because num_pages could be large... 
*/ - page_map = vmalloc(array_size(num_pages, sizeof(struct page *))); - if (!page_map) - return NULL; - - phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE); - for (i = 0; i < num_pages; ++i) - page_map[i] = phys_page_map[i]; - addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); - vfree(page_map); - - return addr; -} - -#else /* CONFIG_AGP */ -static inline void *agp_remap(unsigned long offset, unsigned long size, - struct drm_device *dev) -{ - return NULL; -} - -#endif /* CONFIG_AGP */ - -void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev) -{ - if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) - map->handle = agp_remap(map->offset, map->size, dev); - else - map->handle = ioremap(map->offset, map->size); -} -EXPORT_SYMBOL(drm_legacy_ioremap); - -void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev) -{ - if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) - map->handle = agp_remap(map->offset, map->size, dev); - else - map->handle = ioremap_wc(map->offset, map->size); -} -EXPORT_SYMBOL(drm_legacy_ioremap_wc); - -void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev) -{ - if (!map->handle || !map->size) - return; - - if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) - vunmap(map->handle); - else - iounmap(map->handle); -} -EXPORT_SYMBOL(drm_legacy_ioremapfree); diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 14201f73aab1..843a6dbda93a 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -347,7 +347,8 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); - mipi_dsi_detach(dsi); + if (dsi->attached) + mipi_dsi_detach(dsi); mipi_dsi_device_unregister(dsi); return 0; @@ -370,11 +371,18 @@ EXPORT_SYMBOL(mipi_dsi_host_unregister); int mipi_dsi_attach(struct mipi_dsi_device *dsi) { const struct mipi_dsi_host_ops *ops = dsi->host->ops; + int ret; if (!ops || !ops->attach) return -ENOSYS; - return ops->attach(dsi->host, dsi); + ret = ops->attach(dsi->host, dsi); + if (ret) + return ret; + + dsi->attached = true; + + return 0; } EXPORT_SYMBOL(mipi_dsi_attach); @@ -386,9 +394,14 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi) { const struct mipi_dsi_host_ops *ops = dsi->host->ops; + if (WARN_ON(!dsi->attached)) + return -EINVAL; + if (!ops || !ops->detach) return -ENOSYS; + dsi->attached = false; + return ops->detach(dsi->host, dsi); } EXPORT_SYMBOL(mipi_dsi_detach); diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index ac0d2ce3f870..0e8355063eee 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -538,7 +538,7 @@ retry: obj_to_connector(obj), prop_value); } else { - ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value); + ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value, false); if (ret) goto out; ret = drm_atomic_commit(state); diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 39d35fc3a43b..c585f1e8803e 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -29,18 +29,12 @@ #include <linux/pci.h> #include <linux/slab.h> +#include <drm/drm_auth.h> #include <drm/drm.h> #include <drm/drm_drv.h> #include <drm/drm_print.h> #include "drm_internal.h" -#include "drm_legacy.h" - -#ifdef CONFIG_DRM_LEGACY -/* List of devices hanging off drivers with stealth 
attach. */ -static LIST_HEAD(legacy_dev_list); -static DEFINE_MUTEX(legacy_dev_list_lock); -#endif static int drm_get_pci_domain(struct drm_device *dev) { @@ -71,199 +65,3 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) master->unique_len = strlen(master->unique); return 0; } - -#ifdef CONFIG_DRM_LEGACY - -static int drm_legacy_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) -{ - struct pci_dev *pdev = to_pci_dev(dev->dev); - - if ((p->busnum >> 8) != drm_get_pci_domain(dev) || - (p->busnum & 0xff) != pdev->bus->number || - p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn)) - return -EINVAL; - - p->irq = pdev->irq; - - DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, - p->irq); - return 0; -} - -/** - * drm_legacy_irq_by_busid - Get interrupt from bus ID - * @dev: DRM device - * @data: IOCTL parameter pointing to a drm_irq_busid structure - * @file_priv: DRM file private. - * - * Finds the PCI device with the specified bus id and gets its IRQ number. - * This IOCTL is deprecated, and will now return EINVAL for any busid not equal - * to that of the device that this DRM instance attached to. - * - * Return: 0 on success or a negative error code on failure. - */ -int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_irq_busid *p = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - /* UMS was only ever support on PCI devices. */ - if (WARN_ON(!dev_is_pci(dev->dev))) - return -EINVAL; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) - return -EOPNOTSUPP; - - return drm_legacy_pci_irq_by_busid(dev, p); -} - -void drm_legacy_pci_agp_destroy(struct drm_device *dev) -{ - if (dev->agp) { - arch_phys_wc_del(dev->agp->agp_mtrr); - drm_legacy_agp_clear(dev); - kfree(dev->agp); - dev->agp = NULL; - } -} - -static void drm_legacy_pci_agp_init(struct drm_device *dev) -{ - if (drm_core_check_feature(dev, DRIVER_USE_AGP)) { - if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP)) - dev->agp = drm_legacy_agp_init(dev); - if (dev->agp) { - dev->agp->agp_mtrr = arch_phys_wc_add( - dev->agp->agp_info.aper_base, - dev->agp->agp_info.aper_size * - 1024 * 1024); - } - } -} - -static int drm_legacy_get_pci_dev(struct pci_dev *pdev, - const struct pci_device_id *ent, - const struct drm_driver *driver) -{ - struct drm_device *dev; - int ret; - - DRM_DEBUG("\n"); - - dev = drm_dev_alloc(driver, &pdev->dev); - if (IS_ERR(dev)) - return PTR_ERR(dev); - - ret = pci_enable_device(pdev); - if (ret) - goto err_free; - -#ifdef __alpha__ - dev->hose = pdev->sysdata; -#endif - - drm_legacy_pci_agp_init(dev); - - ret = drm_dev_register(dev, ent->driver_data); - if (ret) - goto err_agp; - - if (drm_core_check_feature(dev, DRIVER_LEGACY)) { - mutex_lock(&legacy_dev_list_lock); - list_add_tail(&dev->legacy_dev_list, &legacy_dev_list); - mutex_unlock(&legacy_dev_list_lock); - } - - return 0; - -err_agp: - drm_legacy_pci_agp_destroy(dev); - pci_disable_device(pdev); -err_free: - drm_dev_put(dev); - return ret; -} - -/** - * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver - * @driver: DRM device driver - * @pdriver: PCI device driver - * - * This is only used by legacy dri1 drivers and deprecated. - * - * Return: 0 on success or a negative error code on failure. 
- */ -int drm_legacy_pci_init(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ - struct pci_dev *pdev = NULL; - const struct pci_device_id *pid; - int i; - - DRM_DEBUG("\n"); - - if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY))) - return -EINVAL; - - /* If not using KMS, fall back to stealth mode manual scanning. */ - for (i = 0; pdriver->id_table[i].vendor != 0; i++) { - pid = &pdriver->id_table[i]; - - /* Loop around setting up a DRM device for each PCI device - * matching our ID and device class. If we had the internal - * function that pci_get_subsys and pci_get_class used, we'd - * be able to just pass pid in instead of doing a two-stage - * thing. - */ - pdev = NULL; - while ((pdev = - pci_get_subsys(pid->vendor, pid->device, pid->subvendor, - pid->subdevice, pdev)) != NULL) { - if ((pdev->class & pid->class_mask) != pid->class) - continue; - - /* stealth mode requires a manual probe */ - pci_dev_get(pdev); - drm_legacy_get_pci_dev(pdev, pid, driver); - } - } - return 0; -} -EXPORT_SYMBOL(drm_legacy_pci_init); - -/** - * drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver - * @driver: DRM device driver - * @pdriver: PCI device driver - * - * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This - * is deprecated and only used by dri1 drivers. - */ -void drm_legacy_pci_exit(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ - struct drm_device *dev, *tmp; - - DRM_DEBUG("\n"); - - if (!(driver->driver_features & DRIVER_LEGACY)) { - WARN_ON(1); - } else { - mutex_lock(&legacy_dev_list_lock); - list_for_each_entry_safe(dev, tmp, &legacy_dev_list, - legacy_dev_list) { - if (dev->driver == driver) { - list_del(&dev->legacy_dev_list); - drm_put_dev(dev); - } - } - mutex_unlock(&legacy_dev_list_lock); - } - DRM_INFO("Module unloaded\n"); -} -EXPORT_SYMBOL(drm_legacy_pci_exit); - -#endif diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 24e7998d1731..9e8e4c60983d 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -230,6 +230,103 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane return 0; } +/** + * DOC: hotspot properties + * + * HOTSPOT_X: property to set mouse hotspot x offset. + * HOTSPOT_Y: property to set mouse hotspot y offset. + * + * When the plane is being used as a cursor image to display a mouse pointer, + * the "hotspot" is the offset within the cursor image where mouse events + * are expected to go. + * + * Positive values move the hotspot from the top-left corner of the cursor + * plane towards the right and bottom. + * + * Most display drivers do not need this information because the + * hotspot is not actually connected to anything visible on screen. + * However, this is necessary for display drivers like the para-virtualized + * drivers (eg qxl, vbox, virtio, vmwgfx), that are attached to a user console + * with a mouse pointer. Since these consoles are often being remoted over a + * network, they would otherwise have to wait to display the pointer movement to + * the user until a full network round-trip has occurred. New mouse events have + * to be sent from the user's console, over the network to the virtual input + * devices, forwarded to the desktop for processing, and then the cursor plane's + * position can be updated and sent back to the user's console over the network. 
+ * Instead, with the hotspot information, the console can anticipate the new + * location, and draw the mouse cursor there before the confirmation comes in. + * To do that correctly, the user's console must be able to predict how the + * desktop will process mouse events, which normally requires the desktop's + * mouse topology information, i.e. where each CRTC sits in the mouse coordinate + * space. This is typically sent to the para-virtualized drivers using some + * driver-specific method, and the driver then forwards it to the console by + * way of the virtual display device or hypervisor. + * + * The assumption is generally made that there is only one cursor plane being + * used this way at a time, and that the desktop is feeding all mouse devices + * into the same global pointer. Para-virtualized drivers that require this + * should only be exposing a single cursor plane, or find some other way + * to coordinate with a userspace desktop that supports multiple pointers. + * If the hotspot properties are set, the cursor plane is therefore assumed to be + * used only for displaying a mouse cursor image, and the position of the combined + * cursor plane + offset can then be used for coordinating with input from a + * mouse device. + * + * The cursor will then be drawn either at the location of the plane in the CRTC + * console, or as a free-floating cursor plane on the user's console + * corresponding to their desktop mouse position. + * + * DRM clients which would like to work correctly on drivers which expose + * hotspot properties should advertise DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT. + * Setting these properties on drivers which do not special-case + * cursor planes will return EOPNOTSUPP, which can be used by userspace to + * gauge requirements of the hardware/drivers they're running on. Advertising + * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT implies that the userspace client will be + * correctly setting the hotspot properties. + */ + +/** + * drm_plane_create_hotspot_properties - creates the mouse hotspot + * properties and attaches them to the given cursor plane + * + * @plane: drm cursor plane + * + * This function enables the mouse hotspot properties on a given + * cursor plane. Look at the documentation for hotspot properties + * to get a better understanding of what they're used for.
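From a client's side, the contract in the DOC block above amounts to: opt in to the cap, then keep HOTSPOT_X/HOTSPOT_Y up to date on the cursor plane. A hedged libdrm sketch (property IDs are discovered at runtime; error handling trimmed):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int enable_hotspot_cap(int fd)
{
	/* The cap introduced alongside this series; must be set before
	 * enumerating planes. */
	return drmSetClientCap(fd, DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, 1);
}

static uint32_t plane_prop_id(int fd, uint32_t plane_id, const char *name)
{
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
	uint32_t id = 0;

	for (uint32_t i = 0; props && i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, name))
			id = prop->prop_id;
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
	return id;
}

static int set_cursor_hotspot(int fd, uint32_t plane_id, int32_t x, int32_t y)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	drmModeAtomicAddProperty(req, plane_id,
				 plane_prop_id(fd, plane_id, "HOTSPOT_X"),
				 (uint64_t)(int64_t)x);
	drmModeAtomicAddProperty(req, plane_id,
				 plane_prop_id(fd, plane_id, "HOTSPOT_Y"),
				 (uint64_t)(int64_t)y);
	ret = drmModeAtomicCommit(fd, req, 0, NULL);
	drmModeAtomicFree(req);
	return ret;
}

The drmSetClientCap() call has to happen before plane enumeration: as drm_mode_getplane_res() below shows, atomic clients that do not advertise the cap never see the cursor plane on DRIVER_CURSOR_HOTSPOT devices.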
+ * + * RETURNS: + * Zero for success or -errno + */ +static int drm_plane_create_hotspot_properties(struct drm_plane *plane) +{ + struct drm_property *prop_x; + struct drm_property *prop_y; + + drm_WARN_ON(plane->dev, + !drm_core_check_feature(plane->dev, + DRIVER_CURSOR_HOTSPOT)); + + prop_x = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_X", + INT_MIN, INT_MAX); + if (IS_ERR(prop_x)) + return PTR_ERR(prop_x); + + prop_y = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_Y", + INT_MIN, INT_MAX); + if (IS_ERR(prop_y)) { + drm_property_destroy(plane->dev, prop_x); + return PTR_ERR(prop_y); + } + + drm_object_attach_property(&plane->base, prop_x, 0); + drm_object_attach_property(&plane->base, prop_y, 0); + plane->hotspot_x_property = prop_x; + plane->hotspot_y_property = prop_y; + + return 0; +} + __printf(9, 0) static int __drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, @@ -348,6 +445,10 @@ static int __drm_universal_plane_init(struct drm_device *dev, drm_object_attach_property(&plane->base, config->prop_src_w, 0); drm_object_attach_property(&plane->base, config->prop_src_h, 0); } + if (drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) && + type == DRM_PLANE_TYPE_CURSOR) { + drm_plane_create_hotspot_properties(plane); + } if (format_modifier_count) create_in_format_blob(dev, plane); @@ -678,6 +779,19 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data, !file_priv->universal_planes) continue; + /* + * If we're running on a virtualized driver then, + * unless userspace advertises support for the + * virtualized cursor plane, disable cursor planes + * because they'll be broken due to missing cursor + * hotspot info. + */ + if (plane->type == DRM_PLANE_TYPE_CURSOR && + drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) && + file_priv->atomic && + !file_priv->supports_virtualized_cursor_plane) + continue; + if (drm_lease_held(file_priv, plane->base.id)) { if (count < plane_resp->count_planes && put_user(plane->base.id, plane_ptr + count)) @@ -1052,8 +1166,10 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, return PTR_ERR(fb); } - fb->hot_x = req->hot_x; - fb->hot_y = req->hot_y; + if (plane->hotspot_x_property && plane->state) + plane->state->hotspot_x = req->hot_x; + if (plane->hotspot_y_property && plane->state) + plane->state->hotspot_y = req->hot_y; } else { fb = NULL; } @@ -1442,6 +1558,36 @@ out: * Drivers implementing damage can use drm_atomic_helper_damage_iter_init() and * drm_atomic_helper_damage_iter_next() helper iterator function to get damage * rectangles clipped to &drm_plane_state.src. + * + * Note that there are two types of damage handling: frame damage and buffer + * damage; the type of damage handling implemented depends on a driver's upload + * target. Drivers implementing a per-plane or per-CRTC upload target need to + * handle frame damage, while drivers implementing a per-buffer upload target + * need to handle buffer damage. + * + * The existing damage helpers only support the frame damage type; there is no + * buffer age support or similar damage accumulation algorithm implemented yet. + * + * Only drivers handling frame damage can use the mentioned damage helpers to + * iterate over the damaged regions. Drivers that handle buffer damage must set + * &drm_plane_state.ignore_damage_clips for drm_atomic_helper_damage_iter_init() + * to know that damage clips should be ignored and return &drm_plane_state.src + * as the damage rectangle, to force a full plane update.
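To make the frame-damage side of this concrete, a minimal sketch of a plane ->atomic_update for a driver with a per-plane upload target, walking the clipped rectangles with the helpers named above; foo_upload_rect() is a hypothetical per-driver upload, and the buffer-damage case is covered by the paragraphs that follow:

#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_rect.h>

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	/* Iterates damage clipped to new_state->src; yields the full
	 * plane once if no damage clips were posted or if
	 * new_state->ignore_damage_clips is set. */
	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		foo_upload_rect(new_state->fb, &clip);
}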
+ * + * Drivers with a per-buffer upload target could compare the &drm_plane_state.fb + * of the old and new plane states to determine if the framebuffer attached to a + * plane has changed or not since the last plane update. If &drm_plane_state.fb + * has changed, then &drm_plane_state.ignore_damage_clips must be set to true. + * + * That is because drivers with a per-plane upload target expect the backing + * storage buffer to not change for a given plane. If the upload buffer changes + * between page flips, the new upload buffer has to be updated as a whole. This + * can be improved in the future if support for frame damage is added to the DRM + * damage helpers, similarly to how user-space already handles this case as it is + * explained in the following documents: + * + * https://registry.khronos.org/EGL/extensions/KHR/EGL_KHR_swap_buffers_with_damage.txt + * https://emersion.fr/blog/2019/intro-to-damage-tracking/ */ /** diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 5e95089676ff..7982be4b0306 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -279,35 +279,3 @@ void drm_plane_helper_destroy(struct drm_plane *plane) kfree(plane); } EXPORT_SYMBOL(drm_plane_helper_destroy); - -/** - * drm_plane_helper_atomic_check() - Helper to check plane atomic-state - * @plane: plane to check - * @state: atomic state object - * - * Provides a default plane-state check handler for planes whose atomic-state - * scale and positioning are not expected to change since the plane is always - * a fullscreen scanout buffer. - * - * This is often the case for the primary plane of simple framebuffers. See - * also drm_crtc_helper_atomic_check() for the respective CRTC-state check - * helper function. - * - * RETURNS: - * Zero on success, or an errno code otherwise. - */ -int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) -{ - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); - struct drm_crtc *new_crtc = new_plane_state->crtc; - struct drm_crtc_state *new_crtc_state = NULL; - - if (new_crtc) - new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); - - return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - false, false); -} -EXPORT_SYMBOL(drm_plane_helper_atomic_check); diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c deleted file mode 100644 index f4e6184d1877..000000000000 --- a/drivers/gpu/drm/drm_scatter.c +++ /dev/null @@ -1,220 +0,0 @@ -/* - * \file drm_scatter.c - * IOCTLs to manage scatter/gather memory - * - * \author Gareth Hughes <gareth@valinux.com> - */ - -/* - * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com - * - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software.
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include <linux/mm.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> - -#include <drm/drm.h> -#include <drm/drm_drv.h> -#include <drm/drm_print.h> - -#include "drm_legacy.h" - -#define DEBUG_SCATTER 0 - -static void drm_sg_cleanup(struct drm_sg_mem * entry) -{ - struct page *page; - int i; - - for (i = 0; i < entry->pages; i++) { - page = entry->pagelist[i]; - if (page) - ClearPageReserved(page); - } - - vfree(entry->virtual); - - kfree(entry->busaddr); - kfree(entry->pagelist); - kfree(entry); -} - -void drm_legacy_sg_cleanup(struct drm_device *dev) -{ - if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg && - drm_core_check_feature(dev, DRIVER_LEGACY)) { - drm_sg_cleanup(dev->sg); - dev->sg = NULL; - } -} -#ifdef _LP64 -# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1))) -#else -# define ScatterHandle(x) (unsigned int)(x) -#endif - -int drm_legacy_sg_alloc(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_scatter_gather *request = data; - struct drm_sg_mem *entry; - unsigned long pages, i, j; - - DRM_DEBUG("\n"); - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_SG)) - return -EOPNOTSUPP; - - if (request->size > SIZE_MAX - PAGE_SIZE) - return -EINVAL; - - if (dev->sg) - return -EINVAL; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; - DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages); - - entry->pages = pages; - entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL); - if (!entry->pagelist) { - kfree(entry); - return -ENOMEM; - } - - entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL); - if (!entry->busaddr) { - kfree(entry->pagelist); - kfree(entry); - return -ENOMEM; - } - - entry->virtual = vmalloc_32(pages << PAGE_SHIFT); - if (!entry->virtual) { - kfree(entry->busaddr); - kfree(entry->pagelist); - kfree(entry); - return -ENOMEM; - } - - /* This also forces the mapping of COW pages, so our page list - * will be valid. Please don't remove it... - */ - memset(entry->virtual, 0, pages << PAGE_SHIFT); - - entry->handle = ScatterHandle((unsigned long)entry->virtual); - - DRM_DEBUG("handle = %08lx\n", entry->handle); - DRM_DEBUG("virtual = %p\n", entry->virtual); - - for (i = (unsigned long)entry->virtual, j = 0; j < pages; - i += PAGE_SIZE, j++) { - entry->pagelist[j] = vmalloc_to_page((void *)i); - if (!entry->pagelist[j]) - goto failed; - SetPageReserved(entry->pagelist[j]); - } - - request->handle = entry->handle; - - dev->sg = entry; - -#if DEBUG_SCATTER - /* Verify that each page points to its virtual address, and vice - * versa. 
- */ - { - int error = 0; - - for (i = 0; i < pages; i++) { - unsigned long *tmp; - - tmp = page_address(entry->pagelist[i]); - for (j = 0; - j < PAGE_SIZE / sizeof(unsigned long); - j++, tmp++) { - *tmp = 0xcafebabe; - } - tmp = (unsigned long *)((u8 *) entry->virtual + - (PAGE_SIZE * i)); - for (j = 0; - j < PAGE_SIZE / sizeof(unsigned long); - j++, tmp++) { - if (*tmp != 0xcafebabe && error == 0) { - error = 1; - DRM_ERROR("Scatter allocation error, " - "pagelist does not match " - "virtual mapping\n"); - } - } - tmp = page_address(entry->pagelist[i]); - for (j = 0; - j < PAGE_SIZE / sizeof(unsigned long); - j++, tmp++) { - *tmp = 0; - } - } - if (error == 0) - DRM_ERROR("Scatter allocation matches pagelist\n"); - } -#endif - - return 0; - - failed: - drm_sg_cleanup(entry); - return -ENOMEM; -} - -int drm_legacy_sg_free(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_scatter_gather *request = data; - struct drm_sg_mem *entry; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_SG)) - return -EOPNOTSUPP; - - entry = dev->sg; - dev->sg = NULL; - - if (!entry || entry->handle != request->handle) - return -EINVAL; - - DRM_DEBUG("virtual = %p\n", entry->virtual); - - drm_sg_cleanup(entry); - - return 0; -} diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 01da6789d044..cbb65b7ba425 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -126,6 +126,11 @@ * synchronize between the two. * This requirement is inherited from the Vulkan fence API. * + * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE is set, the ioctl will also set + * a fence deadline hint on the backing fences before waiting, to provide the + * fence signaler with an appropriate sense of urgency. The deadline is + * specified as an absolute &CLOCK_MONOTONIC value in units of ns. + * * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj * handles as well as an array of u64 points and does a host-side wait on all * of syncobj fences at the given points simultaneously. 
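For reference, driving the new deadline flag from userspace might look like the sketch below. It assumes a uapi header recent enough to carry the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag and the deadline_nsec field added in this cycle; both the timeout and the deadline are absolute CLOCK_MONOTONIC values in nanoseconds:

#include <stdint.h>
#include <sys/ioctl.h>
#include <time.h>
#include <drm/drm.h>

static int syncobj_wait_with_deadline(int fd, uint32_t handle,
				      int64_t abs_timeout_ns,
				      uint64_t budget_ns)
{
	struct timespec ts;
	struct drm_syncobj_wait wait = {
		.handles = (uintptr_t)&handle,
		.timeout_nsec = abs_timeout_ns,
		.count_handles = 1,
		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE,
	};

	/* Hint to the signaler: we would like the fence done within
	 * budget_ns from now. */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	wait.deadline_nsec = (uint64_t)ts.tv_sec * 1000000000ull +
			     (uint64_t)ts.tv_nsec + budget_ns;

	return ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
}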
@@ -1027,7 +1032,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, uint32_t count, uint32_t flags, signed long timeout, - uint32_t *idx) + uint32_t *idx, + ktime_t *deadline) { struct syncobj_wait_entry *entries; struct dma_fence *fence; @@ -1108,6 +1114,15 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]); } + if (deadline) { + for (i = 0; i < count; ++i) { + fence = entries[i].fence; + if (!fence) + continue; + dma_fence_set_deadline(fence, *deadline); + } + } + do { set_current_state(TASK_INTERRUPTIBLE); @@ -1206,7 +1221,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev, struct drm_file *file_private, struct drm_syncobj_wait *wait, struct drm_syncobj_timeline_wait *timeline_wait, - struct drm_syncobj **syncobjs, bool timeline) + struct drm_syncobj **syncobjs, bool timeline, + ktime_t *deadline) { signed long timeout = 0; uint32_t first = ~0; @@ -1217,7 +1233,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev, NULL, wait->count_handles, wait->flags, - timeout, &first); + timeout, &first, + deadline); if (timeout < 0) return timeout; wait->first_signaled = first; @@ -1227,7 +1244,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev, u64_to_user_ptr(timeline_wait->points), timeline_wait->count_handles, timeline_wait->flags, - timeout, &first); + timeout, &first, + deadline); if (timeout < 0) return timeout; timeline_wait->first_signaled = first; @@ -1298,17 +1316,22 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, { struct drm_syncobj_wait *args = data; struct drm_syncobj **syncobjs; + unsigned int possible_flags; + ktime_t t, *tp = NULL; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) return -EOPNOTSUPP; - if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | - DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)) + possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE; + + if (args->flags & ~possible_flags) return -EINVAL; if (args->count_handles == 0) - return -EINVAL; + return 0; ret = drm_syncobj_array_find(file_private, u64_to_user_ptr(args->handles), @@ -1317,8 +1340,13 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, if (ret < 0) return ret; + if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) { + t = ns_to_ktime(args->deadline_nsec); + tp = &t; + } + ret = drm_syncobj_array_wait(dev, file_private, - args, NULL, syncobjs, false); + args, NULL, syncobjs, false, tp); drm_syncobj_array_free(syncobjs, args->count_handles); @@ -1331,18 +1359,23 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data, { struct drm_syncobj_timeline_wait *args = data; struct drm_syncobj **syncobjs; + unsigned int possible_flags; + ktime_t t, *tp = NULL; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) return -EOPNOTSUPP; - if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | - DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | - DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) + possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE; + + if (args->flags & ~possible_flags) return -EINVAL; if (args->count_handles == 0) - return -EINVAL; + return 0; ret = drm_syncobj_array_find(file_private, u64_to_user_ptr(args->handles), @@ -1351,8 +1384,13 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void 
*data, if (ret < 0) return ret; + if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) { + t = ns_to_ktime(args->deadline_nsec); + tp = &t; + } + ret = drm_syncobj_array_wait(dev, file_private, - NULL, args, syncobjs, true); + NULL, args, syncobjs, true, tp); drm_syncobj_array_free(syncobjs, args->count_handles); diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 877e2067534f..702a12bc93bd 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -210,11 +210,6 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->get_vblank_counter) return crtc->funcs->get_vblank_counter(crtc); } -#ifdef CONFIG_DRM_LEGACY - else if (dev->driver->get_vblank_counter) { - return dev->driver->get_vblank_counter(dev, pipe); - } -#endif return drm_vblank_no_hw_counter(dev, pipe); } @@ -433,11 +428,6 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->disable_vblank) crtc->funcs->disable_vblank(crtc); } -#ifdef CONFIG_DRM_LEGACY - else { - dev->driver->disable_vblank(dev, pipe); - } -#endif } /* @@ -1151,11 +1141,6 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->enable_vblank) return crtc->funcs->enable_vblank(crtc); } -#ifdef CONFIG_DRM_LEGACY - else if (dev->driver->enable_vblank) { - return dev->driver->enable_vblank(dev, pipe); - } -#endif return -EINVAL; } @@ -1574,88 +1559,6 @@ void drm_crtc_vblank_restore(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_crtc_vblank_restore); -static void drm_legacy_vblank_pre_modeset(struct drm_device *dev, - unsigned int pipe) -{ - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - - /* vblank is not initialized (IRQ not installed ?), or has been freed */ - if (!drm_dev_has_vblank(dev)) - return; - - if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) - return; - - /* - * To avoid all the problems that might happen if interrupts - * were enabled/disabled around or between these calls, we just - * have the kernel take a reference on the CRTC (just once though - * to avoid corrupting the count if multiple, mismatch calls occur), - * so that interrupts remain enabled in the interim. 
- */ - if (!vblank->inmodeset) { - vblank->inmodeset = 0x1; - if (drm_vblank_get(dev, pipe) == 0) - vblank->inmodeset |= 0x2; - } -} - -static void drm_legacy_vblank_post_modeset(struct drm_device *dev, - unsigned int pipe) -{ - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - - /* vblank is not initialized (IRQ not installed ?), or has been freed */ - if (!drm_dev_has_vblank(dev)) - return; - - if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) - return; - - if (vblank->inmodeset) { - spin_lock_irq(&dev->vbl_lock); - drm_reset_vblank_timestamp(dev, pipe); - spin_unlock_irq(&dev->vbl_lock); - - if (vblank->inmodeset & 0x2) - drm_vblank_put(dev, pipe); - - vblank->inmodeset = 0; - } -} - -int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_modeset_ctl *modeset = data; - unsigned int pipe; - - /* If drm_vblank_init() hasn't been called yet, just no-op */ - if (!drm_dev_has_vblank(dev)) - return 0; - - /* KMS drivers handle this internally */ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return 0; - - pipe = modeset->crtc; - if (pipe >= dev->num_crtcs) - return -EINVAL; - - switch (modeset->cmd) { - case _DRM_PRE_MODESET: - drm_legacy_vblank_pre_modeset(dev, pipe); - break; - case _DRM_POST_MODESET: - drm_legacy_vblank_post_modeset(dev, pipe); - break; - default: - return -EINVAL; - } - - return 0; -} - static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, u64 req_seq, union drm_wait_vblank *vblwait, @@ -1780,10 +1683,6 @@ static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe, static bool drm_wait_vblank_supported(struct drm_device *dev) { -#if IS_ENABLED(CONFIG_DRM_LEGACY) - if (unlikely(drm_core_check_feature(dev, DRIVER_LEGACY))) - return dev->irq_enabled; -#endif return drm_dev_has_vblank(dev); } diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c deleted file mode 100644 index 87c9fe55dec7..000000000000 --- a/drivers/gpu/drm/drm_vm.c +++ /dev/null @@ -1,665 +0,0 @@ -/* - * \file drm_vm.c - * Memory mapping for DRM - * - * \author Rickard E. (Rik) Faith <faith@valinux.com> - * \author Gareth Hughes <gareth@valinux.com> - */ - -/* - * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include <linux/export.h> -#include <linux/pci.h> -#include <linux/seq_file.h> -#include <linux/vmalloc.h> -#include <linux/pgtable.h> - -#if defined(__ia64__) -#include <linux/efi.h> -#include <linux/slab.h> -#endif -#include <linux/mem_encrypt.h> - -#include <drm/drm_device.h> -#include <drm/drm_drv.h> -#include <drm/drm_file.h> -#include <drm/drm_framebuffer.h> -#include <drm/drm_print.h> - -#include "drm_internal.h" -#include "drm_legacy.h" - -struct drm_vma_entry { - struct list_head head; - struct vm_area_struct *vma; - pid_t pid; -}; - -static void drm_vm_open(struct vm_area_struct *vma); -static void drm_vm_close(struct vm_area_struct *vma); - -static pgprot_t drm_io_prot(struct drm_local_map *map, - struct vm_area_struct *vma) -{ - pgprot_t tmp = vm_get_page_prot(vma->vm_flags); - -#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \ - defined(__mips__) || defined(__loongarch__) - if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING)) - tmp = pgprot_noncached(tmp); - else - tmp = pgprot_writecombine(tmp); -#elif defined(__ia64__) - if (efi_range_is_wc(vma->vm_start, vma->vm_end - - vma->vm_start)) - tmp = pgprot_writecombine(tmp); - else - tmp = pgprot_noncached(tmp); -#elif defined(__sparc__) || defined(__arm__) - tmp = pgprot_noncached(tmp); -#endif - return tmp; -} - -static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) -{ - pgprot_t tmp = vm_get_page_prot(vma->vm_flags); - -#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) - tmp = pgprot_noncached_wc(tmp); -#endif - return tmp; -} - -/* - * \c fault method for AGP virtual memory. - * - * \param vma virtual memory area. - * \param address access address. - * \return pointer to the page structure. - * - * Find the right map and if it's AGP memory find the real physical page to - * map, get the page, increment the use count and return it. - */ -#if IS_ENABLED(CONFIG_AGP) -static vm_fault_t drm_vm_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_local_map *map = NULL; - struct drm_map_list *r_list; - struct drm_hash_item *hash; - - /* - * Find the right map - */ - if (!dev->agp) - goto vm_fault_error; - - if (!dev->agp || !dev->agp->cant_use_aperture) - goto vm_fault_error; - - if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) - goto vm_fault_error; - - r_list = drm_hash_entry(hash, struct drm_map_list, hash); - map = r_list->map; - - if (map && map->type == _DRM_AGP) { - /* - * Using vm_pgoff as a selector forces us to use this unusual - * addressing scheme. 
- */ - resource_size_t offset = vmf->address - vma->vm_start; - resource_size_t baddr = map->offset + offset; - struct drm_agp_mem *agpmem; - struct page *page; - -#ifdef __alpha__ - /* - * Adjust to a bus-relative address - */ - baddr -= dev->hose->mem_space->start; -#endif - - /* - * It's AGP memory - find the real physical page to map - */ - list_for_each_entry(agpmem, &dev->agp->memory, head) { - if (agpmem->bound <= baddr && - agpmem->bound + agpmem->pages * PAGE_SIZE > baddr) - break; - } - - if (&agpmem->head == &dev->agp->memory) - goto vm_fault_error; - - /* - * Get the page, inc the use count, and return it - */ - offset = (baddr - agpmem->bound) >> PAGE_SHIFT; - page = agpmem->memory->pages[offset]; - get_page(page); - vmf->page = page; - - DRM_DEBUG - ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n", - (unsigned long long)baddr, - agpmem->memory->pages[offset], - (unsigned long long)offset, - page_count(page)); - return 0; - } -vm_fault_error: - return VM_FAULT_SIGBUS; /* Disallow mremap */ -} -#else -static vm_fault_t drm_vm_fault(struct vm_fault *vmf) -{ - return VM_FAULT_SIGBUS; -} -#endif - -/* - * \c nopage method for shared virtual memory. - * - * \param vma virtual memory area. - * \param address access address. - * \return pointer to the page structure. - * - * Get the mapping, find the real physical page to map, get the page, and - * return it. - */ -static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_local_map *map = vma->vm_private_data; - unsigned long offset; - unsigned long i; - struct page *page; - - if (!map) - return VM_FAULT_SIGBUS; /* Nothing allocated */ - - offset = vmf->address - vma->vm_start; - i = (unsigned long)map->handle + offset; - page = vmalloc_to_page((void *)i); - if (!page) - return VM_FAULT_SIGBUS; - get_page(page); - vmf->page = page; - - DRM_DEBUG("shm_fault 0x%lx\n", offset); - return 0; -} - -/* - * \c close method for shared virtual memory. - * - * \param vma virtual memory area. - * - * Deletes map information if we are the last - * person to close a mapping and it's not in the global maplist. - */ -static void drm_vm_shm_close(struct vm_area_struct *vma) -{ - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_vma_entry *pt, *temp; - struct drm_local_map *map; - struct drm_map_list *r_list; - int found_maps = 0; - - DRM_DEBUG("0x%08lx,0x%08lx\n", - vma->vm_start, vma->vm_end - vma->vm_start); - - map = vma->vm_private_data; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { - if (pt->vma->vm_private_data == map) - found_maps++; - if (pt->vma == vma) { - list_del(&pt->head); - kfree(pt); - } - } - - /* We were the only map that was found */ - if (found_maps == 1 && map->flags & _DRM_REMOVABLE) { - /* Check to see if we are in the maplist, if we are not, then - * we delete this mappings information. 
- */ - found_maps = 0; - list_for_each_entry(r_list, &dev->maplist, head) { - if (r_list->map == map) - found_maps++; - } - - if (!found_maps) { - switch (map->type) { - case _DRM_REGISTERS: - case _DRM_FRAME_BUFFER: - arch_phys_wc_del(map->mtrr); - iounmap(map->handle); - break; - case _DRM_SHM: - vfree(map->handle); - break; - case _DRM_AGP: - case _DRM_SCATTER_GATHER: - break; - case _DRM_CONSISTENT: - dma_free_coherent(dev->dev, - map->size, - map->handle, - map->offset); - break; - } - kfree(map); - } - } - mutex_unlock(&dev->struct_mutex); -} - -/* - * \c fault method for DMA virtual memory. - * - * \param address access address. - * \return pointer to the page structure. - * - * Determine the page number from the page offset and get it from drm_device_dma::pagelist. - */ -static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_device_dma *dma = dev->dma; - unsigned long offset; - unsigned long page_nr; - struct page *page; - - if (!dma) - return VM_FAULT_SIGBUS; /* Error */ - if (!dma->pagelist) - return VM_FAULT_SIGBUS; /* Nothing allocated */ - - offset = vmf->address - vma->vm_start; - /* vm_[pg]off[set] should be 0 */ - page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */ - page = virt_to_page((void *)dma->pagelist[page_nr]); - - get_page(page); - vmf->page = page; - - DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr); - return 0; -} - -/* - * \c fault method for scatter-gather virtual memory. - * - * \param address access address. - * \return pointer to the page structure. - * - * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. 
- */ -static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_local_map *map = vma->vm_private_data; - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_sg_mem *entry = dev->sg; - unsigned long offset; - unsigned long map_offset; - unsigned long page_offset; - struct page *page; - - if (!entry) - return VM_FAULT_SIGBUS; /* Error */ - if (!entry->pagelist) - return VM_FAULT_SIGBUS; /* Nothing allocated */ - - offset = vmf->address - vma->vm_start; - map_offset = map->offset - (unsigned long)dev->sg->virtual; - page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); - page = entry->pagelist[page_offset]; - get_page(page); - vmf->page = page; - - return 0; -} - -/** AGP virtual memory operations */ -static const struct vm_operations_struct drm_vm_ops = { - .fault = drm_vm_fault, - .open = drm_vm_open, - .close = drm_vm_close, -}; - -/** Shared virtual memory operations */ -static const struct vm_operations_struct drm_vm_shm_ops = { - .fault = drm_vm_shm_fault, - .open = drm_vm_open, - .close = drm_vm_shm_close, -}; - -/** DMA virtual memory operations */ -static const struct vm_operations_struct drm_vm_dma_ops = { - .fault = drm_vm_dma_fault, - .open = drm_vm_open, - .close = drm_vm_close, -}; - -/** Scatter-gather virtual memory operations */ -static const struct vm_operations_struct drm_vm_sg_ops = { - .fault = drm_vm_sg_fault, - .open = drm_vm_open, - .close = drm_vm_close, -}; - -static void drm_vm_open_locked(struct drm_device *dev, - struct vm_area_struct *vma) -{ - struct drm_vma_entry *vma_entry; - - DRM_DEBUG("0x%08lx,0x%08lx\n", - vma->vm_start, vma->vm_end - vma->vm_start); - - vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL); - if (vma_entry) { - vma_entry->vma = vma; - vma_entry->pid = current->pid; - list_add(&vma_entry->head, &dev->vmalist); - } -} - -static void drm_vm_open(struct vm_area_struct *vma) -{ - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - - mutex_lock(&dev->struct_mutex); - drm_vm_open_locked(dev, vma); - mutex_unlock(&dev->struct_mutex); -} - -static void drm_vm_close_locked(struct drm_device *dev, - struct vm_area_struct *vma) -{ - struct drm_vma_entry *pt, *temp; - - DRM_DEBUG("0x%08lx,0x%08lx\n", - vma->vm_start, vma->vm_end - vma->vm_start); - - list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { - if (pt->vma == vma) { - list_del(&pt->head); - kfree(pt); - break; - } - } -} - -/* - * \c close method for all virtual memory types. - * - * \param vma virtual memory area. - * - * Search the \p vma private data entry in drm_device::vmalist, unlink it, and - * free it. - */ -static void drm_vm_close(struct vm_area_struct *vma) -{ - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - - mutex_lock(&dev->struct_mutex); - drm_vm_close_locked(dev, vma); - mutex_unlock(&dev->struct_mutex); -} - -/* - * mmap DMA memory. - * - * \param file_priv DRM file private. - * \param vma virtual memory area. - * \return zero on success or a negative number on failure. - * - * Sets the virtual memory area operations structure to vm_dma_ops, the file - * pointer, and calls vm_open(). 
- */ -static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *priv = filp->private_data; - struct drm_device *dev; - struct drm_device_dma *dma; - unsigned long length = vma->vm_end - vma->vm_start; - - dev = priv->minor->dev; - dma = dev->dma; - DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", - vma->vm_start, vma->vm_end, vma->vm_pgoff); - - /* Length must match exact page count */ - if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { - return -EINVAL; - } - - if (!capable(CAP_SYS_ADMIN) && - (dma->flags & _DRM_DMA_USE_PCI_RO)) { - vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE); -#if defined(__i386__) || defined(__x86_64__) - pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; -#else - /* Ye gads this is ugly. With more thought - we could move this up higher and use - `protection_map' instead. */ - vma->vm_page_prot = - __pgprot(pte_val - (pte_wrprotect - (__pte(pgprot_val(vma->vm_page_prot))))); -#endif - } - - vma->vm_ops = &drm_vm_dma_ops; - - vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); - - drm_vm_open_locked(dev, vma); - return 0; -} - -static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev) -{ -#ifdef __alpha__ - return dev->hose->dense_mem_base; -#else - return 0; -#endif -} - -/* - * mmap DMA memory. - * - * \param file_priv DRM file private. - * \param vma virtual memory area. - * \return zero on success or a negative number on failure. - * - * If the virtual memory area has no offset associated with it then it's a DMA - * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist, - * checks that the restricted flag is not set, sets the virtual memory operations - * according to the mapping type and remaps the pages. Finally sets the file - * pointer and calls vm_open(). - */ -static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_local_map *map = NULL; - resource_size_t offset = 0; - struct drm_hash_item *hash; - - DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", - vma->vm_start, vma->vm_end, vma->vm_pgoff); - - if (!priv->authenticated) - return -EACCES; - - /* We check for "dma". On Apple's UniNorth, it's valid to have - * the AGP mapped at physical address 0 - * --BenH. - */ - if (!vma->vm_pgoff -#if IS_ENABLED(CONFIG_AGP) - && (!dev->agp - || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE) -#endif - ) - return drm_mmap_dma(filp, vma); - - if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) { - DRM_ERROR("Could not find map\n"); - return -EINVAL; - } - - map = drm_hash_entry(hash, struct drm_map_list, hash)->map; - if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) - return -EPERM; - - /* Check for valid size. */ - if (map->size < vma->vm_end - vma->vm_start) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) { - vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE); -#if defined(__i386__) || defined(__x86_64__) - pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; -#else - /* Ye gads this is ugly. With more thought - we could move this up higher and use - `protection_map' instead. 
*/ - vma->vm_page_prot = - __pgprot(pte_val - (pte_wrprotect - (__pte(pgprot_val(vma->vm_page_prot))))); -#endif - } - - switch (map->type) { -#if !defined(__arm__) - case _DRM_AGP: - if (dev->agp && dev->agp->cant_use_aperture) { - /* - * On some platforms we can't talk to bus dma address from the CPU, so for - * memory of type DRM_AGP, we'll deal with sorting out the real physical - * pages and mappings in fault() - */ -#if defined(__powerpc__) - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -#endif - vma->vm_ops = &drm_vm_ops; - break; - } - fallthrough; /* to _DRM_FRAME_BUFFER... */ -#endif - case _DRM_FRAME_BUFFER: - case _DRM_REGISTERS: - offset = drm_core_get_reg_ofs(dev); - vma->vm_page_prot = drm_io_prot(map, vma); - if (io_remap_pfn_range(vma, vma->vm_start, - (map->offset + offset) >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," - " offset = 0x%llx\n", - map->type, - vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset)); - - vma->vm_ops = &drm_vm_ops; - break; - case _DRM_CONSISTENT: - /* Consistent memory is really like shared memory. But - * it's allocated in a different way, so avoid fault */ - if (remap_pfn_range(vma, vma->vm_start, - page_to_pfn(virt_to_page(map->handle)), - vma->vm_end - vma->vm_start, vma->vm_page_prot)) - return -EAGAIN; - vma->vm_page_prot = drm_dma_prot(map->type, vma); - fallthrough; /* to _DRM_SHM */ - case _DRM_SHM: - vma->vm_ops = &drm_vm_shm_ops; - vma->vm_private_data = (void *)map; - break; - case _DRM_SCATTER_GATHER: - vma->vm_ops = &drm_vm_sg_ops; - vma->vm_private_data = (void *)map; - vma->vm_page_prot = drm_dma_prot(map->type, vma); - break; - default: - return -EINVAL; /* This should never happen. 
*/ - } - vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); - - drm_vm_open_locked(dev, vma); - return 0; -} - -int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->minor->dev; - int ret; - - if (drm_dev_is_unplugged(dev)) - return -ENODEV; - - mutex_lock(&dev->struct_mutex); - ret = drm_mmap_locked(filp, vma); - mutex_unlock(&dev->struct_mutex); - - return ret; -} -EXPORT_SYMBOL(drm_legacy_mmap); - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_vma_flush(struct drm_device *dev) -{ - struct drm_vma_entry *vma, *vma_temp; - - /* Clear vma list (only needed for legacy drivers) */ - list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { - list_del(&vma->head); - kfree(vma); - } -} -#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 34cdabc30b4f..35771fb4e85d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -11,9 +11,10 @@ #include <linux/component.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> -#include <linux/of_device.h> +#include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/property.h> #include <linux/regmap.h> #include <drm/drm_fourcc.h> @@ -103,7 +104,7 @@ struct gsc_context { unsigned int num_formats; void __iomem *regs; - const char **clk_names; + const char *const *clk_names; struct clk *clocks[GSC_MAX_CLOCKS]; int num_clocks; struct gsc_scaler sc; @@ -1217,7 +1218,7 @@ static const unsigned int gsc_tiled_formats[] = { static int gsc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct gsc_driverdata *driver_data; + const struct gsc_driverdata *driver_data; struct exynos_drm_ipp_formats *formats; struct gsc_context *ctx; int num_formats, ret, i, j; @@ -1226,7 +1227,7 @@ static int gsc_probe(struct platform_device *pdev) if (!ctx) return -ENOMEM; - driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev); + driver_data = device_get_match_data(dev); ctx->dev = dev; ctx->num_clocks = driver_data->num_clocks; ctx->clk_names = driver_data->clk_names; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 683fd8d3151c..ccc077b74d2d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -9,6 +9,7 @@ #include <linux/sync_file.h> #include <linux/uaccess.h> +#include <drm/drm_auth.h> #include <drm/drm_syncobj.h> #include "display/intel_frontbuffer.h" diff --git a/drivers/gpu/drm/imagination/pvr_cccb.h b/drivers/gpu/drm/imagination/pvr_cccb.h index f35b3d4c9575..943fe8f2c963 100644 --- a/drivers/gpu/drm/imagination/pvr_cccb.h +++ b/drivers/gpu/drm/imagination/pvr_cccb.h @@ -86,6 +86,7 @@ pvr_cccb_get_size_of_cmd_with_hdr(u32 cmd_size) /** * pvr_cccb_cmdseq_can_fit() - Check if a command sequence can fit in the CCCB. + * @pvr_cccb: Target Client CCB. * @size: Command sequence size. 
* * Returns: diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c index 8499becf4fbb..1704c0268589 100644 --- a/drivers/gpu/drm/imagination/pvr_device.c +++ b/drivers/gpu/drm/imagination/pvr_device.c @@ -105,12 +105,12 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev) sys_clk = devm_clk_get_optional(drm_dev->dev, "sys"); if (IS_ERR(sys_clk)) - return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk), + return dev_err_probe(drm_dev->dev, PTR_ERR(sys_clk), "failed to get sys clock\n"); mem_clk = devm_clk_get_optional(drm_dev->dev, "mem"); if (IS_ERR(mem_clk)) - return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk), + return dev_err_probe(drm_dev->dev, PTR_ERR(mem_clk), "failed to get mem clock\n"); pvr_dev->core_clk = core_clk; @@ -127,7 +127,7 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev) * This is called any time we receive a FW event. It iterates over all * active queues and calls pvr_queue_process() on them. */ -void pvr_device_process_active_queues(struct pvr_device *pvr_dev) +static void pvr_device_process_active_queues(struct pvr_device *pvr_dev) { struct pvr_queue *queue, *tmp_queue; LIST_HEAD(active_queues); @@ -286,8 +286,8 @@ pvr_request_firmware(struct pvr_device *pvr_dev) filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue", PVR_FW_VERSION_MAJOR); - if (IS_ERR(filename)) - return PTR_ERR(filename); + if (!filename) + return -ENOMEM; /* * This function takes a copy of &filename, meaning we can free our diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h index e07655fc65e8..2ca7e535799f 100644 --- a/drivers/gpu/drm/imagination/pvr_device.h +++ b/drivers/gpu/drm/imagination/pvr_device.h @@ -203,17 +203,29 @@ struct pvr_device { struct mutex lock; } queues; + /** + * @watchdog: Watchdog for communications with firmware. + */ struct { /** @work: Work item for watchdog callback. */ struct delayed_work work; - /** @old_kccb_cmds_executed: KCCB command execution count at last watchdog poll. */ + /** + * @old_kccb_cmds_executed: KCCB command execution count at last + * watchdog poll. + */ u32 old_kccb_cmds_executed; - /** @kccb_stall_count: Number of watchdog polls KCCB has been stalled for. */ + /** + * @kccb_stall_count: Number of watchdog polls KCCB has been + * stalled for. + */ u32 kccb_stall_count; } watchdog; + /** + * @kccb: Circular buffer for communications with firmware. + */ struct { /** @ccb: Kernel CCB. */ struct pvr_ccb ccb; @@ -225,8 +237,8 @@ struct pvr_device { struct pvr_fw_object *rtn_obj; /** - * @rtn: Pointer to CPU mapping of KCCB return slots. Must be accessed by - * READ_ONCE()/WRITE_ONCE(). + * @rtn: Pointer to CPU mapping of KCCB return slots. Must be + * accessed by READ_ONCE()/WRITE_ONCE(). */ u32 *rtn; @@ -293,14 +305,13 @@ struct pvr_file { /** * @pvr_dev: A reference to the powervr-specific wrapper for the - * associated device. Saves on repeated calls to - * to_pvr_device(). + * associated device. Saves on repeated calls to to_pvr_device(). */ struct pvr_device *pvr_dev; /** * @ctx_handles: Array of contexts belonging to this file. Array members - * are of type "struct pvr_context *". + * are of type "struct pvr_context *". * * This array is used to allocate handles returned to userspace. 
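The sys/mem clock hunks above fix a copy-and-paste slip: the probe error was derived from core_clk's PTR_ERR rather than from the clock that actually failed. The idiom being restored, roughly, with hypothetical bar_* names:

#include <linux/clk.h>
#include <linux/device.h>

static int bar_clk_init(struct device *dev, struct clk **sys_out)
{
	struct clk *sys_clk = devm_clk_get_optional(dev, "sys");

	/* Report the error of the clock we just asked for, not of one
	 * fetched earlier. */
	if (IS_ERR(sys_clk))
		return dev_err_probe(dev, PTR_ERR(sys_clk),
				     "failed to get sys clock\n");

	*sys_out = sys_clk;
	return 0;
}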
*/ diff --git a/drivers/gpu/drm/imagination/pvr_device_info.c b/drivers/gpu/drm/imagination/pvr_device_info.c index 11e6bef52ecd..d3301cde7d11 100644 --- a/drivers/gpu/drm/imagination/pvr_device_info.c +++ b/drivers/gpu/drm/imagination/pvr_device_info.c @@ -227,7 +227,8 @@ int pvr_device_info_set_features(struct pvr_device *pvr_dev, const u64 *features /* Verify no unsupported values in the bitmask. */ if (features_size > mapping_max_size) { drm_warn(from_pvr_device(pvr_dev), "Unsupported features in firmware image"); - } else if (features_size == mapping_max_size && (mapping_max & 63)) { + } else if (features_size == mapping_max_size && + ((mapping_max & 63) != 0)) { u64 invalid_mask = ~0ull << (mapping_max & 63); if (features[features_size - 1] & invalid_mask) diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c index c61fd417edcb..5e51bc980751 100644 --- a/drivers/gpu/drm/imagination/pvr_free_list.c +++ b/drivers/gpu/drm/imagination/pvr_free_list.c @@ -255,7 +255,7 @@ pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list, if (!num_pages) break; - }; + } /* clang-format on */ /* Make sure our free_list update is flushed. */ diff --git a/drivers/gpu/drm/imagination/pvr_fw.h b/drivers/gpu/drm/imagination/pvr_fw.h index 5cd3ef08d82b..b7966bd574a9 100644 --- a/drivers/gpu/drm/imagination/pvr_fw.h +++ b/drivers/gpu/drm/imagination/pvr_fw.h @@ -481,7 +481,8 @@ pvr_fw_object_unmap_and_destroy(struct pvr_fw_object *fw_obj) } /** - * pvr_fw_get_dma_addr() - Get DMA address for given offset in firmware object + * pvr_fw_object_get_dma_addr() - Get DMA address for given offset in firmware + * object. * @fw_obj: Pointer to object to lookup address in. * @offset: Offset within object to lookup address at. * @dma_addr_out: Pointer to location to store DMA address. diff --git a/drivers/gpu/drm/imagination/pvr_fw_info.h b/drivers/gpu/drm/imagination/pvr_fw_info.h index ad5d44a3067a..c3639440610e 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_info.h +++ b/drivers/gpu/drm/imagination/pvr_fw_info.h @@ -122,13 +122,13 @@ struct pvr_fw_layout_entry { * struct pvr_fw_device_info_header - Device information header. */ struct pvr_fw_device_info_header { - /* BRN Mask size (in u64s). */ + /** @brn_mask_size: BRN mask size (in u64s). */ u64 brn_mask_size; - /* ERN Mask size (in u64s). */ + /** @ern_mask_size: ERN mask size (in u64s). */ u64 ern_mask_size; - /* Feature Mask size (in u64s). */ + /** @feature_mask_size: Feature mask size (in u64s). */ u64 feature_mask_size; - /* Feature Parameter size (in u64s). */ + /** @feature_param_size: Feature parameter size (in u64s). 
*/ u64 feature_param_size; }; diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c index 119934c36184..c39beb70c317 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_meta.c +++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c @@ -4,6 +4,7 @@ #include "pvr_device.h" #include "pvr_fw.h" #include "pvr_fw_info.h" +#include "pvr_fw_meta.h" #include "pvr_gem.h" #include "pvr_rogue_cr_defs.h" #include "pvr_rogue_meta.h" diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c index 87a42fb6ace6..7159fc479001 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_trace.c +++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c @@ -121,6 +121,8 @@ void pvr_fw_trace_fini(struct pvr_device *pvr_dev) pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj); } +#if defined(CONFIG_DEBUG_FS) + /** * update_logtype() - Send KCCB command to trigger FW to update logtype * @pvr_dev: Target PowerVR device @@ -165,52 +167,6 @@ err_up_read: return err; } -#if defined(CONFIG_DEBUG_FS) - -static int fw_trace_group_mask_show(struct seq_file *m, void *data) -{ - struct pvr_device *pvr_dev = m->private; - - seq_printf(m, "%08x\n", pvr_dev->fw_dev.fw_trace.group_mask); - - return 0; -} - -static int fw_trace_group_mask_open(struct inode *inode, struct file *file) -{ - return single_open(file, fw_trace_group_mask_show, inode->i_private); -} - -static ssize_t fw_trace_group_mask_write(struct file *file, const char __user *ubuf, size_t len, - loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct pvr_device *pvr_dev = m->private; - u32 new_group_mask; - int err; - - err = kstrtouint_from_user(ubuf, len, 0, &new_group_mask); - if (err) - return err; - - err = update_logtype(pvr_dev, new_group_mask); - if (err) - return err; - - pvr_dev->fw_dev.fw_trace.group_mask = new_group_mask; - - return (ssize_t)len; -} - -static const struct file_operations pvr_fw_trace_group_mask_fops = { - .owner = THIS_MODULE, - .open = fw_trace_group_mask_open, - .read = seq_read, - .write = fw_trace_group_mask_write, - .llseek = default_llseek, - .release = single_release, -}; - struct pvr_fw_trace_seq_data { /** @buffer: Pointer to copy of trace data. */ u32 *buffer; diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.h b/drivers/gpu/drm/imagination/pvr_hwrt.h index 76992948d047..676070b20c3b 100644 --- a/drivers/gpu/drm/imagination/pvr_hwrt.h +++ b/drivers/gpu/drm/imagination/pvr_hwrt.h @@ -64,6 +64,7 @@ struct pvr_hwrt_dataset { /** @common_fw_obj: FW object representing common FW-side structure. */ struct pvr_fw_object *common_fw_obj; + /** @common: Common HWRT data. */ struct rogue_fwif_hwrtdata_common common; /** @data: HWRT data structures belonging to this set. */ diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c index 9d0812710295..04139da6c04d 100644 --- a/drivers/gpu/drm/imagination/pvr_job.c +++ b/drivers/gpu/drm/imagination/pvr_job.c @@ -378,7 +378,7 @@ prepare_job_syncs(struct pvr_file *pvr_file, /** * prepare_job_syncs_for_each() - Prepare all sync objects for an array of jobs. - * @file: PowerVR file. + * @pvr_file: PowerVR file. * @job_data: Array of precreated jobs and their sync_ops. * @job_count: Number of jobs. * @signal_array: xarray to receive signal sync objects. @@ -696,8 +696,6 @@ pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count) * @pvr_dev: Target PowerVR device. * @pvr_file: Pointer to PowerVR file structure. * @args: Ioctl args. 
- * @job_count: Number of jobs in @jobs_args. On error this will be updated - * with the index into @jobs_args where the error occurred. * * This initial implementation is entirely synchronous; on return the GPU will * be idle. This will not be the case for future implementations. diff --git a/drivers/gpu/drm/imagination/pvr_mmu.c b/drivers/gpu/drm/imagination/pvr_mmu.c index b71d30e5f380..c8562bfc0dcd 100644 --- a/drivers/gpu/drm/imagination/pvr_mmu.c +++ b/drivers/gpu/drm/imagination/pvr_mmu.c @@ -335,8 +335,9 @@ pvr_mmu_backing_page_fini(struct pvr_mmu_backing_page *page) /** * pvr_mmu_backing_page_sync() - Flush a MMU backing page from the CPU to the - * device. + * device. * @page: Target backing page. + * @flags: MMU flush flags. Must be one of %PVR_MMU_SYNC_LEVEL_*_FLAGS. * * .. caution:: * diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c index d65c3fbedf5a..5ed9c98fb599 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.c +++ b/drivers/gpu/drm/imagination/pvr_queue.c @@ -1292,7 +1292,7 @@ struct pvr_queue *pvr_queue_create(struct pvr_context *ctx, goto err_release_ufo; err = drm_sched_entity_init(&queue->entity, - DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_KERNEL, &sched, 1, &ctx->faulty); if (err) goto err_sched_fini; diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h index b5ce2c742150..e06ced69302f 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.h +++ b/drivers/gpu/drm/imagination/pvr_queue.h @@ -50,7 +50,7 @@ struct pvr_queue_cccb_fence_ctx { */ struct pvr_job *job; - /** @lock: Lock protecting access to the job object. */ + /** @job_lock: Lock protecting access to the job object. */ struct mutex job_lock; }; @@ -114,7 +114,7 @@ struct pvr_queue { } timeline_ufo; /** - * last_queued_job_scheduled_fence: The scheduled fence of the last + * @last_queued_job_scheduled_fence: The scheduled fence of the last * job queued to this queue. * * We use it to insert frag -> geom dependencies when issuing combined diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h index 571954182f33..56e11009e123 100644 --- a/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h +++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h @@ -497,7 +497,7 @@ static const struct rogue_km_stid_fmt stid_fmts[] = { { ROGUE_FW_LOG_CREATESFID(213, ROGUE_FW_GROUP_MAIN, 1), "Safety Watchdog threshold period set to 0x%x clock cycles" }, { ROGUE_FW_LOG_CREATESFID(214, ROGUE_FW_GROUP_MAIN, 0), - "MTS Safety Event trigged by the safety watchdog." }, + "MTS Safety Event triggered by the safety watchdog." 
}, { ROGUE_FW_LOG_CREATESFID(215, ROGUE_FW_GROUP_MAIN, 3), "DM%d USC tasks range limit 0 - %d, stride %d" }, { ROGUE_FW_LOG_CREATESFID(216, ROGUE_FW_GROUP_MAIN, 1), @@ -1114,7 +1114,7 @@ static const struct rogue_km_stid_fmt stid_fmts[] = { { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_SPM, 2), "3DMemFree matches freelist 0x%08x (FL type = %u)" }, { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_SPM, 0), - "Raise the 3DMemFreeDedected flag" }, + "Raise the 3DMemFreeDetected flag" }, { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_SPM, 1), "Wait for pending grow on Freelist 0x%08x" }, { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_SPM, 1), diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 3ad1366294b9..f42345fbe4bf 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -42,7 +42,7 @@ struct pvr_vm_context { /** @mmu_ctx: The context for binding to physical memory. */ struct pvr_mmu_context *mmu_ctx; - /** @gpuva_mgr: GPUVA manager object associated with this context. */ + /** @gpuvm_mgr: GPUVM object associated with this context. */ struct drm_gpuvm gpuvm_mgr; /** @lock: Global lock on this VM. */ @@ -64,6 +64,12 @@ struct pvr_vm_context { struct drm_gem_object dummy_gem; }; +static inline +struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm) +{ + return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr); +} + struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx) { if (vm_ctx) @@ -108,12 +114,6 @@ struct pvr_vm_gpuva { struct drm_gpuva base; }; -static __always_inline -struct pvr_vm_gpuva *to_pvr_vm_gpuva(struct drm_gpuva *gpuva) -{ - return container_of(gpuva, struct pvr_vm_gpuva, base); -} - enum pvr_vm_bind_type { PVR_VM_BIND_TYPE_MAP, PVR_VM_BIND_TYPE_UNMAP, @@ -224,6 +224,7 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op, struct pvr_gem_object *pvr_obj, u64 offset, u64 device_addr, u64 size) { + struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj); const bool is_user = vm_ctx == vm_ctx->pvr_dev->kernel_vm_ctx; const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj); struct sg_table *sgt; @@ -238,17 +239,18 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op, return -EINVAL; } - if (!pvr_device_addr_and_size_are_valid(device_addr, size) || + if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) || offset & ~PAGE_MASK || size & ~PAGE_MASK || offset >= pvr_obj_size || offset_plus_size > pvr_obj_size) return -EINVAL; bind_op->type = PVR_VM_BIND_TYPE_MAP; - bind_op->gpuvm_bo = drm_gpuvm_bo_create(&vm_ctx->gpuvm_mgr, - gem_from_pvr_gem(pvr_obj)); - if (!bind_op->gpuvm_bo) - return -ENOMEM; + dma_resv_lock(obj->resv, NULL); + bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj); + dma_resv_unlock(obj->resv); + if (IS_ERR(bind_op->gpuvm_bo)) + return PTR_ERR(bind_op->gpuvm_bo); bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL); bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL); @@ -293,7 +295,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op, { int err; - if (!pvr_device_addr_and_size_are_valid(device_addr, size)) + if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size)) return -EINVAL; bind_op->type = PVR_VM_BIND_TYPE_UNMAP; @@ -325,48 +327,6 @@ err_bind_op_fini: return err; } -static int -pvr_vm_bind_op_lock_resvs(struct drm_exec *exec, struct pvr_vm_bind_op *bind_op) -{ - drm_exec_until_all_locked(exec) { - struct drm_gem_object *r_obj = &bind_op->vm_ctx->dummy_gem; - struct drm_gpuvm 
*gpuvm = &bind_op->vm_ctx->gpuvm_mgr; - struct pvr_gem_object *pvr_obj = bind_op->pvr_obj; - struct drm_gpuvm_bo *gpuvm_bo; - - /* Acquire lock on the vm_context's reserve object. */ - int err = drm_exec_lock_obj(exec, r_obj); - - drm_exec_retry_on_contention(exec); - if (err) - return err; - - /* Acquire lock on all BOs in the context. */ - list_for_each_entry(gpuvm_bo, &gpuvm->extobj.list, - list.entry.extobj) { - err = drm_exec_lock_obj(exec, gpuvm_bo->obj); - - drm_exec_retry_on_contention(exec); - if (err) - return err; - } - - /* Unmap operations don't have an object to lock. */ - if (!pvr_obj) - break; - - /* Acquire lock on the GEM being mapped. */ - err = drm_exec_lock_obj(exec, - gem_from_pvr_gem(bind_op->pvr_obj)); - - drm_exec_retry_on_contention(exec); - if (err) - return err; - } - - return 0; -} - /** * pvr_vm_gpuva_map() - Insert a mapping into a memory context. * @op: gpuva op containing the remap details. @@ -503,6 +463,7 @@ pvr_device_addr_is_valid(u64 device_addr) /** * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual * address and associated size are both valid. + * @vm_ctx: Target VM context. * @device_addr: Virtual device address to test. * @size: Size of the range based at @device_addr to test. * @@ -521,16 +482,18 @@ pvr_device_addr_is_valid(u64 device_addr) * * %false otherwise. */ bool -pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size) +pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx, + u64 device_addr, u64 size) { return pvr_device_addr_is_valid(device_addr) && + drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) && size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 && (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE); } -void pvr_gpuvm_free(struct drm_gpuvm *gpuvm) +static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm) { - + kfree(to_pvr_vm_context(gpuvm)); } static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = { @@ -650,12 +613,11 @@ pvr_vm_context_release(struct kref *ref_count) WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, vm_ctx->gpuvm_mgr.mm_range)); - drm_gpuvm_put(&vm_ctx->gpuvm_mgr); pvr_mmu_context_destroy(vm_ctx->mmu_ctx); drm_gem_private_object_fini(&vm_ctx->dummy_gem); mutex_destroy(&vm_ctx->lock); - kfree(vm_ctx); + drm_gpuvm_put(&vm_ctx->gpuvm_mgr); } /** @@ -721,6 +683,20 @@ void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file) } } +static int +pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec) +{ + struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv; + struct pvr_gem_object *pvr_obj = bind_op->pvr_obj; + + /* Unmap operations don't have an object to lock. */ + if (!pvr_obj) + return 0; + + /* Acquire lock on the GEM being mapped. */ + return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj)); +} + /** * pvr_vm_map() - Map a section of physical memory into a section of * device-virtual memory. 
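(Aside, not part of the patch: the pvr_vm.c hunks above replace the driver's
open-coded drm_exec loop, pvr_vm_bind_op_lock_resvs(), with the GPUVM exec
helpers. The core now locks the VM's reservation object and every
external-object resv it tracks, and the driver only supplies an "extra"
callback for the one object the VM does not yet know about. A minimal sketch
of that pattern, assuming only the drm_gpuvm_exec API as used in this diff;
the example_* names are hypothetical:

/* Illustrative sketch, not from the patch; assumes <drm/drm_gpuvm.h>
 * and <drm/drm_exec.h>.
 */
static int example_lock_extra(struct drm_gpuvm_exec *vm_exec)
{
	struct drm_gem_object *obj = vm_exec->extra.priv;

	/* Unmap-style operations may have no backing GEM to lock. */
	if (!obj)
		return 0;

	return drm_exec_lock_obj(&vm_exec->exec, obj);
}

static int example_bind(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj)
{
	struct drm_gpuvm_exec vm_exec = {
		.vm = gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
			 DRM_EXEC_IGNORE_DUPLICATES,
		.num_fences = 1,
		.extra = {
			.fn = example_lock_extra,
			.priv = obj,
		},
	};
	int err;

	/* Locks the VM resv and all tracked extobj resvs, retrying on
	 * contention, and invokes .extra.fn within the same acquisition
	 * loop.
	 */
	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		return err;

	/* ... apply the map/unmap while every reservation is held ... */

	drm_gpuvm_exec_unlock(&vm_exec);
	return 0;
}

The same conversion shows up again in the nouveau_exec.c hunks below, there
paired with drm_gpuvm_exec_validate().)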
@@ -748,7 +724,15 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset, u64 device_addr, u64 size) { struct pvr_vm_bind_op bind_op = {0}; - struct drm_exec exec; + struct drm_gpuvm_exec vm_exec = { + .vm = &vm_ctx->gpuvm_mgr, + .flags = DRM_EXEC_INTERRUPTIBLE_WAIT | + DRM_EXEC_IGNORE_DUPLICATES, + .extra = { + .fn = pvr_vm_lock_extra, + .priv = &bind_op, + }, + }; int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj, pvr_obj_offset, device_addr, @@ -757,18 +741,15 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, if (err) return err; - drm_exec_init(&exec, - DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES); - pvr_gem_object_get(pvr_obj); - err = pvr_vm_bind_op_lock_resvs(&exec, &bind_op); + err = drm_gpuvm_exec_lock(&vm_exec); if (err) goto err_cleanup; err = pvr_vm_bind_op_exec(&bind_op); - drm_exec_fini(&exec); + drm_gpuvm_exec_unlock(&vm_exec); err_cleanup: pvr_vm_bind_op_fini(&bind_op); @@ -794,24 +775,28 @@ int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size) { struct pvr_vm_bind_op bind_op = {0}; - struct drm_exec exec; + struct drm_gpuvm_exec vm_exec = { + .vm = &vm_ctx->gpuvm_mgr, + .flags = DRM_EXEC_INTERRUPTIBLE_WAIT | + DRM_EXEC_IGNORE_DUPLICATES, + .extra = { + .fn = pvr_vm_lock_extra, + .priv = &bind_op, + }, + }; int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr, size); - if (err) return err; - drm_exec_init(&exec, - DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES); - - err = pvr_vm_bind_op_lock_resvs(&exec, &bind_op); + err = drm_gpuvm_exec_lock(&vm_exec); if (err) goto err_cleanup; err = pvr_vm_bind_op_exec(&bind_op); - drm_exec_fini(&exec); + drm_gpuvm_exec_unlock(&vm_exec); err_cleanup: pvr_vm_bind_op_fini(&bind_op); diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h index cf8b97553dc8..f2a6463f2b05 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.h +++ b/drivers/gpu/drm/imagination/pvr_vm.h @@ -29,7 +29,8 @@ struct drm_exec; /* Functions defined in pvr_vm.c */ bool pvr_device_addr_is_valid(u64 device_addr); -bool pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size); +bool pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx, + u64 device_addr, u64 size); struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context); diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c index 7268cf6e630b..2bc7181a4c3e 100644 --- a/drivers/gpu/drm/imagination/pvr_vm_mips.c +++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c @@ -57,6 +57,7 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev) PAGE_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, mips_data->pt_dma_addr[page_nr])) { err = -ENOMEM; + __free_page(mips_data->pt_pages[page_nr]); goto err_free_pages; } } @@ -79,13 +80,11 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev) return 0; err_free_pages: - for (; page_nr >= 0; page_nr--) { - if (mips_data->pt_dma_addr[page_nr]) - dma_unmap_page(from_pvr_device(pvr_dev)->dev, - mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE); + while (--page_nr >= 0) { + dma_unmap_page(from_pvr_device(pvr_dev)->dev, + mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE); - if (mips_data->pt_pages[page_nr]) - __free_page(mips_data->pt_pages[page_nr]); + __free_page(mips_data->pt_pages[page_nr]); } return err; diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c index 989eca32d325..53840ab054c7 100644 
--- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c @@ -12,8 +12,10 @@ #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_graph.h> +#include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/videodev2.h> @@ -617,7 +619,6 @@ static int imx_ldb_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev); struct device_node *child; struct imx_ldb *imx_ldb; int dual; @@ -638,9 +639,7 @@ static int imx_ldb_probe(struct platform_device *pdev) regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0); imx_ldb->dev = dev; - - if (of_id) - imx_ldb->lvds_mux = of_id->data; + imx_ldb->lvds_mux = device_get_match_data(dev); dual = of_property_read_bool(np, "fsl,dual-channel"); if (dual) diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c index 0902983374d0..43ddf3a9810b 100644 --- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c +++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c @@ -342,21 +342,12 @@ static const struct drm_mode_config_helper_funcs imx_lcdc_mode_config_helpers = .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, }; -static void imx_lcdc_release(struct drm_device *drm) -{ - struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(drm); - - drm_kms_helper_poll_fini(drm); - kfree(lcdc); -} - DEFINE_DRM_GEM_DMA_FOPS(imx_lcdc_drm_fops); static struct drm_driver imx_lcdc_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &imx_lcdc_drm_fops, DRM_GEM_DMA_DRIVER_OPS_VMAP, - .release = imx_lcdc_release, .name = "imx-lcdc", .desc = "i.MX LCDC driver", .date = "20200716", diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig index df6946d505fa..8e59753e532d 100644 --- a/drivers/gpu/drm/loongson/Kconfig +++ b/drivers/gpu/drm/loongson/Kconfig @@ -3,6 +3,7 @@ config DRM_LOONGSON tristate "DRM support for Loongson Graphics" depends on DRM && PCI && MMU + depends on LOONGARCH || MIPS || COMPILE_TEST select DRM_KMS_HELPER select DRM_TTM select I2C diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c index 0d5094633222..d227a2c1dcf1 100644 --- a/drivers/gpu/drm/loongson/lsdc_plane.c +++ b/drivers/gpu/drm/loongson/lsdc_plane.c @@ -9,7 +9,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> -#include <drm/drm_plane_helper.h> #include "lsdc_drv.h" #include "lsdc_regs.h" diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 4252e3839fbc..2bfcb222e353 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -347,7 +347,7 @@ struct msm_gpu_perfcntr { * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some * cases, so we don't use it (no need for kernel generated jobs). 
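 *
 * (Not part of the patch, inferred from the scheduler hunks elsewhere in
 * this diff: the enum now orders KERNEL = 0 < HIGH < NORMAL < LOW, so the
 * rewritten expression below spans HIGH..LOW and still evaluates to
 * 1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH = 3 run queues,
 * matching the old 1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN.)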
*/ -#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN) +#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH) /** * struct msm_file_private - per-drm_file context diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 625c1bfc4173..b483ef48216a 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -11,9 +11,10 @@ #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/io.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/pm_runtime.h> #include <drm/drm_atomic_helper.h> @@ -346,18 +347,13 @@ MODULE_DEVICE_TABLE(of, mxsfb_dt_ids); static int mxsfb_probe(struct platform_device *pdev) { struct drm_device *drm; - const struct of_device_id *of_id = - of_match_device(mxsfb_dt_ids, &pdev->dev); int ret; - if (!pdev->dev.of_node) - return -ENODEV; - drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev); if (IS_ERR(drm)) return PTR_ERR(drm); - ret = mxsfb_load(drm, of_id->data); + ret = mxsfb_load(drm, device_get_match_data(&pdev->dev)); if (ret) goto err_free; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 2edd7bb13fae..a04156ca8390 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -127,21 +127,14 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, { struct nouveau_abi16_ntfy *ntfy, *temp; - /* When a client exits without waiting for it's queued up jobs to - * finish it might happen that we fault the channel. This is due to - * drm_file_free() calling drm_gem_release() before the postclose() - * callback. Hence, we can't tear down this scheduler entity before - * uvmm mappings are unmapped. Currently, we can't detect this case. - * - * However, this should be rare and harmless, since the channel isn't - * needed anymore. - */ - nouveau_sched_entity_fini(&chan->sched_entity); + /* Cancel all jobs from the entity's queue. 
*/ + drm_sched_entity_fini(&chan->sched.entity); - /* wait for all activity to stop before cleaning up */ if (chan->chan) nouveau_channel_idle(chan->chan); + nouveau_sched_fini(&chan->sched); + /* cleanup notifier state */ list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) { nouveau_abi16_ntfy_fini(chan, ntfy); @@ -344,8 +337,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (ret) goto done; - ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched, - drm->sched_wq); + ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq, + chan->chan->dma.ib_max); if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h index 9f538486c10e..1f5e243c0c75 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.h +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h @@ -26,7 +26,7 @@ struct nouveau_abi16_chan { struct nouveau_bo *ntfy; struct nouveau_vma *ntfy_vma; struct nvkm_mm heap; - struct nouveau_sched_entity sched_entity; + struct nouveau_sched sched; }; struct nouveau_abi16 { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 7afad86da64b..b7dda486a7ea 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1061,17 +1061,18 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); + struct drm_gem_object *obj = &bo->base; struct ttm_resource *old_reg = bo->resource; struct nouveau_drm_tile *new_tile = NULL; int ret = 0; - if (new_reg->mem_type == TTM_PL_TT) { ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg); if (ret) return ret; } + drm_gpuvm_bo_gem_evict(obj, evict); nouveau_bo_move_ntfy(bo, new_reg); ret = ttm_bo_wait_ctx(bo, ctx); if (ret) @@ -1136,6 +1137,7 @@ out: out_ntfy: if (ret) { nouveau_bo_move_ntfy(bo, bo->resource); + drm_gpuvm_bo_gem_evict(obj, !evict); } return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index f603eaef1560..6f6c31a9937b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -201,9 +201,9 @@ nouveau_cli_fini(struct nouveau_cli *cli) WARN_ON(!list_empty(&cli->worker)); usif_client_fini(cli); + nouveau_sched_fini(&cli->sched); if (uvmm) nouveau_uvmm_fini(uvmm); - nouveau_sched_entity_fini(&cli->sched_entity); nouveau_vmm_fini(&cli->svm); nouveau_vmm_fini(&cli->vmm); nvif_mmu_dtor(&cli->mmu); @@ -310,8 +310,17 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, cli->mem = &mems[ret]; - ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched, - drm->sched_wq); + /* Don't pass in the (shared) sched_wq in order to let + * nouveau_sched_init() create a dedicated one for VM_BIND jobs. + * + * This is required to ensure that for VM_BIND jobs free_job() work and + * run_job() work can always run concurrently and hence, free_job() work + * can never stall run_job() work. For EXEC jobs we don't have this + * requirement, since EXEC job's free_job() does not require to take any + * locks which indirectly or directly are held for allocations + * elsewhere. 
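+ *
+ * (Not part of the patch: passing NULL as the workqueue is what requests
+ * the dedicated queue; see nouveau_sched_init() further down, which calls
+ * alloc_workqueue() itself when no shared queue is supplied and destroys
+ * it again in nouveau_sched_fini().)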
+ */ + ret = nouveau_sched_init(&cli->sched, drm, NULL, 1); if (ret) goto done; @@ -582,13 +591,16 @@ nouveau_drm_device_init(struct drm_device *dev) nvif_parent_ctor(&nouveau_parent, &drm->parent); drm->master.base.object.parent = &drm->parent; - ret = nouveau_sched_init(drm); - if (ret) + drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0, + WQ_MAX_ACTIVE); + if (!drm->sched_wq) { + ret = -ENOMEM; goto fail_alloc; + } ret = nouveau_cli_init(drm, "DRM-master", &drm->master); if (ret) - goto fail_sched; + goto fail_wq; ret = nouveau_cli_init(drm, "DRM", &drm->client); if (ret) @@ -658,8 +670,8 @@ fail_ttm: nouveau_cli_fini(&drm->client); fail_master: nouveau_cli_fini(&drm->master); -fail_sched: - nouveau_sched_fini(drm); +fail_wq: + destroy_workqueue(drm->sched_wq); fail_alloc: nvif_parent_dtor(&drm->parent); kfree(drm); @@ -711,10 +723,9 @@ nouveau_drm_device_fini(struct drm_device *dev) } mutex_unlock(&drm->clients_lock); - nouveau_sched_fini(drm); - nouveau_cli_fini(&drm->client); nouveau_cli_fini(&drm->master); + destroy_workqueue(drm->sched_wq); nvif_parent_dtor(&drm->parent); mutex_destroy(&drm->clients_lock); kfree(drm); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 7f7051df84a6..8a6d94c8b163 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -98,7 +98,7 @@ struct nouveau_cli { bool disabled; } uvmm; - struct nouveau_sched_entity sched_entity; + struct nouveau_sched sched; const struct nvif_mclass *mem; @@ -258,6 +258,9 @@ struct nouveau_drm { u64 context_base; } *runl; + /* Workqueue used for channel schedulers. */ + struct workqueue_struct *sched_wq; + /* context for accelerated drm-internal operations */ struct nouveau_channel *cechan; struct nouveau_channel *channel; @@ -298,10 +301,6 @@ struct nouveau_drm { struct mutex lock; bool component_registered; } audio; - - struct drm_gpu_scheduler sched; - struct workqueue_struct *sched_wq; - }; static inline struct nouveau_drm * diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index 9a5ef574744b..bc5d71b79ab2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: MIT -#include <drm/drm_exec.h> - #include "nouveau_drv.h" #include "nouveau_gem.h" #include "nouveau_mem.h" @@ -86,14 +84,12 @@ */ static int -nouveau_exec_job_submit(struct nouveau_job *job) +nouveau_exec_job_submit(struct nouveau_job *job, + struct drm_gpuvm_exec *vme) { struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job); struct nouveau_cli *cli = job->cli; struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli); - struct drm_exec *exec = &job->exec; - struct drm_gem_object *obj; - unsigned long index; int ret; /* Create a new fence, but do not emit yet. 
*/ @@ -102,52 +98,29 @@ nouveau_exec_job_submit(struct nouveau_job *job) return ret; nouveau_uvmm_lock(uvmm); - drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT | - DRM_EXEC_IGNORE_DUPLICATES); - drm_exec_until_all_locked(exec) { - struct drm_gpuva *va; - - drm_gpuvm_for_each_va(va, &uvmm->base) { - if (unlikely(va == &uvmm->base.kernel_alloc_node)) - continue; - - ret = drm_exec_prepare_obj(exec, va->gem.obj, 1); - drm_exec_retry_on_contention(exec); - if (ret) - goto err_uvmm_unlock; - } + ret = drm_gpuvm_exec_lock(vme); + if (ret) { + nouveau_uvmm_unlock(uvmm); + return ret; } nouveau_uvmm_unlock(uvmm); - drm_exec_for_each_locked_object(exec, index, obj) { - struct nouveau_bo *nvbo = nouveau_gem_object(obj); - - ret = nouveau_bo_validate(nvbo, true, false); - if (ret) - goto err_exec_fini; + ret = drm_gpuvm_exec_validate(vme); + if (ret) { + drm_gpuvm_exec_unlock(vme); + return ret; } return 0; - -err_uvmm_unlock: - nouveau_uvmm_unlock(uvmm); -err_exec_fini: - drm_exec_fini(exec); - return ret; - } static void -nouveau_exec_job_armed_submit(struct nouveau_job *job) +nouveau_exec_job_armed_submit(struct nouveau_job *job, + struct drm_gpuvm_exec *vme) { - struct drm_exec *exec = &job->exec; - struct drm_gem_object *obj; - unsigned long index; - - drm_exec_for_each_locked_object(exec, index, obj) - dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage); - - drm_exec_fini(exec); + drm_gpuvm_exec_resv_add_fence(vme, job->done_fence, + job->resv_usage, job->resv_usage); + drm_gpuvm_exec_unlock(vme); } static struct dma_fence * @@ -192,6 +165,7 @@ nouveau_exec_job_free(struct nouveau_job *job) { struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job); + nouveau_job_done(job); nouveau_job_free(job); kfree(exec_job->fence); @@ -211,8 +185,6 @@ nouveau_exec_job_timeout(struct nouveau_job *job) NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n", chan->chid); - nouveau_sched_entity_fini(job->entity); - return DRM_GPU_SCHED_STAT_NOMINAL; } @@ -259,10 +231,12 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob, } } + args.file_priv = __args->file_priv; job->chan = __args->chan; - args.sched_entity = __args->sched_entity; - args.file_priv = __args->file_priv; + args.sched = __args->sched; + /* Plus one to account for the HW fence. 
*/ + args.credits = job->push.count + 1; args.in_sync.count = __args->in_sync.count; args.in_sync.s = __args->in_sync.s; @@ -415,7 +389,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev, if (ret) goto out; - args.sched_entity = &chan16->sched_entity; + args.sched = &chan16->sched; args.file_priv = file_priv; args.chan = chan; diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h index 5488d337bcc0..9b3b151facfd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.h +++ b/drivers/gpu/drm/nouveau/nouveau_exec.h @@ -3,16 +3,12 @@ #ifndef __NOUVEAU_EXEC_H__ #define __NOUVEAU_EXEC_H__ -#include <drm/drm_exec.h> - #include "nouveau_drv.h" #include "nouveau_sched.h" struct nouveau_exec_job_args { struct drm_file *file_priv; - struct nouveau_sched_entity *sched_entity; - - struct drm_exec exec; + struct nouveau_sched *sched; struct nouveau_channel *chan; struct { diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index e03fd2bc8a11..dd98f6910f9c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -12,30 +12,28 @@ #include "nouveau_abi16.h" #include "nouveau_sched.h" -/* FIXME - * - * We want to make sure that jobs currently executing can't be deferred by - * other jobs competing for the hardware. Otherwise we might end up with job - * timeouts just because of too many clients submitting too many jobs. We don't - * want jobs to time out because of system load, but because of the job being - * too bulky. - * - * For now allow for up to 16 concurrent jobs in flight until we know how many - * rings the hardware can process in parallel. - */ -#define NOUVEAU_SCHED_HW_SUBMISSIONS 16 #define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000 +/* Starts at 0, since the DRM scheduler interprets those parameters as (initial) + * index to the run-queue array. 
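+ *
+ * (Not part of the patch: an entity initialized with priority p lands in
+ * sched->sched_rq[p], as the amdgpu hunk above iterates over, so a
+ * scheduler with a single run-queue must use the lowest index,
+ * DRM_SCHED_PRIORITY_KERNEL == 0.)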
+ */ +enum nouveau_sched_priority { + NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_KERNEL, + NOUVEAU_SCHED_PRIORITY_COUNT, +}; + int nouveau_job_init(struct nouveau_job *job, struct nouveau_job_args *args) { - struct nouveau_sched_entity *entity = args->sched_entity; + struct nouveau_sched *sched = args->sched; int ret; + INIT_LIST_HEAD(&job->entry); + job->file_priv = args->file_priv; job->cli = nouveau_cli(args->file_priv); - job->entity = entity; + job->sched = sched; job->sync = args->sync; job->resv_usage = args->resv_usage; @@ -86,10 +84,10 @@ nouveau_job_init(struct nouveau_job *job, ret = -ENOMEM; goto err_free_objs; } - } - ret = drm_sched_job_init(&job->base, &entity->base, 1, NULL); + ret = drm_sched_job_init(&job->base, &sched->entity, + args->credits, NULL); if (ret) goto err_free_chains; @@ -109,6 +107,27 @@ return ret; } void +nouveau_job_fini(struct nouveau_job *job) +{ + dma_fence_put(job->done_fence); + drm_sched_job_cleanup(&job->base); + + job->ops->free(job); +} + +void +nouveau_job_done(struct nouveau_job *job) +{ + struct nouveau_sched *sched = job->sched; + + spin_lock(&sched->job.list.lock); + list_del(&job->entry); + spin_unlock(&sched->job.list.lock); + + wake_up(&sched->job.wq); +} + +void nouveau_job_free(struct nouveau_job *job) { kfree(job->in_sync.data); @@ -117,13 +136,6 @@ nouveau_job_free(struct nouveau_job *job) kfree(job->out_sync.chains); } -void nouveau_job_fini(struct nouveau_job *job) -{ - dma_fence_put(job->done_fence); - drm_sched_job_cleanup(&job->base); - job->ops->free(job); -} - static int sync_find_fence(struct nouveau_job *job, struct drm_nouveau_sync *sync, @@ -261,8 +273,13 @@ nouveau_job_fence_attach(struct nouveau_job *job) int nouveau_job_submit(struct nouveau_job *job) { - struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity); + struct nouveau_sched *sched = job->sched; struct dma_fence *done_fence = NULL; + struct drm_gpuvm_exec vm_exec = { + .vm = &nouveau_cli_uvmm(job->cli)->base, + .flags = DRM_EXEC_IGNORE_DUPLICATES, + .num_fences = 1, + }; int ret; ret = nouveau_job_add_deps(job); @@ -276,46 +293,29 @@ nouveau_job_submit(struct nouveau_job *job) /* Make sure the job appears on the sched_entity's queue in the same * order as it was submitted. */ - mutex_lock(&entity->mutex); + mutex_lock(&sched->mutex); /* Guarantee we won't fail after the submit() callback returned * successfully. */ if (job->ops->submit) { - ret = job->ops->submit(job); + ret = job->ops->submit(job, &vm_exec); if (ret) goto err_cleanup; } + /* Submit was successful; add the job to the schedulers job list. */ + spin_lock(&sched->job.list.lock); + list_add(&job->entry, &sched->job.list.head); + spin_unlock(&sched->job.list.lock); + drm_sched_job_arm(&job->base); job->done_fence = dma_fence_get(&job->base.s_fence->finished); if (job->sync) done_fence = dma_fence_get(job->done_fence); - /* If a sched job depends on a dma-fence from a job from the same GPU - * scheduler instance, but a different scheduler entity, the GPU - * scheduler does only wait for the particular job to be scheduled, - * rather than for the job to fully complete. This is due to the GPU - * scheduler assuming that there is a scheduler instance per ring. - * However, the current implementation, in order to avoid arbitrary - * amounts of kthreads, has a single scheduler instance while scheduler - * entities represent rings. 
- * - * As a workaround, set the DRM_SCHED_FENCE_DONT_PIPELINE for all - * out-fences in order to force the scheduler to wait for full job - * completion for dependent jobs from different entities and same - * scheduler instance. - * - * There is some work in progress [1] to address the issues of firmware - * schedulers; once it is in-tree the scheduler topology in Nouveau - * should be re-worked accordingly. - * - * [1] https://lore.kernel.org/dri-devel/20230801205103.627779-1-matthew.brost@intel.com/ - */ - set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags); - if (job->ops->armed_submit) - job->ops->armed_submit(job); + job->ops->armed_submit(job, &vm_exec); nouveau_job_fence_attach(job); @@ -326,7 +326,7 @@ nouveau_job_submit(struct nouveau_job *job) drm_sched_entity_push_job(&job->base); - mutex_unlock(&entity->mutex); + mutex_unlock(&sched->mutex); if (done_fence) { dma_fence_wait(done_fence, true); @@ -336,20 +336,13 @@ nouveau_job_submit(struct nouveau_job *job) return 0; err_cleanup: - mutex_unlock(&entity->mutex); + mutex_unlock(&sched->mutex); nouveau_job_fence_attach_cleanup(job); err: job->state = NOUVEAU_JOB_SUBMIT_FAILED; return ret; } -bool -nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity, - struct work_struct *work) -{ - return queue_work(entity->sched_wq, work); -} - static struct dma_fence * nouveau_job_run(struct nouveau_job *job) { @@ -399,50 +392,82 @@ nouveau_sched_free_job(struct drm_sched_job *sched_job) nouveau_job_fini(job); } -int nouveau_sched_entity_init(struct nouveau_sched_entity *entity, - struct drm_gpu_scheduler *sched, - struct workqueue_struct *sched_wq) -{ - mutex_init(&entity->mutex); - spin_lock_init(&entity->job.list.lock); - INIT_LIST_HEAD(&entity->job.list.head); - init_waitqueue_head(&entity->job.wq); - - entity->sched_wq = sched_wq; - return drm_sched_entity_init(&entity->base, - DRM_SCHED_PRIORITY_NORMAL, - &sched, 1, NULL); -} - -void -nouveau_sched_entity_fini(struct nouveau_sched_entity *entity) -{ - drm_sched_entity_destroy(&entity->base); -} - static const struct drm_sched_backend_ops nouveau_sched_ops = { .run_job = nouveau_sched_run_job, .timedout_job = nouveau_sched_timedout_job, .free_job = nouveau_sched_free_job, }; -int nouveau_sched_init(struct nouveau_drm *drm) +int +nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, + struct workqueue_struct *wq, u32 credit_limit) { - struct drm_gpu_scheduler *sched = &drm->sched; + struct drm_gpu_scheduler *drm_sched = &sched->base; + struct drm_sched_entity *entity = &sched->entity; long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS); + int ret; - drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq"); - if (!drm->sched_wq) - return -ENOMEM; + if (!wq) { + wq = alloc_workqueue("nouveau_sched_wq_%d", 0, WQ_MAX_ACTIVE, + current->pid); + if (!wq) + return -ENOMEM; + + sched->wq = wq; + } - return drm_sched_init(sched, &nouveau_sched_ops, NULL, - DRM_SCHED_PRIORITY_COUNT, - NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit, - NULL, NULL, "nouveau_sched", drm->dev->dev); + ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq, + NOUVEAU_SCHED_PRIORITY_COUNT, + credit_limit, 0, job_hang_limit, + NULL, NULL, "nouveau_sched", drm->dev->dev); + if (ret) + goto fail_wq; + + /* Using DRM_SCHED_PRIORITY_KERNEL, since that's what we're required to use + * when we want to have a single run-queue only. 
+ * + * It's not documented, but one will find out when trying to use any + * other priority running into faults, because the scheduler uses the + * priority as array index. + * + * Can't use NOUVEAU_SCHED_PRIORITY_SINGLE either, because it's not + * matching the enum type used in drm_sched_entity_init(). + */ + ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_KERNEL, + &drm_sched, 1, NULL); + if (ret) + goto fail_sched; + + mutex_init(&sched->mutex); + spin_lock_init(&sched->job.list.lock); + INIT_LIST_HEAD(&sched->job.list.head); + init_waitqueue_head(&sched->job.wq); + + return 0; + +fail_sched: + drm_sched_fini(drm_sched); +fail_wq: + if (sched->wq) + destroy_workqueue(sched->wq); + return ret; } -void nouveau_sched_fini(struct nouveau_drm *drm) +void +nouveau_sched_fini(struct nouveau_sched *sched) { - destroy_workqueue(drm->sched_wq); - drm_sched_fini(&drm->sched); + struct drm_gpu_scheduler *drm_sched = &sched->base; + struct drm_sched_entity *entity = &sched->entity; + + rmb(); /* for list_empty to work without lock */ + wait_event(sched->job.wq, list_empty(&sched->job.list.head)); + + drm_sched_entity_fini(entity); + drm_sched_fini(drm_sched); + + /* Destroy workqueue after scheduler tear down, otherwise it might still + * be in use. + */ + if (sched->wq) + destroy_workqueue(sched->wq); } diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h index 27ac19792597..a6528f5981e6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.h +++ b/drivers/gpu/drm/nouveau/nouveau_sched.h @@ -5,7 +5,7 @@ #include <linux/types.h> -#include <drm/drm_exec.h> +#include <drm/drm_gpuvm.h> #include <drm/gpu_scheduler.h> #include "nouveau_drv.h" @@ -26,7 +26,8 @@ enum nouveau_job_state { struct nouveau_job_args { struct drm_file *file_priv; - struct nouveau_sched_entity *sched_entity; + struct nouveau_sched *sched; + u32 credits; enum dma_resv_usage resv_usage; bool sync; @@ -49,12 +50,12 @@ struct nouveau_job { enum nouveau_job_state state; - struct nouveau_sched_entity *entity; + struct nouveau_sched *sched; + struct list_head entry; struct drm_file *file_priv; struct nouveau_cli *cli; - struct drm_exec exec; enum dma_resv_usage resv_usage; struct dma_fence *done_fence; @@ -76,8 +77,8 @@ struct nouveau_job { /* If .submit() returns without any error, it is guaranteed that * armed_submit() is called. 
*/ - int (*submit)(struct nouveau_job *); - void (*armed_submit)(struct nouveau_job *); + int (*submit)(struct nouveau_job *, struct drm_gpuvm_exec *); + void (*armed_submit)(struct nouveau_job *, struct drm_gpuvm_exec *); struct dma_fence *(*run)(struct nouveau_job *); void (*free)(struct nouveau_job *); enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *); @@ -90,20 +91,17 @@ int nouveau_job_ucopy_syncs(struct nouveau_job_args *args, int nouveau_job_init(struct nouveau_job *job, struct nouveau_job_args *args); -void nouveau_job_free(struct nouveau_job *job); - -int nouveau_job_submit(struct nouveau_job *job); void nouveau_job_fini(struct nouveau_job *job); +int nouveau_job_submit(struct nouveau_job *job); +void nouveau_job_done(struct nouveau_job *job); +void nouveau_job_free(struct nouveau_job *job); -#define to_nouveau_sched_entity(entity) \ - container_of((entity), struct nouveau_sched_entity, base) - -struct nouveau_sched_entity { - struct drm_sched_entity base; +struct nouveau_sched { + struct drm_gpu_scheduler base; + struct drm_sched_entity entity; + struct workqueue_struct *wq; struct mutex mutex; - struct workqueue_struct *sched_wq; - struct { struct { struct list_head head; @@ -113,15 +111,8 @@ struct nouveau_sched_entity { } job; }; -int nouveau_sched_entity_init(struct nouveau_sched_entity *entity, - struct drm_gpu_scheduler *sched, - struct workqueue_struct *sched_wq); -void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity); - -bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity, - struct work_struct *work); - -int nouveau_sched_init(struct nouveau_drm *drm); -void nouveau_sched_fini(struct nouveau_drm *drm); +int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, + struct workqueue_struct *wq, u32 credit_limit); +void nouveau_sched_fini(struct nouveau_sched *sched); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index eda7bb8624f1..dae3baf707a0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -438,8 +438,9 @@ nouveau_uvma_region_complete(struct nouveau_uvma_region *reg) static void op_map_prepare_unwind(struct nouveau_uvma *uvma) { + struct drm_gpuva *va = &uvma->va; nouveau_uvma_gem_put(uvma); - drm_gpuva_remove(&uvma->va); + drm_gpuva_remove(va); nouveau_uvma_free(uvma); } @@ -468,6 +469,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm, break; case DRM_GPUVA_OP_REMAP: { struct drm_gpuva_op_remap *r = &op->remap; + struct drm_gpuva *va = r->unmap->va; if (r->next) op_map_prepare_unwind(new->next); @@ -475,7 +477,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm, if (r->prev) op_map_prepare_unwind(new->prev); - op_unmap_prepare_unwind(r->unmap->va); + op_unmap_prepare_unwind(va); break; } case DRM_GPUVA_OP_UNMAP: @@ -606,6 +608,9 @@ op_unmap_prepare(struct drm_gpuva_op_unmap *u) drm_gpuva_unmap(u); } +/* + * Note: @args should not be NULL when calling for a map operation. 
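+ *
+ * (Not part of the patch, presumably the rationale: the map path hands
+ * @args to op_map_prepare() unconditionally before the vmm_get check, so
+ * the old "args &&" guard below was redundant and is dropped.)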
+ */ static int nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm, struct nouveau_uvma_prealloc *new, @@ -626,7 +631,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm, if (ret) goto unwind; - if (args && vmm_get_range) { + if (vmm_get_range) { ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start, vmm_get_range); if (ret) { @@ -634,6 +639,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm, goto unwind; } } + break; } case DRM_GPUVA_OP_REMAP: { @@ -960,6 +966,12 @@ nouveau_uvmm_bind_job_free(struct kref *kref) { struct nouveau_uvmm_bind_job *job = container_of(kref, struct nouveau_uvmm_bind_job, kref); + struct bind_job_op *op, *next; + + list_for_each_op_safe(op, next, &job->ops) { + list_del(&op->entry); + kfree(op); + } nouveau_job_free(&job->base); kfree(job); @@ -1001,14 +1013,16 @@ bind_validate_op(struct nouveau_job *job, static void bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range) { - struct nouveau_uvmm_bind_job *bind_job; - struct nouveau_sched_entity *entity = job->entity; + struct nouveau_sched *sched = job->sched; + struct nouveau_job *__job; struct bind_job_op *op; u64 end = addr + range; again: - spin_lock(&entity->job.list.lock); - list_for_each_entry(bind_job, &entity->job.list.head, entry) { + spin_lock(&sched->job.list.lock); + list_for_each_entry(__job, &sched->job.list.head, entry) { + struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job); + list_for_each_op(op, &bind_job->ops) { if (op->op == OP_UNMAP) { u64 op_addr = op->va.addr; @@ -1016,7 +1030,7 @@ again: if (!(end <= op_addr || addr >= op_end)) { nouveau_uvmm_bind_job_get(bind_job); - spin_unlock(&entity->job.list.lock); + spin_unlock(&sched->job.list.lock); wait_for_completion(&bind_job->complete); nouveau_uvmm_bind_job_put(bind_job); goto again; @@ -1024,7 +1038,7 @@ again: } } } - spin_unlock(&entity->job.list.lock); + spin_unlock(&sched->job.list.lock); } static int @@ -1135,12 +1149,52 @@ bind_link_gpuvas(struct bind_job_op *bop) } static int -nouveau_uvmm_bind_job_submit(struct nouveau_job *job) +bind_lock_validate(struct nouveau_job *job, struct drm_exec *exec, + unsigned int num_fences) +{ + struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); + struct bind_job_op *op; + int ret; + + list_for_each_op(op, &bind_job->ops) { + struct drm_gpuva_op *va_op; + + if (!op->ops) + continue; + + drm_gpuva_for_each_op(va_op, op->ops) { + struct drm_gem_object *obj = op_gem_obj(va_op); + + if (unlikely(!obj)) + continue; + + ret = drm_exec_prepare_obj(exec, obj, num_fences); + if (ret) + return ret; + + /* Don't validate GEMs backing mappings we're about to + * unmap, it's not worth the effort. 
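+ *
+ * (Not part of the patch: the object is still locked and has fence slots
+ * reserved by drm_exec_prepare_obj() above; only the residency check via
+ * nouveau_bo_validate() is skipped, since the mapping is going away.)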
+ */ + if (va_op->op == DRM_GPUVA_OP_UNMAP) + continue; + + ret = nouveau_bo_validate(nouveau_gem_object(obj), + true, false); + if (ret) + return ret; + } + } + + return 0; +} + +static int +nouveau_uvmm_bind_job_submit(struct nouveau_job *job, + struct drm_gpuvm_exec *vme) { struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); - struct nouveau_sched_entity *entity = job->entity; - struct drm_exec *exec = &job->exec; + struct drm_exec *exec = &vme->exec; struct bind_job_op *op; int ret; @@ -1157,6 +1211,8 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) dma_resv_unlock(obj->resv); if (IS_ERR(op->vm_bo)) return PTR_ERR(op->vm_bo); + + drm_gpuvm_bo_extobj_add(op->vm_bo); } ret = bind_validate_op(job, op); @@ -1179,6 +1235,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) * unwind all GPU VA space changes on failure. */ nouveau_uvmm_lock(uvmm); + list_for_each_op(op, &bind_job->ops) { switch (op->op) { case OP_MAP_SPARSE: @@ -1290,55 +1347,13 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) } } - drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT | - DRM_EXEC_IGNORE_DUPLICATES); + drm_exec_init(exec, vme->flags); drm_exec_until_all_locked(exec) { - list_for_each_op(op, &bind_job->ops) { - struct drm_gpuva_op *va_op; - - if (IS_ERR_OR_NULL(op->ops)) - continue; - - drm_gpuva_for_each_op(va_op, op->ops) { - struct drm_gem_object *obj = op_gem_obj(va_op); - - if (unlikely(!obj)) - continue; - - ret = drm_exec_prepare_obj(exec, obj, 1); - drm_exec_retry_on_contention(exec); - if (ret) { - op = list_last_op(&bind_job->ops); - goto unwind; - } - } - } - } - - list_for_each_op(op, &bind_job->ops) { - struct drm_gpuva_op *va_op; - - if (IS_ERR_OR_NULL(op->ops)) - continue; - - drm_gpuva_for_each_op(va_op, op->ops) { - struct drm_gem_object *obj = op_gem_obj(va_op); - - if (unlikely(!obj)) - continue; - - /* Don't validate GEMs backing mappings we're about to - * unmap, it's not worth the effort. 
- */ - if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP)) - continue; - - ret = nouveau_bo_validate(nouveau_gem_object(obj), - true, false); - if (ret) { - op = list_last_op(&bind_job->ops); - goto unwind; - } + ret = bind_lock_validate(job, exec, vme->num_fences); + drm_exec_retry_on_contention(exec); + if (ret) { + op = list_last_op(&bind_job->ops); + goto unwind; } } @@ -1375,10 +1390,6 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) } nouveau_uvmm_unlock(uvmm); - spin_lock(&entity->job.list.lock); - list_add(&bind_job->entry, &entity->job.list.head); - spin_unlock(&entity->job.list.lock); - return 0; unwind_continue: @@ -1413,21 +1424,17 @@ unwind: } nouveau_uvmm_unlock(uvmm); - drm_exec_fini(exec); + drm_gpuvm_exec_unlock(vme); return ret; } static void -nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job) +nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job, + struct drm_gpuvm_exec *vme) { - struct drm_exec *exec = &job->exec; - struct drm_gem_object *obj; - unsigned long index; - - drm_exec_for_each_locked_object(exec, index, obj) - dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage); - - drm_exec_fini(exec); + drm_gpuvm_exec_resv_add_fence(vme, job->done_fence, + job->resv_usage, job->resv_usage); + drm_gpuvm_exec_unlock(vme); } static struct dma_fence * @@ -1465,14 +1472,11 @@ out: } static void -nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work) +nouveau_uvmm_bind_job_cleanup(struct nouveau_job *job) { - struct nouveau_uvmm_bind_job *bind_job = - container_of(work, struct nouveau_uvmm_bind_job, work); - struct nouveau_job *job = &bind_job->base; + struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); - struct nouveau_sched_entity *entity = job->entity; - struct bind_job_op *op, *next; + struct bind_job_op *op; list_for_each_op(op, &bind_job->ops) { struct drm_gem_object *obj = op->gem.obj; @@ -1524,38 +1528,17 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work) drm_gem_object_put(obj); } - spin_lock(&entity->job.list.lock); - list_del(&bind_job->entry); - spin_unlock(&entity->job.list.lock); - + nouveau_job_done(job); complete_all(&bind_job->complete); - wake_up(&entity->job.wq); - - /* Remove and free ops after removing the bind job from the job list to - * avoid races against bind_validate_map_sparse(). 
- */ - list_for_each_op_safe(op, next, &bind_job->ops) { - list_del(&op->entry); - kfree(op); - } nouveau_uvmm_bind_job_put(bind_job); } -static void -nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job) -{ - struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); - struct nouveau_sched_entity *entity = job->entity; - - nouveau_sched_entity_qwork(entity, &bind_job->work); -} - static struct nouveau_job_ops nouveau_bind_job_ops = { .submit = nouveau_uvmm_bind_job_submit, .armed_submit = nouveau_uvmm_bind_job_armed_submit, .run = nouveau_uvmm_bind_job_run, - .free = nouveau_uvmm_bind_job_free_qwork, + .free = nouveau_uvmm_bind_job_cleanup, }; static int @@ -1616,7 +1599,6 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob, return ret; INIT_LIST_HEAD(&job->ops); - INIT_LIST_HEAD(&job->entry); for (i = 0; i < __args->op.count; i++) { ret = bind_job_op_from_uop(&op, &__args->op.s[i]); @@ -1627,11 +1609,12 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob, } init_completion(&job->complete); - INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn); - args.sched_entity = __args->sched_entity; args.file_priv = __args->file_priv; + args.sched = __args->sched; + args.credits = 1; + args.in_sync.count = __args->in_sync.count; args.in_sync.s = __args->in_sync.s; @@ -1757,7 +1740,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev, if (ret) return ret; - args.sched_entity = &cli->sched_entity; + args.sched = &cli->sched; args.file_priv = file_priv; ret = nouveau_uvmm_vm_bind(&args); @@ -1815,8 +1798,17 @@ nouveau_uvmm_free(struct drm_gpuvm *gpuvm) kfree(uvmm); } +static int +nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec) +{ + struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj); + + return nouveau_bo_validate(nvbo, true, false); +} + static const struct drm_gpuvm_ops gpuvm_ops = { .vm_free = nouveau_uvmm_free, + .vm_bo_validate = nouveau_uvmm_bo_validate, }; int @@ -1900,12 +1892,8 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm) MA_STATE(mas, &uvmm->region_mt, 0, 0); struct nouveau_uvma_region *reg; struct nouveau_cli *cli = uvmm->vmm.cli; - struct nouveau_sched_entity *entity = &cli->sched_entity; struct drm_gpuva *va, *next; - rmb(); /* for list_empty to work without lock */ - wait_event(entity->job.wq, list_empty(&entity->job.list.head)); - nouveau_uvmm_lock(uvmm); drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) { struct nouveau_uvma *uvma = uvma_from_va(va); diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h index f0a6d98ace4f..9d3c348581eb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h @@ -44,8 +44,6 @@ struct nouveau_uvmm_bind_job { struct nouveau_job base; struct kref kref; - struct list_head entry; - struct work_struct work; struct completion complete; /* struct bind_job_op */ @@ -54,7 +52,7 @@ struct nouveau_uvmm_bind_job { struct nouveau_uvmm_bind_job_args { struct drm_file *file_priv; - struct nouveau_sched_entity *sched_entity; + struct nouveau_sched *sched; unsigned int flags; diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index c26aab4939fa..993691b3cc7e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -22,11 +22,11 @@ #include <linux/hardirq.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/property.h> #include <linux/sizes.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include 
<linux/of.h> -#include <linux/of_device.h> #include <linux/component.h> #include <linux/sys_soc.h> #include <drm/drm_fourcc.h> @@ -4765,7 +4765,7 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) if (soc) dispc->feat = soc->data; else - dispc->feat = of_match_device(dispc_of_match, &pdev->dev)->data; + dispc->feat = device_get_match_data(&pdev->dev); r = dispc_errata_i734_wa_init(dispc); if (r) diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 02955f976845..988888e164d7 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -22,12 +22,13 @@ #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/property.h> #include <linux/gfp.h> #include <linux/sizes.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> #include <linux/of_graph.h> #include <linux/regulator/consumer.h> #include <linux/suspend.h> @@ -1445,7 +1446,7 @@ static int dss_probe(struct platform_device *pdev) if (soc) dss->feat = soc->data; else - dss->feat = of_match_device(dss_of_match, &pdev->dev)->data; + dss->feat = device_get_match_data(&pdev->dev); /* Map I/O registers, get and setup clocks. */ dss->base = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index c48fa531ca32..3421e8389222 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -48,7 +48,7 @@ struct omap_gem_object { * OMAP_BO_MEM_DMA_API flag set) * * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set) - * if they are physically contiguous (when sgt->orig_nents == 1) + * if they are physically contiguous * * - buffers mapped through the TILER when pin_cnt is not zero, in which * case the DMA address points to the TILER aperture @@ -148,12 +148,18 @@ u64 omap_gem_mmap_offset(struct drm_gem_object *obj) return drm_vma_node_offset_addr(&obj->vma_node); } +static bool omap_gem_sgt_is_contiguous(struct sg_table *sgt, size_t size) +{ + return !(drm_prime_get_contiguous_size(sgt) < size); +} + static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj) { if (omap_obj->flags & OMAP_BO_MEM_DMA_API) return true; - if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1) + if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && + omap_gem_sgt_is_contiguous(omap_obj->sgt, omap_obj->base.size)) return true; return false; @@ -1385,7 +1391,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, union omap_gem_size gsize; /* Without a DMM only physically contiguous buffers can be supported. */ - if (sgt->orig_nents != 1 && !priv->has_dmm) + if (!omap_gem_sgt_is_contiguous(sgt, size) && !priv->has_dmm) return ERR_PTR(-EINVAL); gsize.bytes = PAGE_ALIGN(size); @@ -1399,7 +1405,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, omap_obj->sgt = sgt; - if (sgt->orig_nents == 1) { + if (omap_gem_sgt_is_contiguous(sgt, size)) { omap_obj->dma_addr = sg_dma_address(sgt->sgl); } else { /* Create pages list from sgt */ diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index f22677373171..c76f186c4baa 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -203,6 +203,9 @@ struct edp_panel_entry { /** @name: Name of this panel (for printing to logs). 
*/ const char *name; + + /** @override_edid_mode: Override the mode obtained by edid. */ + const struct drm_display_mode *override_edid_mode; }; struct panel_edp { @@ -301,6 +304,24 @@ static unsigned int panel_edp_get_display_modes(struct panel_edp *panel, return num; } +static int panel_edp_override_edid_mode(struct panel_edp *panel, + struct drm_connector *connector, + const struct drm_display_mode *override_mode) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, override_mode); + if (!mode) { + dev_err(panel->base.dev, "failed to add additional mode\n"); + return 0; + } + + mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + return 1; +} + static int panel_edp_get_non_edid_modes(struct panel_edp *panel, struct drm_connector *connector) { @@ -568,6 +589,10 @@ static int panel_edp_get_modes(struct drm_panel *panel, { struct panel_edp *p = to_panel_edp(panel); int num = 0; + bool has_hard_coded_modes = p->desc->num_timings || p->desc->num_modes; + bool has_override_edid_mode = p->detected_panel && + p->detected_panel != ERR_PTR(-EINVAL) && + p->detected_panel->override_edid_mode; /* probe EDID if a DDC bus is available */ if (p->ddc) { @@ -575,20 +600,28 @@ static int panel_edp_get_modes(struct drm_panel *panel, if (!p->edid) p->edid = drm_get_edid(connector, p->ddc); - - if (p->edid) - num += drm_add_edid_modes(connector, p->edid); + /* + * If both edid and hard-coded modes exists, skip edid modes to + * avoid multiple preferred modes. + */ + if (p->edid && !has_hard_coded_modes) { + if (has_override_edid_mode) { + /* + * override_edid_mode is specified. Use + * override_edid_mode instead of from edid. + */ + num += panel_edp_override_edid_mode(p, connector, + p->detected_panel->override_edid_mode); + } else { + num += drm_add_edid_modes(connector, p->edid); + } + } pm_runtime_mark_last_busy(panel->dev); pm_runtime_put_autosuspend(panel->dev); } - /* - * Add hard-coded panel modes. Don't call this if there are no timings - * and no modes (the generic edp-panel case) because it will clobber - * the display_info that was already set by drm_add_edid_modes(). - */ - if (p->desc->num_timings || p->desc->num_modes) + if (has_hard_coded_modes) num += panel_edp_get_non_edid_modes(p, connector); else if (!num) dev_warn(p->base.dev, "No display modes\n"); @@ -950,6 +983,19 @@ static const struct panel_desc auo_b101ean01 = { }, }; +static const struct drm_display_mode auo_b116xa3_mode = { + .clock = 70589, + .hdisplay = 1366, + .hsync_start = 1366 + 40, + .hsync_end = 1366 + 40 + 40, + .htotal = 1366 + 40 + 40 + 32, + .vdisplay = 768, + .vsync_start = 768 + 10, + .vsync_end = 768 + 10 + 12, + .vtotal = 768 + 10 + 12 + 6, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + static const struct drm_display_mode auo_b116xak01_mode = { .clock = 69300, .hdisplay = 1366, @@ -1849,6 +1895,15 @@ static const struct panel_delay delay_200_150_e200 = { .delay = _delay \ } +#define EDP_PANEL_ENTRY2(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name, _mode) \ +{ \ + .name = _name, \ + .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \ + product_id), \ + .delay = _delay, \ + .override_edid_mode = _mode \ +} + /* * This table is used to figure out power sequencing delays for panels that * are detected by EDID. 
Entries here may point to entries in the @@ -1866,9 +1921,11 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"), - EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"), + EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0", + &auo_b116xa3_mode), EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"), - EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"), + EDP_PANEL_ENTRY2('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1", + &auo_b116xa3_mode), EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"), @@ -1910,9 +1967,9 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"), - EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"), EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"), @@ -1926,6 +1983,8 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"), EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"), + EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01"), + EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &sharp_lq140m1jw46.delay, "LQ140M1JW46"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"), diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c index c73243d85de7..ff0dc08b9829 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx8394.c +++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c @@ -38,6 +38,7 @@ #define HX8394_CMD_SETMIPI 0xba #define HX8394_CMD_SETOTP 0xbb #define HX8394_CMD_SETREGBANK 0xbd +#define HX8394_CMD_UNKNOWN5 0xbf #define HX8394_CMD_UNKNOWN1 0xc0 #define HX8394_CMD_SETDGCLUT 0xc1 #define HX8394_CMD_SETID 0xc3 @@ -52,6 +53,7 @@ #define HX8394_CMD_SETGIP1 0xd5 #define HX8394_CMD_SETGIP2 0xd6 #define HX8394_CMD_SETGPO 0xd6 +#define HX8394_CMD_UNKNOWN4 0xd8 #define HX8394_CMD_SETSCALING 0xdd #define HX8394_CMD_SETIDLE 0xdf #define HX8394_CMD_SETGAMMA 0xe0 @@ -68,7 +70,7 @@ struct hx8394 { struct gpio_desc *reset_gpio; struct regulator *vcc; struct regulator *iovcc; - bool prepared; + enum drm_panel_orientation orientation; const struct hx8394_panel_desc *desc; }; @@ -203,6 +205,140 @@ static const struct hx8394_panel_desc hsd060bhw4_desc = { .init_sequence = hsd060bhw4_init_sequence, }; +static int powkiddy_x55_init_sequence(struct hx8394 *ctx) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + + /* 5.19.8 SETEXTC: Set extension command (B9h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC, + 
0xff, 0x83, 0x94); + + /* 5.19.9 SETMIPI: Set MIPI control (BAh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI, + 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); + + /* 5.19.2 SETPOWER: Set power (B1h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER, + 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47); + + /* 5.19.3 SETDISP: Set display related register (B2h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP, + 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f); + + /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC, + 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75, + 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, + 0x86); + + /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM, + 0x6e, 0x6e); + + /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0, + 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10, + 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15, + 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07, + 0x07, 0x0c, 0x40); + + /* 5.19.20 Set GIP Option1 (D5h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1, + 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18, + 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21, + 0x18, 0x18, 0x18, 0x18); + + /* 5.19.21 Set GIP Option2 (D6h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2, + 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, + 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18, + 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24, + 0x18, 0x18, 0x18, 0x18); + + /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA, + 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, + 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8, + 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00, + 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65, + 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba, + 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1, + 0x1f, 0x31); + + /* 5.19.17 SETPANEL (CCh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL, + 0x0b); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3, + 0x02); + + /* 5.19.11 Set register bank (BDh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, + 0x02); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff); + + /* 5.19.11 Set register bank (BDh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, + 0x00); + + /* 5.19.11 Set register bank (BDh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, + 0x01); + + /* 5.19.2 SETPOWER: Set power (B1h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER, + 0x00); + + /* 5.19.11 Set register bank (BDh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, + 0x00); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN5, + 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01); + + 
/* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2, + 0xed); + + return 0; +} + +static const struct drm_display_mode powkiddy_x55_mode = { + .hdisplay = 720, + .hsync_start = 720 + 44, + .hsync_end = 720 + 44 + 20, + .htotal = 720 + 44 + 20 + 20, + .vdisplay = 1280, + .vsync_start = 1280 + 12, + .vsync_end = 1280 + 12 + 10, + .vtotal = 1280 + 12 + 10 + 10, + .clock = 63290, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + .width_mm = 67, + .height_mm = 121, +}; + +static const struct hx8394_panel_desc powkiddy_x55_desc = { + .mode = &powkiddy_x55_mode, + .lanes = 4, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, + .format = MIPI_DSI_FMT_RGB888, + .init_sequence = powkiddy_x55_init_sequence, +}; + static int hx8394_enable(struct drm_panel *panel) { struct hx8394 *ctx = panel_to_hx8394(panel); @@ -262,16 +398,11 @@ static int hx8394_unprepare(struct drm_panel *panel) { struct hx8394 *ctx = panel_to_hx8394(panel); - if (!ctx->prepared) - return 0; - gpiod_set_value_cansleep(ctx->reset_gpio, 1); regulator_disable(ctx->iovcc); regulator_disable(ctx->vcc); - ctx->prepared = false; - return 0; } @@ -280,9 +411,6 @@ static int hx8394_prepare(struct drm_panel *panel) struct hx8394 *ctx = panel_to_hx8394(panel); int ret; - if (ctx->prepared) - return 0; - gpiod_set_value_cansleep(ctx->reset_gpio, 1); ret = regulator_enable(ctx->vcc); @@ -301,8 +429,6 @@ static int hx8394_prepare(struct drm_panel *panel) msleep(180); - ctx->prepared = true; - return 0; disable_vcc: @@ -335,12 +461,20 @@ static int hx8394_get_modes(struct drm_panel *panel, return 1; } +static enum drm_panel_orientation hx8394_get_orientation(struct drm_panel *panel) +{ + struct hx8394 *ctx = panel_to_hx8394(panel); + + return ctx->orientation; +} + static const struct drm_panel_funcs hx8394_drm_funcs = { .disable = hx8394_disable, .unprepare = hx8394_unprepare, .prepare = hx8394_prepare, .enable = hx8394_enable, .get_modes = hx8394_get_modes, + .get_orientation = hx8394_get_orientation, }; static int hx8394_probe(struct mipi_dsi_device *dsi) @@ -358,6 +492,12 @@ static int hx8394_probe(struct mipi_dsi_device *dsi) return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "Failed to get reset gpio\n"); + ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation); + if (ret < 0) { + dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, ret); + return ret; + } + mipi_dsi_set_drvdata(dsi, ctx); ctx->dev = dev; @@ -401,27 +541,11 @@ static int hx8394_probe(struct mipi_dsi_device *dsi) return 0; } -static void hx8394_shutdown(struct mipi_dsi_device *dsi) -{ - struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi); - int ret; - - ret = drm_panel_disable(&ctx->panel); - if (ret < 0) - dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret); - - ret = drm_panel_unprepare(&ctx->panel); - if (ret < 0) - dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret); -} - static void hx8394_remove(struct mipi_dsi_device *dsi) { struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi); int ret; - hx8394_shutdown(dsi); - ret = mipi_dsi_detach(dsi); if (ret < 0) dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); @@ -431,6 +555,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi) static const struct of_device_id hx8394_of_match[] = { { .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc }, + { .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc }, { /* sentinel */ } }; 
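A brief sketch (names illustrative; of_device_get_match_data() is the standard helper) of how the per-compatible descriptor above is typically retrieved at probe time:

	/* Illustrative sketch: look up the descriptor matched via hx8394_of_match */
	ctx->desc = of_device_get_match_data(dev);
	if (!ctx->desc)
		return -ENODEV;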
MODULE_DEVICE_TABLE(of, hx8394_of_match); @@ -438,7 +563,6 @@ MODULE_DEVICE_TABLE(of, hx8394_of_match); static struct mipi_dsi_driver hx8394_driver = { .probe = hx8394_probe, .remove = hx8394_remove, - .shutdown = hx8394_shutdown, .driver = { .name = DRV_NAME, .of_match_table = hx8394_of_match, diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 7838947a1bf3..2ffe5f68a890 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -830,6 +830,203 @@ static const struct ili9881c_instr w552946ab_init[] = { ILI9881C_SWITCH_PAGE_INSTR(0), }; +static const struct ili9881c_instr am8001280g_init[] = { + ILI9881C_SWITCH_PAGE_INSTR(3), + ILI9881C_COMMAND_INSTR(0x01, 0x00), + ILI9881C_COMMAND_INSTR(0x02, 0x00), + ILI9881C_COMMAND_INSTR(0x03, 0x73), + ILI9881C_COMMAND_INSTR(0x04, 0xD3), + ILI9881C_COMMAND_INSTR(0x05, 0x00), + ILI9881C_COMMAND_INSTR(0x06, 0x0A), + ILI9881C_COMMAND_INSTR(0x07, 0x0E), + ILI9881C_COMMAND_INSTR(0x08, 0x00), + ILI9881C_COMMAND_INSTR(0x09, 0x01), + ILI9881C_COMMAND_INSTR(0x0a, 0x01), + ILI9881C_COMMAND_INSTR(0x0b, 0x01), + ILI9881C_COMMAND_INSTR(0x0c, 0x01), + ILI9881C_COMMAND_INSTR(0x0d, 0x01), + ILI9881C_COMMAND_INSTR(0x0e, 0x01), + ILI9881C_COMMAND_INSTR(0x0f, 0x01), + ILI9881C_COMMAND_INSTR(0x10, 0x01), + ILI9881C_COMMAND_INSTR(0x11, 0x00), + ILI9881C_COMMAND_INSTR(0x12, 0x00), + ILI9881C_COMMAND_INSTR(0x13, 0x00), + ILI9881C_COMMAND_INSTR(0x14, 0x00), + ILI9881C_COMMAND_INSTR(0x15, 0x00), + ILI9881C_COMMAND_INSTR(0x16, 0x00), + ILI9881C_COMMAND_INSTR(0x17, 0x00), + ILI9881C_COMMAND_INSTR(0x18, 0x00), + ILI9881C_COMMAND_INSTR(0x19, 0x00), + ILI9881C_COMMAND_INSTR(0x1a, 0x00), + ILI9881C_COMMAND_INSTR(0x1b, 0x00), + ILI9881C_COMMAND_INSTR(0x1c, 0x00), + ILI9881C_COMMAND_INSTR(0x1d, 0x00), + ILI9881C_COMMAND_INSTR(0x1e, 0x40), + ILI9881C_COMMAND_INSTR(0x1f, 0x80), + ILI9881C_COMMAND_INSTR(0x20, 0x06), + ILI9881C_COMMAND_INSTR(0x21, 0x01), + ILI9881C_COMMAND_INSTR(0x22, 0x00), + ILI9881C_COMMAND_INSTR(0x23, 0x00), + ILI9881C_COMMAND_INSTR(0x24, 0x00), + ILI9881C_COMMAND_INSTR(0x25, 0x00), + ILI9881C_COMMAND_INSTR(0x26, 0x00), + ILI9881C_COMMAND_INSTR(0x27, 0x00), + ILI9881C_COMMAND_INSTR(0x28, 0x33), + ILI9881C_COMMAND_INSTR(0x29, 0x03), + ILI9881C_COMMAND_INSTR(0x2a, 0x00), + ILI9881C_COMMAND_INSTR(0x2b, 0x00), + ILI9881C_COMMAND_INSTR(0x2c, 0x00), + ILI9881C_COMMAND_INSTR(0x2d, 0x00), + ILI9881C_COMMAND_INSTR(0x2e, 0x00), + ILI9881C_COMMAND_INSTR(0x2f, 0x00), + ILI9881C_COMMAND_INSTR(0x30, 0x00), + ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x32, 0x00), + ILI9881C_COMMAND_INSTR(0x33, 0x00), + ILI9881C_COMMAND_INSTR(0x34, 0x03), + ILI9881C_COMMAND_INSTR(0x35, 0x00), + ILI9881C_COMMAND_INSTR(0x36, 0x03), + ILI9881C_COMMAND_INSTR(0x37, 0x00), + ILI9881C_COMMAND_INSTR(0x38, 0x00), + ILI9881C_COMMAND_INSTR(0x39, 0x00), + ILI9881C_COMMAND_INSTR(0x3a, 0x40), + ILI9881C_COMMAND_INSTR(0x3b, 0x40), + ILI9881C_COMMAND_INSTR(0x3c, 0x00), + ILI9881C_COMMAND_INSTR(0x3d, 0x00), + ILI9881C_COMMAND_INSTR(0x3e, 0x00), + ILI9881C_COMMAND_INSTR(0x3f, 0x00), + ILI9881C_COMMAND_INSTR(0x40, 0x00), + ILI9881C_COMMAND_INSTR(0x41, 0x00), + ILI9881C_COMMAND_INSTR(0x42, 0x00), + ILI9881C_COMMAND_INSTR(0x43, 0x00), + ILI9881C_COMMAND_INSTR(0x44, 0x00), + + ILI9881C_COMMAND_INSTR(0x50, 0x01), + ILI9881C_COMMAND_INSTR(0x51, 0x23), + ILI9881C_COMMAND_INSTR(0x52, 0x45), + ILI9881C_COMMAND_INSTR(0x53, 0x67), + ILI9881C_COMMAND_INSTR(0x54, 0x89), + 
ILI9881C_COMMAND_INSTR(0x55, 0xab), + ILI9881C_COMMAND_INSTR(0x56, 0x01), + ILI9881C_COMMAND_INSTR(0x57, 0x23), + ILI9881C_COMMAND_INSTR(0x58, 0x45), + ILI9881C_COMMAND_INSTR(0x59, 0x67), + ILI9881C_COMMAND_INSTR(0x5a, 0x89), + ILI9881C_COMMAND_INSTR(0x5b, 0xab), + ILI9881C_COMMAND_INSTR(0x5c, 0xcd), + ILI9881C_COMMAND_INSTR(0x5d, 0xef), + + ILI9881C_COMMAND_INSTR(0x5e, 0x11), + ILI9881C_COMMAND_INSTR(0x5f, 0x02), + ILI9881C_COMMAND_INSTR(0x60, 0x00), + ILI9881C_COMMAND_INSTR(0x61, 0x01), + ILI9881C_COMMAND_INSTR(0x62, 0x0D), + ILI9881C_COMMAND_INSTR(0x63, 0x0C), + ILI9881C_COMMAND_INSTR(0x64, 0x0F), + ILI9881C_COMMAND_INSTR(0x65, 0x0E), + ILI9881C_COMMAND_INSTR(0x66, 0x06), + ILI9881C_COMMAND_INSTR(0x67, 0x07), + ILI9881C_COMMAND_INSTR(0x68, 0x02), + ILI9881C_COMMAND_INSTR(0x69, 0x02), + ILI9881C_COMMAND_INSTR(0x6a, 0x08), + ILI9881C_COMMAND_INSTR(0x6b, 0x02), + ILI9881C_COMMAND_INSTR(0x6c, 0x02), + ILI9881C_COMMAND_INSTR(0x6d, 0x02), + ILI9881C_COMMAND_INSTR(0x6e, 0x02), + ILI9881C_COMMAND_INSTR(0x6f, 0x02), + ILI9881C_COMMAND_INSTR(0x70, 0x02), + ILI9881C_COMMAND_INSTR(0x71, 0x02), + ILI9881C_COMMAND_INSTR(0x72, 0x02), + ILI9881C_COMMAND_INSTR(0x73, 0x02), + ILI9881C_COMMAND_INSTR(0x74, 0x02), + ILI9881C_COMMAND_INSTR(0x75, 0x02), + ILI9881C_COMMAND_INSTR(0x76, 0x00), + ILI9881C_COMMAND_INSTR(0x77, 0x01), + ILI9881C_COMMAND_INSTR(0x78, 0x0D), + ILI9881C_COMMAND_INSTR(0x79, 0x0C), + ILI9881C_COMMAND_INSTR(0x7a, 0x0F), + ILI9881C_COMMAND_INSTR(0x7b, 0x0E), + ILI9881C_COMMAND_INSTR(0x7c, 0x06), + ILI9881C_COMMAND_INSTR(0x7d, 0x07), + ILI9881C_COMMAND_INSTR(0x7e, 0x02), + ILI9881C_COMMAND_INSTR(0x7f, 0x02), + ILI9881C_COMMAND_INSTR(0x80, 0x08), + ILI9881C_COMMAND_INSTR(0x81, 0x02), + ILI9881C_COMMAND_INSTR(0x82, 0x02), + ILI9881C_COMMAND_INSTR(0x83, 0x02), + ILI9881C_COMMAND_INSTR(0x84, 0x02), + ILI9881C_COMMAND_INSTR(0x85, 0x02), + ILI9881C_COMMAND_INSTR(0x86, 0x02), + ILI9881C_COMMAND_INSTR(0x87, 0x02), + ILI9881C_COMMAND_INSTR(0x88, 0x02), + ILI9881C_COMMAND_INSTR(0x89, 0x02), + ILI9881C_COMMAND_INSTR(0x8A, 0x02), + + ILI9881C_SWITCH_PAGE_INSTR(4), + ILI9881C_COMMAND_INSTR(0x6c, 0x15), + ILI9881C_COMMAND_INSTR(0x6e, 0x30), + ILI9881C_COMMAND_INSTR(0x6f, 0x33), + ILI9881C_COMMAND_INSTR(0x8d, 0x15), + ILI9881C_COMMAND_INSTR(0x3a, 0xa4), + ILI9881C_COMMAND_INSTR(0x87, 0xba), + ILI9881C_COMMAND_INSTR(0x26, 0x76), + ILI9881C_COMMAND_INSTR(0xb2, 0xd1), + + ILI9881C_SWITCH_PAGE_INSTR(1), + ILI9881C_COMMAND_INSTR(0x22, 0x0A), + ILI9881C_COMMAND_INSTR(0x31, 0x0B), + ILI9881C_COMMAND_INSTR(0x50, 0xa5), + ILI9881C_COMMAND_INSTR(0x51, 0xa0), + ILI9881C_COMMAND_INSTR(0x53, 0x70), + ILI9881C_COMMAND_INSTR(0x55, 0x7A), + ILI9881C_COMMAND_INSTR(0x60, 0x14), + + ILI9881C_COMMAND_INSTR(0xA0, 0x00), + ILI9881C_COMMAND_INSTR(0xA1, 0x53), + ILI9881C_COMMAND_INSTR(0xA2, 0x50), + ILI9881C_COMMAND_INSTR(0xA3, 0x20), + ILI9881C_COMMAND_INSTR(0xA4, 0x27), + ILI9881C_COMMAND_INSTR(0xA5, 0x33), + ILI9881C_COMMAND_INSTR(0xA6, 0x25), + ILI9881C_COMMAND_INSTR(0xA7, 0x25), + ILI9881C_COMMAND_INSTR(0xA8, 0xD4), + ILI9881C_COMMAND_INSTR(0xA9, 0x1A), + ILI9881C_COMMAND_INSTR(0xAA, 0x2B), + ILI9881C_COMMAND_INSTR(0xAB, 0xB5), + ILI9881C_COMMAND_INSTR(0xAC, 0x19), + ILI9881C_COMMAND_INSTR(0xAD, 0x18), + ILI9881C_COMMAND_INSTR(0xAE, 0x53), + ILI9881C_COMMAND_INSTR(0xAF, 0x1A), + ILI9881C_COMMAND_INSTR(0xB0, 0x25), + ILI9881C_COMMAND_INSTR(0xB1, 0x62), + ILI9881C_COMMAND_INSTR(0xB2, 0x6A), + ILI9881C_COMMAND_INSTR(0xB3, 0x31), + + ILI9881C_COMMAND_INSTR(0xC0, 0x00), + ILI9881C_COMMAND_INSTR(0xC1, 0x53), + 
ILI9881C_COMMAND_INSTR(0xC2, 0x50), + ILI9881C_COMMAND_INSTR(0xC3, 0x20), + ILI9881C_COMMAND_INSTR(0xC4, 0x27), + ILI9881C_COMMAND_INSTR(0xC5, 0x33), + ILI9881C_COMMAND_INSTR(0xC6, 0x25), + ILI9881C_COMMAND_INSTR(0xC7, 0x25), + ILI9881C_COMMAND_INSTR(0xC8, 0xD4), + ILI9881C_COMMAND_INSTR(0xC9, 0x1A), + ILI9881C_COMMAND_INSTR(0xCA, 0x2B), + ILI9881C_COMMAND_INSTR(0xCB, 0xB5), + ILI9881C_COMMAND_INSTR(0xCC, 0x19), + ILI9881C_COMMAND_INSTR(0xCD, 0x18), + ILI9881C_COMMAND_INSTR(0xCE, 0x53), + ILI9881C_COMMAND_INSTR(0xCF, 0x1A), + ILI9881C_COMMAND_INSTR(0xD0, 0x25), + ILI9881C_COMMAND_INSTR(0xD1, 0x62), + ILI9881C_COMMAND_INSTR(0xD2, 0x6A), + ILI9881C_COMMAND_INSTR(0xD3, 0x31), + ILI9881C_SWITCH_PAGE_INSTR(0), + ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c), + ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00), +}; + static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel) { return container_of(panel, struct ili9881c, panel); @@ -1014,6 +1211,23 @@ static const struct drm_display_mode w552946aba_default_mode = { .height_mm = 121, }; +static const struct drm_display_mode am8001280g_default_mode = { + .clock = 67911, + + .hdisplay = 800, + .hsync_start = 800 + 20, + .hsync_end = 800 + 20 + 32, + .htotal = 800 + 20 + 32 + 20, + + .vdisplay = 1280, + .vsync_start = 1280 + 6, + .vsync_end = 1280 + 6 + 8, + .vtotal = 1280 + 6 + 8 + 4, + + .width_mm = 94, + .height_mm = 151, +}; + static int ili9881c_get_modes(struct drm_panel *panel, struct drm_connector *connector) { @@ -1094,6 +1308,8 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi) return ret; } + ctx->panel.prepare_prev_first = true; + ret = drm_panel_of_backlight(&ctx->panel); if (ret) return ret; @@ -1145,11 +1361,20 @@ static const struct ili9881c_desc w552946aba_desc = { MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, }; +static const struct ili9881c_desc am8001280g_desc = { + .init = am8001280g_init, + .init_length = ARRAY_SIZE(am8001280g_init), + .mode = &am8001280g_default_mode, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM, +}; + static const struct of_device_id ili9881c_of_match[] = { { .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc }, { .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc }, { .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc }, { .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc }, + { .compatible = "ampire,am8001280g", .data = &am8001280g_desc }, { } }; MODULE_DEVICE_TABLE(of, ili9881c_of_match); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 9367a4572dcf..8017ad33cf18 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1324,6 +1324,35 @@ static const struct panel_desc bananapi_s070wv20_ct16 = { }, }; +static const struct drm_display_mode boe_bp101wx1_100_mode = { + .clock = 78945, + .hdisplay = 1280, + .hsync_start = 1280 + 0, + .hsync_end = 1280 + 0 + 2, + .htotal = 1280 + 62 + 0 + 2, + .vdisplay = 800, + .vsync_start = 800 + 8, + .vsync_end = 800 + 8 + 2, + .vtotal = 800 + 6 + 8 + 2, +}; + +static const struct panel_desc boe_bp101wx1_100 = { + .modes = &boe_bp101wx1_100_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 217, + .height = 136, + }, + .delay = { + .enable = 50, + .disable = 50, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static 
const struct display_timing boe_ev121wxm_n10_1850_timing = { .pixelclock = { 69922000, 71000000, 72293000 }, .hactive = { 1280, 1280, 1280 }, @@ -1973,6 +2002,33 @@ static const struct panel_desc eink_vb3300_kca = { .connector_type = DRM_MODE_CONNECTOR_DPI, }; +static const struct display_timing evervision_vgg644804_timing = { + .pixelclock = { 25175000, 25175000, 25175000 }, + .hactive = { 640, 640, 640 }, + .hfront_porch = { 16, 16, 16 }, + .hback_porch = { 82, 114, 170 }, + .hsync_len = { 5, 30, 30 }, + .vactive = { 480, 480, 480 }, + .vfront_porch = { 10, 10, 10 }, + .vback_porch = { 30, 32, 34 }, + .vsync_len = { 1, 3, 5 }, + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | + DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE | + DISPLAY_FLAGS_SYNC_POSEDGE, +}; + +static const struct panel_desc evervision_vgg644804 = { + .timings = &evervision_vgg644804_timing, + .num_timings = 1, + .bpc = 8, + .size = { + .width = 115, + .height = 86, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE, +}; + static const struct display_timing evervision_vgg804821_timing = { .pixelclock = { 27600000, 33300000, 50000000 }, .hactive = { 800, 800, 800 }, @@ -4254,6 +4310,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "bananapi,s070wv20-ct16", .data = &bananapi_s070wv20_ct16, }, { + .compatible = "boe,bp101wx1-100", + .data = &boe_bp101wx1_100, + }, { .compatible = "boe,ev121wxm-n10-1850", .data = &boe_ev121wxm_n10_1850, }, { @@ -4335,6 +4394,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "eink,vb3300-kca", .data = &eink_vb3300_kca, }, { + .compatible = "evervision,vgg644804", + .data = &evervision_vgg644804, + }, { .compatible = "evervision,vgg804821", .data = &evervision_vgg804821, }, { diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index c90ad5ee34e7..a45e4addcc19 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -421,6 +421,9 @@ static int panfrost_device_runtime_suspend(struct device *dev) return -EBUSY; panfrost_devfreq_suspend(pfdev); + panfrost_job_suspend_irq(pfdev); + panfrost_mmu_suspend_irq(pfdev); + panfrost_gpu_suspend_irq(pfdev); panfrost_gpu_power_off(pfdev); return 0; diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 0fc558db6bfd..62f7e3527385 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -25,6 +25,13 @@ struct panfrost_perfcnt; #define NUM_JOB_SLOTS 3 #define MAX_PM_DOMAINS 5 +enum panfrost_drv_comp_bits { + PANFROST_COMP_BIT_GPU, + PANFROST_COMP_BIT_JOB, + PANFROST_COMP_BIT_MMU, + PANFROST_COMP_BIT_MAX +}; + /** * enum panfrost_gpu_pm - Supported kernel power management features * @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend @@ -94,6 +101,8 @@ struct panfrost_device { struct device *dev; struct drm_device *ddev; struct platform_device *pdev; + int gpu_irq; + int mmu_irq; void __iomem *iomem; struct clk *clock; @@ -107,6 +116,7 @@ struct panfrost_device { struct panfrost_features features; const struct panfrost_compatible *comp; + DECLARE_BITMAP(is_suspended, PANFROST_COMP_BIT_MAX); spinlock_t as_lock; unsigned long as_in_use_mask; diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 09f5e1563ebd..9063ce254642 100644 --- 
a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -22,9 +22,13 @@ static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data) { struct panfrost_device *pfdev = data; - u32 state = gpu_read(pfdev, GPU_INT_STAT); - u32 fault_status = gpu_read(pfdev, GPU_FAULT_STATUS); + u32 fault_status, state; + if (test_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended)) + return IRQ_NONE; + + fault_status = gpu_read(pfdev, GPU_FAULT_STATUS); + state = gpu_read(pfdev, GPU_INT_STAT); if (!state) return IRQ_NONE; @@ -61,6 +65,8 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) gpu_write(pfdev, GPU_INT_MASK, 0); gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED); + clear_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended); + gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET); ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000); @@ -78,7 +84,12 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) } gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL); - gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL); + + /* Only enable the interrupts we care about */ + gpu_write(pfdev, GPU_INT_MASK, + GPU_IRQ_MASK_ERROR | + GPU_IRQ_PERFCNT_SAMPLE_COMPLETED | + GPU_IRQ_CLEAN_CACHES_COMPLETED); /* * All in-flight jobs should have released their cycle @@ -425,11 +436,10 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev) void panfrost_gpu_power_off(struct panfrost_device *pfdev) { - u64 core_mask = panfrost_get_core_mask(pfdev); int ret; u32 val; - gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present & core_mask); + gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present); ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO, val, !val, 1, 1000); if (ret) @@ -441,16 +451,24 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev) if (ret) dev_err(pfdev->dev, "tiler power transition timeout"); - gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present & core_mask); + gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present); ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO, val, !val, 0, 1000); if (ret) dev_err(pfdev->dev, "l2 power transition timeout"); } +void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev) +{ + set_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended); + + gpu_write(pfdev, GPU_INT_MASK, 0); + synchronize_irq(pfdev->gpu_irq); +} + int panfrost_gpu_init(struct panfrost_device *pfdev) { - int err, irq; + int err; err = panfrost_gpu_soft_reset(pfdev); if (err) @@ -465,11 +483,11 @@ int panfrost_gpu_init(struct panfrost_device *pfdev) dma_set_max_seg_size(pfdev->dev, UINT_MAX); - irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu"); - if (irq < 0) - return irq; + pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu"); + if (pfdev->gpu_irq < 0) + return pfdev->gpu_irq; - err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler, + err = devm_request_irq(pfdev->dev, pfdev->gpu_irq, panfrost_gpu_irq_handler, IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev); if (err) { dev_err(pfdev->dev, "failed to request gpu irq"); diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h index 876fdad9f721..d841b86504ea 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.h +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h @@ -15,6 +15,7 @@ u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev); int panfrost_gpu_soft_reset(struct panfrost_device *pfdev); void 
panfrost_gpu_power_on(struct panfrost_device *pfdev); void panfrost_gpu_power_off(struct panfrost_device *pfdev); +void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev); void panfrost_cycle_counter_get(struct panfrost_device *pfdev); void panfrost_cycle_counter_put(struct panfrost_device *pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index f9446e197428..0c2dbf6ef2a5 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -405,6 +405,8 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev) int j; u32 irq_mask = 0; + clear_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended); + for (j = 0; j < NUM_JOB_SLOTS; j++) { irq_mask |= MK_JS_MASK(j); } @@ -413,6 +415,14 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev) job_write(pfdev, JOB_INT_MASK, irq_mask); } +void panfrost_job_suspend_irq(struct panfrost_device *pfdev) +{ + set_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended); + + job_write(pfdev, JOB_INT_MASK, 0); + synchronize_irq(pfdev->js->irq); +} + static void panfrost_job_handle_err(struct panfrost_device *pfdev, struct panfrost_job *job, unsigned int js) @@ -792,17 +802,25 @@ static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data) struct panfrost_device *pfdev = data; panfrost_job_handle_irqs(pfdev); - job_write(pfdev, JOB_INT_MASK, - GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | - GENMASK(NUM_JOB_SLOTS - 1, 0)); + + /* Enable interrupts only if we're not about to get suspended */ + if (!test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended)) + job_write(pfdev, JOB_INT_MASK, + GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | + GENMASK(NUM_JOB_SLOTS - 1, 0)); + return IRQ_HANDLED; } static irqreturn_t panfrost_job_irq_handler(int irq, void *data) { struct panfrost_device *pfdev = data; - u32 status = job_read(pfdev, JOB_INT_STAT); + u32 status; + + if (test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended)) + return IRQ_NONE; + status = job_read(pfdev, JOB_INT_STAT); if (!status) return IRQ_NONE; diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 17ff808dba07..ec581b97852b 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -47,6 +47,7 @@ int panfrost_job_get_slot(struct panfrost_job *job); int panfrost_job_push(struct panfrost_job *job); void panfrost_job_put(struct panfrost_job *job); void panfrost_job_enable_interrupts(struct panfrost_device *pfdev); +void panfrost_job_suspend_irq(struct panfrost_device *pfdev); int panfrost_job_is_idle(struct panfrost_device *pfdev); #endif diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 846dd697c410..f38385fe76bb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -231,6 +231,8 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev) { struct panfrost_mmu *mmu, *mmu_tmp; + clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended); + spin_lock(&pfdev->as_lock); pfdev->as_alloc_mask = 0; @@ -670,6 +672,9 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data) { struct panfrost_device *pfdev = data; + if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) + return IRQ_NONE; + if (!mmu_read(pfdev, MMU_INT_STAT)) return IRQ_NONE; @@ -744,22 +749,25 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data) status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask; } - 
spin_lock(&pfdev->as_lock); - mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask); - spin_unlock(&pfdev->as_lock); + /* Enable interrupts only if we're not about to get suspended */ + if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) { + spin_lock(&pfdev->as_lock); + mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask); + spin_unlock(&pfdev->as_lock); + } return IRQ_HANDLED; }; int panfrost_mmu_init(struct panfrost_device *pfdev) { - int err, irq; + int err; - irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu"); - if (irq < 0) - return irq; + pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu"); + if (pfdev->mmu_irq < 0) + return pfdev->mmu_irq; - err = devm_request_threaded_irq(pfdev->dev, irq, + err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq, panfrost_mmu_irq_handler, panfrost_mmu_irq_handler_thread, IRQF_SHARED, KBUILD_MODNAME "-mmu", @@ -777,3 +785,11 @@ void panfrost_mmu_fini(struct panfrost_device *pfdev) { mmu_write(pfdev, MMU_INT_MASK, 0); } + +void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev) +{ + set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended); + + mmu_write(pfdev, MMU_INT_MASK, 0); + synchronize_irq(pfdev->mmu_irq); +} diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h index cc2a0d307feb..022a9a74a114 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.h +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h @@ -14,6 +14,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping); int panfrost_mmu_init(struct panfrost_device *pfdev); void panfrost_mmu_fini(struct panfrost_device *pfdev); void panfrost_mmu_reset(struct panfrost_device *pfdev); +void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev); u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu); void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu); diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 404b0483bb7c..c6d35c33d5d6 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -485,7 +485,6 @@ static int qxl_primary_atomic_check(struct drm_plane *plane, static int qxl_primary_apply_cursor(struct qxl_device *qdev, struct drm_plane_state *plane_state) { - struct drm_framebuffer *fb = plane_state->fb; struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc); struct qxl_cursor_cmd *cmd; struct qxl_release *release; @@ -510,8 +509,8 @@ static int qxl_primary_apply_cursor(struct qxl_device *qdev, cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_SET; - cmd->u.set.position.x = plane_state->crtc_x + fb->hot_x; - cmd->u.set.position.y = plane_state->crtc_y + fb->hot_y; + cmd->u.set.position.x = plane_state->crtc_x + plane_state->hotspot_x; + cmd->u.set.position.y = plane_state->crtc_y + plane_state->hotspot_y; cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0); @@ -531,7 +530,6 @@ out_free_release: static int qxl_primary_move_cursor(struct qxl_device *qdev, struct drm_plane_state *plane_state) { - struct drm_framebuffer *fb = plane_state->fb; struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc); struct qxl_cursor_cmd *cmd; struct qxl_release *release; @@ -554,8 +552,8 @@ static int qxl_primary_move_cursor(struct qxl_device *qdev, cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_MOVE; - cmd->u.position.x = plane_state->crtc_x + fb->hot_x; - cmd->u.position.y = plane_state->crtc_y + 
fb->hot_y; + cmd->u.position.x = plane_state->crtc_x + plane_state->hotspot_x; + cmd->u.position.y = plane_state->crtc_y + plane_state->hotspot_y; qxl_release_unmap(qdev, release, &cmd->release_info); qxl_release_fence_buffer_objects(release); @@ -851,8 +849,8 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane, struct qxl_bo *old_cursor_bo = qcrtc->cursor_bo; qcrtc->cursor_bo = qxl_create_cursor(qdev, user_bo, - new_state->fb->hot_x, - new_state->fb->hot_y); + new_state->hotspot_x, + new_state->hotspot_y); qxl_free_cursor(old_cursor_bo); } diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 46de4f171970..beee5563031a 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -285,7 +285,7 @@ static const struct drm_ioctl_desc qxl_ioctls[] = { }; static struct drm_driver qxl_driver = { - .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT, .dumb_create = qxl_mode_dumb_create, .dumb_map_offset = drm_gem_ttm_dumb_map_offset, diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 307a890fde13..32069acd93f8 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -119,7 +119,6 @@ struct qxl_output { #define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base) #define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base) -#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc) struct qxl_mman { struct ttm_device bdev; @@ -256,8 +255,6 @@ struct qxl_device { #define to_qxl(dev) container_of(dev, struct qxl_device, ddev) -int qxl_debugfs_fence_init(struct qxl_device *rdev); - int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev); void qxl_device_fini(struct qxl_device *qdev); @@ -344,8 +341,6 @@ qxl_image_alloc_objects(struct qxl_device *qdev, int height, int stride); void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage); -void qxl_update_screen(struct qxl_device *qxl); - /* qxl io operations (qxl_cmd.c) */ void qxl_io_create_primary(struct qxl_device *qdev, @@ -445,8 +440,6 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev, int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo); -struct qxl_drv_surface * -qxl_surface_lookup(struct drm_device *dev, int surface_id); void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing); /* qxl_ioctl.c */ diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 34a1c73d3938..02a65971d140 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -33,7 +33,6 @@ #include <linux/firmware.h> #include <linux/platform_device.h> -#include <drm/drm_legacy.h> #include "radeon_family.h" diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c index 8f9a728affde..07ad17d24294 100644 --- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c +++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c @@ -14,7 +14,6 @@ #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> -#include <drm/drm_plane_helper.h> #include "shmob_drm_drv.h" #include "shmob_drm_kms.h" diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 4d42b1e4daa6..3c4f5a392b06 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ 
b/drivers/gpu/drm/scheduler/sched_entity.c @@ -81,12 +81,16 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, */ pr_warn("%s: called with uninitialized scheduler\n", __func__); } else if (num_sched_list) { - /* The "priority" of an entity cannot exceed the number - * of run-queues of a scheduler. + /* The "priority" of an entity cannot exceed the number of run-queues of a + * scheduler. Protect against num_rqs being 0, by converting to signed. Choose + * the lowest priority available. */ - if (entity->priority >= sched_list[0]->num_rqs) - entity->priority = max_t(u32, sched_list[0]->num_rqs, - DRM_SCHED_PRIORITY_MIN); + if (entity->priority >= sched_list[0]->num_rqs) { + drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n", + entity->priority, sched_list[0]->num_rqs); + entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1, + (s32) DRM_SCHED_PRIORITY_KERNEL); + } entity->rq = sched_list[0]->sched_rq[entity->priority]; } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 044a8c4875ba..550492a7a031 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1029,9 +1029,8 @@ EXPORT_SYMBOL(drm_sched_job_cleanup); void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity) { - if (drm_sched_entity_is_ready(entity)) - if (drm_sched_can_queue(sched, entity)) - drm_sched_run_job_queue(sched); + if (drm_sched_can_queue(sched, entity)) + drm_sched_run_job_queue(sched); } /** @@ -1051,8 +1050,9 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) struct drm_sched_entity *entity; int i; - /* Kernel run queue has higher priority than normal run queue*/ - for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + /* Start with the highest priority. + */ + for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ? 
drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) : drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]); @@ -1291,7 +1291,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, if (!sched->sched_rq) goto Out_free; sched->num_rqs = num_rqs; - for (i = DRM_SCHED_PRIORITY_MIN; i < sched->num_rqs; i++) { + for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL); if (!sched->sched_rq[i]) goto Out_unroll; @@ -1312,7 +1312,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, sched->ready = true; return 0; Out_unroll: - for (--i ; i >= DRM_SCHED_PRIORITY_MIN; i--) + for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--) kfree(sched->sched_rq[i]); Out_free: kfree(sched->sched_rq); @@ -1338,7 +1338,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) drm_sched_wqueue_stop(sched); - for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { struct drm_sched_rq *rq = sched->sched_rq[i]; spin_lock(&rq->lock); @@ -1390,9 +1390,7 @@ void drm_sched_increase_karma(struct drm_sched_job *bad) if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) { atomic_inc(&bad->karma); - for (i = DRM_SCHED_PRIORITY_MIN; - i < min_t(typeof(sched->num_rqs), sched->num_rqs, DRM_SCHED_PRIORITY_KERNEL); - i++) { + for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) { struct drm_sched_rq *rq = sched->sched_rq[i]; spin_lock(&rq->lock); diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h index acf7cedf0c1a..075c5c3ee75a 100644 --- a/drivers/gpu/drm/solomon/ssd130x.h +++ b/drivers/gpu/drm/solomon/ssd130x.h @@ -17,7 +17,6 @@ #include <drm/drm_crtc.h> #include <drm/drm_drv.h> #include <drm/drm_encoder.h> -#include <drm/drm_plane_helper.h> #include <linux/regmap.h> diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile index 2645af241ff0..d6183b3d7688 100644 --- a/drivers/gpu/drm/tests/Makefile +++ b/drivers/gpu/drm/tests/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \ drm_format_helper_test.o \ drm_format_test.o \ drm_framebuffer_test.o \ + drm_gem_shmem_test.o \ drm_managed_test.o \ drm_mm_test.o \ drm_modes_test.o \ diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c new file mode 100644 index 000000000000..91202e40cde9 --- /dev/null +++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit test suite for GEM objects backed by shmem buffers + * + * Copyright (C) 2023 Red Hat, Inc. + * + * Author: Marco Pagani <marpagan@redhat.com> + */ + +#include <linux/dma-buf.h> +#include <linux/iosys-map.h> +#include <linux/sizes.h> + +#include <kunit/test.h> + +#include <drm/drm_device.h> +#include <drm/drm_drv.h> +#include <drm/drm_gem.h> +#include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_kunit_helpers.h> + +#define TEST_SIZE SZ_1M +#define TEST_BYTE 0xae + +/* + * Wrappers to avoid an explicit type casting when passing action + * functions to kunit_add_action(). + */ +static void kfree_wrapper(void *ptr) +{ + const void *obj = ptr; + + kfree(obj); +} + +static void sg_free_table_wrapper(void *ptr) +{ + struct sg_table *sgt = ptr; + + sg_free_table(sgt); +} + +static void drm_gem_shmem_free_wrapper(void *ptr) +{ + struct drm_gem_shmem_object *shmem = ptr; + + drm_gem_shmem_free(shmem); +} + +/* + * Test creating a shmem GEM object backed by shmem buffer. 
The test + * case succeeds if the GEM object is successfully allocated with the + * shmem file node and object functions attributes set, and the size + * attribute is equal to the correct size. + */ +static void drm_gem_shmem_test_obj_create(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE); + KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp); + KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs); + + drm_gem_shmem_free(shmem); +} + +/* + * Test creating a shmem GEM object from a scatter/gather table exported + * via a DMA-BUF. The test case succeeds if the GEM object is successfully + * created with the shmem file node attribute equal to NULL and the sgt + * attribute pointing to the scatter/gather table that has been imported. + */ +static void drm_gem_shmem_test_obj_create_private(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + struct drm_gem_object *gem_obj; + struct dma_buf buf_mock; + struct dma_buf_attachment attach_mock; + struct sg_table *sgt; + char *buf; + int ret; + + /* Create a mock scatter/gather table */ + buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, buf); + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, sgt); + + ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt); + KUNIT_ASSERT_EQ(test, ret, 0); + + sg_init_one(sgt->sgl, buf, TEST_SIZE); + + /* Init a mock DMA-BUF */ + buf_mock.size = TEST_SIZE; + attach_mock.dmabuf = &buf_mock; + + gem_obj = drm_gem_shmem_prime_import_sg_table(drm_dev, &attach_mock, sgt); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj); + KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE); + KUNIT_EXPECT_NULL(test, gem_obj->filp); + KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs); + + /* The scatter/gather table will be freed by drm_gem_shmem_free */ + kunit_remove_action(test, sg_free_table_wrapper, sgt); + kunit_remove_action(test, kfree_wrapper, sgt); + + shmem = to_drm_gem_shmem_obj(gem_obj); + KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt); + + drm_gem_shmem_free(shmem); +} + +/* + * Test pinning backing pages for a shmem GEM object. The test case + * succeeds if a suitable number of backing pages are allocated, and + * the pages table counter attribute is increased by one.
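+ *
+ * As a usage sketch (illustrative only, built from the helpers this
+ * test exercises), a driver would bracket CPU access to the backing
+ * pages like this:
+ *
+ *	ret = drm_gem_shmem_pin(shmem);
+ *	if (ret)
+ *		return ret;
+ *	... access shmem->pages, kept alive by pages_use_count ...
+ *	drm_gem_shmem_unpin(shmem);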
+ */ +static void drm_gem_shmem_test_pin_pages(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + int i, ret; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + KUNIT_EXPECT_NULL(test, shmem->pages); + KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0); + + ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = drm_gem_shmem_pin(shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_ASSERT_NOT_NULL(test, shmem->pages); + KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1); + + for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++) + KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]); + + drm_gem_shmem_unpin(shmem); + KUNIT_EXPECT_NULL(test, shmem->pages); + KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0); +} + +/* + * Test creating a virtual mapping for a shmem GEM object. The test + * case succeeds if the backing memory is mapped and the reference + * counter for virtual mapping is increased by one. Moreover, the test + * case writes and then reads a test pattern over the mapped memory. + */ +static void drm_gem_shmem_test_vmap(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + struct iosys_map map; + int ret, i; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + KUNIT_EXPECT_NULL(test, shmem->vaddr); + KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0); + + ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = drm_gem_shmem_vmap(shmem, &map); + KUNIT_ASSERT_EQ(test, ret, 0); + KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr); + KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map)); + KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1); + + iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE); + for (i = 0; i < TEST_SIZE; i++) + KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE); + + drm_gem_shmem_vunmap(shmem, &map); + KUNIT_EXPECT_NULL(test, shmem->vaddr); + KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0); +} + +/* + * Test exporting a scatter/gather table of pinned pages suitable for + * PRIME usage from a shmem GEM object. The test case succeeds if a + * scatter/gather table large enough to accommodate the backing memory + * is successfully exported. + */ +static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + struct sg_table *sgt; + struct scatterlist *sg; + unsigned int si, len = 0; + int ret; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + + ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = drm_gem_shmem_pin(shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + sgt = drm_gem_shmem_get_sg_table(shmem); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); + KUNIT_EXPECT_NULL(test, shmem->sgt); + + ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt); + KUNIT_ASSERT_EQ(test, ret, 0); + + for_each_sgtable_sg(sgt, sg, si) { + KUNIT_EXPECT_NOT_NULL(test, sg); + len += sg->length; + } + + KUNIT_EXPECT_GE(test, len, TEST_SIZE); +} + +/* + * Test pinning pages and exporting a scatter/gather table suitable for + * driver usage from a shmem GEM object. 
The test case succeeds if the + * backing pages are pinned and a scatter/gather table large enough to + * accommodate the backing memory is successfully exported. + */ +static void drm_gem_shmem_test_get_sg_table(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + struct sg_table *sgt; + struct scatterlist *sg; + unsigned int si, ret, len = 0; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + + ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + /* The scatter/gather table will be freed by drm_gem_shmem_free */ + sgt = drm_gem_shmem_get_pages_sgt(shmem); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); + KUNIT_ASSERT_NOT_NULL(test, shmem->pages); + KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1); + KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt); + + for_each_sgtable_sg(sgt, sg, si) { + KUNIT_EXPECT_NOT_NULL(test, sg); + len += sg->length; + } + + KUNIT_EXPECT_GE(test, len, TEST_SIZE); +} + +/* + * Test updating the madvise state of a shmem GEM object. The test + * case checks that the function for setting madv updates it only if + * its current value is greater than or equal to zero and returns false + * if it has a negative value. + */ +static void drm_gem_shmem_test_madvise(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + int ret; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + KUNIT_ASSERT_EQ(test, shmem->madv, 0); + + ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = drm_gem_shmem_madvise(shmem, 1); + KUNIT_EXPECT_TRUE(test, ret); + KUNIT_ASSERT_EQ(test, shmem->madv, 1); + + /* Set madv to a negative value */ + ret = drm_gem_shmem_madvise(shmem, -1); + KUNIT_EXPECT_FALSE(test, ret); + KUNIT_ASSERT_EQ(test, shmem->madv, -1); + + /* Check that madv cannot be set back to a positive value */ + ret = drm_gem_shmem_madvise(shmem, 0); + KUNIT_EXPECT_FALSE(test, ret); + KUNIT_ASSERT_EQ(test, shmem->madv, -1); +} + +/* + * Test purging a shmem GEM object. First, assert that a newly created + * shmem GEM object is not purgeable. Then, set madvise to a positive + * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the + * backing pages. Finally, assert that the shmem GEM object is now + * purgeable and purge it.
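+ *
+ * As an illustrative sketch (same helpers as below, locking elided):
+ * once userspace has set a positive madv value, a shrinker can
+ * reclaim the buffer with
+ *
+ *	if (drm_gem_shmem_is_purgeable(shmem))
+ *		drm_gem_shmem_purge(shmem);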
+ */ +static void drm_gem_shmem_test_purge(struct kunit *test) +{ + struct drm_device *drm_dev = test->priv; + struct drm_gem_shmem_object *shmem; + struct sg_table *sgt; + int ret; + + shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); + + ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); + KUNIT_ASSERT_EQ(test, ret, 0); + + ret = drm_gem_shmem_is_purgeable(shmem); + KUNIT_EXPECT_FALSE(test, ret); + + ret = drm_gem_shmem_madvise(shmem, 1); + KUNIT_EXPECT_TRUE(test, ret); + + /* The scatter/gather table will be freed by drm_gem_shmem_free */ + sgt = drm_gem_shmem_get_pages_sgt(shmem); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); + + ret = drm_gem_shmem_is_purgeable(shmem); + KUNIT_EXPECT_TRUE(test, ret); + + drm_gem_shmem_purge(shmem); + KUNIT_EXPECT_NULL(test, shmem->pages); + KUNIT_EXPECT_NULL(test, shmem->sgt); + KUNIT_EXPECT_EQ(test, shmem->madv, -1); +} + +static int drm_gem_shmem_test_init(struct kunit *test) +{ + struct device *dev; + struct drm_device *drm_dev; + + /* Allocate a parent device */ + dev = drm_kunit_helper_alloc_device(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); + + /* + * The DRM core will automatically initialize the GEM core and create + * a DRM Memory Manager object which provides an address space pool + * for GEM object allocation. + */ + drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm_dev), + 0, DRIVER_GEM); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev); + + test->priv = drm_dev; + + return 0; +} + +static struct kunit_case drm_gem_shmem_test_cases[] = { + KUNIT_CASE(drm_gem_shmem_test_obj_create), + KUNIT_CASE(drm_gem_shmem_test_obj_create_private), + KUNIT_CASE(drm_gem_shmem_test_pin_pages), + KUNIT_CASE(drm_gem_shmem_test_vmap), + KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt), + KUNIT_CASE(drm_gem_shmem_test_get_sg_table), + KUNIT_CASE(drm_gem_shmem_test_madvise), + KUNIT_CASE(drm_gem_shmem_test_purge), + {} +}; + +static struct kunit_suite drm_gem_shmem_suite = { + .name = "drm_gem_shmem", + .init = drm_gem_shmem_test_init, + .test_cases = drm_gem_shmem_test_cases +}; + +kunit_test_suite(drm_gem_shmem_suite); + +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c index 5e5e466f35d1..5f838980c7a1 100644 --- a/drivers/gpu/drm/tidss/tidss_crtc.c +++ b/drivers/gpu/drm/tidss/tidss_crtc.c @@ -169,14 +169,10 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc, struct tidss_device *tidss = to_tidss(ddev); unsigned long flags; - dev_dbg(ddev->dev, - "%s: %s enabled %d, needs modeset %d, event %p\n", __func__, - crtc->name, drm_atomic_crtc_needs_modeset(crtc->state), - crtc->state->enable, crtc->state->event); - - /* There is nothing to do if CRTC is not going to be enabled. */ - if (!crtc->state->enable) - return; + dev_dbg(ddev->dev, "%s: %s is %sactive, %s modeset, event %p\n", + __func__, crtc->name, crtc->state->active ? "" : "not ", + drm_atomic_crtc_needs_modeset(crtc->state) ?
"needs" : "doesn't need", + crtc->state->event); /* * Flush CRTC changes with go bit only if new modeset is not diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index 9d9dee7abaef..1ad711f8d2a8 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -322,6 +322,60 @@ const struct dispc_features dispc_am625_feats = { .vid_order = { 1, 0 }, }; +const struct dispc_features dispc_am62a7_feats = { + /* + * if the code reaches dispc_mode_valid with VP1, + * it should return MODE_BAD. + */ + .max_pclk_khz = { + [DISPC_VP_TIED_OFF] = 0, + [DISPC_VP_DPI] = 165000, + }, + + .scaling = { + .in_width_max_5tap_rgb = 1280, + .in_width_max_3tap_rgb = 2560, + .in_width_max_5tap_yuv = 2560, + .in_width_max_3tap_yuv = 4096, + .upscale_limit = 16, + .downscale_limit_5tap = 4, + .downscale_limit_3tap = 2, + /* + * The max supported pixel inc value is 255. The value + * of pixel inc is calculated like this: 1+(xinc-1)*bpp. + * The maximum bpp of all formats supported by the HW + * is 8. So the maximum supported xinc value is 32, + * because 1+(32-1)*8 < 255 < 1+(33-1)*8. + */ + .xinc_max = 32, + }, + + .subrev = DISPC_AM62A7, + + .common = "common", + .common_regs = tidss_am65x_common_regs, + + .num_vps = 2, + .vp_name = { "vp1", "vp2" }, + .ovr_name = { "ovr1", "ovr2" }, + .vpclk_name = { "vp1", "vp2" }, + /* VP1 of the DSS in AM62A7 SoC is tied off internally */ + .vp_bus_type = { DISPC_VP_TIED_OFF, DISPC_VP_DPI }, + + .vp_feat = { .color = { + .has_ctm = true, + .gamma_size = 256, + .gamma_type = TIDSS_GAMMA_8BIT, + }, + }, + + .num_planes = 2, + /* note: vid is plane_id 0 and vidl1 is plane_id 1 */ + .vid_name = { "vid", "vidl1" }, + .vid_lite = { false, true, }, + .vid_order = { 1, 0 }, +}; + static const u16 *dispc_common_regmap; struct dss_vp_data { @@ -824,6 +878,7 @@ dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc) case DISPC_K2G: return dispc_k2g_read_and_clear_irqstatus(dispc); case DISPC_AM625: + case DISPC_AM62A7: case DISPC_AM65X: case DISPC_J721E: return dispc_k3_read_and_clear_irqstatus(dispc); @@ -840,6 +895,7 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask) dispc_k2g_set_irqenable(dispc, mask); break; case DISPC_AM625: + case DISPC_AM62A7: case DISPC_AM65X: case DISPC_J721E: dispc_k3_set_irqenable(dispc, mask); @@ -1331,6 +1387,7 @@ void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane, x, y, layer); break; case DISPC_AM625: + case DISPC_AM62A7: case DISPC_AM65X: dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport, x, y, layer); @@ -2250,6 +2307,7 @@ static void dispc_plane_init(struct dispc_device *dispc) dispc_k2g_plane_init(dispc); break; case DISPC_AM625: + case DISPC_AM62A7: case DISPC_AM65X: case DISPC_J721E: dispc_k3_plane_init(dispc); @@ -2357,6 +2415,7 @@ static void dispc_vp_write_gamma_table(struct dispc_device *dispc, dispc_k2g_vp_write_gamma_table(dispc, hw_videoport); break; case DISPC_AM625: + case DISPC_AM62A7: case DISPC_AM65X: dispc_am65x_vp_write_gamma_table(dispc, hw_videoport); break; @@ -2702,18 +2761,83 @@ static void dispc_init_errata(struct dispc_device *dispc) } } -static void dispc_softreset(struct dispc_device *dispc) +/* + * K2G display controller does not support soft reset, so we do a basic manual + * reset here: make sure the IRQs are masked and VPs are disabled.
+ */ +static void dispc_softreset_k2g(struct dispc_device *dispc) +{ + dispc_set_irqenable(dispc, 0); + dispc_read_and_clear_irqstatus(dispc); + + for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx) + VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0); +} + +static int dispc_softreset(struct dispc_device *dispc) { u32 val; - int ret = 0; + int ret; + + if (dispc->feat->subrev == DISPC_K2G) { + dispc_softreset_k2g(dispc); + return 0; + } /* Soft reset */ REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1); /* Wait for reset to complete */ ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS, val, val & 1, 100, 5000); + if (ret) { + dev_err(dispc->dev, "failed to reset dispc\n"); + return ret; + } + + return 0; +} + +static int dispc_init_hw(struct dispc_device *dispc) +{ + struct device *dev = dispc->dev; + int ret; + + ret = pm_runtime_set_active(dev); + if (ret) { + dev_err(dev, "Failed to set DSS PM to active\n"); + return ret; + } + + ret = clk_prepare_enable(dispc->fclk); + if (ret) { + dev_err(dev, "Failed to enable DSS fclk\n"); + goto err_runtime_suspend; + } + + ret = dispc_softreset(dispc); if (ret) - dev_warn(dispc->dev, "failed to reset dispc\n"); + goto err_clk_disable; + + clk_disable_unprepare(dispc->fclk); + ret = pm_runtime_set_suspended(dev); + if (ret) { + dev_err(dev, "Failed to set DSS PM to suspended\n"); + return ret; + } + + return 0; + +err_clk_disable: + clk_disable_unprepare(dispc->fclk); + +err_runtime_suspend: + ret = pm_runtime_set_suspended(dev); + if (ret) { + dev_err(dev, "Failed to set DSS PM to suspended\n"); + return ret; + } + + return ret; } int dispc_init(struct tidss_device *tidss) @@ -2777,10 +2901,6 @@ int dispc_init(struct tidss_device *tidss) return r; } - /* K2G display controller does not support soft reset */ - if (feat->subrev != DISPC_K2G) - dispc_softreset(dispc); - for (i = 0; i < dispc->feat->num_vps; i++) { u32 gamma_size = dispc->feat->vp_feat.color.gamma_size; u32 *gamma_table; @@ -2829,6 +2949,10 @@ int dispc_init(struct tidss_device *tidss) of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth", &dispc->memory_bandwidth_limit); + r = dispc_init_hw(dispc); + if (r) + return r; + tidss->dispc = dispc; return 0; diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h index 33ac5ad7a423..086327d51a90 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.h +++ b/drivers/gpu/drm/tidss/tidss_dispc.h @@ -54,12 +54,14 @@ enum dispc_vp_bus_type { DISPC_VP_DPI, /* DPI output */ DISPC_VP_OLDI, /* OLDI (LVDS) output */ DISPC_VP_INTERNAL, /* SoC internal routing */ + DISPC_VP_TIED_OFF, /* Tied off / Unavailable */ DISPC_VP_MAX_BUS_TYPE, }; enum dispc_dss_subrevision { DISPC_K2G, DISPC_AM625, + DISPC_AM62A7, DISPC_AM65X, DISPC_J721E, }; @@ -88,6 +90,7 @@ struct dispc_features { extern const struct dispc_features dispc_k2g_feats; extern const struct dispc_features dispc_am625_feats; +extern const struct dispc_features dispc_am62a7_feats; extern const struct dispc_features dispc_am65x_feats; extern const struct dispc_features dispc_j721e_feats; diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index 4d063eb9cd0b..d15f836dca95 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -32,9 +32,9 @@ int tidss_runtime_get(struct tidss_device *tidss) dev_dbg(tidss->dev, "%s\n", __func__); - r = pm_runtime_get_sync(tidss->dev); + r = pm_runtime_resume_and_get(tidss->dev); WARN_ON(r < 0); - return r < 0 ? 
r : 0; + return r; } void tidss_runtime_put(struct tidss_device *tidss) @@ -43,7 +43,9 @@ void tidss_runtime_put(struct tidss_device *tidss) dev_dbg(tidss->dev, "%s\n", __func__); - r = pm_runtime_put_sync(tidss->dev); + pm_runtime_mark_last_busy(tidss->dev); + + r = pm_runtime_put_autosuspend(tidss->dev); WARN_ON(r < 0); } @@ -136,6 +138,8 @@ static int tidss_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tidss); + spin_lock_init(&tidss->wait_lock); + ret = dispc_init(tidss); if (ret) { dev_err(dev, "failed to initialize dispc: %d\n", ret); @@ -144,6 +148,9 @@ static int tidss_probe(struct platform_device *pdev) pm_runtime_enable(dev); + pm_runtime_set_autosuspend_delay(dev, 1000); + pm_runtime_use_autosuspend(dev); + #ifndef CONFIG_PM /* If we don't have PM, we need to call resume manually */ dispc_runtime_resume(tidss->dispc); @@ -192,6 +199,7 @@ err_runtime_suspend: #ifndef CONFIG_PM dispc_runtime_suspend(tidss->dispc); #endif + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); return ret; @@ -215,6 +223,7 @@ static void tidss_remove(struct platform_device *pdev) /* If we don't have PM, we need to call suspend manually */ dispc_runtime_suspend(tidss->dispc); #endif + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); /* devm allocated dispc goes away with the dev so mark it NULL */ @@ -231,6 +240,7 @@ static void tidss_shutdown(struct platform_device *pdev) static const struct of_device_id tidss_of_table[] = { { .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, }, { .compatible = "ti,am625-dss", .data = &dispc_am625_feats, }, + { .compatible = "ti,am62a7-dss", .data = &dispc_am62a7_feats, }, { .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, }, { .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, }, { } diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c index 0c681c7600bc..604334ef526a 100644 --- a/drivers/gpu/drm/tidss/tidss_irq.c +++ b/drivers/gpu/drm/tidss/tidss_irq.c @@ -93,33 +93,21 @@ void tidss_irq_resume(struct tidss_device *tidss) spin_unlock_irqrestore(&tidss->wait_lock, flags); } -static void tidss_irq_preinstall(struct drm_device *ddev) -{ - struct tidss_device *tidss = to_tidss(ddev); - - spin_lock_init(&tidss->wait_lock); - - tidss_runtime_get(tidss); - - dispc_set_irqenable(tidss->dispc, 0); - dispc_read_and_clear_irqstatus(tidss->dispc); - - tidss_runtime_put(tidss); -} - -static void tidss_irq_postinstall(struct drm_device *ddev) +int tidss_irq_install(struct drm_device *ddev, unsigned int irq) { struct tidss_device *tidss = to_tidss(ddev); - unsigned long flags; - unsigned int i; + int ret; - tidss_runtime_get(tidss); + if (irq == IRQ_NOTCONNECTED) + return -ENOTCONN; - spin_lock_irqsave(&tidss->wait_lock, flags); + ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev); + if (ret) + return ret; tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR; - for (i = 0; i < tidss->num_crtcs; ++i) { + for (unsigned int i = 0; i < tidss->num_crtcs; ++i) { struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]); tidss->irq_mask |= DSS_IRQ_VP_SYNC_LOST(tcrtc->hw_videoport); @@ -127,28 +115,6 @@ static void tidss_irq_postinstall(struct drm_device *ddev) tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport); } - tidss_irq_update(tidss); - - spin_unlock_irqrestore(&tidss->wait_lock, flags); - - tidss_runtime_put(tidss); -} - -int tidss_irq_install(struct drm_device *ddev, unsigned int irq) -{ - int ret; - - if (irq == IRQ_NOTCONNECTED) - return -ENOTCONN; - - 
tidss_irq_preinstall(ddev); - - ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev); - if (ret) - return ret; - - tidss_irq_postinstall(ddev); - return 0; } @@ -156,9 +122,5 @@ void tidss_irq_uninstall(struct drm_device *ddev) { struct tidss_device *tidss = to_tidss(ddev); - tidss_runtime_get(tidss); - dispc_set_irqenable(tidss->dispc, 0); - tidss_runtime_put(tidss); - free_irq(tidss->irq, ddev); } diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c index d096d8d2bc8f..a0e494c806a9 100644 --- a/drivers/gpu/drm/tidss/tidss_kms.c +++ b/drivers/gpu/drm/tidss/tidss_kms.c @@ -29,7 +29,7 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state) tidss_runtime_get(tidss); drm_atomic_helper_commit_modeset_disables(ddev, old_state); - drm_atomic_helper_commit_planes(ddev, old_state, 0); + drm_atomic_helper_commit_planes(ddev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY); drm_atomic_helper_commit_modeset_enables(ddev, old_state); drm_atomic_helper_commit_hw_done(old_state); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 2f6eaac7f659..23bf16f596f6 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -570,19 +570,18 @@ static int tilcdc_pdev_probe(struct platform_device *pdev) match); } -static int tilcdc_pdev_remove(struct platform_device *pdev) +static void tilcdc_pdev_remove(struct platform_device *pdev) { int ret; ret = tilcdc_get_external_components(&pdev->dev, NULL); if (ret < 0) - return ret; + dev_err(&pdev->dev, "tilcdc_get_external_components() failed (%pe)\n", + ERR_PTR(ret)); else if (ret == 0) tilcdc_fini(platform_get_drvdata(pdev)); else component_master_del(&pdev->dev, &tilcdc_comp_ops); - - return 0; } static void tilcdc_pdev_shutdown(struct platform_device *pdev) @@ -599,7 +598,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match); static struct platform_driver tilcdc_platform_driver = { .probe = tilcdc_pdev_probe, - .remove = tilcdc_pdev_remove, + .remove_new = tilcdc_pdev_remove, .shutdown = tilcdc_pdev_shutdown, .driver = { .name = "tilcdc", diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c index 05a72473cfc6..ab89b7fc7bf6 100644 --- a/drivers/gpu/drm/tiny/ofdrm.c +++ b/drivers/gpu/drm/tiny/ofdrm.c @@ -19,7 +19,6 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> -#include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 34bbbd7b53dd..7ce1c4617675 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -25,7 +25,6 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> -#include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> #define DRIVER_NAME "simpledrm" diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index e58b7e249816..edf10618fe2b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -370,7 +370,13 @@ static void ttm_bo_release(struct kref *kref) spin_unlock(&bo->bdev->lru_lock); INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete); - queue_work(bdev->wq, &bo->delayed_delete); + + /* Schedule the worker on the closest NUMA node. This + * improves performance since system memory might be + * cleared on free and that is best done on a CPU core + * close to it. 
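 *
 * Background: queue_work_node() is only a placement hint and expects an
 * unbound workqueue, which appears to be why ttm_device.c below adds
 * WQ_UNBOUND to the "ttm" workqueue; the node itself comes from the
 * pool, initialized further down with dev_to_node(). Sketch of the
 * pairing (error handling omitted):
 *
 *	wq = alloc_workqueue("ttm",
 *			     WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16);
 *	ttm_pool_init(&bdev->pool, dev, dev_to_node(dev),
 *		      use_dma_alloc, use_dma32);
 *	// later, on release:
 *	queue_work_node(bdev->pool.nid, wq, &bo->delayed_delete);
 *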
+ */ + queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete); return; } diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index d48b39132b32..f5187b384ae9 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -204,7 +204,8 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func if (ret) return ret; - bdev->wq = alloc_workqueue("ttm", WQ_MEM_RECLAIM | WQ_HIGHPRI, 16); + bdev->wq = alloc_workqueue("ttm", + WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16); if (!bdev->wq) { ttm_global_release(); return -ENOMEM; @@ -213,7 +214,8 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func bdev->funcs = funcs; ttm_sys_man_init(bdev); - ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32); + + ttm_pool_init(&bdev->pool, dev, dev_to_node(dev), use_dma_alloc, use_dma32); bdev->vma_manager = vma_manager; spin_lock_init(&bdev->lru_lock); diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index 40876bcdd79a..7702359c90c2 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -21,7 +21,6 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_modeset_helper_vtables.h> -#include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -261,6 +260,22 @@ static const uint64_t udl_primary_plane_fmtmods[] = { DRM_FORMAT_MOD_INVALID }; +static int udl_primary_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc *new_crtc = new_plane_state->crtc; + struct drm_crtc_state *new_crtc_state = NULL; + + if (new_crtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); + + return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); +} + static void udl_primary_plane_helper_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { @@ -296,7 +311,7 @@ out_drm_gem_fb_end_cpu_access: static const struct drm_plane_helper_funcs udl_primary_plane_helper_funcs = { DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, - .atomic_check = drm_plane_helper_atomic_check, + .atomic_check = udl_primary_plane_helper_atomic_check, .atomic_update = udl_primary_plane_helper_atomic_update, }; diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile index 4b21b20e4998..b7d673f1153b 100644 --- a/drivers/gpu/drm/v3d/Makefile +++ b/drivers/gpu/drm/v3d/Makefile @@ -12,7 +12,8 @@ v3d-y := \ v3d_perfmon.o \ v3d_trace_points.o \ v3d_sched.o \ - v3d_sysfs.o + v3d_sysfs.o \ + v3d_submit.o v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index 8b3229a37c6d..1bdfac8beafd 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -33,6 +33,9 @@ void v3d_free_object(struct drm_gem_object *obj) struct v3d_dev *v3d = to_v3d_dev(obj->dev); struct v3d_bo *bo = to_v3d_bo(obj); + if (bo->vaddr) + v3d_put_bo_vaddr(bo); + v3d_mmu_remove_ptes(bo); mutex_lock(&v3d->bo_lock); @@ -134,6 +137,7 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, if (IS_ERR(shmem_obj)) return ERR_CAST(shmem_obj); bo = to_v3d_bo(&shmem_obj->base); + bo->vaddr = NULL; ret = v3d_bo_create_finish(&shmem_obj->base); if (ret) @@ -167,6 +171,20 
@@ v3d_prime_import_sg_table(struct drm_device *dev, return obj; } +void v3d_get_bo_vaddr(struct v3d_bo *bo) +{ + struct drm_gem_shmem_object *obj = &bo->base; + + bo->vaddr = vmap(obj->pages, obj->base.size >> PAGE_SHIFT, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); +} + +void v3d_put_bo_vaddr(struct v3d_bo *bo) +{ + vunmap(bo->vaddr); + bo->vaddr = NULL; +} + int v3d_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -233,3 +251,36 @@ int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, drm_gem_object_put(gem_obj); return 0; } + +int +v3d_wait_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + struct drm_v3d_wait_bo *args = data; + ktime_t start = ktime_get(); + u64 delta_ns; + unsigned long timeout_jiffies = + nsecs_to_jiffies_timeout(args->timeout_ns); + + if (args->pad != 0) + return -EINVAL; + + ret = drm_gem_dma_resv_wait(file_priv, args->handle, + true, timeout_jiffies); + + /* Decrement the user's timeout, in case we got interrupted + * such that the ioctl will be restarted. + */ + delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start)); + if (delta_ns < args->timeout_ns) + args->timeout_ns -= delta_ns; + else + args->timeout_ns = 0; + + /* Asked to wait beyond the jiffie/scheduler precision? */ + if (ret == -ETIME && args->timeout_ns) + ret = -EAGAIN; + + return ret; +} diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 44a1ca57d6a4..3debf37e7d9b 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -91,6 +91,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT: args->value = 1; return 0; + case DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE: + args->value = 1; + return 0; default: DRM_DEBUG("Unknown parameter %d\n", args->param); return -EINVAL; @@ -189,6 +192,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(V3D_PERFMON_CREATE, v3d_perfmon_create_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(V3D_PERFMON_DESTROY, v3d_perfmon_destroy_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CPU, v3d_submit_cpu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), }; static const struct drm_driver v3d_drm_driver = { diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 4c59fefaa0b4..3c7d58866570 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -19,7 +19,7 @@ struct reset_control; #define GMP_GRANULARITY (128 * 1024) -#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1) +#define V3D_MAX_QUEUES (V3D_CPU + 1) static inline char *v3d_queue_to_string(enum v3d_queue queue) { @@ -29,6 +29,7 @@ static inline char *v3d_queue_to_string(enum v3d_queue queue) case V3D_TFU: return "tfu"; case V3D_CSD: return "csd"; case V3D_CACHE_CLEAN: return "cache_clean"; + case V3D_CPU: return "cpu"; } return "UNKNOWN"; } @@ -122,6 +123,7 @@ struct v3d_dev { struct v3d_render_job *render_job; struct v3d_tfu_job *tfu_job; struct v3d_csd_job *csd_job; + struct v3d_cpu_job *cpu_job; struct v3d_queue_state queue[V3D_MAX_QUEUES]; @@ -200,6 +202,8 @@ struct v3d_bo { * v3d_render_job->unref_list */ struct list_head unref_head; + + void *vaddr; }; static inline struct v3d_bo * @@ -312,6 +316,112 @@ struct v3d_csd_job { struct drm_v3d_submit_csd args; }; +enum v3d_cpu_job_type { + V3D_CPU_JOB_TYPE_INDIRECT_CSD = 1, + V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY, + 
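/*
 * Background: v3d_wait_bo_ioctl() above (moved to v3d_bo.c by this
 * patch) uses the classic restartable-wait pattern: the unconsumed part
 * of the timeout is written back into the ioctl args so a signal-driven
 * syscall restart does not wait from scratch. Its core, in isolation
 * (do_wait() standing in for drm_gem_dma_resv_wait()):
 *
 *	ktime_t start = ktime_get();
 *	int ret = do_wait(timeout_jiffies);
 *	u64 delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
 *
 *	args->timeout_ns -= min(delta_ns, args->timeout_ns);
 *	if (ret == -ETIME && args->timeout_ns)
 *		ret = -EAGAIN;	// restart instead of reporting expiry
 */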
V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY, + V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY, + V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY, + V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY, +}; + +struct v3d_timestamp_query { + /* Offset of this query in the timestamp BO for its value. */ + u32 offset; + + /* Syncobj that indicates the timestamp availability */ + struct drm_syncobj *syncobj; +}; + +/* Number of perfmons required to handle all supported performance counters */ +#define V3D_MAX_PERFMONS DIV_ROUND_UP(V3D_PERFCNT_NUM, \ + DRM_V3D_MAX_PERF_COUNTERS) + +struct v3d_performance_query { + /* Performance monitor IDs for this query */ + u32 kperfmon_ids[V3D_MAX_PERFMONS]; + + /* Syncobj that indicates the query availability */ + struct drm_syncobj *syncobj; +}; + +struct v3d_indirect_csd_info { + /* Indirect CSD */ + struct v3d_csd_job *job; + + /* Clean cache job associated to the Indirect CSD job */ + struct v3d_job *clean_job; + + /* Offset within the BO where the workgroup counts are stored */ + u32 offset; + + /* Workgroups size */ + u32 wg_size; + + /* Indices of the uniforms with the workgroup dispatch counts + * in the uniform stream. + */ + u32 wg_uniform_offsets[3]; + + /* Indirect BO */ + struct drm_gem_object *indirect; + + /* Context of the Indirect CSD job */ + struct ww_acquire_ctx acquire_ctx; +}; + +struct v3d_timestamp_query_info { + struct v3d_timestamp_query *queries; + + u32 count; +}; + +struct v3d_performance_query_info { + struct v3d_performance_query *queries; + + /* Number of performance queries */ + u32 count; + + /* Number of performance monitors related to that query pool */ + u32 nperfmons; + + /* Number of performance counters related to that query pool */ + u32 ncounters; +}; + +struct v3d_copy_query_results_info { + /* Define if should write to buffer using 64 or 32 bits */ + bool do_64bit; + + /* Define if it can write to buffer even if the query is not available */ + bool do_partial; + + /* Define if it should write availability bit to buffer */ + bool availability_bit; + + /* Offset of the copy buffer in the BO */ + u32 offset; + + /* Stride of the copy buffer in the BO */ + u32 stride; +}; + +struct v3d_cpu_job { + struct v3d_job base; + + enum v3d_cpu_job_type job_type; + + struct v3d_indirect_csd_info indirect_csd; + + struct v3d_timestamp_query_info timestamp_query; + + struct v3d_copy_query_results_info copy; + + struct v3d_performance_query_info performance_query; +}; + +typedef void (*v3d_cpu_job_fn)(struct v3d_cpu_job *); + struct v3d_submit_outsync { struct drm_syncobj *syncobj; }; @@ -379,12 +489,16 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size); void v3d_free_object(struct drm_gem_object *gem_obj); struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, size_t size); +void v3d_get_bo_vaddr(struct v3d_bo *bo); +void v3d_put_bo_vaddr(struct v3d_bo *bo); int v3d_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int v3d_wait_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); @@ -399,19 +513,21 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue); /* v3d_gem.c */ int v3d_gem_init(struct drm_device *dev); void 
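/*
 * Background: the v3d_cpu_job_fn typedef above is the dispatch hook for
 * the new CPU queue; v3d_sched.c later in this patch indexes a function
 * table with job->job_type. A reduced model of that dispatch
 * (run_cpu_job() is illustrative, not part of the patch):
 *
 *	static void run_cpu_job(struct v3d_cpu_job *job,
 *				const v3d_cpu_job_fn *table, size_t n)
 *	{
 *		if (job->job_type < n && table[job->job_type])
 *			table[job->job_type](job);
 *	}
 */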
v3d_gem_destroy(struct drm_device *dev); +void v3d_reset(struct v3d_dev *v3d); +void v3d_invalidate_caches(struct v3d_dev *v3d); +void v3d_clean_caches(struct v3d_dev *v3d); + +/* v3d_submit.c */ +void v3d_job_cleanup(struct v3d_job *job); +void v3d_job_put(struct v3d_job *job); int v3d_submit_cl_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_submit_csd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int v3d_wait_bo_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -void v3d_job_cleanup(struct v3d_job *job); -void v3d_job_put(struct v3d_job *job); -void v3d_reset(struct v3d_dev *v3d); -void v3d_invalidate_caches(struct v3d_dev *v3d); -void v3d_clean_caches(struct v3d_dev *v3d); +int v3d_submit_cpu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* v3d_irq.c */ int v3d_irq_init(struct v3d_dev *v3d); @@ -420,8 +536,6 @@ void v3d_irq_disable(struct v3d_dev *v3d); void v3d_irq_reset(struct v3d_dev *v3d); /* v3d_mmu.c */ -int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo, - u32 *offset); int v3d_mmu_set_page_table(struct v3d_dev *v3d); void v3d_mmu_insert_ptes(struct v3d_bo *bo); void v3d_mmu_remove_ptes(struct v3d_bo *bo); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 9d2ac23c29e3..afc565078c78 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -11,8 +11,6 @@ #include <linux/uaccess.h> #include <drm/drm_managed.h> -#include <drm/drm_syncobj.h> -#include <uapi/drm/v3d_drm.h> #include "v3d_drv.h" #include "v3d_regs.h" @@ -241,772 +239,6 @@ v3d_invalidate_caches(struct v3d_dev *v3d) v3d_invalidate_slices(v3d, 0); } -/* Takes the reservation lock on all the BOs being referenced, so that - * at queue submit time we can update the reservations. - * - * We don't lock the RCL the tile alloc/state BOs, or overflow memory - * (all of which are on exec->unref_list). They're entirely private - * to v3d, so we don't attach dma-buf fences to them. - */ -static int -v3d_lock_bo_reservations(struct v3d_job *job, - struct ww_acquire_ctx *acquire_ctx) -{ - int i, ret; - - ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx); - if (ret) - return ret; - - for (i = 0; i < job->bo_count; i++) { - ret = dma_resv_reserve_fences(job->bo[i]->resv, 1); - if (ret) - goto fail; - - ret = drm_sched_job_add_implicit_dependencies(&job->base, - job->bo[i], true); - if (ret) - goto fail; - } - - return 0; - -fail: - drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); - return ret; -} - -/** - * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects - * referenced by the job. - * @dev: DRM device - * @file_priv: DRM file for this fd - * @job: V3D job being set up - * @bo_handles: GEM handles - * @bo_count: Number of GEM handles passed in - * - * The command validator needs to reference BOs by their index within - * the submitted job's BO list. This does the validation of the job's - * BO list and reference counting for the lifetime of the job. - * - * Note that this function doesn't need to unreference the BOs on - * failure, because that will happen at v3d_exec_cleanup() time. 
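 *
 * Aside: the v3d_lock_bo_reservations() helper deleted just above (and
 * re-added verbatim in v3d_submit.c later in this patch) is the standard
 * multi-BO locking shape: take every reservation lock under one
 * ww-acquire context, then pre-reserve a fence slot per BO so the final
 * fence attach cannot fail. Reduced:
 *
 *	ret = drm_gem_lock_reservations(job->bo, job->bo_count, ctx);
 *	for (i = 0; i < job->bo_count; i++)
 *		ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
 *	// any failure: drm_gem_unlock_reservations(job->bo, job->bo_count, ctx)
 *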
- */ -static int -v3d_lookup_bos(struct drm_device *dev, - struct drm_file *file_priv, - struct v3d_job *job, - u64 bo_handles, - u32 bo_count) -{ - job->bo_count = bo_count; - - if (!job->bo_count) { - /* See comment on bo_index for why we have to check - * this. - */ - DRM_DEBUG("Rendering requires BOs\n"); - return -EINVAL; - } - - return drm_gem_objects_lookup(file_priv, - (void __user *)(uintptr_t)bo_handles, - job->bo_count, &job->bo); -} - -static void -v3d_job_free(struct kref *ref) -{ - struct v3d_job *job = container_of(ref, struct v3d_job, refcount); - int i; - - if (job->bo) { - for (i = 0; i < job->bo_count; i++) - drm_gem_object_put(job->bo[i]); - kvfree(job->bo); - } - - dma_fence_put(job->irq_fence); - dma_fence_put(job->done_fence); - - if (job->perfmon) - v3d_perfmon_put(job->perfmon); - - kfree(job); -} - -static void -v3d_render_job_free(struct kref *ref) -{ - struct v3d_render_job *job = container_of(ref, struct v3d_render_job, - base.refcount); - struct v3d_bo *bo, *save; - - list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) { - drm_gem_object_put(&bo->base.base); - } - - v3d_job_free(ref); -} - -void v3d_job_cleanup(struct v3d_job *job) -{ - if (!job) - return; - - drm_sched_job_cleanup(&job->base); - v3d_job_put(job); -} - -void v3d_job_put(struct v3d_job *job) -{ - kref_put(&job->refcount, job->free); -} - -int -v3d_wait_bo_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - int ret; - struct drm_v3d_wait_bo *args = data; - ktime_t start = ktime_get(); - u64 delta_ns; - unsigned long timeout_jiffies = - nsecs_to_jiffies_timeout(args->timeout_ns); - - if (args->pad != 0) - return -EINVAL; - - ret = drm_gem_dma_resv_wait(file_priv, args->handle, - true, timeout_jiffies); - - /* Decrement the user's timeout, in case we got interrupted - * such that the ioctl will be restarted. - */ - delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start)); - if (delta_ns < args->timeout_ns) - args->timeout_ns -= delta_ns; - else - args->timeout_ns = 0; - - /* Asked to wait beyond the jiffie/scheduler precision? */ - if (ret == -ETIME && args->timeout_ns) - ret = -EAGAIN; - - return ret; -} - -static int -v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, - void **container, size_t size, void (*free)(struct kref *ref), - u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue) -{ - struct v3d_file_priv *v3d_priv = file_priv->driver_priv; - struct v3d_job *job; - bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); - int ret, i; - - *container = kcalloc(1, size, GFP_KERNEL); - if (!*container) { - DRM_ERROR("Cannot allocate memory for v3d job."); - return -ENOMEM; - } - - job = *container; - job->v3d = v3d; - job->free = free; - job->file = file_priv; - - ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], - 1, v3d_priv); - if (ret) - goto fail; - - if (has_multisync) { - if (se->in_sync_count && se->wait_stage == queue) { - struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs); - - for (i = 0; i < se->in_sync_count; i++) { - struct drm_v3d_sem in; - - if (copy_from_user(&in, handle++, sizeof(in))) { - ret = -EFAULT; - DRM_DEBUG("Failed to copy wait dep handle.\n"); - goto fail_deps; - } - ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0); - - // TODO: Investigate why this was filtered out for the IOCTL. 
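/*
 * Aside: the -ENOENT filter below appears to make the in-sync optional;
 * drm_syncobj_find_fence() reports -ENOENT both for a zero/unknown
 * handle and for a syncobj with no fence attached, and either is treated
 * as "nothing to wait on". Equivalent guard in isolation:
 *
 *	ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv,
 *						   in_sync, 0);
 *	if (ret == -ENOENT)
 *		ret = 0;	// absent fence: no dependency
 *	if (ret)
 *		goto fail_deps;
 */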
- if (ret && ret != -ENOENT) - goto fail_deps; - } - } - } else { - ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0); - - // TODO: Investigate why this was filtered out for the IOCTL. - if (ret && ret != -ENOENT) - goto fail_deps; - } - - kref_init(&job->refcount); - - return 0; - -fail_deps: - drm_sched_job_cleanup(&job->base); -fail: - kfree(*container); - *container = NULL; - - return ret; -} - -static void -v3d_push_job(struct v3d_job *job) -{ - drm_sched_job_arm(&job->base); - - job->done_fence = dma_fence_get(&job->base.s_fence->finished); - - /* put by scheduler job completion */ - kref_get(&job->refcount); - - drm_sched_entity_push_job(&job->base); -} - -static void -v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, - struct v3d_job *job, - struct ww_acquire_ctx *acquire_ctx, - u32 out_sync, - struct v3d_submit_ext *se, - struct dma_fence *done_fence) -{ - struct drm_syncobj *sync_out; - bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); - int i; - - for (i = 0; i < job->bo_count; i++) { - /* XXX: Use shared fences for read-only objects. */ - dma_resv_add_fence(job->bo[i]->resv, job->done_fence, - DMA_RESV_USAGE_WRITE); - } - - drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); - - /* Update the return sync object for the job */ - /* If it only supports a single signal semaphore*/ - if (!has_multisync) { - sync_out = drm_syncobj_find(file_priv, out_sync); - if (sync_out) { - drm_syncobj_replace_fence(sync_out, done_fence); - drm_syncobj_put(sync_out); - } - return; - } - - /* If multiple semaphores extension is supported */ - if (se->out_sync_count) { - for (i = 0; i < se->out_sync_count; i++) { - drm_syncobj_replace_fence(se->out_syncs[i].syncobj, - done_fence); - drm_syncobj_put(se->out_syncs[i].syncobj); - } - kvfree(se->out_syncs); - } -} - -static void -v3d_put_multisync_post_deps(struct v3d_submit_ext *se) -{ - unsigned int i; - - if (!(se && se->out_sync_count)) - return; - - for (i = 0; i < se->out_sync_count; i++) - drm_syncobj_put(se->out_syncs[i].syncobj); - kvfree(se->out_syncs); -} - -static int -v3d_get_multisync_post_deps(struct drm_file *file_priv, - struct v3d_submit_ext *se, - u32 count, u64 handles) -{ - struct drm_v3d_sem __user *post_deps; - int i, ret; - - if (!count) - return 0; - - se->out_syncs = (struct v3d_submit_outsync *) - kvmalloc_array(count, - sizeof(struct v3d_submit_outsync), - GFP_KERNEL); - if (!se->out_syncs) - return -ENOMEM; - - post_deps = u64_to_user_ptr(handles); - - for (i = 0; i < count; i++) { - struct drm_v3d_sem out; - - if (copy_from_user(&out, post_deps++, sizeof(out))) { - ret = -EFAULT; - DRM_DEBUG("Failed to copy post dep handles\n"); - goto fail; - } - - se->out_syncs[i].syncobj = drm_syncobj_find(file_priv, - out.handle); - if (!se->out_syncs[i].syncobj) { - ret = -EINVAL; - goto fail; - } - } - se->out_sync_count = count; - - return 0; - -fail: - for (i--; i >= 0; i--) - drm_syncobj_put(se->out_syncs[i].syncobj); - kvfree(se->out_syncs); - - return ret; -} - -/* Get data for multiple binary semaphores synchronization. Parse syncobj - * to be signaled when job completes (out_sync). 
- */ -static int -v3d_get_multisync_submit_deps(struct drm_file *file_priv, - struct drm_v3d_extension __user *ext, - void *data) -{ - struct drm_v3d_multi_sync multisync; - struct v3d_submit_ext *se = data; - int ret; - - if (copy_from_user(&multisync, ext, sizeof(multisync))) - return -EFAULT; - - if (multisync.pad) - return -EINVAL; - - ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count, - multisync.out_syncs); - if (ret) - return ret; - - se->in_sync_count = multisync.in_sync_count; - se->in_syncs = multisync.in_syncs; - se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC; - se->wait_stage = multisync.wait_stage; - - return 0; -} - -/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data - * according to the extension id (name). - */ -static int -v3d_get_extensions(struct drm_file *file_priv, - u64 ext_handles, - void *data) -{ - struct drm_v3d_extension __user *user_ext; - int ret; - - user_ext = u64_to_user_ptr(ext_handles); - while (user_ext) { - struct drm_v3d_extension ext; - - if (copy_from_user(&ext, user_ext, sizeof(ext))) { - DRM_DEBUG("Failed to copy submit extension\n"); - return -EFAULT; - } - - switch (ext.id) { - case DRM_V3D_EXT_ID_MULTI_SYNC: - ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data); - if (ret) - return ret; - break; - default: - DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id); - return -EINVAL; - } - - user_ext = u64_to_user_ptr(ext.next); - } - - return 0; -} - -/** - * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D. - * @dev: DRM device - * @data: ioctl argument - * @file_priv: DRM file for this fd - * - * This is the main entrypoint for userspace to submit a 3D frame to - * the GPU. Userspace provides the binner command list (if - * applicable), and the kernel sets up the render command list to draw - * to the framebuffer described in the ioctl, using the command lists - * that the 3D engine's binner will produce. 
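 *
 * Aside: the v3d_get_extensions() helper above walks a user-space linked
 * list: each struct drm_v3d_extension carries an id and a u64 next
 * pointer, converted with u64_to_user_ptr() until it reads back as zero.
 * The loop shape, reduced:
 *
 *	user_ext = u64_to_user_ptr(ext_handles);
 *	while (user_ext) {
 *		if (copy_from_user(&ext, user_ext, sizeof(ext)))
 *			return -EFAULT;
 *		// switch (ext.id) { ... per-extension parser ... }
 *		user_ext = u64_to_user_ptr(ext.next);
 *	}
 *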
- */ -int -v3d_submit_cl_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct v3d_dev *v3d = to_v3d_dev(dev); - struct v3d_file_priv *v3d_priv = file_priv->driver_priv; - struct drm_v3d_submit_cl *args = data; - struct v3d_submit_ext se = {0}; - struct v3d_bin_job *bin = NULL; - struct v3d_render_job *render = NULL; - struct v3d_job *clean_job = NULL; - struct v3d_job *last_job; - struct ww_acquire_ctx acquire_ctx; - int ret = 0; - - trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end); - - if (args->pad) - return -EINVAL; - - if (args->flags && - args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE | - DRM_V3D_SUBMIT_EXTENSION)) { - DRM_INFO("invalid flags: %d\n", args->flags); - return -EINVAL; - } - - if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { - ret = v3d_get_extensions(file_priv, args->extensions, &se); - if (ret) { - DRM_DEBUG("Failed to get extensions.\n"); - return ret; - } - } - - ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render), - v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER); - if (ret) - goto fail; - - render->start = args->rcl_start; - render->end = args->rcl_end; - INIT_LIST_HEAD(&render->unref_list); - - if (args->bcl_start != args->bcl_end) { - ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin), - v3d_job_free, args->in_sync_bcl, &se, V3D_BIN); - if (ret) - goto fail; - - bin->start = args->bcl_start; - bin->end = args->bcl_end; - bin->qma = args->qma; - bin->qms = args->qms; - bin->qts = args->qts; - bin->render = render; - } - - if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { - ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), - v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); - if (ret) - goto fail; - - last_job = clean_job; - } else { - last_job = &render->base; - } - - ret = v3d_lookup_bos(dev, file_priv, last_job, - args->bo_handles, args->bo_handle_count); - if (ret) - goto fail; - - ret = v3d_lock_bo_reservations(last_job, &acquire_ctx); - if (ret) - goto fail; - - if (args->perfmon_id) { - render->base.perfmon = v3d_perfmon_find(v3d_priv, - args->perfmon_id); - - if (!render->base.perfmon) { - ret = -ENOENT; - goto fail_perfmon; - } - } - - mutex_lock(&v3d->sched_lock); - if (bin) { - bin->base.perfmon = render->base.perfmon; - v3d_perfmon_get(bin->base.perfmon); - v3d_push_job(&bin->base); - - ret = drm_sched_job_add_dependency(&render->base.base, - dma_fence_get(bin->base.done_fence)); - if (ret) - goto fail_unreserve; - } - - v3d_push_job(&render->base); - - if (clean_job) { - struct dma_fence *render_fence = - dma_fence_get(render->base.done_fence); - ret = drm_sched_job_add_dependency(&clean_job->base, - render_fence); - if (ret) - goto fail_unreserve; - clean_job->perfmon = render->base.perfmon; - v3d_perfmon_get(clean_job->perfmon); - v3d_push_job(clean_job); - } - - mutex_unlock(&v3d->sched_lock); - - v3d_attach_fences_and_unlock_reservation(file_priv, - last_job, - &acquire_ctx, - args->out_sync, - &se, - last_job->done_fence); - - if (bin) - v3d_job_put(&bin->base); - v3d_job_put(&render->base); - if (clean_job) - v3d_job_put(clean_job); - - return 0; - -fail_unreserve: - mutex_unlock(&v3d->sched_lock); -fail_perfmon: - drm_gem_unlock_reservations(last_job->bo, - last_job->bo_count, &acquire_ctx); -fail: - v3d_job_cleanup((void *)bin); - v3d_job_cleanup((void *)render); - v3d_job_cleanup(clean_job); - v3d_put_multisync_post_deps(&se); - - return ret; -} - -/** - * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D. 
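 *
 * Aside: the CL submit path deleted above chains up to three scheduler
 * jobs: an optional bin job, the render job, and an optional cache-clean
 * job, each made to wait on its predecessor's done_fence. In outline
 * (error handling dropped):
 *
 *	v3d_push_job(&bin->base);
 *	drm_sched_job_add_dependency(&render->base.base,
 *				     dma_fence_get(bin->base.done_fence));
 *	v3d_push_job(&render->base);
 *	drm_sched_job_add_dependency(&clean_job->base,
 *				     dma_fence_get(render->base.done_fence));
 *	v3d_push_job(clean_job);
 *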
- * @dev: DRM device - * @data: ioctl argument - * @file_priv: DRM file for this fd - * - * Userspace provides the register setup for the TFU, which we don't - * need to validate since the TFU is behind the MMU. - */ -int -v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct v3d_dev *v3d = to_v3d_dev(dev); - struct drm_v3d_submit_tfu *args = data; - struct v3d_submit_ext se = {0}; - struct v3d_tfu_job *job = NULL; - struct ww_acquire_ctx acquire_ctx; - int ret = 0; - - trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia); - - if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) { - DRM_DEBUG("invalid flags: %d\n", args->flags); - return -EINVAL; - } - - if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { - ret = v3d_get_extensions(file_priv, args->extensions, &se); - if (ret) { - DRM_DEBUG("Failed to get extensions.\n"); - return ret; - } - } - - ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), - v3d_job_free, args->in_sync, &se, V3D_TFU); - if (ret) - goto fail; - - job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles), - sizeof(*job->base.bo), GFP_KERNEL); - if (!job->base.bo) { - ret = -ENOMEM; - goto fail; - } - - job->args = *args; - - for (job->base.bo_count = 0; - job->base.bo_count < ARRAY_SIZE(args->bo_handles); - job->base.bo_count++) { - struct drm_gem_object *bo; - - if (!args->bo_handles[job->base.bo_count]) - break; - - bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]); - if (!bo) { - DRM_DEBUG("Failed to look up GEM BO %d: %d\n", - job->base.bo_count, - args->bo_handles[job->base.bo_count]); - ret = -ENOENT; - goto fail; - } - job->base.bo[job->base.bo_count] = bo; - } - - ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx); - if (ret) - goto fail; - - mutex_lock(&v3d->sched_lock); - v3d_push_job(&job->base); - mutex_unlock(&v3d->sched_lock); - - v3d_attach_fences_and_unlock_reservation(file_priv, - &job->base, &acquire_ctx, - args->out_sync, - &se, - job->base.done_fence); - - v3d_job_put(&job->base); - - return 0; - -fail: - v3d_job_cleanup((void *)job); - v3d_put_multisync_post_deps(&se); - - return ret; -} - -/** - * v3d_submit_csd_ioctl() - Submits a CSD (texture formatting) job to the V3D. - * @dev: DRM device - * @data: ioctl argument - * @file_priv: DRM file for this fd - * - * Userspace provides the register setup for the CSD, which we don't - * need to validate since the CSD is behind the MMU. 
- */ -int -v3d_submit_csd_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct v3d_dev *v3d = to_v3d_dev(dev); - struct v3d_file_priv *v3d_priv = file_priv->driver_priv; - struct drm_v3d_submit_csd *args = data; - struct v3d_submit_ext se = {0}; - struct v3d_csd_job *job = NULL; - struct v3d_job *clean_job = NULL; - struct ww_acquire_ctx acquire_ctx; - int ret; - - trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]); - - if (args->pad) - return -EINVAL; - - if (!v3d_has_csd(v3d)) { - DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n"); - return -EINVAL; - } - - if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) { - DRM_INFO("invalid flags: %d\n", args->flags); - return -EINVAL; - } - - if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { - ret = v3d_get_extensions(file_priv, args->extensions, &se); - if (ret) { - DRM_DEBUG("Failed to get extensions.\n"); - return ret; - } - } - - ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), - v3d_job_free, args->in_sync, &se, V3D_CSD); - if (ret) - goto fail; - - ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), - v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); - if (ret) - goto fail; - - job->args = *args; - - ret = v3d_lookup_bos(dev, file_priv, clean_job, - args->bo_handles, args->bo_handle_count); - if (ret) - goto fail; - - ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx); - if (ret) - goto fail; - - if (args->perfmon_id) { - job->base.perfmon = v3d_perfmon_find(v3d_priv, - args->perfmon_id); - if (!job->base.perfmon) { - ret = -ENOENT; - goto fail_perfmon; - } - } - - mutex_lock(&v3d->sched_lock); - v3d_push_job(&job->base); - - ret = drm_sched_job_add_dependency(&clean_job->base, - dma_fence_get(job->base.done_fence)); - if (ret) - goto fail_unreserve; - - v3d_push_job(clean_job); - mutex_unlock(&v3d->sched_lock); - - v3d_attach_fences_and_unlock_reservation(file_priv, - clean_job, - &acquire_ctx, - args->out_sync, - &se, - clean_job->done_fence); - - v3d_job_put(&job->base); - v3d_job_put(clean_job); - - return 0; - -fail_unreserve: - mutex_unlock(&v3d->sched_lock); -fail_perfmon: - drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count, - &acquire_ctx); -fail: - v3d_job_cleanup((void *)job); - v3d_job_cleanup(clean_job); - v3d_put_multisync_post_deps(&se); - - return ret; -} - int v3d_gem_init(struct drm_device *dev) { diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index fccbea2a5f2e..54015ad765c7 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -21,10 +21,14 @@ #include <linux/sched/clock.h> #include <linux/kthread.h> +#include <drm/drm_syncobj.h> + #include "v3d_drv.h" #include "v3d_regs.h" #include "v3d_trace.h" +#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16 + static struct v3d_job * to_v3d_job(struct drm_sched_job *sched_job) { @@ -55,6 +59,12 @@ to_csd_job(struct drm_sched_job *sched_job) return container_of(sched_job, struct v3d_csd_job, base.base); } +static struct v3d_cpu_job * +to_cpu_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct v3d_cpu_job, base.base); +} + static void v3d_sched_job_free(struct drm_sched_job *sched_job) { @@ -64,6 +74,28 @@ v3d_sched_job_free(struct drm_sched_job *sched_job) } static void +v3d_cpu_job_free(struct drm_sched_job *sched_job) +{ + struct v3d_cpu_job *job = to_cpu_job(sched_job); + struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query; + struct v3d_performance_query_info *performance_query 
= &job->performance_query; + + if (timestamp_query->queries) { + for (int i = 0; i < timestamp_query->count; i++) + drm_syncobj_put(timestamp_query->queries[i].syncobj); + kvfree(timestamp_query->queries); + } + + if (performance_query->queries) { + for (int i = 0; i < performance_query->count; i++) + drm_syncobj_put(performance_query->queries[i].syncobj); + kvfree(performance_query->queries); + } + + v3d_job_cleanup(&job->base); +} + +static void v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) { if (job->perfmon != v3d->active_perfmon) @@ -262,6 +294,275 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) return fence; } +static void +v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job) +{ + struct v3d_indirect_csd_info *indirect_csd = &job->indirect_csd; + struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]); + struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect); + struct drm_v3d_submit_csd *args = &indirect_csd->job->args; + u32 *wg_counts; + + v3d_get_bo_vaddr(bo); + v3d_get_bo_vaddr(indirect); + + wg_counts = (uint32_t *)(bo->vaddr + indirect_csd->offset); + + if (wg_counts[0] == 0 || wg_counts[1] == 0 || wg_counts[2] == 0) + return; + + args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT; + args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT; + args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT; + args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) * + (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1; + + for (int i = 0; i < 3; i++) { + /* 0xffffffff indicates that the uniform rewrite is not needed */ + if (indirect_csd->wg_uniform_offsets[i] != 0xffffffff) { + u32 uniform_idx = indirect_csd->wg_uniform_offsets[i]; + ((uint32_t *)indirect->vaddr)[uniform_idx] = wg_counts[i]; + } + } + + v3d_put_bo_vaddr(indirect); + v3d_put_bo_vaddr(bo); +} + +static void +v3d_timestamp_query(struct v3d_cpu_job *job) +{ + struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query; + struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]); + u8 *value_addr; + + v3d_get_bo_vaddr(bo); + + for (int i = 0; i < timestamp_query->count; i++) { + value_addr = ((u8 *)bo->vaddr) + timestamp_query->queries[i].offset; + *((u64 *)value_addr) = i == 0 ? 
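/*
 * Background: v3d_rewrite_csd_job_wg_counts_from_indirect() above is the
 * heart of the INDIRECT_CSD job type: the CPU queue reads workgroup
 * counts that the GPU wrote into a BO and patches them into the CSD
 * (compute shader dispatch) job's config words before it is pushed.
 * With V3D_CSD_CFG012_WG_COUNT_SHIFT == 16, the packing above is:
 *
 *	args->cfg[0] = wg_counts[0] << 16;	// X workgroup count
 *	args->cfg[1] = wg_counts[1] << 16;	// Y workgroup count
 *	args->cfg[2] = wg_counts[2] << 16;	// Z workgroup count
 *	// batches: threads per workgroup rounded up to 16-wide groups,
 *	// times total workgroups, minus one, matching the code above
 *	args->cfg[4] = DIV_ROUND_UP(wg_size, 16) *
 *		       (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
 */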
ktime_get_ns() : 0ull; + + drm_syncobj_replace_fence(timestamp_query->queries[i].syncobj, + job->base.done_fence); + } + + v3d_put_bo_vaddr(bo); +} + +static void +v3d_reset_timestamp_queries(struct v3d_cpu_job *job) +{ + struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query; + struct v3d_timestamp_query *queries = timestamp_query->queries; + struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]); + u8 *value_addr; + + v3d_get_bo_vaddr(bo); + + for (int i = 0; i < timestamp_query->count; i++) { + value_addr = ((u8 *)bo->vaddr) + queries[i].offset; + *((u64 *)value_addr) = 0; + + drm_syncobj_replace_fence(queries[i].syncobj, NULL); + } + + v3d_put_bo_vaddr(bo); +} + +static void +write_to_buffer(void *dst, u32 idx, bool do_64bit, u64 value) +{ + if (do_64bit) { + u64 *dst64 = (u64 *)dst; + + dst64[idx] = value; + } else { + u32 *dst32 = (u32 *)dst; + + dst32[idx] = (u32)value; + } +} + +static void +v3d_copy_query_results(struct v3d_cpu_job *job) +{ + struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query; + struct v3d_timestamp_query *queries = timestamp_query->queries; + struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]); + struct v3d_bo *timestamp = to_v3d_bo(job->base.bo[1]); + struct v3d_copy_query_results_info *copy = &job->copy; + struct dma_fence *fence; + u8 *query_addr; + bool available, write_result; + u8 *data; + int i; + + v3d_get_bo_vaddr(bo); + v3d_get_bo_vaddr(timestamp); + + data = ((u8 *)bo->vaddr) + copy->offset; + + for (i = 0; i < timestamp_query->count; i++) { + fence = drm_syncobj_fence_get(queries[i].syncobj); + available = fence ? dma_fence_is_signaled(fence) : false; + + write_result = available || copy->do_partial; + if (write_result) { + query_addr = ((u8 *)timestamp->vaddr) + queries[i].offset; + write_to_buffer(data, 0, copy->do_64bit, *((u64 *)query_addr)); + } + + if (copy->availability_bit) + write_to_buffer(data, 1, copy->do_64bit, available ? 
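/*
 * Background: write_to_buffer() above lets one copy routine serve both
 * 32-bit and 64-bit result buffers: the same logical index lands in a
 * u32 or u64 slot depending on do_64bit, and the caller advances by
 * copy->stride per query. Usage shape for one query record:
 *
 *	write_to_buffer(data, 0, copy->do_64bit, result);	// value
 *	write_to_buffer(data, 1, copy->do_64bit, available);	// flag
 *	data += copy->stride;
 *
 * This mirrors the 32/64-bit query-result layouts a Vulkan driver needs,
 * though that mapping is context, not something this patch states.
 */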
1u : 0u); + + data += copy->stride; + + dma_fence_put(fence); + } + + v3d_put_bo_vaddr(timestamp); + v3d_put_bo_vaddr(bo); +} + +static void +v3d_reset_performance_queries(struct v3d_cpu_job *job) +{ + struct v3d_performance_query_info *performance_query = &job->performance_query; + struct v3d_file_priv *v3d_priv = job->base.file->driver_priv; + struct v3d_dev *v3d = job->base.v3d; + struct v3d_perfmon *perfmon; + + for (int i = 0; i < performance_query->count; i++) { + for (int j = 0; j < performance_query->nperfmons; j++) { + perfmon = v3d_perfmon_find(v3d_priv, + performance_query->queries[i].kperfmon_ids[j]); + if (!perfmon) { + DRM_DEBUG("Failed to find perfmon."); + continue; + } + + v3d_perfmon_stop(v3d, perfmon, false); + + memset(perfmon->values, 0, perfmon->ncounters * sizeof(u64)); + + v3d_perfmon_put(perfmon); + } + + drm_syncobj_replace_fence(performance_query->queries[i].syncobj, NULL); + } +} + +static void +v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query) +{ + struct v3d_performance_query_info *performance_query = &job->performance_query; + struct v3d_copy_query_results_info *copy = &job->copy; + struct v3d_file_priv *v3d_priv = job->base.file->driver_priv; + struct v3d_dev *v3d = job->base.v3d; + struct v3d_perfmon *perfmon; + u64 counter_values[V3D_PERFCNT_NUM]; + + for (int i = 0; i < performance_query->nperfmons; i++) { + perfmon = v3d_perfmon_find(v3d_priv, + performance_query->queries[query].kperfmon_ids[i]); + if (!perfmon) { + DRM_DEBUG("Failed to find perfmon."); + continue; + } + + v3d_perfmon_stop(v3d, perfmon, true); + + memcpy(&counter_values[i * DRM_V3D_MAX_PERF_COUNTERS], perfmon->values, + perfmon->ncounters * sizeof(u64)); + + v3d_perfmon_put(perfmon); + } + + for (int i = 0; i < performance_query->ncounters; i++) + write_to_buffer(data, i, copy->do_64bit, counter_values[i]); +} + +static void +v3d_copy_performance_query(struct v3d_cpu_job *job) +{ + struct v3d_performance_query_info *performance_query = &job->performance_query; + struct v3d_copy_query_results_info *copy = &job->copy; + struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]); + struct dma_fence *fence; + bool available, write_result; + u8 *data; + + v3d_get_bo_vaddr(bo); + + data = ((u8 *)bo->vaddr) + copy->offset; + + for (int i = 0; i < performance_query->count; i++) { + fence = drm_syncobj_fence_get(performance_query->queries[i].syncobj); + available = fence ? dma_fence_is_signaled(fence) : false; + + write_result = available || copy->do_partial; + if (write_result) + v3d_write_performance_query_result(job, data, i); + + if (copy->availability_bit) + write_to_buffer(data, performance_query->ncounters, + copy->do_64bit, available ? 
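/*
 * Background: one hardware perfmon holds at most
 * DRM_V3D_MAX_PERF_COUNTERS counters, so a query needing more carries
 * several kperfmon IDs (up to V3D_MAX_PERFMONS) and
 * v3d_write_performance_query_result() above flattens them into one
 * array before writing the buffer out:
 *
 *	u64 vals[V3D_PERFCNT_NUM];
 *
 *	for (i = 0; i < nperfmons; i++)	// slot i: one perfmon's values
 *		memcpy(&vals[i * DRM_V3D_MAX_PERF_COUNTERS],
 *		       perfmon->values, perfmon->ncounters * sizeof(u64));
 *	for (i = 0; i < ncounters; i++)
 *		write_to_buffer(data, i, copy->do_64bit, vals[i]);
 */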
1u : 0u); + + data += copy->stride; + + dma_fence_put(fence); + } + + v3d_put_bo_vaddr(bo); +} + +static const v3d_cpu_job_fn cpu_job_function[] = { + [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect, + [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query, + [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries, + [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = v3d_copy_query_results, + [V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = v3d_reset_performance_queries, + [V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = v3d_copy_performance_query, +}; + +static struct dma_fence * +v3d_cpu_job_run(struct drm_sched_job *sched_job) +{ + struct v3d_cpu_job *job = to_cpu_job(sched_job); + struct v3d_dev *v3d = job->base.v3d; + struct v3d_file_priv *file = job->base.file->driver_priv; + u64 runtime; + + v3d->cpu_job = job; + + if (job->job_type >= ARRAY_SIZE(cpu_job_function)) { + DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type); + return NULL; + } + + file->start_ns[V3D_CPU] = local_clock(); + v3d->queue[V3D_CPU].start_ns = file->start_ns[V3D_CPU]; + + trace_v3d_cpu_job_begin(&v3d->drm, job->job_type); + + cpu_job_function[job->job_type](job); + + trace_v3d_cpu_job_end(&v3d->drm, job->job_type); + + runtime = local_clock() - file->start_ns[V3D_CPU]; + + file->enabled_ns[V3D_CPU] += runtime; + v3d->queue[V3D_CPU].enabled_ns += runtime; + + file->jobs_sent[V3D_CPU]++; + v3d->queue[V3D_CPU].jobs_sent++; + + file->start_ns[V3D_CPU] = 0; + v3d->queue[V3D_CPU].start_ns = 0; + + return NULL; +} + static struct dma_fence * v3d_cache_clean_job_run(struct drm_sched_job *sched_job) { @@ -416,6 +717,12 @@ static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = { .free_job = v3d_sched_job_free }; +static const struct drm_sched_backend_ops v3d_cpu_sched_ops = { + .run_job = v3d_cpu_job_run, + .timedout_job = v3d_generic_job_timedout, + .free_job = v3d_cpu_job_free +}; + int v3d_sched_init(struct v3d_dev *v3d) { @@ -471,6 +778,15 @@ v3d_sched_init(struct v3d_dev *v3d) goto fail; } + ret = drm_sched_init(&v3d->queue[V3D_CPU].sched, + &v3d_cpu_sched_ops, NULL, + DRM_SCHED_PRIORITY_COUNT, + 1, job_hang_limit, + msecs_to_jiffies(hang_limit_ms), NULL, + NULL, "v3d_cpu", v3d->drm.dev); + if (ret) + goto fail; + return 0; fail: diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c new file mode 100644 index 000000000000..fcff41dd2315 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -0,0 +1,1320 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2014-2018 Broadcom + * Copyright (C) 2023 Raspberry Pi + */ + +#include <drm/drm_syncobj.h> + +#include "v3d_drv.h" +#include "v3d_regs.h" +#include "v3d_trace.h" + +/* Takes the reservation lock on all the BOs being referenced, so that + * at queue submit time we can update the reservations. + * + * We don't lock the RCL the tile alloc/state BOs, or overflow memory + * (all of which are on exec->unref_list). They're entirely private + * to v3d, so we don't attach dma-buf fences to them. 
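 *
 * Aside: unlike the GPU queues, v3d_cpu_job_run() earlier in this patch
 * returns NULL rather than a hardware fence, since the work has already
 * completed synchronously by the time it returns; only the scheduler's
 * own finished fence signals completion. It also feeds the same runtime
 * accounting as the other queues, sampled with local_clock():
 *
 *	u64 t0 = local_clock();
 *	cpu_job_function[job->job_type](job);
 *	u64 runtime = local_clock() - t0;
 *	file->enabled_ns[V3D_CPU] += runtime;		// per-client stats
 *	v3d->queue[V3D_CPU].enabled_ns += runtime;	// per-queue stats
 *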
+ */ +static int +v3d_lock_bo_reservations(struct v3d_job *job, + struct ww_acquire_ctx *acquire_ctx) +{ + int i, ret; + + ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx); + if (ret) + return ret; + + for (i = 0; i < job->bo_count; i++) { + ret = dma_resv_reserve_fences(job->bo[i]->resv, 1); + if (ret) + goto fail; + + ret = drm_sched_job_add_implicit_dependencies(&job->base, + job->bo[i], true); + if (ret) + goto fail; + } + + return 0; + +fail: + drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); + return ret; +} + +/** + * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects + * referenced by the job. + * @dev: DRM device + * @file_priv: DRM file for this fd + * @job: V3D job being set up + * @bo_handles: GEM handles + * @bo_count: Number of GEM handles passed in + * + * The command validator needs to reference BOs by their index within + * the submitted job's BO list. This does the validation of the job's + * BO list and reference counting for the lifetime of the job. + * + * Note that this function doesn't need to unreference the BOs on + * failure, because that will happen at v3d_exec_cleanup() time. + */ +static int +v3d_lookup_bos(struct drm_device *dev, + struct drm_file *file_priv, + struct v3d_job *job, + u64 bo_handles, + u32 bo_count) +{ + job->bo_count = bo_count; + + if (!job->bo_count) { + /* See comment on bo_index for why we have to check + * this. + */ + DRM_DEBUG("Rendering requires BOs\n"); + return -EINVAL; + } + + return drm_gem_objects_lookup(file_priv, + (void __user *)(uintptr_t)bo_handles, + job->bo_count, &job->bo); +} + +static void +v3d_job_free(struct kref *ref) +{ + struct v3d_job *job = container_of(ref, struct v3d_job, refcount); + int i; + + if (job->bo) { + for (i = 0; i < job->bo_count; i++) + drm_gem_object_put(job->bo[i]); + kvfree(job->bo); + } + + dma_fence_put(job->irq_fence); + dma_fence_put(job->done_fence); + + if (job->perfmon) + v3d_perfmon_put(job->perfmon); + + kfree(job); +} + +static void +v3d_render_job_free(struct kref *ref) +{ + struct v3d_render_job *job = container_of(ref, struct v3d_render_job, + base.refcount); + struct v3d_bo *bo, *save; + + list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) { + drm_gem_object_put(&bo->base.base); + } + + v3d_job_free(ref); +} + +void v3d_job_cleanup(struct v3d_job *job) +{ + if (!job) + return; + + drm_sched_job_cleanup(&job->base); + v3d_job_put(job); +} + +void v3d_job_put(struct v3d_job *job) +{ + if (!job) + return; + + kref_put(&job->refcount, job->free); +} + +static int +v3d_job_allocate(void **container, size_t size) +{ + *container = kcalloc(1, size, GFP_KERNEL); + if (!*container) { + DRM_ERROR("Cannot allocate memory for V3D job.\n"); + return -ENOMEM; + } + + return 0; +} + +static int +v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, + struct v3d_job *job, void (*free)(struct kref *ref), + u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue) +{ + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); + int ret, i; + + job->v3d = v3d; + job->free = free; + job->file = file_priv; + + ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], + 1, v3d_priv); + if (ret) + return ret; + + if (has_multisync) { + if (se->in_sync_count && se->wait_stage == queue) { + struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs); + + for (i = 0; i < se->in_sync_count; i++) { + struct drm_v3d_sem in; + + if (copy_from_user(&in, 
handle++, sizeof(in))) { + ret = -EFAULT; + DRM_DEBUG("Failed to copy wait dep handle.\n"); + goto fail_deps; + } + ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0); + + // TODO: Investigate why this was filtered out for the IOCTL. + if (ret && ret != -ENOENT) + goto fail_deps; + } + } + } else { + ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0); + + // TODO: Investigate why this was filtered out for the IOCTL. + if (ret && ret != -ENOENT) + goto fail_deps; + } + + kref_init(&job->refcount); + + return 0; + +fail_deps: + drm_sched_job_cleanup(&job->base); + return ret; +} + +static void +v3d_push_job(struct v3d_job *job) +{ + drm_sched_job_arm(&job->base); + + job->done_fence = dma_fence_get(&job->base.s_fence->finished); + + /* put by scheduler job completion */ + kref_get(&job->refcount); + + drm_sched_entity_push_job(&job->base); +} + +static void +v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, + struct v3d_job *job, + struct ww_acquire_ctx *acquire_ctx, + u32 out_sync, + struct v3d_submit_ext *se, + struct dma_fence *done_fence) +{ + struct drm_syncobj *sync_out; + bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); + int i; + + for (i = 0; i < job->bo_count; i++) { + /* XXX: Use shared fences for read-only objects. */ + dma_resv_add_fence(job->bo[i]->resv, job->done_fence, + DMA_RESV_USAGE_WRITE); + } + + drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); + + /* Update the return sync object for the job */ + /* If it only supports a single signal semaphore*/ + if (!has_multisync) { + sync_out = drm_syncobj_find(file_priv, out_sync); + if (sync_out) { + drm_syncobj_replace_fence(sync_out, done_fence); + drm_syncobj_put(sync_out); + } + return; + } + + /* If multiple semaphores extension is supported */ + if (se->out_sync_count) { + for (i = 0; i < se->out_sync_count; i++) { + drm_syncobj_replace_fence(se->out_syncs[i].syncobj, + done_fence); + drm_syncobj_put(se->out_syncs[i].syncobj); + } + kvfree(se->out_syncs); + } +} + +static int +v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv, + struct v3d_dev *v3d, + struct drm_v3d_submit_csd *args, + struct v3d_csd_job **job, + struct v3d_job **clean_job, + struct v3d_submit_ext *se, + struct ww_acquire_ctx *acquire_ctx) +{ + int ret; + + ret = v3d_job_allocate((void *)job, sizeof(**job)); + if (ret) + return ret; + + ret = v3d_job_init(v3d, file_priv, &(*job)->base, + v3d_job_free, args->in_sync, se, V3D_CSD); + if (ret) + return ret; + + ret = v3d_job_allocate((void *)clean_job, sizeof(**clean_job)); + if (ret) + return ret; + + ret = v3d_job_init(v3d, file_priv, *clean_job, + v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); + if (ret) + return ret; + + (*job)->args = *args; + + ret = v3d_lookup_bos(&v3d->drm, file_priv, *clean_job, + args->bo_handles, args->bo_handle_count); + if (ret) + return ret; + + return v3d_lock_bo_reservations(*clean_job, acquire_ctx); +} + +static void +v3d_put_multisync_post_deps(struct v3d_submit_ext *se) +{ + unsigned int i; + + if (!(se && se->out_sync_count)) + return; + + for (i = 0; i < se->out_sync_count; i++) + drm_syncobj_put(se->out_syncs[i].syncobj); + kvfree(se->out_syncs); +} + +static int +v3d_get_multisync_post_deps(struct drm_file *file_priv, + struct v3d_submit_ext *se, + u32 count, u64 handles) +{ + struct drm_v3d_sem __user *post_deps; + int i, ret; + + if (!count) + return 0; + + se->out_syncs = (struct v3d_submit_outsync *) + kvmalloc_array(count, + sizeof(struct v3d_submit_outsync), 
+ GFP_KERNEL); + if (!se->out_syncs) + return -ENOMEM; + + post_deps = u64_to_user_ptr(handles); + + for (i = 0; i < count; i++) { + struct drm_v3d_sem out; + + if (copy_from_user(&out, post_deps++, sizeof(out))) { + ret = -EFAULT; + DRM_DEBUG("Failed to copy post dep handles\n"); + goto fail; + } + + se->out_syncs[i].syncobj = drm_syncobj_find(file_priv, + out.handle); + if (!se->out_syncs[i].syncobj) { + ret = -EINVAL; + goto fail; + } + } + se->out_sync_count = count; + + return 0; + +fail: + for (i--; i >= 0; i--) + drm_syncobj_put(se->out_syncs[i].syncobj); + kvfree(se->out_syncs); + + return ret; +} + +/* Get data for multiple binary semaphores synchronization. Parse syncobj + * to be signaled when job completes (out_sync). + */ +static int +v3d_get_multisync_submit_deps(struct drm_file *file_priv, + struct drm_v3d_extension __user *ext, + struct v3d_submit_ext *se) +{ + struct drm_v3d_multi_sync multisync; + int ret; + + if (se->in_sync_count || se->out_sync_count) { + DRM_DEBUG("Two multisync extensions were added to the same job."); + return -EINVAL; + } + + if (copy_from_user(&multisync, ext, sizeof(multisync))) + return -EFAULT; + + if (multisync.pad) + return -EINVAL; + + ret = v3d_get_multisync_post_deps(file_priv, se, multisync.out_sync_count, + multisync.out_syncs); + if (ret) + return ret; + + se->in_sync_count = multisync.in_sync_count; + se->in_syncs = multisync.in_syncs; + se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC; + se->wait_stage = multisync.wait_stage; + + return 0; +} + +/* Get data for the indirect CSD job submission. */ +static int +v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv, + struct drm_v3d_extension __user *ext, + struct v3d_cpu_job *job) +{ + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct v3d_dev *v3d = v3d_priv->v3d; + struct drm_v3d_indirect_csd indirect_csd; + struct v3d_indirect_csd_info *info = &job->indirect_csd; + + if (!job) { + DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); + return -EINVAL; + } + + if (job->job_type) { + DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n"); + return -EINVAL; + } + + if (copy_from_user(&indirect_csd, ext, sizeof(indirect_csd))) + return -EFAULT; + + if (!v3d_has_csd(v3d)) { + DRM_DEBUG("Attempting CSD submit on non-CSD hardware.\n"); + return -EINVAL; + } + + job->job_type = V3D_CPU_JOB_TYPE_INDIRECT_CSD; + info->offset = indirect_csd.offset; + info->wg_size = indirect_csd.wg_size; + memcpy(&info->wg_uniform_offsets, &indirect_csd.wg_uniform_offsets, + sizeof(indirect_csd.wg_uniform_offsets)); + + info->indirect = drm_gem_object_lookup(file_priv, indirect_csd.indirect); + + return v3d_setup_csd_jobs_and_bos(file_priv, v3d, &indirect_csd.submit, + &info->job, &info->clean_job, + NULL, &info->acquire_ctx); +} + +/* Get data for the query timestamp job submission. 
*/
+static int
+v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
+				   struct drm_v3d_extension __user *ext,
+				   struct v3d_cpu_job *job)
+{
+	u32 __user *offsets, *syncs;
+	struct drm_v3d_timestamp_query timestamp;
+
+	if (!job) {
+		DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+		return -EINVAL;
+	}
+
+	if (job->job_type) {
+		DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&timestamp, ext, sizeof(timestamp)))
+		return -EFAULT;
+
+	if (timestamp.pad)
+		return -EINVAL;
+
+	job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;
+
+	job->timestamp_query.queries = kvmalloc_array(timestamp.count,
+						      sizeof(struct v3d_timestamp_query),
+						      GFP_KERNEL);
+	if (!job->timestamp_query.queries)
+		return -ENOMEM;
+
+	offsets = u64_to_user_ptr(timestamp.offsets);
+	syncs = u64_to_user_ptr(timestamp.syncs);
+
+	for (int i = 0; i < timestamp.count; i++) {
+		u32 offset, sync;
+
+		if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+			kvfree(job->timestamp_query.queries);
+			return -EFAULT;
+		}
+
+		job->timestamp_query.queries[i].offset = offset;
+
+		if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+			kvfree(job->timestamp_query.queries);
+			return -EFAULT;
+		}
+
+		job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+	}
+	job->timestamp_query.count = timestamp.count;
+
+	return 0;
+}
+
+static int
+v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
+				   struct drm_v3d_extension __user *ext,
+				   struct v3d_cpu_job *job)
+{
+	u32 __user *syncs;
+	struct drm_v3d_reset_timestamp_query reset;
+
+	if (!job) {
+		DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+		return -EINVAL;
+	}
+
+	if (job->job_type) {
+		DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&reset, ext, sizeof(reset)))
+		return -EFAULT;
+
+	job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;
+
+	job->timestamp_query.queries = kvmalloc_array(reset.count,
+						      sizeof(struct v3d_timestamp_query),
+						      GFP_KERNEL);
+	if (!job->timestamp_query.queries)
+		return -ENOMEM;
+
+	syncs = u64_to_user_ptr(reset.syncs);
+
+	for (int i = 0; i < reset.count; i++) {
+		u32 sync;
+
+		job->timestamp_query.queries[i].offset = reset.offset + 8 * i;
+
+		if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+			kvfree(job->timestamp_query.queries);
+			return -EFAULT;
+		}
+
+		job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+	}
+	job->timestamp_query.count = reset.count;
+
+	return 0;
+}
+
+/* Get data for the copy timestamp query results job submission.
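 *
 * Aside: in the reset path above the per-query offset is computed as
 * reset.offset + 8 * i, i.e. the pool is assumed to be densely packed
 * u64 timestamps, while the record and copy paths read an explicit
 * offsets[] array from user space instead. Layout implied for a pool of
 * n queries:
 *
 *	timestamp BO: [q0: u64][q1: u64] ... [qn-1: u64]   // 8-byte stride
 *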
+
+/* Get data for the copy timestamp query results job submission.
+ */
+static int
+v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
+				      struct drm_v3d_extension __user *ext,
+				      struct v3d_cpu_job *job)
+{
+	u32 __user *offsets, *syncs;
+	struct drm_v3d_copy_timestamp_query copy;
+	int i;
+
+	if (!job) {
+		DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+		return -EINVAL;
+	}
+
+	if (job->job_type) {
+		DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&copy, ext, sizeof(copy)))
+		return -EFAULT;
+
+	if (copy.pad)
+		return -EINVAL;
+
+	job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;
+
+	job->timestamp_query.queries = kvmalloc_array(copy.count,
+						      sizeof(struct v3d_timestamp_query),
+						      GFP_KERNEL);
+	if (!job->timestamp_query.queries)
+		return -ENOMEM;
+
+	offsets = u64_to_user_ptr(copy.offsets);
+	syncs = u64_to_user_ptr(copy.syncs);
+
+	for (i = 0; i < copy.count; i++) {
+		u32 offset, sync;
+
+		if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+			kvfree(job->timestamp_query.queries);
+			return -EFAULT;
+		}
+
+		job->timestamp_query.queries[i].offset = offset;
+
+		if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+			kvfree(job->timestamp_query.queries);
+			return -EFAULT;
+		}
+
+		job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+	}
+	job->timestamp_query.count = copy.count;
+
+	job->copy.do_64bit = copy.do_64bit;
+	job->copy.do_partial = copy.do_partial;
+	job->copy.availability_bit = copy.availability_bit;
+	job->copy.offset = copy.offset;
+	job->copy.stride = copy.stride;
+
+	return 0;
+}
+
+static int
+v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
+				     struct drm_v3d_extension __user *ext,
+				     struct v3d_cpu_job *job)
+{
+	u32 __user *syncs;
+	u64 __user *kperfmon_ids;
+	struct drm_v3d_reset_performance_query reset;
+
+	if (!job) {
+		DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+		return -EINVAL;
+	}
+
+	if (job->job_type) {
+		DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&reset, ext, sizeof(reset)))
+		return -EFAULT;
+
+	job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
+
+	job->performance_query.queries = kvmalloc_array(reset.count,
+							sizeof(struct v3d_performance_query),
+							GFP_KERNEL);
+	if (!job->performance_query.queries)
+		return -ENOMEM;
+
+	syncs = u64_to_user_ptr(reset.syncs);
+	kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids);
+
+	for (int i = 0; i < reset.count; i++) {
+		u32 sync;
+		u64 ids;
+		u32 __user *ids_pointer;
+		u32 id;
+
+		if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+			kvfree(job->performance_query.queries);
+			return -EFAULT;
+		}
+
+		job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+
+		if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+			kvfree(job->performance_query.queries);
+			return -EFAULT;
+		}
+
+		ids_pointer = u64_to_user_ptr(ids);
+
+		for (int j = 0; j < reset.nperfmons; j++) {
+			if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+				kvfree(job->performance_query.queries);
+				return -EFAULT;
+			}
+
+			job->performance_query.queries[i].kperfmon_ids[j] = id;
+		}
+	}
+	job->performance_query.count = reset.count;
+	job->performance_query.nperfmons = reset.nperfmons;
+
+	return 0;
+}
+
+static int
+v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
+					  struct drm_v3d_extension __user *ext,
+					  struct v3d_cpu_job *job)
+{
+	u32 __user *syncs;
+	u64 __user *kperfmon_ids;
+	struct drm_v3d_copy_performance_query copy;
+
+	if (!job) {
+		DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+		return -EINVAL;
+	}
+
+	if (job->job_type) {
+		DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&copy, ext, sizeof(copy)))
+		return -EFAULT;
+
+	if (copy.pad)
+		return -EINVAL;
+
+	job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
+
+	job->performance_query.queries = kvmalloc_array(copy.count,
+							sizeof(struct v3d_performance_query),
+							GFP_KERNEL);
+	if (!job->performance_query.queries)
+		return -ENOMEM;
+
+	syncs = u64_to_user_ptr(copy.syncs);
+	kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids);
+
+	for (int i = 0; i < copy.count; i++) {
+		u32 sync;
+		u64 ids;
+		u32 __user *ids_pointer;
+		u32 id;
+
+		if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+			kvfree(job->performance_query.queries);
+			return -EFAULT;
+		}
+
+		job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+
+		if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+			kvfree(job->performance_query.queries);
+			return -EFAULT;
+		}
+
+		ids_pointer = u64_to_user_ptr(ids);
+
+		for (int j = 0; j < copy.nperfmons; j++) {
+			if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+				kvfree(job->performance_query.queries);
+				return -EFAULT;
+			}
+
+			job->performance_query.queries[i].kperfmon_ids[j] = id;
+		}
+	}
+	job->performance_query.count = copy.count;
+	job->performance_query.nperfmons = copy.nperfmons;
+	job->performance_query.ncounters = copy.ncounters;
+
+	job->copy.do_64bit = copy.do_64bit;
+	job->copy.do_partial = copy.do_partial;
+	job->copy.availability_bit = copy.availability_bit;
+	job->copy.offset = copy.offset;
+	job->copy.stride = copy.stride;
+
+	return 0;
+}
+
+/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
+ * according to the extension id (name).
+ */
+static int
+v3d_get_extensions(struct drm_file *file_priv,
+		   u64 ext_handles,
+		   struct v3d_submit_ext *se,
+		   struct v3d_cpu_job *job)
+{
+	struct drm_v3d_extension __user *user_ext;
+	int ret;
+
+	user_ext = u64_to_user_ptr(ext_handles);
+	while (user_ext) {
+		struct drm_v3d_extension ext;
+
+		if (copy_from_user(&ext, user_ext, sizeof(ext))) {
+			DRM_DEBUG("Failed to copy submit extension\n");
+			return -EFAULT;
+		}
+
+		switch (ext.id) {
+		case DRM_V3D_EXT_ID_MULTI_SYNC:
+			ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se);
+			break;
+		case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD:
+			ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job);
+			break;
+		case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY:
+			ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job);
+			break;
+		case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY:
+			ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job);
+			break;
+		case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY:
+			ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job);
+			break;
+		case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY:
+			ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job);
+			break;
+		case DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY:
+			ret = v3d_get_cpu_copy_performance_query_params(file_priv, user_ext, job);
+			break;
+		default:
+			DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
+			return -EINVAL;
+		}
+
+		if (ret)
+			return ret;
+
+		user_ext = u64_to_user_ptr(ext.next);
+	}
+
+	return 0;
+}
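Extensions form a singly linked list walked through drm_v3d_extension.next, so userspace can stack several on one submission. A sketch, with struct contents elided and handles hypothetical:

    struct drm_v3d_timestamp_query query = {
            .base.id = DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY,
            .base.next = 0,                   /* terminates the chain */
            /* offsets, syncs, count as in the earlier sketch */
    };
    struct drm_v3d_multi_sync ms = {
            .base.id = DRM_V3D_EXT_ID_MULTI_SYNC,
            .base.next = (uintptr_t)&query,   /* points at the next extension */
    };

    args.extensions = (uintptr_t)&ms;         /* head of the chain */
    args.flags |= DRM_V3D_SUBMIT_EXTENSION;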
+
+/**
+ * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * This is the main entrypoint for userspace to submit a 3D frame to
+ * the GPU. Userspace provides the binner command list (if
+ * applicable), and the kernel sets up the render command list to draw
+ * to the framebuffer described in the ioctl, using the command lists
+ * that the 3D engine's binner will produce.
+ */
+int
+v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+	struct drm_v3d_submit_cl *args = data;
+	struct v3d_submit_ext se = {0};
+	struct v3d_bin_job *bin = NULL;
+	struct v3d_render_job *render = NULL;
+	struct v3d_job *clean_job = NULL;
+	struct v3d_job *last_job;
+	struct ww_acquire_ctx acquire_ctx;
+	int ret = 0;
+
+	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
+
+	if (args->pad)
+		return -EINVAL;
+
+	if (args->flags &&
+	    args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
+			    DRM_V3D_SUBMIT_EXTENSION)) {
+		DRM_INFO("invalid flags: %d\n", args->flags);
+		return -EINVAL;
+	}
+
+	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+		if (ret) {
+			DRM_DEBUG("Failed to get extensions.\n");
+			return ret;
+		}
+	}
+
+	ret = v3d_job_allocate((void *)&render, sizeof(*render));
+	if (ret)
+		return ret;
+
+	ret = v3d_job_init(v3d, file_priv, &render->base,
+			   v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
+	if (ret)
+		goto fail;
+
+	render->start = args->rcl_start;
+	render->end = args->rcl_end;
+	INIT_LIST_HEAD(&render->unref_list);
+
+	if (args->bcl_start != args->bcl_end) {
+		ret = v3d_job_allocate((void *)&bin, sizeof(*bin));
+		if (ret)
+			goto fail;
+
+		ret = v3d_job_init(v3d, file_priv, &bin->base,
+				   v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
+		if (ret)
+			goto fail;
+
+		bin->start = args->bcl_start;
+		bin->end = args->bcl_end;
+		bin->qma = args->qma;
+		bin->qms = args->qms;
+		bin->qts = args->qts;
+		bin->render = render;
+	}
+
+	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+		ret = v3d_job_allocate((void *)&clean_job, sizeof(*clean_job));
+		if (ret)
+			goto fail;
+
+		ret = v3d_job_init(v3d, file_priv, clean_job,
+				   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
+		if (ret)
+			goto fail;
+
+		last_job = clean_job;
+	} else {
+		last_job = &render->base;
+	}
+
+	ret = v3d_lookup_bos(dev, file_priv, last_job,
+			     args->bo_handles, args->bo_handle_count);
+	if (ret)
+		goto fail;
+
+	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
+	if (ret)
+		goto fail;
+
+	if (args->perfmon_id) {
+		render->base.perfmon = v3d_perfmon_find(v3d_priv,
+							args->perfmon_id);
+
+		if (!render->base.perfmon) {
+			ret = -ENOENT;
+			goto fail_perfmon;
+		}
+	}
+
+	mutex_lock(&v3d->sched_lock);
+	if (bin) {
+		bin->base.perfmon = render->base.perfmon;
+		v3d_perfmon_get(bin->base.perfmon);
+		v3d_push_job(&bin->base);
+
+		ret = drm_sched_job_add_dependency(&render->base.base,
+						   dma_fence_get(bin->base.done_fence));
+		if (ret)
+			goto fail_unreserve;
+	}
+
+	v3d_push_job(&render->base);
+
+	if (clean_job) {
+		struct dma_fence *render_fence =
+			dma_fence_get(render->base.done_fence);
+		ret = drm_sched_job_add_dependency(&clean_job->base,
+						   render_fence);
+		if (ret)
+			goto fail_unreserve;
+		clean_job->perfmon = render->base.perfmon;
+		v3d_perfmon_get(clean_job->perfmon);
+		v3d_push_job(clean_job);
+	}
+
+	mutex_unlock(&v3d->sched_lock);
+
+	v3d_attach_fences_and_unlock_reservation(file_priv,
+						 last_job,
+						 &acquire_ctx,
+						 args->out_sync,
+						 &se,
+						 last_job->done_fence);
+
+	v3d_job_put(&bin->base);
+	v3d_job_put(&render->base);
+	v3d_job_put(clean_job);
+
+	return 0;
+
+fail_unreserve:
+	mutex_unlock(&v3d->sched_lock);
+fail_perfmon:
+	drm_gem_unlock_reservations(last_job->bo,
+				    last_job->bo_count, &acquire_ctx);
+fail:
+	v3d_job_cleanup((void *)bin);
+	v3d_job_cleanup((void *)render);
+	v3d_job_cleanup(clean_job);
+	v3d_put_multisync_post_deps(&se);
+
+	return ret;
+}
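From userspace this maps onto DRM_IOCTL_V3D_SUBMIT_CL roughly as below, a sketch with placeholder command-list bounds and handles; field names follow drm_v3d_submit_cl in the UAPI header:

    /* Userspace sketch: submit binner + render command lists. */
    struct drm_v3d_submit_cl submit = {
            .bcl_start = bcl_start,              /* binner CL bounds in the BO */
            .bcl_end = bcl_end,                  /* equal bounds == no bin job */
            .rcl_start = rcl_start,              /* render CL bounds */
            .rcl_end = rcl_end,
            .bo_handles = (uintptr_t)bo_handles, /* array of GEM handles */
            .bo_handle_count = bo_count,
            .out_sync = frame_done_syncobj,      /* hypothetical syncobj handle */
    };

    drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &submit);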
+
+/**
+ * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the TFU, which we don't
+ * need to validate since the TFU is behind the MMU.
+ */
+int
+v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct drm_v3d_submit_tfu *args = data;
+	struct v3d_submit_ext se = {0};
+	struct v3d_tfu_job *job = NULL;
+	struct ww_acquire_ctx acquire_ctx;
+	int ret = 0;
+
+	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
+
+	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+		DRM_DEBUG("invalid flags: %d\n", args->flags);
+		return -EINVAL;
+	}
+
+	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+		if (ret) {
+			DRM_DEBUG("Failed to get extensions.\n");
+			return ret;
+		}
+	}
+
+	ret = v3d_job_allocate((void *)&job, sizeof(*job));
+	if (ret)
+		return ret;
+
+	ret = v3d_job_init(v3d, file_priv, &job->base,
+			   v3d_job_free, args->in_sync, &se, V3D_TFU);
+	if (ret)
+		goto fail;
+
+	job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
+			       sizeof(*job->base.bo), GFP_KERNEL);
+	if (!job->base.bo) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	job->args = *args;
+
+	for (job->base.bo_count = 0;
+	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
+	     job->base.bo_count++) {
+		struct drm_gem_object *bo;
+
+		if (!args->bo_handles[job->base.bo_count])
+			break;
+
+		bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
+		if (!bo) {
+			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
+				  job->base.bo_count,
+				  args->bo_handles[job->base.bo_count]);
+			ret = -ENOENT;
+			goto fail;
+		}
+		job->base.bo[job->base.bo_count] = bo;
+	}
+
+	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
+	if (ret)
+		goto fail;
+
+	mutex_lock(&v3d->sched_lock);
+	v3d_push_job(&job->base);
+	mutex_unlock(&v3d->sched_lock);
+
+	v3d_attach_fences_and_unlock_reservation(file_priv,
+						 &job->base, &acquire_ctx,
+						 args->out_sync,
+						 &se,
+						 job->base.done_fence);
+
+	v3d_job_put(&job->base);
+
+	return 0;
+
+fail:
+	v3d_job_cleanup((void *)job);
+	v3d_put_multisync_post_deps(&se);
+
+	return ret;
+}
+
+/**
+ * v3d_submit_csd_ioctl() - Submits a CSD (compute shader) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the CSD, which we don't
+ * need to validate since the CSD is behind the MMU.
+ */
+int
+v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+	struct drm_v3d_submit_csd *args = data;
+	struct v3d_submit_ext se = {0};
+	struct v3d_csd_job *job = NULL;
+	struct v3d_job *clean_job = NULL;
+	struct ww_acquire_ctx acquire_ctx;
+	int ret;
+
+	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
+
+	if (args->pad)
+		return -EINVAL;
+
+	if (!v3d_has_csd(v3d)) {
+		DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
+		return -EINVAL;
+	}
+
+	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+		DRM_INFO("invalid flags: %d\n", args->flags);
+		return -EINVAL;
+	}
+
+	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+		if (ret) {
+			DRM_DEBUG("Failed to get extensions.\n");
+			return ret;
+		}
+	}
+
+	ret = v3d_setup_csd_jobs_and_bos(file_priv, v3d, args,
+					 &job, &clean_job, &se,
+					 &acquire_ctx);
+	if (ret)
+		goto fail;
+
+	if (args->perfmon_id) {
+		job->base.perfmon = v3d_perfmon_find(v3d_priv,
+						     args->perfmon_id);
+		if (!job->base.perfmon) {
+			ret = -ENOENT;
+			goto fail_perfmon;
+		}
+	}
+
+	mutex_lock(&v3d->sched_lock);
+	v3d_push_job(&job->base);
+
+	ret = drm_sched_job_add_dependency(&clean_job->base,
+					   dma_fence_get(job->base.done_fence));
+	if (ret)
+		goto fail_unreserve;
+
+	v3d_push_job(clean_job);
+	mutex_unlock(&v3d->sched_lock);
+
+	v3d_attach_fences_and_unlock_reservation(file_priv,
+						 clean_job,
+						 &acquire_ctx,
+						 args->out_sync,
+						 &se,
+						 clean_job->done_fence);
+
+	v3d_job_put(&job->base);
+	v3d_job_put(clean_job);
+
+	return 0;
+
+fail_unreserve:
+	mutex_unlock(&v3d->sched_lock);
+fail_perfmon:
+	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
+				    &acquire_ctx);
+fail:
+	v3d_job_cleanup((void *)job);
+	v3d_job_cleanup(clean_job);
+	v3d_put_multisync_post_deps(&se);
+
+	return ret;
+}
+
+static const unsigned int cpu_job_bo_handle_count[] = {
+	[V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1,
+	[V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1,
+	[V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1,
+	[V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2,
+	[V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = 0,
+	[V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = 1,
+};
+
+/**
+ * v3d_submit_cpu_ioctl() - Submits a CPU job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace specifies the CPU job type and data required to perform its
+ * operations through the drm_v3d_extension struct.
+ */
+int
+v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct drm_v3d_submit_cpu *args = data;
+	struct v3d_submit_ext se = {0};
+	struct v3d_submit_ext *out_se = NULL;
+	struct v3d_cpu_job *cpu_job = NULL;
+	struct v3d_csd_job *csd_job = NULL;
+	struct v3d_job *clean_job = NULL;
+	struct ww_acquire_ctx acquire_ctx;
+	int ret;
+
+	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+		DRM_INFO("Invalid flags: %d\n", args->flags);
+		return -EINVAL;
+	}
+
+	ret = v3d_job_allocate((void *)&cpu_job, sizeof(*cpu_job));
+	if (ret)
+		return ret;
+
+	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+		ret = v3d_get_extensions(file_priv, args->extensions, &se, cpu_job);
+		if (ret) {
+			DRM_DEBUG("Failed to get extensions.\n");
+			goto fail;
+		}
+	}
+
+	/* Every CPU job must have a CPU job user extension */
+	if (!cpu_job->job_type) {
+		DRM_DEBUG("CPU job must have a CPU job user extension.\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) {
+		DRM_DEBUG("This CPU job was not submitted with the proper number of BOs.\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	trace_v3d_submit_cpu_ioctl(&v3d->drm, cpu_job->job_type);
+
+	ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
+			   v3d_job_free, 0, &se, V3D_CPU);
+	if (ret)
+		goto fail;
+
+	clean_job = cpu_job->indirect_csd.clean_job;
+	csd_job = cpu_job->indirect_csd.job;
+
+	if (args->bo_handle_count) {
+		ret = v3d_lookup_bos(dev, file_priv, &cpu_job->base,
+				     args->bo_handles, args->bo_handle_count);
+		if (ret)
+			goto fail;
+
+		ret = v3d_lock_bo_reservations(&cpu_job->base, &acquire_ctx);
+		if (ret)
+			goto fail;
+	}
+
+	mutex_lock(&v3d->sched_lock);
+	v3d_push_job(&cpu_job->base);
+
+	switch (cpu_job->job_type) {
+	case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
+		ret = drm_sched_job_add_dependency(&csd_job->base.base,
+						   dma_fence_get(cpu_job->base.done_fence));
+		if (ret)
+			goto fail_unreserve;
+
+		v3d_push_job(&csd_job->base);
+
+		ret = drm_sched_job_add_dependency(&clean_job->base,
+						   dma_fence_get(csd_job->base.done_fence));
+		if (ret)
+			goto fail_unreserve;
+
+		v3d_push_job(clean_job);
+
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&v3d->sched_lock);
+
+	out_se = (cpu_job->job_type == V3D_CPU_JOB_TYPE_INDIRECT_CSD) ?
+		 NULL : &se;
+
+	v3d_attach_fences_and_unlock_reservation(file_priv,
+						 &cpu_job->base,
+						 &acquire_ctx, 0,
+						 out_se, cpu_job->base.done_fence);
+
+	switch (cpu_job->job_type) {
+	case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
+		v3d_attach_fences_and_unlock_reservation(file_priv,
+							 clean_job,
+							 &cpu_job->indirect_csd.acquire_ctx,
+							 0, &se, clean_job->done_fence);
+		break;
+	default:
+		break;
+	}
+
+	v3d_job_put(&cpu_job->base);
+	v3d_job_put(&csd_job->base);
+	v3d_job_put(clean_job);
+
+	return 0;
+
+fail_unreserve:
+	mutex_unlock(&v3d->sched_lock);
+
+	drm_gem_unlock_reservations(cpu_job->base.bo, cpu_job->base.bo_count,
+				    &acquire_ctx);
+
+	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
+				    &cpu_job->indirect_csd.acquire_ctx);
+
+fail:
+	v3d_job_cleanup((void *)cpu_job);
+	v3d_job_cleanup((void *)csd_job);
+	v3d_job_cleanup(clean_job);
+	v3d_put_multisync_post_deps(&se);
+	kvfree(cpu_job->timestamp_query.queries);
+	kvfree(cpu_job->performance_query.queries);
+
+	return ret;
+}
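A userspace sketch of the new ioctl, with hypothetical handles; the BO count must match the cpu_job_bo_handle_count table above, for example two BOs for a copy of timestamp query results:

    __u32 bos[2] = { timestamp_bo, destination_bo };  /* hypothetical handles */
    struct drm_v3d_submit_cpu submit = {
            .flags = DRM_V3D_SUBMIT_EXTENSION,
            .bo_handles = (uintptr_t)bos,
            .bo_handle_count = 2,
            .extensions = (uintptr_t)&copy_ext,  /* a drm_v3d_copy_timestamp_query */
    };

    drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CPU, &submit);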
diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h
index 7aa8dc356e54..5917b94148f5 100644
--- a/drivers/gpu/drm/v3d/v3d_trace.h
+++ b/drivers/gpu/drm/v3d/v3d_trace.h
@@ -225,6 +225,63 @@ TRACE_EVENT(v3d_submit_csd,
 		      __entry->seqno)
 );
 
+TRACE_EVENT(v3d_submit_cpu_ioctl,
+	    TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+	    TP_ARGS(dev, job_type),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(enum v3d_cpu_job_type, job_type)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->job_type = job_type;
+			   ),
+
+	    TP_printk("dev=%u, job_type=%d",
+		      __entry->dev,
+		      __entry->job_type)
+);
+
+TRACE_EVENT(v3d_cpu_job_begin,
+	    TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+	    TP_ARGS(dev, job_type),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(enum v3d_cpu_job_type, job_type)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->job_type = job_type;
+			   ),
+
+	    TP_printk("dev=%u, job_type=%d",
+		      __entry->dev,
+		      __entry->job_type)
+);
+
+TRACE_EVENT(v3d_cpu_job_end,
+	    TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+	    TP_ARGS(dev, job_type),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(enum v3d_cpu_job_type, job_type)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->job_type = job_type;
+			   ),
+
+	    TP_printk("dev=%u, job_type=%d",
+		      __entry->dev,
+		      __entry->job_type)
+);
+
 TRACE_EVENT(v3d_cache_clean_begin,
 	    TP_PROTO(struct drm_device *dev),
 	    TP_ARGS(dev),
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 047b95812334..cd9e66a06596 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -182,7 +182,7 @@ DEFINE_DRM_GEM_FOPS(vbox_fops);
 
 static const struct drm_driver driver = {
 	.driver_features =
-	    DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+	    DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT,
 
 	.fops = &vbox_fops,
 	.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 341edd982cb3..9ff3bade9795 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -429,8 +429,8 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
 		flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
 			VBOX_MOUSE_POINTER_ALPHA;
 		hgsmi_update_pointer_shape(vbox->guest_pool, flags,
-					   min_t(u32, max(fb->hot_x, 0), width),
-					   min_t(u32, max(fb->hot_y, 0), height),
+					   min_t(u32, max(new_state->hotspot_x, 0), width),
+					   min_t(u32, max(new_state->hotspot_y, 0), height),
 					   width, height, vbox->cursor_data, data_size);
 		mutex_unlock(&vbox->hw_mutex);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 4334c7608408..f8e9abe647b9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -177,7 +177,7 @@ static const struct drm_driver driver = {
 	 * out via drm_device::driver_features:
 	 */
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
-			   DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
+			   DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE | DRIVER_CURSOR_HOTSPOT,
 
 	.open = virtio_gpu_driver_open,
 	.postclose = virtio_gpu_driver_postclose,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a2e045f3a000..a72a2dbda031 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -79,6 +79,8 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
 {
 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
 										 plane);
+	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+										 plane);
 	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
 	struct drm_crtc_state *crtc_state;
 	int ret;
@@ -86,6 +88,14 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
 	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
 		return 0;
 
+	/*
+	 * Ignore damage clips if the framebuffer attached to the plane's state
+	 * has changed since the last plane update (page-flip). In this case, a
+	 * full plane update should happen because uploads are done per-buffer.
+	 */
+	if (old_plane_state->fb != new_plane_state->fb)
+		new_plane_state->ignore_damage_clips = true;
+
 	crtc_state = drm_atomic_get_crtc_state(state,
 					       new_plane_state->crtc);
 	if (IS_ERR(crtc_state))
@@ -323,16 +333,16 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
 			  plane->state->crtc_x,
 			  plane->state->crtc_y,
-			  plane->state->fb ? plane->state->fb->hot_x : 0,
-			  plane->state->fb ? plane->state->fb->hot_y : 0);
+			  plane->state->hotspot_x,
+			  plane->state->hotspot_y);
 		output->cursor.hdr.type =
 			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
 		output->cursor.resource_id = cpu_to_le32(handle);
 		if (plane->state->fb) {
 			output->cursor.hot_x =
-				cpu_to_le32(plane->state->fb->hot_x);
+				cpu_to_le32(plane->state->hotspot_x);
 			output->cursor.hot_y =
-				cpu_to_le32(plane->state->fb->hot_y);
+				cpu_to_le32(plane->state->hotspot_y);
 		} else {
 			output->cursor.hot_x = cpu_to_le32(0);
 			output->cursor.hot_y = cpu_to_le32(0);
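The ignore_damage_clips change above matters to compositors that report per-frame damage; a userspace sketch of how damage normally reaches these drivers (libdrm calls; the property id is assumed to be the plane's FB_DAMAGE_CLIPS, looked up beforehand):

    struct drm_mode_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
    uint32_t blob_id;

    drmModeCreatePropertyBlob(fd, &clip, sizeof(clip), &blob_id);
    drmModeAtomicAddProperty(req, plane_id, fb_damage_clips_prop, blob_id);
    /* After a page-flip to a different FB, these drivers now ignore the
     * clips and upload the whole buffer, since uploads are per-buffer. */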
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8b24ecf60e3e..d3e308fdfd5b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1611,7 +1611,7 @@ static const struct file_operations vmwgfx_driver_fops = {
 
 static const struct drm_driver driver = {
 	.driver_features =
-	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
+	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM | DRIVER_CURSOR_HOTSPOT,
 	.ioctls = vmw_ioctls,
 	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
 	.master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 818b7f109f53..5fd0ccaa0b41 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -768,13 +768,8 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
 	s32 hotspot_x, hotspot_y;
 
-	hotspot_x = du->hotspot_x;
-	hotspot_y = du->hotspot_y;
-
-	if (new_state->fb) {
-		hotspot_x += new_state->fb->hot_x;
-		hotspot_y += new_state->fb->hot_y;
-	}
+	hotspot_x = du->hotspot_x + new_state->hotspot_x;
+	hotspot_y = du->hotspot_y + new_state->hotspot_y;
 
 	du->cursor_surface = vps->surf;
 	du->cursor_bo = vps->bo;
@@ -837,10 +832,21 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
 {
 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 									   plane);
+	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+									   plane);
 	struct drm_crtc_state *crtc_state = NULL;
 	struct drm_framebuffer *new_fb = new_state->fb;
+	struct drm_framebuffer *old_fb = old_state->fb;
 	int ret;
 
+	/*
+	 * Ignore damage clips if the framebuffer attached to the plane's state
+	 * has changed since the last plane update (page-flip). In this case, a
+	 * full plane update should happen because uploads are done per-buffer.
+	 */
+	if (old_fb != new_fb)
+		new_state->ignore_damage_clips = true;
+
 	if (new_state->crtc)
 		crtc_state = drm_atomic_get_new_crtc_state(state,
 							   new_state->crtc);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index a7f8611be6f4..db3bb4afbfc4 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -27,7 +27,6 @@
 #include <drm/drm_managed.h>
 #include <drm/drm_mode_config.h>
 #include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 #include <drm/drm_vblank.h>
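The DRIVER_CURSOR_HOTSPOT flag set by the vboxvideo, virtio, and vmwgfx hunks above pairs with the new per-plane hotspot state; a userspace sketch, assuming the property ids are the cursor plane's HOTSPOT_X/HOTSPOT_Y and the client cap name merged in this cycle:

    /* A VM compositor opts in to hotspot-aware cursor planes, then places
     * the hotspot through atomic plane properties instead of fb->hot_x/y. */
    drmSetClientCap(fd, DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, 1);
    drmModeAtomicAddProperty(req, cursor_plane_id, hotspot_x_prop, hot_x);
    drmModeAtomicAddProperty(req, cursor_plane_id, hotspot_y_prop, hot_y);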