Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/Kconfig                     |  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c     |  7
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c          | 14
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c          |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c       |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c         |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c             | 94
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c         |  2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c        |  3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h         |  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c  | 72
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c    |  3
12 files changed, 21 insertions(+), 189 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index a4c020a9a0eb..0039df26854b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -117,9 +117,8 @@ config DRM_DEBUG_MODESET_LOCK
 
 config DRM_FBDEV_EMULATION
 	bool "Enable legacy fbdev support for your modesetting driver"
-	depends on DRM
-	depends on FB=y || FB=DRM
-	select DRM_KMS_HELPER
+	depends on DRM_KMS_HELPER
+	depends on FB=y || FB=DRM_KMS_HELPER
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a573424a6e0b..a1e63ba4c54a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -60,9 +60,10 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
 			goto unlock;
 		}
 
-		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-					       TTM_BO_VM_NUM_PREFAULT, 1);
-		drm_dev_exit(idx);
+		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+					       TTM_BO_VM_NUM_PREFAULT);
+
+		drm_dev_exit(idx);
 	} else {
 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
 	}
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 4d32b61fa1fd..c97323365675 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -79,7 +79,7 @@ static DEFINE_WW_CLASS(crtc_ww_class);
 
 #if IS_ENABLED(CONFIG_DRM_DEBUG_MODESET_LOCK)
 
-static noinline depot_stack_handle_t __stack_depot_save(void)
+static noinline depot_stack_handle_t __drm_stack_depot_save(void)
 {
 	unsigned long entries[8];
 	unsigned int n;
@@ -89,7 +89,7 @@ static noinline depot_stack_handle_t __stack_depot_save(void)
 	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
 }
 
-static void __stack_depot_print(depot_stack_handle_t stack_depot)
+static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
 {
 	struct drm_printer p = drm_debug_printer("drm_modeset_lock");
 	unsigned long *entries;
@@ -108,11 +108,11 @@ static void __stack_depot_print(depot_stack_handle_t stack_depot)
 	kfree(buf);
 }
 #else /* CONFIG_DRM_DEBUG_MODESET_LOCK */
-static depot_stack_handle_t __stack_depot_save(void)
+static depot_stack_handle_t __drm_stack_depot_save(void)
 {
 	return 0;
 }
-static void __stack_depot_print(depot_stack_handle_t stack_depot)
+static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
 {
 }
 #endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */
@@ -266,7 +266,7 @@ EXPORT_SYMBOL(drm_modeset_acquire_fini);
 void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
 {
 	if (WARN_ON(ctx->contended))
-		__stack_depot_print(ctx->stack_depot);
+		__drm_stack_depot_print(ctx->stack_depot);
 
 	while (!list_empty(&ctx->locked)) {
 		struct drm_modeset_lock *lock;
@@ -286,7 +286,7 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
 	int ret;
 
 	if (WARN_ON(ctx->contended))
-		__stack_depot_print(ctx->stack_depot);
+		__drm_stack_depot_print(ctx->stack_depot);
 
 	if (ctx->trylock_only) {
 		lockdep_assert_held(&ctx->ww_ctx);
@@ -317,7 +317,7 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
 		ret = 0;
 	} else if (ret == -EDEADLK) {
 		ctx->contended = lock;
-		ctx->stack_depot = __stack_depot_save();
+		ctx->stack_depot = __drm_stack_depot_save();
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9558e9e1b431..cb685fe2039b 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -81,7 +81,6 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	bool plane_disabling = false;
 	int i;
-	bool fence_cookie = dma_fence_begin_signalling();
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
@@ -112,7 +111,6 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
 	}
 
 	drm_atomic_helper_commit_hw_done(state);
-	dma_fence_end_signalling(fence_cookie);
 }
 
 static const struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index b3210e9b2cd8..9416bee92141 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -56,7 +56,7 @@ static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
 
 	nouveau_bo_del_io_reserve_lru(bo);
 	prot = vm_get_page_prot(vma->vm_flags);
-	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
 	nouveau_bo_add_io_reserve_lru(bo);
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 		return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 458f92a70887..a36a4f2c76b0 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -61,7 +61,7 @@ static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
 		goto unlock_resv;
 
 	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-				       TTM_BO_VM_NUM_PREFAULT, 1);
+				       TTM_BO_VM_NUM_PREFAULT);
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 		goto unlock_mclk;
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 33680c94127c..08ba083a80d2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -173,89 +173,6 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_vm_reserve);
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/**
- * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults
- * @vmf: Fault data
- * @bo: The buffer object
- * @page_offset: Page offset from bo start
- * @fault_page_size: The size of the fault in pages.
- * @pgprot: The page protections.
- * Does additional checking whether it's possible to insert a PUD or PMD
- * pfn and performs the insertion.
- *
- * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if
- * a huge fault was not possible, or on insertion error.
- */
-static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
-					struct ttm_buffer_object *bo,
-					pgoff_t page_offset,
-					pgoff_t fault_page_size,
-					pgprot_t pgprot)
-{
-	pgoff_t i;
-	vm_fault_t ret;
-	unsigned long pfn;
-	pfn_t pfnt;
-	struct ttm_tt *ttm = bo->ttm;
-	bool write = vmf->flags & FAULT_FLAG_WRITE;
-
-	/* Fault should not cross bo boundary. */
-	page_offset &= ~(fault_page_size - 1);
-	if (page_offset + fault_page_size > bo->resource->num_pages)
-		goto out_fallback;
-
-	if (bo->resource->bus.is_iomem)
-		pfn = ttm_bo_io_mem_pfn(bo, page_offset);
-	else
-		pfn = page_to_pfn(ttm->pages[page_offset]);
-
-	/* pfn must be fault_page_size aligned. */
-	if ((pfn & (fault_page_size - 1)) != 0)
-		goto out_fallback;
-
-	/* Check that memory is contiguous. */
-	if (!bo->resource->bus.is_iomem) {
-		for (i = 1; i < fault_page_size; ++i) {
-			if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
-				goto out_fallback;
-		}
-	} else if (bo->bdev->funcs->io_mem_pfn) {
-		for (i = 1; i < fault_page_size; ++i) {
-			if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
-				goto out_fallback;
-		}
-	}
-
-	pfnt = __pfn_to_pfn_t(pfn, PFN_DEV);
-	if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT))
-		ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write);
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-	else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT))
-		ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write);
-#endif
-	else
-		WARN_ON_ONCE(ret = VM_FAULT_FALLBACK);
-
-	if (ret != VM_FAULT_NOPAGE)
-		goto out_fallback;
-
-	return VM_FAULT_NOPAGE;
-out_fallback:
-	count_vm_event(THP_FAULT_FALLBACK);
-	return VM_FAULT_FALLBACK;
-}
-#else
-static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
-					struct ttm_buffer_object *bo,
-					pgoff_t page_offset,
-					pgoff_t fault_page_size,
-					pgprot_t pgprot)
-{
-	return VM_FAULT_FALLBACK;
-}
-#endif
-
 /**
  * ttm_bo_vm_fault_reserved - TTM fault helper
  * @vmf: The struct vm_fault given as argument to the fault callback
@@ -263,7 +180,6 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
  * @num_prefault: Maximum number of prefault pages. The caller may want to
  * specify this based on madvice settings and the size of the GPU object
  * backed by the memory.
- * @fault_page_size: The size of the fault in pages.
  *
  * This function inserts one or more page table entries pointing to the
  * memory backing the buffer object, and then returns a return code
@@ -277,8 +193,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
  */
 vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 				    pgprot_t prot,
-				    pgoff_t num_prefault,
-				    pgoff_t fault_page_size)
+				    pgoff_t num_prefault)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct ttm_buffer_object *bo = vma->vm_private_data;
@@ -329,11 +244,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		prot = pgprot_decrypted(prot);
 	}
 
-	/* We don't prefault on huge faults. Yet. */
-	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1)
-		return ttm_bo_vm_insert_huge(vmf, bo, page_offset,
-					     fault_page_size, prot);
-
 	/*
 	 * Speculatively prefault a number of pages. Only error on
 	 * first page.
@@ -429,7 +339,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 
 	prot = vma->vm_page_prot;
 	if (drm_dev_enter(ddev, &idx)) {
-		ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+		ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
 		drm_dev_exit(idx);
 	} else {
 		ret = ttm_bo_vm_dummy_page(vmf, prot);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 3750fd216131..930574ad2bca 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -30,7 +30,7 @@ static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
 		int bval = (i + block * EDID_LENGTH) << 8;
 		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
 				      0x02, (0x80 | (0x02 << 5)), bval,
-				      0xA1, read_buff, 2, HZ);
+				      0xA1, read_buff, 2, 1000);
 		if (ret < 1) {
 			DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
 			kfree(read_buff);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 749db18dcfa2..d86e1ad4a972 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -163,10 +163,11 @@ static __poll_t virtio_gpu_poll(struct file *filp,
 	struct drm_file *drm_file = filp->private_data;
 	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
 	struct drm_device *dev = drm_file->minor->dev;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_pending_event *e = NULL;
 	__poll_t mask = 0;
 
-	if (!vfpriv->ring_idx_mask)
+	if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask)
 		return drm_poll(filp, wait);
 
 	poll_wait(filp, &drm_file->event_wait, wait);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index a833751099b5..858aff99a3fe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1550,10 +1550,6 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
 			pgoff_t start, pgoff_t end);
 vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
 vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
-				enum page_entry_size pe_size);
-#endif
 
 /* Transparent hugepage support - vmwgfx_thp.c */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index e5a9a5cbd01a..922317d1acc8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -477,7 +477,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
 	else
 		prot = vm_get_page_prot(vma->vm_flags);
 
-	ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault, 1);
+	ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 		return ret;
 
@@ -486,73 +486,3 @@ out_unlock:
 
 	return ret;
 }
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
-				enum page_entry_size pe_size)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
-		vma->vm_private_data;
-	struct vmw_buffer_object *vbo =
-		container_of(bo, struct vmw_buffer_object, base);
-	pgprot_t prot;
-	vm_fault_t ret;
-	pgoff_t fault_page_size;
-	bool write = vmf->flags & FAULT_FLAG_WRITE;
-
-	switch (pe_size) {
-	case PE_SIZE_PMD:
-		fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
-		break;
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-	case PE_SIZE_PUD:
-		fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
-		break;
-#endif
-	default:
-		WARN_ON_ONCE(1);
-		return VM_FAULT_FALLBACK;
-	}
-
-	/* Always do write dirty-tracking and COW on PTE level. */
-	if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping(vma->vm_flags)))
-		return VM_FAULT_FALLBACK;
-
-	ret = ttm_bo_vm_reserve(bo, vmf);
-	if (ret)
-		return ret;
-
-	if (vbo->dirty) {
-		pgoff_t allowed_prefault;
-		unsigned long page_offset;
-
-		page_offset = vmf->pgoff -
-			drm_vma_node_start(&bo->base.vma_node);
-		if (page_offset >= bo->resource->num_pages ||
-		    vmw_resources_clean(vbo, page_offset,
-					page_offset + PAGE_SIZE,
-					&allowed_prefault)) {
-			ret = VM_FAULT_SIGBUS;
-			goto out_unlock;
-		}
-
-		/*
-		 * Write protect, so we get a new fault on write, and can
-		 * split.
-		 */
-		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
-	} else {
-		prot = vm_get_page_prot(vma->vm_flags);
-	}
-
-	ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
-	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
-		return ret;
-
-out_unlock:
-	dma_resv_unlock(bo->base.resv);
-
-	return ret;
-}
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index e6b1f98ec99f..0a4c340252ec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -61,9 +61,6 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 		.fault = vmw_bo_vm_fault,
 		.open = ttm_bo_vm_open,
 		.close = ttm_bo_vm_close,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		.huge_fault = vmw_bo_vm_huge_fault,
-#endif
 	};
 	struct drm_file *file_priv = filp->private_data;
 	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
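
The common calling pattern after this change, as the radeon and nouveau hunks above show, is a plain three-argument ttm_bo_vm_fault_reserved() call between reserve and unlock, with the old trailing fault_page_size argument (always 1 on the PTE path) dropped. A minimal sketch of that pattern — example_gem_fault() is a hypothetical driver callback assembled from the helpers visible in this diff, not a function from the commit:

static vm_fault_t example_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	pgprot_t prot = vm_get_page_prot(vmf->vma->vm_flags);
	vm_fault_t ret;

	/* Take the bo reservation; may itself return RETRY/NOPAGE. */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* Was: ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1); */
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;	/* retry path: reservation already released */

	dma_resv_unlock(bo->base.resv);
	return ret;
}

Note that every driver hunk above leaves the early VM_FAULT_RETRY return untouched: on that path the helper has already dropped the reservation, so only the normal path unlocks explicitly.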