diff options
| author | Boris Brezillon <boris.brezillon@collabora.com> | 2026-03-20 18:19:13 +0300 |
|---|---|---|
| committer | Boris Brezillon <boris.brezillon@collabora.com> | 2026-04-03 11:11:04 +0300 |
| commit | cb2a2a5b37adb34ec46d39346b1c71e255827116 (patch) | |
| tree | 95136501b1dfbde1cd08df1074a56c1a1290ff01 | |
| parent | c636ae346d196b71e972188f91b3260ae522ade6 (diff) | |
| download | linux-cb2a2a5b37adb34ec46d39346b1c71e255827116.tar.xz | |
drm/shmem_helper: Make sure PMD entries get the writeable upgrade
Unlike PTEs which are automatically upgraded to writeable entries if
.pfn_mkwrite() returns 0, the PMD upgrades go through .huge_fault(),
and we currently pretend to have handled the make-writeable request
even though we only ever map things read-only. Make sure we pass the
proper "write" info to vmf_insert_pfn_pmd() in that case.
This also means we have to record the mkwrite event in the .huge_fault()
path now. Move the dirty tracking logic to a
drm_gem_shmem_record_mkwrite() helper so it can also be called from
drm_gem_shmem_pfn_mkwrite().
Note that this wasn't a problem before commit 28e3918179aa
("drm/gem-shmem: Track folio accessed/dirty status in mmap"), because
the pgprot were not lowered to read-only before this commit (see the
vma_wants_writenotify() in vma_set_page_prot()).
Fixes: 28e3918179aa ("drm/gem-shmem: Track folio accessed/dirty status in mmap")
Cc: Biju Das <biju.das.jz@bp.renesas.com>
Cc: Thomas Zimmermann <tzimmermann@suse.de>
Cc: Tommaso Merciai <tommaso.merciai.xr@bp.renesas.com>
Reviewed-by: Loïc Molinari <loic.molinari@collabora.com>
Tested-by: Biju Das <biju.das.jz@bp.renesas.com>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Tested-by: Tommaso Merciai <tommaso.merciai.xr@bp.renesas.com>
Link: https://patch.msgid.link/20260320151914.586945-1-boris.brezillon@collabora.com
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
| -rw-r--r-- | drivers/gpu/drm/drm_gem_shmem_helper.c | 46 |
1 file changed, 32 insertions, 14 deletions
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 2062ca607833..545933c7f712 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -554,6 +554,21 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, } EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create); +static void drm_gem_shmem_record_mkwrite(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + loff_t num_pages = obj->size >> PAGE_SHIFT; + pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */ + + if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages)) + return; + + file_update_time(vma->vm_file); + folio_mark_dirty(page_folio(shmem->pages[page_offset])); +} + static vm_fault_t try_insert_pfn(struct vm_fault *vmf, unsigned int order, unsigned long pfn) { @@ -566,8 +581,23 @@ static vm_fault_t try_insert_pfn(struct vm_fault *vmf, unsigned int order, if (aligned && folio_test_pmd_mappable(page_folio(pfn_to_page(pfn)))) { + vm_fault_t ret; + pfn &= PMD_MASK >> PAGE_SHIFT; - return vmf_insert_pfn_pmd(vmf, pfn, false); + + /* Unlike PTEs which are automatically upgraded to + * writeable entries, the PMD upgrades go through + * .huge_fault(). Make sure we pass the "write" info + * along in that case. + * This also means we have to record the write fault + * here, instead of in .pfn_mkwrite(). 
+ */ + ret = vmf_insert_pfn_pmd(vmf, pfn, + vmf->flags & FAULT_FLAG_WRITE); + if (ret == VM_FAULT_NOPAGE && (vmf->flags & FAULT_FLAG_WRITE)) + drm_gem_shmem_record_mkwrite(vmf); + + return ret; } #endif } @@ -655,19 +685,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf) { - struct vm_area_struct *vma = vmf->vma; - struct drm_gem_object *obj = vma->vm_private_data; - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - loff_t num_pages = obj->size >> PAGE_SHIFT; - pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */ - - if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages)) - return VM_FAULT_SIGBUS; - - file_update_time(vma->vm_file); - - folio_mark_dirty(page_folio(shmem->pages[page_offset])); - + drm_gem_shmem_record_mkwrite(vmf); return 0; } |
