Diffstat (limited to 'drivers/gpu/drm/drm_gem_shmem_helper.c')
-rw-r--r-- | drivers/gpu/drm/drm_gem_shmem_helper.c | 23 |
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index b25736baba49..7b9f69f21f1e 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -10,6 +10,10 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+
 #include <drm/drm.h>
 #include <drm/drm_device.h>
 #include <drm/drm_drv.h>
@@ -164,6 +168,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
 		return PTR_ERR(pages);
 	}
 
+	/*
+	 * TODO: Allocating WC pages which are correctly flushed is only
+	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
+	 * ttm_pool.c could use.
+	 */
+#ifdef CONFIG_X86
+	if (shmem->map_wc)
+		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
+#endif
+
 	shmem->pages = pages;
 
 	return 0;
@@ -205,6 +219,11 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
 	if (--shmem->pages_use_count > 0)
 		return;
 
+#ifdef CONFIG_X86
+	if (shmem->map_wc)
+		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
+#endif
+
 	drm_gem_put_pages(obj, shmem->pages,
 			  shmem->pages_mark_dirty_on_put,
 			  shmem->pages_mark_accessed_on_put);
@@ -544,7 +563,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 	} else {
 		page = shmem->pages[page_offset];
 
-		ret = vmf_insert_page(vma, vmf->address, page);
+		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
 	}
 
 	mutex_unlock(&shmem->pages_lock);
@@ -614,7 +633,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 		return ret;
 	}
 
-	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 	if (shmem->map_wc)
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
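For context, the pattern the patch introduces is symmetric: switch the backing pages' kernel mapping to write-combining right after allocation, and switch it back to write-back before the pages are returned to the page allocator, since pages must never be freed while carrying a non-WB attribute. The following is a minimal illustrative sketch of that pattern, not code from this patch; the helper names (my_alloc_wc_pages, my_free_wc_pages) are made up for the example.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

/*
 * Illustrative only: allocate npages pages and, on x86, change their
 * linear-map attribute to write-combining.
 */
static struct page **my_alloc_wc_pages(unsigned int npages)
{
	struct page **pages;
	unsigned int i;

	pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto err_free;
	}

#ifdef CONFIG_X86
	/* Must be undone (set back to WB) before the pages are freed. */
	set_pages_array_wc(pages, npages);
#endif
	return pages;

err_free:
	while (i--)
		__free_page(pages[i]);
	kvfree(pages);
	return NULL;
}

static void my_free_wc_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;

#ifdef CONFIG_X86
	/* Restore write-back before handing pages back to the allocator. */
	set_pages_array_wb(pages, npages);
#endif
	for (i = 0; i < npages; i++)
		__free_page(pages[i]);
	kvfree(pages);
}

The fault-handler and mmap hunks go together: a VM_PFNMAP mapping populated with vmf_insert_pfn() uses special PTEs that the core VM does not treat as struct-page-backed, which among other things keeps get_user_pages() from taking references on the shmem pages while their caching attribute has been changed behind the VM's back.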