Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_pages.c')
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_pages.c | 30
1 file changed, 28 insertions, 2 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index e8a083743e09..d6eeefab3d01 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -254,9 +254,35 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
 	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
 		return NULL;
 
+	if (GEM_WARN_ON(type == I915_MAP_WC &&
+			!static_cpu_has(X86_FEATURE_PAT)))
+		return NULL;
+
 	/* A single page can always be kmapped */
-	if (n_pte == 1 && type == I915_MAP_WB)
-		return kmap(sg_page(sgt->sgl));
+	if (n_pte == 1 && type == I915_MAP_WB) {
+		struct page *page = sg_page(sgt->sgl);
+
+		/*
+		 * On 32b, highmem using a finite set of indirect PTE (i.e.
+		 * vmap) to provide virtual mappings of the high pages.
+		 * As these are finite, map_new_virtual() must wait for some
+		 * other kmap() to finish when it runs out. If we map a large
+		 * number of objects, there is no method for it to tell us
+		 * to release the mappings, and we deadlock.
+		 *
+		 * However, if we make an explicit vmap of the page, that
+		 * uses a larger vmalloc arena, and also has the ability
+		 * to tell us to release unwanted mappings. Most importantly,
+		 * it will fail and propagate an error instead of waiting
+		 * forever.
+		 *
+		 * So if the page is beyond the 32b boundary, make an explicit
+		 * vmap. On 64b, this check will be optimised away as we can
+		 * directly kmap any page on the system.
+		 */
+		if (!PageHighMem(page))
+			return kmap(page);
+	}
 
 	mem = stack;
 	if (n_pte > ARRAY_SIZE(stack)) {
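
For context, a minimal self-contained sketch of the mapping policy the hunk adopts; the helpers map_single_page_wb()/unmap_single_page_wb() are hypothetical and not part of i915. A lowmem page keeps the cheap kmap() path, while a highmem page is mapped through vmap(), which returns NULL when its arena is exhausted instead of sleeping in map_new_virtual():

/*
 * Hypothetical sketch of the kmap-vs-vmap decision. The real patch only
 * restricts the kmap() fast path to lowmem pages and lets highmem pages
 * fall through to i915's existing vmap-based mapping below the hunk.
 */
#include <linux/highmem.h>
#include <linux/vmalloc.h>

static void *map_single_page_wb(struct page *page)
{
	/* Lowmem (and every page on 64b) has a permanent kernel mapping. */
	if (!PageHighMem(page))
		return kmap(page);

	/*
	 * Highmem: map through the vmalloc arena rather than kmap_high(),
	 * so address-space exhaustion returns NULL instead of blocking.
	 */
	return vmap(&page, 1, VM_MAP, PAGE_KERNEL);
}

static void unmap_single_page_wb(struct page *page, void *vaddr)
{
	if (!PageHighMem(page))
		kunmap(page);
	else
		vunmap(vaddr);
}

On 64b kernels PageHighMem() is constant false, so the compiler drops the vmap() branch entirely, which is what the patch comment means by the check being optimised away.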