author     Linus Torvalds <torvalds@linux-foundation.org>  2014-05-31 20:19:02 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-05-31 20:19:02 +0400
commit     80e0679469a481ab8baa4fe982205f99004a0686 (patch)
tree       d43d923d126156cdb9b4c32f236c40f23e139806 /drivers
parent     9f12600fe425bc28f0ccba034a77783c09c15af4 (diff)
parent     18ee37a485653aa635cfab9a3710e9bcf5fbca01 (diff)
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
"Mostly quiet now:
i915:
    fixing userspace visible issues, all stable marked
radeon:
one more pll fix, two crashers, one suspend/resume regression"
* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
drm/radeon: Resume fbcon last
drm/radeon: only allocate necessary size for vm bo list
drm/radeon: don't allow RADEON_GEM_DOMAIN_CPU for command submission
drm/radeon: avoid crash if VM command submission isn't available
drm/radeon: lower the ref * post PLL maximum once more
drm/i915: Prevent negative relocation deltas from wrapping
drm/i915: Only copy back the modified fields to userspace from execbuffer
drm/i915: Fix dynamic allocation of physical handles
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |  30
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            | 365
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c      |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 130
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c        |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       |  11
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c       |  12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c         |  21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c     |  11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c    |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c         |   6
12 files changed, 298 insertions, 304 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 96177eec0a0e..eedb023af27d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev)
 	flush_workqueue(dev_priv->wq);
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_free_all_phys_object(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	WARN_ON(dev_priv->mm.aliasing_ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 108e1ec2fa4b..388c028e223c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -242,18 +242,6 @@ struct intel_ddi_plls {
 #define WATCH_LISTS 0
 #define WATCH_GTT 0
 
-#define I915_GEM_PHYS_CURSOR_0 1
-#define I915_GEM_PHYS_CURSOR_1 2
-#define I915_GEM_PHYS_OVERLAY_REGS 3
-#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
-
-struct drm_i915_gem_phys_object {
-	int id;
-	struct page **page_list;
-	drm_dma_handle_t *handle;
-	struct drm_i915_gem_object *cur_obj;
-};
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -1187,9 +1175,6 @@ struct i915_gem_mm {
 	/** Bit 6 swizzling required for Y tiling */
 	uint32_t bit_6_swizzle_y;
 
-	/* storage for physical objects */
-	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
 	/* accounting, useful for userland debugging */
 	spinlock_t object_stat_lock;
 	size_t object_memory;
@@ -1769,7 +1754,7 @@ struct drm_i915_gem_object {
 	struct drm_file *pin_filp;
 
 	/** for phy allocated objects */
-	struct drm_i915_gem_phys_object *phys_obj;
+	drm_dma_handle_t *phys_handle;
 };
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -2204,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
 #define PIN_MAPPABLE 0x1
 #define PIN_NONBLOCK 0x2
 #define PIN_GLOBAL 0x4
+#define PIN_OFFSET_BIAS 0x8
+#define PIN_OFFSET_MASK (~4095)
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm,
 				     uint32_t alignment,
-				     unsigned flags);
+				     uint64_t flags);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2334,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_ring_buffer *pipelined);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
-int i915_gem_attach_phys_object(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				int id,
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 				int align);
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
 
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
@@ -2465,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
 					  int min_size,
 					  unsigned alignment,
 					  unsigned cache_level,
+					  unsigned long start,
+					  unsigned long end,
 					  unsigned flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2871ce75f438..3326770c9ed2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly);
-static int i915_gem_phys_pwrite(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				struct drm_i915_gem_pwrite *args,
-				struct drm_file *file);
 
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj);
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+	drm_dma_handle_t *phys = obj->phys_handle;
+
+	if (!phys)
+		return;
+
+	if (obj->madv == I915_MADV_WILLNEED) {
+		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		char *vaddr = phys->vaddr;
+		int i;
+
+		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+			struct page *page = shmem_read_mapping_page(mapping, i);
+			if (!IS_ERR(page)) {
+				char *dst = kmap_atomic(page);
+				memcpy(dst, vaddr, PAGE_SIZE);
+				drm_clflush_virt_range(dst, PAGE_SIZE);
+				kunmap_atomic(dst);
+
+				set_page_dirty(page);
+				mark_page_accessed(page);
+				page_cache_release(page);
+			}
+			vaddr += PAGE_SIZE;
+		}
+		i915_gem_chipset_flush(obj->base.dev);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+	drm_pci_free(obj->base.dev, phys);
+	obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+			    int align)
+{
+	drm_dma_handle_t *phys;
+	struct address_space *mapping;
+	char *vaddr;
+	int i;
+
+	if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
+			return -EBUSY;
+
+		return 0;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
+	if (obj->base.filp == NULL)
+		return -EINVAL;
+
+	/* create a new object */
+	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+	if (!phys)
+		return -ENOMEM;
+
+	vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+			drm_pci_free(obj->base.dev, phys);
+			return PTR_ERR(page);
+		}
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+
+		vaddr += PAGE_SIZE;
+	}
+
+	obj->phys_handle = phys;
+	return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_device *dev = obj->base.dev;
+	void *vaddr = obj->phys_handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
+
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
+
+	i915_gem_chipset_flush(dev);
+	return 0;
+}
+
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->phys_obj) {
-		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+	if (obj->phys_handle) {
+		ret = i915_gem_phys_pwrite(obj, args, file);
 		goto out;
 	}
 
@@ -3208,12 +3326,14 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
-			   unsigned flags)
+			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	size_t gtt_max =
+	unsigned long start =
+		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+	unsigned long end =
 		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
@@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->base.size > gtt_max) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+	if (obj->base.size > end) {
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
 			  obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
-			  gtt_max);
+			  end);
 		return ERR_PTR(-E2BIG);
 	}
 
@@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 search_free:
 	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 						  size, alignment,
-						  obj->cache_level, 0, gtt_max,
+						  obj->cache_level,
+						  start, end,
 						  DRM_MM_SEARCH_DEFAULT,
 						  DRM_MM_CREATE_DEFAULT);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level, flags);
+					       obj->cache_level,
+					       start, end,
+					       flags);
 		if (ret == 0)
 			goto search_free;
 
@@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	return ret;
 }
 
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (alignment &&
+	    vma->node.start & (alignment - 1))
+		return true;
+
+	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+		return true;
+
+	if (flags & PIN_OFFSET_BIAS &&
+	    vma->node.start < (flags & PIN_OFFSET_MASK))
+		return true;
+
+	return false;
+}
+
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
 		    uint32_t alignment,
-		    unsigned flags)
+		    uint64_t flags)
 {
 	struct i915_vma *vma;
 	int ret;
@@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
 
-		if ((alignment &&
-		     vma->node.start & (alignment - 1)) ||
-		    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+		if (i915_vma_misplaced(vma, alignment, flags)) {
 			WARN(vma->pin_count,
 			     "bo is already pinned with incorrect alignment:"
 			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
 			     i915_gem_obj_offset(obj, vm), alignment,
-			     flags & PIN_MAPPABLE,
+			     !!(flags & PIN_MAPPABLE),
 			     obj->map_and_fenceable);
 			ret = i915_vma_unbind(vma);
 			if (ret)
@@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
-	if (obj->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
 		int ret;
 
@@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		}
 	}
 
+	i915_gem_object_detach_phys(obj);
+
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 	 * before progressing. */
 	if (obj->stolen)
@@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev)
 	register_shrinker(&dev_priv->mm.inactive_shrinker);
 }
 
-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
-				     int id, int size, int align)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-	int ret;
-
-	if (dev_priv->mm.phys_objs[id - 1] || !size)
-		return 0;
-
-	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
-	if (!phys_obj)
-		return -ENOMEM;
-
-	phys_obj->id = id;
-
-	phys_obj->handle = drm_pci_alloc(dev, size, align);
-	if (!phys_obj->handle) {
-		ret = -ENOMEM;
-		goto kfree_obj;
-	}
-#ifdef CONFIG_X86
-	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
-	dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
-	return 0;
-kfree_obj:
-	kfree(phys_obj);
-	return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-
-	if (!dev_priv->mm.phys_objs[id - 1])
-		return;
-
-	phys_obj = dev_priv->mm.phys_objs[id - 1];
-	if (phys_obj->cur_obj) {
-		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
-	}
-
-#ifdef CONFIG_X86
-	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-	drm_pci_free(dev, phys_obj->handle);
-	kfree(phys_obj);
-	dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
-	int i;
-
-	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
-		i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	char *vaddr;
-	int i;
-	int page_count;
-
-	if (!obj->phys_obj)
-		return;
-	vaddr = obj->phys_obj->handle->vaddr;
-
-	page_count = obj->base.size / PAGE_SIZE;
-	for (i = 0; i < page_count; i++) {
-		struct page *page = shmem_read_mapping_page(mapping, i);
-		if (!IS_ERR(page)) {
-			char *dst = kmap_atomic(page);
-			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
-			kunmap_atomic(dst);
-
-			drm_clflush_pages(&page, 1);
-
-			set_page_dirty(page);
-			mark_page_accessed(page);
-			page_cache_release(page);
-		}
-	}
-	i915_gem_chipset_flush(dev);
-
-	obj->phys_obj->cur_obj = NULL;
-	obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj,
-			    int id,
-			    int align)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-	int page_count;
-	int i;
-
-	if (id > I915_MAX_PHYS_OBJECT)
-		return -EINVAL;
-
-	if (obj->phys_obj) {
-		if (obj->phys_obj->id == id)
-			return 0;
-		i915_gem_detach_phys_object(dev, obj);
-	}
-
-	/* create a new object */
-	if (!dev_priv->mm.phys_objs[id - 1]) {
-		ret = i915_gem_init_phys_object(dev, id,
-						obj->base.size, align);
-		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %zu\n",
-				  id, obj->base.size);
-			return ret;
-		}
-	}
-
-	/* bind to the object */
-	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
-	obj->phys_obj->cur_obj = obj;
-
-	page_count = obj->base.size / PAGE_SIZE;
-
-	for (i = 0; i < page_count; i++) {
-		struct page *page;
-		char *dst, *src;
-
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-
-		src = kmap_atomic(page);
-		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(src);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
-		     struct drm_i915_gem_object *obj,
-		     struct drm_i915_gem_pwrite *args,
-		     struct drm_file *file_priv)
-{
-	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-	char __user *user_data = to_user_ptr(args->data_ptr);
-
-	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-		unsigned long unwritten;
-
-		/* The physical object once assigned is fixed for the lifetime
-		 * of the obj, so we can safely drop the lock and continue
-		 * to access vaddr.
-		 */
-		mutex_unlock(&dev->struct_mutex);
-		unwritten = copy_from_user(vaddr, user_data, args->size);
-		mutex_lock(&dev->struct_mutex);
-		if (unwritten)
-			return -EFAULT;
-	}
-
-	i915_gem_chipset_flush(dev);
-	return 0;
-}
-
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 75fca63dc8c1..bbf4b12d842e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 			 int min_size, unsigned alignment, unsigned cache_level,
+			 unsigned long start, unsigned long end,
 			 unsigned flags)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
 	struct i915_vma *vma;
 	int ret = 0;
@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	 */
 	INIT_LIST_HEAD(&unwind_list);
 
-	if (flags & PIN_MAPPABLE) {
-		BUG_ON(!i915_is_ggtt(vm));
+	if (start != 0 || end != vm->total) {
 		drm_mm_init_scan_with_range(&vm->mm, min_size,
-					    alignment, cache_level, 0,
-					    dev_priv->gtt.mappable_end);
+					    alignment, cache_level,
+					    start, end);
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2c9d9cbaf653..20fef6c50267 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,9 @@
 
 #define __EXEC_OBJECT_HAS_PIN (1<<31)
 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)
 
 struct eb_vmas {
 	struct list_head vmas;
@@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence;
-	unsigned flags;
+	uint64_t flags;
 	int ret;
 
 	flags = 0;
@@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 		flags |= PIN_GLOBAL;
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 
 	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 	if (ret)
@@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
+static bool
+eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+	bool need_fence, need_mappable;
+
+	need_fence =
+		has_fenced_gpu_access &&
+		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		obj->tiling_mode != I915_TILING_NONE;
+	need_mappable = need_fence || need_reloc_mappable(vma);
+
+	WARN_ON((need_mappable || need_fence) &&
+		!i915_is_ggtt(vma->vm));
+
+	if (entry->alignment &&
+	    vma->node.start & (entry->alignment - 1))
+		return true;
+
+	if (need_mappable && !obj->map_and_fenceable)
+		return true;
+
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+	    vma->node.start < BATCH_OFFSET_BIAS)
+		return true;
+
+	return false;
+}
+
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct list_head *vmas,
@@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 
 		/* Unbind any ill-fitting objects or pin. */
 		list_for_each_entry(vma, vmas, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-			bool need_fence, need_mappable;
-
-			obj = vma->obj;
-
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
-			need_fence =
-				has_fenced_gpu_access &&
-				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-				obj->tiling_mode != I915_TILING_NONE;
-			need_mappable = need_fence || need_reloc_mappable(vma);
-
-			WARN_ON((need_mappable || need_fence) &&
-			       !i915_is_ggtt(vma->vm));
-
-			if ((entry->alignment &&
-			     vma->node.start & (entry->alignment - 1)) ||
-			    (need_mappable && !obj->map_and_fenceable))
+			if (eb_vma_misplaced(vma, has_fenced_gpu_access))
 				ret = i915_vma_unbind(vma);
 			else
 				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		 * relocations were valid.
 		 */
 		for (j = 0; j < exec[i].relocation_count; j++) {
-			if (copy_to_user(&user_relocs[j].presumed_offset,
-					 &invalid_offset,
-					 sizeof(invalid_offset))) {
+			if (__copy_to_user(&user_relocs[j].presumed_offset,
+					   &invalid_offset,
+					   sizeof(invalid_offset))) {
 				ret = -EFAULT;
 				mutex_lock(&dev->struct_mutex);
 				goto err;
@@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 	return 0;
 }
 
+static struct drm_i915_gem_object *
+eb_get_batch(struct eb_vmas *eb)
+{
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+	return vma->obj;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file,
@@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 
 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
+	batch_obj = eb_get_batch(eb);
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
 	if (!ret) {
+		struct drm_i915_gem_exec_object __user *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+
 		/* Copy the new buffer offsets back to the user's exec list. */
-		for (i = 0; i < args->buffer_count; i++)
-			exec_list[i].offset = exec2_list[i].offset;
-		/* ... and back out to userspace */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user (%d)\n",
+					  args->buffer_count, ret);
+				break;
+			}
 		}
 	}
 
@@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
 	if (!ret) {
 		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec2_list,
-				   sizeof(*exec2_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		struct drm_i915_gem_exec_object2 *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+		int i;
+
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user\n",
+					  args->buffer_count);
+				break;
+			}
 		}
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 154b0f8bb88d..5deb22864c52 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1089,7 +1089,9 @@ alloc:
 	if (ret == -ENOSPC && !retried) {
 		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
 					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
-					       I915_CACHE_NONE, 0);
+					       I915_CACHE_NONE,
+					       0, dev_priv->gtt.base.total,
+					       0);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 48aa516a1ac0..5b60e25baa32 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 		addr = i915_gem_obj_ggtt_offset(obj);
 	} else {
 		int align = IS_I830(dev) ? 16 * 1024 : 256;
-		ret = i915_gem_attach_phys_object(dev, obj,
-						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
-						  align);
+		ret = i915_gem_object_attach_phys(obj, align);
 		if (ret) {
 			DRM_DEBUG_KMS("failed to attach phys object\n");
 			goto fail_locked;
 		}
-		addr = obj->phys_obj->handle->busaddr;
+		addr = obj->phys_handle->busaddr;
 	}
 
 	if (IS_GEN2(dev))
@@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
  finish:
 	if (intel_crtc->cursor_bo) {
-		if (INTEL_INFO(dev)->cursor_needs_physical) {
-			if (intel_crtc->cursor_bo != obj)
-				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
-		} else
+		if (!INTEL_INFO(dev)->cursor_needs_physical)
 			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
 		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
 	}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d8adc9104dca..129db0c7d835 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 	struct overlay_registers __iomem *regs;
 
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
+		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
 					 i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
 	overlay->reg_bo = reg_bo;
 
 	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
-		ret = i915_gem_attach_phys_object(dev, reg_bo,
-						  I915_GEM_PHYS_OVERLAY_REGS,
-						  PAGE_SIZE);
+		ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
 		if (ret) {
 			DRM_ERROR("failed to attach phys overlay regs\n");
 			goto out_free_bo;
 		}
-		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
+		overlay->flip_addr = reg_bo->phys_handle->busaddr;
 	} else {
 		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
 		if (ret) {
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 		/* Cast to make sparse happy, but it's wc memory anyway, so
 		 * equivalent to the wc io mapping on X86. */
 		regs = (struct overlay_registers __iomem *)
-			overlay->reg_bo->phys_obj->handle->vaddr;
+			overlay->reg_bo->phys_handle->vaddr;
 	else
 		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 						i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
 	error->dovsta = I915_READ(DOVSTA);
 	error->isr = I915_READ(ISR);
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-		error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
+		error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
 	else
 		error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
 
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 2b6e0ebcc13a..41ecf8a60611 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		uint32_t domain = r->write_domain ?
 			r->write_domain : r->read_domains;
 
+		if (domain & RADEON_GEM_DOMAIN_CPU) {
+			DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
+				  "for command submission\n");
+			return -EINVAL;
+		}
+
 		p->relocs[i].domain = domain;
 		if (domain == RADEON_GEM_DOMAIN_VRAM)
 			domain |= RADEON_GEM_DOMAIN_GTT;
@@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			return -EINVAL;
 
 		/* we only support VM on some SI+ rings */
-		if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
-		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
-			DRM_ERROR("Ring %d requires VM!\n", p->ring);
-			return -EINVAL;
+		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
+			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
+				DRM_ERROR("Ring %d requires VM!\n", p->ring);
+				return -EINVAL;
+			}
+		} else {
+			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
+				DRM_ERROR("VM not supported on ring %d!\n",
+					  p->ring);
+				return -EINVAL;
+			}
 		}
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0e770bbf7e29..14671406212f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1533,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 
 	radeon_restore_bios_scratch_regs(rdev);
 
-	if (fbcon) {
-		radeon_fbdev_set_suspend(rdev, 0);
-		console_unlock();
-	}
-
 	/* init dig PHYs, disp eng pll */
 	if (rdev->is_atom_bios) {
 		radeon_atom_encoder_init(rdev);
@@ -1562,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	}
 
 	drm_kms_helper_poll_enable(dev);
+
+	if (fbcon) {
+		radeon_fbdev_set_suspend(rdev, 0);
+		console_unlock();
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index f00dbbf4d806..356b733caafe 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -862,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
 				 unsigned *fb_div, unsigned *ref_div)
 {
 	/* limit reference * post divider to a maximum */
-	ref_div_max = min(128 / post_div, ref_div_max);
+	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
 
 	/* get matching reference and feedback divider */
 	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index d9ab99f47612..1f426696de36 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
 					  struct radeon_vm *vm,
 					  struct list_head *head)
 {
 	struct radeon_cs_reloc *list;
-	unsigned i, idx, size;
+	unsigned i, idx;
 
-	size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc);
-	list = kmalloc(size, GFP_KERNEL);
+	list = kmalloc_array(vm->max_pde_used + 1,
+			     sizeof(struct radeon_cs_reloc), GFP_KERNEL);
 	if (!list)
 		return NULL;