-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c    |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c          | 30
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c      |  7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c      |  7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 42
5 files changed, 62 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 91b68dca0641..1b2f41c3f191 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -876,7 +876,9 @@ int intel_opregion_setup(struct drm_device *dev)
return -ENOTSUPP;
}
+#ifdef CONFIG_ACPI
INIT_WORK(&opregion->asle_work, asle_work);
+#endif
base = acpi_os_ioremap(asls, OPREGION_SIZE);
if (!base)
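
The guard above pairs the INIT_WORK() call with CONFIG_ACPI, presumably because the asle_work handler it references is only compiled into intel_opregion.c when ACPI support is enabled, so the unguarded call breaks !CONFIG_ACPI builds. A minimal standalone sketch of the pattern (invented names, not the kernel code):

#include <stdio.h>

#define CONFIG_ACPI 1                  /* stand-in for the Kconfig option */

struct opregion_like {
	void *header;
#ifdef CONFIG_ACPI
	int asle_work;                 /* stands in for struct work_struct */
#endif
};

static void setup(struct opregion_like *o)
{
#ifdef CONFIG_ACPI
	o->asle_work = 0;              /* stands in for INIT_WORK(&o->asle_work, ...) */
#endif
}

int main(void)
{
	struct opregion_like o = { 0 };
	setup(&o);
	puts("setup ok with or without CONFIG_ACPI");
	return 0;
}

Dropping the #define removes the member access and its initialization together, which is what fencing INIT_WORK() achieves in the driver.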
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index c03514b93f9c..ac617f3ecd0c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -102,6 +102,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int retval = VM_FAULT_NOPAGE;
struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type];
+ struct vm_area_struct cvma;
/*
* Work around locking order reversal in fault / nopfn
@@ -164,26 +165,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/*
- * Strictly, we're not allowed to modify vma->vm_page_prot here,
- * since the mmap_sem is only held in read mode. However, we
- * modify only the caching bits of vma->vm_page_prot and
- * consider those bits protected by
- * the bo->mutex, as we should be the only writers.
- * There shouldn't really be any readers of these bits except
- * within vm_insert_mixed()? fork?
- *
- * TODO: Add a list of vmas to the bo, and change the
- * vma->vm_page_prot when the object changes caching policy, with
- * the correct locks held.
+ * Make a local vma copy to modify the page_prot member
+ * and vm_flags if necessary. The vma parameter is protected
+ * by mmap_sem in write mode.
*/
+ cvma = *vma;
+ cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
+
if (bo->mem.bus.is_iomem) {
- vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
- vma->vm_page_prot);
+ cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+ cvma.vm_page_prot);
} else {
ttm = bo->ttm;
- vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
- vm_get_page_prot(vma->vm_flags) :
- ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+ if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
+ cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+ cvma.vm_page_prot);
/* Allocate all page at once, most common usage */
if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
@@ -210,7 +206,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
pfn = page_to_pfn(page);
}
- ret = vm_insert_mixed(vma, address, pfn);
+ ret = vm_insert_mixed(&cvma, address, pfn);
/*
* Somebody beat us to this PTE or prefaulting to
* an already populated PTE, or prefaulting error.
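
The heart of the TTM change is visible above: the fault handler may not legally write to the caller's vma (the old comment's caching-bits argument was shaky), so it now copies the vma to the stack, derives the page protection on the copy, and hands the copy to vm_insert_mixed(). A toy userspace sketch of the same copy-then-modify idea (all types and helpers invented):

#include <stdio.h>

struct vma_like {                        /* toy stand-in for vm_area_struct */
	unsigned long vm_flags;
	unsigned long vm_page_prot;
};

static unsigned long io_prot(unsigned long prot)
{
	return prot | 0x100;             /* stand-in for ttm_io_prot() */
}

static void insert_pfn(const struct vma_like *v, unsigned long pfn)
{
	printf("insert pfn %lu with prot %#lx\n", pfn, v->vm_page_prot);
}

static void fault(struct vma_like *vma, int is_iomem)
{
	struct vma_like cvma = *vma;     /* local copy, as in the patch */

	cvma.vm_page_prot = cvma.vm_flags & 0xff;  /* vm_get_page_prot() stand-in */
	if (is_iomem)
		cvma.vm_page_prot = io_prot(cvma.vm_page_prot);
	insert_pfn(&cvma, 42);           /* the caller's vma is never written */
}

int main(void)
{
	struct vma_like vma = { .vm_flags = 0x7 };
	fault(&vma, 1);
	printf("original prot unchanged: %#lx\n", vma.vm_page_prot);
	return 0;
}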
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 814665b7a117..20d5485eaf98 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -453,12 +453,13 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
*/
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
- const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
static const char *names[vmw_dma_map_max] = {
[vmw_dma_phys] = "Using physical TTM page addresses.",
[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
[vmw_dma_map_populate] = "Keeping DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+#ifdef CONFIG_X86
+ const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_enabled) {
@@ -500,6 +501,10 @@ out_fixup:
return -EINVAL;
#endif
+#else /* CONFIG_X86 */
+ dev_priv->map_mode = vmw_dma_map_populate;
+#endif /* CONFIG_X86 */
+
DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
return 0;
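
With the CONFIG_X86 fence in place, the dma_ops and IOMMU probing is compiled out entirely on other architectures and the driver unconditionally falls back to vmw_dma_map_populate. A compile-time selection sketch (invented names; __x86_64__/__i386__ stand in for CONFIG_X86):

#include <stdio.h>

enum map_mode { MAP_PHYS, MAP_COHERENT, MAP_POPULATE, MAP_BIND };

static enum map_mode select_mode(void)
{
#if defined(__x86_64__) || defined(__i386__)
	return MAP_COHERENT;             /* stand-in for the x86 probing logic */
#else
	return MAP_POPULATE;             /* the patch's non-x86 fallback */
#endif
}

int main(void)
{
	static const char *names[] = {
		"physical", "coherent", "populate", "bind"
	};

	printf("DMA map mode: %s\n", names[select_mode()]);
	return 0;
}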
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 6d0952366f91..6ef0b035becb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -145,7 +145,9 @@ static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
}
page_virtual = kmap_atomic(page);
- desc_dma = page_virtual[desc_per_page].ppn << PAGE_SHIFT;
+ desc_dma = (dma_addr_t)
+ le32_to_cpu(page_virtual[desc_per_page].ppn) <<
+ PAGE_SHIFT;
kunmap_atomic(page_virtual);
__free_page(page);
@@ -217,7 +219,8 @@ static int vmw_gmr_build_descriptors(struct device *dev,
desc_dma = 0;
list_for_each_entry_reverse(page, desc_pages, lru) {
page_virtual = kmap_atomic(page);
- page_virtual[desc_per_page].ppn = desc_dma >> PAGE_SHIFT;
+ page_virtual[desc_per_page].ppn = cpu_to_le32
+ (desc_dma >> PAGE_SHIFT);
kunmap_atomic(page_virtual);
desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
DMA_TO_DEVICE);
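
Both GMR hunks make the on-page descriptor link explicitly little-endian: the page frame number of the next descriptor page is stored with cpu_to_le32() and read back with le32_to_cpu(), so the chain is walked correctly on big-endian hosts as well. A userspace sketch of the same round-trip using glibc's <endian.h> (struct and field names invented):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct desc {
	uint32_t ppn;                    /* stored little-endian, as the device expects */
	uint32_t num_pages;
};

static void write_link(struct desc *d, uint64_t next_dma)
{
	d->ppn = htole32((uint32_t)(next_dma >> PAGE_SHIFT));
}

static uint64_t read_link(const struct desc *d)
{
	return (uint64_t)le32toh(d->ppn) << PAGE_SHIFT;
}

int main(void)
{
	struct desc d = { 0, 0 };

	write_link(&d, 0x12345000ULL);
	printf("chained dma addr: 0x%llx\n",
	       (unsigned long long)read_link(&d));
	return 0;
}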
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 37fb4befec82..252501a54def 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -32,6 +32,8 @@
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
+#define VMW_RES_EVICT_ERR_COUNT 10
+
struct vmw_user_dma_buffer {
struct ttm_base_object base;
struct vmw_dma_buffer dma;
@@ -1091,8 +1093,9 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
* to a backup buffer.
*
* @res: The resource to evict.
+ * @interruptible: Whether to wait interruptible.
*/
-int vmw_resource_do_evict(struct vmw_resource *res)
+int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
struct ttm_validate_buffer val_buf;
const struct vmw_res_func *func = res->func;
@@ -1102,7 +1105,8 @@ int vmw_resource_do_evict(struct vmw_resource *res)
BUG_ON(!func->may_evict);
val_buf.bo = NULL;
- ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
+ ret = vmw_resource_check_buffer(res, &ticket, interruptible,
+ &val_buf);
if (unlikely(ret != 0))
return ret;
@@ -1141,6 +1145,7 @@ int vmw_resource_validate(struct vmw_resource *res)
struct vmw_private *dev_priv = res->dev_priv;
struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
struct ttm_validate_buffer val_buf;
+ unsigned err_count = 0;
if (likely(!res->func->may_evict))
return 0;
@@ -1155,7 +1160,7 @@ int vmw_resource_validate(struct vmw_resource *res)
write_lock(&dev_priv->resource_lock);
if (list_empty(lru_list) || !res->func->may_evict) {
- DRM_ERROR("Out of device device id entries "
+ DRM_ERROR("Out of device device resources "
"for %s.\n", res->func->type_name);
ret = -EBUSY;
write_unlock(&dev_priv->resource_lock);
@@ -1168,7 +1173,19 @@ int vmw_resource_validate(struct vmw_resource *res)
list_del_init(&evict_res->lru_head);
write_unlock(&dev_priv->resource_lock);
- vmw_resource_do_evict(evict_res);
+
+ ret = vmw_resource_do_evict(evict_res, true);
+ if (unlikely(ret != 0)) {
+ write_lock(&dev_priv->resource_lock);
+ list_add_tail(&evict_res->lru_head, lru_list);
+ write_unlock(&dev_priv->resource_lock);
+ if (ret == -ERESTARTSYS ||
+ ++err_count > VMW_RES_EVICT_ERR_COUNT) {
+ vmw_resource_unreference(&evict_res);
+ goto out_no_validate;
+ }
+ }
+
vmw_resource_unreference(&evict_res);
} while (1);
@@ -1253,13 +1270,15 @@ bool vmw_resource_needs_backup(const struct vmw_resource *res)
* @type: The resource type to evict
*
* To avoid thrashing starvation or as part of the hibernation sequence,
- * evict all evictable resources of a specific type.
+ * try to evict all evictable resources of a specific type.
*/
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
enum vmw_res_type type)
{
struct list_head *lru_list = &dev_priv->res_lru[type];
struct vmw_resource *evict_res;
+ unsigned err_count = 0;
+ int ret;
do {
write_lock(&dev_priv->resource_lock);
@@ -1272,7 +1291,18 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
lru_head));
list_del_init(&evict_res->lru_head);
write_unlock(&dev_priv->resource_lock);
- vmw_resource_do_evict(evict_res);
+
+ ret = vmw_resource_do_evict(evict_res, false);
+ if (unlikely(ret != 0)) {
+ write_lock(&dev_priv->resource_lock);
+ list_add_tail(&evict_res->lru_head, lru_list);
+ write_unlock(&dev_priv->resource_lock);
+ if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
+ vmw_resource_unreference(&evict_res);
+ return;
+ }
+ }
+
vmw_resource_unreference(&evict_res);
} while (1);
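
Both eviction loops now share the same error policy: a failed eviction puts the resource back on the LRU list, and the loop gives up after VMW_RES_EVICT_ERR_COUNT accumulated failures (or immediately on -ERESTARTSYS in the interruptible path) instead of spinning forever on a resource that cannot be evicted. A userspace sketch of the bounded-retry pattern (stand-in types; the kernel uses a limit of 10):

#include <stdio.h>

#define EVICT_ERR_COUNT 3                /* kernel value is 10 */
#define ERESTARTSYS 512

static int do_evict(int id)
{
	return (id % 2) ? -1 : 0;        /* pretend odd ids fail to evict */
}

static void evict_all(void)
{
	unsigned int err_count = 0;
	int id;

	for (id = 0; id < 8; id++) {
		int ret = do_evict(id);

		if (ret != 0) {
			/* the real code re-adds the resource to the LRU here */
			if (ret == -ERESTARTSYS ||
			    ++err_count > EVICT_ERR_COUNT) {
				printf("giving up after %u errors\n", err_count);
				return;
			}
			continue;
		}
		printf("evicted %d\n", id);
	}
}

int main(void)
{
	evict_all();
	return 0;
}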