author     Daniel Vetter <daniel.vetter@ffwll.ch>    2013-11-14 18:17:41 +0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2013-11-15 12:44:29 +0400
commit     96ab4c70396e4e5a4d623bc95e86484682bef78f
tree       b5f31a822c7b8c1872ce91ca2aff6506fcd26af9 /drivers/gpu
parent     565ee3897f0cb1e9b09905747b3784e6605767e8
parent     596cc11e7a4a89bf6c45f955402d0bd0c7d51f13
Merge branch 'bdw-fixes' into backlight-rework
Merge the bdw changes into the backlight rework branch so that we can
adapt the new code for bdw, too. This is a bit of a mess, but doing this
another way would have delayed the merging of the backlight
refactoring. Mea culpa.
As discussed with Jani on irc, only add bdw-specific callbacks for the
set/get methods and bake the only other special case into the pch
enable function.
Conflicts:
drivers/gpu/drm/i915/intel_panel.c
v2: Don't enable the PWM too early for bdw (Jani).
v3: Create new bdw_ functions for setup and enable - the rules change
sufficiently imo with the switch from controlling the pwm from the cpu
to controlling it completely from the pch to warrant this.
v4: Rip out unused pipe variable in bdw_enable_backlight (0-day
builder).
Tested-by: Ben Widawsky <ben@bwidawsk.net> (on bdw)
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
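
The split described above boils down to a per-platform table of function
pointers: Broadwell gets its own setup/enable/set/get callbacks and shares
the disable path with the pch code (see the intel_panel.c hunk in the diff
below). A minimal, self-contained sketch of that dispatch pattern -
simplified stand-in types and stubbed register accesses, not the actual
i915 structures:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for struct intel_connector; the real callbacks take that. */
    struct connector { uint32_t level; };

    /* Per-platform backlight ops, mirroring the dev_priv->display fields. */
    struct backlight_funcs {
            int      (*setup)(struct connector *c);
            void     (*enable)(struct connector *c);
            void     (*set)(struct connector *c, uint32_t level);
            uint32_t (*get)(struct connector *c);
    };

    /* bdw variants: the pwm is driven entirely from the pch. */
    static int bdw_setup(struct connector *c) { puts("bdw setup"); return 0; }
    static void bdw_enable(struct connector *c) { puts("bdw enable: pch pwm override"); }
    static void bdw_set(struct connector *c, uint32_t l) { c->level = l; }
    static uint32_t bdw_get(struct connector *c) { return c->level; }

    /* pch variants used by the older pch-split platforms. */
    static int pch_setup(struct connector *c) { puts("pch setup"); return 0; }
    static void pch_enable(struct connector *c) { puts("pch enable"); }
    static void pch_set(struct connector *c, uint32_t l) { c->level = l; }
    static uint32_t pch_get(struct connector *c) { return c->level; }

    /* Pick the callback set once at init time, like
     * intel_panel_init_backlight_funcs() in the hunk below. */
    static void init_backlight_funcs(bool is_broadwell, struct backlight_funcs *f)
    {
            if (is_broadwell) {
                    f->setup = bdw_setup;
                    f->enable = bdw_enable;
                    f->set = bdw_set;
                    f->get = bdw_get;
            } else {
                    f->setup = pch_setup;
                    f->enable = pch_enable;
                    f->set = pch_set;
                    f->get = pch_get;
            }
    }

    int main(void)
    {
            struct backlight_funcs f;
            struct connector c = { 0 };

            init_backlight_funcs(true, &f);
            f.setup(&c);
            f.enable(&c);
            f.set(&c, 100);
            printf("level = %u\n", f.get(&c));
            return 0;
    }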
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_edid.c                |  11
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h           |   7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c       |   9
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c     |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c        |  81
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c           |  18
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c   |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c       |   1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c           |  30
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c       |   7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c       |   7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  |  42
12 files changed, 182 insertions(+), 34 deletions(-)
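
One of the fixes below deserves a note before the diff: __free_pages() takes
an allocation order (log2 of the page count), not a byte size, so the old
gen8_ppgtt_cleanup() calls that passed num_pages << PAGE_SHIFT were handing
it a byte count. The fix wraps the size in get_order(). A user-space
illustration of the conversion (the real get_order() lives in
asm-generic/getorder.h; this reimplementation exists only for the example):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Smallest order such that (PAGE_SIZE << order) >= size; mirrors the
     * kernel helper. size must be non-zero. */
    static int get_order(unsigned long size)
    {
            int order = 0;
            unsigned long n = (size - 1) >> PAGE_SHIFT;

            while (n) {
                    n >>= 1;
                    order++;
            }
            return order;
    }

    int main(void)
    {
            unsigned long num_pt_pages = 4;
            unsigned long bytes = num_pt_pages << PAGE_SHIFT;   /* 16 KiB */

            /* Buggy: passing 16384 as an "order" would ask the allocator
             * to free 2^16384 pages. Correct: order 2 covers four pages. */
            printf("bytes=%lu -> order=%d\n", bytes, get_order(bytes));
            return 0;
    }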
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 2f325bcd0708..fb7cf0e796f6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1329,7 +1329,7 @@ static u32 edid_get_quirks(struct edid *edid)
 }
 
 #define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
-#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
+#define MODE_REFRESH_DIFF(c,t) (abs((c) - (t)))
 
 /**
  * edid_fixup_preferred - set preferred modes based on quirk list
@@ -1344,6 +1344,7 @@ static void edid_fixup_preferred(struct drm_connector *connector,
 {
         struct drm_display_mode *t, *cur_mode, *preferred_mode;
         int target_refresh = 0;
+        int cur_vrefresh, preferred_vrefresh;
 
         if (list_empty(&connector->probed_modes))
                 return;
@@ -1366,10 +1367,14 @@ static void edid_fixup_preferred(struct drm_connector *connector,
                 if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
                         preferred_mode = cur_mode;
 
+                cur_vrefresh = cur_mode->vrefresh ?
+                        cur_mode->vrefresh : drm_mode_vrefresh(cur_mode);
+                preferred_vrefresh = preferred_mode->vrefresh ?
+                        preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode);
                 /* At a given size, try to get closest to target refresh */
                 if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
-                    MODE_REFRESH_DIFF(cur_mode, target_refresh) <
-                    MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+                    MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) <
+                    MODE_REFRESH_DIFF(preferred_vrefresh, target_refresh)) {
                         preferred_mode = cur_mode;
                 }
         }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d069f6ba4286..2a83e14abea6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1755,8 +1755,13 @@ struct drm_i915_file_private {
 #define IS_MOBILE(dev)          (INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)   (IS_HASWELL(dev) && \
                                  ((dev)->pdev->device & 0xFF00) == 0x0C00)
-#define IS_ULT(dev)             (IS_HASWELL(dev) && \
+#define IS_BDW_ULT(dev)         (IS_BROADWELL(dev) && \
+                                 (((dev)->pdev->device & 0xf) == 0x2 || \
+                                 ((dev)->pdev->device & 0xf) == 0x6 || \
+                                 ((dev)->pdev->device & 0xf) == 0xe))
+#define IS_HSW_ULT(dev)         (IS_HASWELL(dev) && \
                                  ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_ULT(dev)             (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)         (IS_HASWELL(dev) && \
                                  ((dev)->pdev->device & 0x00F0) == 0x0020)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3620a1b0a73c..f69bdc741b80 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -335,8 +335,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
                 kfree(ppgtt->gen8_pt_dma_addr[i]);
         }
 
-        __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
-        __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+        __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+        __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
 }
 
 /**
@@ -1239,6 +1239,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
         bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
         if (bdw_gmch_ctl)
                 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+        if (bdw_gmch_ctl > 4) {
+                WARN_ON(!i915_preliminary_hw_support);
+                return 4<<20;
+        }
+
         return bdw_gmch_ctl << 20;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index a0b5a99204a8..6506df26ac9e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -859,7 +859,9 @@ int intel_opregion_setup(struct drm_device *dev)
                 return -ENOTSUPP;
         }
 
+#ifdef CONFIG_ACPI
         INIT_WORK(&opregion->asle_work, asle_work);
+#endif
 
         base = acpi_os_ioremap(asls, OPREGION_SIZE);
         if (!base)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index eadfe338dbeb..e480cf41c536 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -352,6 +352,14 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
         return val;
 }
 
+static u32 bdw_get_backlight(struct intel_connector *connector)
+{
+        struct drm_device *dev = connector->base.dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+
+        return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
 static u32 pch_get_backlight(struct intel_connector *connector)
 {
         struct drm_device *dev = connector->base.dev;
@@ -414,6 +422,14 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
         return val;
 }
 
+static void bdw_set_backlight(struct intel_connector *connector, u32 level)
+{
+        struct drm_device *dev = connector->base.dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+        I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+}
+
 static void pch_set_backlight(struct intel_connector *connector, u32 level)
 {
         struct drm_device *dev = connector->base.dev;
@@ -585,6 +601,38 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
         spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 }
 
+static void bdw_enable_backlight(struct intel_connector *connector)
+{
+        struct drm_device *dev = connector->base.dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_panel *panel = &connector->panel;
+        u32 pch_ctl1, pch_ctl2;
+
+        pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+        if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+                DRM_DEBUG_KMS("pch backlight already enabled\n");
+                pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+                I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+        }
+
+        pch_ctl2 = panel->backlight.max << 16;
+        I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+
+        pch_ctl1 = 0;
+        if (panel->backlight.active_low_pwm)
+                pch_ctl1 |= BLM_PCH_POLARITY;
+
+        /* BDW always uses the pch pwm controls. */
+        pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+
+        I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+        POSTING_READ(BLC_PWM_PCH_CTL1);
+        I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+
+        /* This won't stick until the above enable. */
+        intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
 static void pch_enable_backlight(struct intel_connector *connector)
 {
         struct drm_device *dev = connector->base.dev;
@@ -626,6 +674,7 @@ static void pch_enable_backlight(struct intel_connector *connector)
         pch_ctl1 = 0;
         if (panel->backlight.active_low_pwm)
                 pch_ctl1 |= BLM_PCH_POLARITY;
+
         I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
         POSTING_READ(BLC_PWM_PCH_CTL1);
         I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
@@ -869,6 +918,30 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
  * XXX: Query mode clock or hardware clock and program PWM modulation frequency
  * appropriately when it's 0. Use VBT and/or sane defaults.
  */
+static int bdw_setup_backlight(struct intel_connector *connector)
+{
+        struct drm_device *dev = connector->base.dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_panel *panel = &connector->panel;
+        u32 pch_ctl1, pch_ctl2, val;
+
+        pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+        panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+        pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+        panel->backlight.max = pch_ctl2 >> 16;
+        if (!panel->backlight.max)
+                return -ENODEV;
+
+        val = bdw_get_backlight(connector);
+        panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+        panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
+                panel->backlight.level != 0;
+
+        return 0;
+}
+
 static int pch_setup_backlight(struct intel_connector *connector)
 {
         struct drm_device *dev = connector->base.dev;
@@ -1036,7 +1109,13 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
 
-        if (HAS_PCH_SPLIT(dev)) {
+        if (IS_BROADWELL(dev)) {
+                dev_priv->display.setup_backlight = bdw_setup_backlight;
+                dev_priv->display.enable_backlight = bdw_enable_backlight;
+                dev_priv->display.disable_backlight = pch_disable_backlight;
+                dev_priv->display.set_backlight = bdw_set_backlight;
+                dev_priv->display.get_backlight = bdw_get_backlight;
+        } else if (HAS_PCH_SPLIT(dev)) {
                 dev_priv->display.setup_backlight = pch_setup_backlight;
                 dev_priv->display.enable_backlight = pch_enable_backlight;
                 dev_priv->display.disable_backlight = pch_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0a07d7c9cafc..33a8dbe64039 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5684,6 +5684,7 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         bool is_enabled, enable_requested;
+        unsigned long irqflags;
         uint32_t tmp;
 
         tmp = I915_READ(HSW_PWR_WELL_DRIVER);
@@ -5701,9 +5702,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
                                               HSW_PWR_WELL_STATE_ENABLED), 20))
                                 DRM_ERROR("Timeout enabling power well\n");
                 }
+
+                if (IS_BROADWELL(dev)) {
+                        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+                        I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+                                   dev_priv->de_irq_mask[PIPE_B]);
+                        I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+                                   ~dev_priv->de_irq_mask[PIPE_B] |
+                                   GEN8_PIPE_VBLANK);
+                        I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+                                   dev_priv->de_irq_mask[PIPE_C]);
+                        I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+                                   ~dev_priv->de_irq_mask[PIPE_C] |
+                                   GEN8_PIPE_VBLANK);
+                        POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+                        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+                }
         } else {
                 if (enable_requested) {
-                        unsigned long irqflags;
                         enum pipe p;
 
                         I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b620337e6d67..c2f09d456300 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
         } else if (IS_GEN6(ring->dev)) {
                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
         } else {
+                /* XXX: gen8 returns to sanity */
                 mmio = RING_HWS_PGA(ring->mmio_base);
         }
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index f9883ceff946..6a4f9b615de1 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -782,6 +782,7 @@ static int gen6_do_reset(struct drm_device *dev)
 int intel_gpu_reset(struct drm_device *dev)
 {
         switch (INTEL_INFO(dev)->gen) {
+        case 8:
         case 7:
         case 6: return gen6_do_reset(dev);
         case 5: return ironlake_do_reset(dev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index c03514b93f9c..ac617f3ecd0c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -102,6 +102,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         int retval = VM_FAULT_NOPAGE;
         struct ttm_mem_type_manager *man =
                 &bdev->man[bo->mem.mem_type];
+        struct vm_area_struct cvma;
 
         /*
          * Work around locking order reversal in fault / nopfn
@@ -164,26 +165,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         }
 
         /*
-         * Strictly, we're not allowed to modify vma->vm_page_prot here,
-         * since the mmap_sem is only held in read mode. However, we
-         * modify only the caching bits of vma->vm_page_prot and
-         * consider those bits protected by
-         * the bo->mutex, as we should be the only writers.
-         * There shouldn't really be any readers of these bits except
-         * within vm_insert_mixed()? fork?
-         *
-         * TODO: Add a list of vmas to the bo, and change the
-         * vma->vm_page_prot when the object changes caching policy, with
-         * the correct locks held.
+         * Make a local vma copy to modify the page_prot member
+         * and vm_flags if necessary. The vma parameter is protected
+         * by mmap_sem in write mode.
          */
+        cvma = *vma;
+        cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
+
         if (bo->mem.bus.is_iomem) {
-                vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
-                                                vma->vm_page_prot);
+                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+                                                cvma.vm_page_prot);
         } else {
                 ttm = bo->ttm;
-                vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
-                    vm_get_page_prot(vma->vm_flags) :
-                    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+                if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
+                        cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+                                                        cvma.vm_page_prot);
 
                 /* Allocate all page at once, most common usage */
                 if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
@@ -210,7 +206,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                         pfn = page_to_pfn(page);
                 }
 
-                ret = vm_insert_mixed(vma, address, pfn);
+                ret = vm_insert_mixed(&cvma, address, pfn);
                 /*
                  * Somebody beat us to this PTE or prefaulting to
                  * an already populated PTE, or prefaulting error.
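
The ttm_bo_vm.c change above swaps in a local copy of the vma so the fault
handler never writes to the shared vm_area_struct while mmap_sem is held
only for read. The same pattern in miniature (a toy stand-in struct, not
the kernel's vm_area_struct):

    #include <stdio.h>

    /* Toy stand-in for the fields ttm_bo_vm_fault() cares about. */
    struct toy_vma {
            unsigned long vm_flags;
            unsigned long vm_page_prot;
    };

    static unsigned long io_prot(unsigned long prot)
    {
            return prot | 0x8;      /* pretend: mark uncached for io memory */
    }

    /* Fault path: never touch *vma (shared, read-locked); work on a copy. */
    static void fault(const struct toy_vma *vma, int is_iomem)
    {
            struct toy_vma cvma = *vma;     /* local copy, safe to modify */

            if (is_iomem)
                    cvma.vm_page_prot = io_prot(cvma.vm_page_prot);

            /* The copy is what gets passed on, as with
             * vm_insert_mixed(&cvma, ...) in the hunk above. */
            printf("insert with prot %#lx (original untouched: %#lx)\n",
                   cvma.vm_page_prot, vma->vm_page_prot);
    }

    int main(void)
    {
            struct toy_vma vma = { .vm_flags = 0, .vm_page_prot = 0x1 };

            fault(&vma, 1);
            return 0;
    }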
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 814665b7a117..20d5485eaf98 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -453,12 +453,13 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
  */
 static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 {
-        const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
         static const char *names[vmw_dma_map_max] = {
                 [vmw_dma_phys] = "Using physical TTM page addresses.",
                 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
                 [vmw_dma_map_populate] = "Keeping DMA mappings.",
                 [vmw_dma_map_bind] = "Giving up DMA mappings early."};
+#ifdef CONFIG_X86
+        const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
 #ifdef CONFIG_INTEL_IOMMU
         if (intel_iommu_enabled) {
@@ -500,6 +501,10 @@ out_fixup:
         return -EINVAL;
 #endif
 
+#else /* CONFIG_X86 */
+        dev_priv->map_mode = vmw_dma_map_populate;
+#endif /* CONFIG_X86 */
+
         DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 
         return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 6d0952366f91..6ef0b035becb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -145,7 +145,9 @@ static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
                 }
 
                 page_virtual = kmap_atomic(page);
-                desc_dma = page_virtual[desc_per_page].ppn << PAGE_SHIFT;
+                desc_dma = (dma_addr_t)
+                        le32_to_cpu(page_virtual[desc_per_page].ppn) <<
+                        PAGE_SHIFT;
                 kunmap_atomic(page_virtual);
                 __free_page(page);
 
@@ -217,7 +219,8 @@ static int vmw_gmr_build_descriptors(struct device *dev,
         desc_dma = 0;
         list_for_each_entry_reverse(page, desc_pages, lru) {
                 page_virtual = kmap_atomic(page);
-                page_virtual[desc_per_page].ppn = desc_dma >> PAGE_SHIFT;
+                page_virtual[desc_per_page].ppn = cpu_to_le32
+                        (desc_dma >> PAGE_SHIFT);
                 kunmap_atomic(page_virtual);
                 desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
                                         DMA_TO_DEVICE);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 37fb4befec82..252501a54def 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -32,6 +32,8 @@
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
 
+#define VMW_RES_EVICT_ERR_COUNT 10
+
 struct vmw_user_dma_buffer {
         struct ttm_base_object base;
         struct vmw_dma_buffer dma;
@@ -1091,8 +1093,9 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
  * to a backup buffer.
  *
  * @res:            The resource to evict.
+ * @interruptible:  Whether to wait interruptible.
  */
-int vmw_resource_do_evict(struct vmw_resource *res)
+int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
         struct ttm_validate_buffer val_buf;
         const struct vmw_res_func *func = res->func;
@@ -1102,7 +1105,8 @@ int vmw_resource_do_evict(struct vmw_resource *res)
         BUG_ON(!func->may_evict);
 
         val_buf.bo = NULL;
-        ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
+        ret = vmw_resource_check_buffer(res, &ticket, interruptible,
+                                        &val_buf);
         if (unlikely(ret != 0))
                 return ret;
 
@@ -1141,6 +1145,7 @@ int vmw_resource_validate(struct vmw_resource *res)
         struct vmw_private *dev_priv = res->dev_priv;
         struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
         struct ttm_validate_buffer val_buf;
+        unsigned err_count = 0;
 
         if (likely(!res->func->may_evict))
                 return 0;
@@ -1155,7 +1160,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 
                 write_lock(&dev_priv->resource_lock);
                 if (list_empty(lru_list) || !res->func->may_evict) {
-                        DRM_ERROR("Out of device device id entries "
+                        DRM_ERROR("Out of device device resources "
                                   "for %s.\n", res->func->type_name);
                         ret = -EBUSY;
                         write_unlock(&dev_priv->resource_lock);
@@ -1168,7 +1173,19 @@ int vmw_resource_validate(struct vmw_resource *res)
                 list_del_init(&evict_res->lru_head);
 
                 write_unlock(&dev_priv->resource_lock);
-                vmw_resource_do_evict(evict_res);
+
+                ret = vmw_resource_do_evict(evict_res, true);
+                if (unlikely(ret != 0)) {
+                        write_lock(&dev_priv->resource_lock);
+                        list_add_tail(&evict_res->lru_head, lru_list);
+                        write_unlock(&dev_priv->resource_lock);
+                        if (ret == -ERESTARTSYS ||
+                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
+                                vmw_resource_unreference(&evict_res);
+                                goto out_no_validate;
+                        }
+                }
+
                 vmw_resource_unreference(&evict_res);
         } while (1);
 
@@ -1253,13 +1270,15 @@ bool vmw_resource_needs_backup(const struct vmw_resource *res)
  * @type:           The resource type to evict
  *
  * To avoid thrashing starvation or as part of the hibernation sequence,
- * evict all evictable resources of a specific type.
+ * try to evict all evictable resources of a specific type.
  */
 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                     enum vmw_res_type type)
 {
         struct list_head *lru_list = &dev_priv->res_lru[type];
         struct vmw_resource *evict_res;
+        unsigned err_count = 0;
+        int ret;
 
         do {
                 write_lock(&dev_priv->resource_lock);
@@ -1272,7 +1291,18 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                          lru_head));
                 list_del_init(&evict_res->lru_head);
                 write_unlock(&dev_priv->resource_lock);
-                vmw_resource_do_evict(evict_res);
+
+                ret = vmw_resource_do_evict(evict_res, false);
+                if (unlikely(ret != 0)) {
+                        write_lock(&dev_priv->resource_lock);
+                        list_add_tail(&evict_res->lru_head, lru_list);
+                        write_unlock(&dev_priv->resource_lock);
+                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
+                                vmw_resource_unreference(&evict_res);
+                                return;
+                        }
+                }
+
                 vmw_resource_unreference(&evict_res);
         } while (1);
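
Taken together, the vmwgfx_resource.c hunks turn the two eviction loops into
bounded-retry loops: a failed eviction puts the resource back on the LRU,
-ERESTARTSYS aborts immediately, and more than VMW_RES_EVICT_ERR_COUNT
consecutive failures give up rather than livelock. The control flow, reduced
to a self-contained sketch with a toy LRU (names and types are stand-ins,
not the driver's; userspace has no ERESTARTSYS, so EINTR plays its role):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define EVICT_ERR_COUNT 10      /* mirrors VMW_RES_EVICT_ERR_COUNT */

    /* Toy LRU: a fixed pool of resource ids, -1 meaning "slot empty". */
    static int lru[4] = { 1, 2, 3, -1 };

    static int lru_pop(void)
    {
            for (int i = 0; i < 4; i++) {
                    if (lru[i] != -1) {
                            int id = lru[i];

                            lru[i] = -1;
                            return id;
                    }
            }
            return -1;
    }

    static void lru_push(int id)
    {
            for (int i = 0; i < 4; i++) {
                    if (lru[i] == -1) {
                            lru[i] = id;
                            return;
                    }
            }
    }

    /* Pretend resource 2 fails to evict once, then succeeds on retry. */
    static int do_evict(int id)
    {
            static bool failed_once;

            if (id == 2 && !failed_once) {
                    failed_once = true;
                    return -EBUSY;
            }
            printf("evicted %d\n", id);
            return 0;
    }

    /* Evict until the LRU drains or errors accumulate, as in the loops above. */
    static int evict_all(void)
    {
            unsigned err_count = 0;
            int id;

            while ((id = lru_pop()) != -1) {
                    int ret = do_evict(id);

                    if (ret != 0) {
                            lru_push(id);   /* requeue for a later retry */
                            if (ret == -EINTR ||    /* -ERESTARTSYS in the driver */
                                ++err_count > EVICT_ERR_COUNT)
                                    return ret;     /* bounded: give up */
                    }
            }
            return 0;
    }

    int main(void)
    {
            return evict_all();
    }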