-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h     10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c  36
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c          2
3 files changed, 22 insertions, 26 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 8965bbb13db7..0055b8567a43 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -315,8 +315,16 @@ struct i915_ggtt {
struct i915_address_space base;
struct io_mapping mappable; /* Mapping to our CPU mappable region */
+ /* Stolen memory is segmented in hardware with different portions
+ * offlimits to certain functions.
+ *
+ * The drm_mm is initialised to the total accessible range, as found
+ * from the PCI config. On Broadwell+, this is further restricted to
+ * avoid the first page! The upper end of stolen memory is reserved for
+ * hardware functions and similarly removed from the accessible range.
+ */
size_t stolen_size; /* Total size of stolen memory */
- size_t stolen_usable_size; /* Total size minus BIOS reserved */
+ size_t stolen_usable_size; /* Total size minus reserved ranges */
size_t stolen_reserved_base;
size_t stolen_reserved_size;
u64 mappable_end; /* End offset that we can CPU map */
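
The comment added above describes how the allocatable window inside stolen memory is derived: the total range comes from the PCI config, gen8+ parts must keep off the first page, and a range at the top is reserved for hardware functions. As a rough illustration only, here is a standalone C sketch of that layout; struct stolen_layout and stolen_usable_range are hypothetical names, not driver API, and it assumes the hardware-reserved range sits at the very top of stolen.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical model of the stolen-memory segmentation described above;
 * none of these names exist in the driver. */
struct stolen_layout {
	uint64_t total_size;      /* total stolen size, from the PCI config */
	uint64_t reserved_base;   /* reserved range runs from here to total_size */
	bool     skip_first_page; /* gen8+: WaSkipStolenMemoryFirstPage */
};

/* The window the range allocator may hand out: [*start, *end). */
void stolen_usable_range(const struct stolen_layout *l,
			 uint64_t *start, uint64_t *end)
{
	*start = l->skip_first_page ? 4096 : 0;
	*end = l->reserved_base;	/* everything above is off-limits */
}
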
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index e092a3e836dd..f1a80bfa9919 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -54,12 +54,6 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
- /* See the comment at the drm_mm_init() call for more about this check.
- * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
- */
- if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
- start = 4096;
-
mutex_lock(&dev_priv->mm.stolen_lock);
ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
alignment, start, end,
@@ -73,11 +67,8 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
- alignment, 0,
- ggtt->stolen_usable_size);
+ alignment, 0, U64_MAX);
}
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
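
With the explicit first-page check removed, i915_gem_stolen_insert_node can simply request the window 0..U64_MAX: a range allocator never hands out space outside the span it was initialised with, so the caller's window is in effect intersected with that span. The following is a simplified, hypothetical model of that clamping (alloc_span and clamp_to_span are made up for illustration), not the drm_mm implementation.

#include <stdint.h>

/* Hypothetical model, not drm_mm: the allocator's own span. */
struct alloc_span {
	uint64_t start;	/* e.g. 4096 on gen8+ */
	uint64_t size;	/* the usable stolen size */
};

/* Intersect a requested [start, end) window with the allocator's span,
 * which is what makes an end of U64_MAX equivalent to "up to the top
 * of the usable range". */
void clamp_to_span(const struct alloc_span *s, uint64_t *start, uint64_t *end)
{
	uint64_t span_end = s->start + s->size;

	if (*start < s->start)
		*start = s->start;
	if (*end > span_end)
		*end = span_end;
}

Keeping the clamping in one place means only i915_gem_init_stolen() has to know where the usable window actually begins and ends.
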
@@ -410,7 +401,7 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long reserved_total, reserved_base = 0, reserved_size;
- unsigned long stolen_top;
+ unsigned long stolen_usable_start, stolen_top;
mutex_init(&dev_priv->mm.stolen_lock);
@@ -488,20 +479,17 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
ggtt->stolen_size >> 10,
(ggtt->stolen_size - reserved_total) >> 10);
- ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;
+ stolen_usable_start = 0;
+ /* WaSkipStolenMemoryFirstPage:bdw+ */
+ if (INTEL_GEN(dev_priv) >= 8)
+ stolen_usable_start = 4096;
- /*
- * Basic memrange allocator for stolen space.
- *
- * TODO: Notice that some platforms require us to not use the first page
- * of the stolen memory but their BIOSes may still put the framebuffer
- * on the first page. So we don't reserve this page for now because of
- * that. Our current solution is to just prevent new nodes from being
- * inserted on the first page - see the check we have at
- * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
- * problem later.
- */
- drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);
+ ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total -
+ stolen_usable_start;
+
+ /* Basic memrange allocator for stolen space. */
+ drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
+ ggtt->stolen_usable_size);
return 0;
}
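
A short worked example, using made-up numbers (64 MiB of stolen memory, 4 MiB reserved at the top, a gen8+ part), of the arithmetic i915_gem_init_stolen now performs before handing the range to drm_mm_init():

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t stolen_size = 64ull << 20;	/* from the PCI config */
	uint64_t reserved_total = 4ull << 20;	/* hardware-reserved top range */
	uint64_t usable_start = 4096;		/* WaSkipStolenMemoryFirstPage:bdw+ */
	uint64_t usable_size = stolen_size - reserved_total - usable_start;

	/* The allocator then spans [usable_start, usable_start + usable_size),
	 * i.e. [4 KiB, 60 MiB) here, leaving both the first page and the
	 * reserved top range untouched. */
	printf("usable: [%" PRIu64 ", %" PRIu64 ")\n",
	       usable_start, usable_start + usable_size);
	return 0;
}
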
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 62f215b12eb5..bb5f58c19c61 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -538,7 +538,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
end = ggtt->stolen_size - 8 * 1024 * 1024;
else
- end = ggtt->stolen_usable_size;
+ end = U64_MAX;
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.