Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_gtt.c')
 drivers/gpu/drm/i915/gt/intel_gtt.c | 44 +++++++++++++++++++++++---------------------
 1 file changed, 23 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 2eaeba14319e..2ba3983984b9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -12,9 +12,11 @@
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
+#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_gt.h"
+#include "intel_gt_mcr.h"
#include "intel_gt_regs.h"
#include "intel_gtt.h"
@@ -269,11 +271,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT,
ARRAY_SIZE(vm->min_alignment));
- if (HAS_64K_PAGES(vm->i915) && NEEDS_COMPACT_PT(vm->i915) &&
- subclass == VM_CLASS_PPGTT) {
- vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_2M;
- vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_2M;
- } else if (HAS_64K_PAGES(vm->i915)) {
+ if (HAS_64K_PAGES(vm->i915)) {
vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_64K;
vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_64K;
}
@@ -343,7 +341,8 @@ int setup_scratch_page(struct i915_address_space *vm)
*/
size = I915_GTT_PAGE_SIZE_4K;
if (i915_vm_is_4lvl(vm) &&
- HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
+ HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K) &&
+ !HAS_64K_PAGES(vm->i915))
size = I915_GTT_PAGE_SIZE_64K;
do {
@@ -385,18 +384,6 @@ skip:
if (size == I915_GTT_PAGE_SIZE_4K)
return -ENOMEM;
- /*
- * If we need 64K minimum GTT pages for device local-memory,
- * like on XEHPSDV, then we need to fail the allocation here,
- * otherwise we can't safely support the insertion of
- * local-memory pages for this vm, since the HW expects the
- * correct physical alignment and size when the page-table is
- * operating in 64K GTT mode, which includes any scratch PTEs,
- * since userspace can still touch them.
- */
- if (HAS_64K_PAGES(vm->i915))
- return -ENOMEM;
-
size = I915_GTT_PAGE_SIZE_4K;
} while (1);
}
@@ -493,6 +480,18 @@ static void tgl_setup_private_ppat(struct intel_uncore *uncore)
intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}
+static void xehp_setup_private_ppat(struct intel_gt *gt)
+{
+ intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(0), GEN8_PPAT_WB);
+ intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(1), GEN8_PPAT_WC);
+ intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(2), GEN8_PPAT_WT);
+ intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(3), GEN8_PPAT_UC);
+ intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(4), GEN8_PPAT_WB);
+ intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(5), GEN8_PPAT_WB);
+ intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(6), GEN8_PPAT_WB);
+ intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(7), GEN8_PPAT_WB);
+}
+
static void icl_setup_private_ppat(struct intel_uncore *uncore)
{
intel_uncore_write(uncore,
@@ -585,13 +584,16 @@ static void chv_setup_private_ppat(struct intel_uncore *uncore)
intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
-void setup_private_pat(struct intel_uncore *uncore)
+void setup_private_pat(struct intel_gt *gt)
{
- struct drm_i915_private *i915 = uncore->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct drm_i915_private *i915 = gt->i915;
GEM_BUG_ON(GRAPHICS_VER(i915) < 8);
- if (GRAPHICS_VER(i915) >= 12)
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ xehp_setup_private_ppat(gt);
+ else if (GRAPHICS_VER(i915) >= 12)
tgl_setup_private_ppat(uncore);
else if (GRAPHICS_VER(i915) >= 11)
icl_setup_private_ppat(uncore);
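
Note on the interface change above: setup_private_pat() now takes a struct intel_gt pointer instead of a struct intel_uncore pointer, so the Xe_HP path can reach the multicast MCR helpers. A minimal sketch of how a call site would adapt is shown below; example_gt_init_hw() is a hypothetical caller used only for illustration and is not part of this patch.

/*
 * Hypothetical caller, for illustration only (not from this patch):
 * with the new signature the call site hands over the GT, and
 * setup_private_pat() picks the per-platform PPAT programming path
 * itself (multicast MCR writes on Xe_HP, plain uncore writes otherwise).
 */
static void example_gt_init_hw(struct intel_gt *gt)
{
	if (GRAPHICS_VER(gt->i915) >= 8)
		setup_private_pat(gt);	/* was: setup_private_pat(gt->uncore) */
}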