From a1e2265332e9344f913811ac6d2b84a506195bd8 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 21 Sep 2013 00:35:38 +0200 Subject: drm/i915: Use kcalloc more No buffer overflows here, but better safe than sorry. v2: - Fixup the sizeof conversion, I've missed the pointer deref (Jani). - Drop the redundant GFP_ZERO, kcalloc alreads memsets (Jani). - Use kmalloc_array for the execbuf fastpath to avoid the memset (Chris). I've opted to leave all other conversions as-is since they aren't in a fastpath and dealing with cleared memory instead of random garbage is just generally nicer. Cc: Jani Nikula Cc: Chris Wilson Reviewed-by: Jani Nikula [danvet: Drop the contentious kmalloc_array hunk in execbuf.] Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 212f6d8c35ec..e999496532c6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -336,7 +336,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; ppgtt->base.cleanup = gen6_ppgtt_cleanup; ppgtt->base.scratch = dev_priv->gtt.base.scratch; - ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, + ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *), GFP_KERNEL); if (!ppgtt->pt_pages) return -ENOMEM; @@ -347,7 +347,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) goto err_pt_alloc; } - ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries, + ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t), GFP_KERNEL); if (!ppgtt->pt_dma_addr) goto err_pt_alloc; -- cgit v1.2.3 From b35b380ed46bb01726bec1795e6443e625306757 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 16 Oct 2013 09:18:21 -0700 Subject: drm/i915: Make PTE valid encoding optional We need this to work around a corruption when the boot kernel image loads the hibernated kernel image from swap on Haswell systems - somehow not everything is properly shut off. This is just the prep work, the next patch will implement the actual workaround. 
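For illustration, a minimal standalone sketch of the helper shape this prep patch introduces: the encode helper grows a bool so callers can ask for a PTE with the valid bit clear. This is plain C, not the driver code; PTE_ADDR_BITS and the scratch address are simplified stand-ins for the real macros and values.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define GEN6_PTE_VALID (1 << 0)
/* Simplified stand-in for the driver's address-encode macro. */
#define PTE_ADDR_BITS(addr) ((uint32_t)(addr) & ~0xfffu)

typedef uint32_t gen6_gtt_pte_t;

/* With the extra 'valid' argument the same helper can emit either a live
 * PTE or an entry whose valid bit is clear. */
static gen6_gtt_pte_t pte_encode_sketch(uint64_t addr, bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= PTE_ADDR_BITS(addr);
	return pte;
}

int main(void)
{
	uint64_t scratch = 0x12345000; /* hypothetical scratch page address */
	printf("valid PTE:   0x%08x\n", pte_encode_sketch(scratch, true));
	printf("invalid PTE: 0x%08x\n", pte_encode_sketch(scratch, false));
	return 0;
}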
Signed-off-by: Ben Widawsky [danvet: Add a commit message suitable for -fixes and add cc: stable] Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 3 ++- drivers/gpu/drm/i915/i915_gem_gtt.c | 35 ++++++++++++++++++++--------------- 2 files changed, 22 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 35874b3a86dc..3979a81dd6ee 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -497,7 +497,8 @@ struct i915_address_space { /* FIXME: Need a more generic return type */ gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, - enum i915_cache_level level); + enum i915_cache_level level, + bool valid); /* Create a valid PTE */ void (*clear_range)(struct i915_address_space *vm, unsigned int first_entry, unsigned int num_entries); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 212f6d8c35ec..32aa69d7ef20 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -58,9 +58,10 @@ #define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, - enum i915_cache_level level) + enum i915_cache_level level, + bool valid) { - gen6_gtt_pte_t pte = GEN6_PTE_VALID; + gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); switch (level) { @@ -79,9 +80,10 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, } static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, - enum i915_cache_level level) + enum i915_cache_level level, + bool valid) { - gen6_gtt_pte_t pte = GEN6_PTE_VALID; + gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); switch (level) { @@ -105,9 +107,10 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, #define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, - enum i915_cache_level level) + enum i915_cache_level level, + bool valid) { - gen6_gtt_pte_t pte = GEN6_PTE_VALID; + gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); /* Mark the page as writeable. Other platforms don't have a @@ -122,9 +125,10 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, } static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, - enum i915_cache_level level) + enum i915_cache_level level, + bool valid) { - gen6_gtt_pte_t pte = GEN6_PTE_VALID; + gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= HSW_PTE_ADDR_ENCODE(addr); if (level != I915_CACHE_NONE) @@ -134,9 +138,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, } static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, - enum i915_cache_level level) + enum i915_cache_level level, + bool valid) { - gen6_gtt_pte_t pte = GEN6_PTE_VALID; + gen6_gtt_pte_t pte = valid ? 
GEN6_PTE_VALID : 0; pte |= HSW_PTE_ADDR_ENCODE(addr); switch (level) { @@ -245,7 +250,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned last_pte, i; - scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); while (num_entries) { last_pte = first_pte + num_entries; @@ -282,7 +287,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, dma_addr_t page_addr; page_addr = sg_page_iter_dma_address(&sg_iter); - pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level); + pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true); if (++act_pte == I915_PPGTT_PT_ENTRIES) { kunmap_atomic(pt_vaddr); act_pt++; @@ -536,7 +541,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { addr = sg_page_iter_dma_address(&sg_iter); - iowrite32(vm->pte_encode(addr, level), >t_entries[i]); + iowrite32(vm->pte_encode(addr, level, true), >t_entries[i]); i++; } @@ -548,7 +553,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, */ if (i != 0) WARN_ON(readl(>t_entries[i-1]) != - vm->pte_encode(addr, level)); + vm->pte_encode(addr, level, true)); /* This next bit makes the above posting read even more important. We * want to flush the TLBs only after we're certain all the PTE updates @@ -573,7 +578,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, first_entry, num_entries, max_entries)) num_entries = max_entries; - scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, >t_base[i]); readl(gtt_base); -- cgit v1.2.3 From 828c79087cec61eaf4c76bb32c222fbe35ac3930 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Wed, 16 Oct 2013 09:21:30 -0700 Subject: drm/i915: Disable GGTT PTEs on GEN6+ suspend Once the machine gets to a certain point in the suspend process, we expect the GPU to be idle. If it is not, we might corrupt memory. Empirically (with an early version of this patch) we have seen this is not the case. We cannot currently explain why the latent GPU writes occur. In the technical sense, this patch is a workaround in that we have an issue we can't explain, and the patch indirectly solves the issue. However, it's really better than a workaround because we understand why it works, and it really should be a safe thing to do in all cases. The noticeable effect other than the debug messages would be an increase in the suspend time. I have not measure how expensive it actually is. I think it would be good to spend further time to root cause why we're seeing these latent writes, but it shouldn't preclude preventing the fallout. NOTE: It should be safe (and makes some sense IMO) to also keep the VALID bit unset on resume when we clear_range(). I've opted not to do this as properly clearing those bits at some later point would be extra work. 
v2: Fix bugzilla link Bugzilla: http://bugs.freedesktop.org/show_bug.cgi?id=65496 Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=59321 Tested-by: Takashi Iwai Tested-by: Paulo Zanoni Signed-off-by: Ben Widawsky Tested-By: Todd Previte Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.c | 5 ++- drivers/gpu/drm/i915/i915_drv.h | 5 ++- drivers/gpu/drm/i915/i915_gem_gtt.c | 76 ++++++++++++++++++++++++++++++++----- drivers/gpu/drm/i915/i915_reg.h | 4 ++ 4 files changed, 78 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 69d8ed5416c3..2ad27880cd04 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -505,6 +505,8 @@ static int i915_drm_freeze(struct drm_device *dev) intel_modeset_suspend_hw(dev); } + i915_gem_suspend_gtt_mappings(dev); + i915_save_state(dev); intel_opregion_fini(dev); @@ -648,7 +650,8 @@ static int i915_drm_thaw(struct drm_device *dev) mutex_lock(&dev->struct_mutex); i915_gem_restore_gtt_mappings(dev); mutex_unlock(&dev->struct_mutex); - } + } else if (drm_core_check_feature(dev, DRIVER_MODESET)) + i915_check_and_clear_faults(dev); __i915_drm_thaw(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3979a81dd6ee..ab0f2c0a440c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -501,7 +501,8 @@ struct i915_address_space { bool valid); /* Create a valid PTE */ void (*clear_range)(struct i915_address_space *vm, unsigned int first_entry, - unsigned int num_entries); + unsigned int num_entries, + bool use_scratch); void (*insert_entries)(struct i915_address_space *vm, struct sg_table *st, unsigned int first_entry, @@ -2066,6 +2067,8 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj); +void i915_check_and_clear_faults(struct drm_device *dev); +void i915_gem_suspend_gtt_mappings(struct drm_device *dev); void i915_gem_restore_gtt_mappings(struct drm_device *dev); int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 32aa69d7ef20..1f7b4caefb6e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -241,7 +241,8 @@ static int gen6_ppgtt_enable(struct drm_device *dev) /* PPGTT support for Sandybdrige/Gen6 and later */ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, unsigned first_entry, - unsigned num_entries) + unsigned num_entries, + bool use_scratch) { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); @@ -372,7 +373,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) } ppgtt->base.clear_range(&ppgtt->base, 0, - ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES); + ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true); ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); @@ -449,7 +450,8 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, { ppgtt->base.clear_range(&ppgtt->base, i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT); + obj->base.size >> PAGE_SHIFT, + true); } extern int intel_iommu_gfx_mapped; @@ -490,15 +492,65 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) 
dev_priv->mm.interruptible = interruptible; } +void i915_check_and_clear_faults(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + int i; + + if (INTEL_INFO(dev)->gen < 6) + return; + + for_each_ring(ring, dev_priv, i) { + u32 fault_reg; + fault_reg = I915_READ(RING_FAULT_REG(ring)); + if (fault_reg & RING_FAULT_VALID) { + DRM_DEBUG_DRIVER("Unexpected fault\n" + "\tAddr: 0x%08lx\\n" + "\tAddress space: %s\n" + "\tSource ID: %d\n" + "\tType: %d\n", + fault_reg & PAGE_MASK, + fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", + RING_FAULT_SRCID(fault_reg), + RING_FAULT_FAULT_TYPE(fault_reg)); + I915_WRITE(RING_FAULT_REG(ring), + fault_reg & ~RING_FAULT_VALID); + } + } + POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); +} + +void i915_gem_suspend_gtt_mappings(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Don't bother messing with faults pre GEN6 as we have little + * documentation supporting that it's a good idea. + */ + if (INTEL_INFO(dev)->gen < 6) + return; + + i915_check_and_clear_faults(dev); + + dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, + dev_priv->gtt.base.start / PAGE_SIZE, + dev_priv->gtt.base.total / PAGE_SIZE, + false); +} + void i915_gem_restore_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; + i915_check_and_clear_faults(dev); + /* First fill our portion of the GTT with scratch pages */ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, dev_priv->gtt.base.start / PAGE_SIZE, - dev_priv->gtt.base.total / PAGE_SIZE); + dev_priv->gtt.base.total / PAGE_SIZE, + true); list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { i915_gem_clflush_object(obj, obj->pin_display); @@ -565,7 +617,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, static void gen6_ggtt_clear_range(struct i915_address_space *vm, unsigned int first_entry, - unsigned int num_entries) + unsigned int num_entries, + bool use_scratch) { struct drm_i915_private *dev_priv = vm->dev->dev_private; gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = @@ -578,7 +631,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, first_entry, num_entries, max_entries)) num_entries = max_entries; - scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch); + for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, >t_base[i]); readl(gtt_base); @@ -599,7 +653,8 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm, static void i915_ggtt_clear_range(struct i915_address_space *vm, unsigned int first_entry, - unsigned int num_entries) + unsigned int num_entries, + bool unused) { intel_gtt_clear_range(first_entry, num_entries); } @@ -627,7 +682,8 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, entry, - obj->base.size >> PAGE_SHIFT); + obj->base.size >> PAGE_SHIFT, + true); obj->has_global_gtt_mapping = 0; } @@ -714,11 +770,11 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", hole_start, hole_end); - ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count); + ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true); } /* And finally clear the reserved guard page */ - 
ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1); + ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true); } static bool diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index fd721ea8728e..ef9b35479f01 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -604,6 +604,10 @@ #define ARB_MODE_SWIZZLE_IVB (1<<5) #define RENDER_HWS_PGA_GEN7 (0x04080) #define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) +#define RING_FAULT_GTTSEL_MASK (1<<11) +#define RING_FAULT_SRCID(x) ((x >> 3) & 0xff) +#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3) +#define RING_FAULT_VALID (1<<0) #define DONE_REG 0x40b0 #define BSD_HWS_PGA_GEN7 (0x04180) #define BLT_HWS_PGA_GEN7 (0x04280) -- cgit v1.2.3 From 8fe6bd239a72ba31c2568a29b730c9cd4f05c52c Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 2 Nov 2013 21:07:01 -0700 Subject: drm/i915/bdw: Disable PPGTT for now This will be changed once the gen8 code is fully implemented. v2: Use ENOSYS instead of ENXIO as suggested by Chris. Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index c4c42e7cbd7b..7d5a51fea1bb 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -410,6 +410,8 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 8) ret = gen6_ppgtt_init(ppgtt); + else if (IS_GEN8(dev)) + ret = -ENOSYS; else BUG(); -- cgit v1.2.3 From 9459d252378aea80d28dc12bfec9a0d31b2a61bf Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Sun, 3 Nov 2013 16:53:55 -0800 Subject: drm/i915/bdw: support GMS and GGMS changes All the BARs have the ability to grow. v2: Pulled out the simulator workaround to a separate patch. Rebased. v3: Rebase onto latest vlv patches from Jesse. v4: Rebased on top of the early stolen quirk patch from Jesse. v5: Use the new macro names. s/INTEL_BDW_PCI_IDS_D/INTEL_BDW_D_IDS s/INTEL_BDW_PCI_IDS_M/INTEL_BDW_M_IDS It's Jesse's fault for not following the convention I originally set. Cc: Ingo Molnar Cc: H. 
Peter Anvin Cc: Jesse Barnes Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- arch/x86/kernel/early-quirks.c | 12 ++++++++++++ drivers/gpu/drm/i915/i915_gem_gtt.c | 29 ++++++++++++++++++++++++++--- include/drm/i915_drm.h | 4 ++++ 3 files changed, 42 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index b3cd3ebae077..96f958d8cd45 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -313,6 +313,16 @@ static size_t __init gen6_stolen_size(int num, int slot, int func) return gmch_ctrl << 25; /* 32 MB units */ } +static inline size_t gen8_stolen_size(int num, int slot, int func) +{ + u16 gmch_ctrl; + + gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); + gmch_ctrl >>= BDW_GMCH_GMS_SHIFT; + gmch_ctrl &= BDW_GMCH_GMS_MASK; + return gmch_ctrl << 25; /* 32 MB units */ +} + typedef size_t (*stolen_size_fn)(int num, int slot, int func); static struct pci_device_id intel_stolen_ids[] __initdata = { @@ -336,6 +346,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = { INTEL_IVB_D_IDS(gen6_stolen_size), INTEL_HSW_D_IDS(gen6_stolen_size), INTEL_HSW_M_IDS(gen6_stolen_size), + INTEL_BDW_M_IDS(gen8_stolen_size), + INTEL_BDW_D_IDS(gen8_stolen_size) }; static void __init intel_graphics_stolen(int num, int slot, int func) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 7d5a51fea1bb..074aec11fc9b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -869,6 +869,15 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) return snb_gmch_ctl << 20; } +static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) +{ + bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; + bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; + if (bdw_gmch_ctl) + bdw_gmch_ctl = 1 << bdw_gmch_ctl; + return bdw_gmch_ctl << 20; +} + static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) { snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; @@ -876,6 +885,13 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) return snb_gmch_ctl << 25; /* 32 MB units */ } +static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl) +{ + bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; + bdw_gmch_ctl &= BDW_GMCH_GMS_MASK; + return bdw_gmch_ctl << 25; /* 32 MB units */ +} + static int gen6_gmch_probe(struct drm_device *dev, size_t *gtt_total, size_t *stolen, @@ -903,10 +919,16 @@ static int gen6_gmch_probe(struct drm_device *dev, if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); - *stolen = gen6_get_stolen_size(snb_gmch_ctl); - *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; + if (IS_GEN8(dev)) { + gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); + *gtt_total = (gtt_size / 8) << PAGE_SHIFT; + *stolen = gen8_get_stolen_size(snb_gmch_ctl); + } else { + gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); + *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; + *stolen = gen6_get_stolen_size(snb_gmch_ctl); + } /* For Modern GENs the PTEs and register space are split in the BAR */ gtt_bus_addr = pci_resource_start(dev->pdev, 0) + @@ -916,6 +938,7 @@ static int gen6_gmch_probe(struct drm_device *dev, if (!dev_priv->gtt.gsm) { DRM_ERROR("Failed to map the gtt page table\n"); return -ENOMEM; + } ret = setup_scratch_page(dev); 
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 3abfa6ea226e..97d5497debc1 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -49,6 +49,10 @@ extern bool i915_gpu_turbo_disable(void); #define SNB_GMCH_GGMS_MASK 0x3 #define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ #define SNB_GMCH_GMS_MASK 0x1f +#define BDW_GMCH_GGMS_SHIFT 6 +#define BDW_GMCH_GGMS_MASK 0x3 +#define BDW_GMCH_GMS_SHIFT 8 +#define BDW_GMCH_GMS_MASK 0xff #define I830_GMCH_CTRL 0x52 -- cgit v1.2.3 From 63340133f3d58634b068f15f9f43f905ea94b837 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Nov 2013 19:32:22 -0800 Subject: drm/i915/bdw: Make gen8_gmch_probe Probing gen8 is similar to gen6. To make the code cleaner and more maintainable however we can use the probe functions to split it out. v2: Rebased on top of update gtt probe infrastructure. v3: Rebased on top of Kenneth' Graunke's ->pte_encode refactoring. V4: Resolve conflicts with Ben's latest ppgtt patches, also switch to gen < 8 testing instead of gen <= 7. v5: Resolve conflicts with address space vfunc changes in upstream. v6: Use 39b DMA mask. At least, for this mode, it is the correct mask. (Imre) Cc: Imre Deak Signed-off-by: Ben Widawsky Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 92 +++++++++++++++++++++++++++---------- 1 file changed, 68 insertions(+), 24 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 074aec11fc9b..ee71aa558f9a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -892,6 +892,66 @@ static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl) return bdw_gmch_ctl << 25; /* 32 MB units */ } +static int ggtt_probe_common(struct drm_device *dev, + size_t gtt_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + phys_addr_t gtt_bus_addr; + int ret; + + /* For Modern GENs the PTEs and register space are split in the BAR */ + gtt_bus_addr = pci_resource_start(dev->pdev, 0) + + (pci_resource_len(dev->pdev, 0) / 2); + + dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); + if (!dev_priv->gtt.gsm) { + DRM_ERROR("Failed to map the gtt page table\n"); + return -ENOMEM; + } + + ret = setup_scratch_page(dev); + if (ret) { + DRM_ERROR("Scratch setup failed\n"); + /* iounmap will also get called at remove, but meh */ + iounmap(dev_priv->gtt.gsm); + } + + return ret; +} + +static int gen8_gmch_probe(struct drm_device *dev, + size_t *gtt_total, + size_t *stolen, + phys_addr_t *mappable_base, + unsigned long *mappable_end) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned int gtt_size; + u16 snb_gmch_ctl; + int ret; + + /* TODO: We're not aware of mappable constraints on gen8 yet */ + *mappable_base = pci_resource_start(dev->pdev, 2); + *mappable_end = pci_resource_len(dev->pdev, 2); + + if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39))) + pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39)); + + pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); + + *stolen = gen8_get_stolen_size(snb_gmch_ctl); + + gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); + *gtt_total = (gtt_size / 8) << PAGE_SHIFT; + + ret = ggtt_probe_common(dev, gtt_size); + + dev_priv->gtt.base.clear_range = NULL; + dev_priv->gtt.base.insert_entries = NULL; + + return ret; +} + static int gen6_gmch_probe(struct drm_device *dev, size_t *gtt_total, size_t *stolen, @@ -899,7 +959,6 @@ static int 
gen6_gmch_probe(struct drm_device *dev, unsigned long *mappable_end) { struct drm_i915_private *dev_priv = dev->dev_private; - phys_addr_t gtt_bus_addr; unsigned int gtt_size; u16 snb_gmch_ctl; int ret; @@ -920,30 +979,12 @@ static int gen6_gmch_probe(struct drm_device *dev, pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - if (IS_GEN8(dev)) { - gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); - *gtt_total = (gtt_size / 8) << PAGE_SHIFT; - *stolen = gen8_get_stolen_size(snb_gmch_ctl); - } else { - gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); - *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; - *stolen = gen6_get_stolen_size(snb_gmch_ctl); - } + *stolen = gen6_get_stolen_size(snb_gmch_ctl); - /* For Modern GENs the PTEs and register space are split in the BAR */ - gtt_bus_addr = pci_resource_start(dev->pdev, 0) + - (pci_resource_len(dev->pdev, 0) / 2); + gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); + *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; - dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); - if (!dev_priv->gtt.gsm) { - DRM_ERROR("Failed to map the gtt page table\n"); - return -ENOMEM; - - } - - ret = setup_scratch_page(dev); - if (ret) - DRM_ERROR("Scratch setup failed\n"); + ret = ggtt_probe_common(dev, gtt_size); dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range; dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries; @@ -997,7 +1038,7 @@ int i915_gem_gtt_init(struct drm_device *dev) if (INTEL_INFO(dev)->gen <= 5) { gtt->gtt_probe = i915_gmch_probe; gtt->base.cleanup = i915_gmch_remove; - } else { + } else if (INTEL_INFO(dev)->gen < 8) { gtt->gtt_probe = gen6_gmch_probe; gtt->base.cleanup = gen6_gmch_remove; if (IS_HASWELL(dev) && dev_priv->ellc_size) @@ -1010,6 +1051,9 @@ int i915_gem_gtt_init(struct drm_device *dev) gtt->base.pte_encode = ivb_pte_encode; else gtt->base.pte_encode = snb_pte_encode; + } else { + dev_priv->gtt.gtt_probe = gen8_gmch_probe; + dev_priv->gtt.base.cleanup = gen6_gmch_remove; } ret = gtt->gtt_probe(dev, >t->base.total, >t->stolen_size, -- cgit v1.2.3 From d31eb10e6c9f0f040c82ab710f93ce95e6f14d89 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Sat, 2 Nov 2013 21:07:17 -0700 Subject: drm/i915/bdw: Create gen8_gtt_pte_t With gen6 PTE type in place, pave the way for the new gen8 type. 
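Why a dedicated 64-bit type matters for sizing: the GGTT total is derived from the size of the PTE array divided by the entry width, so doubling the entry size halves the address space a given GSM can map. A tiny standalone arithmetic sketch (the 4 MB figure is hypothetical):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long long gsm_bytes = 4ULL << 20; /* hypothetical 4 MB of GGTT PTE space */

	/* Same formula as the probe code: number of entries times page size. */
	unsigned long long gen6_total = (gsm_bytes / 4) << PAGE_SHIFT; /* 32-bit PTEs */
	unsigned long long gen8_total = (gsm_bytes / 8) << PAGE_SHIFT; /* 64-bit PTEs */

	printf("gen6 GGTT: %llu GB, gen8 GGTT: %llu GB\n",
	       gen6_total >> 30, gen8_total >> 30); /* 4 GB vs 2 GB */
	return 0;
}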
Signed-off-by: Ben Widawsky Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index ee71aa558f9a..b66284e10032 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -30,6 +30,7 @@ #define GEN6_PPGTT_PD_ENTRIES 512 #define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) +typedef uint64_t gen8_gtt_pte_t; /* PPGTT stuff */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) @@ -942,7 +943,7 @@ static int gen8_gmch_probe(struct drm_device *dev, *stolen = gen8_get_stolen_size(snb_gmch_ctl); gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); - *gtt_total = (gtt_size / 8) << PAGE_SHIFT; + *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT; ret = ggtt_probe_common(dev, gtt_size); -- cgit v1.2.3 From 94ec8f6130ef4fdce1c80ca6bdeeef103a239a7c Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Sat, 2 Nov 2013 21:07:18 -0700 Subject: drm/i915/bdw: Add GTT functions With the PTE clarifications, the bind and clear functions can now be added for gen8. v2: Use for_each_sg_pages in gen8_ggtt_insert_entries. v3: Drop dev argument to pte encode functions, upstream lost it. Also rebase on top of the scratch page movement. v4: Rebase on top of the new address space vfuncs. v5: Add the bool use_scratch argument to clear_range and the bool valid argument to the PTE encode function to follow upstream changes. v6: Add a FIXME(BDW) about the size mismatch of the readback check that Jon Bloomfield spotted. v7: Squash in fixup patch from Ben for the posting read to match the 64bit ptes and so shut up the WARN. Signed-off-by: Ben Widawsky (v1) Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 89 +++++++++++++++++++++++++++++++++++-- 1 file changed, 86 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index b66284e10032..cf539a6a5a22 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -58,6 +58,15 @@ typedef uint64_t gen8_gtt_pte_t; #define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) #define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) +static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid) +{ + gen8_gtt_pte_t pte = valid ? 
_PAGE_PRESENT | _PAGE_RW : 0; + pte |= addr; + return pte; +} + static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, enum i915_cache_level level, bool valid) @@ -576,6 +585,57 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) return 0; } +static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte) +{ +#ifdef writeq + writeq(pte, addr); +#else + iowrite32((u32)pte, addr); + iowrite32(pte >> 32, addr + 4); +#endif +} + +static void gen8_ggtt_insert_entries(struct i915_address_space *vm, + struct sg_table *st, + unsigned int first_entry, + enum i915_cache_level level) +{ + struct drm_i915_private *dev_priv = vm->dev->dev_private; + gen8_gtt_pte_t __iomem *gtt_entries = + (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; + int i = 0; + struct sg_page_iter sg_iter; + dma_addr_t addr; + + for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { + addr = sg_dma_address(sg_iter.sg) + + (sg_iter.sg_pgoffset << PAGE_SHIFT); + gen8_set_pte(>t_entries[i], + gen8_pte_encode(addr, level, true)); + i++; + } + + /* + * XXX: This serves as a posting read to make sure that the PTE has + * actually been updated. There is some concern that even though + * registers and PTEs are within the same BAR that they are potentially + * of NUMA access patterns. Therefore, even with the way we assume + * hardware should work, we must keep this posting read for paranoia. + */ + if (i != 0) + WARN_ON(readq(>t_entries[i-1]) + != gen8_pte_encode(addr, level, true)); + +#if 0 /* TODO: Still needed on GEN8? */ + /* This next bit makes the above posting read even more important. We + * want to flush the TLBs only after we're certain all the PTE updates + * have finished. + */ + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); +#endif +} + /* * Binds an object into the global gtt with the specified cache level. 
The object * will be accessible to the GPU via commands whose operands reference offsets @@ -618,6 +678,30 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, POSTING_READ(GFX_FLSH_CNTL_GEN6); } +static void gen8_ggtt_clear_range(struct i915_address_space *vm, + unsigned int first_entry, + unsigned int num_entries, + bool use_scratch) +{ + struct drm_i915_private *dev_priv = vm->dev->dev_private; + gen8_gtt_pte_t scratch_pte, __iomem *gtt_base = + (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; + const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; + int i; + + if (WARN(num_entries > max_entries, + "First entry = %d; Num entries = %d (max=%d)\n", + first_entry, num_entries, max_entries)) + num_entries = max_entries; + + scratch_pte = gen8_pte_encode(vm->scratch.addr, + I915_CACHE_LLC, + use_scratch); + for (i = 0; i < num_entries; i++) + gen8_set_pte(>t_base[i], scratch_pte); + readl(gtt_base); +} + static void gen6_ggtt_clear_range(struct i915_address_space *vm, unsigned int first_entry, unsigned int num_entries, @@ -641,7 +725,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, readl(gtt_base); } - static void i915_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, unsigned int pg_start, @@ -947,8 +1030,8 @@ static int gen8_gmch_probe(struct drm_device *dev, ret = ggtt_probe_common(dev, gtt_size); - dev_priv->gtt.base.clear_range = NULL; - dev_priv->gtt.base.insert_entries = NULL; + dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range; + dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries; return ret; } -- cgit v1.2.3 From fbe5d36e7789827704b8f15cd99971f2615c0832 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Nov 2013 19:56:49 -0800 Subject: drm/i915/bdw: Support BDW caching MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit BDW caching works differently than the previous generations. Instead of having bits in the PTE which directly control how the page is cached, the 3 PTE bits PWT PCD and PAT provide an index into a PAT defined by register 0x40e0. This style of caching is functionally equivalent to how it works on HSW and before. v2: Tiny bikeshed as discussed on internal irc. v3: Squash in patch from Ville to mirror the x86 PAT setup more like in arch/x86/mm/pat.c. Primarily, the 0th index will be WB, and not uncached. v4: Comment for reason to not use a 64b write on the PPAT. v5: Add a FIXME comment that the caching bits in the PAT registers might be wrong due to doc confusion. 
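For reference, a small standalone sketch of how the three PTE bits select one of the eight PPAT entries, assuming the x86-style bit positions and PAT:PCD:PWT index ordering that the patch mirrors (the _PAGE_* values here are restated locally, not taken from kernel headers):

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PWT (1ULL << 3)
#define _PAGE_PCD (1ULL << 4)
#define _PAGE_PAT (1ULL << 7)

/* 3-bit selector into the 8-entry PPAT programmed at 0x40e0. */
static unsigned ppat_index(uint64_t pte)
{
	return (!!(pte & _PAGE_PAT) << 2) |
	       (!!(pte & _PAGE_PCD) << 1) |
	        !!(pte & _PAGE_PWT);
}

int main(void)
{
	printf("uncached (PWT|PCD) -> PPAT entry %u\n", ppat_index(_PAGE_PWT | _PAGE_PCD)); /* 3: UC */
	printf("cached (PAT)       -> PPAT entry %u\n", ppat_index(_PAGE_PAT));             /* 4: WB */
	printf("display (PCD)      -> PPAT entry %u\n", ppat_index(_PAGE_PCD));             /* 2: WT eLLC */
	return 0;
}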
Cc: Chris Wilson Signed-off-by: Ben Widawsky (v1) Signed-off-by: Ville Syrjälä Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 45 +++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 1 + 2 files changed, 46 insertions(+) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index cf539a6a5a22..47765d26f15a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -58,12 +58,21 @@ typedef uint64_t gen8_gtt_pte_t; #define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) #define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) +#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) +#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ +#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */ +#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */ + static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, enum i915_cache_level level, bool valid) { gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0; pte |= addr; + if (level != I915_CACHE_NONE) + pte |= PPAT_CACHED_INDEX; + else + pte |= PPAT_UNCACHED_INDEX; return pte; } @@ -806,6 +815,7 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node, *end -= 4096; } } + void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, unsigned long mappable_end, @@ -1003,6 +1013,39 @@ static int ggtt_probe_common(struct drm_device *dev, return ret; } +/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability + * bits. When using advanced contexts each context stores its own PAT, but + * writing this data shouldn't be harmful even in those cases. */ +static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv) +{ +#define GEN8_PPAT_UC (0<<0) +#define GEN8_PPAT_WC (1<<0) +#define GEN8_PPAT_WT (2<<0) +#define GEN8_PPAT_WB (3<<0) +#define GEN8_PPAT_ELLC_OVERRIDE (0<<2) +/* FIXME(BDW): Bspec is completely confused about cache control bits. */ +#define GEN8_PPAT_LLC (1<<2) +#define GEN8_PPAT_LLCELLC (2<<2) +#define GEN8_PPAT_LLCeLLC (3<<2) +#define GEN8_PPAT_AGE(x) (x<<4) +#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8)) + uint64_t pat; + + pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ + GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */ + GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ + GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ + GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | + GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | + GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | + GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); + + /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b + * write would work. 
*/ + I915_WRITE(GEN8_PRIVATE_PAT, pat); + I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32); +} + static int gen8_gmch_probe(struct drm_device *dev, size_t *gtt_total, size_t *stolen, @@ -1028,6 +1071,8 @@ static int gen8_gmch_probe(struct drm_device *dev, gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT; + gen8_setup_private_ppat(dev_priv); + ret = ggtt_probe_common(dev, gtt_size); dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c696779fb85e..132b5d083df8 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -665,6 +665,7 @@ #define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3) #define RING_FAULT_VALID (1<<0) #define DONE_REG 0x40b0 +#define GEN8_PRIVATE_PAT 0x40e0 #define BSD_HWS_PGA_GEN7 (0x04180) #define BLT_HWS_PGA_GEN7 (0x04280) #define VEBOX_HWS_PGA_GEN7 (0x04380) -- cgit v1.2.3 From 37aca44ad5b0fa30d4a9cd77b492b45f2b6a4643 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Nov 2013 20:47:32 -0800 Subject: drm/i915/bdw: PPGTT init & cleanup Aside from the potential size increase of the PPGTT, the primary difference from previous hardware is the Page Directories are no longer carved out of the Global GTT. Note that the PDE allocation is done as a 8MB contiguous allocation, this needs to be eventually fixed (since driver reloading will be a pain otherwise). Also, this will be a no-go for real PPGTT support. v2: Move vtable initialization v3: Resolve conflicts due to patch series reordering. v4: Rebase on top of the address space refactoring of the PPGTT support. Drop Imre's r-b tag for v2, too outdated by now. v5: Free the correct amount of memory, "get_order takes size not a page count." 
(Imre) Signed-off-by: Ben Widawsky Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 19 ++++-- drivers/gpu/drm/i915/i915_gem_gtt.c | 123 +++++++++++++++++++++++++++++++++++- 2 files changed, 137 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 12cc0c51c73d..99695c517de7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -573,10 +573,21 @@ struct i915_gtt { struct i915_hw_ppgtt { struct i915_address_space base; unsigned num_pd_entries; - struct page **pt_pages; - uint32_t pd_offset; - dma_addr_t *pt_dma_addr; - + union { + struct page **pt_pages; + struct page *gen8_pt_pages; + }; + struct page *pd_pages; + int num_pd_pages; + int num_pt_pages; + union { + uint32_t pd_offset; + dma_addr_t pd_dma_addr[4]; + }; + union { + dma_addr_t *pt_dma_addr; + dma_addr_t *gen8_pt_dma_addr[4]; + }; int (*enable)(struct drm_device *dev); }; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 47765d26f15a..5704d6437c64 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -31,6 +31,7 @@ #define GEN6_PPGTT_PD_ENTRIES 512 #define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) typedef uint64_t gen8_gtt_pte_t; +typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; /* PPGTT stuff */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) @@ -58,6 +59,9 @@ typedef uint64_t gen8_gtt_pte_t; #define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) #define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) +#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) +#define GEN8_LEGACY_PDPS 4 + #define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) #define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ #define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */ @@ -177,6 +181,123 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, return pte; } +static void gen8_ppgtt_cleanup(struct i915_address_space *vm) +{ + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); + int i, j; + + for (i = 0; i < ppgtt->num_pd_pages ; i++) { + if (ppgtt->pd_dma_addr[i]) { + pci_unmap_page(ppgtt->base.dev->pdev, + ppgtt->pd_dma_addr[i], + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + + for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { + dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; + if (addr) + pci_unmap_page(ppgtt->base.dev->pdev, + addr, + PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + + } + } + kfree(ppgtt->gen8_pt_dma_addr[i]); + } + + __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT); + __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT); +} + +/** + * GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a + * net effect resembling a 2-level page table in normal x86 terms. Each PDP + * represents 1GB of memory + * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space. + * + * TODO: Do something with the size parameter + **/ +static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) +{ + struct page *pt_pages; + int i, j, ret = -ENOMEM; + const int max_pdp = DIV_ROUND_UP(size, 1 << 30); + const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp; + + if (size % (1<<30)) + DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size); + + /* FIXME: split allocation into smaller pieces. 
For now we only ever do + * this once, but with full PPGTT, the multiple contiguous allocations + * will be bad. + */ + ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT)); + if (!ppgtt->pd_pages) + return -ENOMEM; + + pt_pages = alloc_pages(GFP_KERNEL, get_order(num_pt_pages << PAGE_SHIFT)); + if (!pt_pages) { + __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT)); + return -ENOMEM; + } + + ppgtt->gen8_pt_pages = pt_pages; + ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT); + ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT); + ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE; + ppgtt->base.clear_range = NULL; + ppgtt->base.insert_entries = NULL; + ppgtt->base.cleanup = gen8_ppgtt_cleanup; + + BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); + + /* + * - Create a mapping for the page directories. + * - For each page directory: + * allocate space for page table mappings. + * map each page table + */ + for (i = 0; i < max_pdp; i++) { + dma_addr_t temp; + temp = pci_map_page(ppgtt->base.dev->pdev, + &ppgtt->pd_pages[i], 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp)) + goto err_out; + + ppgtt->pd_dma_addr[i] = temp; + + ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL); + if (!ppgtt->gen8_pt_dma_addr[i]) + goto err_out; + + for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { + struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j]; + temp = pci_map_page(ppgtt->base.dev->pdev, + p, 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + + if (pci_dma_mapping_error(ppgtt->base.dev->pdev, temp)) + goto err_out; + + ppgtt->gen8_pt_dma_addr[i][j] = temp; + } + } + + DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", + ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp); + DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n", + ppgtt->num_pt_pages, + (ppgtt->num_pt_pages - num_pt_pages) + + size % (1<<30)); + return -ENOSYS; /* Not ready yet */ + +err_out: + ppgtt->base.cleanup(&ppgtt->base); + return ret; +} + static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) { struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; @@ -430,7 +551,7 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 8) ret = gen6_ppgtt_init(ppgtt); else if (IS_GEN8(dev)) - ret = -ENOSYS; + ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); else BUG(); -- cgit v1.2.3 From b1fe6673298113636f356dce8e8ac2a93a579882 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Nov 2013 21:20:14 -0800 Subject: drm/i915/bdw: Initialize the PDEs The upcoming clear and insert routines will expect that PDEs all point to valid Page Directories. Doing that lazily doesn't really buy us anything. The page allocation is done regardless earlier in init so it shouldn't hurt set the PDEs. v2: Squash in patches to implement fixed PDE write function: - If I had done this in the first place, the bug that's going to be fixed in an upcoming patch would have been much easier to find. - Use WB for PDEs. The PAT bit is used for page size. 2ME PDEs aren't even supported in BDW, so this was completely invalid. The solution is to make our PDEs WB+LLC instead of the pervious WB+eLLC. As far as I can guess, this change won't matter for performance. Thanks to Ville for the quick correction when discussing on IRC. 
v3: Return the pde type for pde encoding (Damien) Reviewed-by: Imre Deak Reviewed-by: Damien Lespiau Signed-off-by: Ben Widawsky Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 5704d6437c64..9bee72c2c3b1 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -80,6 +80,19 @@ static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, return pte; } +static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev, + dma_addr_t addr, + enum i915_cache_level level) +{ + gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW; + pde |= addr; + if (level != I915_CACHE_NONE) + pde |= PPAT_CACHED_PDE_INDEX; + else + pde |= PPAT_UNCACHED_INDEX; + return pde; +} + static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, enum i915_cache_level level, bool valid) @@ -285,6 +298,20 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) } } + /* For now, the PPGTT helper functions all require that the PDEs are + * plugged in correctly. So we do that now/here. For aliasing PPGTT, we + * will never need to touch the PDEs again */ + for (i = 0; i < max_pdp; i++) { + gen8_ppgtt_pde_t *pd_vaddr; + pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]); + for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { + dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; + pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr, + I915_CACHE_LLC); + } + kunmap_atomic(pd_vaddr); + } + DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp); DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n", -- cgit v1.2.3 From 459108b8cccfeeb965653b51be0160784e51fdc0 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Sat, 2 Nov 2013 21:07:23 -0700 Subject: drm/i915/bdw: Implement PPGTT clear range GEN8 PPGTT range clearing is very similar to GEN6 if we assume that our PDEs are all valid, which they should be. v2: Rebase on top of the address space refactoring. v3: Rebase on top of the bool use_scratch addition to the clear_range interface. 
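To make the walk in gen8_ppgtt_clear_range below easier to follow, here is a small standalone sketch of how a flat GTT entry index maps onto the 4 x 512 x 512 x 4096 legacy layout described earlier (illustrative only; constants restated locally, entry value hypothetical):

#include <stdio.h>

#define GEN8_PTES_PER_PAGE 512 /* PAGE_SIZE / sizeof(gen8_gtt_pte_t) */
#define GEN8_PDES_PER_PAGE 512 /* PAGE_SIZE / sizeof(gen8_ppgtt_pde_t) */

int main(void)
{
	unsigned first_entry = 1000000; /* hypothetical page index into the PPGTT */

	/* Flat walk used by clear_range: which page table, which slot in it. */
	unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE;
	unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE;

	/* The same index viewed through the 3-level legacy structure. */
	unsigned pdp = act_pt / GEN8_PDES_PER_PAGE;
	unsigned pde = act_pt % GEN8_PDES_PER_PAGE;

	printf("entry %u -> PDP %u, PDE %u, PTE %u\n",
	       first_entry, pdp, pde, first_pte);
	return 0;
}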
Reviewed-by: Imre Deak Signed-off-by: Ben Widawsky (v1) Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 42 ++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 9bee72c2c3b1..2f138ed3730e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -59,6 +59,7 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; #define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) #define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) +#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) #define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) #define GEN8_LEGACY_PDPS 4 @@ -194,6 +195,41 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, return pte; } +static void gen8_ppgtt_clear_range(struct i915_address_space *vm, + unsigned first_entry, + unsigned num_entries, + bool use_scratch) +{ + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); + gen8_gtt_pte_t *pt_vaddr, scratch_pte; + unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE; + unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE; + unsigned last_pte, i; + + scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr, + I915_CACHE_LLC, use_scratch); + + while (num_entries) { + struct page *page_table = &ppgtt->gen8_pt_pages[act_pt]; + + last_pte = first_pte + num_entries; + if (last_pte > GEN8_PTES_PER_PAGE) + last_pte = GEN8_PTES_PER_PAGE; + + pt_vaddr = kmap_atomic(page_table); + + for (i = first_pte; i < last_pte; i++) + pt_vaddr[i] = scratch_pte; + + kunmap_atomic(pt_vaddr); + + num_entries -= last_pte - first_pte; + first_pte = 0; + act_pt++; + } +} + static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { struct i915_hw_ppgtt *ppgtt = @@ -259,7 +295,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT); ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT); ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE; - ppgtt->base.clear_range = NULL; + ppgtt->base.clear_range = gen8_ppgtt_clear_range; ppgtt->base.insert_entries = NULL; ppgtt->base.cleanup = gen8_ppgtt_cleanup; @@ -312,6 +348,10 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) kunmap_atomic(pd_vaddr); } + ppgtt->base.clear_range(&ppgtt->base, 0, + ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE, + true); + DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp); DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n", -- cgit v1.2.3 From 9df15b499b083821b5e93fda8766b4e8eeabcd2b Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Sat, 2 Nov 2013 21:07:24 -0700 Subject: drm/i915/bdw: Implement PPGTT insert GEN8 insertion is very similar to GEN6. v2: Rebase on top of Imre's for_each_sg_page helpers. v3: Fixup my conversion (spotted by Ville). v4: Rebase on top of the address space refactoring. 
Signed-off-by: Ben Widawsky (v1) Reviewed-by: Imre Deak Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 2f138ed3730e..39bfaf290a95 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -230,6 +230,37 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, } } +static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, + struct sg_table *pages, + unsigned first_entry, + enum i915_cache_level cache_level) +{ + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); + gen8_gtt_pte_t *pt_vaddr; + unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE; + unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE; + struct sg_page_iter sg_iter; + + pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]); + for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { + dma_addr_t page_addr; + + page_addr = sg_dma_address(sg_iter.sg) + + (sg_iter.sg_pgoffset << PAGE_SHIFT); + pt_vaddr[act_pte] = gen8_pte_encode(page_addr, cache_level, + true); + if (++act_pte == GEN8_PTES_PER_PAGE) { + kunmap_atomic(pt_vaddr); + act_pt++; + pt_vaddr = kmap_atomic(&ppgtt->gen8_pt_pages[act_pt]); + act_pte = 0; + + } + } + kunmap_atomic(pt_vaddr); +} + static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { struct i915_hw_ppgtt *ppgtt = @@ -296,7 +327,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT); ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE; ppgtt->base.clear_range = gen8_ppgtt_clear_range; - ppgtt->base.insert_entries = NULL; + ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; ppgtt->base.cleanup = gen8_ppgtt_cleanup; BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); -- cgit v1.2.3 From 94e409c1449858b893c55d22f4e9ea9e845a91ed Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 4 Nov 2013 22:29:36 -0800 Subject: drm/i915/bdw: Implement PPGTT enable Legacy PPGTT on GEN8 requires programming 4 PDP registers per ring. Since all rings are using the same address space with the current code the logic is simply to program all the tables we've setup for the PPGTT. v2: Turn on PPGTT in GFX_MODE v3: v2 was the wrong patch v4: Resolve conflicts due to patch series reordering. v5: Squash in fixup from Ben: Use LRI to write PDPs The docs (and simulator seems to back up) suggest that we can only program legacy PPGTT PDPs with LRI commands. v6: Rebase around context differences conflicts. v7: Use #defines for per ring PDPs. (Damien) v8: Don't use typede'f private_t. 
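Because LRI writes are 32 bits wide, each 64-bit page directory address has to be split across the UDW/LDW register pair; a tiny standalone sketch of that split (hypothetical address, the actual register offsets are in the hunk below):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pd_dma_addr = 0x0000000123456000ULL; /* hypothetical page directory address */

	uint32_t udw = (uint32_t)(pd_dma_addr >> 32); /* LRI target: GEN8_RING_PDP_UDW */
	uint32_t ldw = (uint32_t)pd_dma_addr;         /* LRI target: GEN8_RING_PDP_LDW */

	printf("PDP_UDW <- 0x%08x\n", udw);
	printf("PDP_LDW <- 0x%08x\n", ldw);
	return 0;
}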
Signed-off-by: Ben Widawsky (up to v3 and v7) Reviewed-by: Imre Deak Reviewed-by: Damien Lespiau Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem_gtt.c | 50 +++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 3 +++ 2 files changed, 53 insertions(+) (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 39bfaf290a95..60f398183c87 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -195,6 +195,55 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, return pte; } +/* Broadwell Page Directory Pointer Descriptors */ +static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry, + uint64_t val) +{ + int ret; + + BUG_ON(entry >= 4); + + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; + + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry)); + intel_ring_emit(ring, (u32)(val >> 32)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry)); + intel_ring_emit(ring, (u32)(val)); + intel_ring_advance(ring); + + return 0; +} + +static int gen8_ppgtt_enable(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + int i, j, ret; + + /* bit of a hack to find the actual last used pd */ + int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE; + + for_each_ring(ring, dev_priv, j) { + I915_WRITE(RING_MODE_GEN7(ring), + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + } + + for (i = used_pd - 1; i >= 0; i--) { + dma_addr_t addr = ppgtt->pd_dma_addr[i]; + for_each_ring(ring, dev_priv, j) { + ret = gen8_write_pdp(ring, i, addr); + if (ret) + return ret; + } + } + return 0; +} + static void gen8_ppgtt_clear_range(struct i915_address_space *vm, unsigned first_entry, unsigned num_entries, @@ -326,6 +375,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT); ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT); ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE; + ppgtt->enable = gen8_ppgtt_enable; ppgtt->base.clear_range = gen8_ppgtt_clear_range; ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; ppgtt->base.cleanup = gen8_ppgtt_cleanup; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 132b5d083df8..d186c779aa2e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -110,6 +110,9 @@ #define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) #define PP_DIR_DCLV_2G 0xffffffff +#define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4)) +#define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8) + #define GAM_ECOCHK 0x4090 #define ECOCHK_SNB_BIT (1<<10) #define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) -- cgit v1.2.3 From 28cf541543302d0b54703613838914fd9b6e3447 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Sat, 2 Nov 2013 21:07:26 -0700 Subject: drm/i915/bdw: unleash PPGTT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: Squash in fix from Ben: Set PPGTT batches as necessary This fixes the regression in the last couple of days when we enabled PPGTT. v3: Squash in fixup to still use GTT for secure batches from Ville: BDW doesn't have a separate secure vs. 
non-secure bit in MI_BATCH_BUFFER_START. So for secure batches we have
to simply leave the PPGTT bit unset. Fortunately older generations
(except HSW) had similar limitations so execbuffer already creates a
GTT mapping for all secure batches.

Cc: Ville Syrjälä
Signed-off-by: Ben Widawsky
Reviewed-by: Imre Deak
Signed-off-by: Ville Syrjälä
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 3 +--
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 5 ++++-
 3 files changed, 6 insertions(+), 4 deletions(-)

(limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 78786c44fe52..885d595e0e02 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1146,8 +1146,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
-	 * hsw should have this fixed, but let's be paranoid and do it
-	 * unconditionally for now. */
+	 * hsw should have this fixed, but bdw mucks it up again. */
 	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
 		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 60f398183c87..4fb0efa3e24a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -439,7 +439,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 			 ppgtt->num_pt_pages,
 			 (ppgtt->num_pt_pages - num_pt_pages) +
 			 size % (1<<30));
-	return -ENOSYS; /* Not ready yet */
+	return 0;
 
 err_out:
 	ppgtt->base.cleanup(&ppgtt->base);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ef0e7b99ded7..db086f4bf712 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1699,6 +1699,9 @@ gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			      u32 offset, u32 len,
 			      unsigned flags)
 {
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
+		!(flags & I915_DISPATCH_SECURE);
 	int ret;
 
 	ret = intel_ring_begin(ring, 4);
@@ -1706,7 +1709,7 @@ gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
 	intel_ring_emit(ring, offset);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
-- cgit v1.2.3

From b42218c19f3c57d2272241e3b4944a6af0d5f14a Mon Sep 17 00:00:00 2001
From: Ville Syrjälä
Date: Sat, 2 Nov 2013 21:07:29 -0700
Subject: drm/i915/bdw: Don't muck with gtt_size on Gen8 when PPGTT setup fails
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

v2: Resolve rebase conflicts and switch to gen < 8 color for GenX
checking.

v3: Rebase on top of the address space refactoring.
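
A note on the size being handed back in the hunk below: on gen6/7 the
aliasing PPGTT's page-directory entries are written into PTE slots at the top
of the global GTT, so a failed setup returns GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE
of address space to the allocator; gen8 keeps its page tables elsewhere, which
is why the adjustment is now skipped there. A rough standalone illustration of
the arithmetic (the 512-entry and 4 KiB values are assumptions matching the
driver's layout at this point, not taken from this patch):

/* Back-of-the-envelope only, not driver code. */
#include <stdio.h>

#define GEN6_PPGTT_PD_ENTRIES	512	/* assumed gen6/7 page-directory size */
#define PAGE_SIZE		4096	/* assumed 4 KiB pages */

int main(void)
{
	unsigned long reclaimed = (unsigned long)GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;

	printf("GTT space handed back on gen6/7: %lu MiB\n", reclaimed >> 20);
	return 0;
}

This prints 2 MiB, which is the amount the pre-gen8 path adds back to gtt_size
when aliasing PPGTT setup fails.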
Reviewed-by: Ben Widawsky
Signed-off-by: Ville Syrjälä (v1)
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 4fb0efa3e24a..3620a1b0a73c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1182,7 +1182,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
 
 		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
 		drm_mm_takedown(&dev_priv->gtt.base.mm);
-		gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
+		if (INTEL_INFO(dev)->gen < 8)
+			gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
 	}
 	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 }
-- cgit v1.2.3

From 230f955f73074ec5b1b4389f3f26152eebf54404 Mon Sep 17 00:00:00 2001
From: Ben Widawsky
Date: Thu, 7 Nov 2013 21:40:48 -0800
Subject: drm/i915/bdw: Free correct number of ppgtt pages

I am unclear how this got messed up in the shuffle, but it did.

Cc: Imre Deak
Signed-off-by: Ben Widawsky
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3620a1b0a73c..5a3cc3189f1f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -335,8 +335,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 		kfree(ppgtt->gen8_pt_dma_addr[i]);
 	}
 
-	__free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
-	__free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+	__free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
 }
 
 /**
-- cgit v1.2.3

From 3a2ffb65eec6dbda2fd8151894f51c18b42c8d41 Mon Sep 17 00:00:00 2001
From: Ben Widawsky
Date: Thu, 7 Nov 2013 21:40:51 -0800
Subject: drm/i915/bdw: Limit GTT to 2GB

Because of the way in which we're allocating the pages for the Aliasing
PPGTT, we cannot actually allocate enough space for anything greater
than 2GB. Instead of a quick hack to fix this, we should defer until we
have the real solution in place (allocating much less contiguous
space).

This wasn't found sooner because we didn't have any systems supporting
more than a 2GB GTT.

Signed-off-by: Ben Widawsky
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5a3cc3189f1f..f69bdc741b80 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1239,6 +1239,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
 	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
 	if (bdw_gmch_ctl)
 		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+	if (bdw_gmch_ctl > 4) {
+		WARN_ON(!i915_preliminary_hw_support);
+		return 4<<20;
+	}
+
 	return bdw_gmch_ctl << 20;
 }
 
-- cgit v1.2.3

From c51e9701c4ad68d87baecefad6c58d051574f848 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Fri, 22 Nov 2013 10:37:53 +0000
Subject: drm/i915: Prefer setting PTE cache age to 3

We have conflicting benchmark data that suggest either age 0 or age 3
is better.
However, the earlier benchmark on which we based the switch to age 0
(commit 0d8ff15e9a15f2b393e53337a107b7a1e5919b6d
Author: Ben Widawsky
Date:   Thu Jul 4 11:02:03 2013 -0700

    drm/i915/hsw: Set correct Haswell PTE encodings)

actually seems to prefer the default PTE encoding to be age 3.
Presumably, this is in part due to the use of MOCS to override the PTE
encodings when appropriate.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=69870
Tested-by: mengmeng.meng@intel.com
Signed-off-by: Chris Wilson
Acked-by: Jesse Barnes
Reviewed-by: Eric Anholt
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3620a1b0a73c..38cb8d44a013 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 #define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
 #define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
 #define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
 #define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
 
 #define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
 #define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 	case I915_CACHE_NONE:
 		break;
 	case I915_CACHE_WT:
-		pte |= HSW_WT_ELLC_LLC_AGE0;
+		pte |= HSW_WT_ELLC_LLC_AGE3;
 		break;
 	default:
-		pte |= HSW_WB_ELLC_LLC_AGE0;
+		pte |= HSW_WB_ELLC_LLC_AGE3;
 		break;
 	}
 
-- cgit v1.2.3
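
To summarize the effect of the last hunk: iris_pte_encode() now selects the
age-3 cacheability-control indices for both write-back and write-through
mappings. The sketch below is a minimal standalone restatement of that
selection, not the driver function; the index values come straight from the
HSW_*_AGE3 defines in the hunk above, and how HSW_CACHEABILITY_CONTROL() folds
the index into the PTE is deliberately left out.

/* Illustration only: cache level -> cacheability-control index on Iris. */
#include <stdio.h>

enum cache_level { CACHE_NONE, CACHE_WT, CACHE_WB };	/* stand-in enum */

static unsigned int iris_cacheability_index(enum cache_level level)
{
	switch (level) {
	case CACHE_NONE:
		return 0x0;	/* uncached: no cacheability-control bits */
	case CACHE_WT:
		return 0x7;	/* HSW_WT_ELLC_LLC_AGE3 */
	default:
		return 0x8;	/* HSW_WB_ELLC_LLC_AGE3 */
	}
}

int main(void)
{
	printf("none=%#x wt=%#x wb=%#x\n",
	       iris_cacheability_index(CACHE_NONE),
	       iris_cacheability_index(CACHE_WT),
	       iris_cacheability_index(CACHE_WB));
	return 0;
}

If the benchmarks swing the other way again, only the two age-3 return values
here (and the corresponding defines) need to flip back to the age-0 indices
0x6 and 0xb.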