Diffstat (limited to 'drivers/iommu/intel-iommu-debugfs.c')
-rw-r--r-- | drivers/iommu/intel-iommu-debugfs.c | 137
1 file changed, 113 insertions, 24 deletions
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 7fabf9b1c2dc..73a552914455 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -14,6 +14,17 @@
 
 #include <asm/irq_remapping.h>
 
+#include "intel-pasid.h"
+
+struct tbl_walk {
+	u16 bus;
+	u16 devfn;
+	u32 pasid;
+	struct root_entry *rt_entry;
+	struct context_entry *ctx_entry;
+	struct pasid_entry *pasid_tbl_entry;
+};
+
 struct iommu_regset {
 	int offset;
 	const char *regs;
@@ -131,16 +142,86 @@ out:
 }
 DEFINE_SHOW_ATTRIBUTE(iommu_regset);
 
-static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
-			       int bus)
+static inline void print_tbl_walk(struct seq_file *m)
 {
-	struct context_entry *context;
-	int devfn;
+	struct tbl_walk *tbl_wlk = m->private;
+
+	seq_printf(m, "%02x:%02x.%x\t0x%016llx:0x%016llx\t0x%016llx:0x%016llx\t",
+		   tbl_wlk->bus, PCI_SLOT(tbl_wlk->devfn),
+		   PCI_FUNC(tbl_wlk->devfn), tbl_wlk->rt_entry->hi,
+		   tbl_wlk->rt_entry->lo, tbl_wlk->ctx_entry->hi,
+		   tbl_wlk->ctx_entry->lo);
+
+	/*
+	 * A legacy mode DMAR doesn't support PASID, hence default it to -1
+	 * indicating that it's invalid. Also, default all PASID related fields
+	 * to 0.
+	 */
+	if (!tbl_wlk->pasid_tbl_entry)
+		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", -1,
+			   (u64)0, (u64)0, (u64)0);
+	else
+		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
+			   tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[0],
+			   tbl_wlk->pasid_tbl_entry->val[1],
+			   tbl_wlk->pasid_tbl_entry->val[2]);
+}
 
-	seq_printf(m, " Context Table Entries for Bus: %d\n", bus);
-	seq_puts(m, " Entry\tB:D.F\tHigh\tLow\n");
+static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
+			   u16 dir_idx)
+{
+	struct tbl_walk *tbl_wlk = m->private;
+	u8 tbl_idx;
+
+	for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) {
+		if (pasid_pte_is_present(tbl_entry)) {
+			tbl_wlk->pasid_tbl_entry = tbl_entry;
+			tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx;
+			print_tbl_walk(m);
+		}
+
+		tbl_entry++;
+	}
+}
+
+static void pasid_dir_walk(struct seq_file *m, u64 pasid_dir_ptr,
+			   u16 pasid_dir_size)
+{
+	struct pasid_dir_entry *dir_entry = phys_to_virt(pasid_dir_ptr);
+	struct pasid_entry *pasid_tbl;
+	u16 dir_idx;
+
+	for (dir_idx = 0; dir_idx < pasid_dir_size; dir_idx++) {
+		pasid_tbl = get_pasid_table_from_pde(dir_entry);
+		if (pasid_tbl)
+			pasid_tbl_walk(m, pasid_tbl, dir_idx);
+
+		dir_entry++;
+	}
+}
+
+static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
+{
+	struct context_entry *context;
+	u16 devfn, pasid_dir_size;
+	u64 pasid_dir_ptr;
 
 	for (devfn = 0; devfn < 256; devfn++) {
+		struct tbl_walk tbl_wlk = {0};
+
+		/*
+		 * Scalable mode root entry points to upper scalable mode
+		 * context table and lower scalable mode context table. Each
+		 * scalable mode context table has 128 context entries where as
+		 * legacy mode context table has 256 context entries. So in
+		 * scalable mode, the context entries for former 128 devices are
+		 * in the lower scalable mode context table, while the latter
+		 * 128 devices are in the upper scalable mode context table.
+		 * In scalable mode, when devfn > 127, iommu_context_addr()
+		 * automatically refers to upper scalable mode context table and
+		 * hence the caller doesn't have to worry about differences
+		 * between scalable mode and non scalable mode.
+		 */
 		context = iommu_context_addr(iommu, bus, devfn, 0);
 		if (!context)
 			return;
@@ -148,33 +229,41 @@ static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
 		if (!context_present(context))
 			continue;
 
-		seq_printf(m, " %-5d\t%02x:%02x.%x\t%-6llx\t%llx\n", devfn,
-			   bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
-			   context[0].hi, context[0].lo);
+		tbl_wlk.bus = bus;
+		tbl_wlk.devfn = devfn;
+		tbl_wlk.rt_entry = &iommu->root_entry[bus];
+		tbl_wlk.ctx_entry = context;
+		m->private = &tbl_wlk;
+
+		if (pasid_supported(iommu) && is_pasid_enabled(context)) {
+			pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
+			pasid_dir_size = get_pasid_dir_size(context);
+			pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
+			continue;
+		}
+
+		print_tbl_walk(m);
 	}
 }
 
-static void root_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu)
+static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
 {
 	unsigned long flags;
-	int bus;
+	u16 bus;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	seq_printf(m, "IOMMU %s: Root Table Address:%llx\n", iommu->name,
+	seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
 		   (u64)virt_to_phys(iommu->root_entry));
-	seq_puts(m, "Root Table Entries:\n");
+	seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");
 
-	for (bus = 0; bus < 256; bus++) {
-		if (!(iommu->root_entry[bus].lo & 1))
-			continue;
+	/*
+	 * No need to check if the root entry is present or not because
+	 * iommu_context_addr() performs the same check before returning
+	 * context entry.
+	 */
+	for (bus = 0; bus < 256; bus++)
+		ctx_tbl_walk(m, iommu, bus);
 
-		seq_printf(m, " Bus: %d H: %llx L: %llx\n", bus,
-			   iommu->root_entry[bus].hi,
-			   iommu->root_entry[bus].lo);
-
-		ctx_tbl_entry_show(m, iommu, bus);
-		seq_putc(m, '\n');
-	}
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -185,7 +274,7 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
 
 	rcu_read_lock();
 	for_each_active_iommu(iommu, drhd) {
-		root_tbl_entry_show(m, iommu);
+		root_tbl_walk(m, iommu);
 		seq_putc(m, '\n');
 	}
 	rcu_read_unlock();
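
For reference, the walk above is exposed through the driver's existing dmar_translation_struct debugfs file (typically /sys/kernel/debug/iommu/intel/dmar_translation_struct, assuming debugfs is mounted at its default location). With this patch, each present context entry becomes one tab-separated row. A rough sketch of a legacy-mode line, with entirely hypothetical addresses and columns space-aligned here for readability (the real output is tab-separated):

  IOMMU dmar0: Root Table Address: 0x45a8d2000
  B.D.F    Root_entry                              Context_entry                           PASID  PASID_table_entry
  00:14.0  0x0000000000000000:0x000000045a8c1001  0x0000000000000102:0x000000045c889005  -1     0x0000000000000000:0x0000000000000000:0x0000000000000000

In scalable mode, print_tbl_walk() is instead called once per present PASID table entry, so a single device function can emit several rows, each carrying its own PASID and PASID table entry values.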
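
The PASID value printed in each scalable-mode row is composed from the PASID directory index and the PASID table index, exactly as pasid_tbl_walk() does above. Below is a minimal standalone sketch of that arithmetic; the PASID_PDE_SHIFT value of 6 (64 PASID table entries per directory entry) is an assumption mirroring intel-pasid.h and is not spelled out in this patch:

/*
 * Userspace sketch (not part of the patch): how a PASID is composed from a
 * PASID directory index and a PASID table index.
 */
#include <stdint.h>
#include <stdio.h>

/* Assumed values, mirroring intel-pasid.h: 64 table entries per directory entry. */
#define PASID_PDE_SHIFT		6
#define PASID_TBL_ENTRIES	(1 << PASID_PDE_SHIFT)

/* Same expression as tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx */
static uint32_t compose_pasid(uint16_t dir_idx, uint8_t tbl_idx)
{
	return ((uint32_t)dir_idx << PASID_PDE_SHIFT) + tbl_idx;
}

int main(void)
{
	/* Directory entry 2, table entry 5 -> PASID 2 * 64 + 5 = 133 */
	printf("PASID = %u\n", compose_pasid(2, 5));
	return 0;
}

Built as ordinary userspace C, this prints PASID = 133 for directory entry 2 and table entry 5, matching the (dir_idx << PASID_PDE_SHIFT) + tbl_idx expression used by the walker.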