Diffstat (limited to 'drivers/misc/ocxl/link.c')
-rw-r--r-- | drivers/misc/ocxl/link.c | 70 |
1 file changed, 68 insertions, 2 deletions
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index fd73d3bc0eb6..ab039c115381 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -2,8 +2,10 @@
 // Copyright 2017 IBM Corp.
 #include <linux/sched/mm.h>
 #include <linux/mutex.h>
+#include <linux/mm.h>
 #include <linux/mm_types.h>
 #include <linux/mmu_context.h>
+#include <linux/mmu_notifier.h>
 #include <asm/copro.h>
 #include <asm/pnv-ocxl.h>
 #include <asm/xive.h>
@@ -33,6 +35,7 @@
 
 #define SPA_PE_VALID		0x80000000
 
+struct ocxl_link;
 
 struct pe_data {
 	struct mm_struct *mm;
@@ -41,6 +44,8 @@ struct pe_data {
 	/* opaque pointer to be passed to the above callback */
 	void *xsl_err_data;
 	struct rcu_head rcu;
+	struct ocxl_link *link;
+	struct mmu_notifier mmu_notifier;
 };
 
 struct spa {
@@ -83,6 +88,8 @@ struct ocxl_link {
 	int domain;
 	int bus;
 	int dev;
+	void __iomem *arva;	/* ATSD register virtual address */
+	spinlock_t atsd_lock;	/* to serialize shootdowns */
 	atomic_t irq_available;
 	struct spa *spa;
 	void *platform_data;
@@ -388,6 +395,7 @@ static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_l
 	link->bus = dev->bus->number;
 	link->dev = PCI_SLOT(dev->devfn);
 	atomic_set(&link->irq_available, MAX_IRQ_PER_LINK);
+	spin_lock_init(&link->atsd_lock);
 
 	rc = alloc_spa(dev, link);
 	if (rc)
@@ -403,6 +411,13 @@ static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_l
 	if (rc)
 		goto err_xsl_irq;
 
+	/* if link->arva is not defined, MMIO registers are not used to
+	 * generate TLB invalidate. PowerBus snooping is enabled.
+	 * Otherwise, PowerBus snooping is disabled. TLB Invalidates are
+	 * initiated using MMIO registers.
+	 */
+	pnv_ocxl_map_lpar(dev, mfspr(SPRN_LPID), 0, &link->arva);
+
 	*out_link = link;
 	return 0;
 
@@ -454,6 +469,11 @@ static void release_xsl(struct kref *ref)
 {
 	struct ocxl_link *link = container_of(ref, struct ocxl_link, ref);
 
+	if (link->arva) {
+		pnv_ocxl_unmap_lpar(link->arva);
+		link->arva = NULL;
+	}
+
 	list_del(&link->list);
 	/* call platform code before releasing data */
 	pnv_ocxl_spa_release(link->platform_data);
@@ -470,6 +490,27 @@ void ocxl_link_release(struct pci_dev *dev, void *link_handle)
 }
 EXPORT_SYMBOL_GPL(ocxl_link_release);
 
+static void invalidate_range(struct mmu_notifier *mn,
+			     struct mm_struct *mm,
+			     unsigned long start, unsigned long end)
+{
+	struct pe_data *pe_data = container_of(mn, struct pe_data, mmu_notifier);
+	struct ocxl_link *link = pe_data->link;
+	unsigned long addr, pid, page_size = PAGE_SIZE;
+
+	pid = mm->context.id;
+	trace_ocxl_mmu_notifier_range(start, end, pid);
+
+	spin_lock(&link->atsd_lock);
+	for (addr = start; addr < end; addr += page_size)
+		pnv_ocxl_tlb_invalidate(link->arva, pid, addr, page_size);
+	spin_unlock(&link->atsd_lock);
+}
+
+static const struct mmu_notifier_ops ocxl_mmu_notifier_ops = {
+	.invalidate_range = invalidate_range,
+};
+
 static u64 calculate_cfg_state(bool kernel)
 {
 	u64 state;
@@ -494,7 +535,7 @@ static u64 calculate_cfg_state(bool kernel)
 }
 
 int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
-		u64 amr, struct mm_struct *mm,
+		u64 amr, u16 bdf, struct mm_struct *mm,
 		void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
 		void *xsl_err_data)
 {
@@ -526,9 +567,13 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
 	pe_data->mm = mm;
 	pe_data->xsl_err_cb = xsl_err_cb;
 	pe_data->xsl_err_data = xsl_err_data;
+	pe_data->link = link;
+	pe_data->mmu_notifier.ops = &ocxl_mmu_notifier_ops;
 
 	memset(pe, 0, sizeof(struct ocxl_process_element));
 	pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0));
+	pe->pasid = cpu_to_be32(pasid << (31 - 19));
+	pe->bdf = cpu_to_be16(bdf);
 	pe->lpid = cpu_to_be32(mfspr(SPRN_LPID));
 	pe->pid = cpu_to_be32(pidr);
 	pe->tid = cpu_to_be32(tidr);
@@ -540,8 +585,17 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
 	 * by the nest MMU. If we have a kernel context, TLBIs are
 	 * already global.
 	 */
-	if (mm)
+	if (mm) {
 		mm_context_add_copro(mm);
+		if (link->arva) {
+			/* Use MMIO registers for the TLB Invalidate
+			 * operations.
+			 */
+			trace_ocxl_init_mmu_notifier(pasid, mm->context.id);
+			mmu_notifier_register(&pe_data->mmu_notifier, mm);
+		}
+	}
+
 	/*
 	 * Barrier is to make sure PE is visible in the SPA before it
 	 * is used by the device. It also helps with the global TLBI
@@ -672,6 +726,18 @@ int ocxl_link_remove_pe(void *link_handle, int pasid)
 		WARN(1, "Couldn't find pe data when removing PE\n");
 	} else {
 		if (pe_data->mm) {
+			if (link->arva) {
+				trace_ocxl_release_mmu_notifier(pasid,
+								pe_data->mm->context.id);
+				mmu_notifier_unregister(&pe_data->mmu_notifier,
+							pe_data->mm);
+				spin_lock(&link->atsd_lock);
+				pnv_ocxl_tlb_invalidate(link->arva,
+							pe_data->mm->context.id,
+							0ull,
+							PAGE_SIZE);
+				spin_unlock(&link->atsd_lock);
+			}
 			mm_context_remove_copro(pe_data->mm);
 			mmdrop(pe_data->mm);
 		}
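
Note: the patch wires OpenCAPI links into the MMU notifier framework: alloc_link() maps the ATSD (Address Translation ShootDown) MMIO registers via pnv_ocxl_map_lpar(), each process element with an mm registers an mmu_notifier whose invalidate_range callback walks the range and issues pnv_ocxl_tlb_invalidate() under atsd_lock, and the process element gains PASID and BDF fields. The standalone sketch below only reproduces the arithmetic of the new assignment pe->pasid = cpu_to_be32(pasid << (31 - 19)): the shift by 12 would place a 20-bit PASID in bits 31:12 of the field before it is stored big-endian (the field width is an inference from the shift, not stated in the diff). It is ordinary userspace C, not kernel code; htonl() stands in for cpu_to_be32(), and the PASID value is made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <arpa/inet.h>

int main(void)
{
        /* Hypothetical PASID value, chosen only for the demonstration. */
        uint32_t pasid = 0x5;

        /* Same arithmetic as the patch: shift by (31 - 19) = 12,
         * i.e. move the PASID into the upper bits of the 32-bit field. */
        uint32_t shifted = pasid << (31 - 19);

        /* htonl() stands in for the kernel's cpu_to_be32(). */
        uint32_t be_field = htonl(shifted);

        printf("pasid             = 0x%08" PRIx32 "\n", pasid);
        printf("pasid << (31-19)  = 0x%08" PRIx32 "\n", shifted);
        printf("big-endian stored = 0x%08" PRIx32 "\n", be_field);
        return 0;
}

Built with any C compiler, it prints 0x00005000 for the shifted value, showing the PASID landing in bits 31:12; the last line is the numeric value of the big-endian encoding as seen by the host CPU.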